kernel - Reorder critical section interlock in splz()
[dragonfly.git] / sys/platform/pc32/i386/ipl.s
/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ipl.s
 *
 * $FreeBSD: src/sys/i386/isa/ipl.s,v 1.32.2.3 2002/05/16 16:03:56 bde Exp $
 */

#include <machine/asmacros.h>
#include <machine/segments.h>
#include <machine/ipl.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include "assym.s"

/*
 * AT/386
 * Vector interrupt control section
 *
 * ipending - Pending interrupts (set when a masked interrupt occurs)
 * spending - Pending software interrupts
 */
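/*
 * (added note, inferred from the dispatch loops below: ipending is six
 *  32-bit words per cpu, indexed via PCPU_E4(ipending,group) for groups
 *  0-5, so a hardware intr number is group*32 + bit; spending is a single
 *  32-bit word whose bit N maps to software interrupt FIRST_SOFTINT + N)
 */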
	.data
	ALIGN_DATA

	.globl	fastunpend_count
fastunpend_count:	.long	0

	.text
	SUPERALIGN_TEXT

	/*
	 * GENERAL NOTES
	 *
	 * - interrupts are always called with a critical section held
	 *
	 * - we release our critical section when scheduling interrupt
	 *   or softinterrupt threads so that they can preempt
	 *   (unless we are called manually from a critical section, in
	 *   which case there will still be a critical section and
	 *   they won't preempt anyway).
	 *
	 * - TD_NEST_COUNT prevents splz from nesting too deeply within
	 *   itself.  It is *not* actually an interrupt nesting count.
	 *   PCPU(intr_nesting_level) is an interrupt nesting count.
	 *
	 * - We have to be careful with regard to local interrupts
	 *   occurring simultaneously with our doreti and splz
	 *   processing.
	 */
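
	/*
	 * (added illustrative sketch, not from the original sources: the
	 *  doreti/splz loops below roughly implement, in C-like pseudocode,
	 *
	 *	if (reqflags != 0 && td->td_critcount == 0) {
	 *		++td->td_critcount;	// interlock new dispatches
	 *		while (an unmasked hardware or soft int is pending)
	 *			dispatch it;	// may drop critcount briefly
	 *		--td->td_critcount;
	 *		if (no skipped IRQs remain)
	 *			reqflags &= ~RQF_INTPEND;
	 *	}
	 *
	 *  td_critcount is an assumed C-side name for the field accessed
	 *  via TD_CRITCOUNT below)
	 */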

	/*
	 * DORETI
	 *
	 * Handle return from interrupts, traps and syscalls.  This function
	 * checks the cpl for unmasked pending interrupts (hardware or soft)
	 * and schedules them if appropriate, then irets.
	 *
	 * If we are in a critical section we cannot run any pending ints.
	 *
	 * NOTE: Since SPLs no longer exist, all callers of this function
	 * push $0 for the CPL.  HOWEVER, we *STILL* use the cpl mask within
	 * this function to mark interrupts which could not be dispatched
	 * due to the unavailability of the BGL.
	 */
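	/*
	 * (added summary of register usage in the loop below, inferred
	 *  from the code:
	 *	%eax - mask of IRQs skipped because the BGL was unavailable;
	 *	       also occupies the CPL slot when pushed into the frame
	 *	%ebx - PCPU(curthread)
	 *	%ecx - ~%eax for testing ipending, then the located intr
	 *	%edx - ipending group index, 0-5)
	 */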
	SUPERALIGN_TEXT
	.globl	doreti
	.type	doreti,@function
doreti:
	FAKE_MCOUNT(bintr)		/* init "from" bintr -> doreti */
	popl	%eax			/* cpl to restore XXX */
	movl	$0,%eax			/* irq mask unavailable due to BGL */
	movl	PCPU(curthread),%ebx
	cli				/* interlock with td_critcount */
	cmpl	$0,PCPU(reqflags)	/* short cut if nothing to do */
	je	5f
	testl	$-1,TD_CRITCOUNT(%ebx)	/* can't unpend if in critical sec */
	jne	5f
	incl	TD_CRITCOUNT(%ebx)	/* force all ints to pending */
doreti_next:
	cli				/* re-assert cli on loop */
	movl	%eax,%ecx		/* irq mask unavailable due to BGL */
	notl	%ecx
#ifdef SMP
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	doreti_ipiq
#endif
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	doreti_timer
	/*
	 * check for an unmasked int (6 groups)
	 */
	movl	$0,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$1,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$2,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$3,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$4,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$5,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	PCPU(spending),%ecx	/* check for a pending software int */
	cmpl	$0,%ecx
	jnz	doreti_soft

	testl	$RQF_AST_MASK,PCPU(reqflags) /* any pending ASTs? */
	jz	2f
	testl	$PSL_VM,TF_EFLAGS(%esp)
	jz	1f
	cmpl	$1,in_vm86call		/* YYY make per 'cpu'? */
	jnz	doreti_ast
1:
	/* ASTs are only applicable when returning to userland */
	testb	$SEL_RPL_MASK,TF_CS(%esp)
	jnz	doreti_ast
2:
	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * %eax contains the mask of IRQ's that are not available due to
	 * BGL requirements.  We can only clear RQF_INTPEND if *ALL* pending
	 * interrupts have been processed.
	 */
	decl	TD_CRITCOUNT(%ebx)	/* interlocked with cli */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	MEXITCOUNT

	/*
	 * Restore the segment registers.  Since segment register values
	 * can be set from user mode, this can result in a kernel mode
	 * exception.  The trap code will revector to the *_fault code
	 * which then sets up a T_PROTFLT signal.  If the signal is
	 * sent to userland, sendsig() will automatically clean up all
	 * the segment registers to avoid a loop.
	 */
	.globl	doreti_popl_gs
	.globl	doreti_popl_fs
	.globl	doreti_popl_es
	.globl	doreti_popl_ds
	.globl	doreti_iret
	.globl	doreti_syscall_ret
doreti_syscall_ret:
doreti_popl_gs:
	popl	%gs
doreti_popl_fs:
	popl	%fs
doreti_popl_es:
	popl	%es
doreti_popl_ds:
	popl	%ds
	popal
	addl	$3*4,%esp	/* xflags, trap, err */
doreti_iret:
	iret

	/*
	 * Interrupts are likely disabled due to the above interlock
	 * between cli/iret.  We must enable them before calling any
	 * high level function.
	 */
	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	subl	$3*4,%esp	/* xflags, trap, err */
	pushal
	pushl	%ds
	.globl	doreti_popl_ds_fault
doreti_popl_ds_fault:
	pushl	%es
	.globl	doreti_popl_es_fault
doreti_popl_es_fault:
	pushl	%fs
	.globl	doreti_popl_fs_fault
doreti_popl_fs_fault:
	pushl	%gs
	.globl	doreti_popl_gs_fault
doreti_popl_gs_fault:
	sti
	movl	$0,TF_ERR(%esp)	/* XXX should be the error code */
	movl	$T_PROTFLT,TF_TRAPNO(%esp)
	jmp	alltraps_with_regs_pushed

	/*
	 * Interrupt pending.  NOTE: stack context holds frame structure
	 * for interrupt procedure, do not do random pushes or pops!
	 */
	ALIGN_TEXT
doreti_fast:
	andl	PCPU_E4(ipending,%edx),%ecx
	sti
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU_E4(ipending,%edx)
					/* is it really still pending? */
	jnc	doreti_next

	shll	$5, %edx
	orl	%edx, %ecx		/* form intr number */

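	/* (added example: group 2, bit 5 -> (2 << 5) | 5 = intr 69) */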
	pushl	%eax			/* save IRQ mask unavailable for BGL */
					/* NOTE: is also CPL in frame */
	call	dofastunpend		/* unpend intr %ecx */
	popl	%eax
	jmp	doreti_next

	/*
	 * SOFT interrupt pending
	 *
	 * Temporarily back-out our critical section to allow an interrupt
	 * to preempt us when we schedule it.  Bump TD_NEST_COUNT to
	 * prevent the switch code from recursing via splz too deeply.
	 */
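	/*
	 * (added note: the single word pushed for the call below implies
	 *  an assumed C prototype of roughly void sched_ithd_soft(int intr),
	 *  where intr = FIRST_SOFTINT + bit)
	 */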
	ALIGN_TEXT
doreti_soft:
	sti
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	doreti_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	pushl	%eax
	pushl	%ecx
	incl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	decl	TD_CRITCOUNT(%ebx)	/* so we can preempt */
	call	sched_ithd_soft		/* YYY must pull in imasks */
	incl	TD_CRITCOUNT(%ebx)
	decl	TD_NEST_COUNT(%ebx)
	addl	$4,%esp
	popl	%eax
	jmp	doreti_next

	/*
	 * AST pending.  We clear RQF_AST_SIGNAL automatically, the others
	 * are cleared by the trap as they are processed.
	 *
	 * Temporarily back-out our critical section because trap() can be
	 * a long-winded call, and we want to be more syscall-like.
	 *
	 * YYY theoretically we can call lwkt_switch directly if all we need
	 * to do is a reschedule.
	 */
doreti_ast:
	andl	$~(RQF_AST_SIGNAL|RQF_AST_UPCALL),PCPU(reqflags)
	sti
	movl	%eax,%esi		/* save cpl (can't use stack) */
	movl	$T_ASTFLT,TF_TRAPNO(%esp)
	pushl	%esp			/* pass frame by reference */
	decl	TD_CRITCOUNT(%ebx)
	call	trap
	incl	TD_CRITCOUNT(%ebx)
	addl	$4,%esp
	movl	%esi,%eax		/* restore cpl for loop */
	jmp	doreti_next

#ifdef SMP
	/*
	 * IPIQ message pending.  We clear RQF_IPIQ automatically.
	 */
doreti_ipiq:
	movl	%eax,%esi		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_IPIQ,PCPU(reqflags)
	sti
	subl	$8,%esp			/* add dummy vec and ppl */
	pushl	%esp			/* pass frame by reference */
	call	lwkt_process_ipiq_frame
	addl	$12,%esp
	decl	PCPU(intr_nesting_level)
	movl	%esi,%eax		/* restore cpl for loop */
	jmp	doreti_next
#endif

doreti_timer:
	movl	%eax,%esi		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_TIMER,PCPU(reqflags)
	sti
	subl	$8,%esp			/* add dummy vec and ppl */
	pushl	%esp			/* pass frame by reference */
	call	lapic_timer_process_frame
	addl	$12,%esp
	decl	PCPU(intr_nesting_level)
	movl	%esi,%eax		/* restore cpl for loop */
	jmp	doreti_next

	/*
	 * SPLZ() a C callable procedure to dispatch any unmasked pending
	 * interrupts regardless of critical section nesting.  ASTs
	 * are not dispatched.
	 *
	 * Use %eax to track those IRQs that could not be processed
	 * due to BGL requirements.
	 */
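	/*
	 * (added note: splz takes no arguments and returns nothing, so
	 *  the assumed C prototype is simply void splz(void))
	 */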
	SUPERALIGN_TEXT

ENTRY(splz)
	pushfl
	pushl	%ebx
	movl	PCPU(curthread),%ebx
	incl	TD_CRITCOUNT(%ebx)
	movl	$0,%eax

splz_next:
	cli
	movl	%eax,%ecx		/* ecx = ~CPL */
	notl	%ecx
#ifdef SMP
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	splz_ipiq
#endif
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	splz_timer

	/*
	 * check for an unmasked int (6 groups)
	 */
	movl	$0,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$1,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$2,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$3,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$4,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$5,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	PCPU(spending),%ecx
	cmpl	$0,%ecx
	jnz	splz_soft

	decl	TD_CRITCOUNT(%ebx)

	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * If our mask of IRQs we couldn't process due to BGL requirements
	 * is 0 then there are no pending interrupt sources left and we
	 * can clear RQF_INTPEND.
	 */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	popl	%ebx
	popfl
	ret

	/*
	 * Interrupt pending
	 */
	ALIGN_TEXT
splz_fast:
	andl	PCPU_E4(ipending,%edx),%ecx
	sti
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU_E4(ipending,%edx)
					/* is it really still pending? */
	jnc	splz_next

	shll	$5, %edx
	orl	%edx, %ecx		/* form intr number */

	pushl	%eax
	call	dofastunpend		/* unpend intr %ecx */
	popl	%eax
	jmp	splz_next

	/*
	 * SOFT interrupt pending
	 *
	 * Temporarily back-out our critical section to allow the interrupt
	 * to preempt us.
	 */
	ALIGN_TEXT
splz_soft:
	sti
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	splz_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	pushl	%eax
	pushl	%ecx
	incl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	decl	TD_CRITCOUNT(%ebx)
	call	sched_ithd_soft		/* YYY must pull in imasks */
	incl	TD_CRITCOUNT(%ebx)
	decl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	addl	$4,%esp
	popl	%eax
	jmp	splz_next

#ifdef SMP
splz_ipiq:
	andl	$~RQF_IPIQ,PCPU(reqflags)
	sti
	pushl	%eax
	call	lwkt_process_ipiq
	popl	%eax
	jmp	splz_next
#endif

splz_timer:
	andl	$~RQF_TIMER,PCPU(reqflags)
	sti
	pushl	%eax
	call	lapic_timer_process
	popl	%eax
	jmp	splz_next

	/*
	 * dofastunpend(%ecx:intr)
	 *
	 * An interrupt previously made pending can now be run.  Execute
	 * it by pushing a dummy interrupt frame and calling
	 * ithread_fast_handler to execute or schedule it.
	 *
	 * ithread_fast_handler() returns 0 if it wants us to unmask
	 * further interrupts.
	 */
#define PUSH_DUMMY \
	pushfl ;		/* phys int frame / flags */ \
	pushl %cs ;		/* phys int frame / cs */ \
	pushl 12(%esp) ;	/* original caller eip */ \
	pushl $0 ;		/* dummy error code */ \
	pushl $0 ;		/* dummy trap type */ \
	pushl $0 ;		/* dummy xflags */ \
	subl $13*4,%esp ;	/* pushal + 4 seg regs (dummy) + CPL */ \

#define POP_DUMMY \
	addl $19*4,%esp ; \

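	/*
	 * (added accounting, derived from the macros above: PUSH_DUMMY
	 *  pushes 6 words (eflags, cs, eip, err, trapno, xflags) and
	 *  reserves 13 more via subl $13*4 (8 pushal registers + 4 segment
	 *  regs + CPL), 19 words total, matching POP_DUMMY's addl $19*4.
	 *  The intr word pushed separately in dofastunpend is removed by
	 *  its own addl $4,%esp.)
	 */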
dofastunpend:
	pushl	%ebp			/* frame for backtrace */
	movl	%esp,%ebp
	PUSH_DUMMY
	pushl	%ecx			/* last part of intrframe = intr */
	incl	fastunpend_count
	pushl	%esp			/* pass frame by reference */
	call	ithread_fast_handler	/* returns 0 to unmask */
	addl	$4,%esp			/* remove pointer, now intr on top */
	cmpl	$0,%eax
	jnz	1f
	movl	MachIntrABI + MACHINTR_INTREN, %eax
	call	*%eax			/* MachIntrABI.intren(intr) */
1:
	addl	$4,%esp
	POP_DUMMY
	popl	%ebp
	ret