kernel - Reorder critical section interlock in splz()
[dragonfly.git] / sys / platform / pc32 / i386 / ipl.s
/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ipl.s
 *
 * $FreeBSD: src/sys/i386/isa/ipl.s,v 1.32.2.3 2002/05/16 16:03:56 bde Exp $
 */

#include <machine/asmacros.h>
#include <machine/segments.h>
#include <machine/ipl.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include "assym.s"

/*
 * AT/386
 * Vector interrupt control section
 *
 *  ipending	- Pending interrupts (set when a masked interrupt occurs)
 *  spending	- Pending software interrupts
 */
	.data
	ALIGN_DATA

	.globl	fastunpend_count
fastunpend_count:	.long	0

	.text
	SUPERALIGN_TEXT

	/*
	 * GENERAL NOTES
	 *
	 *	- interrupts are always called with a critical section held
	 *
	 *	- we release our critical section when scheduling interrupt
	 *	  or software interrupt threads so that they can preempt
	 *	  (unless we are called manually from a critical section, in
	 *	  which case there will still be a critical section and
	 *	  they won't preempt anyway).
	 *
	 *	- TD_NEST_COUNT prevents splz from nesting too deeply within
	 *	  itself.  It is *not* actually an interrupt nesting count.
	 *	  PCPU(intr_nesting_level) is an interrupt nesting count.
	 *
	 *	- We have to be careful in regard to local interrupts
	 *	  occurring simultaneously with our doreti and splz
	 *	  processing.
	 */
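
	/*
	 * Hedged C sketch (illustration only, not part of the build) of
	 * the interlock the notes above describe, mirroring the entry
	 * test doreti performs below.  Names follow this file's usage;
	 * the real C-side helpers live elsewhere in the kernel and may
	 * differ.
	 *
	 *	cli();				// hard-disable interrupts
	 *	if (gd->gd_reqflags == 0)	// nothing pending at all
	 *		return;
	 *	if (td->td_critcount != 0)	// can't unpend in a crit sec
	 *		return;
	 *	++td->td_critcount;		// force new ints to pend
	 *	// ... dispatch loop runs here, then the count is dropped
	 */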

	/*
	 * DORETI
	 *
	 * Handle return from interrupts, traps and syscalls.  This function
	 * checks the cpl for unmasked pending interrupts (hardware or soft)
	 * and schedules them if appropriate, then irets.
	 *
	 * If we are in a critical section we cannot run any pending ints.
	 *
	 * NOTE: Since SPLs no longer exist, all callers of this function
	 * push $0 for the CPL.  HOWEVER, we *STILL* use the cpl mask within
	 * this function to mark interrupts which could not be dispatched
	 * due to the unavailability of the BGL.
	 */
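
	/*
	 * Hedged C sketch of the dispatch loop below (illustration only,
	 * not part of the build).  'mask' plays the role of %eax: IRQs
	 * that could not be dispatched because the BGL was unavailable.
	 *
	 *	uint32_t mask = 0;
	 *	for (;;) {
	 *		cli();
	 *		if (ipiq_pending())	continue_via(doreti_ipiq);
	 *		if (timer_pending())	continue_via(doreti_timer);
	 *		for (int grp = 0; grp < 6; ++grp)
	 *			if (ipending[grp] & ~mask)
	 *				continue_via(doreti_fast);
	 *		if (spending)		continue_via(doreti_soft);
	 *		if (ast_pending() && to_userland())
	 *			continue_via(doreti_ast);
	 *		break;			// nothing left; iret
	 *	}
	 *
	 * continue_via() is a hypothetical stand-in for the jumps below;
	 * each target re-enters at doreti_next.
	 */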
	SUPERALIGN_TEXT
	.globl	doreti
	.type	doreti,@function
doreti:
	FAKE_MCOUNT(bintr)		/* init "from" bintr -> doreti */
	popl	%eax			/* cpl to restore XXX */
	movl	$0,%eax			/* irq mask unavailable due to BGL */
	movl	PCPU(curthread),%ebx
	cli				/* interlock with td_critcount */
	cmpl	$0,PCPU(reqflags)	/* short cut if nothing to do */
	je	5f
	testl	$-1,TD_CRITCOUNT(%ebx)	/* can't unpend if in critical sec */
	jne	5f
	incl	TD_CRITCOUNT(%ebx)	/* force all ints to pending */
doreti_next:
	cli				/* re-assert cli on loop */
	movl	%eax,%ecx		/* irq mask unavailable due to BGL */
	notl	%ecx
#ifdef SMP
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	doreti_ipiq
#endif
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	doreti_timer
	/*
	 * check for an unmasked int (6 groups)
	 */
	movl	$0,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$1,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$2,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$3,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$4,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$5,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	PCPU(spending),%ecx	/* check for a pending software int */
	cmpl	$0,%ecx
	jnz	doreti_soft

	testl	$RQF_AST_MASK,PCPU(reqflags) /* any pending ASTs? */
	jz	2f
	testl	$PSL_VM,TF_EFLAGS(%esp)
	jz	1f
	cmpl	$1,in_vm86call		/* YYY make per 'cpu'? */
	jnz	doreti_ast
1:
	/* ASTs are only applicable when returning to userland */
	testb	$SEL_RPL_MASK,TF_CS(%esp)
	jnz	doreti_ast
2:
	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * %eax contains the mask of IRQ's that are not available due to
	 * BGL requirements.  We can only clear RQF_INTPEND if *ALL* pending
	 * interrupts have been processed.
	 */
	decl	TD_CRITCOUNT(%ebx)	/* interlocked with cli */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	MEXITCOUNT

	/*
	 * Restore the segment registers.  Since segment register values
	 * can be set from user mode, this can result in a kernel mode
	 * exception.  The trap code will revector to the *_fault code
	 * which then sets up a T_PROTFLT signal.  If the signal is
	 * sent to userland, sendsig() will automatically clean up all
	 * the segment registers to avoid a loop.
	 */
	.globl	doreti_popl_gs
	.globl	doreti_popl_fs
	.globl	doreti_popl_es
	.globl	doreti_popl_ds
	.globl	doreti_iret
	.globl	doreti_syscall_ret
doreti_syscall_ret:
doreti_popl_gs:
	popl	%gs
doreti_popl_fs:
	popl	%fs
doreti_popl_es:
	popl	%es
doreti_popl_ds:
	popl	%ds
	popal
	addl	$3*4,%esp	/* xflags, trap, err */
doreti_iret:
	iret

	/*
	 * Interrupts are likely disabled due to the above interlock
	 * between cli/iret.  We must enable them before calling any
	 * high level function.
	 */
	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	subl	$3*4,%esp	/* xflags, trap, err */
	pushal
	pushl	%ds
	.globl	doreti_popl_ds_fault
doreti_popl_ds_fault:
	pushl	%es
	.globl	doreti_popl_es_fault
doreti_popl_es_fault:
	pushl	%fs
	.globl	doreti_popl_fs_fault
doreti_popl_fs_fault:
	pushl	%gs
	.globl	doreti_popl_gs_fault
doreti_popl_gs_fault:
	sti
	movl	$0,TF_ERR(%esp)	/* XXX should be the error code */
	movl	$T_PROTFLT,TF_TRAPNO(%esp)
	jmp	alltraps_with_regs_pushed

	/*
	 * Interrupt pending.  NOTE: stack context holds frame structure
	 * for interrupt procedure, do not do random pushes or pops!
	 */
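	/*
	 * Illustrative note: each ipending group holds 32 IRQ bits, so
	 * doreti_fast below recovers the interrupt number from group
	 * %edx and bit %ecx as, in C terms:
	 *
	 *	intr = (grp << 5) | bit;	// grp * 32 + bit
	 */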
	ALIGN_TEXT
doreti_fast:
	andl	PCPU_E4(ipending,%edx),%ecx
	sti
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU_E4(ipending,%edx)
					/* is it really still pending? */
	jnc	doreti_next

	shll	$5, %edx
	orl	%edx, %ecx		/* form intr number */

	pushl	%eax			/* save IRQ mask unavailable for BGL */
					/* NOTE: is also CPL in frame */
	call	dofastunpend		/* unpend intr %ecx */
	popl	%eax
	jmp	doreti_next

	/*
	 * SOFT interrupt pending
	 *
	 * Temporarily back-out our critical section to allow an interrupt
	 * to preempt us when we schedule it.  Bump TD_NEST_COUNT to
	 * prevent the switch code from recursing via splz too deeply.
	 */
	ALIGN_TEXT
doreti_soft:
	sti
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	doreti_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	pushl	%eax
	pushl	%ecx
	incl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	decl	TD_CRITCOUNT(%ebx)	/* so we can preempt */
	call	sched_ithd_soft		/* YYY must pull in imasks */
	incl	TD_CRITCOUNT(%ebx)
	decl	TD_NEST_COUNT(%ebx)
	addl	$4,%esp
	popl	%eax
	jmp	doreti_next

	/*
	 * AST pending.  We clear RQF_AST_SIGNAL and RQF_AST_UPCALL
	 * automatically, the others are cleared by the trap as they
	 * are processed.
	 *
	 * Temporarily back-out our critical section because trap() can be
	 * a long-winded call, and we want to be more syscall-like.
	 *
	 * YYY theoretically we can call lwkt_switch directly if all we need
	 * to do is a reschedule.
	 */
doreti_ast:
	andl	$~(RQF_AST_SIGNAL|RQF_AST_UPCALL),PCPU(reqflags)
	sti
	movl	%eax,%esi		/* save cpl (can't use stack) */
	movl	$T_ASTFLT,TF_TRAPNO(%esp)
	pushl	%esp			/* pass frame by reference */
	decl	TD_CRITCOUNT(%ebx)
	call	trap
	incl	TD_CRITCOUNT(%ebx)
	addl	$4,%esp
	movl	%esi,%eax		/* restore cpl for loop */
	jmp	doreti_next

#ifdef SMP
	/*
	 * IPIQ message pending.  We clear RQF_IPIQ automatically.
	 */
doreti_ipiq:
	movl	%eax,%esi		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_IPIQ,PCPU(reqflags)
	sti
	subl	$8,%esp			/* add dummy vec and ppl */
	pushl	%esp			/* pass frame by reference */
	call	lwkt_process_ipiq_frame
	addl	$12,%esp
	decl	PCPU(intr_nesting_level)
	movl	%esi,%eax		/* restore cpl for loop */
	jmp	doreti_next
#endif

doreti_timer:
	movl	%eax,%esi		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_TIMER,PCPU(reqflags)
	sti
	subl	$8,%esp			/* add dummy vec and ppl */
	pushl	%esp			/* pass frame by reference */
	call	lapic_timer_process_frame
	addl	$12,%esp
	decl	PCPU(intr_nesting_level)
	movl	%esi,%eax		/* restore cpl for loop */
	jmp	doreti_next

	/*
	 * SPLZ() is a C-callable procedure that dispatches any unmasked
	 * pending interrupts regardless of critical section nesting.  ASTs
	 * are not dispatched.
	 *
	 * Use %eax to track those IRQs that could not be processed
	 * due to BGL requirements.
	 */
	SUPERALIGN_TEXT

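	/*
	 * Hedged usage sketch (illustration only): splz() is what C code
	 * calls to flush work that became pending while a critical
	 * section was held, e.g. roughly:
	 *
	 *	crit_exit(td);			// count drops to zero
	 *	if (mycpu->gd_reqflags)		// ints pended meanwhile
	 *		splz();			// dispatch them now
	 *
	 * The real call sites live in the LWKT core and may differ.
	 */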
ENTRY(splz)
	pushfl
	pushl	%ebx
	movl	PCPU(curthread),%ebx
	incl	TD_CRITCOUNT(%ebx)
	movl	$0,%eax

splz_next:
	cli
	movl	%eax,%ecx		/* ecx = ~CPL */
	notl	%ecx
#ifdef SMP
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	splz_ipiq
#endif
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	splz_timer

	/*
	 * check for an unmasked int (6 groups)
	 */
	movl	$0,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$1,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$2,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$3,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$4,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$5,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	PCPU(spending),%ecx
	cmpl	$0,%ecx
	jnz	splz_soft

	decl	TD_CRITCOUNT(%ebx)

	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * If our mask of IRQs we couldn't process due to BGL requirements
	 * is 0 then there are no pending interrupt sources left and we
	 * can clear RQF_INTPEND.
	 */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	popl	%ebx
	popfl
	ret

	/*
	 * Interrupt pending
	 */
	ALIGN_TEXT
splz_fast:
	andl	PCPU_E4(ipending,%edx),%ecx
	sti
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU_E4(ipending,%edx)
					/* is it really still pending? */
	jnc	splz_next

	shll	$5, %edx
	orl	%edx, %ecx		/* form intr number */

	pushl	%eax
	call	dofastunpend		/* unpend intr %ecx */
	popl	%eax
	jmp	splz_next

	/*
	 * SOFT interrupt pending
	 *
	 * Temporarily back-out our critical section to allow the interrupt
	 * to preempt us.
	 */
	ALIGN_TEXT
splz_soft:
	sti
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	splz_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	pushl	%eax
	pushl	%ecx
	incl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	decl	TD_CRITCOUNT(%ebx)
	call	sched_ithd_soft		/* YYY must pull in imasks */
	incl	TD_CRITCOUNT(%ebx)
	decl	TD_NEST_COUNT(%ebx)	/* restore nest count */
	addl	$4,%esp
	popl	%eax
	jmp	splz_next

#ifdef SMP
splz_ipiq:
	andl	$~RQF_IPIQ,PCPU(reqflags)
	sti
	pushl	%eax
	call	lwkt_process_ipiq
	popl	%eax
	jmp	splz_next
#endif

splz_timer:
	andl	$~RQF_TIMER,PCPU(reqflags)
	sti
	pushl	%eax
	call	lapic_timer_process
	popl	%eax
	jmp	splz_next

	/*
	 * dofastunpend(%ecx:intr)
	 *
	 * An interrupt previously made pending can now be run; execute
	 * it by pushing a dummy interrupt frame and calling
	 * ithread_fast_handler to execute or schedule it.
	 *
	 * ithread_fast_handler() returns 0 if it wants us to unmask
	 * further interrupts.
	 */
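	/*
	 * Hedged C sketch of what dofastunpend builds and calls
	 * (illustration only; the exact intrframe layout here is an
	 * assumption):
	 *
	 *	struct intrframe frame;		// dummy, see PUSH_DUMMY
	 *	frame.if_vec = intr;		// pushed last, stack top
	 *	if (ithread_fast_handler(&frame) == 0)
	 *		MachIntrABI.intren(intr);	// unmask the IRQ
	 */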
#define PUSH_DUMMY							\
	pushfl ;		/* phys int frame / flags */		\
	pushl	%cs ;		/* phys int frame / cs */		\
	pushl	12(%esp) ;	/* original caller eip */		\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushl	$0 ;		/* dummy xflags */			\
	subl	$13*4,%esp ;	/* pushal + 4 seg regs (dummy) + CPL */	\

#define POP_DUMMY							\
	addl	$19*4,%esp ;						\

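	/*
	 * Accounting check (illustrative): PUSH_DUMMY makes 6 explicit
	 * pushes (eflags, cs, eip, err, trapno, xflags) and reserves 13
	 * more longs (8 pushal slots + 4 segment regs + CPL), 19 longs
	 * total, which matches the 19*4 bytes POP_DUMMY removes.
	 */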
dofastunpend:
	pushl	%ebp			/* frame for backtrace */
	movl	%esp,%ebp
	PUSH_DUMMY
	pushl	%ecx			/* last part of intrframe = intr */
	incl	fastunpend_count
	pushl	%esp			/* pass frame by reference */
	call	ithread_fast_handler	/* returns 0 to unmask */
	addl	$4,%esp			/* remove pointer, now intr on top */
	cmpl	$0,%eax
	jnz	1f
	movl	MachIntrABI + MACHINTR_INTREN, %eax
	call	*%eax			/* MachIntrABI.intren(intr) */
1:
	addl	$4,%esp
	POP_DUMMY
	popl	%ebp
	ret
