kernel: Make SMP support default (and non-optional).
[dragonfly.git] / sys / platform / pc32 / i386 / ipl.s
/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ipl.s
 *
 * $FreeBSD: src/sys/i386/isa/ipl.s,v 1.32.2.3 2002/05/16 16:03:56 bde Exp $
 */

#include <machine/asmacros.h>
#include <machine/segments.h>
#include <machine/ipl.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include "assym.s"

/*
 * AT/386
 * Vector interrupt control section
 *
 *  ipending	- Pending interrupts (set when a masked interrupt occurs)
 *  spending	- Pending software interrupts
 */
	.data
	ALIGN_DATA

	.globl	fastunpend_count
fastunpend_count:	.long	0

	.text
	SUPERALIGN_TEXT

	/*
	 * GENERAL NOTES
	 *
	 *	- interrupts are always called with a critical section held
	 *
	 *	- we release our critical section when scheduling interrupt
	 *	  or softinterrupt threads so that they can preempt
	 *	  (unless we are called manually from a critical section, in
	 *	  which case there will still be a critical section and
	 *	  they won't preempt anyway).
	 *
	 *	- TD_NEST_COUNT prevents splz from nesting too deeply within
	 *	  itself.  It is *not* actually an interrupt nesting count.
	 *	  PCPU(intr_nesting_level) is an interrupt nesting count.
	 *
	 *	- We have to be careful in regards to local interrupts
	 *	  occurring simultaneously with our doreti and splz
	 *	  processing.
	 */
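
	/*
	 * As a rough C sketch of the first note (helper names below are
	 * hypothetical illustrations; the real work happens in the
	 * interrupt vectors and in doreti/splz in this file):
	 *
	 *	void hardware_int_sketch(int intr) {
	 *		crit_enter();			// always held
	 *		if (dispatch_blocked())		// crit sect / BGL
	 *			set_ipending(intr);	// doreti/splz runs it
	 *		else
	 *			run_handler(intr);
	 *		crit_exit();
	 *	}
	 */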

	/*
	 * DORETI
	 *
	 * Handle return from interrupts, traps and syscalls.  This function
	 * checks the cpl for unmasked pending interrupts (hardware or soft)
	 * and schedules them if appropriate, then irets.
	 *
	 * If we are in a critical section we cannot run any pending ints.
	 *
	 * NOTE: Since SPLs no longer exist, all callers of this function
	 * push $0 for the CPL.  HOWEVER, we *STILL* use the cpl mask within
	 * this function to mark interrupts which could not be dispatched
	 * due to the unavailability of the BGL.
	 */
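
	/*
	 * In rough C terms the dispatch loop below behaves like the
	 * following sketch (the RQF_* flags are real; the do_*() helpers
	 * are hypothetical stand-ins for the assembly labels):
	 *
	 *	++td->td_critcount;		// force new ints to pend
	 *	for (;;) {
	 *		cli();
	 *		if (reqflags & RQF_IPIQ)  { do_ipiq();  continue; }
	 *		if (reqflags & RQF_TIMER) { do_timer(); continue; }
	 *		if (hard_int_pending())   { do_fast();  continue; }
	 *		if (soft_int_pending())   { do_soft();  continue; }
	 *		if (ast_applicable())     { do_ast();   continue; }
	 *		break;			// nothing left, iret
	 *	}
	 *	--td->td_critcount;
	 */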
	SUPERALIGN_TEXT
	.globl	doreti
	.type	doreti,@function
doreti:
	FAKE_MCOUNT(bintr)		/* init "from" bintr -> doreti */
	popl	%eax			/* cpl to restore XXX */
	movl	$0,%eax			/* irq mask unavailable due to BGL */
	movl	PCPU(curthread),%ebx
	cli				/* interlock with td_critcount */
	cmpl	$0,PCPU(reqflags)	/* short cut if nothing to do */
	je	5f
	testl	$-1,TD_CRITCOUNT(%ebx)	/* can't unpend if in critical sec */
	jne	5f
	incl	TD_CRITCOUNT(%ebx)	/* force all ints to pending */
doreti_next:
	cli				/* re-assert cli on loop */
	movl	%eax,%ecx		/* irq mask unavailable due to BGL */
	notl	%ecx
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	doreti_ipiq
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	doreti_timer
	/*
	 * check for an unmasked int (6 groups)
	 */
	movl	$0,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$1,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$2,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$3,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$4,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	$5,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	doreti_fast

	movl	PCPU(spending),%ecx	/* check for a pending software int */
	cmpl	$0,%ecx
	jnz	doreti_soft

	testl	$RQF_AST_MASK,PCPU(reqflags) /* any pending ASTs? */
	jz	2f
	testl	$PSL_VM,TF_EFLAGS(%esp)
	jz	1f
	cmpl	$1,in_vm86call		/* YYY make per 'cpu'? */
	jnz	doreti_ast
1:
	/* ASTs are only applicable when returning to userland */
	testb	$SEL_RPL_MASK,TF_CS(%esp)
	jnz	doreti_ast
2:
	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * %eax contains the mask of IRQ's that are not available due to
	 * BGL requirements.  We can only clear RQF_INTPEND if *ALL* pending
	 * interrupts have been processed.
	 */
	decl	TD_CRITCOUNT(%ebx)	/* interlocked with cli */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	MEXITCOUNT

	/*
	 * Restore the segment registers.  Since segment register values
	 * can be set from user mode, this can result in a kernel mode
	 * exception.  The trap code will revector to the *_fault code
	 * which then sets up a T_PROTFLT signal.  If the signal is
	 * sent to userland, sendsig() will automatically clean up all
	 * the segment registers to avoid a loop.
	 */
	.globl	doreti_popl_gs
	.globl	doreti_popl_fs
	.globl	doreti_popl_es
	.globl	doreti_popl_ds
	.globl	doreti_iret
	.globl	doreti_syscall_ret
doreti_syscall_ret:
doreti_popl_gs:
	popl	%gs
doreti_popl_fs:
	popl	%fs
doreti_popl_es:
	popl	%es
doreti_popl_ds:
	popl	%ds
	popal
	addl	$3*4,%esp	/* xflags, trap, err */
doreti_iret:
	iret

	/*
	 * Interrupts are likely disabled due to the above interlock
	 * between cli/iret.  We must enable them before calling any
	 * high level function.
	 */
	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	subl	$3*4,%esp	/* xflags, trap, err */
	pushal
	pushl	%ds
	.globl	doreti_popl_ds_fault
doreti_popl_ds_fault:
	pushl	%es
	.globl	doreti_popl_es_fault
doreti_popl_es_fault:
	pushl	%fs
	.globl	doreti_popl_fs_fault
doreti_popl_fs_fault:
	pushl	%gs
	.globl	doreti_popl_gs_fault
doreti_popl_gs_fault:
	sti
	movl	$0,TF_ERR(%esp)	/* XXX should be the error code */
	movl	$T_PROTFLT,TF_TRAPNO(%esp)
	jmp	alltraps_with_regs_pushed

	/*
	 * Interrupt pending.  NOTE: stack context holds frame structure
	 * for interrupt procedure, do not do random pushes or pops!
	 */
	ALIGN_TEXT
doreti_fast:
	andl	PCPU_E4(ipending,%edx),%ecx
	sti
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU_E4(ipending,%edx)
					/* is it really still pending? */
	jnc	doreti_next

	shll	$5, %edx
	orl	%edx, %ecx		/* form intr number */

	pushl	%eax			/* save IRQ mask unavailable for BGL */
					/* NOTE: is also CPL in frame */
	call	dofastunpend		/* unpend intr %ecx */
	popl	%eax
	jmp	doreti_next

	/*
	 * SOFT interrupt pending
	 *
	 * Temporarily back out our critical section so the interrupt
	 * thread can preempt us when we schedule it.  Bump TD_NEST_COUNT
	 * to prevent the switch code from recursing via splz too deeply.
	 */
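
	/*
	 * In C terms the back-out around the scheduling call looks
	 * roughly like this (a sketch only; sched_ithd_soft() is the
	 * real function, the field names match the assembly offsets):
	 *
	 *	++td->td_nest_count;	// bound doreti/splz recursion
	 *	--td->td_critcount;	// open a preemption window
	 *	sched_ithd_soft(intr);	// may be preempted right here
	 *	++td->td_critcount;	// close the window again
	 *	--td->td_nest_count;
	 */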
	ALIGN_TEXT
doreti_soft:
	sti
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	doreti_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	pushl	%eax
	pushl	%ecx
	incl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	decl	TD_CRITCOUNT(%ebx)	/* so we can preempt */
	call	sched_ithd_soft		/* YYY must pull in imasks */
	incl	TD_CRITCOUNT(%ebx)
	decl	TD_NEST_COUNT(%ebx)
	addl	$4,%esp
	popl	%eax
	jmp	doreti_next

	/*
	 * AST pending.  We clear RQF_AST_SIGNAL and RQF_AST_UPCALL
	 * automatically; the others are cleared by the trap as they
	 * are processed.
	 *
	 * Temporarily back out our critical section because trap() can be
	 * a long-winded call, and we want to be more syscall-like.
	 *
	 * YYY theoretically we can call lwkt_switch directly if all we need
	 * to do is a reschedule.
	 */
doreti_ast:
	andl	$~(RQF_AST_SIGNAL|RQF_AST_UPCALL),PCPU(reqflags)
	sti
	movl	%eax,%esi		/* save cpl (can't use stack) */
	movl	$T_ASTFLT,TF_TRAPNO(%esp)
	pushl	%esp			/* pass frame by reference */
	decl	TD_CRITCOUNT(%ebx)
	call	trap
	incl	TD_CRITCOUNT(%ebx)
	addl	$4,%esp
	movl	%esi,%eax		/* restore cpl for loop */
	jmp	doreti_next

	/*
	 * IPIQ message pending.  We clear RQF_IPIQ automatically.
	 */
doreti_ipiq:
	movl	%eax,%esi		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_IPIQ,PCPU(reqflags)
	sti
	subl	$8,%esp			/* add dummy vec and ppl */
	pushl	%esp			/* pass frame by reference */
	call	lwkt_process_ipiq_frame
	addl	$12,%esp
	decl	PCPU(intr_nesting_level)
	movl	%esi,%eax		/* restore cpl for loop */
	jmp	doreti_next

doreti_timer:
	movl	%eax,%esi		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_TIMER,PCPU(reqflags)
	sti
	subl	$8,%esp			/* add dummy vec and ppl */
	pushl	%esp			/* pass frame by reference */
	call	lapic_timer_process_frame
	addl	$12,%esp
	decl	PCPU(intr_nesting_level)
	movl	%esi,%eax		/* restore cpl for loop */
	jmp	doreti_next

	/*
	 * splz() - a C-callable procedure that dispatches any unmasked
	 * pending interrupts regardless of critical section nesting.
	 * ASTs are not dispatched.
	 *
	 * Use %eax to track those IRQs that could not be processed
	 * due to BGL requirements.
	 */
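
	/*
	 * From C a typical use is to flush pending interrupts when
	 * leaving a critical section, along these lines (a sketch under
	 * assumed semantics; the exact crit_exit() logic lives in the
	 * LWKT code, not here):
	 *
	 *	void splz(void);	// this routine
	 *
	 *	void crit_exit_sketch(thread_t td) {
	 *		if (--td->td_critcount == 0 && mycpu->gd_reqflags)
	 *			splz();	// run whatever pended while blocked
	 *	}
	 */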
	SUPERALIGN_TEXT

ENTRY(splz)
	pushfl
	pushl	%ebx
	movl	PCPU(curthread),%ebx
	incl	TD_CRITCOUNT(%ebx)
	movl	$0,%eax

splz_next:
	cli
	movl	%eax,%ecx		/* ecx = ~CPL */
	notl	%ecx
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	splz_ipiq
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	splz_timer

	/*
	 * check for an unmasked int (6 groups)
	 */
	movl	$0,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$1,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$2,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$3,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$4,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	$5,%edx
	testl	PCPU_E4(ipending,%edx),%ecx
	jnz	splz_fast

	movl	PCPU(spending),%ecx
	cmpl	$0,%ecx
	jnz	splz_soft

	decl	TD_CRITCOUNT(%ebx)

	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * If our mask of IRQs we couldn't process due to BGL requirements
	 * is 0 then there are no pending interrupt sources left and we
	 * can clear RQF_INTPEND.
	 */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	popl	%ebx
	popfl
	ret

	/*
	 * Interrupt pending
	 */
	ALIGN_TEXT
splz_fast:
	andl	PCPU_E4(ipending,%edx),%ecx
	sti
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU_E4(ipending,%edx)
					/* is it really still pending? */
	jnc	splz_next

	shll	$5, %edx
	orl	%edx, %ecx		/* form intr number */

	pushl	%eax
	call	dofastunpend		/* unpend intr %ecx */
	popl	%eax
	jmp	splz_next

	/*
	 * SOFT interrupt pending
	 *
	 * Temporarily back out our critical section to allow the interrupt
	 * thread to preempt us.
	 */
	ALIGN_TEXT
splz_soft:
	sti
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	splz_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	pushl	%eax
	pushl	%ecx
	incl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	decl	TD_CRITCOUNT(%ebx)
	call	sched_ithd_soft		/* YYY must pull in imasks */
	incl	TD_CRITCOUNT(%ebx)
	decl	TD_NEST_COUNT(%ebx)	/* restore nest count */
	addl	$4,%esp
	popl	%eax
	jmp	splz_next

splz_ipiq:
	andl	$~RQF_IPIQ,PCPU(reqflags)
	sti
	pushl	%eax
	call	lwkt_process_ipiq
	popl	%eax
	jmp	splz_next

splz_timer:
	andl	$~RQF_TIMER,PCPU(reqflags)
	sti
	pushl	%eax
	call	lapic_timer_process
	popl	%eax
	jmp	splz_next

	/*
	 * dofastunpend(%ecx:intr)
	 *
	 * An interrupt previously made pending can now be run.  Execute
	 * it by pushing a dummy interrupt frame and calling
	 * ithread_fast_handler to execute or schedule it.
	 *
	 * ithread_fast_handler() returns 0 if it wants us to unmask
	 * further interrupts.
	 */
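
	/*
	 * The C-level contract, roughly (a sketch; ithread_fast_handler()
	 * and MachIntrABI.intren() are the real interfaces used below,
	 * the wrapper and frame-builder names are hypothetical):
	 *
	 *	void dofastunpend_sketch(int intr) {
	 *		struct intrframe frame;		// dummy frame
	 *
	 *		fill_dummy_frame(&frame, intr);	// hypothetical
	 *		++fastunpend_count;
	 *		if (ithread_fast_handler(&frame) == 0)
	 *			MachIntrABI.intren(intr); // unmask the IRQ
	 *	}
	 */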
#define PUSH_DUMMY							\
	pushfl ;		/* phys int frame / flags */		\
	pushl	%cs ;		/* phys int frame / cs */		\
	pushl	12(%esp) ;	/* original caller eip */		\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushl	$0 ;		/* dummy xflags */			\
	subl	$13*4,%esp ;	/* pushal + 4 seg regs (dummy) + CPL */	\

#define POP_DUMMY							\
	addl	$19*4,%esp ;	/* 6 words pushed + 13 words reserved */ \

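	/*
	 * Word accounting for the dummy frame, as a C sketch
	 * (illustrative only; the real layout is struct intrframe):
	 *
	 *	struct dummy_frame_sketch {
	 *		int	space[13];	// cpl + 4 segs + pushal area
	 *		int	xflags, trapno, err;	// 3 dummy pushes
	 *		int	eip, cs, eflags;	// from this CPU
	 *	};	// 19 words, matching POP_DUMMY's addl $19*4,%esp
	 */
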
dofastunpend:
	pushl	%ebp			/* frame for backtrace */
	movl	%esp,%ebp
	PUSH_DUMMY
	pushl	%ecx			/* last part of intrframe = intr */
	incl	fastunpend_count
	pushl	%esp			/* pass frame by reference */
	call	ithread_fast_handler	/* returns 0 to unmask */
	addl	$4,%esp			/* remove pointer, now intr on top */
	cmpl	$0,%eax
	jnz	1f
	movl	MachIntrABI + MACHINTR_INTREN, %eax
	call	*%eax			/* MachIntrABI.intren(intr) */
1:
	addl	$4,%esp
	POP_DUMMY
	popl	%ebp
	ret