Remove upc_{control,register} syscalls and everything that has to do with it.
[dragonfly.git] / sys / platform / pc32 / i386 / ipl.s
CommitLineData
984263bc
MD
1/*-
2 * Copyright (c) 1989, 1990 William F. Jolitz.
3 * Copyright (c) 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * William Jolitz.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * @(#)ipl.s
38 *
39 * $FreeBSD: src/sys/i386/isa/ipl.s,v 1.32.2.3 2002/05/16 16:03:56 bde Exp $
40 */
41
bdc560a1
MD
42#include <machine/asmacros.h>
43#include <machine/segments.h>
44#include <machine/ipl.h>
45#include <machine/lock.h>
46#include <machine/psl.h>
47#include <machine/trap.h>
bdc560a1
MD
48
49#include "assym.s"
984263bc
MD
50
51/*
52 * AT/386
53 * Vector interrupt control section
54 *
c263294b 55 * ipending - Pending interrupts (set when a masked interrupt occurs)
5f456c40 56 * spending - Pending software interrupts
984263bc 57 */
bdc560a1
MD
/*
 * Data section.  fastunpend_count is a diagnostic counter bumped by
 * dofastunpend each time a previously-pending interrupt is unpended
 * and executed (see incl fastunpend_count below).
 */
58 .data
59 ALIGN_DATA
984263bc 60
46b26c5e
MD
 61 .globl fastunpend_count
62fastunpend_count: .long 0
63
984263bc 64 .text
bdc560a1
MD
 65 SUPERALIGN_TEXT
66
46a3f46d
MD
67 /*
68 * GENERAL NOTES
69 *
c263294b 70 * - interrupts are always called with a critical section held
46a3f46d
MD
71 *
72 * - we release our critical section when scheduling interrupt
73 * or softinterrupt threads in order so they can preempt
74 * (unless we are called manually from a critical section, in
75 * which case there will still be a critical section and
76 * they won't preempt anyway).
77 *
78 * - TD_NEST_COUNT prevents splz from nesting too deeply within
79 * itself. It is *not* actually an interrupt nesting count.
80 * PCPU(intr_nesting_level) is an interrupt nesting count.
81 *
82 * - We have to be careful in regards to local interrupts
 83 * occurring simultaneously with our doreti and splz
84 * processing.
85 */
984263bc 86
ef0fdad1
MD
87 /*
88 * DORETI
89 *
90 * Handle return from interrupts, traps and syscalls. This function
c263294b
SZ
91 * checks the cpl for unmasked pending interrupts (hardware or soft)
92 * and schedules them if appropriate, then irets.
71ef2f5c 93 *
b5d16701 94 * If we are in a critical section we cannot run any pending ints.
46a3f46d 95 *
38787eef
MD
96 * NOTE: Since SPLs no longer exist, all callers of this function
97 * push $0 for the CPL. HOWEVER, we *STILL* use the cpl mask within
c263294b 98 * this function to mark interrupts which could not be dispatched
38787eef 99 * due to the unavailability of the BGL.
ef0fdad1 100 */
/*
 * Register usage throughout doreti (established by the entry code):
 *	%eax - mask of IRQs that could not be dispatched (per the comments
 *	       below, due to BGL unavailability); 0 on entry
 *	%ebx - PCPU(curthread)
 * Interrupts are disabled (cli) while the reqflags/critcount checks run
 * and re-asserted at the top of each doreti_next loop iteration.
 */
984263bc 101 SUPERALIGN_TEXT
bdc560a1 102 .globl doreti
2954c92f
MD
 103 .type doreti,@function
104doreti:
 105 FAKE_MCOUNT(bintr) /* init "from" bintr -> doreti */
38787eef
MD
 106 popl %eax /* cpl to restore XXX */
 107 movl $0,%eax /* irq mask unavailable due to BGL */
2954c92f 108 movl PCPU(curthread),%ebx
f9235b6d 109 cli /* interlock with td_critcount */
235957ed
MD
 110 cmpl $0,PCPU(reqflags) /* short cut if nothing to do */
 111 je 5f
f9235b6d
MD
 112 testl $-1,TD_CRITCOUNT(%ebx) /* can't unpend if in critical sec */
 113 jne 5f
 114 incl TD_CRITCOUNT(%ebx) /* force all ints to pending */
984263bc 115doreti_next:
b4b1a37a 116 cli /* re-assert cli on loop */
38787eef 117 movl %eax,%ecx /* irq mask unavailable due to BGL */
ef0fdad1 118 notl %ecx
235957ed 119 testl $RQF_IPIQ,PCPU(reqflags)
96728c05 120 jnz doreti_ipiq
78ea5a2a
SZ
 121 testl $RQF_TIMER,PCPU(reqflags)
 122 jnz doreti_timer
c263294b
SZ
 123 /*
 124 * check for an unmasked int (6 groups)
 125 */
 126 movl $0,%edx
 127 testl PCPU_E4(ipending,%edx),%ecx
 128 jnz doreti_fast
 129
 130 movl $1,%edx
 131 testl PCPU_E4(ipending,%edx),%ecx
 132 jnz doreti_fast
 133
 134 movl $2,%edx
 135 testl PCPU_E4(ipending,%edx),%ecx
 136 jnz doreti_fast
 137
 138 movl $3,%edx
 139 testl PCPU_E4(ipending,%edx),%ecx
 140 jnz doreti_fast
 141
 142 movl $4,%edx
 143 testl PCPU_E4(ipending,%edx),%ecx
 144 jnz doreti_fast
 145
 146 movl $5,%edx
 147 testl PCPU_E4(ipending,%edx),%ecx
a2a5ad0d
MD
 148 jnz doreti_fast
 149
5f456c40
MD
 150 movl PCPU(spending),%ecx /* check for a pending software int */
 151 cmpl $0,%ecx
 152 jnz doreti_soft
 153
235957ed 154 testl $RQF_AST_MASK,PCPU(reqflags) /* any pending ASTs? */
26a0694b 155 jz 2f
984263bc 156 testl $PSL_VM,TF_EFLAGS(%esp)
ef0fdad1 157 jz 1f
235957ed
MD
 158 cmpl $1,in_vm86call /* YYY make per 'cpu'? */
 159 jnz doreti_ast
ef0fdad1 1601:
0a3f9b47 161 /* ASTs are only applicable when returning to userland */
984263bc 162 testb $SEL_RPL_MASK,TF_CS(%esp)
235957ed 163 jnz doreti_ast
96728c05 1642:
984263bc 165 /*
ef0fdad1 166 * Nothing left to do, finish up. Interrupts are still disabled.
38787eef
MD
 167 * %eax contains the mask of IRQ's that are not available due to
 168 * BGL requirements. We can only clear RQF_INTPEND if *ALL* pending
 169 * interrupts have been processed.
984263bc 170 */
f9235b6d 171 decl TD_CRITCOUNT(%ebx) /* interlocked with cli */
a2a5ad0d
MD
 172 testl %eax,%eax
 173 jnz 5f
5c323556 174 andl $~RQF_INTPEND,PCPU(reqflags)
ef0fdad1 1755:
984263bc 176 MEXITCOUNT
4e7c41c5
MD
 177
 178 /*
 179 * Restore the segment registers. Since segment register values
 180 * can be set from user mode, this can result in a kernel mode
 181 * exception. The trap code will revector to the *_fault code
 182 * which then sets up a T_PROTFLT signal. If the signal is
 183 * sent to userland, sendsig() will automatically clean up all
 184 * the segment registers to avoid a loop.
 185 */
 186 .globl doreti_popl_gs
984263bc 187 .globl doreti_popl_fs
ef0fdad1
MD
 188 .globl doreti_popl_es
 189 .globl doreti_popl_ds
 190 .globl doreti_iret
984263bc
MD
 191 .globl doreti_syscall_ret
 192doreti_syscall_ret:
4e7c41c5
MD
 193doreti_popl_gs:
 194 popl %gs
984263bc
MD
 195doreti_popl_fs:
 196 popl %fs
984263bc
MD
 197doreti_popl_es:
 198 popl %es
984263bc
MD
 199doreti_popl_ds:
 200 popl %ds
 201 popal
4e7c41c5 202 addl $3*4,%esp /* xflags, trap, err */
984263bc
MD
 203doreti_iret:
 204 iret
 205
b4b1a37a
MD
 206 /*
 207 * Interrupts are likely disabled due to the above interlock
 208 * between cli/iretq. We must enable them before calling any
 209 * high level function.
 210 */
984263bc
MD
 211 ALIGN_TEXT
 212 .globl doreti_iret_fault
 213doreti_iret_fault:
4e7c41c5 214 subl $3*4,%esp /* xflags, trap, err */
984263bc
MD
 215 pushal
 216 pushl %ds
 217 .globl doreti_popl_ds_fault
 218doreti_popl_ds_fault:
 219 pushl %es
 220 .globl doreti_popl_es_fault
 221doreti_popl_es_fault:
 222 pushl %fs
 223 .globl doreti_popl_fs_fault
 224doreti_popl_fs_fault:
4e7c41c5
MD
 225 pushl %gs
 226 .globl doreti_popl_gs_fault
 227doreti_popl_gs_fault:
b4b1a37a 228 sti
984263bc
MD
 229 movl $0,TF_ERR(%esp) /* XXX should be the error code */
 230 movl $T_PROTFLT,TF_TRAPNO(%esp)
 231 jmp alltraps_with_regs_pushed
 232
984263bc 233 /*
c263294b
SZ
 234 * Interrupt pending. NOTE: stack context holds frame structure
 235 * for interrupt procedure, do not do random pushes or pops!
984263bc 236 */
ef0fdad1
MD
 237 ALIGN_TEXT
 238doreti_fast:
c263294b 239 andl PCPU_E4(ipending,%edx),%ecx
b4b1a37a 240 sti
984263bc 241 bsfl %ecx, %ecx /* locate the next dispatchable int */
c263294b
SZ
 242 btrl %ecx, PCPU_E4(ipending,%edx)
 243 /* is it really still pending? */
ef0fdad1 244 jnc doreti_next
c263294b
SZ
 245
 246 shll $5, %edx
 247 orl %edx, %ecx /* form intr number */
 248
38787eef
MD
 249 pushl %eax /* save IRQ mask unavailable for BGL */
 250 /* NOTE: is also CPL in frame */
c263294b 251 call dofastunpend /* unpend intr %ecx */
984263bc 252 popl %eax
ef0fdad1 253 jmp doreti_next
984263bc 254
984263bc 255 /*
5f456c40
MD
 256 * SOFT interrupt pending
 257 *
 258 * Temporarily back-out our critical section to allow an interrupt
 259 * to preempt us when we schedule it. Bump intr_nesting_level to
 260 * prevent the switch code from recursing via splz too deeply.
 261 */
 262 ALIGN_TEXT
 263doreti_soft:
b4b1a37a 264 sti
5f456c40
MD
 265 bsfl %ecx,%ecx /* locate the next pending softint */
 266 btrl %ecx,PCPU(spending) /* make sure it's still pending */
 267 jnc doreti_next
 268 addl $FIRST_SOFTINT,%ecx /* actual intr number */
 269 pushl %eax
 270 pushl %ecx
 271 incl TD_NEST_COUNT(%ebx) /* prevent doreti/splz nesting */
f9235b6d 272 decl TD_CRITCOUNT(%ebx) /* so we can preempt */
c83c147e 273 call sched_ithd_soft /* YYY must pull in imasks */
f9235b6d 274 incl TD_CRITCOUNT(%ebx)
5f456c40
MD
 275 decl TD_NEST_COUNT(%ebx)
 276 addl $4,%esp
 277 popl %eax
 278 jmp doreti_next
 279
 280 /*
235957ed
MD
 281 * AST pending. We clear RQF_AST_SIGNAL automatically, the others
 282 * are cleared by the trap as they are processed.
a2a5ad0d
MD
 283 *
 284 * Temporarily back-out our critical section because trap() can be
 285 * a long-winded call, and we want to be more syscall-like.
 286 *
235957ed
MD
 287 * YYY theoretically we can call lwkt_switch directly if all we need
 288 * to do is a reschedule.
984263bc 289 */
235957ed 290doreti_ast:
7adb15b6 291 andl $~RQF_AST_SIGNAL,PCPU(reqflags)
a2a5ad0d
MD
 292 sti
 293 movl %eax,%esi /* save cpl (can't use stack) */
 294 movl $T_ASTFLT,TF_TRAPNO(%esp)
c7eb0589 295 pushl %esp /* pass frame by reference */
f9235b6d 296 decl TD_CRITCOUNT(%ebx)
d080fbe8 297 call trap
f9235b6d 298 incl TD_CRITCOUNT(%ebx)
c7eb0589 299 addl $4,%esp
96728c05
MD
 300 movl %esi,%eax /* restore cpl for loop */
 301 jmp doreti_next
 302
96728c05 303 /*
235957ed 304 * IPIQ message pending. We clear RQF_IPIQ automatically.
96728c05
MD
 305 */
 306doreti_ipiq:
38787eef 307 movl %eax,%esi /* save cpl (can't use stack) */
03aa8d99 308 incl PCPU(intr_nesting_level)
235957ed 309 andl $~RQF_IPIQ,PCPU(reqflags)
b4b1a37a 310 sti
88c4d2f6 311 subl $8,%esp /* add dummy vec and ppl */
c7eb0589 312 pushl %esp /* pass frame by reference */
88c4d2f6 313 call lwkt_process_ipiq_frame
c7eb0589 314 addl $12,%esp
03aa8d99 315 decl PCPU(intr_nesting_level)
38787eef 316 movl %esi,%eax /* restore cpl for loop */
984263bc
MD
 317 jmp doreti_next
 318
78ea5a2a
SZ
 319doreti_timer:
 320 movl %eax,%esi /* save cpl (can't use stack) */
 321 incl PCPU(intr_nesting_level)
 322 andl $~RQF_TIMER,PCPU(reqflags)
b4b1a37a 323 sti
78ea5a2a
SZ
 324 subl $8,%esp /* add dummy vec and ppl */
 325 pushl %esp /* pass frame by reference */
 326 call lapic_timer_process_frame
 327 addl $12,%esp
 328 decl PCPU(intr_nesting_level)
 329 movl %esi,%eax /* restore cpl for loop */
 330 jmp doreti_next
 331
ef0fdad1
MD
 332 /*
 333 * SPLZ() a C callable procedure to dispatch any unmasked pending
 334 * interrupts regardless of critical section nesting. ASTs
 335 * are not dispatched.
26a0694b 336 *
38787eef
MD
 337 * Use %eax to track those IRQs that could not be processed
 338 * due to BGL requirements.
 339 *
 340 * The caller's interrupt-enable state is preserved: splz saves
 341 * EFLAGS with pushfl on entry and restores it with popfl on exit.
 342 * %ebx (callee-saved) is used to hold PCPU(curthread).
ef0fdad1
MD
 339 */
 340 SUPERALIGN_TEXT
 341
 342ENTRY(splz)
26a0694b 343 pushfl
ef0fdad1 344 pushl %ebx
2954c92f 345 movl PCPU(curthread),%ebx
f9235b6d 346 incl TD_CRITCOUNT(%ebx)
38787eef 347 movl $0,%eax
ef0fdad1
MD
 348
 349splz_next:
26a0694b 350 cli
ef0fdad1
MD
 351 movl %eax,%ecx /* ecx = ~CPL */
 352 notl %ecx
235957ed 353 testl $RQF_IPIQ,PCPU(reqflags)
96728c05 354 jnz splz_ipiq
78ea5a2a
SZ
 355 testl $RQF_TIMER,PCPU(reqflags)
 356 jnz splz_timer
1e7aaefa 357
c263294b
SZ
 358 /*
 359 * check for an unmasked int (6 groups)
 360 */
 361 movl $0,%edx
 362 testl PCPU_E4(ipending,%edx),%ecx
 363 jnz splz_fast
 364
 365 movl $1,%edx
 366 testl PCPU_E4(ipending,%edx),%ecx
 367 jnz splz_fast
 368
 369 movl $2,%edx
 370 testl PCPU_E4(ipending,%edx),%ecx
 371 jnz splz_fast
 372
 373 movl $3,%edx
 374 testl PCPU_E4(ipending,%edx),%ecx
 375 jnz splz_fast
 376
 377 movl $4,%edx
 378 testl PCPU_E4(ipending,%edx),%ecx
 379 jnz splz_fast
 380
 381 movl $5,%edx
 382 testl PCPU_E4(ipending,%edx),%ecx
a2a5ad0d
MD
 383 jnz splz_fast
 384
5f456c40
MD
 385 movl PCPU(spending),%ecx
 386 cmpl $0,%ecx
 387 jnz splz_soft
 388
f9235b6d 389 decl TD_CRITCOUNT(%ebx)
235957ed 390
3c23a41a
MD
 391 /*
 392 * Nothing left to do, finish up. Interrupts are still disabled.
38787eef
MD
 393 * If our mask of IRQs we couldn't process due to BGL requirements
 394 * is 0 then there are no pending interrupt sources left and we
 395 * can clear RQF_INTPEND.
3c23a41a 396 */
a2a5ad0d
MD
 397 testl %eax,%eax
 398 jnz 5f
235957ed 399 andl $~RQF_INTPEND,PCPU(reqflags)
a2a5ad0d 4005:
ef0fdad1 401 popl %ebx
26a0694b 402 popfl
984263bc
MD
 403 ret
 404
ef0fdad1 405 /*
c263294b 406 * Interrupt pending
ef0fdad1 407 */
984263bc 408 ALIGN_TEXT
ef0fdad1 409splz_fast:
c263294b 410 andl PCPU_E4(ipending,%edx),%ecx
b4b1a37a 411 sti
ef0fdad1 412 bsfl %ecx, %ecx /* locate the next dispatchable int */
c263294b
SZ
 413 btrl %ecx, PCPU_E4(ipending,%edx)
 414 /* is it really still pending? */
ef0fdad1 415 jnc splz_next
c263294b
SZ
 416
 417 shll $5, %edx
 418 orl %edx, %ecx /* form intr number */
 419
ef0fdad1 420 pushl %eax
c263294b 421 call dofastunpend /* unpend intr %ecx */
ef0fdad1
MD
 422 popl %eax
 423 jmp splz_next
984263bc 424
5f456c40
MD
 425 /*
 426 * SOFT interrupt pending
 427 *
 428 * Temporarily back-out our critical section to allow the interrupt
 429 * to preempt us.
 430 */
 431 ALIGN_TEXT
 432splz_soft:
b4b1a37a 433 sti
5f456c40
MD
 434 bsfl %ecx,%ecx /* locate the next pending softint */
 435 btrl %ecx,PCPU(spending) /* make sure it's still pending */
 436 jnc splz_next
 437 addl $FIRST_SOFTINT,%ecx /* actual intr number */
5f456c40
MD
 438 pushl %eax
 439 pushl %ecx
5f456c40 440 incl TD_NEST_COUNT(%ebx) /* prevent doreti/splz nesting */
b5fc1882 441 decl TD_CRITCOUNT(%ebx)
c83c147e 442 call sched_ithd_soft /* YYY must pull in imasks */
f9235b6d 443 incl TD_CRITCOUNT(%ebx)
5f456c40
MD
 444 decl TD_NEST_COUNT(%ebx) /* prevent doreti/splz nesting */
 445 addl $4,%esp
 446 popl %eax
 447 jmp splz_next
 448
96728c05 449splz_ipiq:
235957ed 450 andl $~RQF_IPIQ,PCPU(reqflags)
b4b1a37a 451 sti
96728c05
MD
 452 pushl %eax
 453 call lwkt_process_ipiq
 454 popl %eax
 455 jmp splz_next
78ea5a2a
SZ
 456
 457splz_timer:
 458 andl $~RQF_TIMER,PCPU(reqflags)
b4b1a37a 459 sti
78ea5a2a
SZ
 460 pushl %eax
 461 call lapic_timer_process
 462 popl %eax
 463 jmp splz_next
96728c05 464
46b26c5e
MD
 465 /*
 466 * dofastunpend(%ecx:intr)
 467 *
c263294b 468 * An interrupt previously made pending can now be run,
46b26c5e
MD
 469 * execute it by pushing a dummy interrupt frame and
 470 * calling ithread_fast_handler to execute or schedule it.
 471 *
 472 * ithread_fast_handler() returns 0 if it wants us to unmask
 473 * further interrupts.
 474 */
/*
 * PUSH_DUMMY builds a fake intrframe on the stack: flags/cs/eip of the
 * physical frame, dummy err/trap/xflags, then 13 more dwords reserved
 * for the pushal + 4 segment register slots + CPL.  POP_DUMMY removes
 * all 19 dwords in one add.  The macro bodies are \-continued; do not
 * insert lines inside them.
 */
 475#define PUSH_DUMMY \
 476 pushfl ; /* phys int frame / flags */ \
 477 pushl %cs ; /* phys int frame / cs */ \
 478 pushl 12(%esp) ; /* original caller eip */ \
 479 pushl $0 ; /* dummy error code */ \
 480 pushl $0 ; /* dummy trap type */ \
4e7c41c5
MD
 481 pushl $0 ; /* dummy xflags */ \
 482 subl $13*4,%esp ; /* pushal + 4 seg regs (dummy) + CPL */ \
46b26c5e
MD
 483
 484#define POP_DUMMY \
4e7c41c5 485 addl $19*4,%esp ; \
46b26c5e
MD
 486
/*
 * Entry: %ecx = interrupt number.  Builds the dummy frame, bumps
 * fastunpend_count, and calls ithread_fast_handler with the frame
 * passed by reference.  If the handler returns 0 the interrupt is
 * re-enabled through the MachIntrABI.intren vector.
 */
 487dofastunpend:
 488 pushl %ebp /* frame for backtrace */
 489 movl %esp,%ebp
 490 PUSH_DUMMY
 491 pushl %ecx /* last part of intrframe = intr */
 492 incl fastunpend_count
c7eb0589 493 pushl %esp /* pass frame by reference */
46b26c5e 494 call ithread_fast_handler /* returns 0 to unmask */
c7eb0589 495 addl $4,%esp /* remove pointer, now intr on top */
46b26c5e
MD
 496 cmpl $0,%eax
 497 jnz 1f
 498 movl MachIntrABI + MACHINTR_INTREN, %eax
 499 call *%eax /* MachIntrABI.intren(intr) */
 5001:
 501 addl $4,%esp
 502 POP_DUMMY
 503 popl %ebp
 504 ret
505