MP Implementation 3/4: MAJOR progress on SMP, full userland MP is now working!
[dragonfly.git] / sys / platform / pc32 / isa / ipl.s
CommitLineData
984263bc
MD
1/*-
2 * Copyright (c) 1989, 1990 William F. Jolitz.
3 * Copyright (c) 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * William Jolitz.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * @(#)ipl.s
38 *
39 * $FreeBSD: src/sys/i386/isa/ipl.s,v 1.32.2.3 2002/05/16 16:03:56 bde Exp $
a2a5ad0d 40 * $DragonFly: src/sys/platform/pc32/isa/ipl.s,v 1.8 2003/07/10 04:47:54 dillon Exp $
984263bc
MD
41 */
42
43
44/*
45 * AT/386
46 * Vector interrupt control section
47 *
984263bc
MD
48 * *_imask - Interrupt masks for various spl*() functions
49 * ipending - Pending interrupts (set when a masked interrupt occurs)
50 */
51
52 .data
53 ALIGN_DATA
54
55/* current priority (all off) */
984263bc 56
2954c92f
MD
57 .globl tty_imask
58tty_imask: .long SWI_TTY_MASK
59 .globl bio_imask
60bio_imask: .long SWI_CLOCK_MASK | SWI_CAMBIO_MASK
61 .globl net_imask
62net_imask: .long SWI_NET_MASK | SWI_CAMNET_MASK
63 .globl cam_imask
64cam_imask: .long SWI_CAMBIO_MASK | SWI_CAMNET_MASK
65 .globl soft_imask
66soft_imask: .long SWI_MASK
67 .globl softnet_imask
68softnet_imask: .long SWI_NET_MASK
69 .globl softtty_imask
70softtty_imask: .long SWI_TTY_MASK
984263bc 71
984263bc
MD
72 .text
73
ef0fdad1
MD
74 /*
75 * DORETI
76 *
77 * Handle return from interrupts, traps and syscalls. This function
78 * checks the cpl for unmasked pending interrupts (fast, normal, or
79 * soft) and schedules them if appropriate, then irets.
80 */
984263bc 81 SUPERALIGN_TEXT
2954c92f
MD
82 .type doreti,@function
83doreti:
84 FAKE_MCOUNT(bintr) /* init "from" bintr -> doreti */
ef0fdad1 85 popl %eax /* cpl to restore */
2954c92f 86 movl PCPU(curthread),%ebx
ef0fdad1
MD
87 cli /* interlock with TDPRI_CRIT */
88 movl %eax,TD_CPL(%ebx) /* save cpl being restored */
89 cmpl $TDPRI_CRIT,TD_PRI(%ebx) /* can't unpend if in critical sec */
90 jge 5f
91 addl $TDPRI_CRIT,TD_PRI(%ebx) /* force all ints to pending */
984263bc 92doreti_next:
ef0fdad1
MD
93 sti /* allow new interrupts */
94 movl %eax,%ecx /* cpl being restored */
95 notl %ecx
96 cli /* disallow YYY remove */
96728c05
MD
97#ifdef SMP
98 testl $AST_IPIQ,PCPU(astpending)
99 jnz doreti_ipiq
100#endif
a2a5ad0d
MD
101 testl PCPU(fpending),%ecx /* check for an unmasked fast int */
102 jnz doreti_fast
103
104 testl PCPU(ipending),%ecx
105 jnz doreti_intr
2954c92f 106 testl $AST_PENDING,PCPU(astpending) /* any pending ASTs? */
26a0694b 107 jz 2f
984263bc 108 testl $PSL_VM,TF_EFLAGS(%esp)
ef0fdad1 109 jz 1f
2954c92f 110 cmpl $1,in_vm86call /* YYY make per 'cpu' */
a2a5ad0d 111 jnz doreti_ast2
ef0fdad1 1121:
984263bc 113 testb $SEL_RPL_MASK,TF_CS(%esp)
a2a5ad0d 114 jnz doreti_ast2
96728c05 1152:
984263bc 116 /*
ef0fdad1 117 * Nothing left to do, finish up. Interrupts are still disabled.
984263bc 118 */
ef0fdad1 119 subl $TDPRI_CRIT,TD_PRI(%ebx) /* interlocked with cli */
a2a5ad0d
MD
120 testl %eax,%eax
121 jnz 5f
122 movl $0,PCPU(reqpri)
ef0fdad1 1235:
2954c92f 124 decl PCPU(intr_nesting_level)
984263bc 125 MEXITCOUNT
984263bc 126 .globl doreti_popl_fs
ef0fdad1
MD
127 .globl doreti_popl_es
128 .globl doreti_popl_ds
129 .globl doreti_iret
984263bc
MD
130 .globl doreti_syscall_ret
131doreti_syscall_ret:
132doreti_popl_fs:
133 popl %fs
984263bc
MD
134doreti_popl_es:
135 popl %es
984263bc
MD
136doreti_popl_ds:
137 popl %ds
138 popal
139 addl $8,%esp
984263bc
MD
140doreti_iret:
141 iret
142
143 ALIGN_TEXT
144 .globl doreti_iret_fault
145doreti_iret_fault:
146 subl $8,%esp
147 pushal
148 pushl %ds
149 .globl doreti_popl_ds_fault
150doreti_popl_ds_fault:
151 pushl %es
152 .globl doreti_popl_es_fault
153doreti_popl_es_fault:
154 pushl %fs
155 .globl doreti_popl_fs_fault
156doreti_popl_fs_fault:
157 movl $0,TF_ERR(%esp) /* XXX should be the error code */
158 movl $T_PROTFLT,TF_TRAPNO(%esp)
159 jmp alltraps_with_regs_pushed
160
984263bc 161 /*
ef0fdad1 162 * FAST interrupt pending
984263bc 163 */
ef0fdad1
MD
164 ALIGN_TEXT
165doreti_fast:
2954c92f 166 andl PCPU(fpending),%ecx /* only check fast ints */
984263bc 167 bsfl %ecx, %ecx /* locate the next dispatchable int */
2954c92f 168 btrl %ecx, PCPU(fpending) /* is it really still pending? */
ef0fdad1 169 jnc doreti_next
96728c05
MD
170 pushl %eax /* YYY cpl (expected by frame) */
171#ifdef SMP
172 pushl %ecx /* save ecx */
173 call try_mplock
174 popl %ecx
175 testl %eax,%eax
176 jz 1f
177#endif
178 call *fastunpend(,%ecx,4) /* MP lock successful */
179#ifdef SMP
180 call rel_mplock
181#endif
182 popl %eax
183 jmp doreti_next
1841:
185 btsl %ecx, PCPU(fpending) /* oops, couldn't get the MP lock */
ef0fdad1 186 popl %eax
96728c05 187 orl PCPU(fpending),%eax
ef0fdad1
MD
188 jmp doreti_next
189
984263bc 190 /*
ef0fdad1 191 * INTR interrupt pending
96728c05
MD
192 *
193 * Temporarily back-out our critical section to allow the interrupt
194 * preempt us.
984263bc 195 */
ef0fdad1
MD
196 ALIGN_TEXT
197doreti_intr:
2954c92f 198 andl PCPU(ipending),%ecx /* only check normal ints */
ef0fdad1 199 bsfl %ecx, %ecx /* locate the next dispatchable int */
2954c92f 200 btrl %ecx, PCPU(ipending) /* is it really still pending? */
ef0fdad1 201 jnc doreti_next
984263bc
MD
202 pushl %eax
203 pushl %ecx
a2a5ad0d 204 subl $TDPRI_CRIT,TD_PRI(%ebx) /* so we can preempt */
2954c92f 205 call sched_ithd /* YYY must pull in imasks */
96728c05 206 addl $TDPRI_CRIT,TD_PRI(%ebx)
984263bc 207 addl $4,%esp
984263bc 208 popl %eax
ef0fdad1 209 jmp doreti_next
984263bc 210
984263bc 211 /*
ef0fdad1 212 * AST pending
a2a5ad0d
MD
213 *
214 * Temporarily back-out our critical section because trap() can be
215 * a long-winded call, and we want to be more syscall-like.
216 *
217 * YYY If we came in from user mode (doreti_ast1) we can call
218 * lwkt_switch *RIGHT* *NOW* to deal with interrupts more quickly,
219 * but should still fall through to the trap code to properly
220 * reschedule.
984263bc 221 */
a2a5ad0d
MD
222#if 0
223doreti_ast1:
2954c92f 224 andl $~AST_PENDING,PCPU(astpending)
984263bc 225 sti
96728c05 226 movl %eax,%esi /* save cpl (can't use stack) */
984263bc 227 movl $T_ASTFLT,TF_TRAPNO(%esp)
a2a5ad0d
MD
228 decl PCPU(intr_nesting_level) /* syscall-like, not interrupt-like */
229 subl $TDPRI_CRIT,TD_PRI(%ebx)
230 call lwkt_switch
231 jmp 1f
232#endif
233doreti_ast2:
234 andl $~AST_PENDING,PCPU(astpending)
235 sti
236 movl %eax,%esi /* save cpl (can't use stack) */
237 movl $T_ASTFLT,TF_TRAPNO(%esp)
238 decl PCPU(intr_nesting_level) /* syscall-like, not interrupt-like */
239 subl $TDPRI_CRIT,TD_PRI(%ebx)
2401: call trap
241 addl $TDPRI_CRIT,TD_PRI(%ebx)
2954c92f 242 incl PCPU(intr_nesting_level)
96728c05
MD
243 movl %esi,%eax /* restore cpl for loop */
244 jmp doreti_next
245
246#ifdef SMP
247 /*
248 * IPIQ message pending
249 */
250doreti_ipiq:
251 andl $~AST_IPIQ,PCPU(astpending)
252 call lwkt_process_ipiq
ef0fdad1 253 movl TD_CPL(%ebx),%eax /* retrieve cpl again for loop */
984263bc
MD
254 jmp doreti_next
255
96728c05 256#endif
ef0fdad1
MD
257
258 /*
259 * SPLZ() a C callable procedure to dispatch any unmasked pending
260 * interrupts regardless of critical section nesting. ASTs
261 * are not dispatched.
26a0694b
MD
262 *
263 * YYY at the moment I leave us in a critical section so as
264 * not to have to mess with the cpls which will soon be obsolete.
ef0fdad1
MD
265 */
266 SUPERALIGN_TEXT
267
268ENTRY(splz)
26a0694b 269 pushfl
ef0fdad1 270 pushl %ebx
2954c92f 271 movl PCPU(curthread),%ebx
ef0fdad1 272 movl TD_CPL(%ebx),%eax
26a0694b 273 addl $TDPRI_CRIT,TD_PRI(%ebx)
ef0fdad1
MD
274
275splz_next:
26a0694b 276 cli
ef0fdad1
MD
277 movl %eax,%ecx /* ecx = ~CPL */
278 notl %ecx
96728c05
MD
279#ifdef SMP
280 testl $AST_IPIQ,PCPU(astpending)
281 jnz splz_ipiq
282#endif
a2a5ad0d
MD
283 testl PCPU(fpending),%ecx /* check for an unmasked fast int */
284 jnz splz_fast
285
286 testl PCPU(ipending),%ecx
287 jnz splz_intr
96728c05 288
26a0694b 289 subl $TDPRI_CRIT,TD_PRI(%ebx)
a2a5ad0d
MD
290 testl %eax,%eax
291 jnz 5f
292 movl $0,PCPU(reqpri)
2935:
ef0fdad1 294 popl %ebx
26a0694b 295 popfl
984263bc
MD
296 ret
297
ef0fdad1
MD
298 /*
299 * FAST interrupt pending
300 */
984263bc 301 ALIGN_TEXT
ef0fdad1 302splz_fast:
2954c92f 303 andl PCPU(fpending),%ecx /* only check fast ints */
ef0fdad1 304 bsfl %ecx, %ecx /* locate the next dispatchable int */
2954c92f 305 btrl %ecx, PCPU(fpending) /* is it really still pending? */
ef0fdad1
MD
306 jnc splz_next
307 pushl %eax
96728c05
MD
308#ifdef SMP
309 pushl %ecx
310 call try_mplock
311 popl %ecx
312 testl %eax,%eax
313 jz 1f
314#endif
2954c92f 315 call *fastunpend(,%ecx,4)
96728c05
MD
316#ifdef SMP
317 call rel_mplock
318#endif
319 popl %eax
320 jmp splz_next
3211:
322 btsl %ecx, PCPU(fpending) /* oops, couldn't get the MP lock */
ef0fdad1 323 popl %eax
96728c05 324 orl PCPU(fpending),%eax
ef0fdad1 325 jmp splz_next
984263bc 326
ef0fdad1
MD
327 /*
328 * INTR interrupt pending
96728c05
MD
329 *
330 * Temporarily back-out our critical section to allow the interrupt
331 * preempt us.
ef0fdad1 332 */
984263bc 333 ALIGN_TEXT
ef0fdad1 334splz_intr:
2954c92f 335 andl PCPU(ipending),%ecx /* only check normal ints */
ef0fdad1 336 bsfl %ecx, %ecx /* locate the next dispatchable int */
2954c92f 337 btrl %ecx, PCPU(ipending) /* is it really still pending? */
ef0fdad1 338 jnc splz_next
26a0694b 339 sti
ef0fdad1 340 pushl %eax
984263bc 341 pushl %ecx
96728c05 342 subl $TDPRI_CRIT,TD_PRI(%ebx)
2954c92f 343 call sched_ithd /* YYY must pull in imasks */
96728c05 344 addl $TDPRI_CRIT,TD_PRI(%ebx)
ef0fdad1
MD
345 addl $4,%esp
346 popl %eax
347 jmp splz_next
984263bc 348
96728c05
MD
349#ifdef SMP
350splz_ipiq:
351 andl $~AST_IPIQ,PCPU(astpending)
352 pushl %eax
353 call lwkt_process_ipiq
354 popl %eax
355 jmp splz_next
356#endif
357
ef0fdad1
MD
358 /*
359 * APIC/ICU specific ipl functions provide masking and unmasking
360 * calls for userland.
361 */
984263bc
MD
362
363#ifdef APIC_IO
364#include "i386/isa/apic_ipl.s"
365#else
366#include "i386/isa/icu_ipl.s"
367#endif /* APIC_IO */