Finish migrating the cpl into the thread structure.
[dragonfly.git] / sys / platform / pc32 / isa / apic_ipl.s
CommitLineData
984263bc
MD
1/*-
2 * Copyright (c) 1997, by Steve Passe
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: src/sys/i386/isa/apic_ipl.s,v 1.27.2.2 2000/09/30 02:49:35 ps Exp $
8f41e33b 26 * $DragonFly: src/sys/platform/pc32/isa/Attic/apic_ipl.s,v 1.4 2003/06/22 08:54:22 dillon Exp $
984263bc
MD
27 */
28
29
30 .data
31 ALIGN_DATA
32
33/*
34 * Routines used by splz_unpend to build an interrupt frame from a
35 * trap frame. The _vec[] routines build the proper frame on the stack,
36 * then call one of _Xintr0 thru _XintrNN.
37 *
38 * used by:
39 * i386/isa/apic_ipl.s (this file): splz_unpend JUMPs to HWIs.
40 * i386/isa/clock.c: setup _vec[clock] to point at _vec8254.
41 */
42 .globl _vec
43_vec:
44 .long vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7 /* one stub per h/w INT, indexed by IRQ number (0..23) */
45 .long vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15
46 .long vec16, vec17, vec18, vec19, vec20, vec21, vec22, vec23
47
48/*
49 * Note:
50 * This is the UP equivalent of _imen.
51 * It is OPAQUE, and must NOT be accessed directly.
52 * It MUST be accessed along with the IO APIC as a 'critical region'.
53 * Accessed by:
54 * INTREN()
55 * INTRDIS()
56 * MAYBE_MASK_IRQ
57 * MAYBE_UNMASK_IRQ
58 * imen_dump()
59 */
60 .p2align 2 /* MUST be 32bit aligned */
61 .globl _apic_imen
62_apic_imen:
63 .long HWI_MASK /* set bit = INT masked; start with all h/w INTs masked */
64
65
66/*
67 *
68 */
69 .text
70 SUPERALIGN_TEXT
71
72/*
73 * splz() - dispatch pending interrupts after cpl reduced
74 *
75 * Interrupt priority mechanism
76 * -- soft splXX masks with group mechanism (cpl)
77 * -- h/w masks for currently active or unused interrupts (imen)
78 * -- ipending = active interrupts currently masked by cpl
79 */
80
81ENTRY(splz)
82 /*
83 * The caller has restored cpl and checked that (ipending & ~cpl)
84 * is nonzero. However, since ipending can change at any time
85 * (by an interrupt or, with SMP, by another cpu), we have to
86 * repeat the check. At the moment we must own the MP lock in
87 * the SMP case because the interrupt handlers require it. We
88 * loop until no unmasked pending interrupts remain.
89 *
90 * No new unmasked pending interrupts will be added during the
91 * loop because, being unmasked, the interrupt code will be able
92 * to execute the interrupts.
93 *
94 * Interrupts come in two flavors: Hardware interrupts and software
95 * interrupts. We have to detect the type of interrupt (based on the
96 * position of the interrupt bit) and call the appropriate dispatch
97 * routine.
98 *
99 * NOTE: "bsfl %ecx,%ecx" is undefined when %ecx is 0 so we can't
100 * rely on the secondary btrl tests.
101 */
8f41e33b
MD
102 pushl %ebx /* %ebx is callee-saved; holds curthread across the loop */
103 movl _curthread,%ebx
104 movl TD_MACH+MTD_CPL(%ebx),%eax /* %eax = current cpl for the whole scan */
984263bc
MD
105splz_next:
106 /*
107 * We don't need any locking here. (ipending & ~cpl) cannot grow
108 * while we're looking at it - any interrupt will shrink it to 0.
109 */
f1d1c3fa 110 movl $0,_reqpri
984263bc
MD
111 movl %eax,%ecx
112 notl %ecx /* set bit = unmasked level */
113 andl _ipending,%ecx /* set bit = unmasked pending INT */
114 jne splz_unpend
8f41e33b 115 popl %ebx
984263bc
MD
116 ret
117
118 ALIGN_TEXT
119splz_unpend:
120 bsfl %ecx,%ecx /* lowest set bit = first unmasked pending INT number */
121 lock
122 btrl %ecx,_ipending /* atomically claim the pending bit */
123 jnc splz_next /* lost the race to an interrupt - rescan */
124 cmpl $NHWI,%ecx
125 jae splz_swi /* bit# >= NHWI => software interrupt */
126 /*
127 * We would prefer to call the intr handler directly here but that
128 * doesn't work for badly behaved handlers that want the interrupt
129 * frame. Also, there's a problem determining the unit number.
130 * We should change the interface so that the unit number is not
131 * determined at config time.
132 *
8f41e33b
MD
133 * The vec[] routines build the proper frame on the stack so
134 * the interrupt will eventually return to the caller or splz,
135 * then calls one of _Xintr0 thru _XintrNN.
984263bc 136 */
8f41e33b 137 popl %ebx
984263bc
MD
138 jmp *_vec(,%ecx,4) /* tail-jump to the frame-building stub for this IRQ */
139
140 ALIGN_TEXT
141splz_swi:
8f41e33b 142 pushl %eax /* save cpl across call */
984263bc 143 orl imasks(,%ecx,4),%eax /* raise cpl by this SWI's interrupt mask */
8f41e33b 144 movl %eax,TD_MACH+MTD_CPL(%ebx) /* set cpl for SWI */
984263bc
MD
145 call *_ihandlers(,%ecx,4) /* dispatch the SWI handler (C code) */
146 popl %eax
8f41e33b 147 movl %eax,TD_MACH+MTD_CPL(%ebx) /* restore cpl and loop */
984263bc
MD
148 jmp splz_next
149
150/*
151 * Fake clock interrupt(s) so that they appear to come from our caller instead
152 * of from here, so that system profiling works.
153 * XXX do this more generally (for all vectors; look up the C entry point).
154 * XXX frame bogusness stops us from just jumping to the C entry point.
155 * We have to clear iactive since this is an unpend call, and it will be
156 * set from the time of the original INT.
157 */
158
159/*
160 * The 'generic' vector stubs.
161 */
162
/*
 * Each stub replaces the return address on the stack with the
 * EFLAGS/CS/EIP triple a real interrupt would have pushed, so that
 * _XintrNN (and its eventual iret) returns to splz's caller.
 * Interrupts are disabled first since _XintrNN expects to be
 * entered with them off.
 */
163#define BUILD_VEC(irq_num) \
164 ALIGN_TEXT ; \
165__CONCAT(vec,irq_num): ; \
166 popl %eax ; /* grab caller's return EIP */ \
167 pushfl ; /* fake frame: EFLAGS */ \
168 pushl $KCSEL ; /* fake frame: kernel CS */ \
169 pushl %eax ; /* fake frame: EIP */ \
170 cli ; \
171 lock ; /* MP-safe */ \
172 andl $~IRQ_BIT(irq_num), iactive ; /* lazy masking */ \
173 MEXITCOUNT ; \
174 APIC_ITRACE(apic_itrace_splz, irq_num, APIC_ITRACE_SPLZ) ; \
175 jmp __CONCAT(_Xintr,irq_num)
176
177
178 BUILD_VEC(0)
179 BUILD_VEC(1)
180 BUILD_VEC(2)
181 BUILD_VEC(3)
182 BUILD_VEC(4)
183 BUILD_VEC(5)
184 BUILD_VEC(6)
185 BUILD_VEC(7)
186 BUILD_VEC(8)
187 BUILD_VEC(9)
188 BUILD_VEC(10)
189 BUILD_VEC(11)
190 BUILD_VEC(12)
191 BUILD_VEC(13)
192 BUILD_VEC(14)
193 BUILD_VEC(15)
194 BUILD_VEC(16) /* 8 additional INTs in IO APIC */
195 BUILD_VEC(17)
196 BUILD_VEC(18)
197 BUILD_VEC(19)
198 BUILD_VEC(20)
199 BUILD_VEC(21)
200 BUILD_VEC(22)
201 BUILD_VEC(23)
202
203
204/******************************************************************************
205 * XXX FIXME: figure out where these belong.
206 */
207
208/* this nonsense is to verify that masks ALWAYS have 1 and only 1 bit set */
/*
 * NOTE: the symbol actually defined is QUALIFY_MASKS_NOT, so the
 * #ifdef QUALIFY_MASKS below is false and QUALIFY_MASK expands to
 * nothing.  Rename the define to QUALIFY_MASKS to enable the check
 * (panics if the caller-supplied mask had more than one bit set).
 */
209#define QUALIFY_MASKS_NOT
210
211#ifdef QUALIFY_MASKS
212#define QUALIFY_MASK \
213 btrl %ecx, %eax ; /* clear the bit bsfl found; anything left => bad mask */ \
214 andl %eax, %eax ; \
215 jz 1f ; \
216 pushl $bad_mask ; \
217 call _panic ; \
2181:
219
220bad_mask: .asciz "bad mask"
221#else
222#define QUALIFY_MASK
223#endif
224
225/*
226 * (soon to be) MP-safe function to clear ONE INT mask bit.
227 * The passed arg is a 32bit u_int MASK.
228 * It clears the associated bit in _apic_imen.
229 * It clears the mask bit of the associated IO APIC register.
230 */
231ENTRY(INTREN)
232 pushfl /* save state of EI flag */
233 cli /* prevent recursion */
234 IMASK_LOCK /* enter critical reg */
235
236 movl 8(%esp), %eax /* mask into %eax (8: retaddr + saved EFLAGS) */
237 bsfl %eax, %ecx /* get pin index */
238 btrl %ecx, _apic_imen /* update _apic_imen (clear = enabled) */
239
240 QUALIFY_MASK
241
242 shll $4, %ecx /* 16 bytes per int_to_apicintpin[] entry */
243 movl CNAME(int_to_apicintpin) + 8(%ecx), %edx /* presumably the IO APIC register window addr - confirm struct layout */
244 movl CNAME(int_to_apicintpin) + 12(%ecx), %ecx /* presumably the redirection register index - confirm struct layout */
245 testl %edx, %edx
246 jz 1f /* no IO APIC pin mapped for this IRQ */
247
248 movl %ecx, (%edx) /* write the target register index */
249 movl 16(%edx), %eax /* read the target register data */
250 andl $~IOART_INTMASK, %eax /* clear mask bit */
251 movl %eax, 16(%edx) /* write the APIC register data */
2521:
253 IMASK_UNLOCK /* exit critical reg */
254 popfl /* restore old state of EI flag */
255 ret
256
257/*
258 * (soon to be) MP-safe function to set ONE INT mask bit.
259 * The passed arg is a 32bit u_int MASK.
260 * It sets the associated bit in _apic_imen.
261 * It sets the mask bit of the associated IO APIC register.
262 */
263ENTRY(INTRDIS)
264 pushfl /* save state of EI flag */
265 cli /* prevent recursion */
266 IMASK_LOCK /* enter critical reg */
267
268 movl 8(%esp), %eax /* mask into %eax (8: retaddr + saved EFLAGS) */
269 bsfl %eax, %ecx /* get pin index */
270 btsl %ecx, _apic_imen /* update _apic_imen (set = masked) */
271
272 QUALIFY_MASK
273
274 shll $4, %ecx /* 16 bytes per int_to_apicintpin[] entry */
275 movl CNAME(int_to_apicintpin) + 8(%ecx), %edx /* presumably the IO APIC register window addr - confirm struct layout */
276 movl CNAME(int_to_apicintpin) + 12(%ecx), %ecx /* presumably the redirection register index - confirm struct layout */
277 testl %edx, %edx
278 jz 1f /* no IO APIC pin mapped for this IRQ */
279
280 movl %ecx, (%edx) /* write the target register index */
281 movl 16(%edx), %eax /* read the target register data */
282 orl $IOART_INTMASK, %eax /* set mask bit */
283 movl %eax, 16(%edx) /* write the APIC register data */
2841:
285 IMASK_UNLOCK /* exit critical reg */
286 popfl /* restore old state of EI flag */
287 ret
288
289
290/******************************************************************************
291 *
292 */
293
294
295/*
296 * void write_ioapic_mask(int apic, u_int mask);
297 *
298 * For every pin whose masked state in 'mask' differs from _apic_imen,
299 * rewrite that pin's redirection entry mask bit to match 'mask'.
300 * NOTE: _apic_imen itself is NOT updated here; the caller does that.
301 */
302
299#define _INT_MASK 0x00010000
300#define _PIN_MASK 0x00ffffff
301
302#define _OLD_ESI 0(%esp)
303#define _OLD_EBX 4(%esp)
304#define _RETADDR 8(%esp)
305#define _APIC 12(%esp)
306#define _MASK 16(%esp)
307
308 ALIGN_TEXT
309write_ioapic_mask:
310 pushl %ebx /* scratch */
311 pushl %esi /* scratch */
312
313 movl _apic_imen, %ebx
314 xorl _MASK, %ebx /* %ebx = _apic_imen ^ mask */
315 andl $_PIN_MASK, %ebx /* %ebx = changed pins, restricted to valid pin bits */
316 jz all_done /* no change, return */
317
318 movl _APIC, %esi /* APIC # */
319 movl _ioapic, %ecx
320 movl (%ecx,%esi,4), %esi /* %esi holds APIC base address */
321
322next_loop: /* %ebx = diffs, %esi = APIC base */
323 bsfl %ebx, %ecx /* %ecx = index if 1st/next set bit */
324 jz all_done
325
326 btrl %ecx, %ebx /* clear this bit in diffs */
327 leal 16(,%ecx,2), %edx /* calculate register index (0x10 + 2*pin) */
328
329 movl %edx, (%esi) /* write the target register index */
330 movl 16(%esi), %eax /* read the target register data */
331
332 btl %ecx, _MASK /* test for mask or unmask */
333 jnc clear /* bit is clear */
334 orl $_INT_MASK, %eax /* set mask bit */
335 jmp write
336clear: andl $~_INT_MASK, %eax /* clear mask bit */
337
338write: movl %eax, 16(%esi) /* write the APIC register data */
339
340 jmp next_loop /* try another pass */
341
342all_done:
343 popl %esi
344 popl %ebx
345 ret
346
347#undef _OLD_ESI
348#undef _OLD_EBX
349#undef _RETADDR
350#undef _APIC
351#undef _MASK
352
353#undef _PIN_MASK
354#undef _INT_MASK
355
356#ifdef oldcode
357
/*
 * NOTE(review): dead code ('oldcode' is never defined).  As written,
 * _INTREN never reads its mask argument: %eax = _apic_imen & ~_apic_imen
 * is always 0.  The first instruction presumably should have been
 * "movl 4(%esp), %eax" -- verify before ever resurrecting this.
 */
358_INTREN:
359 movl _apic_imen, %eax
360 notl %eax /* mask = ~mask */
361 andl _apic_imen, %eax /* %eax = _apic_imen & ~mask */
362
363 pushl %eax /* new (future) _apic_imen value */
364 pushl $0 /* APIC# arg */
365 call write_ioapic_mask /* modify the APIC registers */
366
367 addl $4, %esp /* remove APIC# arg from stack */
368 popl _apic_imen /* _apic_imen &= ~mask */
369 ret
370
371_INTRDIS:
372 movl _apic_imen, %eax
373 orl 4(%esp), %eax /* %eax = _apic_imen | mask */
374
375 pushl %eax /* new (future) _apic_imen value */
376 pushl $0 /* APIC# arg */
377 call write_ioapic_mask /* modify the APIC registers */
378
379 addl $4, %esp /* remove APIC# arg from stack */
380 popl _apic_imen /* _apic_imen |= mask */
381 ret
382
383#endif /* oldcode */
384
385
386#ifdef ready
/* NOTE: 'ready' is never defined; these are unimplemented placeholders (all just ret) */
387
388/*
389 * u_int read_io_apic_mask(int apic);
390 */
391 ALIGN_TEXT
392read_io_apic_mask:
393 ret
394
395/*
396 * Set INT mask bit for each bit set in 'mask'.
397 * Ignore INT mask bit for all others.
398 *
399 * void set_io_apic_mask(apic, u_int32_t bits);
400 */
401 ALIGN_TEXT
402set_io_apic_mask:
403 ret
404
405/*
406 * void set_ioapic_maskbit(int apic, int bit);
407 */
408 ALIGN_TEXT
409set_ioapic_maskbit:
410 ret
411
412/*
413 * Clear INT mask bit for each bit set in 'mask'.
414 * Ignore INT mask bit for all others.
415 *
416 * void clr_io_apic_mask(int apic, u_int32_t bits);
417 */
418 ALIGN_TEXT
419clr_io_apic_mask:
420 ret
421
422/*
423 * void clr_ioapic_maskbit(int apic, int bit);
424 */
425 ALIGN_TEXT
426clr_ioapic_maskbit:
427 ret
428
429#endif /** ready */
430
431/******************************************************************************
432 *
433 */
434
435/*
436 * u_int io_apic_read(int apic, int select);
437 *
438 * IO APIC register window: index register at base+0, data at base+0x10.
439 */
438ENTRY(io_apic_read)
439 movl 4(%esp), %ecx /* APIC # */
440 movl _ioapic, %eax
441 movl (%eax,%ecx,4), %edx /* APIC base register address */
442 movl 8(%esp), %eax /* target register index */
443 movl %eax, (%edx) /* write the target register index */
444 movl 16(%edx), %eax /* read the APIC register data */
445 ret /* %eax = register value */
446
447/*
448 * void io_apic_write(int apic, int select, int value);
449 *
450 * IO APIC register window: index register at base+0, data at base+0x10.
451 */
450ENTRY(io_apic_write)
451 movl 4(%esp), %ecx /* APIC # */
452 movl _ioapic, %eax
453 movl (%eax,%ecx,4), %edx /* APIC base register address */
454 movl 8(%esp), %eax /* target register index */
455 movl %eax, (%edx) /* write the target register index */
456 movl 12(%esp), %eax /* target register value */
457 movl %eax, 16(%edx) /* write the APIC register data */
458 ret /* %eax = void */
459
460/*
461 * Send an EOI to the local APIC.
462 */
463ENTRY(apic_eoi)
464 movl $0, _lapic+0xb0 /* 0xb0 = local APIC EOI register; the value written is ignored */
465 ret