2 * Copyright (c) 1997, by Steve Passe
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * $FreeBSD: src/sys/i386/isa/apic_ipl.s,v 1.27.2.2 2000/09/30 02:49:35 ps Exp $
26 * $DragonFly: src/sys/i386/isa/Attic/apic_ipl.s,v 1.2 2003/06/17 04:28:36 dillon Exp $
34 * Routines used by splz_unpend to build an interrupt frame from a
35 * trap frame. The _vec[] routines build the proper frame on the stack,
36 * then call one of _Xintr0 thru _XintrNN.
39 * i386/isa/apic_ipl.s (this file): splz_unpend JUMPs to HWIs.
40 * i386/isa/clock.c: setup _vec[clock] to point at _vec8254.
/*
 * Table of pointers to the vecNN frame-building stubs, one per IO APIC
 * pin (24 entries).  Indexed by IRQ number.
 * NOTE(review): the table's label is not visible in this chunk;
 * presumably it is _vec[] per the comment above - confirm.
 */
44 .long vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7
45 .long vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15
46 .long vec16, vec17, vec18, vec19, vec20, vec21, vec22, vec23
50 * This is the UP equivalent of _imen.
51 * It is OPAQUE, and must NOT be accessed directly.
52 * It MUST be accessed along with the IO APIC as a 'critical region'.
60 .p2align 2 /* MUST be 32bit aligned */
73 * splz() - dispatch pending interrupts after cpl reduced
75 * Interrupt priority mechanism
76 * -- soft splXX masks with group mechanism (cpl)
77 * -- h/w masks for currently active or unused interrupts (imen)
78 * -- ipending = active interrupts currently masked by cpl
83 * The caller has restored cpl and checked that (ipending & ~cpl)
84 * is nonzero. However, since ipending can change at any time
85 * (by an interrupt or, with SMP, by another cpu), we have to
86 * repeat the check. At the moment we must own the MP lock in
87 * the SMP case because the interrupt handlers require it. We
88 * loop until no unmasked pending interrupts remain.
90 * No new unmasked pending interrupts will be added during the
91 * loop because, being unmasked, the interrupt code will be able
92 * to execute the interrupts.
94 * Interrupts come in two flavors: Hardware interrupts and software
95 * interrupts. We have to detect the type of interrupt (based on the
96 * position of the interrupt bit) and call the appropriate dispatch
99 * NOTE: "bsfl %ecx,%ecx" is undefined when %ecx is 0 so we can't
100 * rely on the secondary btrl tests.
105 * We don't need any locking here. (ipending & ~cpl) cannot grow
106 * while we're looking at it - any interrupt will shrink it to 0.
/* NOTE(review): %ecx is presumably loaded with cpl just before this
 * point; the load is not visible in this chunk - confirm. */
109 notl %ecx /* set bit = unmasked level */
110 andl _ipending,%ecx /* set bit = unmasked pending INT */
123 * We would prefer to call the intr handler directly here but that
124 * doesn't work for badly behaved handlers that want the interrupt
125 * frame. Also, there's a problem determining the unit number.
126 * We should change the interface so that the unit number is not
127 * determined at config time.
129 * The vec[] routines build the proper frame on the stack,
130 * then call one of _Xintr0 thru _XintrNN.
/*
 * Software-interrupt dispatch: fold the handler's spl mask (imasks[])
 * into %eax, then call the handler indirectly by index %ecx.
 * NOTE(review): the surrounding loop labels and the hardware-interrupt
 * branch are not visible in this chunk.
 */
137 orl imasks(,%ecx,4),%eax
139 call *_ihandlers(,%ecx,4)
145 * Fake clock interrupt(s) so that they appear to come from our caller instead
146 * of from here, so that system profiling works.
147 * XXX do this more generally (for all vectors; look up the C entry point).
148 * XXX frame bogusness stops us from just jumping to the C entry point.
149 * We have to clear iactive since this is an unpend call, and it will be
150 * set from the time of the original INT.
154 * The 'generic' vector stubs.
/*
 * BUILD_VEC(irq_num) emits the stub vecNN: it clears the IRQ's bit in
 * iactive with a locked RMW (safe against other CPUs), optionally logs
 * an ITRACE event, then jumps to the real interrupt entry _XintrNN,
 * which builds the full interrupt frame.
 * NOTE(review): intermediate lines of the macro body are not visible
 * in this chunk; the macro may contain more than is shown here.
 */
157 #define BUILD_VEC(irq_num) \
159 __CONCAT(vec,irq_num): ; \
165 lock ; /* MP-safe */ \
166 andl $~IRQ_BIT(irq_num), iactive ; /* lazy masking */ \
168 APIC_ITRACE(apic_itrace_splz, irq_num, APIC_ITRACE_SPLZ) ; \
169 jmp __CONCAT(_Xintr,irq_num)
/* Only the pin-16 instantiation is visible in this chunk; pins 16-23
 * are the extra INTs on a 24-pin IO APIC. */
188 BUILD_VEC(16) /* 8 additional INTs in IO APIC */
198 /******************************************************************************
199 * XXX FIXME: figure out where these belong.
/*
 * Debug scaffolding: QUALIFY_MASK is intended to verify its argument
 * has exactly one bit set; defining QUALIFY_MASKS_NOT (instead of
 * QUALIFY_MASKS) disables the check.  NOTE(review): the macro body is
 * not visible in this chunk; presumably a failed check reports the
 * "bad mask" string below - confirm against the full source.
 */
202 /* this nonsense is to verify that masks ALWAYS have 1 and only 1 bit set */
203 #define QUALIFY_MASKS_NOT
206 #define QUALIFY_MASK \
214 bad_mask: .asciz "bad mask"
220 * (soon to be) MP-safe function to clear ONE INT mask bit.
221 * The passed arg is a 32bit u_int MASK.
222 * It clears the associated bit in _apic_imen.
223 * It clears the mask bit of the associated IO APIC register.
226 pushfl /* save state of EI flag */
227 cli /* prevent recursion */
228 IMASK_LOCK /* enter critical reg */
230 movl 8(%esp), %eax /* mask into %eax */
231 bsfl %eax, %ecx /* get pin index */
232 btrl %ecx, _apic_imen /* update _apic_imen (clear pin's bit) */
/* NOTE(review): the scaling of %ecx into a struct offset (e.g. a shll)
 * is not visible in this chunk - confirm before relying on the
 * int_to_apicintpin indexing below. */
237 movl CNAME(int_to_apicintpin) + 8(%ecx), %edx /* %edx = IO APIC base (used as reg-select below) */
238 movl CNAME(int_to_apicintpin) + 12(%ecx), %ecx /* %ecx = target register index */
242 movl %ecx, (%edx) /* write the target register index */
243 movl 16(%edx), %eax /* read the target register data */
244 andl $~IOART_INTMASK, %eax /* clear mask bit => pin unmasked */
245 movl %eax, 16(%edx) /* write the APIC register data */
247 IMASK_UNLOCK /* exit critical reg */
248 popfl /* restore old state of EI flag */
252 * (soon to be) MP-safe function to set ONE INT mask bit.
253 * The passed arg is a 32bit u_int MASK.
254 * It sets the associated bit in _apic_imen.
255 * It sets the mask bit of the associated IO APIC register.
258 pushfl /* save state of EI flag */
259 cli /* prevent recursion */
260 IMASK_LOCK /* enter critical reg */
262 movl 8(%esp), %eax /* mask into %eax */
263 bsfl %eax, %ecx /* get pin index */
264 btsl %ecx, _apic_imen /* update _apic_imen (set pin's bit) */
/* NOTE(review): the scaling of %ecx into a struct offset is not
 * visible in this chunk - confirm before relying on the
 * int_to_apicintpin indexing below. */
269 movl CNAME(int_to_apicintpin) + 8(%ecx), %edx /* %edx = IO APIC base (used as reg-select below) */
270 movl CNAME(int_to_apicintpin) + 12(%ecx), %ecx /* %ecx = target register index */
274 movl %ecx, (%edx) /* write the target register index */
275 movl 16(%edx), %eax /* read the target register data */
276 orl $IOART_INTMASK, %eax /* set mask bit => pin masked off */
277 movl %eax, 16(%edx) /* write the APIC register data */
279 IMASK_UNLOCK /* exit critical reg */
280 popfl /* restore old state of EI flag */
284 /******************************************************************************
290 * void write_ioapic_mask(int apic, u_int mask);
/* IO APIC redirection-entry mask bit, and the 24 valid pin bits. */
293 #define _INT_MASK 0x00010000
294 #define _PIN_MASK 0x00ffffff
/* Symbolic stack-slot names, valid after the two pushes below. */
296 #define _OLD_ESI 0(%esp)
297 #define _OLD_EBX 4(%esp)
298 #define _RETADDR 8(%esp)
299 #define _APIC 12(%esp)
300 #define _MASK 16(%esp)
304 pushl %ebx /* scratch */
305 pushl %esi /* scratch */
307 movl _apic_imen, %ebx
308 xorl _MASK, %ebx /* %ebx = _apic_imen ^ mask = changed bits */
309 andl $_PIN_MASK, %ebx /* keep only the 24 valid pin bits of the diff */
310 jz all_done /* no change, return */
312 movl _APIC, %esi /* APIC # */
/* NOTE(review): %ecx is expected to hold the IO APIC base-address
 * table pointer here; its load is not visible in this chunk - confirm. */
314 movl (%ecx,%esi,4), %esi /* %esi holds APIC base address */
316 next_loop: /* %ebx = diffs, %esi = APIC base */
317 bsfl %ebx, %ecx /* %ecx = index of 1st/next set bit */
320 btrl %ecx, %ebx /* clear this bit in diffs */
321 leal 16(,%ecx,2), %edx /* register index = 0x10 + 2*pin (redir entry low dword) */
323 movl %edx, (%esi) /* write the target register index */
324 movl 16(%esi), %eax /* read the target register data */
326 btl %ecx, _MASK /* test for mask or unmask */
327 jnc clear /* bit is clear */
328 orl $_INT_MASK, %eax /* set mask bit */
/* NOTE(review): a 'jmp write' skipping the clear case is expected
 * between these lines but is not visible in this chunk - confirm. */
330 clear: andl $~_INT_MASK, %eax /* clear mask bit */
332 write: movl %eax, 16(%esi) /* write the APIC register data */
334 jmp next_loop /* try another pass */
/* Enable path via write_ioapic_mask(): compute _apic_imen with the
 * caller's mask bits cleared, push it, and commit it after the call.
 * NOTE(review): the entry label is not visible in this chunk.  As
 * literally written the first instruction loads _apic_imen, making the
 * result of the not/and sequence zero; presumably the source operand
 * should be the caller's mask argument (4(%esp)) - confirm against the
 * full source. */
353 movl _apic_imen, %eax
354 notl %eax /* mask = ~mask */
355 andl _apic_imen, %eax /* %eax = _apic_imen & ~mask */
357 pushl %eax /* new (future) _apic_imen value */
358 pushl $0 /* APIC# arg */
359 call write_ioapic_mask /* modify the APIC registers */
361 addl $4, %esp /* remove APIC# arg from stack */
362 popl _apic_imen /* _apic_imen &= ~mask */
/* Disable path via write_ioapic_mask(): OR the caller's mask into
 * _apic_imen, push the new value, and commit it after the call.
 * NOTE(review): the entry label is not visible in this chunk. */
366 movl _apic_imen, %eax
367 orl 4(%esp), %eax /* %eax = _apic_imen | mask */
369 pushl %eax /* new (future) _apic_imen value */
370 pushl $0 /* APIC# arg */
371 call write_ioapic_mask /* modify the APIC registers */
373 addl $4, %esp /* remove APIC# arg from stack */
374 popl _apic_imen /* _apic_imen |= mask */
383 * u_int read_io_apic_mask(int apic);
390 * Set INT mask bit for each bit set in 'mask'.
391 * Ignore INT mask bit for all others.
393 * void set_io_apic_mask(int apic, u_int32_t bits);
400 * void set_ioapic_maskbit(int apic, int bit);
407 * Clear INT mask bit for each bit set in 'mask'.
408 * Ignore INT mask bit for all others.
410 * void clr_io_apic_mask(int apic, u_int32_t bits);
417 * void clr_ioapic_maskbit(int apic, int bit);
425 /******************************************************************************
430 * u_int io_apic_read(int apic, int select);
/* Indirect IO APIC read: select the register via the index window at
 * offset 0, read its value from the data window at offset 16.
 * NOTE(review): the entry label is not visible in this chunk, and %eax
 * is expected to hold the IO APIC base-address table pointer at the
 * indexed load below; that load is not shown - confirm. */
433 movl 4(%esp), %ecx /* APIC # */
435 movl (%eax,%ecx,4), %edx /* APIC base register address */
436 movl 8(%esp), %eax /* target register index */
437 movl %eax, (%edx) /* write the target register index */
438 movl 16(%edx), %eax /* read the APIC register data */
439 ret /* %eax = register value */
442 * void io_apic_write(int apic, int select, int value);
/* Indirect IO APIC write: select the register via the index window at
 * offset 0, write the value to the data window at offset 16.
 * NOTE(review): the entry label is not visible in this chunk, and %eax
 * is expected to hold the IO APIC base-address table pointer at the
 * indexed load below; that load is not shown - confirm. */
445 movl 4(%esp), %ecx /* APIC # */
447 movl (%eax,%ecx,4), %edx /* APIC base register address */
448 movl 8(%esp), %eax /* target register index */
449 movl %eax, (%edx) /* write the target register index */
450 movl 12(%esp), %eax /* target register value */
451 movl %eax, 16(%edx) /* write the APIC register data */
452 ret /* %eax = void */
455 * Send an EOI to the local APIC.