Finish migrating the cpl into the thread structure.
[dragonfly.git] / sys / platform / pc32 / isa / apic_vector.s
/*
 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/isa/Attic/apic_vector.s,v 1.5 2003/06/22 08:54:22 dillon Exp $
 */
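
/*
 * Note: as of this revision the cpl is per-thread rather than global;
 * the interrupt code below reads and writes it through
 * TD_MACH+MTD_CPL(reg) after loading _curthread into a register.
 */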

#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))
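/* e.g. IRQ_BIT(3) == (1 << 3) == 0x00000008 */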

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
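/*
 * The I/O APIC's 64-bit redirection entries start at register index 0x10
 * and each occupies two 32-bit registers, so e.g. REDTBL_IDX(2) == 0x14
 * selects the low dword of redirection entry 2 (its high dword is 0x15).
 */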


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define	FAST_INTR(irq_num, vec_name) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	pushl	%eax ;		/* save only call-used registers */ \
	pushl	%ecx ; \
	pushl	%edx ; \
	pushl	%ds ; \
	MAYBE_PUSHL_ES ; \
	pushl	%fs ; \
	movl	$KDSEL,%eax ; \
	mov	%ax,%ds ; \
	MAYBE_MOVW_AX_ES ; \
	movl	$KPSEL,%eax ; \
	mov	%ax,%fs ; \
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ; \
	pushl	_intr_unit + (irq_num) * 4 ; \
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ; \
	movl	$0, lapic_eoi ; \
	lock ; \
	incl	_cnt+V_INTR ;	/* book-keeping can wait */ \
	movl	_intr_countp + (irq_num) * 4, %eax ; \
	lock ; \
	incl	(%eax) ; \
	MEXITCOUNT ; \
	popl	%fs ; \
	MAYBE_POPL_ES ; \
	popl	%ds ; \
	popl	%edx ; \
	popl	%ecx ; \
	popl	%eax ; \
	iret
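
/*
 * The FAST_INTR path above saves only the call-used registers, runs the
 * handler immediately, and EOIs the local APIC afterwards; it does not
 * touch the cpl or the iactive lazy-masking state used by INTR() below.
 */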

/*
 * Push/pop a clean interrupt frame: dummy error code and trap type,
 * all general registers, and the %ds/%es/%fs segment registers.
 */
#define PUSH_FRAME \
	pushl	$0 ;		/* dummy error code */ \
	pushl	$0 ;		/* dummy trap type */ \
	pushal ; \
	pushl	%ds ;		/* save data and extra segments ... */ \
	pushl	%es ; \
	pushl	%fs

#define POP_FRAME \
	popl	%fs ; \
	popl	%es ; \
	popl	%ds ; \
	popal ; \
	addl	$4+4,%esp	/* pop the dummy trap type and error code */

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num) \
	IMASK_LOCK ;		/* into critical reg */ \
	testl	$IRQ_BIT(irq_num), _apic_imen ; \
	jne	7f ;		/* already masked, don't mask again */ \
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */ \
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */ \
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */ \
	movl	%eax, (%ecx) ;			/* write the index */ \
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */ \
	orl	$IOART_INTMASK, %eax ;		/* set the mask */ \
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */ \
7: ;			/* already masked */ \
	IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge- or level-triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num) \
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ; \
	jz	9f ;		/* edge, don't mask */ \
	MASK_IRQ(irq_num) ; \
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num) \
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ; \
	movl	(%eax), %eax ; \
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
	jz	9f ;		/* not active */ \
	movl	$0, lapic_eoi ; \
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
9:

#else
#define EOI_IRQ(irq_num) \
	testl	$IRQ_BIT(irq_num), lapic_isr1 ; \
	jz	9f ;		/* not active */ \
	movl	$0, lapic_eoi ; \
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num) \
	IMASK_LOCK ;		/* into critical reg */ \
	testl	$IRQ_BIT(irq_num), _apic_imen ; \
	je	7f ;		/* bit clear, not masked */ \
	andl	$~IRQ_BIT(irq_num), _apic_imen ; /* clear mask bit */ \
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */ \
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */ \
	movl	%eax, (%ecx) ;			/* write the index */ \
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */ \
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */ \
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */ \
7: ; \
	IMASK_UNLOCK
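
/*
 * MASK_IRQ and UNMASK_IRQ use the I/O APIC's indirect access protocol:
 * write the register index to the select register at the chip's base
 * address, then read/modify/write the register contents through the
 * data window (IOAPIC_WINDOW) that follows it.
 */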

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_lock_np)
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	_cpuid, %eax
	shll	$8, %eax
	orl	8(%esp), %eax
	movw	%ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx, CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_unlock_np)
	addl	$4, %esp
	popf
	ret


#define APIC_ITRACE(name, irq_num, id) \
	lock ;			/* MP-safe */ \
	incl	CNAME(name) + (irq_num) * 4 ; \
	pushl	%eax ; \
	pushl	%ecx ; \
	pushl	%edx ; \
	movl	$(irq_num), %eax ; \
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ; \
	jne	7f ; \
	pushl	$id ; \
	call	log_intr_event ; \
	addl	$4, %esp ; \
7: ; \
	popl	%edx ; \
	popl	%ecx ; \
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id) \
	lock ;			/* MP-safe */ \
	incl	CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif

#define	INTR(irq_num, vec_name, maybe_extra_ipending) \
	.text ; \
	SUPERALIGN_TEXT ; \
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
IDTVEC(vec_name) ; \
	PUSH_FRAME ; \
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */ \
	mov	%ax, %ds ; \
	mov	%ax, %es ; \
	movl	$KPSEL, %eax ; \
	mov	%ax, %fs ; \
; \
	maybe_extra_ipending ; \
; \
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \
	lock ;			/* MP-safe */ \
	btsl	$(irq_num), iactive ;	/* lazy masking */ \
	jc	1f ;		/* already active */ \
; \
	MASK_LEVEL_IRQ(irq_num) ; \
	EOI_IRQ(irq_num) ; \
0: ; \
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ; \
	MP_TRYLOCK ;		/* XXX this is going away... */ \
	testl	%eax, %eax ;	/* did we get it? */ \
	jz	3f ;		/* no */ \
; \
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ; \
	movl	_curthread,%ebx ; \
	testl	$IRQ_BIT(irq_num), TD_MACH+MTD_CPL(%ebx) ; \
	jne	2f ;		/* this INT masked */ \
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ; \
	jge	2f ;		/* in critical sec */ \
; \
	incb	_intr_nesting_level ; \
; \
	/* entry point used by doreti_unpend for HWIs. */ \
__CONCAT(Xresume,irq_num): ; \
	FAKE_MCOUNT(13*4(%esp)) ;	/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;	/* tally interrupts */ \
	movl	_intr_countp + (irq_num) * 4, %eax ; \
	lock ;	incl	(%eax) ; \
; \
	movl	_curthread, %ebx ; \
	movl	TD_MACH+MTD_CPL(%ebx), %eax ; \
	pushl	%eax ;		/* cpl restored by doreti */ \
	orl	_intr_mask + (irq_num) * 4, %eax ; \
	movl	%eax, TD_MACH+MTD_CPL(%ebx) ; \
	lock ; \
	andl	$~IRQ_BIT(irq_num), _ipending ; \
; \
	pushl	_intr_unit + (irq_num) * 4 ; \
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \
	sti ; \
	call	*_intr_handler + (irq_num) * 4 ; \
	cli ; \
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \
; \
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ; \
	UNMASK_IRQ(irq_num) ; \
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ; \
	sti ;			/* doreti repeats cli/sti */ \
	MEXITCOUNT ; \
	jmp	_doreti ; \
; \
	ALIGN_TEXT ; \
1: ;				/* active */ \
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ; \
	MASK_IRQ(irq_num) ; \
	EOI_IRQ(irq_num) ; \
	lock ; \
	orl	$IRQ_BIT(irq_num), _ipending ; \
	movl	$TDPRI_CRIT,_reqpri ; \
	lock ; \
	btsl	$(irq_num), iactive ;	/* still active */ \
	jnc	0b ;		/* retry */ \
	POP_FRAME ; \
	iret ;			/* XXX: iactive bit might be 0 now */ \
	ALIGN_TEXT ; \
2: ;				/* masked by cpl, leave iactive set */ \
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \
	lock ; \
	orl	$IRQ_BIT(irq_num), _ipending ; \
	movl	$TDPRI_CRIT,_reqpri ; \
	MP_RELLOCK ; \
	POP_FRAME ; \
	iret ; \
	ALIGN_TEXT ; \
3: ;				/* other cpu has isr lock */ \
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ; \
	lock ; \
	orl	$IRQ_BIT(irq_num), _ipending ; \
	movl	$TDPRI_CRIT,_reqpri ; \
	movl	_curthread,%ebx ;	/* %ebx is not yet loaded on this path */ \
	testl	$IRQ_BIT(irq_num), TD_MACH+MTD_CPL(%ebx) ; \
	jne	4f ;		/* this INT masked */ \
	call	forward_irq ;	/* forward irq to lock holder */ \
	POP_FRAME ;		/* and return */ \
	iret ; \
	ALIGN_TEXT ; \
4: ;				/* blocked */ \
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ; \
	POP_FRAME ;		/* and return */ \
	iret
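
/*
 * INTR() control flow overview: entry sets this IRQ's bit in iactive
 * (lazy masking), masks/EOIs as needed, then tries the MP lock.  With
 * the lock held the handler runs unless the IRQ is masked by the
 * current thread's cpl or the thread is in a critical section, in which
 * case the IRQ is marked in _ipending and delivery is deferred (labels
 * 2 and 4).  Label 1 handles an IRQ that is already active; label 3
 * forwards the interrupt to the cpu that holds the lock.
 */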

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *  8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movl	_cpuid, %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret
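
/*
 * Note: rewriting %cr3 with its current value flushes all non-global
 * TLB entries on the i386, which is all this shootdown handler needs.
 */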


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU.
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *    0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)
	jne	1f
	incl	%ebx			/* system or interrupt */
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curthread, %ebx
	movl	TD_PROC(%ebx), %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	16(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU.
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(13*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_mplock

	movl	_curthread, %eax
	pushl	TD_MACH+MTD_CPL(%eax)	/* cpl restored by doreti */

	orl	$AST_PENDING, _astpending	/* XXX */
	incb	_intr_nesting_level
	sti

	pushl	$0

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	orl	$AST_PENDING+AST_RESCHED, _astpending
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret
480
481/*
482 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
483 */
484
485 .text
486 SUPERALIGN_TEXT
487 .globl _Xforward_irq
488_Xforward_irq:
489 PUSH_FRAME
490 movl $KDSEL, %eax
491 mov %ax, %ds /* use KERNEL data segment */
492 mov %ax, %es
493 movl $KPSEL, %eax
494 mov %ax, %fs
495
496 movl $0, lapic_eoi /* End Of Interrupt to APIC */
497
498 FAKE_MCOUNT(13*4(%esp))
499
500 MP_TRYLOCK
501 testl %eax,%eax /* Did we get the lock ? */
502 jz 1f /* No */
503
504 lock
505 incl CNAME(forward_irq_hitcnt)
506 cmpb $4, _intr_nesting_level
507 jae 2f
508
8f41e33b
MD
509 movl _curthread, %eax
510 pushl TD_MACH+MTD_CPL(%eax) /* cpl restored by doreti */
511
984263bc
MD
512 incb _intr_nesting_level
513 sti
514
515 pushl $0
516
517 MEXITCOUNT
518 jmp _doreti /* Handle forwarded interrupt */
5191:
520 lock
521 incl CNAME(forward_irq_misscnt)
522 call forward_irq /* Oops, we've lost the isr lock */
523 MEXITCOUNT
524 POP_FRAME
525 iret
5262:
527 lock
528 incl CNAME(forward_irq_toodeepcnt)
5293:
530 MP_RELLOCK
531 MEXITCOUNT
532 POP_FRAME
533 iret
534
/*
 * Forward an interrupt to the cpu holding the MP lock (or to cpu #0 if
 * the lock is free) by sending it an XFORWARD_IRQ IPI.
 */
forward_irq:
	MCOUNT
	cmpl	$0, _invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	movl	_mp_lock, %eax
	cmpl	$FREE_LOCK, %eax
	jne	1f
	movl	$0, %eax		/* pick CPU #0 if no one has the lock */
1:
	shrl	$24, %eax
	movl	_cpu_num_to_apic_id(,%eax,4), %ecx
	shll	$24, %ecx
	movl	lapic_icr_hi, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi

2:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK, %eax
	jnz	2b
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo
3:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK, %eax
	jnz	3b
4:
	ret
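
/*
 * The ICR sequence above follows the usual local APIC protocol: set the
 * destination APIC id in the high ICR dword, wait for the delivery
 * status bit in the low dword to go idle, then write the vector and
 * delivery mode to the low dword (which sends the IPI) and wait for
 * delivery to go idle again.
 */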

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU.
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	_cpuid, %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* save process context */
	addl	$4, %esp

	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* one-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)

#define	CLKINTR_PENDING \
	pushl	$clock_lock ; \
	call	s_lock ; \
	movl	$1, CNAME(clkintr_pending) ; \
	call	s_unlock ; \
	addl	$4, %esp

	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 *  - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xrendezvous
_Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	_smp_rendezvous_action

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	POP_FRAME
	iret


	.data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0, Xresume1, Xresume2, Xresume3
	.long	Xresume4, Xresume5, Xresume6, Xresume7
	.long	Xresume8, Xresume9, Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	_swi_null, swi_net, _swi_null, _swi_null
	.long	_swi_vm, _swi_null, _softclock

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
	.long	0
CNAME(forward_irq_hitcnt):
	.long	0
CNAME(forward_irq_toodeepcnt):
	.long	0
CNAME(resched_cpus):
	.long	0
CNAME(want_resched_cnt):
	.long	0
CNAME(cpuast_cnt):
	.long	0
CNAME(cpustop_restartfunc):
	.long	0


	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.long	0

	.text