/*
 * sys/platform/pc32/isa/apic_vector.s  (dragonfly.git)
 *
 * NOTE(review): the original scrape carried git-viewer annotation residue
 * here (commit summary "This commit represents a major revamping of the
 * clock interrupt and timebase", blame hashes, "CommitLineData"); it has
 * been folded into this comment so the file can assemble.
 */
/*
 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/isa/Attic/apic_vector.s,v 1.15 2004/01/30 05:42:16 dillon Exp $
 */

#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * Push an interrupt frame in a format acceptable to doreti, then reload
 * the segment registers for the kernel (caller's %ds/%es/%fs are saved
 * in the frame and restored by POP_FRAME or doreti).
 */
#define PUSH_FRAME \
	pushl	$0 ;		/* dummy error code */ \
	pushl	$0 ;		/* dummy trap type */ \
	pushal ; \
	pushl	%ds ;		/* save data and extra segments ... */ \
	pushl	%es ; \
	pushl	%fs ; \
	mov	$KDSEL,%ax ; \
	mov	%ax,%ds ; \
	mov	%ax,%es ; \
	mov	$KPSEL,%ax ;	/* per-cpu private data via %fs */ \
	mov	%ax,%fs ; \

/*
 * Build a fake interrupt frame on top of the current (physical) stack so a
 * restarted fast interrupt can run through the normal handler path.  The
 * 12 reserved longs stand in for pushal + 3 segment registers + CPL.
 */
#define PUSH_DUMMY \
	pushfl ;		/* phys int frame / flags */ \
	pushl	%cs ;		/* phys int frame / cs */ \
	pushl	12(%esp) ;	/* original caller eip */ \
	pushl	$0 ;		/* dummy error code */ \
	pushl	$0 ;		/* dummy trap type */ \
	subl	$12*4,%esp ;	/* pushal + 3 seg regs (dummy) + CPL */ \

/*
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define POP_FRAME \
	popl	%fs ; \
	popl	%es ; \
	popl	%ds ; \
	popal ; \
	addl	$2*4,%esp ;	/* dummy trap & error codes */ \

/* discard the frame built by PUSH_DUMMY (17 longs, eip/cs/eflags incl.) */
#define POP_DUMMY \
	addl	$17*4,%esp ; \

/* I/O APIC register-select / redirection-index for a given IRQ's pin */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

/*
 * Interrupts are expected to already be disabled when using these
 * IMASK_*() macros.
 */
#define IMASK_LOCK \
	SPIN_LOCK(imen_spinlock) ; \

#define IMASK_UNLOCK \
	SPIN_UNLOCK(imen_spinlock) ; \

/*
 * Mask irq_num at the I/O APIC unless it is already masked.  Tracks the
 * masked state in the apic_imen software mask under the imen spinlock.
 */
#define MASK_IRQ(irq_num) \
	IMASK_LOCK ;				/* into critical reg */ \
	testl	$IRQ_LBIT(irq_num), apic_imen ; \
	jne	7f ;			/* masked, don't mask */ \
	orl	$IRQ_LBIT(irq_num), apic_imen ;	/* set the mask bit */ \
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */ \
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */ \
	movl	%eax, (%ecx) ;			/* write the index */ \
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */ \
	orl	$IOART_INTMASK, %eax ;		/* set the mask */ \
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */ \
7: ;						/* already masked */ \
	IMASK_UNLOCK ; \

/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num) \
	testl	$IRQ_LBIT(irq_num), apic_pin_trigger ; \
	jz	9f ;			/* edge, don't mask */ \
	MASK_IRQ(irq_num) ; \
9: ; \


/*
 * Issue an EOI to the local APIC only if the IRQ's in-service bit is
 * actually set.  Two variants: the APIC_INTR_REORDER one looks the ISR
 * bit up through the precomputed apic_isrbit_location table.
 */
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num) \
	movl	apic_isrbit_location + 8 * (irq_num), %eax ; \
	movl	(%eax), %eax ; \
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
	jz	9f ;			/* not active */ \
	movl	$0, lapic_eoi ; \
9: \

#else

#define EOI_IRQ(irq_num) \
	testl	$IRQ_LBIT(irq_num), lapic_isr1; \
	jz	9f ;			/* not active */ \
	movl	$0, lapic_eoi; \
9: \

#endif

/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num) \
	IMASK_LOCK ;				/* into critical reg */ \
	testl	$IRQ_LBIT(irq_num), apic_imen ; \
	je	7f ;			/* bit clear, not masked */ \
	andl	$~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */ \
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */ \
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */ \
	movl	%eax,(%ecx) ;			/* write the index */ \
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */ \
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */ \
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */ \
7: ; \
	IMASK_UNLOCK ; \

/*
 * Fast interrupt call handlers run in the following sequence:
 *
 *	- Push the trap frame required by doreti
 *	- Mask the interrupt and reenable its source
 *	- If we cannot take the interrupt set its fpending bit and
 *	  doreti.  Note that we cannot mess with mp_lock at all
 *	  if we entered from a critical section!
 *	- If we can take the interrupt clear its fpending bit,
 *	  call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */

#define	FAST_INTR(irq_num, vec_name) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	PUSH_FRAME ; \
	FAKE_MCOUNT(13*4(%esp)) ; \
	MASK_LEVEL_IRQ(irq_num) ; \
	EOI_IRQ(irq_num) ; \
	movl	PCPU(curthread),%ebx ; \
	movl	TD_CPL(%ebx),%eax ; \
	pushl	%eax ; \
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ; \
	jge	1f ; \
	testl	$IRQ_LBIT(irq_num), %eax ; \
	jz	2f ; \
1: ; \
	/* in critical section, make interrupt pending */ \
	/* set the pending bit and return, leave interrupt masked */ \
	orl	$IRQ_LBIT(irq_num),PCPU(fpending) ; \
	orl	$RQF_INTPEND,PCPU(reqflags) ; \
	jmp	5f ; \
2: ; \
	/* try to get the MP lock */ \
	call	try_mplock ; \
	testl	%eax,%eax ; \
	jz	6f ; \
	/* clear pending bit, run handler */ \
	incl	PCPU(intr_nesting_level) ; \
	addl	$TDPRI_CRIT,TD_PRI(%ebx) ; \
	andl	$~IRQ_LBIT(irq_num),PCPU(fpending) ; \
	pushl	intr_unit + (irq_num) * 4 ; \
	call	*intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ; \
	subl	$TDPRI_CRIT,TD_PRI(%ebx) ; \
	incl	PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
	movl	intr_countp + (irq_num) * 4, %eax ; \
	incl	(%eax) ; \
	decl	PCPU(intr_nesting_level) ; \
	call	rel_mplock ; \
	UNMASK_IRQ(irq_num) ; \
5: ; \
	MEXITCOUNT ; \
	jmp	doreti ; \
6: ; \
	/* could not get MP lock, forward the interrupt */ \
	movl	mp_lock, %eax ;			/* check race */ \
	cmpl	$MP_FREE_LOCK,%eax ; \
	je	2b ; \
	incl	PCPU(cnt)+V_FORWARDED_INTS ; \
	subl	$12,%esp ; \
	movl	$irq_num,8(%esp) ; \
	movl	$forward_fastint_remote,4(%esp) ; \
	movl	%eax,(%esp) ; \
	call	lwkt_send_ipiq ; \
	addl	$12,%esp ; \
	jmp	5b ; /* was 5f: label 5 is ABOVE; 5f would bind to the */ \
	     /* next macro expansion's 5: (accidentally equivalent) */ \

/*
 * Restart fast interrupt held up by critical section or cpl.
 *
 *	- Push a dummy trap frame as required by doreti
 *	- The interrupt source is already masked
 *	- Clear the fpending bit (NOTE(review): done by the caller; this
 *	  routine itself does not touch fpending — verify against ipl.s)
 *	- Run the handler
 *	- Unmask the interrupt
 *	- Pop the dummy frame and do a normal return
 *
 * The BGL is held on call and left held on return.
 *
 * YYY can cache gd base pointer instead of using hidden %fs
 * prefixes.
 */

#define FAST_UNPEND(irq_num, vec_name) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	pushl	%ebp ; \
	movl	%esp,%ebp ; \
	PUSH_DUMMY ; \
	pushl	intr_unit + (irq_num) * 4 ; \
	call	*intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ; \
	incl	PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
	movl	intr_countp + (irq_num) * 4, %eax ; \
	incl	(%eax) ; \
	UNMASK_IRQ(irq_num) ; \
	POP_DUMMY ; \
	popl	%ebp ; \
	ret ; \

/*
 * Slow interrupt call handlers run in the following sequence:
 *
 *	- Push the trap frame required by doreti.
 *	- Mask the interrupt and reenable its source.
 *	- If we cannot take the interrupt set its ipending bit and
 *	  doreti.  In addition to checking for a critical section
 *	  and cpl mask we also check to see if the thread is still
 *	  running.  Note that we cannot mess with mp_lock at all
 *	  if we entered from a critical section!
 *	- If we can take the interrupt clear its ipending bit
 *	  and schedule the thread.  Leave interrupts masked and doreti.
 *
 * Note that calls to sched_ithd() are made with interrupts enabled
 * and outside a critical section.  YYY sched_ithd may preempt us
 * synchronously (fix interrupt stacking).
 *
 * YYY can cache gd base pointer instead of using hidden %fs
 * prefixes.
 */

#define	INTR(irq_num, vec_name, maybe_extra_ipending) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	PUSH_FRAME ; \
	maybe_extra_ipending ; \
; \
	MASK_LEVEL_IRQ(irq_num) ; \
	EOI_IRQ(irq_num) ; \
	movl	PCPU(curthread),%ebx ; \
	movl	TD_CPL(%ebx),%eax ; \
	pushl	%eax ;		/* cpl do restore */ \
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ; \
	jge	1f ; \
	testl	$IRQ_LBIT(irq_num),%eax ; \
	jz	2f ; \
1: ; \
	/* set the pending bit and return, leave the interrupt masked */ \
	orl	$IRQ_LBIT(irq_num), PCPU(ipending) ; \
	orl	$RQF_INTPEND,PCPU(reqflags) ; \
	jmp	5f ; \
2: ; \
	/* set running bit, clear pending bit, run handler */ \
	andl	$~IRQ_LBIT(irq_num), PCPU(ipending) ; \
	sti ; \
	pushl	$irq_num ; \
	call	sched_ithd ; \
	addl	$4,%esp ; \
	incl	PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */ \
	movl	intr_countp + (irq_num) * 4,%eax ; \
	incl	(%eax) ; \
5: ; \
	MEXITCOUNT ; \
	jmp	doreti ; \

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *  8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret

/*
 * Handle TLB shootdowns: reload %cr3 to flush this cpu's TLB, then EOI.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xinvltlb
Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movl	PCPU(cpuid), %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret

#if 0
#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpucheckstate
	.globl checkstate_cpustate
	.globl checkstate_curproc
	.globl checkstate_pc
Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax		/* saved %cs */
	andl	$3, %eax		/* RPL: 3 == user mode */
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)	/* vm86 counts as user too */
	jne	1f
	incl	%ebx			/* system or interrupt */
1:
	movl	PCPU(cpuid), %eax
	movl	%ebx, checkstate_cpustate(,%eax,4)
	movl	PCPU(curthread), %ebx
	movl	TD_PROC(%ebx),%ebx
	movl	%ebx, checkstate_curproc(,%eax,4)
	movl	16(%esp), %ebx
	movl	%ebx, checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */
#endif

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	PCPU(cpuid), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(cpuid), %eax

	lock
	btsl	%eax, stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* only the BSP (cpu 0) runs the */
	jnz	2f			/* one-shot restart function */

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret

96728c05
MD
471 /*
472 * For now just have one ipiq IPI, but what we really want is
473 * to have one for each source cpu to the APICs don't get stalled
474 * backlogging the requests.
475 */
476 .text
477 SUPERALIGN_TEXT
478 .globl Xipiq
479Xipiq:
480 PUSH_FRAME
481 movl $0, lapic_eoi /* End Of Interrupt to APIC */
482 FAKE_MCOUNT(13*4(%esp))
483
484 movl PCPU(curthread),%ebx
485 cmpl $TDPRI_CRIT,TD_PRI(%ebx)
486 jge 1f
88c4d2f6 487 subl $8,%esp /* make same as interrupt frame */
03aa8d99 488 incl PCPU(intr_nesting_level)
96728c05 489 addl $TDPRI_CRIT,TD_PRI(%ebx)
88c4d2f6 490 call lwkt_process_ipiq_frame
96728c05 491 subl $TDPRI_CRIT,TD_PRI(%ebx)
03aa8d99 492 decl PCPU(intr_nesting_level)
88c4d2f6 493 addl $8,%esp
96728c05 494 pushl TD_CPL(%ebx)
96728c05
MD
495 MEXITCOUNT
496 jmp doreti
4971:
235957ed 498 orl $RQF_IPIQ,PCPU(reqflags)
96728c05
MD
499 MEXITCOUNT
500 POP_FRAME
501 iret
984263bc
MD
502
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)

	/* YYY what is this garbage? */

	INTR(0,intr0,)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)

	FAST_UNPEND(0,fastunpend0)
	FAST_UNPEND(1,fastunpend1)
	FAST_UNPEND(2,fastunpend2)
	FAST_UNPEND(3,fastunpend3)
	FAST_UNPEND(4,fastunpend4)
	FAST_UNPEND(5,fastunpend5)
	FAST_UNPEND(6,fastunpend6)
	FAST_UNPEND(7,fastunpend7)
	FAST_UNPEND(8,fastunpend8)
	FAST_UNPEND(9,fastunpend9)
	FAST_UNPEND(10,fastunpend10)
	FAST_UNPEND(11,fastunpend11)
	FAST_UNPEND(12,fastunpend12)
	FAST_UNPEND(13,fastunpend13)
	FAST_UNPEND(14,fastunpend14)
	FAST_UNPEND(15,fastunpend15)
	FAST_UNPEND(16,fastunpend16)
	FAST_UNPEND(17,fastunpend17)
	FAST_UNPEND(18,fastunpend18)
	FAST_UNPEND(19,fastunpend19)
	FAST_UNPEND(20,fastunpend20)
	FAST_UNPEND(21,fastunpend21)
	FAST_UNPEND(22,fastunpend22)
	FAST_UNPEND(23,fastunpend23)
MCOUNT_LABEL(eintr)

8a8d5d85
MD
582 /*
583 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
584 *
585 * - Calls the generic rendezvous action function.
586 */
984263bc
MD
587 .text
588 SUPERALIGN_TEXT
8a8d5d85
MD
589 .globl Xrendezvous
590Xrendezvous:
984263bc
MD
591 PUSH_FRAME
592 movl $KDSEL, %eax
593 mov %ax, %ds /* use KERNEL data segment */
594 mov %ax, %es
595 movl $KPSEL, %eax
596 mov %ax, %fs
597
8a8d5d85 598 call smp_rendezvous_action
984263bc
MD
599
600 movl $0, lapic_eoi /* End Of Interrupt to APIC */
601 POP_FRAME
602 iret
603
604
605 .data
ef0fdad1
MD
606
607#if 0
984263bc
MD
608/*
609 * Addresses of interrupt handlers.
610 * XresumeNN: Resumption addresses for HWIs.
611 */
612 .globl _ihandlers
613_ihandlers:
614/*
615 * used by:
616 * ipl.s: doreti_unpend
617 */
618 .long Xresume0, Xresume1, Xresume2, Xresume3
619 .long Xresume4, Xresume5, Xresume6, Xresume7
620 .long Xresume8, Xresume9, Xresume10, Xresume11
621 .long Xresume12, Xresume13, Xresume14, Xresume15
622 .long Xresume16, Xresume17, Xresume18, Xresume19
623 .long Xresume20, Xresume21, Xresume22, Xresume23
624/*
625 * used by:
626 * ipl.s: doreti_unpend
627 * apic_ipl.s: splz_unpend
628 */
629 .long _swi_null, swi_net, _swi_null, _swi_null
630 .long _swi_vm, _swi_null, _softclock
631
632imasks: /* masks for interrupt handlers */
633 .space NHWI*4 /* padding; HWI masks are elsewhere */
634
635 .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
636 .long SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
8a8d5d85 637#endif /* 0 */
984263bc 638
984263bc
MD
639
640#ifdef COUNT_XINVLTLB_HITS
2954c92f
MD
641 .globl xhits
642xhits:
984263bc
MD
643 .space (NCPU * 4), 0
644#endif /* COUNT_XINVLTLB_HITS */
645
646/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
2954c92f
MD
647 .globl stopped_cpus, started_cpus
648stopped_cpus:
984263bc 649 .long 0
2954c92f 650started_cpus:
984263bc
MD
651 .long 0
652
653#ifdef BETTER_CLOCK
2954c92f
MD
654 .globl checkstate_probed_cpus
655checkstate_probed_cpus:
984263bc
MD
656 .long 0
657#endif /* BETTER_CLOCK */
984263bc 658 .globl CNAME(cpustop_restartfunc)
984263bc
MD
659CNAME(cpustop_restartfunc):
660 .long 0
661
2954c92f
MD
662 .globl apic_pin_trigger
663apic_pin_trigger:
984263bc
MD
664 .long 0
665
666 .text