Forward FAST interrupts to the MP lock holder + minor fixes.
[dragonfly.git] / sys / i386 / apic / apic_vector.s
1 /*
2  *      from: vector.s, 386BSD 0.1 unknown origin
3  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4  * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.11 2003/07/12 16:55:50 dillon Exp $
5  */
6
7
8 #include <machine/apic.h>
9 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
11
12 /* convert an absolute IRQ# into a bitmask */
13 #define IRQ_LBIT(irq_num)       (1 << (irq_num))
14
15 /* make an index into the IO APIC from the IRQ#: the redirection table
 * starts at IO APIC register 0x10 and each pin occupies two 32-bit
 * registers (low/high word of the redirection entry). */
16 #define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
17
18 /*
19  * Push an interrupt frame in a format acceptable to doreti, reload
20  * the segment registers for the kernel.
 *
 * Frame (top down from the hardware iret frame): dummy error code,
 * dummy trap type, the eight pushal registers, then %ds/%es/%fs.
 * %ds/%es are switched to the kernel data selector (KDSEL) and %fs to
 * the per-cpu data selector (KPSEL).  Clobbers %ax.
21  */
22 #define PUSH_FRAME                                                      \
23         pushl   $0 ;            /* dummy error code */                  \
24         pushl   $0 ;            /* dummy trap type */                   \
25         pushal ;                                                        \
26         pushl   %ds ;           /* save data and extra segments ... */  \
27         pushl   %es ;                                                   \
28         pushl   %fs ;                                                   \
29         mov     $KDSEL,%ax ;                                            \
30         mov     %ax,%ds ;                                               \
31         mov     %ax,%es ;                                               \
32         mov     $KPSEL,%ax ;                                            \
33         mov     %ax,%fs ;                                               \
34
/*
 * Build a fake doreti-compatible frame for FAST_UNPEND: synthesize a
 * physical interrupt frame (current eflags, %cs, and the original
 * caller's return eip found at 12(%esp) after the caller's push %ebp
 * and our pushfl/pushl %cs), add dummy trap/error codes, then reserve
 * 12 uninitialized slots standing in for pushal + 3 segment registers
 * + the CPL word.  Undone by POP_DUMMY.
 */
35 #define PUSH_DUMMY                                                      \
36         pushfl ;                /* phys int frame / flags */            \
37         pushl %cs ;             /* phys int frame / cs */               \
38         pushl   12(%esp) ;      /* original caller eip */               \
39         pushl   $0 ;            /* dummy error code */                  \
40         pushl   $0 ;            /* dummy trap type */                   \
41         subl    $12*4,%esp ;    /* pushal + 3 seg regs (dummy) + CPL */ \
42
43 /*
44  * Warning: POP_FRAME can only be used if there is no chance of a
45  * segment register being changed (e.g. by procfs), which is why syscalls
46  * have to use doreti.
 *
 * Exact inverse of PUSH_FRAME minus the selector reloads: restores
 * %fs/%es/%ds and the pushal registers, then discards the two dummy
 * words so an iret can follow immediately.
47  */
48 #define POP_FRAME                                                       \
49         popl    %fs ;                                                   \
50         popl    %es ;                                                   \
51         popl    %ds ;                                                   \
52         popal ;                                                         \
53         addl    $2*4,%esp ;     /* dummy trap & error codes */          \
54
/*
 * Discard the frame built by PUSH_DUMMY: 17 words = 5 pushed words
 * (eflags, cs, eip, error code, trap type) + the 12 reserved slots.
 */
55 #define POP_DUMMY                                                       \
56         addl    $17*4,%esp ;                                            \
57
/*
 * Byte offsets into the 16-byte int_to_apicintpin[] entries: +8 yields
 * the IO APIC register base and +12 the redirection-table index for
 * the pin.  NOTE(review): offsets inferred from their use in
 * MASK_IRQ/UNMASK_IRQ below — confirm against the structure layout in
 * intr_machdep.h.
 */
58 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
59 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
60
61 /*
62  * Interrupts are expected to already be disabled when using these
63  * IMASK_*() macros.
 *
 * The imen_spinlock serializes access to apic_imen and the IO APIC
 * index/window register pair across cpus.
64  */
65 #define IMASK_LOCK                                                      \
66         SPIN_LOCK(imen_spinlock) ;                                      \
67
68 #define IMASK_UNLOCK                                                    \
69         SPIN_UNLOCK(imen_spinlock) ;                                    \
70         
/*
 * Mask irq_num at its IO APIC if it is not already masked, tracking
 * the mask state in the apic_imen bitmap.  Runs under the imen
 * spinlock; clobbers %eax and %ecx (plus whatever SPIN_LOCK uses).
 */
71 #define MASK_IRQ(irq_num)                                               \
72         IMASK_LOCK ;                            /* into critical reg */ \
73         testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
74         jne     7f ;                    /* masked, don't mask */        \
75         orl     $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */  \
76         movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
77         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
78         movl    %eax, (%ecx) ;                  /* write the index */   \
79         movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
80         orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
81         movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
82 7: ;                                            /* already masked */    \
83         IMASK_UNLOCK ;                                                  \
84
85 /*
86  * Test to see whether we are handling an edge or level triggered INT.
87  *  Level-triggered INTs must still be masked as we don't clear the source,
88  *  and the EOI cycle would cause redundant INTs to occur.
 *
 *  A set bit in apic_pin_trigger marks the pin as level-triggered;
 *  edge-triggered pins are left unmasked.
89  */
90 #define MASK_LEVEL_IRQ(irq_num)                                         \
91         testl   $IRQ_LBIT(irq_num), apic_pin_trigger ;                  \
92         jz      9f ;                            /* edge, don't mask */  \
93         MASK_IRQ(irq_num) ;                                             \
94 9: ;                                                                    \
96
/*
 * Issue a local APIC EOI, but only when this IRQ's in-service (ISR)
 * bit is actually set, so we never acknowledge an interrupt that is
 * not ours.  The APIC_INTR_REORDER variant locates the ISR word and
 * bit via the apic_isrbit_location[] table (address at +0, bitmask at
 * +4 of each 8-byte entry); the default variant tests lapic_isr1
 * directly.  NOTE(review): the lapic_isr1 test presumes IRQs 0-23 map
 * into that ISR word's vector range — confirm the vector base.
 * Clobbers %eax (REORDER variant) and flags.
 */
97 #ifdef APIC_INTR_REORDER
98 #define EOI_IRQ(irq_num)                                                \
99         movl    apic_isrbit_location + 8 * (irq_num), %eax ;            \
100         movl    (%eax), %eax ;                                          \
101         testl   apic_isrbit_location + 4 + 8 * (irq_num), %eax ;        \
102         jz      9f ;                            /* not active */        \
103         movl    $0, lapic_eoi ;                                         \
104 9:                                                                      \
105
106 #else
107
108 #define EOI_IRQ(irq_num)                                                \
109         testl   $IRQ_LBIT(irq_num), lapic_isr1;                         \
110         jz      9f      ;                       /* not active */        \
111         movl    $0, lapic_eoi;                                          \
112 9:                                                                      \
113
114 #endif
115         
116 /*
117  * Test to see if the source is currently masked; clear the mask if so.
118  */
/*
 * Inverse of MASK_IRQ: clear the mask bit in apic_imen and in the IO
 * APIC redirection entry, but only if the IRQ is currently marked
 * masked.  Runs under the imen spinlock; clobbers %eax and %ecx.
 */
119 #define UNMASK_IRQ(irq_num)                                     \
120         IMASK_LOCK ;                            /* into critical reg */ \
121         testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
122         je      7f ;                    /* bit clear, not masked */     \
123         andl    $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */    \
124         movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
125         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
126         movl    %eax,(%ecx) ;                   /* write the index */   \
127         movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
128         andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
129         movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
130 7: ;                                                                    \
131         IMASK_UNLOCK ;                                                  \
133 /*
134  * Fast interrupt call handlers run in the following sequence:
135  *
136  *      - Push the trap frame required by doreti
137  *      - Mask the interrupt and reenable its source
138  *      - If we cannot take the interrupt set its fpending bit and
139  *        doreti.
140  *      - If we can take the interrupt clear its fpending bit,
141  *        call the handler, then unmask and doreti.
142  *
143  * YYY can cache gd base pointer instead of using hidden %fs prefixes.
144  */
145
/*
 * FAST_INTR(irq_num, vec_name) — IDT entry for a fast interrupt.
 * Masks level-triggered sources, EOIs the local APIC, then either runs
 * the handler immediately (if outside a critical section, not blocked
 * by the cpl, and the MP lock is available), defers it by setting the
 * per-cpu fpending bit, or forwards it via IPI to the MP lock holder.
 *
 * NOTE: the final jump back to the common doreti exit at local label 5
 * must be a *backward* reference (5b).  The previous "jmp 5f" resolved
 * into the NEXT macro expansion's "5:" label and only worked by
 * accident because that code happened to be identical; it would break
 * if this were the last expansion defining a "5:" label.
 */
146 #define FAST_INTR(irq_num, vec_name)                                    \
147         .text ;                                                         \
148         SUPERALIGN_TEXT ;                                               \
149 IDTVEC(vec_name) ;                                                      \
150         PUSH_FRAME ;                                                    \
151         FAKE_MCOUNT(13*4(%esp)) ;                                       \
152         MASK_LEVEL_IRQ(irq_num) ;                                       \
153         EOI_IRQ(irq_num) ;                                              \
154         incl    PCPU(intr_nesting_level) ;                              \
155         movl    PCPU(curthread),%ebx ;                                  \
156         movl    TD_CPL(%ebx),%eax ;                                     \
157         pushl   %eax ;                                                  \
158         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
159         jge     1f ;                                                    \
160         testl   $IRQ_LBIT(irq_num), %eax ;                              \
161         jz      2f ;                                                    \
162 1: ;                                                                    \
163         /* in critical section, make interrupt pending */               \
164         /* set the pending bit and return, leave interrupt masked */    \
165         orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
166         movl    $TDPRI_CRIT, PCPU(reqpri) ;                             \
167         jmp     5f ;                                                    \
168 2: ;                                                                    \
169         /* try to get the MP lock */                                    \
170         call    try_mplock ;                                            \
171         testl   %eax,%eax ;                                             \
172         jz      6f ;                                                    \
173         /* clear pending bit, run handler */                            \
174         addl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
175         andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
176         pushl   intr_unit + (irq_num) * 4 ;                             \
177         call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
178         addl    $4, %esp ;                                              \
179         subl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
180         incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
181         movl    intr_countp + (irq_num) * 4, %eax ;                     \
182         incl    (%eax) ;                                                \
183         call    rel_mplock ;                                            \
184         UNMASK_IRQ(irq_num) ;                                           \
185 5: ;                                                                    \
186         MEXITCOUNT ;                                                    \
187         jmp     doreti ;                                                \
188 6: ;                                                                    \
189         /* could not get MP lock, forward the interrupt */              \
190         movl    mp_lock, %eax ;          /* check race */               \
191         cmpl    $MP_FREE_LOCK,%eax ;                                    \
192         je      2b ;                                                    \
193         incl    PCPU(cnt)+V_FORWARDED_INTS ;                            \
194         subl    $12,%esp ;                                              \
195         movl    $irq_num,8(%esp) ;                                      \
196         movl    $forward_fastint_remote,4(%esp) ;                       \
197         movl    %eax,(%esp) ;                                           \
198         call    lwkt_send_ipiq ;                                        \
199         addl    $12,%esp ;                                              \
200         jmp     5b ;            /* exit via common doreti path */       \
201
202 /*
203  * Restart fast interrupt held up by critical section or cpl.
204  *
205  *      - Push a dummy trap frame as required by doreti
206  *      - The interrupt source is already masked
207  *      - Clear the fpending bit
208  *      - Run the handler
209  *      - Unmask the interrupt
210  *      - Pop the dummy frame and do a normal return
211  *
212  *      The BGL is held on call and left held on return.
213  *
214  *      YYY can cache gd base pointer instead of using hidden %fs
215  *      prefixes.
216  */
217
/*
 * Despite using IDTVEC for the symbol, these entry points are reached
 * by a near call (note the ret), not through the IDT: PUSH_DUMMY picks
 * up the caller's return eip to fake the frame.  The BGL is held by
 * the caller; interrupt source is already masked and gets unmasked
 * here after the handler runs.  Clobbers %eax and caller-saved state
 * per the handler's C calling convention.
 */
218 #define FAST_UNPEND(irq_num, vec_name)                                  \
219         .text ;                                                         \
220         SUPERALIGN_TEXT ;                                               \
221 IDTVEC(vec_name) ;                                                      \
222         pushl   %ebp ;                                                  \
223         movl    %esp,%ebp ;                                             \
224         PUSH_DUMMY ;                                                    \
225         pushl   intr_unit + (irq_num) * 4 ;                             \
226         call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
227         addl    $4, %esp ;                                              \
228         incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
229         movl    intr_countp + (irq_num) * 4, %eax ;                     \
230         incl    (%eax) ;                                                \
231         UNMASK_IRQ(irq_num) ;                                           \
232         POP_DUMMY ;                                                     \
233         popl %ebp ;                                                     \
234         ret ;                                                           \
235
236 /*
237  * Slow interrupt call handlers run in the following sequence:
238  *
239  *      - Push the trap frame required by doreti.
240  *      - Mask the interrupt and reenable its source.
241  *      - If we cannot take the interrupt set its ipending bit and
242  *        doreti.  In addition to checking for a critical section
243  *        and cpl mask we also check to see if the thread is still
244  *        running.
245  *      - If we can take the interrupt clear its ipending bit
246  *        and schedule the thread.  Leave interrupts masked and doreti.
247  *
248  *      Note that calls to sched_ithd() are made with interrupts enabled
249  *      and outside a critical section.  YYY sched_ithd may preempt us
250  *      synchronously (fix interrupt stacking)
251  *
252  *      YYY can cache gd base pointer instead of using hidden %fs
253  *      prefixes.
254  */
255
/*
 * INTR(irq_num, vec_name, maybe_extra_ipending) — IDT entry for a
 * slow (threaded) interrupt.  Either defers by setting the per-cpu
 * ipending bit (critical section or cpl blocked) or clears the bit and
 * schedules the handler thread via sched_ithd() with interrupts
 * enabled.  The source stays masked in both cases; the interrupt
 * thread is responsible for unmasking.  Exits through doreti.
 */
256 #define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
257         .text ;                                                         \
258         SUPERALIGN_TEXT ;                                               \
259 IDTVEC(vec_name) ;                                                      \
260         PUSH_FRAME ;                                                    \
261         maybe_extra_ipending ;                                          \
262 ;                                                                       \
263         MASK_LEVEL_IRQ(irq_num) ;                                       \
264         EOI_IRQ(irq_num) ;                                              \
265         incl    PCPU(intr_nesting_level) ;                              \
266         movl    PCPU(curthread),%ebx ;                                  \
267         movl    TD_CPL(%ebx),%eax ;                                     \
268         pushl   %eax ;          /* cpl do restore */                    \
269         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
270         jge     1f ;                                                    \
271         testl   $IRQ_LBIT(irq_num),%eax ;                               \
272         jz      2f ;                                                    \
273 1: ;                                                                    \
274         /* set the pending bit and return, leave the interrupt masked */ \
275         orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
276         movl    $TDPRI_CRIT, PCPU(reqpri) ;                             \
277         jmp     5f ;                                                    \
278 2: ;                                                                    \
279         /* set running bit, clear pending bit, run handler */           \
280         andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
281         sti ;                                                           \
282         pushl   $irq_num ;                                              \
283         call    sched_ithd ;                                            \
284         addl    $4,%esp ;                                               \
285         incl    PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */  \
286         movl    intr_countp + (irq_num) * 4,%eax ;                      \
287         incl    (%eax) ;                                                \
288 5: ;                                                                    \
289         MEXITCOUNT ;                                                    \
290         jmp     doreti ;                                                \
291
292
293 /*
294  * Handle "spurious INTerrupts".
295  * Notes:
296  *  This is different than the "spurious INTerrupt" generated by an
297  *   8259 PIC for missing INTs.  See the APIC documentation for details.
298  *  This routine should NOT do an 'EOI' cycle.
299  */
300         .text
301         SUPERALIGN_TEXT
302         .globl Xspuriousint
303 Xspuriousint:
304
305         /* No EOI cycle used here */
306
        /* nothing was saved and nothing to acknowledge: return directly */
307         iret
308
309
310 /*
311  * Handle TLB shootdowns.
 *
 * Reloading %cr3 with its own value flushes the (non-global) TLB
 * entries.  Only %eax is saved/restored; data references use an %ss
 * override so the kernel %ds does not have to be loaded.
312  */
313         .text
314         SUPERALIGN_TEXT
315         .globl  Xinvltlb
316 Xinvltlb:
317         pushl   %eax
318
319 #ifdef COUNT_XINVLTLB_HITS
320         pushl   %fs
321         movl    $KPSEL, %eax
322         mov     %ax, %fs                /* per-cpu data just for cpuid */
323         movl    PCPU(cpuid), %eax
324         popl    %fs
325         ss
326         incl    _xhits(,%eax,4)
327 #endif /* COUNT_XINVLTLB_HITS */
328
329         movl    %cr3, %eax              /* invalidate the TLB */
330         movl    %eax, %cr3
331
332         ss                              /* stack segment, avoid %ds load */
333         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
334
335         popl    %eax
336         iret
337
338
339 #if 0
/* NOTE: this entire BETTER_CLOCK statistics-probe block is compiled
 * out (#if 0); kept for reference only. */
340 #ifdef BETTER_CLOCK
341
342 /*
343  * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
344  *
345  *  - Stores current cpu state in checkstate_cpustate[cpuid]
346  *      0 == user, 1 == sys, 2 == intr
347  *  - Stores current process in checkstate_curproc[cpuid]
348  *
349  *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
350  *
351  * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
352  */
353
354         .text
355         SUPERALIGN_TEXT
356         .globl Xcpucheckstate
357         .globl checkstate_cpustate
358         .globl checkstate_curproc
359         .globl checkstate_pc
360 Xcpucheckstate:
361         pushl   %eax
362         pushl   %ebx            
363         pushl   %ds                     /* save current data segment */
364         pushl   %fs
365
366         movl    $KDSEL, %eax
367         mov     %ax, %ds                /* use KERNEL data segment */
368         movl    $KPSEL, %eax
369         mov     %ax, %fs
370
371         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
372
373         movl    $0, %ebx                
374         movl    20(%esp), %eax  
375         andl    $3, %eax                /* saved %cs RPL: 3 == user mode */
376         cmpl    $3, %eax
377         je      1f
378         testl   $PSL_VM, 24(%esp)       /* vm86 counts as user too */
379         jne     1f
380         incl    %ebx                    /* system or interrupt */
381 1:      
382         movl    PCPU(cpuid), %eax
383         movl    %ebx, checkstate_cpustate(,%eax,4)
384         movl    PCPU(curthread), %ebx
385         movl    TD_PROC(%ebx),%ebx
386         movl    %ebx, checkstate_curproc(,%eax,4)
387         movl    16(%esp), %ebx
388         movl    %ebx, checkstate_pc(,%eax,4)
389
390         lock                            /* checkstate_probed_cpus |= (1<<id) */
391         btsl    %eax, checkstate_probed_cpus
392
393         popl    %fs
394         popl    %ds                     /* restore previous data segment */
395         popl    %ebx
396         popl    %eax
397         iret
398
399 #endif /* BETTER_CLOCK */
400 #endif
401
402 /*
403  * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
404  *
405  *  - Signals its receipt.
406  *  - Waits for permission to restart.
407  *  - Signals its restart.
 *
 *  The cpu's context is saved into stoppcbs[cpuid] before it parks so
 *  a debugger/dump can inspect it.  cpu 0 additionally runs the
 *  one-shot cpustop_restartfunc hook, if set, after being restarted.
408  */
409
410         .text
411         SUPERALIGN_TEXT
412         .globl Xcpustop
413 Xcpustop:
414         pushl   %ebp
415         movl    %esp, %ebp
416         pushl   %eax
417         pushl   %ecx
418         pushl   %edx
419         pushl   %ds                     /* save current data segment */
420         pushl   %fs
421
422         movl    $KDSEL, %eax
423         mov     %ax, %ds                /* use KERNEL data segment */
424         movl    $KPSEL, %eax
425         mov     %ax, %fs
426
427         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
428
        /* save our context into stoppcbs[cpuid] */
429         movl    PCPU(cpuid), %eax
430         imull   $PCB_SIZE, %eax
431         leal    CNAME(stoppcbs)(%eax), %eax
432         pushl   %eax
433         call    CNAME(savectx)          /* Save process context */
434         addl    $4, %esp
435         
436                 
437         movl    PCPU(cpuid), %eax
438
439         lock
440         btsl    %eax, stopped_cpus      /* stopped_cpus |= (1<<id) */
441 1:
442         btl     %eax, started_cpus      /* while (!(started_cpus & (1<<id))) */
443         jnc     1b
444
445         lock
446         btrl    %eax, started_cpus      /* started_cpus &= ~(1<<id) */
447         lock
448         btrl    %eax, stopped_cpus      /* stopped_cpus &= ~(1<<id) */
449
        /* only cpu 0 may run the restart hook */
450         test    %eax, %eax
451         jnz     2f
452
453         movl    CNAME(cpustop_restartfunc), %eax
454         test    %eax, %eax
455         jz      2f
456         movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */
457
458         call    *%eax
459 2:
460         popl    %fs
461         popl    %ds                     /* restore previous data segment */
462         popl    %edx
463         popl    %ecx
464         popl    %eax
465         movl    %ebp, %esp
466         popl    %ebp
467         iret
468
469         /*
470          * For now just have one ipiq IPI, but what we really want is
471          * to have one for each source cpu so the APICs don't get stalled
472          * backlogging the requests.
473          */
474         .text
475         SUPERALIGN_TEXT
476         .globl Xipiq
477 Xipiq:
478         PUSH_FRAME
479         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
480         FAKE_MCOUNT(13*4(%esp))
481
        /* outside a critical section: drain the IPI queue right now,
         * bracketed by a manual critical-section bump, then exit via
         * doreti with the caller's cpl */
482         movl    PCPU(curthread),%ebx
483         cmpl    $TDPRI_CRIT,TD_PRI(%ebx)
484         jge     1f
485         addl    $TDPRI_CRIT,TD_PRI(%ebx)
486         call    lwkt_process_ipiq
487         subl    $TDPRI_CRIT,TD_PRI(%ebx)
488         pushl   TD_CPL(%ebx)
489         incl    PCPU(intr_nesting_level)
490         MEXITCOUNT
491         jmp     doreti
492 1:
        /* inside a critical section: flag an AST so the queue is
         * processed when the section is exited, and return directly */
493         movl    $TDPRI_CRIT,PCPU(reqpri)
494         orl     $AST_IPIQ,PCPU(astpending)
495         MEXITCOUNT
496         POP_FRAME
497         iret
498
498
/*
 * Instantiate the entry points for IRQs 0-23.  MCOUNT_LABEL(bintr) /
 * MCOUNT_LABEL(eintr) bracket them so profiling can attribute time
 * spent in interrupt stubs.
 */
499 MCOUNT_LABEL(bintr)
500         FAST_INTR(0,fastintr0)
501         FAST_INTR(1,fastintr1)
502         FAST_INTR(2,fastintr2)
503         FAST_INTR(3,fastintr3)
504         FAST_INTR(4,fastintr4)
505         FAST_INTR(5,fastintr5)
506         FAST_INTR(6,fastintr6)
507         FAST_INTR(7,fastintr7)
508         FAST_INTR(8,fastintr8)
509         FAST_INTR(9,fastintr9)
510         FAST_INTR(10,fastintr10)
511         FAST_INTR(11,fastintr11)
512         FAST_INTR(12,fastintr12)
513         FAST_INTR(13,fastintr13)
514         FAST_INTR(14,fastintr14)
515         FAST_INTR(15,fastintr15)
516         FAST_INTR(16,fastintr16)
517         FAST_INTR(17,fastintr17)
518         FAST_INTR(18,fastintr18)
519         FAST_INTR(19,fastintr19)
520         FAST_INTR(20,fastintr20)
521         FAST_INTR(21,fastintr21)
522         FAST_INTR(22,fastintr22)
523         FAST_INTR(23,fastintr23)
524         
525         /* YYY what is this garbage? */
/*
 * Extra work hooked into INTR(0) (the clock): record that a clock
 * interrupt is pending, under the clock interlock, before the handler
 * thread is scheduled.
 */
526 #define CLKINTR_PENDING                                                 \
527         call    clock_lock ;                                            \
528         movl $1,CNAME(clkintr_pending) ;                                \
529         call    clock_unlock ;                                          \
530
531         INTR(0,intr0, CLKINTR_PENDING)
532         INTR(1,intr1,)
533         INTR(2,intr2,)
534         INTR(3,intr3,)
535         INTR(4,intr4,)
536         INTR(5,intr5,)
537         INTR(6,intr6,)
538         INTR(7,intr7,)
539         INTR(8,intr8,)
540         INTR(9,intr9,)
541         INTR(10,intr10,)
542         INTR(11,intr11,)
543         INTR(12,intr12,)
544         INTR(13,intr13,)
545         INTR(14,intr14,)
546         INTR(15,intr15,)
547         INTR(16,intr16,)
548         INTR(17,intr17,)
549         INTR(18,intr18,)
550         INTR(19,intr19,)
551         INTR(20,intr20,)
552         INTR(21,intr21,)
553         INTR(22,intr22,)
554         INTR(23,intr23,)
555
556         FAST_UNPEND(0,fastunpend0)
557         FAST_UNPEND(1,fastunpend1)
558         FAST_UNPEND(2,fastunpend2)
559         FAST_UNPEND(3,fastunpend3)
560         FAST_UNPEND(4,fastunpend4)
561         FAST_UNPEND(5,fastunpend5)
562         FAST_UNPEND(6,fastunpend6)
563         FAST_UNPEND(7,fastunpend7)
564         FAST_UNPEND(8,fastunpend8)
565         FAST_UNPEND(9,fastunpend9)
566         FAST_UNPEND(10,fastunpend10)
567         FAST_UNPEND(11,fastunpend11)
568         FAST_UNPEND(12,fastunpend12)
569         FAST_UNPEND(13,fastunpend13)
570         FAST_UNPEND(14,fastunpend14)
571         FAST_UNPEND(15,fastunpend15)
572         FAST_UNPEND(16,fastunpend16)
573         FAST_UNPEND(17,fastunpend17)
574         FAST_UNPEND(18,fastunpend18)
575         FAST_UNPEND(19,fastunpend19)
576         FAST_UNPEND(20,fastunpend20)
577         FAST_UNPEND(21,fastunpend21)
578         FAST_UNPEND(22,fastunpend22)
579         FAST_UNPEND(23,fastunpend23)
580 MCOUNT_LABEL(eintr)
581
582         /*
583          * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
584          *
585          * - Calls the generic rendezvous action function.
586          */
587         .text
588         SUPERALIGN_TEXT
589         .globl  Xrendezvous
590 Xrendezvous:
591         PUSH_FRAME
592         movl    $KDSEL, %eax
593         mov     %ax, %ds                /* use KERNEL data segment */
594         mov     %ax, %es
595         movl    $KPSEL, %eax
596         mov     %ax, %fs
597
598         call    smp_rendezvous_action
599
        /* acknowledge the IPI only after the action has run */
600         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
601         POP_FRAME
602         iret
603         
604         
/*
 * Data shared with the C side: cpu start/stop bitmaps, the cpustop
 * restart hook, and the level-trigger bitmap used by the macros above.
 */
605         .data
606
607 #if 0
/* Legacy handler/mask tables, compiled out. */
608 /*
609  * Addresses of interrupt handlers.
610  *  XresumeNN: Resumption addresses for HWIs.
611  */
612         .globl _ihandlers
613 _ihandlers:
614 /*
615  * used by:
616  *  ipl.s:      doreti_unpend
617  */
618         .long   Xresume0,  Xresume1,  Xresume2,  Xresume3 
619         .long   Xresume4,  Xresume5,  Xresume6,  Xresume7
620         .long   Xresume8,  Xresume9,  Xresume10, Xresume11
621         .long   Xresume12, Xresume13, Xresume14, Xresume15 
622         .long   Xresume16, Xresume17, Xresume18, Xresume19
623         .long   Xresume20, Xresume21, Xresume22, Xresume23
624 /*
625  * used by:
626  *  ipl.s:      doreti_unpend
627  *  apic_ipl.s: splz_unpend
628  */
629         .long   _swi_null, swi_net, _swi_null, _swi_null
630         .long   _swi_vm, _swi_null, _softclock
631
632 imasks:                         /* masks for interrupt handlers */
633         .space  NHWI*4          /* padding; HWI masks are elsewhere */
634
635         .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
636         .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
637 #endif  /* 0 */
638
639
640 #ifdef COUNT_XINVLTLB_HITS
641         .globl  xhits
642 xhits:
643         .space  (NCPU * 4), 0
644 #endif /* COUNT_XINVLTLB_HITS */
645
646 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
647         .globl stopped_cpus, started_cpus
648 stopped_cpus:
649         .long   0
650 started_cpus:
651         .long   0
652
653 #ifdef BETTER_CLOCK
654         .globl checkstate_probed_cpus
655 checkstate_probed_cpus:
656         .long   0       
657 #endif /* BETTER_CLOCK */
658         .globl CNAME(cpustop_restartfunc)
659 CNAME(cpustop_restartfunc):
660         .long 0
661                 
/* bitmask of level-triggered IO APIC pins; tested by MASK_LEVEL_IRQ */
662         .globl  apic_pin_trigger
663 apic_pin_trigger:
664         .long   0
665
666         .text