/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.14 2003/09/25 23:49:08 dillon Exp $
 */


#include <machine/apic.h>
#include <machine/smp.h>
#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
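
/*
 * Worked example (editor's note): for irq_num = 9, IRQ_LBIT(9) is
 * (1 << 9) == 0x00000200, and REDTBL_IDX(9) is 0x10 + 9 * 2 == 0x22,
 * i.e. the low dword of I/O APIC redirection table entry 9 (each
 * 64-bit entry occupies two 32-bit index slots starting at 0x10).
 */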

/*
 * Push an interrupt frame in a format acceptable to doreti, reload
 * the segment registers for the kernel.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs ;							\
	mov	$KDSEL,%ax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	mov	$KPSEL,%ax ;						\
	mov	%ax,%fs ;						\

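/*
 * Editor's sketch of the frame PUSH_FRAME builds, as offsets from the
 * new %esp (kernel-mode entry; an interrupt from user mode adds ss/esp
 * above eflags):
 *
 *	0x00 %fs, 0x04 %es, 0x08 %ds,
 *	0x0c-0x28 pushal block (edi, esi, ebp, esp, ebx, edx, ecx, eax),
 *	0x2c dummy trap type, 0x30 dummy error code,
 *	0x34 eip, 0x38 cs, 0x3c eflags (pushed by the hardware).
 *
 * Hence FAKE_MCOUNT(13*4(%esp)) below addresses the saved eip.
 */
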
#define PUSH_DUMMY							\
	pushfl ;		/* phys int frame / flags */		\
	pushl	%cs ;		/* phys int frame / cs */		\
	pushl	12(%esp) ;	/* original caller eip */		\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	subl	$12*4,%esp ;	/* pushal + 3 seg regs (dummy) + CPL */	\

/*
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$2*4,%esp ;	/* dummy trap & error codes */		\

#define POP_DUMMY							\
	addl	$17*4,%esp ;						\

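/*
 * Sanity check (editor's note): PUSH_DUMMY deposits 5 words (eflags,
 * cs, eip, dummy error code, dummy trap type) and reserves 12 more
 * with subl $12*4,%esp, so POP_DUMMY must discard 5 + 12 = 17 words,
 * which is the $17*4 above.
 */
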
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
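
/*
 * Editor's note: the constants above assume each int_to_apicintpin[]
 * entry is 16 bytes, with the I/O APIC register address at byte
 * offset 8 and the redirection-entry index at offset 12.  If the
 * underlying struct layout changes, these offsets must change with it.
 */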

/*
 * Interrupts are expected to already be disabled when using these
 * IMASK_*() macros.
 */
#define IMASK_LOCK							\
	SPIN_LOCK(imen_spinlock) ;					\

#define IMASK_UNLOCK							\
	SPIN_UNLOCK(imen_spinlock) ;					\

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_LBIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK ;							\

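/*
 * Rough C equivalent of MASK_IRQ (editor's sketch, not compiled in;
 * 'ioapic' and 'redir_idx' stand for the values fetched via
 * IOAPICADDR() and REDIRIDX()):
 *
 *	imen_lock();
 *	if ((apic_imen & IRQ_LBIT(irq)) == 0) {
 *		apic_imen |= IRQ_LBIT(irq);
 *		ioapic->index = redir_idx;	// select the entry
 *		ioapic->window |= IOART_INTMASK; // set the mask bit
 *	}
 *	imen_unlock();
 *
 * UNMASK_IRQ below is the symmetric operation: clear the apic_imen bit
 * and clear IOART_INTMASK in the redirection entry.
 */
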
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_LBIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9: ;									\


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
9:									\

#else

#define EOI_IRQ(irq_num)						\
	testl	$IRQ_LBIT(irq_num), lapic_isr1 ;			\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
9:									\

#endif
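
/*
 * Editor's note: both EOI_IRQ variants implement, roughly,
 *
 *	if (the local APIC in-service (ISR) bit for irq_num is set)
 *		lapic_eoi = 0;		// writing any value signals EOI
 *
 * i.e. the EOI is issued only when the local APIC actually has this
 * interrupt in service, so we never consume an EOI that belongs to a
 * different source.
 */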

/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK ;							\

/*
 * Fast interrupt call handlers run in the following sequence:
 *
 *	- Push the trap frame required by doreti
 *	- Mask the interrupt and reenable its source
 *	- If we cannot take the interrupt set its fpending bit and
 *	  doreti.  Note that we cannot mess with mp_lock at all
 *	  if we entered from a critical section!
 *	- If we can take the interrupt clear its fpending bit,
 *	  call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */

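/*
 * Control flow of FAST_INTR in rough C (editor's sketch; names follow
 * the per-cpu fields referenced below):
 *
 *	mask_level_and_eoi(irq);
 *	if (td->td_pri >= TDPRI_CRIT || (td->td_cpl & IRQ_LBIT(irq))) {
 *		gd->gd_fpending |= IRQ_LBIT(irq);	// replay later
 *		gd->gd_reqflags |= RQF_INTPEND;
 *	} else if (try_mplock()) {
 *		gd->gd_fpending &= ~IRQ_LBIT(irq);
 *		run the handler inside a critical section;
 *		rel_mplock();
 *		unmask(irq);
 *	} else {
 *		// forward to the cpu holding mp_lock
 *		lwkt_send_ipiq(owner, forward_fastint_remote, irq);
 *	}
 *	doreti;
 */
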
#define FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
	movl	PCPU(curthread),%ebx ;					\
	movl	TD_CPL(%ebx),%eax ;					\
	pushl	%eax ;							\
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
	jge	1f ;							\
	testl	$IRQ_LBIT(irq_num), %eax ;				\
	jz	2f ;							\
1: ;									\
	/* in critical section, make interrupt pending */		\
	/* set the pending bit and return, leave interrupt masked */	\
	orl	$IRQ_LBIT(irq_num),PCPU(fpending) ;			\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	jmp	5f ;							\
2: ;									\
	/* try to get the MP lock */					\
	call	try_mplock ;						\
	testl	%eax,%eax ;						\
	jz	6f ;							\
	/* clear pending bit, run handler */				\
	incl	PCPU(intr_nesting_level) ;				\
	addl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
	andl	$~IRQ_LBIT(irq_num),PCPU(fpending) ;			\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	subl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
	incl	PCPU(cnt)+V_INTR ;	/* book-keeping make per cpu YYY */ \
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	incl	(%eax) ;						\
	decl	PCPU(intr_nesting_level) ;				\
	call	rel_mplock ;						\
	UNMASK_IRQ(irq_num) ;						\
5: ;									\
	MEXITCOUNT ;							\
	jmp	doreti ;						\
6: ;									\
	/* could not get MP lock, forward the interrupt */		\
	movl	mp_lock, %eax ;		/* check race */		\
	cmpl	$MP_FREE_LOCK,%eax ;					\
	je	2b ;							\
	incl	PCPU(cnt)+V_FORWARDED_INTS ;				\
	subl	$12,%esp ;						\
	movl	$irq_num,8(%esp) ;					\
	movl	$forward_fastint_remote,4(%esp) ;			\
	movl	%eax,(%esp) ;						\
	call	lwkt_send_ipiq ;					\
	addl	$12,%esp ;						\
	jmp	5b ;							\

/*
 * Restart fast interrupt held up by critical section or cpl.
 *
 *	- Push a dummy trap frame as required by doreti
 *	- The interrupt source is already masked
 *	- Clear the fpending bit
 *	- Run the handler
 *	- Unmask the interrupt
 *	- Pop the dummy frame and do a normal return
 *
 *	The BGL is held on call and left held on return.
 *
 *	YYY can cache gd base pointer instead of using hidden %fs
 *	prefixes.
 */

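/*
 * Editor's note: unlike the IDT vectors above, these entry points end
 * with 'ret', so they are apparently reached by a near call (from
 * doreti replaying an interrupt recorded in fpending) rather than
 * through the IDT; PUSH_DUMMY then fakes the trap frame the handler
 * and doreti expect.
 */
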
#define FAST_UNPEND(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%ebp ;							\
	movl	%esp,%ebp ;						\
	PUSH_DUMMY ;							\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	incl	PCPU(cnt)+V_INTR ;	/* book-keeping make per cpu YYY */ \
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	incl	(%eax) ;						\
	UNMASK_IRQ(irq_num) ;						\
	POP_DUMMY ;							\
	popl	%ebp ;							\
	ret ;								\

/*
 * Slow interrupt call handlers run in the following sequence:
 *
 *	- Push the trap frame required by doreti.
 *	- Mask the interrupt and reenable its source.
 *	- If we cannot take the interrupt set its ipending bit and
 *	  doreti.  In addition to checking for a critical section
 *	  and cpl mask we also check to see if the thread is still
 *	  running.  Note that we cannot mess with mp_lock at all
 *	  if we entered from a critical section!
 *	- If we can take the interrupt clear its ipending bit
 *	  and schedule the thread.  Leave interrupts masked and doreti.
 *
 *	Note that calls to sched_ithd() are made with interrupts enabled
 *	and outside a critical section.  YYY sched_ithd may preempt us
 *	synchronously (fix interrupt stacking).
 *
 *	YYY can cache gd base pointer instead of using hidden %fs
 *	prefixes.
 */

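/*
 * Control flow of INTR in rough C (editor's sketch):
 *
 *	mask_level_and_eoi(irq);
 *	if (td->td_pri >= TDPRI_CRIT || (td->td_cpl & IRQ_LBIT(irq))) {
 *		gd->gd_ipending |= IRQ_LBIT(irq);	// replay later
 *		gd->gd_reqflags |= RQF_INTPEND;
 *	} else {
 *		gd->gd_ipending &= ~IRQ_LBIT(irq);
 *		enable_intr();
 *		sched_ithd(irq);	// schedule the interrupt thread
 *	}
 *	doreti;		// source stays masked; the ithread unmasks it
 */
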
#define INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
	movl	PCPU(curthread),%ebx ;					\
	movl	TD_CPL(%ebx),%eax ;					\
	pushl	%eax ;		/* cpl to restore */			\
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
	jge	1f ;							\
	testl	$IRQ_LBIT(irq_num),%eax ;				\
	jz	2f ;							\
1: ;									\
	/* set the pending bit and return, leave the interrupt masked */ \
	orl	$IRQ_LBIT(irq_num), PCPU(ipending) ;			\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	jmp	5f ;							\
2: ;									\
	/* set running bit, clear pending bit, run handler */		\
	andl	$~IRQ_LBIT(irq_num), PCPU(ipending) ;			\
	sti ;								\
	pushl	$irq_num ;						\
	call	sched_ithd ;						\
	addl	$4,%esp ;						\
	incl	PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */	\
	movl	intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
5: ;									\
	MEXITCOUNT ;							\
	jmp	doreti ;						\


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movl	PCPU(cpuid), %eax
	popl	%fs
	ss
	incl	xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret

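/*
 * Editor's note: writing %cr3 back to itself is the classic i386 TLB
 * flush idiom (it invalidates non-global entries).  In C this is
 * roughly the invltlb() helper from cpufunc.h:
 *
 *	load_cr3(rcr3());
 */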

#if 0
#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpucheckstate
	.globl checkstate_cpustate
	.globl checkstate_curproc
	.globl checkstate_pc
Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)
	jne	1f
	incl	%ebx			/* system or interrupt */
1:
	movl	PCPU(cpuid), %eax
	movl	%ebx, checkstate_cpustate(,%eax,4)
	movl	PCPU(curthread), %ebx
	movl	TD_PROC(%ebx),%ebx
	movl	%ebx, checkstate_curproc(,%eax,4)
	movl	16(%esp), %ebx
	movl	%ebx, checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */
#endif

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

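/*
 * Handshake in rough C (editor's sketch; atomic_set/clear_bit stand in
 * for the lock;btsl / lock;btrl sequences below):
 *
 *	savectx(&stoppcbs[cpuid]);
 *	atomic_set_bit(cpuid, &stopped_cpus);	// signal receipt
 *	while ((started_cpus & (1 << cpuid)) == 0)
 *		;				// spin until restarted
 *	atomic_clear_bit(cpuid, &started_cpus);
 *	atomic_clear_bit(cpuid, &stopped_cpus);	// signal restart
 *	if (cpuid == 0 && cpustop_restartfunc != NULL) {
 *		f = cpustop_restartfunc;
 *		cpustop_restartfunc = NULL;	// one-shot
 *		f();
 *	}
 */
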
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	PCPU(cpuid), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(cpuid), %eax

	lock
	btsl	%eax, stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret

	/*
	 * For now just have one ipiq IPI, but what we really want is
	 * to have one for each source cpu so the APICs don't get stalled
	 * backlogging the requests.
	 */
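
/*
 * Editor's sketch of the decision Xipiq makes below:
 *
 *	lapic_eoi = 0;
 *	if (td->td_pri < TDPRI_CRIT)
 *		lwkt_process_ipiq();		// drain now, then doreti
 *	else
 *		gd->gd_reqflags |= RQF_IPIQ;	// note it and iret; the
 *						// flag is polled later
 */
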
	.text
	SUPERALIGN_TEXT
	.globl Xipiq
Xipiq:
	PUSH_FRAME
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	FAKE_MCOUNT(13*4(%esp))

	movl	PCPU(curthread),%ebx
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx)
	jge	1f
	incl	PCPU(intr_nesting_level)
	addl	$TDPRI_CRIT,TD_PRI(%ebx)
	call	lwkt_process_ipiq
	subl	$TDPRI_CRIT,TD_PRI(%ebx)
	decl	PCPU(intr_nesting_level)
	pushl	TD_CPL(%ebx)
	MEXITCOUNT
	jmp	doreti
1:
	orl	$RQF_IPIQ,PCPU(reqflags)
	MEXITCOUNT
	POP_FRAME
	iret

MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)

	/* YYY what is this garbage? */
#define CLKINTR_PENDING							\
	call	clock_lock ;						\
	movl	$1,CNAME(clkintr_pending) ;				\
	call	clock_unlock ;						\

	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)

	FAST_UNPEND(0,fastunpend0)
	FAST_UNPEND(1,fastunpend1)
	FAST_UNPEND(2,fastunpend2)
	FAST_UNPEND(3,fastunpend3)
	FAST_UNPEND(4,fastunpend4)
	FAST_UNPEND(5,fastunpend5)
	FAST_UNPEND(6,fastunpend6)
	FAST_UNPEND(7,fastunpend7)
	FAST_UNPEND(8,fastunpend8)
	FAST_UNPEND(9,fastunpend9)
	FAST_UNPEND(10,fastunpend10)
	FAST_UNPEND(11,fastunpend11)
	FAST_UNPEND(12,fastunpend12)
	FAST_UNPEND(13,fastunpend13)
	FAST_UNPEND(14,fastunpend14)
	FAST_UNPEND(15,fastunpend15)
	FAST_UNPEND(16,fastunpend16)
	FAST_UNPEND(17,fastunpend17)
	FAST_UNPEND(18,fastunpend18)
	FAST_UNPEND(19,fastunpend19)
	FAST_UNPEND(20,fastunpend20)
	FAST_UNPEND(21,fastunpend21)
	FAST_UNPEND(22,fastunpend22)
	FAST_UNPEND(23,fastunpend23)
MCOUNT_LABEL(eintr)

	/*
	 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
	 *
	 * - Calls the generic rendezvous action function.
	 */
	.text
	SUPERALIGN_TEXT
	.globl	Xrendezvous
Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	smp_rendezvous_action

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	POP_FRAME
	iret


	.data

#if 0
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	_swi_null, swi_net, _swi_null, _swi_null
	.long	_swi_vm, _swi_null, _softclock

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
#endif	/* 0 */


#ifdef COUNT_XINVLTLB_HITS
	.globl	xhits
xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl stopped_cpus, started_cpus
stopped_cpus:
	.long	0
started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl checkstate_probed_cpus
checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl CNAME(cpustop_restartfunc)
CNAME(cpustop_restartfunc):
	.long	0

	.globl	apic_pin_trigger
apic_pin_trigger:
	.long	0

	.text
668         .text