MP Implementation 3B/4: Remove Xcpuast and Xforward_irq, replacing them
[dragonfly.git] / sys / platform / pc32 / apic / apic_vector.s
1 /*
2  *      from: vector.s, 386BSD 0.1 unknown origin
3  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4  * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.10 2003/07/11 01:23:23 dillon Exp $
5  */
6
7
8 #include <machine/apic.h>
9 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
11
12 /* convert an absolute IRQ# into a bitmask (bit irq_num of an int) */
13 #define IRQ_LBIT(irq_num)       (1 << (irq_num))
14
15 /* make an index into the IO APIC from the IRQ#; each redirection
 * table entry occupies two 32-bit registers starting at index 0x10 */
16 #define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
17
18 /*
19  * Push an interrupt frame in a format acceptable to doreti, reload
20  * the segment registers for the kernel.  On exit %ds/%es hold the
 * kernel data selector (KDSEL) and %fs the per-cpu selector (KPSEL).
 * %ax is clobbered, but pushal has already saved the caller's %eax
 * in the frame so it is restored on frame pop.
21  */
22 #define PUSH_FRAME                                                      \
23         pushl   $0 ;            /* dummy error code */                  \
24         pushl   $0 ;            /* dummy trap type */                   \
25         pushal ;                                                        \
26         pushl   %ds ;           /* save data and extra segments ... */  \
27         pushl   %es ;                                                   \
28         pushl   %fs ;                                                   \
29         mov     $KDSEL,%ax ;                                            \
30         mov     %ax,%ds ;                                               \
31         mov     %ax,%es ;                                               \
32         mov     $KPSEL,%ax ;                                            \
33         mov     %ax,%fs ;                                               \
34
/*
 * Build a dummy doreti-style trap frame out of the current (physical)
 * cpu state.  Assumes the caller's return address sits at 4(%esp) on
 * entry (i.e. invoked right after a pushl %ebp / movl %esp,%ebp
 * prologue, as in FAST_UNPEND) so that 12(%esp) is the original
 * caller eip after the two pushes below — TODO(review) confirm for
 * any new call sites.  The final subl just reserves the pushal,
 * segment-register and CPL slots without saving anything into them.
 */
35 #define PUSH_DUMMY                                                      \
36         pushfl ;                /* phys int frame / flags */            \
37         pushl %cs ;             /* phys int frame / cs */               \
38         pushl   12(%esp) ;      /* original caller eip */               \
39         pushl   $0 ;            /* dummy error code */                  \
40         pushl   $0 ;            /* dummy trap type */                   \
41         subl    $12*4,%esp ;    /* pushal + 3 seg regs (dummy) + CPL */ \
42
43 /*
44  * Warning: POP_FRAME can only be used if there is no chance of a
45  * segment register being changed (e.g. by procfs), which is why syscalls
46  * have to use doreti.
 *
 * Reverses PUSH_FRAME: restores %fs/%es/%ds and the pushal registers,
 * then discards the dummy trap-type and error-code words.
47  */
48 #define POP_FRAME                                                       \
49         popl    %fs ;                                                   \
50         popl    %es ;                                                   \
51         popl    %ds ;                                                   \
52         popal ;                                                         \
53         addl    $2*4,%esp ;     /* dummy trap & error codes */          \
54
/*
 * Discard the frame built by PUSH_DUMMY: 12 reserved slots + trap type
 * + error code + eip + cs + eflags = 17 dwords.  Nothing is restored
 * because PUSH_DUMMY saved no register state in those slots.
 */
55 #define POP_DUMMY                                                      \
56         addl    $17*4,%esp ;                                            \
57
/*
 * Per-IRQ lookups into the 16-byte int_to_apicintpin[] entries:
 * +8 appears to be the I/O APIC register base address and +12 the
 * redirection-table index for the pin — TODO(review) confirm the
 * struct layout against intr_machdep.h.
 */
58 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
59 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
60
61 /*
62  * Interrupts are expected to already be disabled when using these
63  * IMASK_*() macros.  The imen_spinlock serializes access to
 * apic_imen and the I/O APIC index/window registers (see MASK_IRQ
 * and UNMASK_IRQ below).
64  */
65 #define IMASK_LOCK                                                      \
66         SPIN_LOCK(imen_spinlock) ;                                      \
67
68 #define IMASK_UNLOCK                                                    \
69         SPIN_UNLOCK(imen_spinlock) ;                                    \
70         
/*
 * MASK_IRQ(irq_num): mark the IRQ masked in apic_imen and set the
 * mask bit in its I/O APIC redirection entry, unless it is already
 * masked.  Runs under the imen spinlock; interrupts must be disabled.
 * Clobbers %eax and %ecx.
 */
71 #define MASK_IRQ(irq_num)                                               \
72         IMASK_LOCK ;                            /* into critical reg */ \
73         testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
74         jne     7f ;                    /* masked, don't mask */        \
75         orl     $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */  \
76         movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
77         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
78         movl    %eax, (%ecx) ;                  /* write the index */   \
79         movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
80         orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
81         movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
82 7: ;                                            /* already masked */    \
83         IMASK_UNLOCK ;                                                  \
84
85 /*
86  * Test to see whether we are handling an edge or level triggered INT.
87  *  Level-triggered INTs must still be masked as we don't clear the source,
88  *  and the EOI cycle would cause redundant INTs to occur.
 *  (Clobbers %eax/%ecx via MASK_IRQ when the IRQ is level-triggered;
 *   apic_pin_trigger has a bit set for each level-triggered pin.)
89  */
90 #define MASK_LEVEL_IRQ(irq_num)                                         \
91         testl   $IRQ_LBIT(irq_num), apic_pin_trigger ;                  \
92         jz      9f ;                            /* edge, don't mask */  \
93         MASK_IRQ(irq_num) ;                                             \
94 9: ;                                                                    \
95
96
/*
 * EOI_IRQ(irq_num): write the local APIC EOI register, but only when
 * the IRQ actually shows as in-service — either via the lapic ISR word
 * (lapic_isr1), or via the precomputed apic_isrbit_location table when
 * APIC_INTR_REORDER is defined (entry: +0 = pointer to the ISR word,
 * +4 = bit mask — TODO(review) confirm table layout).  The reorder
 * variant clobbers %eax.
 */
97 #ifdef APIC_INTR_REORDER
98 #define EOI_IRQ(irq_num)                                                \
99         movl    apic_isrbit_location + 8 * (irq_num), %eax ;            \
100         movl    (%eax), %eax ;                                          \
101         testl   apic_isrbit_location + 4 + 8 * (irq_num), %eax ;        \
102         jz      9f ;                            /* not active */        \
103         movl    $0, lapic_eoi ;                                         \
104 9:                                                                      \
105
106 #else
107
108 #define EOI_IRQ(irq_num)                                                \
109         testl   $IRQ_LBIT(irq_num), lapic_isr1;                         \
110         jz      9f      ;                       /* not active */        \
111         movl    $0, lapic_eoi;                                          \
112 9:                                                                      \
113
114 #endif
115         
116 /*
117  * Test to see if the source is currently masked, clear if so.
 *  Mirror image of MASK_IRQ: clears the apic_imen bit and the
 *  redirection-entry mask under the imen spinlock.  Interrupts must
 *  be disabled.  Clobbers %eax and %ecx.
118  */
119 #define UNMASK_IRQ(irq_num)                                     \
120         IMASK_LOCK ;                            /* into critical reg */ \
121         testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
122         je      7f ;                    /* bit clear, not masked */     \
123         andl    $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */    \
124         movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
125         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
126         movl    %eax,(%ecx) ;                   /* write the index */   \
127         movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
128         andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
129         movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
130 7: ;                                                                    \
131         IMASK_UNLOCK ;                                                  \
132
133 /*
134  * Fast interrupt call handlers run in the following sequence:
135  *
136  *      - Push the trap frame required by doreti
137  *      - Mask the interrupt and reenable its source
138  *      - If we cannot take the interrupt set its fpending bit and
139  *        doreti.
140  *      - If we can take the interrupt clear its fpending bit,
141  *        call the handler, then unmask and doreti.
 *
 *      The handler runs with the BGL held (acquired via try_mplock,
 *      released via rel_mplock); if the lock cannot be obtained the
 *      interrupt is deferred through fpending like the critical
 *      section case.
142  *
143  * YYY can cache gd base pointer instead of using hidden %fs prefixes.
144  */
145
146 #define FAST_INTR(irq_num, vec_name)                                    \
147         .text ;                                                         \
148         SUPERALIGN_TEXT ;                                               \
149 IDTVEC(vec_name) ;                                                      \
150         PUSH_FRAME ;                                                    \
151         FAKE_MCOUNT(13*4(%esp)) ;                                       \
152         MASK_LEVEL_IRQ(irq_num) ;                                       \
153         EOI_IRQ(irq_num) ;                                              \
154         incl    PCPU(intr_nesting_level) ;                              \
155         movl    PCPU(curthread),%ebx ;                                  \
156         movl    TD_CPL(%ebx),%eax ;                                     \
157         pushl   %eax ;                                                  \
158         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
159         jge     1f ;                                                    \
160         testl   $IRQ_LBIT(irq_num), %eax ;                              \
161         jz      2f ;                                                    \
162 1: ;                                                                    \
163         /* set the pending bit and return, leave interrupt masked */    \
164         orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
165         movl    $TDPRI_CRIT, PCPU(reqpri) ;                             \
166         jmp     5f ;                                                    \
167 2: ;                                                                    \
168         /* try to get giant */                                          \
169         call    try_mplock ;                                            \
170         testl   %eax,%eax ;                                             \
171         jz      1b ;                                                    \
172         /* clear pending bit, run handler */                            \
173         addl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
174         andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
175         pushl   intr_unit + (irq_num) * 4 ;                             \
176         call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
177         addl    $4, %esp ;                                              \
178         subl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
179         incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
180         movl    intr_countp + (irq_num) * 4, %eax ;                     \
181         incl    (%eax) ;                                                \
182         call    rel_mplock ;                                            \
183         UNMASK_IRQ(irq_num) ;                                           \
184 5: ;                                                                    \
185         MEXITCOUNT ;                                                    \
186         jmp     doreti ;                                                \
187
188 /*
189  * Restart fast interrupt held up by critical section or cpl.
190  *
191  *      - Push a dummy trap frame as required by doreti
192  *      - The interrupt source is already masked
193  *      - Clear the fpending bit
194  *      - Run the handler
195  *      - Unmask the interrupt
196  *      - Pop the dummy frame and do a normal return
 *
 *      NOTE(review): the code below does not itself touch fpending;
 *      presumably the caller (doreti's fpending loop) clears the bit
 *      before dispatching here — verify against ipl.s.
197  *
198  *      The BGL is held on call and left held on return.
199  *
200  *      YYY can cache gd base pointer instead of using hidden %fs
201  *      prefixes.
202  */
203
204 #define FAST_UNPEND(irq_num, vec_name)                                  \
205         .text ;                                                         \
206         SUPERALIGN_TEXT ;                                               \
207 IDTVEC(vec_name) ;                                                      \
208         pushl   %ebp ;                                                  \
209         movl    %esp,%ebp ;                                             \
210         PUSH_DUMMY ;                                                    \
211         pushl   intr_unit + (irq_num) * 4 ;                             \
212         call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
213         addl    $4, %esp ;                                              \
214         incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
215         movl    intr_countp + (irq_num) * 4, %eax ;                     \
216         incl    (%eax) ;                                                \
217         UNMASK_IRQ(irq_num) ;                                           \
218         POP_DUMMY ;                                                     \
219         popl %ebp ;                                                     \
220         ret ;                                                           \
221
222 /*
223  * Slow interrupt call handlers run in the following sequence:
224  *
225  *      - Push the trap frame required by doreti.
226  *      - Mask the interrupt and reenable its source.
227  *      - If we cannot take the interrupt set its ipending bit and
228  *        doreti.  In addition to checking for a critical section
229  *        and cpl mask we also check to see if the thread is still
230  *        running.
231  *      - If we can take the interrupt clear its ipending bit
232  *        and schedule the thread.  Leave interrupts masked and doreti.
233  *
234  *      Note that calls to sched_ithd() are made with interrupts enabled
235  *      and outside a critical section.  YYY sched_ithd may preempt us
236  *      synchronously (fix interrupt stacking)
237  *
238  *      YYY can cache gd base pointer instead of using hidden %fs
239  *      prefixes.
240  */
241
242 #define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
243         .text ;                                                         \
244         SUPERALIGN_TEXT ;                                               \
245 IDTVEC(vec_name) ;                                                      \
246         PUSH_FRAME ;                                                    \
247         maybe_extra_ipending ;                                          \
248 ;                                                                       \
249         MASK_LEVEL_IRQ(irq_num) ;                                       \
250         EOI_IRQ(irq_num) ;                                              \
251         incl    PCPU(intr_nesting_level) ;                              \
252         movl    PCPU(curthread),%ebx ;                                  \
253         movl    TD_CPL(%ebx),%eax ;                                     \
254         pushl   %eax ;          /* cpl to restore */                    \
255         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
256         jge     1f ;                                                    \
257         testl   $IRQ_LBIT(irq_num),%eax ;                               \
258         jz      2f ;                                                    \
259 1: ;                                                                    \
260         /* set the pending bit and return, leave the interrupt masked */ \
261         orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
262         movl    $TDPRI_CRIT, PCPU(reqpri) ;                             \
263         jmp     5f ;                                                    \
264 2: ;                                                                    \
265         /* set running bit, clear pending bit, run handler */           \
266         andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
267         sti ;                                                           \
268         pushl   $irq_num ;                                              \
269         call    sched_ithd ;                                            \
270         addl    $4,%esp ;                                               \
271         incl    PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */  \
272         movl    intr_countp + (irq_num) * 4,%eax ;                      \
273         incl    (%eax) ;                                                \
274 5: ;                                                                    \
275         MEXITCOUNT ;                                                    \
276         jmp     doreti ;                                                \
277
278 /*
279  * Unmask a slow interrupt.  This function is used by interrupt threads
280  * after they have descheduled themselves to reenable interrupts and
281  * possibly cause a reschedule to occur.
 *
 * Note: the icu argument is not referenced by this macro (kept for
 * signature compatibility with the 8259 version); %eax/%ecx are
 * clobbered via UNMASK_IRQ.
282  */
283
284 #define INTR_UNMASK(irq_num, vec_name, icu)                             \
285         .text ;                                                         \
286         SUPERALIGN_TEXT ;                                               \
287 IDTVEC(vec_name) ;                                                      \
288         pushl %ebp ;     /* frame for ddb backtrace */                  \
289         movl    %esp, %ebp ;                                            \
290         UNMASK_IRQ(irq_num) ;                                           \
291         popl %ebp ;                                                     \
292         ret ;                                                           \
293
294 #if 0
/*
 * NOTE(review): dead code — compiled out by the #if 0.  Left as a
 * reference fragment for reintroducing IRQ forwarding to the BGL
 * holder; it is not a complete macro (note the dangling backslash
 * continuations).
 */
295         /* XXX forward_irq to cpu holding the BGL? */
296
297         ALIGN_TEXT ;                                                    \
298 3: ;                    /* other cpu has isr lock */                    \
299         lock ;                                                          \
300         orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
301         movl    $TDPRI_CRIT,_reqpri ;                                   \
302         testl   $IRQ_LBIT(irq_num), TD_CPL(%ebx) ;              \
303         jne     4f ;                            /* this INT masked */   \
304         call    forward_irq ;    /* forward irq to lock holder */       \
305         POP_FRAME ;                             /* and return */        \
306         iret ;                                                          \
307         ALIGN_TEXT ;                                                    \
308 4: ;                                            /* blocked */           \
309         POP_FRAME ;                             /* and return */        \
310         iret
311
312 /*
313  * Handle "spurious INTerrupts".
314  * Notes:
315  *  This is different than the "spurious INTerrupt" generated by an
316  *   8259 PIC for missing INTs.  See the APIC documentation for details.
317  *  This routine should NOT do an 'EOI' cycle.
318  */
319
320 #endif
321
/*
 * Xspuriousint: local APIC spurious-interrupt vector.  Per the APIC
 * documentation this routine must NOT perform an EOI cycle; simply
 * return to the interrupted context.
 */
322         .text
323         SUPERALIGN_TEXT
324         .globl Xspuriousint
325 Xspuriousint:
326
327         /* No EOI cycle used here */
328
329         iret
330
332 /*
333  * Handle TLB shootdowns.  Reloading %cr3 flushes the local TLB; only
 * %eax is used, and %ss-override addressing avoids having to reload
 * %ds for the memory references.
334  */
335         .text
336         SUPERALIGN_TEXT
337         .globl  Xinvltlb
338 Xinvltlb:
339         pushl   %eax
340
341 #ifdef COUNT_XINVLTLB_HITS
342         pushl   %fs
343         movl    $KPSEL, %eax
344         mov     %ax, %fs
345         movl    PCPU(cpuid), %eax
346         popl    %fs
347         ss
348         incl    _xhits(,%eax,4)
349 #endif /* COUNT_XINVLTLB_HITS */
350
351         movl    %cr3, %eax              /* invalidate the TLB */
352         movl    %eax, %cr3
353
354         ss                              /* stack segment, avoid %ds load */
355         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
356
357         popl    %eax
358         iret
359
361 #if 0
/* NOTE(review): dead code — this whole BETTER_CLOCK block is compiled
 * out by the surrounding #if 0.  Kept for reference only. */
362 #ifdef BETTER_CLOCK
363
364 /*
365  * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
366  *
367  *  - Stores current cpu state in checkstate_cpustate[cpuid]
368  *      0 == user, 1 == sys, 2 == intr
369  *  - Stores current process in checkstate_curproc[cpuid]
370  *
371  *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
372  *
373  * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
374  */
375
376         .text
377         SUPERALIGN_TEXT
378         .globl Xcpucheckstate
379         .globl checkstate_cpustate
380         .globl checkstate_curproc
381         .globl checkstate_pc
382 Xcpucheckstate:
383         pushl   %eax
384         pushl   %ebx            
385         pushl   %ds                     /* save current data segment */
386         pushl   %fs
387
388         movl    $KDSEL, %eax
389         mov     %ax, %ds                /* use KERNEL data segment */
390         movl    $KPSEL, %eax
391         mov     %ax, %fs
392
393         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
394
395         movl    $0, %ebx                
396         movl    20(%esp), %eax  
397         andl    $3, %eax
398         cmpl    $3, %eax
399         je      1f
400         testl   $PSL_VM, 24(%esp)
401         jne     1f
402         incl    %ebx                    /* system or interrupt */
403 1:      
404         movl    PCPU(cpuid), %eax
405         movl    %ebx, checkstate_cpustate(,%eax,4)
406         movl    PCPU(curthread), %ebx
407         movl    TD_PROC(%ebx),%ebx
408         movl    %ebx, checkstate_curproc(,%eax,4)
409         movl    16(%esp), %ebx
410         movl    %ebx, checkstate_pc(,%eax,4)
411
412         lock                            /* checkstate_probed_cpus |= (1<<id) */
413         btsl    %eax, checkstate_probed_cpus
414
415         popl    %fs
416         popl    %ds                     /* restore previous data segment */
417         popl    %ebx
418         popl    %eax
419         iret
420
421 #endif /* BETTER_CLOCK */
422 #endif
424 /*
425  * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
426  *
427  *  - Signals its receipt.
428  *  - Waits for permission to restart.
429  *  - Signals its restart.
 *
 *  The cpu's context is saved into stoppcbs[cpuid] via savectx() so a
 *  stopped cpu can be inspected/dumped.  %eax holds the cpuid for the
 *  bit-test handshake below (btsl/btl/btrl do not modify %eax).
430  */
431
432         .text
433         SUPERALIGN_TEXT
434         .globl Xcpustop
435 Xcpustop:
436         pushl   %ebp
437         movl    %esp, %ebp
438         pushl   %eax
439         pushl   %ecx
440         pushl   %edx
441         pushl   %ds                     /* save current data segment */
442         pushl   %fs
443
444         movl    $KDSEL, %eax
445         mov     %ax, %ds                /* use KERNEL data segment */
446         movl    $KPSEL, %eax
447         mov     %ax, %fs
448
449         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
450
451         movl    PCPU(cpuid), %eax
452         imull   $PCB_SIZE, %eax
453         leal    CNAME(stoppcbs)(%eax), %eax
454         pushl   %eax
455         call    CNAME(savectx)          /* Save process context */
456         addl    $4, %esp
457         
458                 
459         movl    PCPU(cpuid), %eax
460
461         lock
462         btsl    %eax, stopped_cpus      /* stopped_cpus |= (1<<id) */
463 1:
464         btl     %eax, started_cpus      /* while (!(started_cpus & (1<<id))) */
465         jnc     1b
466
467         lock
468         btrl    %eax, started_cpus      /* started_cpus &= ~(1<<id) */
469         lock
470         btrl    %eax, stopped_cpus      /* stopped_cpus &= ~(1<<id) */
471
        /* only cpu 0 runs the (one-shot) restart function */
472         test    %eax, %eax
473         jnz     2f
474
475         movl    CNAME(cpustop_restartfunc), %eax
476         test    %eax, %eax
477         jz      2f
478         movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */
479
480         call    *%eax
481 2:
482         popl    %fs
483         popl    %ds                     /* restore previous data segment */
484         popl    %edx
485         popl    %ecx
486         popl    %eax
487         movl    %ebp, %esp
488         popl    %ebp
489         iret
490
491         /*
492          * For now just have one ipiq IPI, but what we really want is
493          * to have one for each source cpu so the APICs don't get stalled
494          * backlogging the requests.
         *
         * Processes the per-cpu IPI message queue inside a forced
         * critical section.  If the target thread is already in a
         * critical section the work is deferred by setting AST_IPIQ
         * and bumping reqpri instead.
495          */
496         .text
497         SUPERALIGN_TEXT
498         .globl Xipiq
499 Xipiq:
500         PUSH_FRAME
501         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
502         FAKE_MCOUNT(13*4(%esp))
503
504         movl    PCPU(curthread),%ebx
505         cmpl    $TDPRI_CRIT,TD_PRI(%ebx)
506         jge     1f
507         addl    $TDPRI_CRIT,TD_PRI(%ebx)
508         call    lwkt_process_ipiq
509         subl    $TDPRI_CRIT,TD_PRI(%ebx)
510         pushl   TD_CPL(%ebx)
511         incl    PCPU(intr_nesting_level)
512         MEXITCOUNT
513         jmp     doreti
514 1:
        /* already in a critical section: defer via AST */
515         movl    $TDPRI_CRIT,PCPU(reqpri)
516         orl     $AST_IPIQ,PCPU(astpending)
517         MEXITCOUNT
518         POP_FRAME
519         iret
520
/* Instantiate the fast-interrupt entry points for APIC IRQs 0-23 */
521 MCOUNT_LABEL(bintr)
522         FAST_INTR(0,fastintr0)
523         FAST_INTR(1,fastintr1)
524         FAST_INTR(2,fastintr2)
525         FAST_INTR(3,fastintr3)
526         FAST_INTR(4,fastintr4)
527         FAST_INTR(5,fastintr5)
528         FAST_INTR(6,fastintr6)
529         FAST_INTR(7,fastintr7)
530         FAST_INTR(8,fastintr8)
531         FAST_INTR(9,fastintr9)
532         FAST_INTR(10,fastintr10)
533         FAST_INTR(11,fastintr11)
534         FAST_INTR(12,fastintr12)
535         FAST_INTR(13,fastintr13)
536         FAST_INTR(14,fastintr14)
537         FAST_INTR(15,fastintr15)
538         FAST_INTR(16,fastintr16)
539         FAST_INTR(17,fastintr17)
540         FAST_INTR(18,fastintr18)
541         FAST_INTR(19,fastintr19)
542         FAST_INTR(20,fastintr20)
543         FAST_INTR(21,fastintr21)
544         FAST_INTR(22,fastintr22)
545         FAST_INTR(23,fastintr23)
546         
547         /* YYY what is this garbage? */
/*
 * Passed as the maybe_extra_ipending hook to INTR(0, ...): records
 * that a clock interrupt is pending, serialized by clock_lock.
 */
548 #define CLKINTR_PENDING                                                 \
549         call    clock_lock ;                                            \
550         movl $1,CNAME(clkintr_pending) ;                                \
551         call    clock_unlock ;                                          \
552
/* Instantiate the slow-interrupt entry points for APIC IRQs 0-23;
 * IRQ 0 (clock) additionally records clkintr_pending. */
553         INTR(0,intr0, CLKINTR_PENDING)
554         INTR(1,intr1,)
555         INTR(2,intr2,)
556         INTR(3,intr3,)
557         INTR(4,intr4,)
558         INTR(5,intr5,)
559         INTR(6,intr6,)
560         INTR(7,intr7,)
561         INTR(8,intr8,)
562         INTR(9,intr9,)
563         INTR(10,intr10,)
564         INTR(11,intr11,)
565         INTR(12,intr12,)
566         INTR(13,intr13,)
567         INTR(14,intr14,)
568         INTR(15,intr15,)
569         INTR(16,intr16,)
570         INTR(17,intr17,)
571         INTR(18,intr18,)
572         INTR(19,intr19,)
573         INTR(20,intr20,)
574         INTR(21,intr21,)
575         INTR(22,intr22,)
576         INTR(23,intr23,)
577
/* Instantiate the fast-interrupt restart (unpend) entry points,
 * called from doreti when a deferred fast interrupt can finally run */
578         FAST_UNPEND(0,fastunpend0)
579         FAST_UNPEND(1,fastunpend1)
580         FAST_UNPEND(2,fastunpend2)
581         FAST_UNPEND(3,fastunpend3)
582         FAST_UNPEND(4,fastunpend4)
583         FAST_UNPEND(5,fastunpend5)
584         FAST_UNPEND(6,fastunpend6)
585         FAST_UNPEND(7,fastunpend7)
586         FAST_UNPEND(8,fastunpend8)
587         FAST_UNPEND(9,fastunpend9)
588         FAST_UNPEND(10,fastunpend10)
589         FAST_UNPEND(11,fastunpend11)
590         FAST_UNPEND(12,fastunpend12)
591         FAST_UNPEND(13,fastunpend13)
592         FAST_UNPEND(14,fastunpend14)
593         FAST_UNPEND(15,fastunpend15)
594         FAST_UNPEND(16,fastunpend16)
595         FAST_UNPEND(17,fastunpend17)
596         FAST_UNPEND(18,fastunpend18)
597         FAST_UNPEND(19,fastunpend19)
598         FAST_UNPEND(20,fastunpend20)
599         FAST_UNPEND(21,fastunpend21)
600         FAST_UNPEND(22,fastunpend22)
601         FAST_UNPEND(23,fastunpend23)
602 MCOUNT_LABEL(eintr)
603
604         /*
605          * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
606          *
607          * - Calls the generic rendezvous action function.
         *
         * NOTE(review): PUSH_FRAME already loads %ds/%es/%fs with the
         * kernel selectors, so the explicit reloads below appear
         * redundant — verify before removing.
608          */
609         .text
610         SUPERALIGN_TEXT
611         .globl  Xrendezvous
612 Xrendezvous:
613         PUSH_FRAME
614         movl    $KDSEL, %eax
615         mov     %ax, %ds                /* use KERNEL data segment */
616         mov     %ax, %es
617         movl    $KPSEL, %eax
618         mov     %ax, %fs
619
620         call    smp_rendezvous_action
621
622         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
623         POP_FRAME
624         iret
625         
626         
627         .data
628
629 #if 0
/* NOTE(review): dead data — compiled out; legacy spl-era handler and
 * mask tables kept for reference only. */
630 /*
631  * Addresses of interrupt handlers.
632  *  XresumeNN: Resumption addresses for HWIs.
633  */
634         .globl _ihandlers
635 _ihandlers:
636 /*
637  * used by:
638  *  ipl.s:      doreti_unpend
639  */
640         .long   Xresume0,  Xresume1,  Xresume2,  Xresume3 
641         .long   Xresume4,  Xresume5,  Xresume6,  Xresume7
642         .long   Xresume8,  Xresume9,  Xresume10, Xresume11
643         .long   Xresume12, Xresume13, Xresume14, Xresume15 
644         .long   Xresume16, Xresume17, Xresume18, Xresume19
645         .long   Xresume20, Xresume21, Xresume22, Xresume23
646 /*
647  * used by:
648  *  ipl.s:      doreti_unpend
649  *  apic_ipl.s: splz_unpend
650  */
651         .long   _swi_null, swi_net, _swi_null, _swi_null
652         .long   _swi_vm, _swi_null, _softclock
653
654 imasks:                         /* masks for interrupt handlers */
655         .space  NHWI*4          /* padding; HWI masks are elsewhere */
656
657         .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
658         .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
659 #endif  /* 0 */
660
661
662 #ifdef COUNT_XINVLTLB_HITS
663         .globl  xhits
664 xhits:
665         .space  (NCPU * 4), 0
666 #endif /* COUNT_XINVLTLB_HITS */
667
668 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
669         .globl stopped_cpus, started_cpus
670 stopped_cpus:
671         .long   0
672 started_cpus:
673         .long   0
674
675 #ifdef BETTER_CLOCK
676         .globl checkstate_probed_cpus
677 checkstate_probed_cpus:
678         .long   0       
679 #endif /* BETTER_CLOCK */
/* one-shot function pointer invoked by cpu 0 in Xcpustop on restart */
680         .globl CNAME(cpustop_restartfunc)
681 CNAME(cpustop_restartfunc):
682         .long 0
683                 
/* bitmask of level-triggered IRQ pins (tested by MASK_LEVEL_IRQ) */
684         .globl  apic_pin_trigger
685 apic_pin_trigger:
686         .long   0
687
688         .text