/*
 *      from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.8 2003/07/06 21:23:49 dillon Exp $
 */


#include <machine/apic.h>
#include <machine/smp.h>
#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)       (1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
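
/*
 * In C terms the two macros above are just (a sketch, not part of the
 * original source): the redirection table starts at I/O APIC register
 * index 0x10 and each entry occupies two 32-bit registers, hence
 * the "* 2".
 *
 *      static __inline unsigned irq_lbit(int irq)   { return (1u << irq); }
 *      static __inline unsigned redtbl_idx(int irq) { return (0x10 + irq * 2); }
 */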

/*
 * Push an interrupt frame in a format acceptable to doreti, reload
 * the segment registers for the kernel.
 */
#define PUSH_FRAME                                                      \
        pushl   $0 ;            /* dummy error code */                  \
        pushl   $0 ;            /* dummy trap type */                   \
        pushal ;                                                        \
        pushl   %ds ;           /* save data and extra segments ... */  \
        pushl   %es ;                                                   \
        pushl   %fs ;                                                   \
        mov     $KDSEL,%ax ;                                            \
        mov     %ax,%ds ;                                               \
        mov     %ax,%es ;                                               \
        mov     $KPSEL,%ax ;                                            \
        mov     %ax,%fs ;                                               \

#define PUSH_DUMMY                                                      \
        pushfl ;                /* phys int frame / flags */            \
        pushl   %cs ;           /* phys int frame / cs */               \
        pushl   12(%esp) ;      /* original caller eip */               \
        pushl   $0 ;            /* dummy error code */                  \
        pushl   $0 ;            /* dummy trap type */                   \
        subl    $11*4,%esp ;    /* pushal + 3 seg regs (dummy) */       \

/*
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define POP_FRAME                                                       \
        popl    %fs ;                                                   \
        popl    %es ;                                                   \
        popl    %ds ;                                                   \
        popal ;                                                         \
        addl    $2*4,%esp ;     /* dummy trap & error codes */          \

#define POP_DUMMY                                                       \
        addl    $16*4,%esp ;                                            \

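/*
 * The frame built by PUSH_FRAME mirrors the i386 trapframe layout that
 * doreti expects.  A C sketch of the resulting stack image, from low
 * to high address (eip/cs/eflags are pushed by the interrupt itself);
 * the struct and field names are illustrative only:
 *
 *      struct int_frame {
 *              unsigned fs, es, ds;            // segments, pushed last
 *              unsigned edi, esi, ebp, isp;    // pushal ...
 *              unsigned ebx, edx, ecx, eax;
 *              unsigned trapno, err;           // the two dummy words
 *              unsigned eip, cs, eflags;       // hardware interrupt frame
 *      };
 *
 * This is also why FAKE_MCOUNT below uses 13*4(%esp): eip sits 13
 * words above the stack pointer once the frame is complete.
 */
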
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

/*
 * Interrupts are expected to already be disabled when using these
 * IMASK_*() macros.
 */
#define IMASK_LOCK                                                      \
        SPIN_LOCK(imen_spinlock) ;                                      \

#define IMASK_UNLOCK                                                    \
        SPIN_UNLOCK(imen_spinlock) ;                                    \

#define MASK_IRQ(irq_num)                                               \
        IMASK_LOCK ;                            /* into critical reg */ \
        testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
        jne     7f ;                    /* masked, don't mask */        \
        orl     $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */  \
        movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax, (%ecx) ;                  /* write the index */   \
        movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
        orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
        movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
7: ;                                            /* already masked */    \
        IMASK_UNLOCK ;                                                  \

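/*
 * In C terms, MASK_IRQ is an indirect register access on the I/O APIC:
 * write the redirection-entry index into the select register at the
 * chip's base, then read-modify-write the data ("window") register
 * 0x10 bytes above it.  A sketch, assuming the caller holds
 * imen_spinlock with interrupts disabled (mask_irq() itself is
 * hypothetical, not a function in this file):
 *
 *      extern volatile unsigned apic_imen;     // global IRQ mask image
 *
 *      static void
 *      mask_irq(volatile unsigned *ioapic, unsigned redir_idx, int irq)
 *      {
 *              if ((apic_imen & (1u << irq)) == 0) {
 *                      apic_imen |= 1u << irq;
 *                      ioapic[0] = redir_idx;          // select register
 *                      ioapic[4] |= IOART_INTMASK;     // window at +0x10
 *              }
 *      }
 *
 * UNMASK_IRQ below is the same access pattern with the test and the
 * mask bit inverted.
 */
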
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)                                         \
        testl   $IRQ_LBIT(irq_num), apic_pin_trigger ;                  \
        jz      9f ;                            /* edge, don't mask */  \
        MASK_IRQ(irq_num) ;                                             \
9: ;                                                                    \


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)                                                \
        movl    apic_isrbit_location + 8 * (irq_num), %eax ;            \
        movl    (%eax), %eax ;                                          \
        testl   apic_isrbit_location + 4 + 8 * (irq_num), %eax ;        \
        jz      9f ;                            /* not active */        \
        movl    $0, lapic_eoi ;                                         \
9:                                                                      \

#else

#define EOI_IRQ(irq_num)                                                \
        testl   $IRQ_LBIT(irq_num), lapic_isr1;                         \
        jz      9f ;                            /* not active */        \
        movl    $0, lapic_eoi;                                          \
9:                                                                      \

#endif
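
/*
 * The non-reordered EOI_IRQ in C, as a sketch: issue the EOI cycle only
 * if the local APIC's in-service register shows the source active
 * (lapic_isr1 is the ISR word covering the hardware IRQ vectors; any
 * write to the EOI register completes the cycle):
 *
 *      extern volatile unsigned lapic_isr1, lapic_eoi;
 *
 *      static void
 *      eoi_irq(int irq)
 *      {
 *              if (lapic_isr1 & (1u << irq))
 *                      lapic_eoi = 0;          // written value is ignored
 *      }
 */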

/*
 * Test to see if the source is currently masked, clear it if so.
 */
#define UNMASK_IRQ(irq_num)                                             \
        IMASK_LOCK ;                            /* into critical reg */ \
        testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
        je      7f ;                    /* bit clear, not masked */     \
        andl    $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */    \
        movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax,(%ecx) ;                   /* write the index */   \
        movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
        andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
        movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
7: ;                                                                    \
        IMASK_UNLOCK ;                                                  \

/*
 * Fast interrupt call handlers run in the following sequence:
 *
 *      - Push the trap frame required by doreti
 *      - Mask the interrupt and reenable its source
 *      - If we cannot take the interrupt set its fpending bit and
 *        doreti.
 *      - If we can take the interrupt clear its fpending bit,
 *        call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */

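/*
 * A C restatement of that sequence (a sketch; the names mirror the
 * asm's TD_PRI/TD_CPL/PCPU() accesses, and mask_and_eoi()/unmask_irq()
 * stand in for the macros above):
 *
 *      void
 *      fast_intr(int irq)
 *      {
 *              struct globaldata *gd = mycpu;  // what PCPU() resolves to
 *              thread_t td = gd->gd_curthread;
 *
 *              mask_and_eoi(irq);              // MASK_LEVEL_IRQ + EOI_IRQ
 *              if (td->td_pri >= TDPRI_CRIT || (td->td_cpl & (1 << irq))) {
 *                      gd->gd_fpending |= 1 << irq;    // run from doreti
 *                      gd->gd_reqpri = TDPRI_CRIT;     // later, still masked
 *              } else {
 *                      td->td_pri += TDPRI_CRIT;       // critical section
 *                      gd->gd_fpending &= ~(1 << irq);
 *                      intr_handler[irq](intr_unit[irq]);
 *                      td->td_pri -= TDPRI_CRIT;
 *                      unmask_irq(irq);                // reenable source
 *              }
 *              // both paths fall into doreti in the real code
 *      }
 */
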
#define FAST_INTR(irq_num, vec_name)                                    \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        PUSH_FRAME ;                                                    \
        FAKE_MCOUNT(13*4(%esp)) ;                                       \
        MASK_LEVEL_IRQ(irq_num) ;                                       \
        EOI_IRQ(irq_num) ;                                              \
        incl    PCPU(intr_nesting_level) ;                              \
        movl    PCPU(curthread),%ebx ;                                  \
        movl    TD_CPL(%ebx),%eax ;                                     \
        pushl   %eax ;                                                  \
        cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        jge     1f ;                                                    \
        testl   $IRQ_LBIT(irq_num), %eax ;                              \
        jz      2f ;                                                    \
1: ;                                                                    \
        /* set the pending bit and return, leave interrupt masked */    \
        orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
        movl    $TDPRI_CRIT, PCPU(reqpri) ;                             \
        jmp     5f ;                                                    \
2: ;                                                                    \
        /* clear pending bit, run handler */                            \
        addl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
        pushl   intr_unit + (irq_num) * 4 ;                             \
        call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
        addl    $4, %esp ;                                              \
        subl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
        movl    intr_countp + (irq_num) * 4, %eax ;                     \
        incl    (%eax) ;                                                \
        UNMASK_IRQ(irq_num) ;                                           \
5: ;                                                                    \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \

/*
 * Restart fast interrupt held up by critical section or cpl.
 *
 *      - Push a dummy trap frame as required by doreti
 *      - The interrupt source is already masked
 *      - Clear the fpending bit
 *      - Run the handler
 *      - Unmask the interrupt
 *      - Pop the dummy frame and do a normal return
 *
 *      YYY can cache gd base pointer instead of using hidden %fs
 *      prefixes.
 */

#define FAST_UNPEND(irq_num, vec_name)                                  \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        pushl   %ebp ;                                                  \
        movl    %esp,%ebp ;                                             \
        PUSH_DUMMY ;                                                    \
        pushl   intr_unit + (irq_num) * 4 ;                             \
        call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
        addl    $4, %esp ;                                              \
        incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
        movl    intr_countp + (irq_num) * 4, %eax ;                     \
        incl    (%eax) ;                                                \
        UNMASK_IRQ(irq_num) ;                                           \
        POP_DUMMY ;                                                     \
        popl    %ebp ;                                                  \
        ret ;                                                           \

/*
 * Slow interrupt call handlers run in the following sequence:
 *
 *      - Push the trap frame required by doreti.
 *      - Mask the interrupt and reenable its source.
 *      - If we cannot take the interrupt set its ipending bit and
 *        doreti.  In addition to checking for a critical section
 *        and cpl mask we also check to see if the thread is still
 *        running.
 *      - If we can take the interrupt clear its ipending bit,
 *        set its irunning bit, and schedule the thread.  Leave
 *        interrupts masked and doreti.
 *
 *      The interrupt thread will run its handlers and loop if
 *      ipending is found to be set.  ipending/irunning interlock
 *      the interrupt thread with the interrupt.  The handler calls
 *      UNPEND when it is through.
 *
 *      Note that interrupts are enabled (sti) just prior to the call
 *      to sched_ithd.
 *      YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
 *
 *      YYY can cache gd base pointer instead of using hidden %fs
 *      prefixes.
 */

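/*
 * The same decision in C sketch form, using the same hypothetical
 * helpers and names as the fast-interrupt sketch above:
 *
 *      void
 *      slow_intr(int irq)
 *      {
 *              struct globaldata *gd = mycpu;
 *              thread_t td = gd->gd_curthread;
 *
 *              mask_and_eoi(irq);
 *              if (td->td_pri >= TDPRI_CRIT ||         // critical section
 *                  (gd->gd_irunning & (1 << irq)) ||   // ithread still runs
 *                  (td->td_cpl & (1 << irq))) {        // masked by cpl
 *                      gd->gd_ipending |= 1 << irq;    // doreti unpends
 *                      gd->gd_reqpri = TDPRI_CRIT;
 *              } else {
 *                      td->td_pri += TDPRI_CRIT;
 *                      gd->gd_irunning |= 1 << irq;    // ithread interlock
 *                      gd->gd_ipending &= ~(1 << irq);
 *                      sched_ithd(irq);        // schedule the ithread
 *                      td->td_pri -= TDPRI_CRIT;
 *              }
 *              // the source stays masked; the ithread unmasks it via
 *              // INTR_UNMASK when it is done
 *      }
 */
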
#define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        PUSH_FRAME ;                                                    \
        maybe_extra_ipending ;                                          \
;                                                                       \
        MASK_LEVEL_IRQ(irq_num) ;                                       \
        EOI_IRQ(irq_num) ;                                              \
        incl    PCPU(intr_nesting_level) ;                              \
        movl    PCPU(curthread),%ebx ;                                  \
        movl    TD_CPL(%ebx),%eax ;                                     \
        pushl   %eax ;          /* cpl to restore */                    \
        cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        jge     1f ;                                                    \
        testl   $IRQ_LBIT(irq_num),PCPU(irunning) ;                     \
        jnz     1f ;                                                    \
        testl   $IRQ_LBIT(irq_num),%eax ;                               \
        jz      2f ;                                                    \
1: ;                                                                    \
        /* set the pending bit and return, leave the interrupt masked */ \
        orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
        movl    $TDPRI_CRIT, PCPU(reqpri) ;                             \
        jmp     5f ;                                                    \
2: ;                                                                    \
        addl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        /* set running bit, clear pending bit, run handler */           \
        orl     $IRQ_LBIT(irq_num), PCPU(irunning) ;                    \
        andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
        sti ;                                                           \
        pushl   $irq_num ;                                              \
        call    sched_ithd ;                                            \
        addl    $4,%esp ;                                               \
        subl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        incl    PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */  \
        movl    intr_countp + (irq_num) * 4,%eax ;                      \
        incl    (%eax) ;                                                \
5: ;                                                                    \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \

/*
 * Unmask a slow interrupt.  This function is used by interrupt threads
 * after they have descheduled themselves to reenable interrupts and
 * possibly cause a reschedule to occur.  The interrupt's irunning bit
 * is cleared prior to unmasking.
 */

#define INTR_UNMASK(irq_num, vec_name, icu)                             \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        pushl   %ebp ;          /* frame for ddb backtrace */           \
        movl    %esp, %ebp ;                                            \
        andl    $~IRQ_LBIT(irq_num), PCPU(irunning) ;                   \
        UNMASK_IRQ(irq_num) ;                                           \
        popl    %ebp ;                                                  \
        ret ;                                                           \

#if 0
        /* XXX forward_irq to cpu holding the BGL? */

        ALIGN_TEXT ;                                                    \
3: ;                    /* other cpu has isr lock */                    \
        lock ;                                                          \
        orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
        movl    $TDPRI_CRIT,_reqpri ;                                   \
        testl   $IRQ_LBIT(irq_num), TD_CPL(%ebx) ;                      \
        jne     4f ;                            /* this INT masked */   \
        call    forward_irq ;   /* forward irq to lock holder */        \
        POP_FRAME ;                             /* and return */        \
        iret ;                                                          \
        ALIGN_TEXT ;                                                    \
4: ;                                            /* blocked */           \
        POP_FRAME ;                             /* and return */        \
        iret

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */

#endif

        .text
        SUPERALIGN_TEXT
        .globl Xspuriousint
Xspuriousint:

        /* No EOI cycle used here */

        iret


/*
 * Handle TLB shootdowns.
 */
        .text
        SUPERALIGN_TEXT
        .globl  Xinvltlb
Xinvltlb:
        pushl   %eax

#ifdef COUNT_XINVLTLB_HITS
        pushl   %fs
        movl    $KPSEL, %eax
        mov     %ax, %fs
        movl    PCPU(cpuid), %eax
        popl    %fs
        ss
        incl    xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

        movl    %cr3, %eax              /* invalidate the TLB */
        movl    %eax, %cr3

        ss                              /* stack segment, avoid %ds load */
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        popl    %eax
        iret

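/*
 * The shootdown itself is just a CR3 reload, which flushes all
 * (non-global) TLB entries on i386.  In C this is the classic
 * invltlb() idiom, sketched with gcc inline assembly:
 *
 *      static __inline void
 *      invltlb(void)
 *      {
 *              unsigned int temp;
 *
 *              __asm __volatile("movl %%cr3, %0; movl %0, %%cr3"
 *                  : "=r" (temp) : : "memory");
 *      }
 */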

#if 0
#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

        .text
        SUPERALIGN_TEXT
        .globl Xcpucheckstate
        .globl checkstate_cpustate
        .globl checkstate_curproc
        .globl checkstate_pc
Xcpucheckstate:
        pushl   %eax
        pushl   %ebx
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        movl    $0, %ebx
        movl    20(%esp), %eax
        andl    $3, %eax
        cmpl    $3, %eax
        je      1f
        testl   $PSL_VM, 24(%esp)
        jne     1f
        incl    %ebx                    /* system or interrupt */
1:
        movl    PCPU(cpuid), %eax
        movl    %ebx, checkstate_cpustate(,%eax,4)
        movl    PCPU(curthread), %ebx
        movl    TD_PROC(%ebx),%ebx
        movl    %ebx, checkstate_curproc(,%eax,4)
        movl    16(%esp), %ebx
        movl    %ebx, checkstate_pc(,%eax,4)

        lock                            /* checkstate_probed_cpus |= (1<<id) */
        btsl    %eax, checkstate_probed_cpus

        popl    %fs
        popl    %ds                     /* restore previous data segment */
        popl    %ebx
        popl    %eax
        iret

#endif /* BETTER_CLOCK */
#endif

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *  - MP safe in regards to setting AST_PENDING because doreti is in
 *    a cli mode when it checks.
 */

        .text
        SUPERALIGN_TEXT
        .globl Xcpuast
Xcpuast:
        PUSH_FRAME

        movl    PCPU(cpuid), %eax
        lock                            /* checkstate_need_ast &= ~(1<<id) */
        btrl    %eax, checkstate_need_ast
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        lock
        btsl    %eax, checkstate_pending_ast
        jc      1f

        FAKE_MCOUNT(13*4(%esp))

        movl    PCPU(curthread), %eax
        pushl   TD_CPL(%eax)            /* cpl restored by doreti */

        orl     $AST_PENDING, PCPU(astpending)  /* XXX */
        incb    PCPU(intr_nesting_level)
        sti

        movl    PCPU(cpuid), %eax
        lock
        btrl    %eax, checkstate_pending_ast
        lock
        btrl    %eax, CNAME(resched_cpus)
        jnc     2f
        orl     $AST_PENDING+AST_RESCHED,PCPU(astpending)
2:
        MEXITCOUNT
        jmp     doreti
1:
        /* We are already in the process of delivering an ast for this CPU */
        POP_FRAME
        iret


/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

        .text
        SUPERALIGN_TEXT
        .globl Xforward_irq
Xforward_irq:
        PUSH_FRAME

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        FAKE_MCOUNT(13*4(%esp))

        call    try_mplock
        testl   %eax,%eax               /* Did we get the lock ? */
        jz      1f                      /* No */

        incl    PCPU(cnt)+V_FORWARDED_HITS

        movl    PCPU(curthread), %eax
        pushl   TD_CPL(%eax)            /* cpl restored by doreti */

        incb    PCPU(intr_nesting_level)
        sti

        MEXITCOUNT
        jmp     doreti                  /* Handle forwarded interrupt */
1:
        incl    PCPU(cnt)+V_FORWARDED_MISSES
        call    forward_irq             /* Oops, we've lost the isr lock */
        MEXITCOUNT
        POP_FRAME
        iret
3:
        call    rel_mplock
        MEXITCOUNT
        POP_FRAME
        iret

/*
 * Forward a hardware interrupt to the cpu holding the MP lock (cpu #0
 * if the lock is free) by sending it an XFORWARD_IRQ IPI.
 */
forward_irq:
        MCOUNT
        cmpl    $0,invltlb_ok
        jz      4f

        cmpl    $0, CNAME(forward_irq_enabled)
        jz      4f

        movl    mp_lock,%eax
        cmpl    $MP_FREE_LOCK,%eax
        jne     1f
        movl    $0, %eax                /* Pick CPU #0 if no one has lock */
1:
        shrl    $24,%eax
        movl    cpu_num_to_apic_id(,%eax,4),%ecx
        shll    $24,%ecx
        movl    lapic_icr_hi, %eax
        andl    $~APIC_ID_MASK, %eax
        orl     %ecx, %eax
        movl    %eax, lapic_icr_hi

2:
        movl    lapic_icr_lo, %eax
        andl    $APIC_DELSTAT_MASK,%eax
        jnz     2b
        movl    lapic_icr_lo, %eax
        andl    $APIC_RESV2_MASK, %eax
        orl     $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
        movl    %eax, lapic_icr_lo
3:
        movl    lapic_icr_lo, %eax
        andl    $APIC_DELSTAT_MASK,%eax
        jnz     3b
4:
        ret

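/*
 * The ICR handshake above, restated in C (a sketch; the register names
 * match the asm, send_fixed_ipi() itself is hypothetical):
 *
 *      extern volatile unsigned lapic_icr_hi, lapic_icr_lo;
 *
 *      static void
 *      send_fixed_ipi(unsigned apic_id, unsigned vector)
 *      {
 *              // target the destination field with the APIC id
 *              lapic_icr_hi = (lapic_icr_hi & ~APIC_ID_MASK) |
 *                  (apic_id << 24);
 *              while (lapic_icr_lo & APIC_DELSTAT_MASK)
 *                      ;               // wait for any previous IPI
 *              lapic_icr_lo = (lapic_icr_lo & APIC_RESV2_MASK) |
 *                  APIC_DEST_DESTFLD | APIC_DELMODE_FIXED | vector;
 *              while (lapic_icr_lo & APIC_DELSTAT_MASK)
 *                      ;               // wait for delivery to complete
 *      }
 */
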
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

        .text
        SUPERALIGN_TEXT
        .globl Xcpustop
Xcpustop:
        pushl   %ebp
        movl    %esp, %ebp
        pushl   %eax
        pushl   %ecx
        pushl   %edx
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        movl    PCPU(cpuid), %eax
        imull   $PCB_SIZE, %eax
        leal    CNAME(stoppcbs)(%eax), %eax
        pushl   %eax
        call    CNAME(savectx)          /* Save process context */
        addl    $4, %esp

        movl    PCPU(cpuid), %eax

        lock
        btsl    %eax, stopped_cpus      /* stopped_cpus |= (1<<id) */
1:
        btl     %eax, started_cpus      /* while (!(started_cpus & (1<<id))) */
        jnc     1b

        lock
        btrl    %eax, started_cpus      /* started_cpus &= ~(1<<id) */
        lock
        btrl    %eax, stopped_cpus      /* stopped_cpus &= ~(1<<id) */

        test    %eax, %eax
        jnz     2f

        movl    CNAME(cpustop_restartfunc), %eax
        test    %eax, %eax
        jz      2f
        movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */

        call    *%eax
2:
        popl    %fs
        popl    %ds                     /* restore previous data segment */
        popl    %edx
        popl    %ecx
        popl    %eax
        movl    %ebp, %esp
        popl    %ebp
        iret

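/*
 * The stop/restart handshake in C sketch form (cpustop_handler() and
 * atomic_set_int()/atomic_clear_int() are assumed helpers; the
 * variables are the ones declared in the .data section at the end of
 * this file):
 *
 *      void
 *      cpustop_handler(void)
 *      {
 *              int cpu = mycpu->gd_cpuid;
 *
 *              savectx(&stoppcbs[cpu]);        // save our context
 *              atomic_set_int(&stopped_cpus, 1 << cpu); // signal receipt
 *              while ((started_cpus & (1 << cpu)) == 0)
 *                      ;                       // spin until restarted
 *              atomic_clear_int(&started_cpus, 1 << cpu);
 *              atomic_clear_int(&stopped_cpus, 1 << cpu);
 *              // only cpu 0 runs the one-shot restart function
 *              if (cpu == 0 && cpustop_restartfunc != NULL) {
 *                      void (*f)(void) = cpustop_restartfunc;
 *                      cpustop_restartfunc = NULL;
 *                      f();
 *              }
 *      }
 */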

MCOUNT_LABEL(bintr)
        FAST_INTR(0,fastintr0)
        FAST_INTR(1,fastintr1)
        FAST_INTR(2,fastintr2)
        FAST_INTR(3,fastintr3)
        FAST_INTR(4,fastintr4)
        FAST_INTR(5,fastintr5)
        FAST_INTR(6,fastintr6)
        FAST_INTR(7,fastintr7)
        FAST_INTR(8,fastintr8)
        FAST_INTR(9,fastintr9)
        FAST_INTR(10,fastintr10)
        FAST_INTR(11,fastintr11)
        FAST_INTR(12,fastintr12)
        FAST_INTR(13,fastintr13)
        FAST_INTR(14,fastintr14)
        FAST_INTR(15,fastintr15)
        FAST_INTR(16,fastintr16)
        FAST_INTR(17,fastintr17)
        FAST_INTR(18,fastintr18)
        FAST_INTR(19,fastintr19)
        FAST_INTR(20,fastintr20)
        FAST_INTR(21,fastintr21)
        FAST_INTR(22,fastintr22)
        FAST_INTR(23,fastintr23)

        /* YYY what is this garbage? */
#define CLKINTR_PENDING                                                 \
        call    clock_lock ;                                            \
        movl    $1,CNAME(clkintr_pending) ;                             \
        call    clock_unlock ;                                          \

        INTR(0,intr0, CLKINTR_PENDING)
        INTR(1,intr1,)
        INTR(2,intr2,)
        INTR(3,intr3,)
        INTR(4,intr4,)
        INTR(5,intr5,)
        INTR(6,intr6,)
        INTR(7,intr7,)
        INTR(8,intr8,)
        INTR(9,intr9,)
        INTR(10,intr10,)
        INTR(11,intr11,)
        INTR(12,intr12,)
        INTR(13,intr13,)
        INTR(14,intr14,)
        INTR(15,intr15,)
        INTR(16,intr16,)
        INTR(17,intr17,)
        INTR(18,intr18,)
        INTR(19,intr19,)
        INTR(20,intr20,)
        INTR(21,intr21,)
        INTR(22,intr22,)
        INTR(23,intr23,)

        FAST_UNPEND(0,fastunpend0)
        FAST_UNPEND(1,fastunpend1)
        FAST_UNPEND(2,fastunpend2)
        FAST_UNPEND(3,fastunpend3)
        FAST_UNPEND(4,fastunpend4)
        FAST_UNPEND(5,fastunpend5)
        FAST_UNPEND(6,fastunpend6)
        FAST_UNPEND(7,fastunpend7)
        FAST_UNPEND(8,fastunpend8)
        FAST_UNPEND(9,fastunpend9)
        FAST_UNPEND(10,fastunpend10)
        FAST_UNPEND(11,fastunpend11)
        FAST_UNPEND(12,fastunpend12)
        FAST_UNPEND(13,fastunpend13)
        FAST_UNPEND(14,fastunpend14)
        FAST_UNPEND(15,fastunpend15)
        FAST_UNPEND(16,fastunpend16)
        FAST_UNPEND(17,fastunpend17)
        FAST_UNPEND(18,fastunpend18)
        FAST_UNPEND(19,fastunpend19)
        FAST_UNPEND(20,fastunpend20)
        FAST_UNPEND(21,fastunpend21)
        FAST_UNPEND(22,fastunpend22)
        FAST_UNPEND(23,fastunpend23)
MCOUNT_LABEL(eintr)

        /*
         * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
         *
         * - Calls the generic rendezvous action function.
         */
        .text
        SUPERALIGN_TEXT
        .globl  Xrendezvous
Xrendezvous:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        call    smp_rendezvous_action

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
        POP_FRAME
        iret


        .data

#if 0
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
        .globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:      doreti_unpend
 */
        .long   Xresume0,  Xresume1,  Xresume2,  Xresume3
        .long   Xresume4,  Xresume5,  Xresume6,  Xresume7
        .long   Xresume8,  Xresume9,  Xresume10, Xresume11
        .long   Xresume12, Xresume13, Xresume14, Xresume15
        .long   Xresume16, Xresume17, Xresume18, Xresume19
        .long   Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:      doreti_unpend
 *  apic_ipl.s: splz_unpend
 */
        .long   _swi_null, swi_net, _swi_null, _swi_null
        .long   _swi_vm, _swi_null, _softclock

imasks:                         /* masks for interrupt handlers */
        .space  NHWI*4          /* padding; HWI masks are elsewhere */

        .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
        .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
#endif  /* 0 */


#ifdef COUNT_XINVLTLB_HITS
        .globl  xhits
xhits:
        .space  (NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
        .globl stopped_cpus, started_cpus
stopped_cpus:
        .long   0
started_cpus:
        .long   0

#ifdef BETTER_CLOCK
        .globl checkstate_probed_cpus
checkstate_probed_cpus:
        .long   0
#endif /* BETTER_CLOCK */
        .globl checkstate_need_ast
checkstate_need_ast:
        .long   0
checkstate_pending_ast:
        .long   0
        .globl CNAME(resched_cpus)
        .globl CNAME(cpustop_restartfunc)
CNAME(resched_cpus):
        .long   0
CNAME(cpustop_restartfunc):
        .long   0


        .globl  apic_pin_trigger
apic_pin_trigger:
        .long   0

        .text