Add the DragonFly cvs id and perform general cleanups on cvs/rcs/sccs ids. Most
[dragonfly.git] / sys / i386 / apic / apic_vector.s
1 /*
2  *      from: vector.s, 386BSD 0.1 unknown origin
3  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4  * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.2 2003/06/17 04:28:36 dillon Exp $
5  */
6
7
8 #include <machine/apic.h>
9 #include <machine/smp.h>
10
11 #include "i386/isa/intr_machdep.h"
12
/* convert an absolute IRQ# into a bitmask (bit irq_num set) */
#define IRQ_BIT(irq_num)        (1 << (irq_num))

/*
 * make an index into the IO APIC from the IRQ#: redirection table
 * entries start at register 0x10 and each occupies two 32-bit registers.
 */
#define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
18
19
/*
 * Macros for fast interrupt entry, call to handler, and exit.
 *
 * FAST_INTR(irq_num, vec_name): IDT entry for a "fast" interrupt.
 * Saves only the call-used registers (plus %ds/%es as configured and
 * %fs), loads KDSEL into %ds (and optionally %es) and KPSEL into %fs,
 * calls the unit's handler immediately, then writes the local APIC EOI
 * register, bumps the interrupt counters (lock-prefixed, MP-safe) and
 * returns with iret.  No _doreti/AST processing is performed here.
 */

#define FAST_INTR(irq_num, vec_name)                                    \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        pushl   %eax ;          /* save only call-used registers */     \
        pushl   %ecx ;                                                  \
        pushl   %edx ;                                                  \
        pushl   %ds ;                                                   \
        MAYBE_PUSHL_ES ;                                                \
        pushl   %fs ;                                                   \
        movl    $KDSEL,%eax ;                                           \
        mov     %ax,%ds ;                                               \
        MAYBE_MOVW_AX_ES ;                                              \
        movl    $KPSEL,%eax ;                                           \
        mov     %ax,%fs ;                                               \
        FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;                      \
        pushl   _intr_unit + (irq_num) * 4 ;                            \
        call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
        addl    $4, %esp ;                                              \
        movl    $0, lapic_eoi ;                                         \
        lock ;                                                          \
        incl    _cnt+V_INTR ;   /* book-keeping can wait */             \
        movl    _intr_countp + (irq_num) * 4, %eax ;                    \
        lock ;                                                          \
        incl    (%eax) ;                                                \
        MEXITCOUNT ;                                                    \
        popl    %fs ;                                                   \
        MAYBE_POPL_ES ;                                                 \
        popl    %ds ;                                                   \
        popl    %edx ;                                                  \
        popl    %ecx ;                                                  \
        popl    %eax ;                                                  \
        iret
57
/*
 * PUSH_FRAME/POP_FRAME: build / tear down a full trap frame: dummy
 * error code and trap type words, all general registers (pushal), and
 * the %ds/%es/%fs segment registers.  POP_FRAME's final "addl $4+4"
 * discards the two dummy words pushed by PUSH_FRAME.
 */
#define PUSH_FRAME                                                      \
        pushl   $0 ;            /* dummy error code */                  \
        pushl   $0 ;            /* dummy trap type */                   \
        pushal ;                                                        \
        pushl   %ds ;           /* save data and extra segments ... */  \
        pushl   %es ;                                                   \
        pushl   %fs

#define POP_FRAME                                                       \
        popl    %fs ;                                                   \
        popl    %es ;                                                   \
        popl    %ds ;                                                   \
        popal ;                                                         \
        addl    $4+4,%esp
75
/*
 * IOAPICADDR/REDIRIDX: for a given IRQ, fetch the I/O APIC base
 * address and the redirection-table select index from the 16-byte
 * int_to_apicintpin[] entries (at offsets 8 and 12 respectively).
 */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
        
/*
 * MASK_IRQ(irq_num): under IMASK_LOCK, set the IRQ's bit in apic_imen
 * and set IOART_INTMASK in its I/O APIC redirection entry (select the
 * entry through the index register, then RMW through the window
 * register).  If the bit was already set the hardware update is skipped.
 */
#define MASK_IRQ(irq_num)                                               \
        IMASK_LOCK ;                            /* into critical reg */ \
        testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
        jne     7f ;                    /* masked, don't mask */        \
        orl     $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */  \
        movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax, (%ecx) ;                  /* write the index */   \
        movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
        orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
        movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
7: ;                                            /* already masked */    \
        IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 *  (A set bit in apic_pin_trigger marks the pin as level-triggered;
 *  edge-triggered pins are left unmasked.)
 */
#define MASK_LEVEL_IRQ(irq_num)                                         \
        testl   $IRQ_BIT(irq_num), _apic_pin_trigger ;                  \
        jz      9f ;                            /* edge, don't mask */  \
        MASK_IRQ(irq_num) ;                                             \
9:
102
103
/*
 * EOI_IRQ(irq_num): write the local APIC EOI register, but only if the
 * IRQ is currently marked in-service, so we never EOI an inactive
 * source.  With APIC_INTR_REORDER the ISR word address and bitmask are
 * taken from the apic_isrbit_location[] table (8 bytes per IRQ:
 * pointer, then mask); otherwise the bit is tested directly in
 * lapic_isr1.
 */
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)                                                \
        movl    _apic_isrbit_location + 8 * (irq_num), %eax ;           \
        movl    (%eax), %eax ;                                          \
        testl   _apic_isrbit_location + 4 + 8 * (irq_num), %eax ;       \
        jz      9f ;                            /* not active */        \
        movl    $0, lapic_eoi ;                                         \
        APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
9:

#else
#define EOI_IRQ(irq_num)                                                \
        testl   $IRQ_BIT(irq_num), lapic_isr1;                          \
        jz      9f      ;                       /* not active */        \
        movl    $0, lapic_eoi;                                          \
        APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
9:
#endif
122         
123         
/*
 * Test to see if the source is currently masked, clear if so.
 *
 * UNMASK_IRQ(irq_num): the inverse of MASK_IRQ.  Under IMASK_LOCK,
 * clear the IRQ's bit in apic_imen and clear IOART_INTMASK in its
 * I/O APIC redirection entry.  If the bit was already clear the
 * hardware update is skipped.
 */
#define UNMASK_IRQ(irq_num)                                     \
        IMASK_LOCK ;                            /* into critical reg */ \
        testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
        je      7f ;                    /* bit clear, not masked */     \
        andl    $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */    \
        movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax,(%ecx) ;                   /* write the index */   \
        movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
        andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
        movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
7: ;                                                                    \
        IMASK_UNLOCK
140
#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
/*
 * log_intr_event: append a 16-bit record ((cpuid << 8) | event id) to
 * the 32K-entry circular trace buffer apic_itrace_debugbuffer, under
 * apic_itrace_debuglock with interrupts disabled (pushf/cli ...popf).
 * The event id argument is pushed by the caller and read at 8(%esp)
 * (below the saved flags and return address).
 */
log_intr_event:
        pushf
        cli
        pushl   $CNAME(apic_itrace_debuglock)
        call    CNAME(s_lock_np)
        addl    $4, %esp
        movl    CNAME(apic_itrace_debugbuffer_idx), %ecx
        andl    $32767, %ecx                    /* index mod 32768 */
        movl    _cpuid, %eax
        shll    $8,     %eax
        orl     8(%esp), %eax                   /* (cpuid << 8) | id */
        movw    %ax,    CNAME(apic_itrace_debugbuffer)(,%ecx,2)
        incl    %ecx
        andl    $32767, %ecx                    /* wrap the index */
        movl    %ecx,   CNAME(apic_itrace_debugbuffer_idx)
        pushl   $CNAME(apic_itrace_debuglock)
        call    CNAME(s_unlock_np)
        addl    $4, %esp
        popf
        ret
        

/*
 * APIC_ITRACE(name, irq_num, id): atomically bump the per-IRQ counter
 * CNAME(name)[irq_num]; additionally, for the single IRQ selected by
 * APIC_INTR_DIAGNOSTIC_IRQ, log the event id via log_intr_event.
 * Preserves %eax/%ecx/%edx around the call.
 */
#define APIC_ITRACE(name, irq_num, id)                                  \
        lock ;                                  /* MP-safe */           \
        incl    CNAME(name) + (irq_num) * 4 ;                           \
        pushl   %eax ;                                                  \
        pushl   %ecx ;                                                  \
        pushl   %edx ;                                                  \
        movl    $(irq_num), %eax ;                                      \
        cmpl    $APIC_INTR_DIAGNOSTIC_IRQ, %eax ;                       \
        jne     7f ;                                                    \
        pushl   $id ;                                                   \
        call    log_intr_event ;                                        \
        addl    $4, %esp ;                                              \
7: ;                                                                    \
        popl    %edx ;                                                  \
        popl    %ecx ;                                                  \
        popl    %eax
#else
/* Counting-only variant: no per-event logging. */
#define APIC_ITRACE(name, irq_num, id)                                  \
        lock ;                                  /* MP-safe */           \
        incl    CNAME(name) + (irq_num) * 4
#endif

/* Event ids recorded in the trace buffer. */
#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13   
        
#else   
/* Diagnostics disabled: APIC_ITRACE expands to nothing. */
#define APIC_ITRACE(name, irq_num, id)
#endif
204                 
/*
 * INTR(irq_num, vec_name, maybe_extra_ipending): slow interrupt entry.
 *
 * Builds a full trap frame, loads the kernel segments, then uses the
 * iactive bitmask for lazy masking: if the IRQ was not already active,
 * level-triggered sources are masked, the EOI is issued, and the MP
 * (giant) lock is tried.  With the lock held and the IRQ not blocked
 * by the current cpl, the handler is called with interrupts enabled
 * and exit goes through _doreti (which pops the saved cpl).  The
 * failure paths record the IRQ in _ipending and either retry (1:),
 * return leaving iactive set (2:), or forward the IRQ to the cpu that
 * holds the MP lock (3:/4:).  __CONCAT(Xresume,irq_num) is the
 * re-entry point used by doreti_unpend.
 */
#define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */  \
IDTVEC(vec_name) ;                                                      \
        PUSH_FRAME ;                                                    \
        movl    $KDSEL, %eax ;  /* reload with kernel's data segment */ \
        mov     %ax, %ds ;                                              \
        mov     %ax, %es ;                                              \
        movl    $KPSEL, %eax ;                                          \
        mov     %ax, %fs ;                                              \
;                                                                       \
        maybe_extra_ipending ;                                          \
;                                                                       \
        APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;    \
        lock ;                                  /* MP-safe */           \
        btsl    $(irq_num), iactive ;           /* lazy masking */      \
        jc      1f ;                            /* already active */    \
;                                                                       \
        MASK_LEVEL_IRQ(irq_num) ;                                       \
        EOI_IRQ(irq_num) ;                                              \
0: ;                                                                    \
        APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
        MP_TRYLOCK ;            /* XXX this is going away... */         \
        testl   %eax, %eax ;                    /* did we get it? */    \
        jz      3f ;                            /* no */                \
;                                                                       \
        APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
        testl   $IRQ_BIT(irq_num), _cpl ;                               \
        jne     2f ;                            /* this INT masked */   \
;                                                                       \
        incb    _intr_nesting_level ;                                   \
;                                                                       \
  /* entry point used by doreti_unpend for HWIs. */                     \
__CONCAT(Xresume,irq_num): ;                                            \
        FAKE_MCOUNT(13*4(%esp)) ;               /* XXX avoid dbl cnt */ \
        lock ;  incl    _cnt+V_INTR ;           /* tally interrupts */  \
        movl    _intr_countp + (irq_num) * 4, %eax ;                    \
        lock ;  incl    (%eax) ;                                        \
;                                                                       \
        movl    _cpl, %eax ;                                            \
        pushl   %eax ;          /* save cpl for doreti */               \
        orl     _intr_mask + (irq_num) * 4, %eax ;                      \
        movl    %eax, _cpl ;                                            \
        lock ;                                                          \
        andl    $~IRQ_BIT(irq_num), _ipending ;                         \
;                                                                       \
        pushl   _intr_unit + (irq_num) * 4 ;                            \
        APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;  \
        sti ;                                                           \
        call    *_intr_handler + (irq_num) * 4 ;                        \
        cli ;                                                           \
        APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;    \
;                                                                       \
        lock ;  andl    $~IRQ_BIT(irq_num), iactive ;                   \
        UNMASK_IRQ(irq_num) ;                                           \
        APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;  \
        sti ;                           /* doreti repeats cli/sti */    \
        MEXITCOUNT ;                                                    \
        jmp     _doreti ;                                               \
;                                                                       \
        ALIGN_TEXT ;                                                    \
1: ;                                            /* active  */           \
        APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;  \
        MASK_IRQ(irq_num) ;                                             \
        EOI_IRQ(irq_num) ;                                              \
        lock ;                                                          \
        orl     $IRQ_BIT(irq_num), _ipending ;                          \
        lock ;                                                          \
        btsl    $(irq_num), iactive ;           /* still active */      \
        jnc     0b ;                            /* retry */             \
        POP_FRAME ;                                                     \
        iret ;          /* XXX:  iactive bit might be 0 now */          \
        ALIGN_TEXT ;                                                    \
2: ;                            /* masked by cpl, leave iactive set */  \
        APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;  \
        lock ;                                                          \
        orl     $IRQ_BIT(irq_num), _ipending ;                          \
        MP_RELLOCK ;                                                    \
        POP_FRAME ;                                                     \
        iret ;                                                          \
        ALIGN_TEXT ;                                                    \
3: ;                    /* other cpu has isr lock */                    \
        APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
        lock ;                                                          \
        orl     $IRQ_BIT(irq_num), _ipending ;                          \
        testl   $IRQ_BIT(irq_num), _cpl ;                               \
        jne     4f ;                            /* this INT masked */   \
        call    forward_irq ;    /* forward irq to lock holder */       \
        POP_FRAME ;                             /* and return */        \
        iret ;                                                          \
        ALIGN_TEXT ;                                                    \
4: ;                                            /* blocked */           \
        APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
        POP_FRAME ;                             /* and return */        \
        iret
301
/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 *  No state is saved or touched; we simply return.
 */
        .text
        SUPERALIGN_TEXT
        .globl _Xspuriousint
_Xspuriousint:

        /* No EOI cycle used here */

        iret
317
318
/*
 * Handle TLB shootdowns.
 *
 * Invalidates the entire TLB by reloading %cr3 with its current value,
 * then EOIs the local APIC.  Uses %ss-override addressing so that %ds
 * never has to be reloaded; only %eax is saved/restored.
 */
        .text
        SUPERALIGN_TEXT
        .globl  _Xinvltlb
_Xinvltlb:
        pushl   %eax

#ifdef COUNT_XINVLTLB_HITS
        pushl   %fs
        movl    $KPSEL, %eax            /* need %fs briefly for _cpuid */
        mov     %ax, %fs
        movl    _cpuid, %eax
        popl    %fs
        ss
        incl    _xhits(,%eax,4)         /* per-cpu hit counter */
#endif /* COUNT_XINVLTLB_HITS */

        movl    %cr3, %eax              /* invalidate the TLB */
        movl    %eax, %cr3

        ss                              /* stack segment, avoid %ds load */
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        popl    %eax
        iret
346
347
#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xcpucheckstate
        .globl _checkstate_cpustate
        .globl _checkstate_curproc
        .globl _checkstate_pc
_Xcpucheckstate:
        pushl   %eax
        pushl   %ebx            
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        movl    $0, %ebx                /* assume user mode */
        movl    20(%esp), %eax          /* interrupted %cs */
        andl    $3, %eax                /* RPL of interrupted code */
        cmpl    $3, %eax
        je      1f                      /* RPL 3 -> user */
        testl   $PSL_VM, 24(%esp)       /* vm86 also counts as user */
        jne     1f
        incl    %ebx                    /* system or interrupt */
1:      
        movl    _cpuid, %eax
        movl    %ebx, _checkstate_cpustate(,%eax,4)
        movl    _curproc, %ebx
        movl    %ebx, _checkstate_curproc(,%eax,4)
        movl    16(%esp), %ebx          /* interrupted %eip */
        movl    %ebx, _checkstate_pc(,%eax,4)

        lock                            /* checkstate_probed_cpus |= (1<<id) */
        btsl    %eax, _checkstate_probed_cpus

        popl    %fs
        popl    %ds                     /* restore previous data segment */
        popl    %ebx
        popl    %eax
        iret

#endif /* BETTER_CLOCK */
407
/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 *
 * If an ast delivery is already pending for this cpu
 * (checkstate_pending_ast bit already set) we just EOI and return.
 * Otherwise we take the giant lock, mark AST_PENDING (and AST_RESCHED
 * if this cpu is in resched_cpus), and exit through _doreti with a
 * saved cpl and a dummy unit pushed.
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xcpuast
_Xcpuast:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    _cpuid, %eax
        lock                            /* checkstate_need_ast &= ~(1<<id) */
        btrl    %eax, _checkstate_need_ast
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        lock
        btsl    %eax, _checkstate_pending_ast
        jc      1f                      /* ast already being delivered */

        FAKE_MCOUNT(13*4(%esp))

        /* 
         * Giant locks do not come cheap.
         * A lot of cycles are going to be wasted here.
         */
        call    _get_mplock

        movl    _cpl, %eax
        pushl   %eax                    /* cpl saved for doreti */
        orl     $AST_PENDING, _astpending       /* XXX */
        incb    _intr_nesting_level
        sti
        
        pushl   $0                      /* dummy unit for doreti */
        
        movl    _cpuid, %eax
        lock    
        btrl    %eax, _checkstate_pending_ast
        lock    
        btrl    %eax, CNAME(resched_cpus)
        jnc     2f                      /* no resched requested */
        orl     $AST_PENDING+AST_RESCHED,_astpending
        lock
        incl    CNAME(want_resched_cnt)
2:              
        lock
        incl    CNAME(cpuast_cnt)
        MEXITCOUNT
        jmp     _doreti
1:
        /* We are already in the process of delivering an ast for this CPU */
        POP_FRAME
        iret                    
470
471
/*
 *       Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 *
 * Tries the MP (isr) lock: if acquired and we are not nested too
 * deeply (< 4), exit through _doreti so pending interrupts recorded in
 * _ipending get processed on this cpu.  If the lock could not be
 * taken, re-forward the irq to whichever cpu now holds the lock.  If
 * nested too deeply, release the lock and return.
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xforward_irq
_Xforward_irq:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        FAKE_MCOUNT(13*4(%esp))

        MP_TRYLOCK
        testl   %eax,%eax               /* Did we get the lock ? */
        jz  1f                          /* No */

        lock
        incl    CNAME(forward_irq_hitcnt)
        cmpb    $4, _intr_nesting_level
        jae     2f                      /* nested too deep, bail */
        
        movl    _cpl, %eax
        pushl   %eax                    /* cpl saved for doreti */
        incb    _intr_nesting_level
        sti
        
        pushl   $0                      /* dummy unit for doreti */

        MEXITCOUNT
        jmp     _doreti                 /* Handle forwarded interrupt */
1:
        lock
        incl    CNAME(forward_irq_misscnt)
        call    forward_irq     /* Oops, we've lost the isr lock */
        MEXITCOUNT
        POP_FRAME
        iret
2:
        lock
        incl    CNAME(forward_irq_toodeepcnt)
3:      
        MP_RELLOCK
        MEXITCOUNT
        POP_FRAME
        iret
524
/*
 * forward_irq: send an XFORWARD_IRQ IPI to the cpu holding the MP lock
 * (cpu #0 if the lock is free; the holder's cpu id is kept in the top
 * byte of _mp_lock).  Spins on the ICR delivery-status bit both before
 * and after issuing the IPI.  No-op unless both invltlb_ok and
 * forward_irq_enabled are non-zero.
 */
forward_irq:
        MCOUNT
        cmpl    $0,_invltlb_ok
        jz      4f

        cmpl    $0, CNAME(forward_irq_enabled)
        jz      4f

        movl    _mp_lock,%eax
        cmpl    $FREE_LOCK,%eax
        jne     1f
        movl    $0, %eax                /* Pick CPU #0 if noone has lock */
1:
        shrl    $24,%eax                /* lock holder's cpu id */
        movl    _cpu_num_to_apic_id(,%eax,4),%ecx
        shll    $24,%ecx                /* destination field position */
        movl    lapic_icr_hi, %eax
        andl    $~APIC_ID_MASK, %eax
        orl     %ecx, %eax
        movl    %eax, lapic_icr_hi

2:
        movl    lapic_icr_lo, %eax      /* wait for previous IPI to finish */
        andl    $APIC_DELSTAT_MASK,%eax
        jnz     2b
        movl    lapic_icr_lo, %eax
        andl    $APIC_RESV2_MASK, %eax
        orl     $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
        movl    %eax, lapic_icr_lo      /* send the IPI */
3:
        movl    lapic_icr_lo, %eax      /* wait for delivery to complete */
        andl    $APIC_DELSTAT_MASK,%eax
        jnz     3b
4:              
        ret
563         
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 *
 * The cpu's context is saved into stoppcbs[cpuid] via savectx before
 * signalling.  On restart, cpu #0 additionally runs the one-shot
 * cpustop_restartfunc callback if it is set.
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xcpustop
_Xcpustop:
        pushl   %ebp
        movl    %esp, %ebp
        pushl   %eax
        pushl   %ecx
        pushl   %edx
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        movl    _cpuid, %eax
        imull   $PCB_SIZE, %eax
        leal    CNAME(stoppcbs)(%eax), %eax
        pushl   %eax
        call    CNAME(savectx)          /* Save process context */
        addl    $4, %esp
        
                
        movl    _cpuid, %eax

        lock
        btsl    %eax, _stopped_cpus     /* stopped_cpus |= (1<<id) */
1:
        btl     %eax, _started_cpus     /* while (!(started_cpus & (1<<id))) */
        jnc     1b

        lock
        btrl    %eax, _started_cpus     /* started_cpus &= ~(1<<id) */
        lock
        btrl    %eax, _stopped_cpus     /* stopped_cpus &= ~(1<<id) */

        test    %eax, %eax              /* only cpu 0 runs the callback */
        jnz     2f

        movl    CNAME(cpustop_restartfunc), %eax
        test    %eax, %eax
        jz      2f
        movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */

        call    *%eax
2:
        popl    %fs
        popl    %ds                     /* restore previous data segment */
        popl    %edx
        popl    %ecx
        popl    %eax
        movl    %ebp, %esp
        popl    %ebp
        iret
630
631
/*
 * Instantiate the fast and slow interrupt vectors for IRQs 0-23.
 * MCOUNT_LABEL(bintr)/MCOUNT_LABEL(eintr) bracket the generated code
 * for profiling purposes.
 */
MCOUNT_LABEL(bintr)
        FAST_INTR(0,fastintr0)
        FAST_INTR(1,fastintr1)
        FAST_INTR(2,fastintr2)
        FAST_INTR(3,fastintr3)
        FAST_INTR(4,fastintr4)
        FAST_INTR(5,fastintr5)
        FAST_INTR(6,fastintr6)
        FAST_INTR(7,fastintr7)
        FAST_INTR(8,fastintr8)
        FAST_INTR(9,fastintr9)
        FAST_INTR(10,fastintr10)
        FAST_INTR(11,fastintr11)
        FAST_INTR(12,fastintr12)
        FAST_INTR(13,fastintr13)
        FAST_INTR(14,fastintr14)
        FAST_INTR(15,fastintr15)
        FAST_INTR(16,fastintr16)
        FAST_INTR(17,fastintr17)
        FAST_INTR(18,fastintr18)
        FAST_INTR(19,fastintr19)
        FAST_INTR(20,fastintr20)
        FAST_INTR(21,fastintr21)
        FAST_INTR(22,fastintr22)
        FAST_INTR(23,fastintr23)
        
/*
 * CLKINTR_PENDING: extra work hooked into INTR(0) -- record, under
 * clock_lock, that a clock interrupt is pending.
 */
#define CLKINTR_PENDING                                                 \
        pushl $clock_lock ;                                             \
        call s_lock ;                                                   \
        movl $1,CNAME(clkintr_pending) ;                                \
        call s_unlock ;                                                 \
        addl $4, %esp

        INTR(0,intr0, CLKINTR_PENDING)
        INTR(1,intr1,)
        INTR(2,intr2,)
        INTR(3,intr3,)
        INTR(4,intr4,)
        INTR(5,intr5,)
        INTR(6,intr6,)
        INTR(7,intr7,)
        INTR(8,intr8,)
        INTR(9,intr9,)
        INTR(10,intr10,)
        INTR(11,intr11,)
        INTR(12,intr12,)
        INTR(13,intr13,)
        INTR(14,intr14,)
        INTR(15,intr15,)
        INTR(16,intr16,)
        INTR(17,intr17,)
        INTR(18,intr18,)
        INTR(19,intr19,)
        INTR(20,intr20,)
        INTR(21,intr21,)
        INTR(22,intr22,)
        INTR(23,intr23,)
MCOUNT_LABEL(eintr)
690
/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 *   (Full trap frame; EOI is issued after the action returns.)
 */
        .text
        SUPERALIGN_TEXT
        .globl  _Xrendezvous
_Xrendezvous:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        call    _smp_rendezvous_action

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
        POP_FRAME
        iret
712         
713         
        .data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
        .globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:      doreti_unpend
 *
 * First 24 slots: hardware-interrupt resume points generated by INTR().
 */
        .long   Xresume0,  Xresume1,  Xresume2,  Xresume3 
        .long   Xresume4,  Xresume5,  Xresume6,  Xresume7
        .long   Xresume8,  Xresume9,  Xresume10, Xresume11
        .long   Xresume12, Xresume13, Xresume14, Xresume15 
        .long   Xresume16, Xresume17, Xresume18, Xresume19
        .long   Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:      doreti_unpend
 *  apic_ipl.s: splz_unpend
 *
 * Software-interrupt handlers (parallel to the SWI_*_MASK table below).
 */
        .long   _swi_null, swi_net, _swi_null, _swi_null
        .long   _swi_vm, _swi_null, _softclock

imasks:                         /* masks for interrupt handlers */
        .space  NHWI*4          /* padding; HWI masks are elsewhere */

        .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
        .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK

/* active flag for lazy masking (bit N set while IRQ N is being handled) */
iactive:
        .long   0
748
#ifdef COUNT_XINVLTLB_HITS
/* per-cpu Xinvltlb hit counters */
        .globl  _xhits
_xhits:
        .space  (NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
        .globl _stopped_cpus, _started_cpus
_stopped_cpus:
        .long   0
_started_cpus:
        .long   0

#ifdef BETTER_CLOCK
        .globl _checkstate_probed_cpus
_checkstate_probed_cpus:
        .long   0       
#endif /* BETTER_CLOCK */
        .globl _checkstate_need_ast
_checkstate_need_ast:
        .long   0
_checkstate_pending_ast:
        .long   0
/* counters/flags used by Xcpuast and forward_irq */
        .globl CNAME(forward_irq_misscnt)
        .globl CNAME(forward_irq_toodeepcnt)
        .globl CNAME(forward_irq_hitcnt)
        .globl CNAME(resched_cpus)
        .globl CNAME(want_resched_cnt)
        .globl CNAME(cpuast_cnt)
        .globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):     
        .long 0
CNAME(forward_irq_hitcnt):      
        .long 0
CNAME(forward_irq_toodeepcnt):
        .long 0
CNAME(resched_cpus):
        .long 0
CNAME(want_resched_cnt):
        .long 0
CNAME(cpuast_cnt):
        .long 0
CNAME(cpustop_restartfunc):
        .long 0
                


/* per-pin trigger mode: bit set == level-triggered (see MASK_LEVEL_IRQ) */
        .globl  _apic_pin_trigger
_apic_pin_trigger:
        .long   0

        .text