thread stage 8: add crit_enter(), per-thread cpl handling, fix deferred
[dragonfly.git] / sys / i386 / apic / apic_vector.s
1 /*
2  *      from: vector.s, 386BSD 0.1 unknown origin
3  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4  * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.4 2003/06/21 07:54:56 dillon Exp $
5  */
6
7
8 #include <machine/apic.h>
9 #include <machine/smp.h>
10
11 #include "i386/isa/intr_machdep.h"
12
13 /* convert an absolute IRQ# into a bitmask (bit N set for IRQ N) */
14 #define IRQ_BIT(irq_num)        (1 << (irq_num))
15
16 /* make an index into the IO APIC from the IRQ#: redirection entries
 * start at select-register 0x10 and occupy two 32-bit registers each */
17 #define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
18
19
20 /*
21  * Macros for interrupt entry, call to handler, and exit.
22  */
23
/*
 * FAST_INTR(irq_num, vec_name):
 * Entry for "fast" interrupt handlers.  Saves only the call-used
 * registers plus %ds/%es/%fs, loads the kernel data/private segment
 * selectors, calls the unit's handler immediately, then writes the
 * local APIC EOI and bumps the interrupt counters before restoring
 * the registers and returning with iret.  No cpl manipulation and no
 * pass through doreti.
 */
24 #define FAST_INTR(irq_num, vec_name)                                    \
25         .text ;                                                         \
26         SUPERALIGN_TEXT ;                                               \
27 IDTVEC(vec_name) ;                                                      \
28         pushl   %eax ;          /* save only call-used registers */     \
29         pushl   %ecx ;                                                  \
30         pushl   %edx ;                                                  \
31         pushl   %ds ;                                                   \
32         MAYBE_PUSHL_ES ;                                                \
33         pushl   %fs ;                                                   \
34         movl    $KDSEL,%eax ;                                           \
35         mov     %ax,%ds ;                                               \
36         MAYBE_MOVW_AX_ES ;                                              \
37         movl    $KPSEL,%eax ;                                           \
38         mov     %ax,%fs ;                                               \
39         FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;                      \
40         pushl   _intr_unit + (irq_num) * 4 ;                            \
41         call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
42         addl    $4, %esp ;                                              \
43         movl    $0, lapic_eoi ;                                         \
44         lock ;                                                          \
45         incl    _cnt+V_INTR ;   /* book-keeping can wait */             \
46         movl    _intr_countp + (irq_num) * 4, %eax ;                    \
47         lock ;                                                          \
48         incl    (%eax) ;                                                \
49         MEXITCOUNT ;                                                    \
50         popl    %fs ;                                                   \
51         MAYBE_POPL_ES ;                                                 \
52         popl    %ds ;                                                   \
53         popl    %edx ;                                                  \
54         popl    %ecx ;                                                  \
55         popl    %eax ;                                                  \
56         iret
57
58 /*
59  * PUSH_FRAME: push a dummy error code and trap type, all general
 *  registers (pushal) and the %ds/%es/%fs segment registers, forming
 *  the full trap frame the INTR() handlers and doreti operate on.
 *  POP_FRAME is the exact inverse: restore segments and registers,
 *  then discard the dummy trap type and error code (addl $4+4).
60  */
61 #define PUSH_FRAME                                                      \
62         pushl   $0 ;            /* dummy error code */                  \
63         pushl   $0 ;            /* dummy trap type */                   \
64         pushal ;                                                        \
65         pushl   %ds ;           /* save data and extra segments ... */  \
66         pushl   %es ;                                                   \
67         pushl   %fs
68
69 #define POP_FRAME                                                       \
70         popl    %fs ;                                                   \
71         popl    %es ;                                                   \
72         popl    %ds ;                                                   \
73         popal ;                                                         \
74         addl    $4+4,%esp
75
/*
 * Per-IRQ fields of int_to_apicintpin[] (16-byte entries):
 *  IOAPICADDR = the pin's I/O APIC register-window address (offset 8),
 *  REDIRIDX   = the select-register index of its redirection entry
 *               (offset 12).
 */
76 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
77 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
78         
/*
 * MASK_IRQ(irq_num): under the IMASK lock, set this IRQ's bit in
 * apic_imen (unless already set) and set IOART_INTMASK in the pin's
 * I/O APIC redirection entry.  Clobbers %eax and %ecx.
 */
79 #define MASK_IRQ(irq_num)                                               \
80         IMASK_LOCK ;                            /* into critical reg */ \
81         testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
82         jne     7f ;                    /* masked, don't mask */        \
83         orl     $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */  \
84         movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
85         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
86         movl    %eax, (%ecx) ;                  /* write the index */   \
87         movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
88         orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
89         movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
90 7: ;                                            /* already masked */    \
91         IMASK_UNLOCK
92 /*
93  * Test to see whether we are handling an edge or level triggered INT.
94  *  Level-triggered INTs must still be masked as we don't clear the source,
95  *  and the EOI cycle would cause redundant INTs to occur.
 *  Edge-triggered sources (bit clear in apic_pin_trigger) are left
 *  unmasked.  Clobbers whatever MASK_IRQ clobbers (%eax, %ecx).
96  */
97 #define MASK_LEVEL_IRQ(irq_num)                                         \
98         testl   $IRQ_BIT(irq_num), _apic_pin_trigger ;                  \
99         jz      9f ;                            /* edge, don't mask */  \
100         MASK_IRQ(irq_num) ;                                             \
101 9:
102
103
/*
 * EOI_IRQ(irq_num): issue a local APIC EOI only if this IRQ's
 * in-service bit is currently set.  With APIC_INTR_REORDER the ISR
 * word/bit pair is looked up through apic_isrbit_location[] (8 bytes
 * per IRQ: pointer, then bitmask); otherwise lapic_isr1 is tested
 * directly.  The REORDER variant clobbers %eax.
 */
104 #ifdef APIC_INTR_REORDER
105 #define EOI_IRQ(irq_num)                                                \
106         movl    _apic_isrbit_location + 8 * (irq_num), %eax ;           \
107         movl    (%eax), %eax ;                                          \
108         testl   _apic_isrbit_location + 4 + 8 * (irq_num), %eax ;       \
109         jz      9f ;                            /* not active */        \
110         movl    $0, lapic_eoi ;                                         \
111         APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
112 9:
113
114 #else
115 #define EOI_IRQ(irq_num)                                                \
116         testl   $IRQ_BIT(irq_num), lapic_isr1;                          \
117         jz      9f      ;                       /* not active */        \
118         movl    $0, lapic_eoi;                                          \
119         APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
120 9:
121 #endif
122         
123         
124 /*
125  * Test to see if the source is currently masked; if so, clear its bit
 *  in apic_imen and the IOART_INTMASK bit in the pin's redirection
 *  entry, all under the IMASK lock.  Clobbers %eax and %ecx.
126  */
127 #define UNMASK_IRQ(irq_num)                                     \
128         IMASK_LOCK ;                            /* into critical reg */ \
129         testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
130         je      7f ;                    /* bit clear, not masked */     \
131         andl    $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */    \
132         movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
133         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
134         movl    %eax,(%ecx) ;                   /* write the index */   \
135         movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
136         andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
137         movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
138 7: ;                                                                    \
139         IMASK_UNLOCK
140
141 #ifdef APIC_INTR_DIAGNOSTIC
142 #ifdef APIC_INTR_DIAGNOSTIC_IRQ
/*
 * log_intr_event: append a 16-bit record (cpuid << 8 | event id) to
 * the 32768-entry circular trace buffer apic_itrace_debugbuffer,
 * under apic_itrace_debuglock with interrupts disabled (pushf/cli ...
 * popf).  The event id argument is at 8(%esp) after the pushf and
 * return address.  Clobbers %eax, %ecx.
 */
143 log_intr_event:
144         pushf
145         cli
146         pushl   $CNAME(apic_itrace_debuglock)
147         call    CNAME(s_lock_np)
148         addl    $4, %esp
149         movl    CNAME(apic_itrace_debugbuffer_idx), %ecx
150         andl    $32767, %ecx
151         movl    _cpuid, %eax
152         shll    $8,     %eax
153         orl     8(%esp), %eax
154         movw    %ax,    CNAME(apic_itrace_debugbuffer)(,%ecx,2)
155         incl    %ecx
156         andl    $32767, %ecx
157         movl    %ecx,   CNAME(apic_itrace_debugbuffer_idx)
158         pushl   $CNAME(apic_itrace_debuglock)
159         call    CNAME(s_unlock_np)
160         addl    $4, %esp
161         popf
162         ret
163         
164
/*
 * APIC_ITRACE(name, irq_num, id): bump the per-IRQ diagnostic counter
 * `name`, and if irq_num is the IRQ under scrutiny
 * (APIC_INTR_DIAGNOSTIC_IRQ), log `id` via log_intr_event.  Preserves
 * %eax/%ecx/%edx around the call.  The non-IRQ variant below just
 * counts.
 */
165 #define APIC_ITRACE(name, irq_num, id)                                  \
166         lock ;                                  /* MP-safe */           \
167         incl    CNAME(name) + (irq_num) * 4 ;                           \
168         pushl   %eax ;                                                  \
169         pushl   %ecx ;                                                  \
170         pushl   %edx ;                                                  \
171         movl    $(irq_num), %eax ;                                      \
172         cmpl    $APIC_INTR_DIAGNOSTIC_IRQ, %eax ;                       \
173         jne     7f ;                                                    \
174         pushl   $id ;                                                   \
175         call    log_intr_event ;                                        \
176         addl    $4, %esp ;                                              \
177 7: ;                                                                    \
178         popl    %edx ;                                                  \
179         popl    %ecx ;                                                  \
180         popl    %eax
181 #else
182 #define APIC_ITRACE(name, irq_num, id)                                  \
183         lock ;                                  /* MP-safe */           \
184         incl    CNAME(name) + (irq_num) * 4
185 #endif
186
/* Event ids recorded by log_intr_event. */
187 #define APIC_ITRACE_ENTER 1
188 #define APIC_ITRACE_EOI 2
189 #define APIC_ITRACE_TRYISRLOCK 3
190 #define APIC_ITRACE_GOTISRLOCK 4
191 #define APIC_ITRACE_ENTER2 5
192 #define APIC_ITRACE_LEAVE 6
193 #define APIC_ITRACE_UNMASK 7
194 #define APIC_ITRACE_ACTIVE 8
195 #define APIC_ITRACE_MASKED 9
196 #define APIC_ITRACE_NOISRLOCK 10
197 #define APIC_ITRACE_MASKED2 11
198 #define APIC_ITRACE_SPLZ 12
199 #define APIC_ITRACE_DORETI 13   
200         
201 #else   
/* Diagnostics disabled: APIC_ITRACE expands to nothing. */
202 #define APIC_ITRACE(name, irq_num, id)
203 #endif
204                 
/*
 * INTR(irq_num, vec_name, maybe_extra_ipending):
 * Heavyweight interrupt entry.  Builds a full trap frame, loads the
 * kernel segments, then uses the iactive bitmask for lazy masking:
 *  - if the IRQ was not already active, (level-)mask + EOI it, try
 *    the MP lock, and -- provided neither the cpl nor the current
 *    thread's critical section (TD_PRI >= TDPRI_CRIT) blocks it --
 *    fall into Xresume<irq_num>, which raises the cpl by
 *    intr_mask[irq], runs the handler with interrupts enabled, then
 *    clears iactive, unmasks the pin, and exits through doreti
 *    (which pops the saved cpl).
 *  - blocked or contended cases record the IRQ in ipending, request
 *    attention via _reqpri = TDPRI_CRIT, and either retry (0b),
 *    iret (1/2/4), or forward the IRQ to the MP-lock holder (3).
 * maybe_extra_ipending is extra per-vector work (CLKINTR_PENDING for
 * IRQ0, empty otherwise).
 */
205 #define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
206         .text ;                                                         \
207         SUPERALIGN_TEXT ;                                               \
208 /* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */  \
209 IDTVEC(vec_name) ;                                                      \
210         PUSH_FRAME ;                                                    \
211         movl    $KDSEL, %eax ;  /* reload with kernel's data segment */ \
212         mov     %ax, %ds ;                                              \
213         mov     %ax, %es ;                                              \
214         movl    $KPSEL, %eax ;                                          \
215         mov     %ax, %fs ;                                              \
216 ;                                                                       \
217         maybe_extra_ipending ;                                          \
218 ;                                                                       \
219         APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;    \
220         lock ;                                  /* MP-safe */           \
221         btsl    $(irq_num), iactive ;           /* lazy masking */      \
222         jc      1f ;                            /* already active */    \
223 ;                                                                       \
224         MASK_LEVEL_IRQ(irq_num) ;                                       \
225         EOI_IRQ(irq_num) ;                                              \
226 0: ;                                                                    \
227         APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
228         MP_TRYLOCK ;            /* XXX this is going away... */         \
229         testl   %eax, %eax ;                    /* did we get it? */    \
230         jz      3f ;                            /* no */                \
231 ;                                                                       \
232         APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
233         testl   $IRQ_BIT(irq_num), _cpl ;                               \
234         jne     2f ;                            /* this INT masked */   \
235         movl    _curthread,%eax ;                                       \
236         cmpl    $TDPRI_CRIT,TD_PRI(%eax) ;                              \
237         jge     2f ;                            /* in critical sec */   \
238 ;                                                                       \
239         incb    _intr_nesting_level ;                                   \
240 ;                                                                       \
241   /* entry point used by doreti_unpend for HWIs. */                     \
242 __CONCAT(Xresume,irq_num): ;                                            \
243         FAKE_MCOUNT(13*4(%esp)) ;               /* XXX avoid dbl cnt */ \
244         lock ;  incl    _cnt+V_INTR ;           /* tally interrupts */  \
245         movl    _intr_countp + (irq_num) * 4, %eax ;                    \
246         lock ;  incl    (%eax) ;                                        \
247 ;                                                                       \
248         movl    _cpl, %eax ;                                            \
249         pushl   %eax ;                                                  \
250         orl     _intr_mask + (irq_num) * 4, %eax ;                      \
251         movl    %eax, _cpl ;                                            \
252         lock ;                                                          \
253         andl    $~IRQ_BIT(irq_num), _ipending ;                         \
254 ;                                                                       \
255         pushl   _intr_unit + (irq_num) * 4 ;                            \
256         APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;  \
257         sti ;                                                           \
258         call    *_intr_handler + (irq_num) * 4 ;                        \
259         cli ;                                                           \
260         APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;    \
261 ;                                                                       \
262         lock ;  andl    $~IRQ_BIT(irq_num), iactive ;                   \
263         UNMASK_IRQ(irq_num) ;                                           \
264         APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;  \
265         sti ;                           /* doreti repeats cli/sti */    \
266         MEXITCOUNT ;                                                    \
267         jmp     _doreti ;                                               \
268 ;                                                                       \
269         ALIGN_TEXT ;                                                    \
270 1: ;                                            /* active  */           \
271         APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;  \
272         MASK_IRQ(irq_num) ;                                             \
273         EOI_IRQ(irq_num) ;                                              \
274         lock ;                                                          \
275         orl     $IRQ_BIT(irq_num), _ipending ;                          \
276         movl    $TDPRI_CRIT,_reqpri ;                                   \
277         lock ;                                                          \
278         btsl    $(irq_num), iactive ;           /* still active */      \
279         jnc     0b ;                            /* retry */             \
280         POP_FRAME ;                                                     \
281         iret ;          /* XXX:  iactive bit might be 0 now */          \
282         ALIGN_TEXT ;                                                    \
283 2: ;                            /* masked by cpl, leave iactive set */  \
284         APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;  \
285         lock ;                                                          \
286         orl     $IRQ_BIT(irq_num), _ipending ;                          \
287         movl    $TDPRI_CRIT,_reqpri ;                                   \
288         MP_RELLOCK ;                                                    \
289         POP_FRAME ;                                                     \
290         iret ;                                                          \
291         ALIGN_TEXT ;                                                    \
292 3: ;                    /* other cpu has isr lock */                    \
293         APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
294         lock ;                                                          \
295         orl     $IRQ_BIT(irq_num), _ipending ;                          \
296         movl    $TDPRI_CRIT,_reqpri ;                                   \
297         testl   $IRQ_BIT(irq_num), _cpl ;                               \
298         jne     4f ;                            /* this INT masked */   \
299         call    forward_irq ;    /* forward irq to lock holder */       \
300         POP_FRAME ;                             /* and return */        \
301         iret ;                                                          \
302         ALIGN_TEXT ;                                                    \
303 4: ;                                            /* blocked */           \
304         APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
305         POP_FRAME ;                             /* and return */        \
306         iret
307
308 /*
309  * Handle "spurious INTerrupts".
310  * Notes:
311  *  This is different than the "spurious INTerrupt" generated by an
312  *   8259 PIC for missing INTs.  See the APIC documentation for details.
313  *  This routine should NOT do an 'EOI' cycle.
 *  No registers are touched and no frame is built -- the handler is
 *  a bare iret.
314  */
315         .text
316         SUPERALIGN_TEXT
317         .globl _Xspuriousint
318 _Xspuriousint:
319
320         /* No EOI cycle used here */
321
322         iret
323
324
325 /*
326  * Handle TLB shootdowns.
 *  Reloads %cr3 with its own value to flush this CPU's TLB, then EOIs.
 *  Only %eax is saved; %ss-override addressing is used for the global
 *  stores so the kernel %ds never has to be loaded.
327  */
328         .text
329         SUPERALIGN_TEXT
330         .globl  _Xinvltlb
331 _Xinvltlb:
332         pushl   %eax
333
334 #ifdef COUNT_XINVLTLB_HITS
335         pushl   %fs
336         movl    $KPSEL, %eax
337         mov     %ax, %fs
338         movl    _cpuid, %eax
339         popl    %fs
340         ss
341         incl    _xhits(,%eax,4)
342 #endif /* COUNT_XINVLTLB_HITS */
343
344         movl    %cr3, %eax              /* invalidate the TLB */
345         movl    %eax, %cr3
346
347         ss                              /* stack segment, avoid %ds load */
348         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
349
350         popl    %eax
351         iret
352
353
354 #ifdef BETTER_CLOCK

356 /*
357  * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
358  *
359  *  - Stores current cpu state in checkstate_cpustate[cpuid]
360  *      0 == user, 1 == sys, 2 == intr
361  *  - Stores current process in checkstate_curproc[cpuid]
362  *
363  *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
364  *
365  * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 *
 * NOTE(review): the code below only ever stores 0 (user) or 1
 * (system/interrupt) -- the "2 == intr" case is never produced here.
366  */
367
368         .text
369         SUPERALIGN_TEXT
370         .globl _Xcpucheckstate
371         .globl _checkstate_cpustate
372         .globl _checkstate_curproc
373         .globl _checkstate_pc
374 _Xcpucheckstate:
375         pushl   %eax
376         pushl   %ebx            
377         pushl   %ds                     /* save current data segment */
378         pushl   %fs
379
380         movl    $KDSEL, %eax
381         mov     %ax, %ds                /* use KERNEL data segment */
382         movl    $KPSEL, %eax
383         mov     %ax, %fs
384
385         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
386
        /* %ebx = 0 (user) unless interrupted CS RPL != 3 and not V86 */
387         movl    $0, %ebx                
388         movl    20(%esp), %eax  
389         andl    $3, %eax
390         cmpl    $3, %eax
391         je      1f
392         testl   $PSL_VM, 24(%esp)
393         jne     1f
394         incl    %ebx                    /* system or interrupt */
395 1:      
396         movl    _cpuid, %eax
397         movl    %ebx, _checkstate_cpustate(,%eax,4)
398         movl    _curthread, %ebx
399         movl    TD_PROC(%ebx),%ebx
400         movl    %ebx, _checkstate_curproc(,%eax,4)
        /* record the interrupted %eip */
401         movl    16(%esp), %ebx
402         movl    %ebx, _checkstate_pc(,%eax,4)
403
404         lock                            /* checkstate_probed_cpus |= (1<<id) */
405         btsl    %eax, _checkstate_probed_cpus
406
407         popl    %fs
408         popl    %ds                     /* restore previous data segment */
409         popl    %ebx
410         popl    %eax
411         iret
412
413 #endif /* BETTER_CLOCK */
414
415 /*
416  * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
417  *
418  *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
419  *
420  *  - We need a better method of triggering asts on other cpus.
 *
 * If no AST delivery is already pending for this CPU
 * (checkstate_pending_ast), takes the MP lock, marks AST_PENDING
 * (plus AST_RESCHED if this CPU is in resched_cpus), and exits via
 * doreti; otherwise just pops the frame and returns.
421  */
422
423         .text
424         SUPERALIGN_TEXT
425         .globl _Xcpuast
426 _Xcpuast:
427         PUSH_FRAME
428         movl    $KDSEL, %eax
429         mov     %ax, %ds                /* use KERNEL data segment */
430         mov     %ax, %es
431         movl    $KPSEL, %eax
432         mov     %ax, %fs
433
434         movl    _cpuid, %eax
435         lock                            /* checkstate_need_ast &= ~(1<<id) */
436         btrl    %eax, _checkstate_need_ast
437         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
438
439         lock
440         btsl    %eax, _checkstate_pending_ast
441         jc      1f
442
443         FAKE_MCOUNT(13*4(%esp))
444
445         /* 
446          * Giant locks do not come cheap.
447          * A lot of cycles are going to be wasted here.
448          */
449         call    _get_mplock
450
        /* push the saved cpl for doreti */
451         movl    _cpl, %eax
452         pushl   %eax
453         orl     $AST_PENDING, _astpending       /* XXX */
454         incb    _intr_nesting_level
455         sti
456         
        /* dummy unit push, as doreti expects after a handler call */
457         pushl   $0
458         
459         movl    _cpuid, %eax
460         lock    
461         btrl    %eax, _checkstate_pending_ast
462         lock    
463         btrl    %eax, CNAME(resched_cpus)
464         jnc     2f
465         orl     $AST_PENDING+AST_RESCHED,_astpending
466         lock
467         incl    CNAME(want_resched_cnt)
468 2:              
469         lock
470         incl    CNAME(cpuast_cnt)
471         MEXITCOUNT
472         jmp     _doreti
473 1:
474         /* We are already in the process of delivering an ast for this CPU */
475         POP_FRAME
476         iret                    
477
478
479 /*
480  *       Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 *
 *  Sent by forward_irq (below) to the MP-lock holder.  If this CPU
 *  can take the MP lock and is not nested too deeply
 *  (intr_nesting_level < 4), it runs the pending-interrupt processing
 *  via doreti; if the lock is gone it re-forwards with forward_irq;
 *  if too deep it releases the lock and returns.
481  */
482
483         .text
484         SUPERALIGN_TEXT
485         .globl _Xforward_irq
486 _Xforward_irq:
487         PUSH_FRAME
488         movl    $KDSEL, %eax
489         mov     %ax, %ds                /* use KERNEL data segment */
490         mov     %ax, %es
491         movl    $KPSEL, %eax
492         mov     %ax, %fs
493
494         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
495
496         FAKE_MCOUNT(13*4(%esp))
497
498         MP_TRYLOCK
499         testl   %eax,%eax               /* Did we get the lock ? */
500         jz  1f                          /* No */
501
502         lock
503         incl    CNAME(forward_irq_hitcnt)
504         cmpb    $4, _intr_nesting_level
505         jae     2f
506         
        /* push saved cpl and a dummy unit for doreti */
507         movl    _cpl, %eax
508         pushl   %eax
509         incb    _intr_nesting_level
510         sti
511         
512         pushl   $0
513
514         MEXITCOUNT
515         jmp     _doreti                 /* Handle forwarded interrupt */
516 1:
517         lock
518         incl    CNAME(forward_irq_misscnt)
519         call    forward_irq     /* Oops, we've lost the isr lock */
520         MEXITCOUNT
521         POP_FRAME
522         iret
523 2:
524         lock
525         incl    CNAME(forward_irq_toodeepcnt)
526 3:      
527         MP_RELLOCK
528         MEXITCOUNT
529         POP_FRAME
530         iret
531
532 /*
533  * forward_irq: forward servicing of a hardware interrupt to the CPU
 *  that currently holds the MP lock (cpu id taken from the top byte
 *  of _mp_lock; CPU #0 is picked if the lock is free) by sending it
 *  an XFORWARD_IRQ fixed-delivery IPI through the local APIC ICR.
 *  Spins on the ICR delivery-status bit both before and after issuing
 *  the command.  No-op unless _invltlb_ok and forward_irq_enabled are
 *  both nonzero.  Clobbers %eax, %ecx, flags.
534  */
535 forward_irq:
536         MCOUNT
537         cmpl    $0,_invltlb_ok
538         jz      4f
539
540         cmpl    $0, CNAME(forward_irq_enabled)
541         jz      4f
542
543         movl    _mp_lock,%eax
544         cmpl    $FREE_LOCK,%eax
545         jne     1f
546         movl    $0, %eax                /* Pick CPU #0 if noone has lock */
547 1:
548         shrl    $24,%eax
549         movl    _cpu_num_to_apic_id(,%eax,4),%ecx
550         shll    $24,%ecx
551         movl    lapic_icr_hi, %eax
552         andl    $~APIC_ID_MASK, %eax
553         orl     %ecx, %eax
554         movl    %eax, lapic_icr_hi
555
2:
556         movl    lapic_icr_lo, %eax
557         andl    $APIC_DELSTAT_MASK,%eax
558         jnz     2b
559         movl    lapic_icr_lo, %eax
560         andl    $APIC_RESV2_MASK, %eax
561         orl     $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
562         movl    %eax, lapic_icr_lo
3:
563         movl    lapic_icr_lo, %eax
564         andl    $APIC_DELSTAT_MASK,%eax
565         jnz     3b
4:              
566         ret
570         
571 /*
572  * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
573  *
574  *  - Signals its receipt.
575  *  - Waits for permission to restart.
576  *  - Signals its restart.
 *
 *  Saves this CPU's context into stoppcbs[cpuid] via savectx, sets
 *  its bit in stopped_cpus, then spins until its bit appears in
 *  started_cpus.  On restart, CPU 0 (and only CPU 0) runs the
 *  one-shot cpustop_restartfunc if one is set.
577  */
578
579         .text
580         SUPERALIGN_TEXT
581         .globl _Xcpustop
582 _Xcpustop:
583         pushl   %ebp
584         movl    %esp, %ebp
585         pushl   %eax
586         pushl   %ecx
587         pushl   %edx
588         pushl   %ds                     /* save current data segment */
589         pushl   %fs
590
591         movl    $KDSEL, %eax
592         mov     %ax, %ds                /* use KERNEL data segment */
593         movl    $KPSEL, %eax
594         mov     %ax, %fs
595
596         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
597
598         movl    _cpuid, %eax
599         imull   $PCB_SIZE, %eax
600         leal    CNAME(stoppcbs)(%eax), %eax
601         pushl   %eax
602         call    CNAME(savectx)          /* Save process context */
603         addl    $4, %esp
604         
605                 
606         movl    _cpuid, %eax
607
608         lock
609         btsl    %eax, _stopped_cpus     /* stopped_cpus |= (1<<id) */
610 1:
611         btl     %eax, _started_cpus     /* while (!(started_cpus & (1<<id))) */
612         jnc     1b
613
614         lock
615         btrl    %eax, _started_cpus     /* started_cpus &= ~(1<<id) */
616         lock
617         btrl    %eax, _stopped_cpus     /* stopped_cpus &= ~(1<<id) */
618
619         test    %eax, %eax              /* only CPU 0 runs restartfunc */
620         jnz     2f
621
622         movl    CNAME(cpustop_restartfunc), %eax
623         test    %eax, %eax
624         jz      2f
625         movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */
626
627         call    *%eax
628 2:
629         popl    %fs
630         popl    %ds                     /* restore previous data segment */
631         popl    %edx
632         popl    %ecx
633         popl    %eax
634         movl    %ebp, %esp
635         popl    %ebp
636         iret
637
638
/* Instantiate the 24 fast and 24 heavyweight interrupt entry points. */
639 MCOUNT_LABEL(bintr)
640         FAST_INTR(0,fastintr0)
641         FAST_INTR(1,fastintr1)
642         FAST_INTR(2,fastintr2)
643         FAST_INTR(3,fastintr3)
644         FAST_INTR(4,fastintr4)
645         FAST_INTR(5,fastintr5)
646         FAST_INTR(6,fastintr6)
647         FAST_INTR(7,fastintr7)
648         FAST_INTR(8,fastintr8)
649         FAST_INTR(9,fastintr9)
650         FAST_INTR(10,fastintr10)
651         FAST_INTR(11,fastintr11)
652         FAST_INTR(12,fastintr12)
653         FAST_INTR(13,fastintr13)
654         FAST_INTR(14,fastintr14)
655         FAST_INTR(15,fastintr15)
656         FAST_INTR(16,fastintr16)
657         FAST_INTR(17,fastintr17)
658         FAST_INTR(18,fastintr18)
659         FAST_INTR(19,fastintr19)
660         FAST_INTR(20,fastintr20)
661         FAST_INTR(21,fastintr21)
662         FAST_INTR(22,fastintr22)
663         FAST_INTR(23,fastintr23)
664         
/*
 * CLKINTR_PENDING: extra work hooked into INTR(0) only -- set
 * clkintr_pending under clock_lock so the clock code knows a clock
 * interrupt arrived while it may have been blocked.
 */
665 #define CLKINTR_PENDING                                                 \
666         pushl $clock_lock ;                                             \
667         call s_lock ;                                                   \
668         movl $1,CNAME(clkintr_pending) ;                                \
669         call s_unlock ;                                                 \
670         addl $4, %esp
671
672         INTR(0,intr0, CLKINTR_PENDING)
673         INTR(1,intr1,)
674         INTR(2,intr2,)
675         INTR(3,intr3,)
676         INTR(4,intr4,)
677         INTR(5,intr5,)
678         INTR(6,intr6,)
679         INTR(7,intr7,)
680         INTR(8,intr8,)
681         INTR(9,intr9,)
682         INTR(10,intr10,)
683         INTR(11,intr11,)
684         INTR(12,intr12,)
685         INTR(13,intr13,)
686         INTR(14,intr14,)
687         INTR(15,intr15,)
688         INTR(16,intr16,)
689         INTR(17,intr17,)
690         INTR(18,intr18,)
691         INTR(19,intr19,)
692         INTR(20,intr20,)
693         INTR(21,intr21,)
694         INTR(22,intr22,)
695         INTR(23,intr23,)
696 MCOUNT_LABEL(eintr)
697
698 /*
699  * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
700  *
701  * - Calls the generic rendezvous action function.
 * - EOIs only after the action function has run, then pops the frame.
702  */
703         .text
704         SUPERALIGN_TEXT
705         .globl  _Xrendezvous
706 _Xrendezvous:
707         PUSH_FRAME
708         movl    $KDSEL, %eax
709         mov     %ax, %ds                /* use KERNEL data segment */
710         mov     %ax, %es
711         movl    $KPSEL, %eax
712         mov     %ax, %fs
713
714         call    _smp_rendezvous_action
715
716         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
717         POP_FRAME
718         iret
719         
720         
721         .data
722 /*
723  * Addresses of interrupt handlers.
724  *  XresumeNN: Resumption addresses for HWIs.
725  */
726         .globl _ihandlers
727 _ihandlers:
728 /*
729  * used by:
730  *  ipl.s:      doreti_unpend
731  */
732         .long   Xresume0,  Xresume1,  Xresume2,  Xresume3 
733         .long   Xresume4,  Xresume5,  Xresume6,  Xresume7
734         .long   Xresume8,  Xresume9,  Xresume10, Xresume11
735         .long   Xresume12, Xresume13, Xresume14, Xresume15 
736         .long   Xresume16, Xresume17, Xresume18, Xresume19
737         .long   Xresume20, Xresume21, Xresume22, Xresume23
738 /*
739  * used by:
740  *  ipl.s:      doreti_unpend
741  *  apic_ipl.s: splz_unpend
742  */
743         .long   _swi_null, swi_net, _swi_null, _swi_null
744         .long   _swi_vm, _swi_null, _softclock
745
746 imasks:                         /* masks for interrupt handlers */
747         .space  NHWI*4          /* padding; HWI masks are elsewhere */
748
749         .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
750         .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
751
752 /* active flag for lazy masking (one bit per IRQ, see INTR()) */
753 iactive:
754         .long   0
755
756 #ifdef COUNT_XINVLTLB_HITS
757         .globl  _xhits
758 _xhits:
759         .space  (NCPU * 4), 0
760 #endif /* COUNT_XINVLTLB_HITS */
761
762 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
763         .globl _stopped_cpus, _started_cpus
764 _stopped_cpus:
765         .long   0
766 _started_cpus:
767         .long   0
768
769 #ifdef BETTER_CLOCK
770         .globl _checkstate_probed_cpus
771 _checkstate_probed_cpus:
772         .long   0       
773 #endif /* BETTER_CLOCK */
774         .globl _checkstate_need_ast
775 _checkstate_need_ast:
776         .long   0
777 _checkstate_pending_ast:
778         .long   0
/* counters and hooks used by the IRQ-forwarding and AST IPI paths */
779         .globl CNAME(forward_irq_misscnt)
780         .globl CNAME(forward_irq_toodeepcnt)
781         .globl CNAME(forward_irq_hitcnt)
782         .globl CNAME(resched_cpus)
783         .globl CNAME(want_resched_cnt)
784         .globl CNAME(cpuast_cnt)
785         .globl CNAME(cpustop_restartfunc)
786 CNAME(forward_irq_misscnt):     
787         .long 0
788 CNAME(forward_irq_hitcnt):      
789         .long 0
790 CNAME(forward_irq_toodeepcnt):
791         .long 0
792 CNAME(resched_cpus):
793         .long 0
794 CNAME(want_resched_cnt):
795         .long 0
796 CNAME(cpuast_cnt):
797         .long 0
798 CNAME(cpustop_restartfunc):
799         .long 0
800                 
801

/* bitmask of level-triggered pins, tested by MASK_LEVEL_IRQ */
803         .globl  _apic_pin_trigger
804 _apic_pin_trigger:
805         .long   0
806
807         .text