thread stage 1: convert curproc to curthread, embed struct thread in proc.
sys/i386/isa/apic_vector.s
/*
 *      from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/i386/isa/Attic/apic_vector.s,v 1.3 2003/06/18 06:33:33 dillon Exp $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)        (1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

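/*
 * FAST_INTR(irq_num, vec_name) is the "fast" interrupt entry point: it
 * saves only the call-used registers plus %ds/%es/%fs, loads the kernel
 * selectors (KDSEL/KPSEL), calls the handler from _intr_handler[] with
 * its _intr_unit[] argument, writes lapic_eoi to acknowledge the
 * interrupt, bumps the interrupt counters, and returns with iret; there
 * is no spl processing and no pass through _doreti.
 */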
#define FAST_INTR(irq_num, vec_name)                                    \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        pushl   %eax ;          /* save only call-used registers */     \
        pushl   %ecx ;                                                  \
        pushl   %edx ;                                                  \
        pushl   %ds ;                                                   \
        MAYBE_PUSHL_ES ;                                                \
        pushl   %fs ;                                                   \
        movl    $KDSEL,%eax ;                                           \
        mov     %ax,%ds ;                                               \
        MAYBE_MOVW_AX_ES ;                                              \
        movl    $KPSEL,%eax ;                                           \
        mov     %ax,%fs ;                                               \
        FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;                      \
        pushl   _intr_unit + (irq_num) * 4 ;                            \
        call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
        addl    $4, %esp ;                                              \
        movl    $0, lapic_eoi ;                                         \
        lock ;                                                          \
        incl    _cnt+V_INTR ;   /* book-keeping can wait */             \
        movl    _intr_countp + (irq_num) * 4, %eax ;                    \
        lock ;                                                          \
        incl    (%eax) ;                                                \
        MEXITCOUNT ;                                                    \
        popl    %fs ;                                                   \
        MAYBE_POPL_ES ;                                                 \
        popl    %ds ;                                                   \
        popl    %edx ;                                                  \
        popl    %ecx ;                                                  \
        popl    %eax ;                                                  \
        iret

/*
 * PUSH_FRAME/POP_FRAME build and tear down a full trap frame: a dummy
 * error code and trap type, all general registers (pushal), and the
 * %ds, %es and %fs segment registers.
 */
#define PUSH_FRAME                                                      \
        pushl   $0 ;            /* dummy error code */                  \
        pushl   $0 ;            /* dummy trap type */                   \
        pushal ;                                                        \
        pushl   %ds ;           /* save data and extra segments ... */  \
        pushl   %es ;                                                   \
        pushl   %fs

#define POP_FRAME                                                       \
        popl    %fs ;                                                   \
        popl    %es ;                                                   \
        popl    %ds ;                                                   \
        popal ;                                                         \
        addl    $4+4,%esp

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

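/*
 * MASK_IRQ sets the mask bit in the I/O APIC redirection entry for
 * irq_num.  The I/O APIC is programmed indirectly: the redirection
 * table index from int_to_apicintpin[] is written to the select
 * register at the apic's base address, then the entry is read through
 * IOAPIC_WINDOW, IOART_INTMASK is or'd in, and it is written back.
 * The software copy of the mask state lives in _apic_imen, updated
 * under IMASK_LOCK, so an already-masked source is left alone.
 */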
#define MASK_IRQ(irq_num)                                               \
        IMASK_LOCK ;                            /* into critical reg */ \
        testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
        jne     7f ;                    /* masked, don't mask */        \
        orl     $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */  \
        movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax, (%ecx) ;                  /* write the index */   \
        movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
        orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
        movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
7: ;                                            /* already masked */    \
        IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)                                         \
        testl   $IRQ_BIT(irq_num), _apic_pin_trigger ;                  \
        jz      9f ;                            /* edge, don't mask */  \
        MASK_IRQ(irq_num) ;                                             \
9:


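/*
 * EOI_IRQ writes lapic_eoi only when the local APIC still shows the
 * interrupt in service.  With APIC_INTR_REORDER the ISR word address
 * and bit mask for each irq come from _apic_isrbit_location[];
 * otherwise the bit is tested directly in lapic_isr1.
 */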
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)                                                \
        movl    _apic_isrbit_location + 8 * (irq_num), %eax ;           \
        movl    (%eax), %eax ;                                          \
        testl   _apic_isrbit_location + 4 + 8 * (irq_num), %eax ;       \
        jz      9f ;                            /* not active */        \
        movl    $0, lapic_eoi ;                                         \
        APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
9:

#else
#define EOI_IRQ(irq_num)                                                \
        testl   $IRQ_BIT(irq_num), lapic_isr1;                          \
        jz      9f      ;                       /* not active */        \
        movl    $0, lapic_eoi;                                          \
        APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)                                             \
        IMASK_LOCK ;                            /* into critical reg */ \
        testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
        je      7f ;                    /* bit clear, not masked */     \
        andl    $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */    \
        movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax,(%ecx) ;                   /* write the index */   \
        movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
        andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
        movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
7: ;                                                                    \
        IMASK_UNLOCK

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
        pushf
        cli
        pushl   $CNAME(apic_itrace_debuglock)
        call    CNAME(s_lock_np)
        addl    $4, %esp
        movl    CNAME(apic_itrace_debugbuffer_idx), %ecx
        andl    $32767, %ecx
        movl    _cpuid, %eax
        shll    $8,     %eax
        orl     8(%esp), %eax
        movw    %ax,    CNAME(apic_itrace_debugbuffer)(,%ecx,2)
        incl    %ecx
        andl    $32767, %ecx
        movl    %ecx,   CNAME(apic_itrace_debugbuffer_idx)
        pushl   $CNAME(apic_itrace_debuglock)
        call    CNAME(s_unlock_np)
        addl    $4, %esp
        popf
        ret


#define APIC_ITRACE(name, irq_num, id)                                  \
        lock ;                                  /* MP-safe */           \
        incl    CNAME(name) + (irq_num) * 4 ;                           \
        pushl   %eax ;                                                  \
        pushl   %ecx ;                                                  \
        pushl   %edx ;                                                  \
        movl    $(irq_num), %eax ;                                      \
        cmpl    $APIC_INTR_DIAGNOSTIC_IRQ, %eax ;                       \
        jne     7f ;                                                    \
        pushl   $id ;                                                   \
        call    log_intr_event ;                                        \
        addl    $4, %esp ;                                              \
7: ;                                                                    \
        popl    %edx ;                                                  \
        popl    %ecx ;                                                  \
        popl    %eax
#else
#define APIC_ITRACE(name, irq_num, id)                                  \
        lock ;                                  /* MP-safe */           \
        incl    CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif

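/*
 * INTR(irq_num, vec_name, maybe_extra_ipending) is the normal interrupt
 * entry.  Outline:
 *
 *  - set bit irq_num in iactive ("lazy masking").  If it was already
 *    set, mask the source in the I/O APIC, EOI, mark the irq in
 *    _ipending and return (retrying if iactive has since cleared).
 *  - otherwise mask level-triggered sources, EOI, and try for the
 *    MP/ISR lock.  If another cpu owns it, mark _ipending and either
 *    return (if blocked by _cpl) or hand the interrupt to the lock
 *    holder via forward_irq.
 *  - with the lock held and the irq not blocked by _cpl, save and raise
 *    _cpl by _intr_mask[irq_num], clear the _ipending bit, and call the
 *    handler with interrupts enabled.  Xresume<irq_num> is the re-entry
 *    point doreti_unpend uses for pending HWIs.
 *  - on return, clear the iactive bit, unmask the source, and leave
 *    through _doreti.
 */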
#define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */  \
IDTVEC(vec_name) ;                                                      \
        PUSH_FRAME ;                                                    \
        movl    $KDSEL, %eax ;  /* reload with kernel's data segment */ \
        mov     %ax, %ds ;                                              \
        mov     %ax, %es ;                                              \
        movl    $KPSEL, %eax ;                                          \
        mov     %ax, %fs ;                                              \
;                                                                       \
        maybe_extra_ipending ;                                          \
;                                                                       \
        APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;    \
        lock ;                                  /* MP-safe */           \
        btsl    $(irq_num), iactive ;           /* lazy masking */      \
        jc      1f ;                            /* already active */    \
;                                                                       \
        MASK_LEVEL_IRQ(irq_num) ;                                       \
        EOI_IRQ(irq_num) ;                                              \
0: ;                                                                    \
        APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
        MP_TRYLOCK ;            /* XXX this is going away... */         \
        testl   %eax, %eax ;                    /* did we get it? */    \
        jz      3f ;                            /* no */                \
;                                                                       \
        APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
        testl   $IRQ_BIT(irq_num), _cpl ;                               \
        jne     2f ;                            /* this INT masked */   \
;                                                                       \
        incb    _intr_nesting_level ;                                   \
;                                                                       \
  /* entry point used by doreti_unpend for HWIs. */                     \
__CONCAT(Xresume,irq_num): ;                                            \
        FAKE_MCOUNT(13*4(%esp)) ;               /* XXX avoid dbl cnt */ \
        lock ;  incl    _cnt+V_INTR ;           /* tally interrupts */  \
        movl    _intr_countp + (irq_num) * 4, %eax ;                    \
        lock ;  incl    (%eax) ;                                        \
;                                                                       \
        movl    _cpl, %eax ;                                            \
        pushl   %eax ;                                                  \
        orl     _intr_mask + (irq_num) * 4, %eax ;                      \
        movl    %eax, _cpl ;                                            \
        lock ;                                                          \
        andl    $~IRQ_BIT(irq_num), _ipending ;                         \
;                                                                       \
        pushl   _intr_unit + (irq_num) * 4 ;                            \
        APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;  \
        sti ;                                                           \
        call    *_intr_handler + (irq_num) * 4 ;                        \
        cli ;                                                           \
        APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;    \
;                                                                       \
        lock ;  andl    $~IRQ_BIT(irq_num), iactive ;                   \
        UNMASK_IRQ(irq_num) ;                                           \
        APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;  \
        sti ;                           /* doreti repeats cli/sti */    \
        MEXITCOUNT ;                                                    \
        jmp     _doreti ;                                               \
;                                                                       \
        ALIGN_TEXT ;                                                    \
1: ;                                            /* active  */           \
        APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;  \
        MASK_IRQ(irq_num) ;                                             \
        EOI_IRQ(irq_num) ;                                              \
        lock ;                                                          \
        orl     $IRQ_BIT(irq_num), _ipending ;                          \
        lock ;                                                          \
        btsl    $(irq_num), iactive ;           /* still active */      \
        jnc     0b ;                            /* retry */             \
        POP_FRAME ;                                                     \
        iret ;          /* XXX:  iactive bit might be 0 now */          \
        ALIGN_TEXT ;                                                    \
2: ;                            /* masked by cpl, leave iactive set */  \
        APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;  \
        lock ;                                                          \
        orl     $IRQ_BIT(irq_num), _ipending ;                          \
        MP_RELLOCK ;                                                    \
        POP_FRAME ;                                                     \
        iret ;                                                          \
        ALIGN_TEXT ;                                                    \
3: ;                    /* other cpu has isr lock */                    \
        APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
        lock ;                                                          \
        orl     $IRQ_BIT(irq_num), _ipending ;                          \
        testl   $IRQ_BIT(irq_num), _cpl ;                               \
        jne     4f ;                            /* this INT masked */   \
        call    forward_irq ;    /* forward irq to lock holder */       \
        POP_FRAME ;                             /* and return */        \
        iret ;                                                          \
        ALIGN_TEXT ;                                                    \
4: ;                                            /* blocked */           \
        APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
        POP_FRAME ;                             /* and return */        \
        iret

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
        .text
        SUPERALIGN_TEXT
        .globl _Xspuriousint
_Xspuriousint:

        /* No EOI cycle used here */

        iret


/*
 * Handle TLB shootdowns.
 */
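/*
 * Reloading %cr3 with its current value flushes this cpu's (non-global)
 * TLB entries.  The handler never loads a kernel %ds; the ss override
 * prefix routes the lapic_eoi write (and the hit counter) through the
 * stack segment instead.
 */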
        .text
        SUPERALIGN_TEXT
        .globl  _Xinvltlb
_Xinvltlb:
        pushl   %eax

#ifdef COUNT_XINVLTLB_HITS
        pushl   %fs
        movl    $KPSEL, %eax
        mov     %ax, %fs
        movl    _cpuid, %eax
        popl    %fs
        ss
        incl    _xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

        movl    %cr3, %eax              /* invalidate the TLB */
        movl    %eax, %cr3

        ss                              /* stack segment, avoid %ds load */
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        popl    %eax
        iret


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xcpucheckstate
        .globl _checkstate_cpustate
        .globl _checkstate_curproc
        .globl _checkstate_pc
_Xcpucheckstate:
        pushl   %eax
        pushl   %ebx
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

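        /*
         * Classify the interrupted context: RPL 3 in the saved %cs or
         * PSL_VM set in the saved eflags counts as user (%ebx stays 0),
         * anything else counts as system/interrupt (%ebx becomes 1).
         */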
        movl    $0, %ebx
        movl    20(%esp), %eax
        andl    $3, %eax
        cmpl    $3, %eax
        je      1f
        testl   $PSL_VM, 24(%esp)
        jne     1f
        incl    %ebx                    /* system or interrupt */
1:
        movl    _cpuid, %eax
        movl    %ebx, _checkstate_cpustate(,%eax,4)
        movl    _curthread, %ebx
        movl    TD_PROC(%ebx),%ebx
        movl    %ebx, _checkstate_curproc(,%eax,4)
        movl    16(%esp), %ebx
        movl    %ebx, _checkstate_pc(,%eax,4)

        lock                            /* checkstate_probed_cpus |= (1<<id) */
        btsl    %eax, _checkstate_probed_cpus

        popl    %fs
        popl    %ds                     /* restore previous data segment */
        popl    %ebx
        popl    %eax
        iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

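/*
 * Flow: clear this cpu's bit in checkstate_need_ast and EOI, then set
 * the bit in checkstate_pending_ast; if it was already set, an AST is
 * already being delivered and the IPI is simply dismissed.  Otherwise
 * take the giant MP lock, mark AST_PENDING in _astpending (adding
 * AST_RESCHED if this cpu was flagged in resched_cpus), and exit
 * through _doreti, which delivers the AST on the way out.
 */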
        .text
        SUPERALIGN_TEXT
        .globl _Xcpuast
_Xcpuast:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    _cpuid, %eax
        lock                            /* checkstate_need_ast &= ~(1<<id) */
        btrl    %eax, _checkstate_need_ast
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        lock
        btsl    %eax, _checkstate_pending_ast
        jc      1f

        FAKE_MCOUNT(13*4(%esp))

        /*
         * Giant locks do not come cheap.
         * A lot of cycles are going to be wasted here.
         */
        call    _get_mplock

        movl    _cpl, %eax
        pushl   %eax
        orl     $AST_PENDING, _astpending       /* XXX */
        incb    _intr_nesting_level
        sti

        pushl   $0

        movl    _cpuid, %eax
        lock
        btrl    %eax, _checkstate_pending_ast
        lock
        btrl    %eax, CNAME(resched_cpus)
        jnc     2f
        orl     $AST_PENDING+AST_RESCHED,_astpending
        lock
        incl    CNAME(want_resched_cnt)
2:
        lock
        incl    CNAME(cpuast_cnt)
        MEXITCOUNT
        jmp     _doreti
1:
        /* We are already in the process of delivering an ast for this CPU */
        POP_FRAME
        iret


/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

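/*
 * Flow: EOI, then try for the MP lock.  With the lock held and the
 * interrupt nesting level below 4, push the saved _cpl and a dummy
 * word and exit through _doreti so the forwarded interrupt is serviced
 * on this cpu.  If the nesting is already too deep the lock is dropped
 * and the IPI is dismissed.  Without the lock, forward_irq is called
 * again to pass the interrupt on to whoever holds it now.
 */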
        .text
        SUPERALIGN_TEXT
        .globl _Xforward_irq
_Xforward_irq:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        FAKE_MCOUNT(13*4(%esp))

        MP_TRYLOCK
        testl   %eax,%eax               /* Did we get the lock ? */
        jz  1f                          /* No */

        lock
        incl    CNAME(forward_irq_hitcnt)
        cmpb    $4, _intr_nesting_level
        jae     2f

        movl    _cpl, %eax
        pushl   %eax
        incb    _intr_nesting_level
        sti

        pushl   $0

        MEXITCOUNT
        jmp     _doreti                 /* Handle forwarded interrupt */
1:
        lock
        incl    CNAME(forward_irq_misscnt)
        call    forward_irq     /* Oops, we've lost the isr lock */
        MEXITCOUNT
        POP_FRAME
        iret
2:
        lock
        incl    CNAME(forward_irq_toodeepcnt)
3:
        MP_RELLOCK
        MEXITCOUNT
        POP_FRAME
        iret

/*
 * forward_irq: forward a pending interrupt to the CPU that currently
 * holds the MP lock (CPU #0 if the lock is free) by sending it an
 * XFORWARD_IRQ IPI through the local APIC ICR.
 */
forward_irq:
        MCOUNT
        cmpl    $0,_invltlb_ok
        jz      4f

        cmpl    $0, CNAME(forward_irq_enabled)
        jz      4f

        movl    _mp_lock,%eax
        cmpl    $FREE_LOCK,%eax
        jne     1f
        movl    $0, %eax                /* Pick CPU #0 if no one has lock */
1:
        shrl    $24,%eax
        movl    _cpu_num_to_apic_id(,%eax,4),%ecx
        shll    $24,%ecx
        movl    lapic_icr_hi, %eax
        andl    $~APIC_ID_MASK, %eax
        orl     %ecx, %eax
        movl    %eax, lapic_icr_hi

2:
        movl    lapic_icr_lo, %eax
        andl    $APIC_DELSTAT_MASK,%eax
        jnz     2b
        movl    lapic_icr_lo, %eax
        andl    $APIC_RESV2_MASK, %eax
        orl     $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
        movl    %eax, lapic_icr_lo
3:
        movl    lapic_icr_lo, %eax
        andl    $APIC_DELSTAT_MASK,%eax
        jnz     3b
4:
        ret

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

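/*
 * Flow: save this cpu's context into stoppcbs[cpuid] via savectx, set
 * the cpu's bit in stopped_cpus, then spin until the matching bit
 * appears in started_cpus.  Both bits are cleared and, on cpu #0 only,
 * any pending cpustop_restartfunc is called once (it is cleared before
 * the call) before the interrupted context is resumed.
 */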
        .text
        SUPERALIGN_TEXT
        .globl _Xcpustop
_Xcpustop:
        pushl   %ebp
        movl    %esp, %ebp
        pushl   %eax
        pushl   %ecx
        pushl   %edx
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        movl    _cpuid, %eax
        imull   $PCB_SIZE, %eax
        leal    CNAME(stoppcbs)(%eax), %eax
        pushl   %eax
        call    CNAME(savectx)          /* Save process context */
        addl    $4, %esp


        movl    _cpuid, %eax

        lock
        btsl    %eax, _stopped_cpus     /* stopped_cpus |= (1<<id) */
1:
        btl     %eax, _started_cpus     /* while (!(started_cpus & (1<<id))) */
        jnc     1b

        lock
        btrl    %eax, _started_cpus     /* started_cpus &= ~(1<<id) */
        lock
        btrl    %eax, _stopped_cpus     /* stopped_cpus &= ~(1<<id) */

        test    %eax, %eax
        jnz     2f

        movl    CNAME(cpustop_restartfunc), %eax
        test    %eax, %eax
        jz      2f
        movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */

        call    *%eax
2:
        popl    %fs
        popl    %ds                     /* restore previous data segment */
        popl    %edx
        popl    %ecx
        popl    %eax
        movl    %ebp, %esp
        popl    %ebp
        iret


MCOUNT_LABEL(bintr)
        FAST_INTR(0,fastintr0)
        FAST_INTR(1,fastintr1)
        FAST_INTR(2,fastintr2)
        FAST_INTR(3,fastintr3)
        FAST_INTR(4,fastintr4)
        FAST_INTR(5,fastintr5)
        FAST_INTR(6,fastintr6)
        FAST_INTR(7,fastintr7)
        FAST_INTR(8,fastintr8)
        FAST_INTR(9,fastintr9)
        FAST_INTR(10,fastintr10)
        FAST_INTR(11,fastintr11)
        FAST_INTR(12,fastintr12)
        FAST_INTR(13,fastintr13)
        FAST_INTR(14,fastintr14)
        FAST_INTR(15,fastintr15)
        FAST_INTR(16,fastintr16)
        FAST_INTR(17,fastintr17)
        FAST_INTR(18,fastintr18)
        FAST_INTR(19,fastintr19)
        FAST_INTR(20,fastintr20)
        FAST_INTR(21,fastintr21)
        FAST_INTR(22,fastintr22)
        FAST_INTR(23,fastintr23)

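/*
 * CLKINTR_PENDING is the maybe_extra_ipending hook for irq 0: the clock
 * interrupt entry records, under clock_lock, that a clock interrupt is
 * pending before the normal INTR processing runs.
 */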
#define CLKINTR_PENDING                                                 \
        pushl $clock_lock ;                                             \
        call s_lock ;                                                   \
        movl $1,CNAME(clkintr_pending) ;                                \
        call s_unlock ;                                                 \
        addl $4, %esp

        INTR(0,intr0, CLKINTR_PENDING)
        INTR(1,intr1,)
        INTR(2,intr2,)
        INTR(3,intr3,)
        INTR(4,intr4,)
        INTR(5,intr5,)
        INTR(6,intr6,)
        INTR(7,intr7,)
        INTR(8,intr8,)
        INTR(9,intr9,)
        INTR(10,intr10,)
        INTR(11,intr11,)
        INTR(12,intr12,)
        INTR(13,intr13,)
        INTR(14,intr14,)
        INTR(15,intr15,)
        INTR(16,intr16,)
        INTR(17,intr17,)
        INTR(18,intr18,)
        INTR(19,intr19,)
        INTR(20,intr20,)
        INTR(21,intr21,)
        INTR(22,intr22,)
        INTR(23,intr23,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
        .text
        SUPERALIGN_TEXT
        .globl  _Xrendezvous
_Xrendezvous:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        call    _smp_rendezvous_action

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
        POP_FRAME
        iret


        .data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
        .globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:      doreti_unpend
 */
        .long   Xresume0,  Xresume1,  Xresume2,  Xresume3
        .long   Xresume4,  Xresume5,  Xresume6,  Xresume7
        .long   Xresume8,  Xresume9,  Xresume10, Xresume11
        .long   Xresume12, Xresume13, Xresume14, Xresume15
        .long   Xresume16, Xresume17, Xresume18, Xresume19
        .long   Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:      doreti_unpend
 *  apic_ipl.s: splz_unpend
 */
        .long   _swi_null, swi_net, _swi_null, _swi_null
        .long   _swi_vm, _swi_null, _softclock

imasks:                         /* masks for interrupt handlers */
        .space  NHWI*4          /* padding; HWI masks are elsewhere */

        .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
        .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK

/* active flag for lazy masking */
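/*
 * Bit N of iactive is set from interrupt entry until the handler for
 * irq N has completed; an interrupt that arrives while its bit is set
 * is masked in the I/O APIC and marked in _ipending rather than
 * re-entering the handler.
 */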
iactive:
        .long   0

#ifdef COUNT_XINVLTLB_HITS
        .globl  _xhits
_xhits:
        .space  (NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
        .globl _stopped_cpus, _started_cpus
_stopped_cpus:
        .long   0
_started_cpus:
        .long   0

#ifdef BETTER_CLOCK
        .globl _checkstate_probed_cpus
_checkstate_probed_cpus:
        .long   0
#endif /* BETTER_CLOCK */
        .globl _checkstate_need_ast
_checkstate_need_ast:
        .long   0
_checkstate_pending_ast:
        .long   0
        .globl CNAME(forward_irq_misscnt)
        .globl CNAME(forward_irq_toodeepcnt)
        .globl CNAME(forward_irq_hitcnt)
        .globl CNAME(resched_cpus)
        .globl CNAME(want_resched_cnt)
        .globl CNAME(cpuast_cnt)
        .globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
        .long 0
CNAME(forward_irq_hitcnt):
        .long 0
CNAME(forward_irq_toodeepcnt):
        .long 0
CNAME(resched_cpus):
        .long 0
CNAME(want_resched_cnt):
        .long 0
CNAME(cpuast_cnt):
        .long 0
CNAME(cpustop_restartfunc):
        .long 0



        .globl  _apic_pin_trigger
_apic_pin_trigger:
        .long   0

        .text