/*
 * threaded interrupts 1: Rewrite the ICU interrupt code, splz, and doreti code.
 * [dragonfly.git] / sys / platform / pc32 / isa / apic_vector.s
 */
1 /*
2  *      from: vector.s, 386BSD 0.1 unknown origin
3  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4  * $DragonFly: src/sys/platform/pc32/isa/Attic/apic_vector.s,v 1.6 2003/06/29 03:28:43 dillon Exp $
5  */
6
7
8 #include <machine/apic.h>
9 #include <machine/smp.h>
10
11 #include "i386/isa/intr_machdep.h"
12
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)        (1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
/* (each redirection table entry is two 32-bit registers, starting at 0x10) */
#define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
18
19
/*
 * Macros for interrupt entry, call to handler, and exit.
 */

/*
 * FAST_INTR(irq_num, vec_name):
 *	Entry point for a "fast" interrupt.  Saves only the call-used
 *	registers plus the data segment registers, switches to the kernel
 *	data/per-cpu segments, calls the handler immediately with its unit
 *	argument, issues the local APIC EOI, and then does the interrupt
 *	book-keeping before restoring state and returning with iret.
 */
#define FAST_INTR(irq_num, vec_name)                                    \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        pushl   %eax ;          /* save only call-used registers */     \
        pushl   %ecx ;                                                  \
        pushl   %edx ;                                                  \
        pushl   %ds ;                                                   \
        pushl   %es ;                                                   \
        pushl   %fs ;                                                   \
        movl    $KDSEL,%eax ;   /* switch to kernel data segment */     \
        mov     %ax,%ds ;                                               \
        movl    %ax,%es ;                                               \
        movl    $KPSEL,%eax ;   /* %fs = per-cpu private segment */     \
        mov     %ax,%fs ;                                               \
        FAKE_MCOUNT(6*4(%esp)) ;                                        \
        pushl   _intr_unit + (irq_num) * 4 ;                            \
        call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
        addl    $4, %esp ;                                              \
        movl    $0, lapic_eoi ; /* End Of Interrupt to local APIC */    \
        lock ;                                                          \
        incl    _cnt+V_INTR ;   /* book-keeping can wait */             \
        movl    _intr_countp + (irq_num) * 4, %eax ;                    \
        lock ;                                                          \
        incl    (%eax) ;        /* per-interrupt counter */             \
        MEXITCOUNT ;                                                    \
        popl    %fs ;                                                   \
        popl    %es ;                                                   \
        popl    %ds ;                                                   \
        popl    %edx ;                                                  \
        popl    %ecx ;                                                  \
        popl    %eax ;                                                  \
        iret
57
/*
 * PUSH_FRAME / POP_FRAME:
 *	Build / tear down a full trap-style frame: dummy error code and
 *	trap type, all general registers (pushal), and the %ds/%es/%fs
 *	segment registers.  POP_FRAME's final addl discards the two
 *	dummy words pushed by PUSH_FRAME.
 */
#define PUSH_FRAME                                                      \
        pushl   $0 ;            /* dummy error code */                  \
        pushl   $0 ;            /* dummy trap type */                   \
        pushal ;                                                        \
        pushl   %ds ;           /* save data and extra segments ... */  \
        pushl   %es ;                                                   \
        pushl   %fs

#define POP_FRAME                                                       \
        popl    %fs ;                                                   \
        popl    %es ;                                                   \
        popl    %ds ;                                                   \
        popal ;                                                         \
        addl    $4+4,%esp       /* discard dummy trap type + error code */
75
/*
 * int_to_apicintpin[] entries are 16 bytes; offset 8 holds the IO APIC
 * register-select address for the pin and offset 12 the redirection
 * table index for it (inferred from the uses below -- confirm against
 * the struct definition).
 */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

/*
 * MASK_IRQ(irq_num):
 *	Under IMASK_LOCK, set the irq's bit in apic_imen and program
 *	the INTMASK bit in its IO APIC redirection entry.  A no-op if
 *	the irq is already masked.  Clobbers %eax, %ecx.
 */
#define MASK_IRQ(irq_num)                                               \
        IMASK_LOCK ;                            /* into critical reg */ \
        testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
        jne     7f ;                    /* masked, don't mask */        \
        orl     $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */  \
        movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax, (%ecx) ;                  /* write the index */   \
        movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
        orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
        movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
7: ;                                            /* already masked */    \
        IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)                                         \
        testl   $IRQ_BIT(irq_num), _apic_pin_trigger ;                  \
        jz      9f ;                            /* edge, don't mask */  \
        MASK_IRQ(irq_num) ;                                             \
9:
102
103
/*
 * EOI_IRQ(irq_num):
 *	Issue a local APIC EOI only if the irq's bit is set in the
 *	in-service register (i.e. this interrupt is actually being
 *	serviced).  Clobbers %eax in the REORDER variant.
 */
#ifdef APIC_INTR_REORDER
/*
 * Indirect lookup: apic_isrbit_location[] holds, per irq, a pointer to
 * the ISR word (at +0) and the bit mask to test (at +4) -- 8 bytes per
 * entry.
 */
#define EOI_IRQ(irq_num)                                                \
        movl    _apic_isrbit_location + 8 * (irq_num), %eax ;           \
        movl    (%eax), %eax ;                                          \
        testl   _apic_isrbit_location + 4 + 8 * (irq_num), %eax ;       \
        jz      9f ;                            /* not active */        \
        movl    $0, lapic_eoi ;                                         \
        APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
9:

#else
/* Direct test against the first local APIC ISR word. */
#define EOI_IRQ(irq_num)                                                \
        testl   $IRQ_BIT(irq_num), lapic_isr1;                          \
        jz      9f      ;                       /* not active */        \
        movl    $0, lapic_eoi;                                          \
        APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
9:
#endif
122         
123         
/*
 * Test to see if the source is currently masked, clear if so.
 *	Under IMASK_LOCK, clear the irq's bit in apic_imen and clear
 *	the INTMASK bit in its IO APIC redirection entry.  A no-op if
 *	the irq is not masked.  Clobbers %eax, %ecx.
 */
#define UNMASK_IRQ(irq_num)                                     \
        IMASK_LOCK ;                            /* into critical reg */ \
        testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
        je      7f ;                    /* bit clear, not masked */     \
        andl    $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */    \
        movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax,(%ecx) ;                   /* write the index */   \
        movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
        andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
        movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
7: ;                                                                    \
        IMASK_UNLOCK
140
#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
/*
 * log_intr_event:
 *	Record a (cpuid << 8 | event-id) word into the circular 32K-entry
 *	apic_itrace_debugbuffer, protected by apic_itrace_debuglock with
 *	interrupts disabled (pushf/cli ... popf restores the caller's IF).
 *	The event id is the caller's single stack argument, found at
 *	8(%esp) here (below the saved flags and return address).
 */
log_intr_event:
        pushf
        cli
        pushl   $CNAME(apic_itrace_debuglock)
        call    CNAME(s_lock_np)
        addl    $4, %esp
        movl    CNAME(apic_itrace_debugbuffer_idx), %ecx
        andl    $32767, %ecx                    /* index mod 32768 */
        movl    _cpuid, %eax
        shll    $8,     %eax
        orl     8(%esp), %eax                   /* caller's id argument */
        movw    %ax,    CNAME(apic_itrace_debugbuffer)(,%ecx,2)
        incl    %ecx
        andl    $32767, %ecx
        movl    %ecx,   CNAME(apic_itrace_debugbuffer_idx)
        pushl   $CNAME(apic_itrace_debuglock)
        call    CNAME(s_unlock_np)
        addl    $4, %esp
        popf
        ret


/*
 * APIC_ITRACE(name, irq_num, id):
 *	Bump the per-irq counter 'name' and, for the single irq selected
 *	by APIC_INTR_DIAGNOSTIC_IRQ, log the event via log_intr_event.
 *	Preserves %eax/%ecx/%edx around the call.
 */
#define APIC_ITRACE(name, irq_num, id)                                  \
        lock ;                                  /* MP-safe */           \
        incl    CNAME(name) + (irq_num) * 4 ;                           \
        pushl   %eax ;                                                  \
        pushl   %ecx ;                                                  \
        pushl   %edx ;                                                  \
        movl    $(irq_num), %eax ;                                      \
        cmpl    $APIC_INTR_DIAGNOSTIC_IRQ, %eax ;                       \
        jne     7f ;                                                    \
        pushl   $id ;                                                   \
        call    log_intr_event ;                                        \
        addl    $4, %esp ;                                              \
7: ;                                                                    \
        popl    %edx ;                                                  \
        popl    %ecx ;                                                  \
        popl    %eax
#else
/* Counting-only variant: no per-event logging. */
#define APIC_ITRACE(name, irq_num, id)                                  \
        lock ;                                  /* MP-safe */           \
        incl    CNAME(name) + (irq_num) * 4
#endif

/* Event ids recorded by log_intr_event. */
#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
/* Diagnostics disabled: tracing compiles away to nothing. */
#define APIC_ITRACE(name, irq_num, id)
#endif
204                 
/*
 * INTR(irq_num, vec_name, maybe_extra_ipending):
 *	Normal (non-fast) interrupt entry.  Flow:
 *	  - build a trap frame, load kernel segments;
 *	  - lazy-mask via the iactive bitmap: if the irq is already
 *	    active, mask+EOI it, mark it ipending and iret (label 1);
 *	  - otherwise mask level-triggered sources, EOI, and try for the
 *	    MP (giant/isr) lock; on failure forward the irq to the lock
 *	    holder (label 3/4);
 *	  - with the lock held, check the current thread's cpl and
 *	    critical-section priority; if blocked, mark ipending and
 *	    iret (label 2);
 *	  - otherwise raise the cpl, run the handler with interrupts
 *	    enabled, clear iactive, unmask, and exit through doreti
 *	    (which also pops the saved cpl).
 *
 *	XresumeNN is the re-entry point used by doreti_unpend for
 *	interrupts that were deferred via _ipending.
 */
#define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */  \
IDTVEC(vec_name) ;                                                      \
        PUSH_FRAME ;                                                    \
        movl    $KDSEL, %eax ;  /* reload with kernel's data segment */ \
        mov     %ax, %ds ;                                              \
        mov     %ax, %es ;                                              \
        movl    $KPSEL, %eax ;                                          \
        mov     %ax, %fs ;                                              \
;                                                                       \
        maybe_extra_ipending ;                                          \
;                                                                       \
        APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;    \
        lock ;                                  /* MP-safe */           \
        btsl    $(irq_num), iactive ;           /* lazy masking */      \
        jc      1f ;                            /* already active */    \
;                                                                       \
        MASK_LEVEL_IRQ(irq_num) ;                                       \
        EOI_IRQ(irq_num) ;                                              \
0: ;                                                                    \
        APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
        MP_TRYLOCK ;            /* XXX this is going away... */         \
        testl   %eax, %eax ;                    /* did we get it? */    \
        jz      3f ;                            /* no */                \
;                                                                       \
        APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
        movl    _curthread,%ebx ;                                       \
        testl   $IRQ_BIT(irq_num), TD_MACH+MTD_CPL(%ebx) ;              \
        jne     2f ;                            /* this INT masked */   \
        cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        jge     2f ;                            /* in critical sec */   \
;                                                                       \
        incb    _intr_nesting_level ;                                   \
;                                                                       \
  /* entry point used by doreti_unpend for HWIs. */                     \
__CONCAT(Xresume,irq_num): ;                                            \
        FAKE_MCOUNT(13*4(%esp)) ;               /* XXX avoid dbl cnt */ \
        lock ;  incl    _cnt+V_INTR ;           /* tally interrupts */  \
        movl    _intr_countp + (irq_num) * 4, %eax ;                    \
        lock ;  incl    (%eax) ;                                        \
;                                                                       \
        movl    _curthread, %ebx ;                                      \
        movl    TD_MACH+MTD_CPL(%ebx), %eax ;                           \
        pushl   %eax ;   /* cpl restored by doreti */                   \
        orl     _intr_mask + (irq_num) * 4, %eax ;                      \
        movl    %eax, TD_MACH+MTD_CPL(%ebx) ;                           \
        lock ;                                                          \
        andl    $~IRQ_BIT(irq_num), _ipending ;                         \
;                                                                       \
        pushl   _intr_unit + (irq_num) * 4 ;                            \
        APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;  \
        sti ;                                                           \
        call    *_intr_handler + (irq_num) * 4 ;                        \
        cli ;                                                           \
        APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;    \
        addl    $4,%esp ;                                               \
;                                                                       \
        lock ;  andl    $~IRQ_BIT(irq_num), iactive ;                   \
        UNMASK_IRQ(irq_num) ;                                           \
        APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;  \
        sti ;                           /* doreti repeats cli/sti */    \
        MEXITCOUNT ;                                                    \
        jmp     _doreti ;                                               \
;                                                                       \
        ALIGN_TEXT ;                                                    \
1: ;                                            /* active  */           \
        APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;  \
        MASK_IRQ(irq_num) ;                                             \
        EOI_IRQ(irq_num) ;                                              \
        lock ;                                                          \
        orl     $IRQ_BIT(irq_num), _ipending ;                          \
        movl    $TDPRI_CRIT,_reqpri ;                                   \
        lock ;                                                          \
        btsl    $(irq_num), iactive ;           /* still active */      \
        jnc     0b ;                            /* retry */             \
        POP_FRAME ;                                                     \
        iret ;          /* XXX:  iactive bit might be 0 now */          \
        ALIGN_TEXT ;                                                    \
2: ;                            /* masked by cpl, leave iactive set */  \
        APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;  \
        lock ;                                                          \
        orl     $IRQ_BIT(irq_num), _ipending ;                          \
        movl    $TDPRI_CRIT,_reqpri ;                                   \
        MP_RELLOCK ;                                                    \
        POP_FRAME ;                                                     \
        iret ;                                                          \
        ALIGN_TEXT ;                                                    \
3: ;                    /* other cpu has isr lock */                    \
        APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
        lock ;                                                          \
        orl     $IRQ_BIT(irq_num), _ipending ;                          \
        movl    $TDPRI_CRIT,_reqpri ;                                   \
        testl   $IRQ_BIT(irq_num), TD_MACH+MTD_CPL(%ebx) ;              \
        jne     4f ;                            /* this INT masked */   \
        call    forward_irq ;    /* forward irq to lock holder */       \
        POP_FRAME ;                             /* and return */        \
        iret ;                                                          \
        ALIGN_TEXT ;                                                    \
4: ;                                            /* blocked */           \
        APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
        POP_FRAME ;                             /* and return */        \
        iret
309
/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
        .text
        SUPERALIGN_TEXT
        .globl _Xspuriousint
_Xspuriousint:

        /* No EOI cycle used here */

        iret
325
326
/*
 * Handle TLB shootdowns.
 *	Reloading %cr3 invalidates the (non-global) TLB entries.  Uses
 *	%ss-override memory accesses so %ds never has to be reloaded;
 *	only %eax (and %fs under COUNT_XINVLTLB_HITS) is touched.
 */
        .text
        SUPERALIGN_TEXT
        .globl  _Xinvltlb
_Xinvltlb:
        pushl   %eax

#ifdef COUNT_XINVLTLB_HITS
        pushl   %fs
        movl    $KPSEL, %eax            /* per-cpu segment for _cpuid */
        mov     %ax, %fs
        movl    _cpuid, %eax
        popl    %fs
        ss                              /* stack segment, avoid %ds load */
        incl    _xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

        movl    %cr3, %eax              /* invalidate the TLB */
        movl    %eax, %cr3

        ss                              /* stack segment, avoid %ds load */
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        popl    %eax
        iret
354
355
#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xcpucheckstate
        .globl _checkstate_cpustate
        .globl _checkstate_curproc
        .globl _checkstate_pc
_Xcpucheckstate:
        pushl   %eax
        pushl   %ebx
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        movl    $0, %ebx                /* assume user mode (0) */
        movl    20(%esp), %eax          /* interrupted %cs */
        andl    $3, %eax                /* extract RPL */
        cmpl    $3, %eax                /* user-mode selector? */
        je      1f
        testl   $PSL_VM, 24(%esp)       /* vm86 mode counts as user too */
        jne     1f
        incl    %ebx                    /* system or interrupt */
1:
        movl    _cpuid, %eax
        movl    %ebx, _checkstate_cpustate(,%eax,4)
        movl    _curthread, %ebx
        movl    TD_PROC(%ebx),%ebx      /* thread -> process */
        movl    %ebx, _checkstate_curproc(,%eax,4)
        movl    16(%esp), %ebx          /* interrupted %eip */
        movl    %ebx, _checkstate_pc(,%eax,4)

        lock                            /* checkstate_probed_cpus |= (1<<id) */
        btsl    %eax, _checkstate_probed_cpus

        popl    %fs
        popl    %ds                     /* restore previous data segment */
        popl    %ebx
        popl    %eax
        iret

#endif /* BETTER_CLOCK */
416
/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 *
 * If an ast delivery is already pending for this cpu
 * (checkstate_pending_ast bit already set) the IPI is simply
 * acknowledged and dismissed at label 1.  Otherwise the giant lock is
 * taken, AST_PENDING is set (plus AST_RESCHED if this cpu is flagged in
 * resched_cpus), and delivery completes through doreti.
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xcpuast
_Xcpuast:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    _cpuid, %eax
        lock                            /* checkstate_need_ast &= ~(1<<id) */
        btrl    %eax, _checkstate_need_ast
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        lock
        btsl    %eax, _checkstate_pending_ast
        jc      1f                      /* already delivering an ast */

        FAKE_MCOUNT(13*4(%esp))

        /*
         * Giant locks do not come cheap.
         * A lot of cycles are going to be wasted here.
         */
        call    _get_mplock

        movl    _curthread, %eax
        pushl   TD_MACH+MTD_CPL(%eax)           /* cpl restored by doreti */

        orl     $AST_PENDING, _astpending       /* XXX */
        incb    _intr_nesting_level
        sti

        movl    _cpuid, %eax
        lock
        btrl    %eax, _checkstate_pending_ast
        lock
        btrl    %eax, CNAME(resched_cpus)       /* reschedule requested? */
        jnc     2f
        orl     $AST_PENDING+AST_RESCHED,_astpending
        lock
        incl    CNAME(want_resched_cnt)
2:
        lock
        incl    CNAME(cpuast_cnt)
        MEXITCOUNT
        jmp     _doreti
1:
        /* We are already in the process of delivering an ast for this CPU */
        POP_FRAME
        iret
478
479
/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 *
 *	Another cpu received a hardware interrupt it could not service
 *	because we hold the isr (MP) lock; it marked the irq ipending
 *	and forwarded it here.  If we can take the MP lock (and aren't
 *	nested too deeply) we deliver the pending interrupts via doreti;
 *	if the lock is gone again we re-forward to the new holder.
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xforward_irq
_Xforward_irq:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        FAKE_MCOUNT(13*4(%esp))

        MP_TRYLOCK
        testl   %eax,%eax               /* Did we get the lock ? */
        jz  1f                          /* No */

        lock
        incl    CNAME(forward_irq_hitcnt)
        cmpb    $4, _intr_nesting_level /* too deeply nested? */
        jae     2f

        movl    _curthread, %eax
        pushl   TD_MACH+MTD_CPL(%eax)           /* cpl restored by doreti */

        incb    _intr_nesting_level
        sti

        MEXITCOUNT
        jmp     _doreti                 /* Handle forwarded interrupt */
1:
        lock
        incl    CNAME(forward_irq_misscnt)
        call    forward_irq     /* Oops, we've lost the isr lock */
        MEXITCOUNT
        POP_FRAME
        iret
2:
        lock
        incl    CNAME(forward_irq_toodeepcnt)
3:
        MP_RELLOCK
        MEXITCOUNT
        POP_FRAME
        iret
531
/*
 * forward_irq:
 *	Send an XFORWARD_IRQ IPI to the cpu currently holding the MP
 *	lock (cpu #0 if the lock is free) so that it services the
 *	pending interrupt.  No-op unless invltlb_ok and
 *	forward_irq_enabled are both set.  Spins for ICR delivery-status
 *	idle before and after issuing the command.  Clobbers %eax, %ecx.
 */
forward_irq:
        MCOUNT
        cmpl    $0,_invltlb_ok
        jz      4f

        cmpl    $0, CNAME(forward_irq_enabled)
        jz      4f

        movl    _mp_lock,%eax
        cmpl    $FREE_LOCK,%eax
        jne     1f
        movl    $0, %eax                /* Pick CPU #0 if noone has lock */
1:
        shrl    $24,%eax                /* holder's cpuid from mp_lock top byte */
        movl    _cpu_num_to_apic_id(,%eax,4),%ecx
        shll    $24,%ecx                /* destination field is ICR_HI bits 24-31 */
        movl    lapic_icr_hi, %eax
        andl    $~APIC_ID_MASK, %eax
        orl     %ecx, %eax
        movl    %eax, lapic_icr_hi

2:
        movl    lapic_icr_lo, %eax      /* wait for previous IPI to deliver */
        andl    $APIC_DELSTAT_MASK,%eax
        jnz     2b
        movl    lapic_icr_lo, %eax
        andl    $APIC_RESV2_MASK, %eax
        orl     $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
        movl    %eax, lapic_icr_lo      /* fire the IPI */
3:
        movl    lapic_icr_lo, %eax      /* wait for this IPI to deliver */
        andl    $APIC_DELSTAT_MASK,%eax
        jnz     3b
4:
        ret
570         
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 *
 * The cpu saves its context into stoppcbs[cpuid], sets its bit in
 * stopped_cpus, then spins until its bit appears in started_cpus.
 * CPU 0 additionally runs the one-shot cpustop_restartfunc hook, if
 * set, on the way out.
 */

        .text
        SUPERALIGN_TEXT
        .globl _Xcpustop
_Xcpustop:
        pushl   %ebp
        movl    %esp, %ebp
        pushl   %eax
        pushl   %ecx
        pushl   %edx
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        movl    _cpuid, %eax
        imull   $PCB_SIZE, %eax
        leal    CNAME(stoppcbs)(%eax), %eax
        pushl   %eax
        call    CNAME(savectx)          /* Save process context */
        addl    $4, %esp


        movl    _cpuid, %eax

        lock
        btsl    %eax, _stopped_cpus     /* stopped_cpus |= (1<<id) */
1:
        btl     %eax, _started_cpus     /* while (!(started_cpus & (1<<id))) */
        jnc     1b

        lock
        btrl    %eax, _started_cpus     /* started_cpus &= ~(1<<id) */
        lock
        btrl    %eax, _stopped_cpus     /* stopped_cpus &= ~(1<<id) */

        test    %eax, %eax              /* only cpu 0 runs the hook */
        jnz     2f

        movl    CNAME(cpustop_restartfunc), %eax
        test    %eax, %eax
        jz      2f
        movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */

        call    *%eax
2:
        popl    %fs
        popl    %ds                     /* restore previous data segment */
        popl    %edx
        popl    %ecx
        popl    %eax
        movl    %ebp, %esp
        popl    %ebp
        iret
637
638
/*
 * Instantiate the fast and normal interrupt vectors for all 24 APIC
 * interrupt pins.  The MCOUNT_LABEL markers bracket the region for
 * the profiler.
 */
MCOUNT_LABEL(bintr)
        FAST_INTR(0,fastintr0)
        FAST_INTR(1,fastintr1)
        FAST_INTR(2,fastintr2)
        FAST_INTR(3,fastintr3)
        FAST_INTR(4,fastintr4)
        FAST_INTR(5,fastintr5)
        FAST_INTR(6,fastintr6)
        FAST_INTR(7,fastintr7)
        FAST_INTR(8,fastintr8)
        FAST_INTR(9,fastintr9)
        FAST_INTR(10,fastintr10)
        FAST_INTR(11,fastintr11)
        FAST_INTR(12,fastintr12)
        FAST_INTR(13,fastintr13)
        FAST_INTR(14,fastintr14)
        FAST_INTR(15,fastintr15)
        FAST_INTR(16,fastintr16)
        FAST_INTR(17,fastintr17)
        FAST_INTR(18,fastintr18)
        FAST_INTR(19,fastintr19)
        FAST_INTR(20,fastintr20)
        FAST_INTR(21,fastintr21)
        FAST_INTR(22,fastintr22)
        FAST_INTR(23,fastintr23)

/*
 * Extra work hooked into the clock interrupt (irq 0) entry: record
 * that a clock interrupt is pending, under clock_lock.
 */
#define CLKINTR_PENDING                                                 \
        pushl $clock_lock ;                                             \
        call s_lock ;                                                   \
        movl $1,CNAME(clkintr_pending) ;                                \
        call s_unlock ;                                                 \
        addl $4, %esp

        INTR(0,intr0, CLKINTR_PENDING)
        INTR(1,intr1,)
        INTR(2,intr2,)
        INTR(3,intr3,)
        INTR(4,intr4,)
        INTR(5,intr5,)
        INTR(6,intr6,)
        INTR(7,intr7,)
        INTR(8,intr8,)
        INTR(9,intr9,)
        INTR(10,intr10,)
        INTR(11,intr11,)
        INTR(12,intr12,)
        INTR(13,intr13,)
        INTR(14,intr14,)
        INTR(15,intr15,)
        INTR(16,intr16,)
        INTR(17,intr17,)
        INTR(18,intr18,)
        INTR(19,intr19,)
        INTR(20,intr20,)
        INTR(21,intr21,)
        INTR(22,intr22,)
        INTR(23,intr23,)
MCOUNT_LABEL(eintr)
697
/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
        .text
        SUPERALIGN_TEXT
        .globl  _Xrendezvous
_Xrendezvous:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        mov     %ax, %es
        movl    $KPSEL, %eax
        mov     %ax, %fs

        call    _smp_rendezvous_action

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
        POP_FRAME
        iret
719         
720         
        .data

#if 0
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
        .globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:      doreti_unpend
 */
        .long   Xresume0,  Xresume1,  Xresume2,  Xresume3
        .long   Xresume4,  Xresume5,  Xresume6,  Xresume7
        .long   Xresume8,  Xresume9,  Xresume10, Xresume11
        .long   Xresume12, Xresume13, Xresume14, Xresume15
        .long   Xresume16, Xresume17, Xresume18, Xresume19
        .long   Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:      doreti_unpend
 *  apic_ipl.s: splz_unpend
 */
        .long   _swi_null, swi_net, _swi_null, _swi_null
        .long   _swi_vm, _swi_null, _softclock

imasks:                         /* masks for interrupt handlers */
        .space  NHWI*4          /* padding; HWI masks are elsewhere */

        .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
        .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
#endif

/* active flag for lazy masking; one bit per irq, set/cleared by INTR() */
iactive:
        .long   0

#ifdef COUNT_XINVLTLB_HITS
/* per-cpu TLB-shootdown hit counters, bumped by _Xinvltlb */
        .globl  _xhits
_xhits:
        .space  (NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
        .globl _stopped_cpus, _started_cpus
_stopped_cpus:
        .long   0
_started_cpus:
        .long   0

#ifdef BETTER_CLOCK
/* bitmap of cpus that have answered an Xcpucheckstate IPI */
        .globl _checkstate_probed_cpus
_checkstate_probed_cpus:
        .long   0
#endif /* BETTER_CLOCK */
/* bitmaps driving Xcpuast delivery */
        .globl _checkstate_need_ast
_checkstate_need_ast:
        .long   0
_checkstate_pending_ast:
        .long   0
/* statistics counters for irq forwarding and cross-cpu asts */
        .globl CNAME(forward_irq_misscnt)
        .globl CNAME(forward_irq_toodeepcnt)
        .globl CNAME(forward_irq_hitcnt)
        .globl CNAME(resched_cpus)
        .globl CNAME(want_resched_cnt)
        .globl CNAME(cpuast_cnt)
        .globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
        .long 0
CNAME(forward_irq_hitcnt):
        .long 0
CNAME(forward_irq_toodeepcnt):
        .long 0
CNAME(resched_cpus):
        .long 0
CNAME(want_resched_cnt):
        .long 0
CNAME(cpuast_cnt):
        .long 0
/* one-shot function pointer run by cpu 0 in Xcpustop on restart */
CNAME(cpustop_restartfunc):
        .long 0



/* bitmap of level-triggered pins, consulted by MASK_LEVEL_IRQ */
        .globl  _apic_pin_trigger
_apic_pin_trigger:
        .long   0

        .text