MP Implementation 2/4: Implement a poor-man's IPI messaging subsystem.
[dragonfly.git] / sys / i386 / isa / apic_vector.s
1 /*
2  *      from: vector.s, 386BSD 0.1 unknown origin
3  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4  * $DragonFly: src/sys/i386/isa/Attic/apic_vector.s,v 1.9 2003/07/08 06:27:27 dillon Exp $
5  */
6
7
8 #include <machine/apic.h>
9 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
11
12 /* convert an absolute IRQ# into a bitmask (bit irq_num of a 32-bit word) */
13 #define IRQ_LBIT(irq_num)       (1 << (irq_num))
14
15 /* make an index into the IO APIC redirection table from the IRQ#;
   each entry occupies two 32-bit registers, starting at register 0x10 */
16 #define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
17
18 /*
19  * Push an interrupt frame in a format acceptable to doreti, reload
20  * the segment registers for the kernel.  %ds/%es are loaded with
 *  KDSEL and %fs with KPSEL (%fs is what the PCPU() accessors use).
21  */
22 #define PUSH_FRAME                                                      \
23         pushl   $0 ;            /* dummy error code */                  \
24         pushl   $0 ;            /* dummy trap type */                   \
25         pushal ;                                                        \
26         pushl   %ds ;           /* save data and extra segments ... */  \
27         pushl   %es ;                                                   \
28         pushl   %fs ;                                                   \
29         mov     $KDSEL,%ax ;                                            \
30         mov     %ax,%ds ;                                               \
31         mov     %ax,%es ;                                               \
32         mov     $KPSEL,%ax ;                                            \
33         mov     %ax,%fs ;                                               \
34
/*
 * Build a dummy doreti-style interrupt frame from within the kernel
 * (used by FAST_UNPEND).  Total size is 17 dwords, matching POP_DUMMY:
 * flags + cs + eip + 2 dummy codes + 12 uninitialized slots.
 */
35 #define PUSH_DUMMY                                                      \
36         pushfl ;                /* phys int frame / flags */            \
37         pushl %cs ;             /* phys int frame / cs */               \
38         pushl   12(%esp) ;      /* original caller eip */               \
39         pushl   $0 ;            /* dummy error code */                  \
40         pushl   $0 ;            /* dummy trap type */                   \
41         subl    $12*4,%esp ;    /* pushal + 3 seg regs (dummy) + CPL */ \
42
43 /*
44  * Warning: POP_FRAME can only be used if there is no chance of a
45  * segment register being changed (e.g. by procfs), which is why syscalls
46  * have to use doreti.  Pops the frame pushed by PUSH_FRAME in reverse
 *  order, then discards the two dummy code slots.
47  */
48 #define POP_FRAME                                                       \
49         popl    %fs ;                                                   \
50         popl    %es ;                                                   \
51         popl    %ds ;                                                   \
52         popal ;                                                         \
53         addl    $2*4,%esp ;     /* dummy trap & error codes */          \
54
/* Discard the 17-dword dummy frame built by PUSH_DUMMY. */
55 #define POP_DUMMY                                                       \
56         addl    $17*4,%esp ;                                            \
57
/*
 * int_to_apicintpin[] entries are 16 bytes each; offset 8 appears to hold
 * the ioapic register-select address and offset 12 the redirection-table
 * index for the IRQ (layout inferred from MASK_IRQ/UNMASK_IRQ usage —
 * confirm against intr_machdep.h).
 */
58 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
59 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
60
61 /*
62  * Interrupts are expected to already be disabled when using these
63  * IMASK_*() macros.  imen_spinlock serializes access to apic_imen and
 *  the IOAPIC index/window register pair.
64  */
65 #define IMASK_LOCK                                                      \
66         SPIN_LOCK(imen_spinlock) ;                                      \
67
68 #define IMASK_UNLOCK                                                    \
69         SPIN_UNLOCK(imen_spinlock) ;                                    \
70
/*
 * MASK_IRQ: set the IRQ's bit in apic_imen and set IOART_INTMASK in its
 * IOAPIC redirection entry.  Idempotent — skips the IOAPIC write if the
 * bit was already set.  Clobbers %eax and %ecx.
 */
71 #define MASK_IRQ(irq_num)                                               \
72         IMASK_LOCK ;                            /* into critical reg */ \
73         testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
74         jne     7f ;                    /* masked, don't mask */        \
75         orl     $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */  \
76         movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
77         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
78         movl    %eax, (%ecx) ;                  /* write the index */   \
79         movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
80         orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
81         movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
82 7: ;                                            /* already masked */    \
83         IMASK_UNLOCK ;                                                  \
84
85 /*
86  * Test to see whether we are handling an edge or level triggered INT.
87  *  Level-triggered INTs must still be masked as we don't clear the source,
88  *  and the EOI cycle would cause redundant INTs to occur.
 *   apic_pin_trigger has a bit set for each level-triggered pin.
89  */
90 #define MASK_LEVEL_IRQ(irq_num)                                         \
91         testl   $IRQ_LBIT(irq_num), apic_pin_trigger ;                  \
92         jz      9f ;                            /* edge, don't mask */  \
93         MASK_IRQ(irq_num) ;                                             \
94 9: ;                                                                    \
95
96
96
/*
 * EOI_IRQ: write the local APIC EOI register, but only if the IRQ's bit
 * is actually set in the in-service register (read directly from
 * lapic_isr1, or via the precomputed apic_isrbit_location table when
 * APIC_INTR_REORDER is defined).  Clobbers %eax in the reorder case.
 */
97 #ifdef APIC_INTR_REORDER
98 #define EOI_IRQ(irq_num)                                                \
99         movl    apic_isrbit_location + 8 * (irq_num), %eax ;            \
100         movl    (%eax), %eax ;                                          \
101         testl   apic_isrbit_location + 4 + 8 * (irq_num), %eax ;        \
102         jz      9f ;                            /* not active */        \
103         movl    $0, lapic_eoi ;                                         \
104 9:                                                                      \
105
106 #else
107
108 #define EOI_IRQ(irq_num)                                                \
109         testl   $IRQ_LBIT(irq_num), lapic_isr1;                         \
110         jz      9f      ;                       /* not active */        \
111         movl    $0, lapic_eoi;                                          \
112 9:                                                                      \
113
114 #endif
115         
116 /*
117  * Test to see if the source is currently masked, clear the mask if so.
 *   Mirror image of MASK_IRQ.  Clobbers %eax and %ecx.
118  */
119 #define UNMASK_IRQ(irq_num)                                     \
120         IMASK_LOCK ;                            /* into critical reg */ \
121         testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
122         je      7f ;                    /* bit clear, not masked */     \
123         andl    $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */    \
124         movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
125         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
126         movl    %eax,(%ecx) ;                   /* write the index */   \
127         movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
128         andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
129         movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
130 7: ;                                                                    \
131         IMASK_UNLOCK ;                                                  \
132
133 /*
134  * Fast interrupt call handlers run in the following sequence:
135  *
136  *      - Push the trap frame required by doreti
137  *      - Mask the interrupt and reenable its source
138  *      - If we cannot take the interrupt set its fpending bit and
139  *        doreti.
140  *      - If we can take the interrupt clear its fpending bit,
141  *        call the handler, then unmask and doreti.
142  *
 *      "Cannot take" means we are in a critical section (TD_PRI >=
 *      TDPRI_CRIT), the irq is cpl-masked, or try_mplock fails; in all
 *      three cases the source stays masked and fpending is set.
 *
143  * YYY can cache gd base pointer instead of using hidden %fs prefixes.
144  */
145
146 #define FAST_INTR(irq_num, vec_name)                                    \
147         .text ;                                                         \
148         SUPERALIGN_TEXT ;                                               \
149 IDTVEC(vec_name) ;                                                      \
150         PUSH_FRAME ;                                                    \
151         FAKE_MCOUNT(13*4(%esp)) ;                                       \
152         MASK_LEVEL_IRQ(irq_num) ;                                       \
153         EOI_IRQ(irq_num) ;                                              \
154         incl    PCPU(intr_nesting_level) ;                              \
155         movl    PCPU(curthread),%ebx ;                                  \
156         movl    TD_CPL(%ebx),%eax ;                                     \
157         pushl   %eax ;                                                  \
158         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
159         jge     1f ;                                                    \
160         testl   $IRQ_LBIT(irq_num), %eax ;                              \
161         jz      2f ;                                                    \
162 1: ;                                                                    \
163         /* set the pending bit and return, leave interrupt masked */    \
164         orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
165         movl    $TDPRI_CRIT, PCPU(reqpri) ;                             \
166         jmp     5f ;                                                    \
167 2: ;                                                                    \
168         /* try to get giant */                                          \
169         call    try_mplock ;                                            \
170         testl   %eax,%eax ;                                             \
171         jz      1b ;                                                    \
172         /* clear pending bit, run handler */                            \
173         addl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
174         andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
175         pushl   intr_unit + (irq_num) * 4 ;                             \
176         call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
177         addl    $4, %esp ;                                              \
178         subl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
179         incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
180         movl    intr_countp + (irq_num) * 4, %eax ;                     \
181         incl    (%eax) ;                                                \
182         call    rel_mplock ;                                            \
183         UNMASK_IRQ(irq_num) ;                                           \
184 5: ;                                                                    \
185         MEXITCOUNT ;                                                    \
186         jmp     doreti ;                                                \
187
188 /*
189  * Restart fast interrupt held up by critical section or cpl.
190  *
191  *      - Push a dummy trap frame as required by doreti
192  *      - The interrupt source is already masked
193  *      - Clear the fpending bit
194  *      - Run the handler
195  *      - Unmask the interrupt
196  *      - Pop the dummy frame and do a normal return
197  *
198  *      The BGL is held on call and left held on return.
199  *
200  *      YYY can cache gd base pointer instead of using hidden %fs
201  *      prefixes.
202  */
203
204 #define FAST_UNPEND(irq_num, vec_name)                                  \
205         .text ;                                                         \
206         SUPERALIGN_TEXT ;                                               \
207 IDTVEC(vec_name) ;                                                      \
208         pushl   %ebp ;                                                  \
209         movl    %esp,%ebp ;                                             \
210         PUSH_DUMMY ;                                                    \
211         pushl   intr_unit + (irq_num) * 4 ;                             \
212         call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
213         addl    $4, %esp ;                                              \
214         incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
215         movl    intr_countp + (irq_num) * 4, %eax ;                     \
216         incl    (%eax) ;                                                \
217         UNMASK_IRQ(irq_num) ;                                           \
218         POP_DUMMY ;                                                     \
219         popl %ebp ;                                                     \
220         ret ;                                                           \
221
222 /*
223  * Slow interrupt call handlers run in the following sequence:
224  *
225  *      - Push the trap frame required by doreti.
226  *      - Mask the interrupt and reenable its source.
227  *      - If we cannot take the interrupt set its ipending bit and
228  *        doreti.  In addition to checking for a critical section
229  *        and cpl mask we also check to see if the thread is still
230  *        running.
231  *      - If we can take the interrupt clear its ipending bit
232  *        and schedule the thread.  Leave interrupts masked and doreti.
233  *
234  *      Note that calls to sched_ithd() are made with interrupts enabled
235  *      and outside a critical section.  YYY sched_ithd may preempt us
236  *      synchronously (fix interrupt stacking)
237  *
238  *      YYY can cache gd base pointer instead of using hidden %fs
239  *      prefixes.
240  */
241
242 #define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
243         .text ;                                                         \
244         SUPERALIGN_TEXT ;                                               \
245 IDTVEC(vec_name) ;                                                      \
246         PUSH_FRAME ;                                                    \
247         maybe_extra_ipending ;                                          \
248 ;                                                                       \
249         MASK_LEVEL_IRQ(irq_num) ;                                       \
250         EOI_IRQ(irq_num) ;                                              \
251         incl    PCPU(intr_nesting_level) ;                              \
252         movl    PCPU(curthread),%ebx ;                                  \
253         movl    TD_CPL(%ebx),%eax ;                                     \
254         pushl   %eax ;          /* cpl for doreti to restore */         \
255         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
256         jge     1f ;                                                    \
257         testl   $IRQ_LBIT(irq_num),%eax ;                               \
258         jz      2f ;                                                    \
259 1: ;                                                                    \
260         /* set the pending bit and return, leave the interrupt masked */ \
261         orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
262         movl    $TDPRI_CRIT, PCPU(reqpri) ;                             \
263         jmp     5f ;                                                    \
264 2: ;                                                                    \
265         /* set running bit, clear pending bit, run handler */           \
266         andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
267         sti ;                                                           \
268         pushl   $irq_num ;                                              \
269         call    sched_ithd ;                                            \
270         addl    $4,%esp ;                                               \
271         incl    PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */  \
272         movl    intr_countp + (irq_num) * 4,%eax ;                      \
273         incl    (%eax) ;                                                \
274 5: ;                                                                    \
275         MEXITCOUNT ;                                                    \
276         jmp     doreti ;                                                \
277
278 /*
279  * Unmask a slow interrupt.  This function is used by interrupt threads
280  * after they have descheduled themselves to reenable interrupts and
281  * possibly cause a reschedule to occur.
 *  Plain call/ret (no interrupt frame); the %ebp frame exists only so
 *  ddb can backtrace through it.
282  */
283
284 #define INTR_UNMASK(irq_num, vec_name, icu)                             \
285         .text ;                                                         \
286         SUPERALIGN_TEXT ;                                               \
287 IDTVEC(vec_name) ;                                                      \
288         pushl %ebp ;     /* frame for ddb backtrace */                  \
289         movl    %esp, %ebp ;                                            \
290         UNMASK_IRQ(irq_num) ;                                           \
291         popl %ebp ;                                                     \
292         ret ;                                                           \
293
294 #if 0
295         /* XXX forward_irq to cpu holding the BGL? */
/*
 * NOTE(review): dead fragment — these continuation lines belonged to a
 * removed macro, and the "spurious INTerrupts" description below actually
 * documents Xspuriousint, which follows the #endif.
 */
296
297         ALIGN_TEXT ;                                                    \
298 3: ;                    /* other cpu has isr lock */                    \
299         lock ;                                                          \
300         orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
301         movl    $TDPRI_CRIT,_reqpri ;                                   \
302         testl   $IRQ_LBIT(irq_num), TD_CPL(%ebx) ;              \
303         jne     4f ;                            /* this INT masked */   \
304         call    forward_irq ;    /* forward irq to lock holder */       \
305         POP_FRAME ;                             /* and return */        \
306         iret ;                                                          \
307         ALIGN_TEXT ;                                                    \
308 4: ;                                            /* blocked */           \
309         POP_FRAME ;                             /* and return */        \
310         iret
311
312 /*
313  * Handle "spurious INTerrupts".
314  * Notes:
315  *  This is different than the "spurious INTerrupt" generated by an
316  *   8259 PIC for missing INTs.  See the APIC documentation for details.
317  *  This routine should NOT do an 'EOI' cycle.
318  */
319
320 #endif
321
/*
 * Spurious-interrupt vector for the local APIC.  Unlike the 8259's
 * spurious interrupt this must NOT perform an EOI cycle — just return.
 */
322         .text
323         SUPERALIGN_TEXT
324         .globl Xspuriousint
325 Xspuriousint:
326
327         /* No EOI cycle used here */
328
329         iret
330
331
331
332 /*
333  * Handle TLB shootdowns.  Flushes this cpu's TLB by reloading %cr3,
 *  then EOIs.  Only %eax is used; it is saved/restored around the
 *  handler so no full trap frame is needed.
334  */
335         .text
336         SUPERALIGN_TEXT
337         .globl  Xinvltlb
338 Xinvltlb:
339         pushl   %eax
340
341 #ifdef COUNT_XINVLTLB_HITS
342         pushl   %fs
343         movl    $KPSEL, %eax
344         mov     %ax, %fs
345         movl    PCPU(cpuid), %eax
346         popl    %fs
347         ss                              /* override: %ds not reloaded here */
348         incl    _xhits(,%eax,4)
349 #endif /* COUNT_XINVLTLB_HITS */
350
351         movl    %cr3, %eax              /* invalidate the TLB */
352         movl    %eax, %cr3
353
354         ss                              /* stack segment, avoid %ds load */
355         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
356
357         popl    %eax
358         iret
359
360
360
361 #if 0
362 #ifdef BETTER_CLOCK
363
364 /*
365  * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
366  *
367  *  - Stores current cpu state in checkstate_cpustate[cpuid]
368  *      0 == user, 1 == sys, 2 == intr
369  *  - Stores current process in checkstate_curproc[cpuid]
370  *
371  *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
372  *
373  * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
374  */
375
376         .text
377         SUPERALIGN_TEXT
378         .globl Xcpucheckstate
379         .globl checkstate_cpustate
380         .globl checkstate_curproc
381         .globl checkstate_pc
382 Xcpucheckstate:
383         pushl   %eax
384         pushl   %ebx            
385         pushl   %ds                     /* save current data segment */
386         pushl   %fs
387
388         movl    $KDSEL, %eax
389         mov     %ax, %ds                /* use KERNEL data segment */
390         movl    $KPSEL, %eax
391         mov     %ax, %fs
392
393         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
394
395         movl    $0, %ebx                /* assume user mode (state 0) */
396         movl    20(%esp), %eax          /* saved %cs (layout above) */
397         andl    $3, %eax                /* CPL of interrupted context */
398         cmpl    $3, %eax
399         je      1f                      /* user mode */
400         testl   $PSL_VM, 24(%esp)       /* vm86 counts as user too */
401         jne     1f
402         incl    %ebx                    /* system or interrupt */
403 1:      
404         movl    PCPU(cpuid), %eax
405         movl    %ebx, checkstate_cpustate(,%eax,4)
406         movl    PCPU(curthread), %ebx
407         movl    TD_PROC(%ebx),%ebx
408         movl    %ebx, checkstate_curproc(,%eax,4)
409         movl    16(%esp), %ebx
410         movl    %ebx, checkstate_pc(,%eax,4)
411
412         lock                            /* checkstate_probed_cpus |= (1<<id) */
413         btsl    %eax, checkstate_probed_cpus
414
415         popl    %fs
416         popl    %ds                     /* restore previous data segment */
417         popl    %ebx
418         popl    %eax
419         iret
420
421 #endif /* BETTER_CLOCK */
422 #endif
423
424 /*
425  * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
426  *
427  *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
428  *  - MP safe in regards to setting AST_PENDING because doreti is in
429  *    a cli mode when it checks.
 *   - checkstate_pending_ast is used as a per-cpu re-entry guard: if our
 *     bit is already set we are mid-delivery and just return.
430  */
431
432         .text
433         SUPERALIGN_TEXT
434         .globl Xcpuast
435 Xcpuast:
436         PUSH_FRAME
437
438         movl    PCPU(cpuid), %eax
439         lock                            /* checkstate_need_ast &= ~(1<<id) */
440         btrl    %eax, checkstate_need_ast
441         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
442
443         lock
444         btsl    %eax, checkstate_pending_ast
445         jc      1f                      /* already delivering -> bail */
446
447         FAKE_MCOUNT(13*4(%esp))
448
449         movl    PCPU(curthread), %eax
450         pushl   TD_CPL(%eax)            /* cpl restored by doreti */
451
452         orl     $AST_PENDING, PCPU(astpending)  /* XXX */
453         incl    PCPU(intr_nesting_level)
454         sti
455         
456         movl    PCPU(cpuid), %eax
457         lock    
458         btrl    %eax, checkstate_pending_ast
459         lock    
460         btrl    %eax, CNAME(resched_cpus)
461         jnc     2f
462         orl     $AST_PENDING+AST_RESCHED,PCPU(astpending)
463 2:              
464         MEXITCOUNT
465         jmp     doreti
466 1:
467         /* We are already in the process of delivering an ast for this CPU */
468         POP_FRAME
469         iret                    
470
471
471
472 /*
473  *       Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 *        If we can grab the MP lock the pending interrupt is handled via
 *        doreti; otherwise the irq is forwarded again to whoever holds it.
474  */
475
476         .text
477         SUPERALIGN_TEXT
478         .globl Xforward_irq
479 Xforward_irq:
480         PUSH_FRAME
481
482         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
483
484         FAKE_MCOUNT(13*4(%esp))
485
486         call    try_mplock
487         testl   %eax,%eax               /* Did we get the lock ? */
488         jz  1f                          /* No */
489
490         incl    PCPU(cnt)+V_FORWARDED_HITS
491         
492         movl    PCPU(curthread), %eax
493         pushl   TD_CPL(%eax)            /* cpl restored by doreti */
494
495         incl    PCPU(intr_nesting_level)
496         sti
497         
498         MEXITCOUNT
499         jmp     doreti                  /* Handle forwarded interrupt */
500 1:
501         incl    PCPU(cnt)+V_FORWARDED_MISSES
502         call    forward_irq     /* Oops, we've lost the isr lock */
503         MEXITCOUNT
504         POP_FRAME
505         iret
/* NOTE(review): label 3 below appears unreachable — nothing jumps to it. */
506 3:      
507         call    rel_mplock
508         MEXITCOUNT
509         POP_FRAME
510         iret
511
512 /*
513  * forward_irq: send an XFORWARD_IRQ IPI to the cpu owning the MP lock
 *  (cpu 0 if the lock is free).  Spins on the local APIC ICR delivery
 *  status before and after issuing the IPI.  No-op unless invltlb_ok
 *  and forward_irq_enabled are both nonzero.  Clobbers %eax, %ecx.
514  */
515 forward_irq:
516         MCOUNT
517         cmpl    $0,invltlb_ok
518         jz      4f
519
520         cmpl    $0, CNAME(forward_irq_enabled)
521         jz      4f
522
523         movl    mp_lock,%eax
524         cmpl    $MP_FREE_LOCK,%eax
525         jne     1f
526         movl    $0, %eax                /* Pick CPU #0 if no one has lock */
527 1:
528         shrl    $24,%eax                /* owner cpu id from high byte */
529         movl    cpu_num_to_apic_id(,%eax,4),%ecx
530         shll    $24,%ecx                /* into ICR destination field */
531         movl    lapic_icr_hi, %eax
532         andl    $~APIC_ID_MASK, %eax
533         orl     %ecx, %eax
534         movl    %eax, lapic_icr_hi
535
536 2:                                      /* wait for ICR idle */
537         movl    lapic_icr_lo, %eax
538         andl    $APIC_DELSTAT_MASK,%eax
539         jnz     2b
540         movl    lapic_icr_lo, %eax
541         andl    $APIC_RESV2_MASK, %eax
542         orl     $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
543         movl    %eax, lapic_icr_lo
544 3:                                      /* wait for delivery to complete */
545         movl    lapic_icr_lo, %eax
546         andl    $APIC_DELSTAT_MASK,%eax
547         jnz     3b
548 4:              
549         ret
550
551 /*
552  * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
553  *
554  *  - Signals its receipt.
555  *  - Waits for permission to restart.
556  *  - Signals its restart.
 *
 *  Saves context into stoppcbs[cpuid], then spins on started_cpus.
557  */
558
559         .text
560         SUPERALIGN_TEXT
561         .globl Xcpustop
562 Xcpustop:
563         pushl   %ebp
564         movl    %esp, %ebp
565         pushl   %eax
566         pushl   %ecx
567         pushl   %edx
568         pushl   %ds                     /* save current data segment */
569         pushl   %fs
570
571         movl    $KDSEL, %eax
572         mov     %ax, %ds                /* use KERNEL data segment */
573         movl    $KPSEL, %eax
574         mov     %ax, %fs
575
576         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
577
578         movl    PCPU(cpuid), %eax
579         imull   $PCB_SIZE, %eax
580         leal    CNAME(stoppcbs)(%eax), %eax     /* &stoppcbs[cpuid] */
581         pushl   %eax
582         call    CNAME(savectx)          /* Save process context */
583         addl    $4, %esp
584         
585                 
586         movl    PCPU(cpuid), %eax
587
588         lock
589         btsl    %eax, stopped_cpus      /* stopped_cpus |= (1<<id) */
590 1:
591         btl     %eax, started_cpus      /* while (!(started_cpus & (1<<id))) */
592         jnc     1b
593
594         lock
595         btrl    %eax, started_cpus      /* started_cpus &= ~(1<<id) */
596         lock
597         btrl    %eax, stopped_cpus      /* stopped_cpus &= ~(1<<id) */
598
599         test    %eax, %eax              /* %eax == cpuid: only cpu 0 ... */
600         jnz     2f                      /* ... runs the restart hook */
601
602         movl    CNAME(cpustop_restartfunc), %eax
603         test    %eax, %eax
604         jz      2f
605         movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */
606
607         call    *%eax
608 2:
609         popl    %fs
610         popl    %ds                     /* restore previous data segment */
611         popl    %edx
612         popl    %ecx
613         popl    %eax
614         movl    %ebp, %esp
615         popl    %ebp
616         iret
617
618         /*
619          * For now just have one ipiq IPI, but what we really want is
620          * to have one for each source cpu so the APICs don't get stalled
621          * backlogging the requests.
622          */
623         .text
624         SUPERALIGN_TEXT
625         .globl Xipiq
626 Xipiq:
627         PUSH_FRAME
628         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
629         FAKE_MCOUNT(13*4(%esp))
630
631         movl    PCPU(curthread),%ebx
632         cmpl    $TDPRI_CRIT,TD_PRI(%ebx)
633         jge     1f
634         addl    $TDPRI_CRIT,TD_PRI(%ebx)
635         call    lwkt_process_ipiq
636         subl    $TDPRI_CRIT,TD_PRI(%ebx)
637         pushl   TD_CPL(%ebx)
638         incl    PCPU(intr_nesting_level)
639         MEXITCOUNT
640         jmp     doreti
641 1:
        /* in a critical section: defer processing via the AST mechanism */
642         movl    $TDPRI_CRIT,PCPU(reqpri)
643         orl     $AST_IPIQ,PCPU(astpending)
644         MEXITCOUNT
645         POP_FRAME
646         iret
647
647
/* Instantiate the 24 fast, slow, and unpend vectors for IRQs 0-23. */
648 MCOUNT_LABEL(bintr)
649         FAST_INTR(0,fastintr0)
650         FAST_INTR(1,fastintr1)
651         FAST_INTR(2,fastintr2)
652         FAST_INTR(3,fastintr3)
653         FAST_INTR(4,fastintr4)
654         FAST_INTR(5,fastintr5)
655         FAST_INTR(6,fastintr6)
656         FAST_INTR(7,fastintr7)
657         FAST_INTR(8,fastintr8)
658         FAST_INTR(9,fastintr9)
659         FAST_INTR(10,fastintr10)
660         FAST_INTR(11,fastintr11)
661         FAST_INTR(12,fastintr12)
662         FAST_INTR(13,fastintr13)
663         FAST_INTR(14,fastintr14)
664         FAST_INTR(15,fastintr15)
665         FAST_INTR(16,fastintr16)
666         FAST_INTR(17,fastintr17)
667         FAST_INTR(18,fastintr18)
668         FAST_INTR(19,fastintr19)
669         FAST_INTR(20,fastintr20)
670         FAST_INTR(21,fastintr21)
671         FAST_INTR(22,fastintr22)
672         FAST_INTR(23,fastintr23)
673         
674         /* YYY what is this garbage? */
        /* (records a pending clock tick under the clock lock; hooked into
           the IRQ0 vector via INTR's maybe_extra_ipending argument) */
675 #define CLKINTR_PENDING                                                 \
676         call    clock_lock ;                                            \
677         movl $1,CNAME(clkintr_pending) ;                                \
678         call    clock_unlock ;                                          \
679
680         INTR(0,intr0, CLKINTR_PENDING)
681         INTR(1,intr1,)
682         INTR(2,intr2,)
683         INTR(3,intr3,)
684         INTR(4,intr4,)
685         INTR(5,intr5,)
686         INTR(6,intr6,)
687         INTR(7,intr7,)
688         INTR(8,intr8,)
689         INTR(9,intr9,)
690         INTR(10,intr10,)
691         INTR(11,intr11,)
692         INTR(12,intr12,)
693         INTR(13,intr13,)
694         INTR(14,intr14,)
695         INTR(15,intr15,)
696         INTR(16,intr16,)
697         INTR(17,intr17,)
698         INTR(18,intr18,)
699         INTR(19,intr19,)
700         INTR(20,intr20,)
701         INTR(21,intr21,)
702         INTR(22,intr22,)
703         INTR(23,intr23,)
704
705         FAST_UNPEND(0,fastunpend0)
706         FAST_UNPEND(1,fastunpend1)
707         FAST_UNPEND(2,fastunpend2)
708         FAST_UNPEND(3,fastunpend3)
709         FAST_UNPEND(4,fastunpend4)
710         FAST_UNPEND(5,fastunpend5)
711         FAST_UNPEND(6,fastunpend6)
712         FAST_UNPEND(7,fastunpend7)
713         FAST_UNPEND(8,fastunpend8)
714         FAST_UNPEND(9,fastunpend9)
715         FAST_UNPEND(10,fastunpend10)
716         FAST_UNPEND(11,fastunpend11)
717         FAST_UNPEND(12,fastunpend12)
718         FAST_UNPEND(13,fastunpend13)
719         FAST_UNPEND(14,fastunpend14)
720         FAST_UNPEND(15,fastunpend15)
721         FAST_UNPEND(16,fastunpend16)
722         FAST_UNPEND(17,fastunpend17)
723         FAST_UNPEND(18,fastunpend18)
724         FAST_UNPEND(19,fastunpend19)
725         FAST_UNPEND(20,fastunpend20)
726         FAST_UNPEND(21,fastunpend21)
727         FAST_UNPEND(22,fastunpend22)
728         FAST_UNPEND(23,fastunpend23)
729 MCOUNT_LABEL(eintr)
730
730
731         /*
732          * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
733          *
734          * - Calls the generic rendezvous action function.
         *
         * NOTE(review): the explicit %ds/%es/%fs reloads below are
         * redundant — PUSH_FRAME already loads them — but harmless.
735          */
736         .text
737         SUPERALIGN_TEXT
738         .globl  Xrendezvous
739 Xrendezvous:
740         PUSH_FRAME
741         movl    $KDSEL, %eax
742         mov     %ax, %ds                /* use KERNEL data segment */
743         mov     %ax, %es
744         movl    $KPSEL, %eax
745         mov     %ax, %fs
746
747         call    smp_rendezvous_action
748
749         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
750         POP_FRAME
751         iret
752         
753         
753         
754         .data
755
756 #if 0
757 /*
758  * Addresses of interrupt handlers.
759  *  XresumeNN: Resumption addresses for HWIs.
760  */
761         .globl _ihandlers
762 _ihandlers:
763 /*
764  * used by:
765  *  ipl.s:      doreti_unpend
766  */
767         .long   Xresume0,  Xresume1,  Xresume2,  Xresume3 
768         .long   Xresume4,  Xresume5,  Xresume6,  Xresume7
769         .long   Xresume8,  Xresume9,  Xresume10, Xresume11
770         .long   Xresume12, Xresume13, Xresume14, Xresume15 
771         .long   Xresume16, Xresume17, Xresume18, Xresume19
772         .long   Xresume20, Xresume21, Xresume22, Xresume23
773 /*
774  * used by:
775  *  ipl.s:      doreti_unpend
776  *  apic_ipl.s: splz_unpend
777  */
778         .long   _swi_null, swi_net, _swi_null, _swi_null
779         .long   _swi_vm, _swi_null, _softclock
780
781 imasks:                         /* masks for interrupt handlers */
782         .space  NHWI*4          /* padding; HWI masks are elsewhere */
783
784         .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
785         .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
786 #endif  /* 0 */
787
788
789 #ifdef COUNT_XINVLTLB_HITS
790         .globl  xhits
791 xhits:
792         .space  (NCPU * 4), 0
793 #endif /* COUNT_XINVLTLB_HITS */
794
795 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
796         .globl stopped_cpus, started_cpus
797 stopped_cpus:
798         .long   0
799 started_cpus:
800         .long   0
801
802 #ifdef BETTER_CLOCK
803         .globl checkstate_probed_cpus
804 checkstate_probed_cpus:
805         .long   0       
806 #endif /* BETTER_CLOCK */
/* cpumask of cpus with a pending cross-cpu AST (see Xcpuast) */
807         .globl checkstate_need_ast
808 checkstate_need_ast:
809         .long   0
/* per-cpu AST delivery-in-progress guard, local to this file */
810 checkstate_pending_ast:
811         .long   0
812         .globl CNAME(resched_cpus)
813         .globl CNAME(cpustop_restartfunc)
814 CNAME(resched_cpus):
815         .long 0
816 CNAME(cpustop_restartfunc):
817         .long 0
818                 
/* bitmask of level-triggered irq pins (tested by MASK_LEVEL_IRQ) */
819         .globl  apic_pin_trigger
820 apic_pin_trigger:
821         .long   0
822
823         .text