Remove all remaining SPL code. Replace the mtd_cpl field in the machine
[dragonfly.git] / sys / i386 / apic / apic_vector.s
1 /*
2  *      from: vector.s, 386BSD 0.1 unknown origin
3  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4  * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.19 2005/06/16 21:12:47 dillon Exp $
5  */
6
7
8 #include <machine/apicreg.h>
9 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
11
12 /* convert an absolute IRQ# into a bitmask */
13 #define IRQ_LBIT(irq_num)       (1 << (irq_num))
14
15 /* make an index into the IO APIC from the IRQ# */
16 #define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
17
18 /*
19  * Push an interrupt frame in a format acceptable to doreti, reload
20  * the segment registers for the kernel.
 *
 * The frame mirrors a trapframe: dummy error code and trap type, all
 * general registers (pushal), then %ds/%es/%fs.  %ds/%es are reloaded
 * with the kernel data selector and %fs with the per-cpu private
 * selector so subsequent PCPU() references work.  Clobbers %ax.
21  */
22 #define PUSH_FRAME                                                      \
23         pushl   $0 ;            /* dummy error code */                  \
24         pushl   $0 ;            /* dummy trap type */                   \
25         pushal ;                                                        \
26         pushl   %ds ;           /* save data and extra segments ... */  \
27         pushl   %es ;                                                   \
28         pushl   %fs ;                                                   \
29         mov     $KDSEL,%ax ;                                            \
30         mov     %ax,%ds ;                                               \
31         mov     %ax,%es ;                                               \
32         mov     $KPSEL,%ax ;                                            \
33         mov     %ax,%fs ;                                               \
34
/*
 * Build a dummy interrupt frame from a normal subroutine context (used
 * by FAST_UNPEND, which has already pushed %ebp).  Pushes flags, %cs
 * and the caller's eip (12(%esp) skips the two words just pushed plus
 * the saved %ebp), then dummy error/trap codes, then reserves 12 more
 * longs standing in for pushal, the three segment registers and the
 * CPL slot.  Total: the 17-long frame that POP_DUMMY removes.
 */
35 #define PUSH_DUMMY                                                      \
36         pushfl ;                /* phys int frame / flags */            \
37         pushl %cs ;             /* phys int frame / cs */               \
38         pushl   12(%esp) ;      /* original caller eip */               \
39         pushl   $0 ;            /* dummy error code */                  \
40         pushl   $0 ;            /* dummy trap type */                   \
41         subl    $12*4,%esp ;    /* pushal + 3 seg regs (dummy) + CPL */ \
42
43 /*
44  * Warning: POP_FRAME can only be used if there is no chance of a
45  * segment register being changed (e.g. by procfs), which is why syscalls
46  * have to use doreti.
 *
 * Undoes PUSH_FRAME: restores %fs/%es/%ds and the general registers,
 * then discards the dummy trap type and error code.
47  */
48 #define POP_FRAME                                                       \
49         popl    %fs ;                                                   \
50         popl    %es ;                                                   \
51         popl    %ds ;                                                   \
52         popal ;                                                         \
53         addl    $2*4,%esp ;     /* dummy trap & error codes */          \
54
/* Discard the 17-long dummy frame built by PUSH_DUMMY. */
55 #define POP_DUMMY                                                      \
56         addl    $17*4,%esp ;                                            \
57
/*
 * Per-IRQ fields from the 16-byte int_to_apicintpin[] entries:
 * the IO APIC base address (offset 8) and the redirection table
 * index for the pin (offset 12), as used by MASK_IRQ/UNMASK_IRQ.
 */
58 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
59 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
60
61 /*
62  * Interrupts are expected to already be disabled when using these
63  * IMASK_*() macros.
 *
 * imen_spinlock serializes apic_imen and IO APIC register updates
 * between cpus (see MASK_IRQ/UNMASK_IRQ below).
64  */
65 #define IMASK_LOCK                                                      \
66         SPIN_LOCK(imen_spinlock) ;                                      \
67
68 #define IMASK_UNLOCK                                                    \
69         SPIN_UNLOCK(imen_spinlock) ;                                    \
70         
/*
 * Mask irq_num: set its bit in apic_imen and the mask bit in its
 * IO APIC redirection entry, unless it is already masked.  Clobbers
 * %eax and %ecx.  Interrupts must be disabled (IMASK_* requirement).
 */
71 #define MASK_IRQ(irq_num)                                               \
72         IMASK_LOCK ;                            /* into critical reg */ \
73         testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
74         jne     7f ;                    /* masked, don't mask */        \
75         orl     $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */  \
76         movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
77         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
78         movl    %eax, (%ecx) ;                  /* write the index */   \
79         movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
80         orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
81         movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
82 7: ;                                            /* already masked */    \
83         IMASK_UNLOCK ;                                                  \
84
85 /*
86  * Test to see whether we are handling an edge or level triggered INT.
87  *  Level-triggered INTs must still be masked as we don't clear the source,
88  *  and the EOI cycle would cause redundant INTs to occur.
 *
 * Clobbers %eax/%ecx (via MASK_IRQ) when the pin is level-triggered.
89  */
90 #define MASK_LEVEL_IRQ(irq_num)                                         \
91         testl   $IRQ_LBIT(irq_num), apic_pin_trigger ;                  \
92         jz      9f ;                            /* edge, don't mask */  \
93         MASK_IRQ(irq_num) ;                                             \
94 9: ;                                                                    \
95
96
96
/*
 * Issue an EOI to the local APIC, but only if the IRQ's bit is
 * actually set in the in-service register, so we never EOI an
 * interrupt that is not being serviced.  Clobbers %eax in the
 * APIC_INTR_REORDER variant; the default variant only tests
 * lapic_isr1 directly.
 */
97 #ifdef APIC_INTR_REORDER
98 #define EOI_IRQ(irq_num)                                                \
99         movl    apic_isrbit_location + 8 * (irq_num), %eax ;            \
100         movl    (%eax), %eax ;                                          \
101         testl   apic_isrbit_location + 4 + 8 * (irq_num), %eax ;        \
102         jz      9f ;                            /* not active */        \
103         movl    $0, lapic_eoi ;                                         \
104 9:                                                                      \
105
106 #else
107
108 #define EOI_IRQ(irq_num)                                                \
109         testl   $IRQ_LBIT(irq_num), lapic_isr1;                         \
110         jz      9f      ;                       /* not active */        \
111         movl    $0, lapic_eoi;                                          \
112 9:                                                                      \
113
114 #endif
115         
116 /*
117  * Test to see if the source is currently masked, clear if so.
 *
 * Mirror image of MASK_IRQ: clears the bit in apic_imen and the mask
 * bit in the IO APIC redirection entry.  Clobbers %eax and %ecx;
 * interrupts must be disabled (IMASK_* requirement).
118  */
119 #define UNMASK_IRQ(irq_num)                                     \
120         IMASK_LOCK ;                            /* into critical reg */ \
121         testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
122         je      7f ;                    /* bit clear, not masked */     \
123         andl    $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */    \
124         movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
125         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
126         movl    %eax,(%ecx) ;                   /* write the index */   \
127         movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
128         andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
129         movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
130 7: ;                                                                    \
131         IMASK_UNLOCK ;                                                  \
132
133 /*
134  * Fast interrupt call handlers run in the following sequence:
135  *
136  *      - Push the trap frame required by doreti
137  *      - Mask the interrupt and reenable its source
138  *      - If we cannot take the interrupt set its fpending bit and
139  *        doreti.  Note that we cannot mess with mp_lock at all
140  *        if we entered from a critical section!
141  *      - If we can take the interrupt clear its fpending bit,
142  *        call the handler, then unmask and doreti.
 *
 * Note: the MP-lock-failure path forwards the interrupt to the lock
 * holder via an IPI and must exit through THIS vector's local label 5
 * (jmp 5b).  The previous "jmp 5f" resolved into the next macro
 * expansion's label 5 and only worked because every expansion's exit
 * sequence happened to be identical.
143  *
144  * YYY can cache gd base pointer instead of using hidden %fs prefixes.
145  */
146 
147 #define FAST_INTR(irq_num, vec_name)                                    \
148         .text ;                                                         \
149         SUPERALIGN_TEXT ;                                               \
150 IDTVEC(vec_name) ;                                                      \
151         PUSH_FRAME ;                                                    \
152         FAKE_MCOUNT(13*4(%esp)) ;                                       \
153         MASK_LEVEL_IRQ(irq_num) ;                                       \
154         EOI_IRQ(irq_num) ;                                              \
155         movl    PCPU(curthread),%ebx ;                                  \
156         movl    $0,%eax ;       /* CURRENT CPL IN FRAME (REMOVED) */    \
157         pushl   %eax ;                                                  \
158         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
159         jl      2f ;                                                    \
160 1: ;                                                                    \
161         /* in critical section, make interrupt pending */               \
162         /* set the pending bit and return, leave interrupt masked */    \
163         orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
164         orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
165         jmp     5f ;                                                    \
166 2: ;                                                                    \
167         /* try to get the MP lock */                                    \
168         call    try_mplock ;                                            \
169         testl   %eax,%eax ;                                             \
170         jz      6f ;                                                    \
171         /* clear pending bit, run handler */                            \
172         incl    PCPU(intr_nesting_level) ;                              \
173         addl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
174         andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
175         pushl   intr_unit + (irq_num) * 4 ;                             \
176         call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
177         addl    $4, %esp ;                                              \
178         subl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
179         incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
180         movl    intr_countp + (irq_num) * 4, %eax ;                     \
181         incl    (%eax) ;                                                \
182         decl    PCPU(intr_nesting_level) ;                              \
183         call    rel_mplock ;                                            \
184         UNMASK_IRQ(irq_num) ;                                           \
185 5: ;                                                                    \
186         MEXITCOUNT ;                                                    \
187         jmp     doreti ;                                                \
188 6: ;                                                                    \
189         /* could not get the MP lock, forward the interrupt */          \
190         movl    mp_lock, %eax ;          /* check race */               \
191         cmpl    $MP_FREE_LOCK,%eax ;                                    \
192         je      2b ;                                                    \
193         incl    PCPU(cnt)+V_FORWARDED_INTS ;                            \
194         subl    $12,%esp ;                                              \
195         movl    $irq_num,8(%esp) ;                                      \
196         movl    $forward_fastint_remote,4(%esp) ;                       \
197         movl    %eax,(%esp) ;                                           \
198         call    lwkt_send_ipiq_bycpu ;                                  \
199         addl    $12,%esp ;                                              \
200         jmp     5b ;            /* exit via OUR doreti path above */    \
201
202 /*
203  * Restart fast interrupt held up by critical section or cpl.
204  *
205  *      - Push a dummy trap frame as required by doreti
206  *      - The interrupt source is already masked
207  *      - Clear the fpending bit
 *        (NOTE(review): no fpending manipulation appears in this
 *        macro -- presumably the caller clears it before dispatching
 *        here; confirm against doreti)
208  *      - Run the handler
209  *      - Unmask the interrupt
210  *      - Pop the dummy frame and do a normal return
211  *
212  *      The BGL is held on call and left held on return.
213  *
214  *      YYY can cache gd base pointer instead of using hidden %fs
215  *      prefixes.
216  */
217 
218 #define FAST_UNPEND(irq_num, vec_name)                                  \
219         .text ;                                                         \
220         SUPERALIGN_TEXT ;                                               \
221 IDTVEC(vec_name) ;                                                      \
222         pushl   %ebp ;                                                  \
223         movl    %esp,%ebp ;                                             \
224         PUSH_DUMMY ;                                                    \
225         pushl   intr_unit + (irq_num) * 4 ;                             \
226         call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
227         addl    $4, %esp ;                                              \
228         incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
229         movl    intr_countp + (irq_num) * 4, %eax ;                     \
230         incl    (%eax) ;                                                \
231         UNMASK_IRQ(irq_num) ;                                           \
232         POP_DUMMY ;                                                     \
233         popl %ebp ;                                                     \
234         ret ;                                                           \
235
236 /*
237  * Slow interrupt call handlers run in the following sequence:
238  *
239  *      - Push the trap frame required by doreti.
240  *      - Mask the interrupt and reenable its source.
241  *      - If we cannot take the interrupt set its ipending bit and
242  *        doreti.  In addition to checking for a critical section
243  *        and cpl mask we also check to see if the thread is still
244  *        running.  Note that we cannot mess with mp_lock at all
245  *        if we entered from a critical section!
246  *      - If we can take the interrupt clear its ipending bit
247  *        and schedule the thread.  Leave interrupts masked and doreti.
248  *
249  *      Note that calls to sched_ithd() are made with interrupts enabled
250  *      and outside a critical section.  YYY sched_ithd may preempt us
251  *      synchronously (fix interrupt stacking).
252  *
 *      The interrupt source remains masked on exit either way;
 *      presumably the interrupt thread unmasks it when it completes
 *      (confirm in the ithread code -- no UNMASK_IRQ appears here).
253  *
254  *      YYY can cache gd base pointer instead of using hidden %fs
255  *      prefixes.
256  */
257 
258 #define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
259         .text ;                                                         \
260         SUPERALIGN_TEXT ;                                               \
261 IDTVEC(vec_name) ;                                                      \
262         PUSH_FRAME ;                                                    \
263         maybe_extra_ipending ;                                          \
264 ;                                                                       \
265         MASK_LEVEL_IRQ(irq_num) ;                                       \
266         EOI_IRQ(irq_num) ;                                              \
267         movl    PCPU(curthread),%ebx ;                                  \
268         movl    $0,%eax ;       /* CURRENT CPL IN FRAME (REMOVED) */    \
269         pushl   %eax ;          /* cpl do restore */                    \
270         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
271         jl      2f ;                                                    \
272 1: ;                                                                    \
273         /* set the pending bit and return, leave the interrupt masked */ \
274         orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
275         orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
276         jmp     5f ;                                                    \
277 2: ;                                                                    \
278         /* set running bit, clear pending bit, run handler */           \
279         andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
280         sti ;                                                           \
281         pushl   $irq_num ;                                              \
282         call    sched_ithd ;                                            \
283         addl    $4,%esp ;                                               \
284         incl    PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */  \
285         movl    intr_countp + (irq_num) * 4,%eax ;                      \
286         incl    (%eax) ;                                                \
287 5: ;                                                                    \
288         MEXITCOUNT ;                                                    \
289         jmp     doreti ;                                                \
290
291
290
291 /*
292  * Handle "spurious INTerrupts".
293  * Notes:
294  *  This is different than the "spurious INTerrupt" generated by an
295  *   8259 PIC for missing INTs.  See the APIC documentation for details.
296  *  This routine should NOT do an 'EOI' cycle.
297  */
298         .text
299         SUPERALIGN_TEXT
300         .globl Xspuriousint
301 Xspuriousint:
302
303         /* No EOI cycle used here */
304
        /* nothing was saved, so a bare iret returns to the interrupted code */
305         iret
306
307
306
307
308 /*
309  * Handle TLB shootdowns.
 *
 * Rewriting %cr3 with its current value flushes the TLB.  Only %eax
 * is saved/used, and memory references use an ss segment override so
 * %ds need not be reloaded.
310  */
311         .text
312         SUPERALIGN_TEXT
313         .globl  Xinvltlb
314 Xinvltlb:
315         pushl   %eax
316
317 #ifdef COUNT_XINVLTLB_HITS
318         pushl   %fs
319         movl    $KPSEL, %eax
320         mov     %ax, %fs
321         movl    PCPU(cpuid), %eax
322         popl    %fs
323         ss
324         incl    _xhits(,%eax,4)
325 #endif /* COUNT_XINVLTLB_HITS */
326
327         movl    %cr3, %eax              /* invalidate the TLB */
328         movl    %eax, %cr3
329
330         ss                              /* stack segment, avoid %ds load */
331         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
332
333         popl    %eax
334         iret
335
336
337 /*
338  * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
339  *
340  *  - Signals its receipt.
341  *  - Waits for permission to restart.
342  *  - Signals its restart.
 *
 * Saves this cpu's context into stoppcbs[cpuid], then spins on
 * started_cpus.  The handshake bits are updated with locked btsl/btrl
 * so the stop/restart initiator can observe them atomically.
343  */
344
345         .text
346         SUPERALIGN_TEXT
347         .globl Xcpustop
348 Xcpustop:
349         pushl   %ebp
350         movl    %esp, %ebp
351         pushl   %eax
352         pushl   %ecx
353         pushl   %edx
354         pushl   %ds                     /* save current data segment */
355         pushl   %fs
356
357         movl    $KDSEL, %eax
358         mov     %ax, %ds                /* use KERNEL data segment */
359         movl    $KPSEL, %eax
360         mov     %ax, %fs
361
362         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
363
364         movl    PCPU(cpuid), %eax
365         imull   $PCB_SIZE, %eax
366         leal    CNAME(stoppcbs)(%eax), %eax
367         pushl   %eax
368         call    CNAME(savectx)          /* Save process context */
369         addl    $4, %esp
370         
371                 
372         movl    PCPU(cpuid), %eax
373
374         lock
375         btsl    %eax, stopped_cpus      /* stopped_cpus |= (1<<id) */
376 1:
377         btl     %eax, started_cpus      /* while (!(started_cpus & (1<<id))) */
378         jnc     1b
379
380         lock
381         btrl    %eax, started_cpus      /* started_cpus &= ~(1<<id) */
382         lock
383         btrl    %eax, stopped_cpus      /* stopped_cpus &= ~(1<<id) */
384
        /* %eax still holds our cpuid (bt* do not modify it): only cpu 0
         * may run the one-shot restart function */
385         test    %eax, %eax
386         jnz     2f
387
388         movl    CNAME(cpustop_restartfunc), %eax
389         test    %eax, %eax
390         jz      2f
391         movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */
392
393         call    *%eax
394 2:
395         popl    %fs
396         popl    %ds                     /* restore previous data segment */
397         popl    %edx
398         popl    %ecx
399         popl    %eax
400         movl    %ebp, %esp
401         popl    %ebp
402         iret
403
403
404         /*
405          * For now just have one ipiq IPI, but what we really want is
406          * to have one for each source cpu so the APICs don't get stalled
407          * backlogging the requests.
         *
         * If we are not in a critical section the IPI queue is processed
         * immediately inside a forced critical section; otherwise we just
         * note the request in reqflags and iret, letting the critical
         * section exit path deal with it.
408          */
409         .text
410         SUPERALIGN_TEXT
411         .globl Xipiq
412 Xipiq:
413         PUSH_FRAME
414         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
415         FAKE_MCOUNT(13*4(%esp))
416
417         movl    PCPU(curthread),%ebx
418         cmpl    $TDPRI_CRIT,TD_PRI(%ebx)
419         jge     1f
420         subl    $8,%esp                 /* make same as interrupt frame */
421         incl    PCPU(intr_nesting_level)
422         addl    $TDPRI_CRIT,TD_PRI(%ebx)
423         call    lwkt_process_ipiq_frame
424         subl    $TDPRI_CRIT,TD_PRI(%ebx)
425         decl    PCPU(intr_nesting_level)
426         addl    $8,%esp
427         pushl   $0                      /* CPL for frame (REMOVED) */
428         MEXITCOUNT
429         jmp     doreti
430 1:
        /* in a critical section: flag the request and return */
431         orl     $RQF_IPIQ,PCPU(reqflags)
432         MEXITCOUNT
433         POP_FRAME
434         iret
435
/*
 * Instantiate the vector stubs for IRQs 0-23.  MCOUNT_LABEL(bintr)/
 * MCOUNT_LABEL(eintr) bracket them for the profiler.
 */
436 MCOUNT_LABEL(bintr)
        /* fast interrupt entry points */
437         FAST_INTR(0,fastintr0)
438         FAST_INTR(1,fastintr1)
439         FAST_INTR(2,fastintr2)
440         FAST_INTR(3,fastintr3)
441         FAST_INTR(4,fastintr4)
442         FAST_INTR(5,fastintr5)
443         FAST_INTR(6,fastintr6)
444         FAST_INTR(7,fastintr7)
445         FAST_INTR(8,fastintr8)
446         FAST_INTR(9,fastintr9)
447         FAST_INTR(10,fastintr10)
448         FAST_INTR(11,fastintr11)
449         FAST_INTR(12,fastintr12)
450         FAST_INTR(13,fastintr13)
451         FAST_INTR(14,fastintr14)
452         FAST_INTR(15,fastintr15)
453         FAST_INTR(16,fastintr16)
454         FAST_INTR(17,fastintr17)
455         FAST_INTR(18,fastintr18)
456         FAST_INTR(19,fastintr19)
457         FAST_INTR(20,fastintr20)
458         FAST_INTR(21,fastintr21)
459         FAST_INTR(22,fastintr22)
460         FAST_INTR(23,fastintr23)
461         
462         /* YYY what is this garbage? */
463
        /* slow (threaded) interrupt entry points */
464         INTR(0,intr0,)
465         INTR(1,intr1,)
466         INTR(2,intr2,)
467         INTR(3,intr3,)
468         INTR(4,intr4,)
469         INTR(5,intr5,)
470         INTR(6,intr6,)
471         INTR(7,intr7,)
472         INTR(8,intr8,)
473         INTR(9,intr9,)
474         INTR(10,intr10,)
475         INTR(11,intr11,)
476         INTR(12,intr12,)
477         INTR(13,intr13,)
478         INTR(14,intr14,)
479         INTR(15,intr15,)
480         INTR(16,intr16,)
481         INTR(17,intr17,)
482         INTR(18,intr18,)
483         INTR(19,intr19,)
484         INTR(20,intr20,)
485         INTR(21,intr21,)
486         INTR(22,intr22,)
487         INTR(23,intr23,)
488
        /* restart stubs for fast interrupts deferred by critical sections */
489         FAST_UNPEND(0,fastunpend0)
490         FAST_UNPEND(1,fastunpend1)
491         FAST_UNPEND(2,fastunpend2)
492         FAST_UNPEND(3,fastunpend3)
493         FAST_UNPEND(4,fastunpend4)
494         FAST_UNPEND(5,fastunpend5)
495         FAST_UNPEND(6,fastunpend6)
496         FAST_UNPEND(7,fastunpend7)
497         FAST_UNPEND(8,fastunpend8)
498         FAST_UNPEND(9,fastunpend9)
499         FAST_UNPEND(10,fastunpend10)
500         FAST_UNPEND(11,fastunpend11)
501         FAST_UNPEND(12,fastunpend12)
502         FAST_UNPEND(13,fastunpend13)
503         FAST_UNPEND(14,fastunpend14)
504         FAST_UNPEND(15,fastunpend15)
505         FAST_UNPEND(16,fastunpend16)
506         FAST_UNPEND(17,fastunpend17)
507         FAST_UNPEND(18,fastunpend18)
508         FAST_UNPEND(19,fastunpend19)
509         FAST_UNPEND(20,fastunpend20)
510         FAST_UNPEND(21,fastunpend21)
511         FAST_UNPEND(22,fastunpend22)
512         FAST_UNPEND(23,fastunpend23)
513 MCOUNT_LABEL(eintr)
514
515         /*
516          * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
517          *
518          * - Calls the generic rendezvous action function.
519          */
520         .text
521         SUPERALIGN_TEXT
522         .globl  Xrendezvous
523 Xrendezvous:
524         PUSH_FRAME
525         movl    $KDSEL, %eax
526         mov     %ax, %ds                /* use KERNEL data segment */
527         mov     %ax, %es
528         movl    $KPSEL, %eax
529         mov     %ax, %fs
530
531         call    smp_rendezvous_action
532
        /* EOI only after the rendezvous action has completed */
533         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
534         POP_FRAME
535         iret
536         
536         
537         
538         .data
539
540 #ifdef COUNT_XINVLTLB_HITS
        /* per-cpu Xinvltlb hit counters (one long per cpu) */
541         .globl  xhits
542 xhits:
543         .space  (NCPU * 4), 0
544 #endif /* COUNT_XINVLTLB_HITS */
545
546 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
547         .globl stopped_cpus, started_cpus
548 stopped_cpus:
549         .long   0
550 started_cpus:
551         .long   0
552
        /* optional function run once by cpu 0 on restart (see Xcpustop) */
553         .globl CNAME(cpustop_restartfunc)
554 CNAME(cpustop_restartfunc):
555         .long 0
556                 
        /* bitmask of level-triggered pins, tested by MASK_LEVEL_IRQ */
557         .globl  apic_pin_trigger
558 apic_pin_trigger:
559         .long   0
560
561         .text