Merge branch 'vendor/FILE'
[dragonfly.git] / sys / platform / pc32 / apic / apic_vector.s
1 /*
2  *      from: vector.s, 386BSD 0.1 unknown origin
3  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4  * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.39 2008/08/02 01:14:43 dillon Exp $
5  */
6
7 #include "use_npx.h"
8 #include "opt_auto_eoi.h"
9
10 #include <machine/asmacros.h>
11 #include <machine/lock.h>
12 #include <machine/psl.h>
13 #include <machine/trap.h>
14
15 #include <machine_base/icu/icu.h>
16 #include <bus/isa/isa.h>
17
18 #include "assym.s"
19
20 #include "apicreg.h"
21 #include "apic_ipl.h"
22 #include <machine/smp.h>
23 #include <machine_base/isa/intr_machdep.h>
24
/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)       (1 << (irq_num))

/*
 * make an index into the IO APIC from the IRQ#.  Each redirection
 * table entry occupies two 32-bit registers starting at ioapic
 * register 0x10, hence the "* 2".
 */
#define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))

/*
 * MPLOCKED expands to an x86 "lock" prefix on SMP kernels so the
 * read-modify-write bit operations below (btsl/btrl) are atomic
 * with respect to other cpus.  On UP kernels it expands to nothing.
 */
#ifdef SMP
#define MPLOCKED     lock ;
#else
#define MPLOCKED
#endif
36
/*
 * Push an interrupt frame in a format acceptable to doreti, reload
 * the segment registers for the kernel.
 *
 * The cpu has already pushed eflags/cs/eip; we add dummy
 * error/trap/xflags words, the eight general registers (pushal) and
 * the four data segment registers, then load kernel selectors.
 * %fs is loaded from KPSEL, which the PCPU() accessors rely on for
 * per-cpu data (the "hidden %fs prefixes" mentioned below).
 */
#define PUSH_FRAME                                                      \
        pushl   $0 ;            /* dummy error code */                  \
        pushl   $0 ;            /* dummy trap type */                   \
        pushl   $0 ;            /* dummy xflags type */                 \
        pushal ;                /* 8 general registers */               \
        pushl   %ds ;           /* save data and extra segments ... */  \
        pushl   %es ;                                                   \
        pushl   %fs ;                                                   \
        pushl   %gs ;                                                   \
        cld ;                   /* C code expects DF clear */           \
        mov     $KDSEL,%ax ;                                            \
        mov     %ax,%ds ;                                               \
        mov     %ax,%es ;                                               \
        mov     %ax,%gs ;                                               \
        mov     $KPSEL,%ax ;                                            \
        mov     %ax,%fs ;                                               \

/*
 * Build a fake interrupt frame from a plain call context: synthesize
 * the hardware eflags/cs/eip words, push the three dummy codes, and
 * reserve (without initializing) the pushal/segment/CPL area.  The
 * whole thing is discarded later by POP_DUMMY.
 */
#define PUSH_DUMMY                                                      \
        pushfl ;                /* phys int frame / flags */            \
        pushl %cs ;             /* phys int frame / cs */               \
        pushl   12(%esp) ;      /* original caller eip */               \
        pushl   $0 ;            /* dummy error code */                  \
        pushl   $0 ;            /* dummy trap type */                   \
        pushl   $0 ;            /* dummy xflags type */                 \
        subl    $13*4,%esp ;    /* pushal + 4 seg regs (dummy) + CPL */ \

/*
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 *
 * Inverse of PUSH_FRAME: restores segment registers and GPRs, then
 * discards the three dummy words.
 */
#define POP_FRAME                                                       \
        popl    %gs ;                                                   \
        popl    %fs ;                                                   \
        popl    %es ;                                                   \
        popl    %ds ;                                                   \
        popal ;                                                         \
        addl    $3*4,%esp ;     /* dummy xflags, trap & error codes */  \

/*
 * Discard the frame built by PUSH_DUMMY: 19 words = 3 fake hardware
 * frame words + 3 dummy codes + 13 reserved (pushal/segs/CPL) words.
 */
#define POP_DUMMY                                                       \
        addl    $19*4,%esp ;                                            \

/*
 * Per-IRQ lookup helpers into the int_to_apicintpin[] array.  Entries
 * appear to be 16 bytes each, with the ioapic base address at offset
 * 8 and the redirection-register index at offset 12 (offsets inferred
 * from usage here -- confirm against intr_machdep.h).
 */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
/*
 * Mask the given IRQ at its ioapic pin if it is not already masked.
 * apic_imen tracks mask state; masking sets IOART_INTMASK in the
 * pin's redirection entry via the ioapic index/window register pair.
 * Clobbers %ecx, %eax and flags.
 */
#define MASK_IRQ(irq_num)                                               \
        APIC_IMASK_LOCK ;                       /* into critical reg */ \
        testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
        jne     7f ;                    /* masked, don't mask */        \
        orl     $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */  \
        movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax, (%ecx) ;                  /* write the index */   \
        movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
        orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
        movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
7: ;                                            /* already masked */    \
        APIC_IMASK_UNLOCK ;                                             \

/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 *
 * apic_pin_trigger has a bit set for each level-triggered pin (see
 * the .data section at the bottom of this file).
 */
#define MASK_LEVEL_IRQ(irq_num)                                         \
        testl   $IRQ_LBIT(irq_num), apic_pin_trigger ;                  \
        jz      9f ;                            /* edge, don't mask */  \
        MASK_IRQ(irq_num) ;                                             \
9: ;                                                                    \

/*
 * Test to see if the source is currently masked, clear if so.
 *
 * Entered with %eax == 0 when the source should actually be unmasked
 * (the fast handler "returns 0 to unmask"); a non-zero %eax skips the
 * unmask entirely.  Clobbers %ecx, %eax and flags.
 */
#define UNMASK_IRQ(irq_num)                                     \
        cmpl    $0,%eax ;                                               \
        jnz     8f ;                                                    \
        APIC_IMASK_LOCK ;                       /* into critical reg */ \
        testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
        je      7f ;                    /* bit clear, not masked */     \
        andl    $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */    \
        movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax,(%ecx) ;                   /* write the index */   \
        movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
        andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
        movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
7: ;                                                                    \
        APIC_IMASK_UNLOCK ;                                             \
8: ;                                                                    \

#ifdef APIC_IO

/*
 * Fast interrupt call handlers run in the following sequence:
 *
 *      - Push the trap frame required by doreti
 *      - Mask the interrupt and reenable its source
 *      - If we cannot take the interrupt set its fpending bit and
 *        doreti.  Note that we cannot mess with mp_lock at all
 *        if we entered from a critical section!
 *      - If we can take the interrupt clear its fpending bit,
 *        call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */

#define FAST_INTR(irq_num, vec_name)                                    \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        PUSH_FRAME ;                                                    \
        FAKE_MCOUNT(15*4(%esp)) ;                                       \
        MASK_LEVEL_IRQ(irq_num) ;                                       \
        movl    $0, lapic_eoi ;         /* EOI the local apic */        \
        movl    PCPU(curthread),%ebx ;                                  \
        movl    $0,%eax ;       /* CURRENT CPL IN FRAME (REMOVED) */    \
        pushl   %eax ;                                                  \
        testl   $-1,TD_NEST_COUNT(%ebx) ;       /* nested interrupt? */ \
        jne     1f ;                                                    \
        cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;      /* in crit section? */  \
        jl      2f ;                                                    \
1: ;                                                                    \
        /* in critical section, make interrupt pending */               \
        /* set the pending bit and return, leave interrupt masked */    \
        orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
        orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
        jmp     5f ;                                                    \
2: ;                                                                    \
        /* clear pending bit, run handler */                            \
        andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
        pushl   $irq_num ;                                              \
        pushl   %esp ;                   /* pass frame by reference */  \
        call    ithread_fast_handler ;   /* returns 0 to unmask */      \
        addl    $8, %esp ;                                              \
        UNMASK_IRQ(irq_num) ;   /* unmasks only if %eax == 0 */         \
5: ;                                                                    \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \

/*
 * Slow interrupt call handlers run in the following sequence:
 *
 *      - Push the trap frame required by doreti.
 *      - Mask the interrupt and reenable its source.
 *      - If we cannot take the interrupt set its ipending bit and
 *        doreti.  In addition to checking for a critical section
 *        and cpl mask we also check to see if the thread is still
 *        running.  Note that we cannot mess with mp_lock at all
 *        if we entered from a critical section!
 *      - If we can take the interrupt clear its ipending bit
 *        and schedule the thread.  Leave interrupts masked and doreti.
 *
 *      Note that calls to sched_ithd() are made with interrupts enabled
 *      and outside a critical section.  YYY sched_ithd may preempt us
 *      synchronously (fix interrupt stacking).
 *
 *      YYY can cache gd base pointer instead of using hidden %fs
 *      prefixes.
 */

#define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending)              \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        PUSH_FRAME ;                                                    \
        maybe_extra_ipending ;                                          \
;                                                                       \
        MASK_LEVEL_IRQ(irq_num) ;                                       \
        incl    PCPU(cnt) + V_INTR ;    /* interrupt statistics */      \
        movl    $0, lapic_eoi ;         /* EOI the local apic */        \
        movl    PCPU(curthread),%ebx ;                                  \
        movl    $0,%eax ;       /* CURRENT CPL IN FRAME (REMOVED) */    \
        pushl   %eax ;          /* cpl to restore */                    \
        testl   $-1,TD_NEST_COUNT(%ebx) ;       /* nested interrupt? */ \
        jne     1f ;                                                    \
        cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;      /* in crit section? */  \
        jl      2f ;                                                    \
1: ;                                                                    \
        /* set the pending bit and return, leave the interrupt masked */ \
        orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
        orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
        jmp     5f ;                                                    \
2: ;                                                                    \
        /* set running bit, clear pending bit, run handler */           \
        andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
        incl    TD_NEST_COUNT(%ebx) ;   /* nested ints take 1f path */  \
        sti ;                                                           \
        pushl   $irq_num ;                                              \
        call    sched_ithd ;            /* schedule the int thread */   \
        addl    $4,%esp ;                                               \
        cli ;                                                           \
        decl    TD_NEST_COUNT(%ebx) ;                                   \
5: ;                                                                    \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \

/*
 * Wrong interrupt call handlers.  We program these into APIC vectors
 * that should otherwise never occur.  For example, we program the SLOW
 * vector for irq N with this when we program the FAST vector with the
 * real interrupt.
 *
 * XXX for now all we can do is EOI it.  We can't call do_wrongintr
 * (yet) because we could be in a critical section.
 */
#define WRONGINTR(irq_num,vec_name)                                     \
        .text ;                                                         \
        SUPERALIGN_TEXT  ;                                              \
IDTVEC(vec_name) ;                                                      \
        PUSH_FRAME ;                                                    \
        movl    $0, lapic_eoi ; /* End Of Interrupt to APIC */          \
        /*pushl $irq_num ;*/                                            \
        /*call  do_wrongintr ;*/                                        \
        /*addl  $4,%esp ;*/                                             \
        POP_FRAME ;                                                     \
        iret  ;                                                         \

#endif  /* APIC_IO */
259
/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
        .text
        SUPERALIGN_TEXT
        .globl Xspuriousint
Xspuriousint:

        /* No EOI cycle used here */

        iret

276
/*
 * Handle TLB shootdowns.
 *
 * Reloading %cr3 with its current value flushes the TLB.  Only %eax
 * is saved/restored and no kernel data selectors are loaded, so the
 * lapic_eoi store uses an explicit %ss segment override instead of
 * relying on %ds.
 */
        .text
        SUPERALIGN_TEXT
        .globl  Xinvltlb
Xinvltlb:
        pushl   %eax

        movl    %cr3, %eax              /* invalidate the TLB */
        movl    %eax, %cr3

        ss                              /* stack segment, avoid %ds load */
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        popl    %eax
        iret

295
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Processing pending IPIQ events while waiting.
 *  - Signals its restart.
 */

        .text
        SUPERALIGN_TEXT
        .globl Xcpustop
Xcpustop:
        pushl   %ebp
        movl    %esp, %ebp
        pushl   %eax
        pushl   %ecx
        pushl   %edx
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs                /* per-cpu data via %fs */

        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */

        /* save this cpu's register context into stoppcbs[cpuid] */
        movl    PCPU(cpuid), %eax
        imull   $PCB_SIZE, %eax
        leal    CNAME(stoppcbs)(%eax), %eax
        pushl   %eax
        call    CNAME(savectx)          /* Save process context */
        addl    $4, %esp


        movl    PCPU(cpuid), %eax       /* %eax = cpu id from here on */

        /*
         * Indicate that we have stopped and loop waiting for permission
         * to start again.  We must still process IPI events while in a
         * stopped state.
         */
        MPLOCKED
        btsl    %eax, stopped_cpus      /* stopped_cpus |= (1<<id) */
1:
        andl    $~RQF_IPIQ,PCPU(reqflags)
        pushl   %eax
        call    lwkt_smp_stopped        /* process IPIQs while stopped */
        popl    %eax
        btl     %eax, started_cpus      /* while (!(started_cpus & (1<<id))) */
        jnc     1b

        MPLOCKED
        btrl    %eax, started_cpus      /* started_cpus &= ~(1<<id) */
        MPLOCKED
        btrl    %eax, stopped_cpus      /* stopped_cpus &= ~(1<<id) */

        /* only cpu 0 runs the one-shot restart function, if any */
        test    %eax, %eax
        jnz     2f

        movl    CNAME(cpustop_restartfunc), %eax
        test    %eax, %eax
        jz      2f
        movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */

        call    *%eax
2:
        popl    %fs
        popl    %ds                     /* restore previous data segment */
        popl    %edx
        popl    %ecx
        popl    %eax
        movl    %ebp, %esp
        popl    %ebp
        iret

        /*
         * For now just have one ipiq IPI, but what we really want is
         * to have one for each source cpu so the APICs don't get stalled
         * backlogging the requests.
         */
        .text
        SUPERALIGN_TEXT
        .globl Xipiq
Xipiq:
        PUSH_FRAME
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
        FAKE_MCOUNT(15*4(%esp))

        incl    PCPU(cnt) + V_IPI       /* IPI statistics */
        movl    PCPU(curthread),%ebx
        cmpl    $TDPRI_CRIT,TD_PRI(%ebx)
        jge     1f                      /* in critical section, defer */
        subl    $8,%esp                 /* make same as interrupt frame */
        pushl   %esp                    /* pass frame by reference */
        incl    PCPU(intr_nesting_level)
        addl    $TDPRI_CRIT,TD_PRI(%ebx) /* enter critical section */
        call    lwkt_process_ipiq_frame
        subl    $TDPRI_CRIT,TD_PRI(%ebx)
        decl    PCPU(intr_nesting_level)
        addl    $12,%esp                /* frame ptr + 8-byte pad */
        pushl   $0                      /* CPL for frame (REMOVED) */
        MEXITCOUNT
        jmp     doreti
1:
        /* cannot process now, flag the request for doreti */
        orl     $RQF_IPIQ,PCPU(reqflags)
        MEXITCOUNT
        POP_FRAME
        iret

        /*
         * Local apic timer interrupt.  Runs the timer handler directly
         * when not nested and not in a critical section, otherwise
         * flags RQF_TIMER for doreti to process later.
         */
        .text
        SUPERALIGN_TEXT
        .globl Xtimer
Xtimer:
        PUSH_FRAME
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
        FAKE_MCOUNT(15*4(%esp))

        incl    PCPU(cnt) + V_TIMER     /* timer statistics */
        movl    PCPU(curthread),%ebx
        cmpl    $TDPRI_CRIT,TD_PRI(%ebx)
        jge     1f                      /* in critical section, defer */
        testl   $-1,TD_NEST_COUNT(%ebx)
        jne     1f                      /* nested interrupt, defer */
        subl    $8,%esp                 /* make same as interrupt frame */
        pushl   %esp                    /* pass frame by reference */
        incl    PCPU(intr_nesting_level)
        addl    $TDPRI_CRIT,TD_PRI(%ebx) /* enter critical section */
        call    lapic_timer_process_frame
        subl    $TDPRI_CRIT,TD_PRI(%ebx)
        decl    PCPU(intr_nesting_level)
        addl    $12,%esp                /* frame ptr + 8-byte pad */
        pushl   $0                      /* CPL for frame (REMOVED) */
        MEXITCOUNT
        jmp     doreti
1:
        /* cannot process now, flag the request for doreti */
        orl     $RQF_TIMER,PCPU(reqflags)
        MEXITCOUNT
        POP_FRAME
        iret

#ifdef APIC_IO

/*
 * Instantiate the fast, slow and wrong-interrupt vectors for all 24
 * ioapic pins, bracketed by mcount labels for kernel profiling.
 */
MCOUNT_LABEL(bintr)
        FAST_INTR(0,apic_fastintr0)
        FAST_INTR(1,apic_fastintr1)
        FAST_INTR(2,apic_fastintr2)
        FAST_INTR(3,apic_fastintr3)
        FAST_INTR(4,apic_fastintr4)
        FAST_INTR(5,apic_fastintr5)
        FAST_INTR(6,apic_fastintr6)
        FAST_INTR(7,apic_fastintr7)
        FAST_INTR(8,apic_fastintr8)
        FAST_INTR(9,apic_fastintr9)
        FAST_INTR(10,apic_fastintr10)
        FAST_INTR(11,apic_fastintr11)
        FAST_INTR(12,apic_fastintr12)
        FAST_INTR(13,apic_fastintr13)
        FAST_INTR(14,apic_fastintr14)
        FAST_INTR(15,apic_fastintr15)
        FAST_INTR(16,apic_fastintr16)
        FAST_INTR(17,apic_fastintr17)
        FAST_INTR(18,apic_fastintr18)
        FAST_INTR(19,apic_fastintr19)
        FAST_INTR(20,apic_fastintr20)
        FAST_INTR(21,apic_fastintr21)
        FAST_INTR(22,apic_fastintr22)
        FAST_INTR(23,apic_fastintr23)

        /* YYY what is this garbage? */

        SLOW_INTR(0,apic_slowintr0,)
        SLOW_INTR(1,apic_slowintr1,)
        SLOW_INTR(2,apic_slowintr2,)
        SLOW_INTR(3,apic_slowintr3,)
        SLOW_INTR(4,apic_slowintr4,)
        SLOW_INTR(5,apic_slowintr5,)
        SLOW_INTR(6,apic_slowintr6,)
        SLOW_INTR(7,apic_slowintr7,)
        SLOW_INTR(8,apic_slowintr8,)
        SLOW_INTR(9,apic_slowintr9,)
        SLOW_INTR(10,apic_slowintr10,)
        SLOW_INTR(11,apic_slowintr11,)
        SLOW_INTR(12,apic_slowintr12,)
        SLOW_INTR(13,apic_slowintr13,)
        SLOW_INTR(14,apic_slowintr14,)
        SLOW_INTR(15,apic_slowintr15,)
        SLOW_INTR(16,apic_slowintr16,)
        SLOW_INTR(17,apic_slowintr17,)
        SLOW_INTR(18,apic_slowintr18,)
        SLOW_INTR(19,apic_slowintr19,)
        SLOW_INTR(20,apic_slowintr20,)
        SLOW_INTR(21,apic_slowintr21,)
        SLOW_INTR(22,apic_slowintr22,)
        SLOW_INTR(23,apic_slowintr23,)

        WRONGINTR(0,apic_wrongintr0)
        WRONGINTR(1,apic_wrongintr1)
        WRONGINTR(2,apic_wrongintr2)
        WRONGINTR(3,apic_wrongintr3)
        WRONGINTR(4,apic_wrongintr4)
        WRONGINTR(5,apic_wrongintr5)
        WRONGINTR(6,apic_wrongintr6)
        WRONGINTR(7,apic_wrongintr7)
        WRONGINTR(8,apic_wrongintr8)
        WRONGINTR(9,apic_wrongintr9)
        WRONGINTR(10,apic_wrongintr10)
        WRONGINTR(11,apic_wrongintr11)
        WRONGINTR(12,apic_wrongintr12)
        WRONGINTR(13,apic_wrongintr13)
        WRONGINTR(14,apic_wrongintr14)
        WRONGINTR(15,apic_wrongintr15)
        WRONGINTR(16,apic_wrongintr16)
        WRONGINTR(17,apic_wrongintr17)
        WRONGINTR(18,apic_wrongintr18)
        WRONGINTR(19,apic_wrongintr19)
        WRONGINTR(20,apic_wrongintr20)
        WRONGINTR(21,apic_wrongintr21)
        WRONGINTR(22,apic_wrongintr22)
        WRONGINTR(23,apic_wrongintr23)
MCOUNT_LABEL(eintr)

#endif  /* APIC_IO */
520
        .data

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
        .globl stopped_cpus, started_cpus
stopped_cpus:
        .long   0                       /* bitmask of stopped cpus */
started_cpus:
        .long   0                       /* bitmask of restarted cpus */

/*
 * Optional function pointer run (by cpu 0 only) on restart; consumed
 * one-shot by Xcpustop.
 */
        .globl CNAME(cpustop_restartfunc)
CNAME(cpustop_restartfunc):
        .long 0

/* bitmask of level-triggered ioapic pins, tested by MASK_LEVEL_IRQ */
        .globl  apic_pin_trigger
apic_pin_trigger:
        .long   0

        .text