/*
 * Merged from vendor branch.
 * dragonfly.git: sys/platform/pc32/apic/apic_vector.s
 */
1 /*
2  *      from: vector.s, 386BSD 0.1 unknown origin
3  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4  * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.36 2007/01/22 19:37:04 corecode Exp $
5  */
6
7 #include "use_npx.h"
8 #include "opt_auto_eoi.h"
9
10 #include <machine/asmacros.h>
11 #include <machine/lock.h>
12 #include <machine/psl.h>
13 #include <machine/trap.h>
14
15 #include <machine_base/icu/icu.h>
16 #include <bus/isa/i386/isa.h>
17
18 #include "assym.s"
19
20 #include "apicreg.h"
21 #include "apic_ipl.h"
22 #include <machine/smp.h>
23 #include <machine_base/isa/intr_machdep.h>
24
25 /* convert an absolute IRQ# into a bitmask */
26 #define IRQ_LBIT(irq_num)       (1 << (irq_num))
27
28 /* make an index into the IO APIC from the IRQ# */
29 #define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
30
/* MPLOCKED emits a lock prefix only on SMP kernels; expands to nothing on UP */
31 #ifdef SMP
32 #define MPLOCKED     lock ;
33 #else
34 #define MPLOCKED
35 #endif
36
37 /*
38  * Push an interrupt frame in a format acceptable to doreti, reload
39  * the segment registers for the kernel.
 *
 * %ds/%es/%gs are loaded with KDSEL (kernel data); %fs is loaded with
 * KPSEL last, the per-cpu segment used by PCPU() references below.
40  */
41 #define PUSH_FRAME                                                      \
42         pushl   $0 ;            /* dummy error code */                  \
43         pushl   $0 ;            /* dummy trap type */                   \
44         pushl   $0 ;            /* dummy xflags type */                 \
45         pushal ;                                                        \
46         pushl   %ds ;           /* save data and extra segments ... */  \
47         pushl   %es ;                                                   \
48         pushl   %fs ;                                                   \
49         pushl   %gs ;                                                   \
50         mov     $KDSEL,%ax ;                                            \
51         mov     %ax,%ds ;                                               \
52         mov     %ax,%es ;                                               \
53         mov     %ax,%gs ;                                               \
54         mov     $KPSEL,%ax ;                                            \
55         mov     %ax,%fs ;                                               \
56
/*
 * Build a fake interrupt frame on the current stack (flags/cs/eip plus
 * dummy error/trap/xflags words and 13 dummy words for pushal, the four
 * segment registers and the CPL).  Must be undone with POP_DUMMY.
 */
57 #define PUSH_DUMMY                                                      \
58         pushfl ;                /* phys int frame / flags */            \
59         pushl %cs ;             /* phys int frame / cs */               \
60         pushl   12(%esp) ;      /* original caller eip */               \
61         pushl   $0 ;            /* dummy error code */                  \
62         pushl   $0 ;            /* dummy trap type */                   \
63         pushl   $0 ;            /* dummy xflags type */                 \
64         subl    $13*4,%esp ;    /* pushal + 4 seg regs (dummy) + CPL */ \
65
66 /*
67  * Warning: POP_FRAME can only be used if there is no chance of a
68  * segment register being changed (e.g. by procfs), which is why syscalls
69  * have to use doreti.
 *
 * Reverse of PUSH_FRAME: restores segment and general registers, then
 * discards the three dummy words.
70  */
71 #define POP_FRAME                                                       \
72         popl    %gs ;                                                   \
73         popl    %fs ;                                                   \
74         popl    %es ;                                                   \
75         popl    %ds ;                                                   \
76         popal ;                                                         \
77         addl    $3*4,%esp ;     /* dummy xflags, trap & error codes */  \
78
/* Discard the 19-word frame built by PUSH_DUMMY */
79 #define POP_DUMMY                                                       \
80         addl    $19*4,%esp ;                                            \
81
/*
 * Per-IRQ fields of the int_to_apicintpin[] array (16-byte entries):
 * apparently +8 = IOAPIC register-select address, +12 = redirection
 * table index — offsets match how MASK_IRQ/UNMASK_IRQ use them.
 */
82 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
83 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
84
/*
 * Mask irq_num: set its bit in apic_imen and set the mask bit in the
 * IOAPIC redirection entry.  No-op if the bit is already set.
 * Clobbers %eax and %ecx; serialized by the imask lock.
 */
85 #define MASK_IRQ(irq_num)                                               \
86         APIC_IMASK_LOCK ;                       /* into critical reg */ \
87         testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
88         jne     7f ;                    /* masked, don't mask */        \
89         orl     $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */  \
90         movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
91         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
92         movl    %eax, (%ecx) ;                  /* write the index */   \
93         movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
94         orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
95         movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
96 7: ;                                            /* already masked */    \
97         APIC_IMASK_UNLOCK ;                                             \
98
99 /*
100  * Test to see whether we are handling an edge or level triggered INT.
101  *  Level-triggered INTs must still be masked as we don't clear the source,
102  *  and the EOI cycle would cause redundant INTs to occur.
 *  Edge-triggered INTs fall straight through to 9: without masking.
103  */
104 #define MASK_LEVEL_IRQ(irq_num)                                         \
105         testl   $IRQ_LBIT(irq_num), apic_pin_trigger ;                  \
106         jz      9f ;                            /* edge, don't mask */  \
107         MASK_IRQ(irq_num) ;                                             \
108 9: ;                                                                    \
109
110 /*
111  * Test to see if the source is currently masked, clear if so.
 *
 *  On entry %eax holds the fast handler's return value (see FAST_INTR):
 *  nonzero means the source must stay masked, so skip the unmask.
 *  Clobbers %eax and %ecx; serialized by the imask lock.
112  */
113 #define UNMASK_IRQ(irq_num)                                     \
114         cmpl    $0,%eax ;                                               \
115         jnz     8f ;                                                    \
116         APIC_IMASK_LOCK ;                       /* into critical reg */ \
117         testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
118         je      7f ;                    /* bit clear, not masked */     \
119         andl    $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */    \
120         movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
121         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
122         movl    %eax,(%ecx) ;                   /* write the index */   \
123         movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
124         andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
125         movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
126 7: ;                                                                    \
127         APIC_IMASK_UNLOCK ;                                             \
128 8: ;                                                                    \
129
130 #ifdef APIC_IO
131
132 /*
133  * Fast interrupt call handlers run in the following sequence:
134  *
135  *      - Push the trap frame required by doreti
136  *      - Mask the interrupt and reenable its source
137  *      - If we cannot take the interrupt set its fpending bit and
138  *        doreti.  Note that we cannot mess with mp_lock at all
139  *        if we entered from a critical section!
140  *      - If we can take the interrupt clear its fpending bit,
141  *        call the handler, then unmask and doreti.
142  *
143  * YYY can cache gd base pointer instead of using hidden %fs prefixes.
144  */
145
146 #define FAST_INTR(irq_num, vec_name)                                    \
147         .text ;                                                         \
148         SUPERALIGN_TEXT ;                                               \
149 IDTVEC(vec_name) ;                                                      \
150         PUSH_FRAME ;                                                    \
151         FAKE_MCOUNT(15*4(%esp)) ;                                       \
152         MASK_LEVEL_IRQ(irq_num) ;                                       \
153         movl    $0, lapic_eoi ;                                         \
154         movl    PCPU(curthread),%ebx ;                                  \
155         movl    $0,%eax ;       /* CURRENT CPL IN FRAME (REMOVED) */    \
156         pushl   %eax ;                                                  \
157         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
158         jl      2f ;                                                    \
159 1: ;                                                                    \
160         /* in critical section, make interrupt pending */               \
161         /* set the pending bit and return, leave interrupt masked */    \
162         orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
163         orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
164         jmp     5f ;                                                    \
165 2: ;                                                                    \
166         /* clear pending bit, run handler */                            \
167         andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
168         pushl   $irq_num ;                                              \
169         pushl   %esp ;                   /* pass frame by reference */  \
170         call    ithread_fast_handler ;   /* returns 0 to unmask */      \
171         addl    $8, %esp ;                                              \
172         UNMASK_IRQ(irq_num) ;                                           \
173 5: ;                                                                    \
174         MEXITCOUNT ;                                                    \
175         jmp     doreti ;                                                \
176
177 /*
178  * Slow interrupt call handlers run in the following sequence:
179  *
180  *      - Push the trap frame required by doreti.
181  *      - Mask the interrupt and reenable its source.
182  *      - If we cannot take the interrupt set its ipending bit and
183  *        doreti.  In addition to checking for a critical section
184  *        and cpl mask we also check to see if the thread is still
185  *        running.  Note that we cannot mess with mp_lock at all
186  *        if we entered from a critical section!
187  *      - If we can take the interrupt clear its ipending bit
188  *        and schedule the thread.  Leave interrupts masked and doreti.
 *        (NOTE(review): only the pending bit is touched here; any
 *        "running" bookkeeping presumably happens inside sched_ithd.)
189  *
190  *      Note that calls to sched_ithd() are made with interrupts enabled
191  *      and outside a critical section.  YYY sched_ithd may preempt us
192  *      synchronously (fix interrupt stacking).
193  *
194  *      YYY can cache gd base pointer instead of using hidden %fs
195  *      prefixes.
196  */
197
198 #define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending)              \
199         .text ;                                                         \
200         SUPERALIGN_TEXT ;                                               \
201 IDTVEC(vec_name) ;                                                      \
202         PUSH_FRAME ;                                                    \
203         maybe_extra_ipending ;                                          \
204 ;                                                                       \
205         MASK_LEVEL_IRQ(irq_num) ;                                       \
206         incl    PCPU(cnt) + V_INTR ;                                    \
207         movl    $0, lapic_eoi ;                                         \
208         movl    PCPU(curthread),%ebx ;                                  \
209         movl    $0,%eax ;       /* CURRENT CPL IN FRAME (REMOVED) */    \
210         pushl   %eax ;          /* cpl do restore */                    \
211         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
212         jl      2f ;                                                    \
213 1: ;                                                                    \
214         /* set the pending bit and return, leave the interrupt masked */ \
215         orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
216         orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
217         jmp     5f ;                                                    \
218 2: ;                                                                    \
219         /* set running bit, clear pending bit, run handler */           \
220         andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
221         sti ;                                                           \
222         pushl   $irq_num ;                                              \
223         call    sched_ithd ;                                            \
224         addl    $4,%esp ;                                               \
225 5: ;                                                                    \
226         MEXITCOUNT ;                                                    \
227         jmp     doreti ;                                                \
228
229 /*
230  * Wrong interrupt call handlers.  We program these into APIC vectors
231  * that should otherwise never occur.  For example, we program the SLOW
232  * vector for irq N with this when we program the FAST vector with the
233  * real interrupt.
234  *
235  * XXX for now all we can do is EOI it.  We can't call do_wrongintr
236  * (yet) because we could be in a critical section.
 *
 * The full frame is pushed and popped around the EOI, so no registers
 * are clobbered from the interrupted context's point of view.
237  */
238 #define WRONGINTR(irq_num,vec_name)                                     \
239         .text ;                                                         \
240         SUPERALIGN_TEXT  ;                                              \
241 IDTVEC(vec_name) ;                                                      \
242         PUSH_FRAME ;                                                    \
243         movl    $0, lapic_eoi ; /* End Of Interrupt to APIC */          \
244         /*pushl $irq_num ;*/                                            \
245         /*call  do_wrongintr ;*/                                        \
246         /*addl  $4,%esp ;*/                                             \
247         POP_FRAME ;                                                     \
248         iret  ;                                                         \
249
250 #endif
251
252 /*
253  * Handle "spurious INTerrupts".
254  * Notes:
255  *  This is different than the "spurious INTerrupt" generated by an
256  *   8259 PIC for missing INTs.  See the APIC documentation for details.
257  *  This routine should NOT do an 'EOI' cycle.
 *  Nothing is saved or modified; we simply return from the interrupt.
258  */
259         .text
260         SUPERALIGN_TEXT
261         .globl Xspuriousint
262 Xspuriousint:
263
264         /* No EOI cycle used here */
265
266         iret
267
268
269 /*
270  * Handle TLB shootdowns.
 *
 * Reloading %cr3 with its own value invalidates the (non-global) TLB
 * entries.  Only %eax is used and it is saved/restored, so no frame
 * needs to be pushed.
271  */
272         .text
273         SUPERALIGN_TEXT
274         .globl  Xinvltlb
275 Xinvltlb:
276         pushl   %eax
277
278         movl    %cr3, %eax              /* invalidate the TLB */
279         movl    %eax, %cr3
280
281         ss                              /* stack segment, avoid %ds load */
282         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
283
284         popl    %eax
285         iret
286
287
288 /*
289  * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
290  *
291  *  - Signals its receipt.
292  *  - Waits for permission to restart.
293  *  - Processes pending IPIQ events while waiting.
294  *  - Signals its restart.
295  */
296
297         .text
298         SUPERALIGN_TEXT
299         .globl Xcpustop
300 Xcpustop:
301         pushl   %ebp
302         movl    %esp, %ebp
303         pushl   %eax
304         pushl   %ecx
305         pushl   %edx
306         pushl   %ds                     /* save current data segment */
307         pushl   %fs
308
309         movl    $KDSEL, %eax
310         mov     %ax, %ds                /* use KERNEL data segment */
311         movl    $KPSEL, %eax
312         mov     %ax, %fs
313
314         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
315
        /* save our context into stoppcbs[cpuid] */
316         movl    PCPU(cpuid), %eax
317         imull   $PCB_SIZE, %eax
318         leal    CNAME(stoppcbs)(%eax), %eax
319         pushl   %eax
320         call    CNAME(savectx)          /* Save process context */
321         addl    $4, %esp
322         
323                 
324         movl    PCPU(cpuid), %eax       /* %eax = cpuid from here on */
325
326         /*
327          * Indicate that we have stopped and loop waiting for permission
328          * to start again.  We must still process IPI events while in a
329          * stopped state.
330          */
331         MPLOCKED
332         btsl    %eax, stopped_cpus      /* stopped_cpus |= (1<<id) */
333 1:
334         andl    $~RQF_IPIQ,PCPU(reqflags)
335         pushl   %eax
336         call    lwkt_smp_stopped
337         popl    %eax
338         btl     %eax, started_cpus      /* while (!(started_cpus & (1<<id))) */
339         jnc     1b
340
341         MPLOCKED
342         btrl    %eax, started_cpus      /* started_cpus &= ~(1<<id) */
343         MPLOCKED
344         btrl    %eax, stopped_cpus      /* stopped_cpus &= ~(1<<id) */
345
        /* only cpu 0 runs the (one-shot) restart function, if any */
346         test    %eax, %eax
347         jnz     2f
348
349         movl    CNAME(cpustop_restartfunc), %eax
350         test    %eax, %eax
351         jz      2f
352         movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */
353
354         call    *%eax
355 2:
356         popl    %fs
357         popl    %ds                     /* restore previous data segment */
358         popl    %edx
359         popl    %ecx
360         popl    %eax
361         movl    %ebp, %esp
362         popl    %ebp
363         iret
364
365         /*
366          * For now just have one ipiq IPI, but what we really want is
367          * to have one for each source cpu so the APICs don't get stalled
368          * backlogging the requests.
         *
         * If we are inside a critical section we cannot process the
         * queue now; set RQF_IPIQ and let doreti/crit-exit handle it.
369          */
370         .text
371         SUPERALIGN_TEXT
372         .globl Xipiq
373 Xipiq:
374         PUSH_FRAME
375         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
376         FAKE_MCOUNT(15*4(%esp))
377
378         movl    PCPU(curthread),%ebx
379         cmpl    $TDPRI_CRIT,TD_PRI(%ebx)
380         jge     1f
381         subl    $8,%esp                 /* make same as interrupt frame */
382         pushl   %esp                    /* pass frame by reference */
383         incl    PCPU(intr_nesting_level)
384         addl    $TDPRI_CRIT,TD_PRI(%ebx)        /* enter critical section */
385         call    lwkt_process_ipiq_frame
386         subl    $TDPRI_CRIT,TD_PRI(%ebx)        /* leave critical section */
387         decl    PCPU(intr_nesting_level)
388         addl    $12,%esp
389         pushl   $0                      /* CPL for frame (REMOVED) */
390         MEXITCOUNT
391         jmp     doreti
392 1:
393         orl     $RQF_IPIQ,PCPU(reqflags)
394         MEXITCOUNT
395         POP_FRAME
396         iret
397
398 #ifdef APIC_IO
399
/* Instantiate fast/slow/wrong handlers for the 24 IO APIC interrupt pins */
400 MCOUNT_LABEL(bintr)
401         FAST_INTR(0,apic_fastintr0)
402         FAST_INTR(1,apic_fastintr1)
403         FAST_INTR(2,apic_fastintr2)
404         FAST_INTR(3,apic_fastintr3)
405         FAST_INTR(4,apic_fastintr4)
406         FAST_INTR(5,apic_fastintr5)
407         FAST_INTR(6,apic_fastintr6)
408         FAST_INTR(7,apic_fastintr7)
409         FAST_INTR(8,apic_fastintr8)
410         FAST_INTR(9,apic_fastintr9)
411         FAST_INTR(10,apic_fastintr10)
412         FAST_INTR(11,apic_fastintr11)
413         FAST_INTR(12,apic_fastintr12)
414         FAST_INTR(13,apic_fastintr13)
415         FAST_INTR(14,apic_fastintr14)
416         FAST_INTR(15,apic_fastintr15)
417         FAST_INTR(16,apic_fastintr16)
418         FAST_INTR(17,apic_fastintr17)
419         FAST_INTR(18,apic_fastintr18)
420         FAST_INTR(19,apic_fastintr19)
421         FAST_INTR(20,apic_fastintr20)
422         FAST_INTR(21,apic_fastintr21)
423         FAST_INTR(22,apic_fastintr22)
424         FAST_INTR(23,apic_fastintr23)
425         
426         /* YYY what is this garbage? */
427
428         SLOW_INTR(0,apic_slowintr0,)
429         SLOW_INTR(1,apic_slowintr1,)
430         SLOW_INTR(2,apic_slowintr2,)
431         SLOW_INTR(3,apic_slowintr3,)
432         SLOW_INTR(4,apic_slowintr4,)
433         SLOW_INTR(5,apic_slowintr5,)
434         SLOW_INTR(6,apic_slowintr6,)
435         SLOW_INTR(7,apic_slowintr7,)
436         SLOW_INTR(8,apic_slowintr8,)
437         SLOW_INTR(9,apic_slowintr9,)
438         SLOW_INTR(10,apic_slowintr10,)
439         SLOW_INTR(11,apic_slowintr11,)
440         SLOW_INTR(12,apic_slowintr12,)
441         SLOW_INTR(13,apic_slowintr13,)
442         SLOW_INTR(14,apic_slowintr14,)
443         SLOW_INTR(15,apic_slowintr15,)
444         SLOW_INTR(16,apic_slowintr16,)
445         SLOW_INTR(17,apic_slowintr17,)
446         SLOW_INTR(18,apic_slowintr18,)
447         SLOW_INTR(19,apic_slowintr19,)
448         SLOW_INTR(20,apic_slowintr20,)
449         SLOW_INTR(21,apic_slowintr21,)
450         SLOW_INTR(22,apic_slowintr22,)
451         SLOW_INTR(23,apic_slowintr23,)
452
453         WRONGINTR(0,apic_wrongintr0)
454         WRONGINTR(1,apic_wrongintr1)
455         WRONGINTR(2,apic_wrongintr2)
456         WRONGINTR(3,apic_wrongintr3)
457         WRONGINTR(4,apic_wrongintr4)
458         WRONGINTR(5,apic_wrongintr5)
459         WRONGINTR(6,apic_wrongintr6)
460         WRONGINTR(7,apic_wrongintr7)
461         WRONGINTR(8,apic_wrongintr8)
462         WRONGINTR(9,apic_wrongintr9)
463         WRONGINTR(10,apic_wrongintr10)
464         WRONGINTR(11,apic_wrongintr11)
465         WRONGINTR(12,apic_wrongintr12)
466         WRONGINTR(13,apic_wrongintr13)
467         WRONGINTR(14,apic_wrongintr14)
468         WRONGINTR(15,apic_wrongintr15)
469         WRONGINTR(16,apic_wrongintr16)
470         WRONGINTR(17,apic_wrongintr17)
471         WRONGINTR(18,apic_wrongintr18)
472         WRONGINTR(19,apic_wrongintr19)
473         WRONGINTR(20,apic_wrongintr20)
474         WRONGINTR(21,apic_wrongintr21)
475         WRONGINTR(22,apic_wrongintr22)
476         WRONGINTR(23,apic_wrongintr23)
477 MCOUNT_LABEL(eintr)
478
479 #endif
480
481         .data
482
483 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
484         .globl stopped_cpus, started_cpus
/* bitmask of cpus currently stopped (bit index = cpuid) */
485 stopped_cpus:
486         .long   0
/* bitmask of cpus given permission to restart */
487 started_cpus:
488         .long   0
489
490         .globl CNAME(cpustop_restartfunc)
/* optional one-shot function pointer, run by cpu 0 on restart */
491 CNAME(cpustop_restartfunc):
492         .long 0
493                 
/* bitmask: bit set = pin is level-triggered (tested by MASK_LEVEL_IRQ) */
494         .globl  apic_pin_trigger
495 apic_pin_trigger:
496         .long   0
497
498         .text
499