/*
 * Remove unused code in pc32/apic_vector.s
 * [dragonfly.git] / sys / platform / pc32 / apic / apic_vector.s
 */
1 /*
2  *      from: vector.s, 386BSD 0.1 unknown origin
3  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4  * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.39 2008/08/02 01:14:43 dillon Exp $
5  */
6
7 #include "use_npx.h"
8 #include "opt_auto_eoi.h"
9
10 #include <machine/asmacros.h>
11 #include <machine/lock.h>
12 #include <machine/psl.h>
13 #include <machine/trap.h>
14
15 #include <machine_base/icu/icu.h>
16 #include <bus/isa/isa.h>
17
18 #include "assym.s"
19
20 #include "apicreg.h"
21 #include "apic_ipl.h"
22 #include <machine/smp.h>
23 #include <machine_base/isa/intr_machdep.h>
24
25 /* convert an absolute IRQ# into a bitmask (bit irq_num of a 32 bit word) */
26 #define IRQ_LBIT(irq_num)       (1 << (irq_num))
27
28 /* make an index into the IO APIC redirection table from the IRQ#:
 * each redirection entry is two 32-bit registers, starting at index 0x10 */
29 #define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
30
/*
 * MPLOCKED prefixes an instruction with "lock" on SMP kernels so
 * read-modify-write operations on shared words are atomic; it expands
 * to nothing on uniprocessor builds.
 */
31 #ifdef SMP
32 #define MPLOCKED     lock ;
33 #else
34 #define MPLOCKED
35 #endif
36
37 /*
38  * Push an interrupt frame in a format acceptable to doreti, reload
39  * the segment registers for the kernel.
 *
 * The three zero pushes fill the error code, trap type, and xflags
 * slots of the trapframe.  %ds/%es/%gs are loaded with the kernel data
 * selector (KDSEL); %fs gets the per-cpu private selector (KPSEL) so
 * the PCPU() accessors (hidden %fs prefixes) work.  Clobbers %ax.
40  */
41 #define PUSH_FRAME                                                      \
42         pushl   $0 ;            /* dummy error code */                  \
43         pushl   $0 ;            /* dummy trap type */                   \
44         pushl   $0 ;            /* dummy xflags type */                 \
45         pushal ;                                                        \
46         pushl   %ds ;           /* save data and extra segments ... */  \
47         pushl   %es ;                                                   \
48         pushl   %fs ;                                                   \
49         pushl   %gs ;                                                   \
50         cld ;                                                           \
51         mov     $KDSEL,%ax ;                                            \
52         mov     %ax,%ds ;                                               \
53         mov     %ax,%es ;                                               \
54         mov     %ax,%gs ;                                               \
55         mov     $KPSEL,%ax ;                                            \
56         mov     %ax,%fs ;                                               \
57
58 /*
59  * Warning: POP_FRAME can only be used if there is no chance of a
60  * segment register being changed (e.g. by procfs), which is why syscalls
61  * have to use doreti.
 *
 * Restores the registers saved by PUSH_FRAME in reverse order, then
 * discards the three dummy (xflags, trap, error code) slots.
62  */
63 #define POP_FRAME                                                       \
64         popl    %gs ;                                                   \
65         popl    %fs ;                                                   \
66         popl    %es ;                                                   \
67         popl    %ds ;                                                   \
68         popal ;                                                         \
69         addl    $3*4,%esp ;     /* dummy xflags, trap & error codes */  \
70
/*
 * Address the int_to_apicintpin[] entry for an absolute IRQ#:
 * the IO APIC register-select address, the redirection-table entry
 * index, and the flags word (IOAPIC_IM_FLAG_*).
 */
71 #define IOAPICADDR(irq_num) \
72         CNAME(int_to_apicintpin) + IOAPIC_IM_SIZE * (irq_num) + IOAPIC_IM_ADDR
73 #define REDIRIDX(irq_num) \
74         CNAME(int_to_apicintpin) + IOAPIC_IM_SIZE * (irq_num) + IOAPIC_IM_ENTIDX
75 #define IOAPICFLAGS(irq_num) \
76         CNAME(int_to_apicintpin) + IOAPIC_IM_SIZE * (irq_num) + IOAPIC_IM_FLAGS
/*
 * Mask the IRQ at its IO APIC while holding the APIC imask spinlock.
 * No-op if IOAPIC_IM_FLAG_MASKED is already set; otherwise sets the
 * flag and the INTMASK bit in the redirection entry.  Clobbers %eax
 * and %ecx on the masking path.
 */
78 #define MASK_IRQ(irq_num)                                               \
79         APIC_IMASK_LOCK ;                       /* into critical reg */ \
80         testl   $IOAPIC_IM_FLAG_MASKED, IOAPICFLAGS(irq_num) ;          \
81         jne     7f ;                    /* masked, don't mask */        \
82         orl     $IOAPIC_IM_FLAG_MASKED, IOAPICFLAGS(irq_num) ;          \
83                                                 /* set the mask bit */  \
84         movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
85         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
86         movl    %eax, (%ecx) ;                  /* write the index */   \
87         orl     $IOART_INTMASK,IOAPIC_WINDOW(%ecx) ;/* set the mask */  \
88 7: ;                                            /* already masked */    \
89         APIC_IMASK_UNLOCK ;                                             \
90
91 /*
92  * Test to see whether we are handling an edge or level triggered INT.
93  *  Level-triggered INTs must still be masked as we don't clear the source,
94  *  and the EOI cycle would cause redundant INTs to occur.
 *  Edge-triggered sources fall straight through to 9: with no masking.
95  */
96 #define MASK_LEVEL_IRQ(irq_num)                                         \
97         testl   $IOAPIC_IM_FLAG_LEVEL, IOAPICFLAGS(irq_num) ;           \
98         jz      9f ;                            /* edge, don't mask */  \
99         MASK_IRQ(irq_num) ;                                             \
100 9: ;                                                                    \
101
102 /*
103  * Test to see if the source is currently masked, clear if so.
 *
 * Entered with %eax holding the fast handler's return value: a non-zero
 * value means the interrupt must stay masked, so the whole unmask is
 * skipped (jump to 8:).  Clobbers %eax and %ecx on the unmask path.
104  */
105 #define UNMASK_IRQ(irq_num)                                             \
106         cmpl    $0,%eax ;                                               \
107         jnz     8f ;                                                    \
108         APIC_IMASK_LOCK ;                       /* into critical reg */ \
109         testl   $IOAPIC_IM_FLAG_MASKED, IOAPICFLAGS(irq_num) ;          \
110         je      7f ;                    /* bit clear, not masked */     \
111         andl    $~IOAPIC_IM_FLAG_MASKED, IOAPICFLAGS(irq_num) ;         \
112                                                 /* clear mask bit */    \
113         movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
114         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
115         movl    %eax,(%ecx) ;                   /* write the index */   \
116         andl    $~IOART_INTMASK,IOAPIC_WINDOW(%ecx) ;/* clear the mask */ \
117 7: ;                                                                    \
118         APIC_IMASK_UNLOCK ;                                             \
119 8: ;                                                                    \
120
121 #ifdef APIC_IO
122
123 /*
124  * Fast interrupt call handlers run in the following sequence:
125  *
126  *      - Push the trap frame required by doreti
127  *      - Mask the interrupt and reenable its source
128  *      - If we cannot take the interrupt set its fpending bit and
129  *        doreti.  Note that we cannot mess with mp_lock at all
130  *        if we entered from a critical section!
131  *      - If we can take the interrupt clear its fpending bit,
132  *        call the handler, then unmask and doreti.
133  *
 *      A zero 'cpl' word is pushed on top of the frame for doreti.
 *
134  * YYY can cache gd base pointer instead of using hidden %fs prefixes.
135  */
136
137 #define FAST_INTR(irq_num, vec_name)                                    \
138         .text ;                                                         \
139         SUPERALIGN_TEXT ;                                               \
140 IDTVEC(vec_name) ;                                                      \
141         PUSH_FRAME ;                                                    \
142         FAKE_MCOUNT(15*4(%esp)) ;                                       \
143         MASK_LEVEL_IRQ(irq_num) ;                                       \
144         movl    $0, lapic_eoi ;                                         \
145         movl    PCPU(curthread),%ebx ;                                  \
146         movl    $0,%eax ;       /* CURRENT CPL IN FRAME (REMOVED) */    \
147         pushl   %eax ;                                                  \
148         testl   $-1,TD_NEST_COUNT(%ebx) ;                               \
149         jne     1f ;                                                    \
150         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
151         jl      2f ;                                                    \
152 1: ;                                                                    \
153         /* in critical section, make interrupt pending */               \
154         /* set the pending bit and return, leave interrupt masked */    \
155         orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
156         orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
157         jmp     5f ;                                                    \
158 2: ;                                                                    \
159         /* clear pending bit, run handler */                            \
160         andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
161         pushl   $irq_num ;                                              \
162         pushl   %esp ;                   /* pass frame by reference */  \
163         call    ithread_fast_handler ;   /* returns 0 to unmask */      \
164         addl    $8, %esp ;                                              \
165         UNMASK_IRQ(irq_num) ;                                           \
166 5: ;                                                                    \
167         MEXITCOUNT ;                                                    \
168         jmp     doreti ;                                                \
169
170 /*
171  * Slow interrupt call handlers run in the following sequence:
172  *
173  *      - Push the trap frame required by doreti.
174  *      - Mask the interrupt and reenable its source.
175  *      - If we cannot take the interrupt set its ipending bit and
176  *        doreti.  In addition to checking for a critical section
177  *        and cpl mask we also check to see if the thread is still
178  *        running.  Note that we cannot mess with mp_lock at all
179  *        if we entered from a critical section!
180  *      - If we can take the interrupt clear its ipending bit
181  *        and schedule the thread.  Leave interrupts masked and doreti.
182  *
183  *      Note that calls to sched_ithd() are made with interrupts enabled
184  *      and outside a critical section.  YYY sched_ithd may preempt us
185  *      synchronously (fix interrupt stacking).
186  *
187  *      YYY can cache gd base pointer instead of using hidden %fs
188  *      prefixes.
189  */
190
191 #define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending)              \
192         .text ;                                                         \
193         SUPERALIGN_TEXT ;                                               \
194 IDTVEC(vec_name) ;                                                      \
195         PUSH_FRAME ;                                                    \
196         maybe_extra_ipending ;                                          \
197 ;                                                                       \
198         MASK_LEVEL_IRQ(irq_num) ;                                       \
199         incl    PCPU(cnt) + V_INTR ;                                    \
200         movl    $0, lapic_eoi ;                                         \
201         movl    PCPU(curthread),%ebx ;                                  \
202         movl    $0,%eax ;       /* CURRENT CPL IN FRAME (REMOVED) */    \
203         pushl   %eax ;          /* cpl to restore */                    \
204         testl   $-1,TD_NEST_COUNT(%ebx) ;                               \
205         jne     1f ;                                                    \
206         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
207         jl      2f ;                                                    \
208 1: ;                                                                    \
209         /* set the pending bit and return, leave the interrupt masked */ \
210         orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
211         orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
212         jmp     5f ;                                                    \
213 2: ;                                                                    \
214         /* set running bit, clear pending bit, run handler */           \
215         andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
216         incl    TD_NEST_COUNT(%ebx) ;                                   \
217         sti ;                                                           \
218         pushl   $irq_num ;                                              \
219         call    sched_ithd ;                                            \
220         addl    $4,%esp ;                                               \
221         cli ;                                                           \
222         decl    TD_NEST_COUNT(%ebx) ;                                   \
223 5: ;                                                                    \
224         MEXITCOUNT ;                                                    \
225         jmp     doreti ;                                                \
226
227 /*
228  * Wrong interrupt call handlers.  We program these into APIC vectors
229  * that should otherwise never occur.  For example, we program the SLOW
230  * vector for irq N with this when we program the FAST vector with the
231  * real interrupt.
232  *
233  * XXX for now all we can do is EOI it.  We can't call do_wrongintr
234  * (yet) because we could be in a critical section.
 *
 * The full PUSH_FRAME/POP_FRAME pair is used so the lapic_eoi store
 * runs with kernel segment registers loaded.
235  */
236 #define WRONGINTR(irq_num,vec_name)                                     \
237         .text ;                                                         \
238         SUPERALIGN_TEXT  ;                                              \
239 IDTVEC(vec_name) ;                                                      \
240         PUSH_FRAME ;                                                    \
241         movl    $0, lapic_eoi ; /* End Of Interrupt to APIC */          \
242         /*pushl $irq_num ;*/                                            \
243         /*call  do_wrongintr ;*/                                        \
244         /*addl  $4,%esp ;*/                                             \
245         POP_FRAME ;                                                     \
246         iret  ;                                                         \
247
248 #endif
249
250 /*
251  * Handle "spurious INTerrupts".
252  * Notes:
253  *  This is different than the "spurious INTerrupt" generated by an
254  *   8259 PIC for missing INTs.  See the APIC documentation for details.
255  *  This routine should NOT do an 'EOI' cycle.
 *  No registers are touched, so no frame needs to be saved.
256  */
257         .text
258         SUPERALIGN_TEXT
259         .globl Xspuriousint
260 Xspuriousint:
261
262         /* No EOI cycle used here */
263
264         iret
265
266
267 /*
268  * Handle TLB shootdowns.
 *
 * Reloading %cr3 invalidates the (non-global) TLB entries.  Only %eax
 * is used, so only %eax is saved; the ss segment override lets the
 * lapic_eoi store run without loading a kernel %ds.
269  */
270         .text
271         SUPERALIGN_TEXT
272         .globl  Xinvltlb
273 Xinvltlb:
274         pushl   %eax
275
276         movl    %cr3, %eax              /* invalidate the TLB */
277         movl    %eax, %cr3
278
279         ss                              /* stack segment, avoid %ds load */
280         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
281
282         popl    %eax
283         iret
284
285
286 /*
287  * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
288  *
289  *  - Signals its receipt.
290  *  - Waits for permission to restart.
291  *  - Processing pending IPIQ events while waiting.
292  *  - Signals its restart.
293  */
294
295         .text
296         SUPERALIGN_TEXT
297         .globl Xcpustop
298 Xcpustop:
299         pushl   %ebp
300         movl    %esp, %ebp
301         pushl   %eax
302         pushl   %ecx
303         pushl   %edx
304         pushl   %ds                     /* save current data segment */
305         pushl   %fs
306
307         movl    $KDSEL, %eax
308         mov     %ax, %ds                /* use KERNEL data segment */
309         movl    $KPSEL, %eax
310         mov     %ax, %fs
311
312         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
313
        /* save this cpu's context into stoppcbs[cpuid] */
314         movl    PCPU(cpuid), %eax
315         imull   $PCB_SIZE, %eax
316         leal    CNAME(stoppcbs)(%eax), %eax
317         pushl   %eax
318         call    CNAME(savectx)          /* Save process context */
319         addl    $4, %esp
320         
321                 
322         movl    PCPU(cpuid), %eax
323
324         /*
325          * Indicate that we have stopped and loop waiting for permission
326          * to start again.  We must still process IPI events while in a
327          * stopped state.
328          */
329         MPLOCKED
330         btsl    %eax, stopped_cpus      /* stopped_cpus |= (1<<id) */
331 1:
332         andl    $~RQF_IPIQ,PCPU(reqflags)
333         pushl   %eax
334         call    lwkt_smp_stopped
335         popl    %eax
336         btl     %eax, started_cpus      /* while (!(started_cpus & (1<<id))) */
337         jnc     1b
338
339         MPLOCKED
340         btrl    %eax, started_cpus      /* started_cpus &= ~(1<<id) */
341         MPLOCKED
342         btrl    %eax, stopped_cpus      /* stopped_cpus &= ~(1<<id) */
343
        /* only cpu 0 (%eax == cpuid) may run the restart function */
344         test    %eax, %eax
345         jnz     2f
346
347         movl    CNAME(cpustop_restartfunc), %eax
348         test    %eax, %eax
349         jz      2f
350         movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */
351
352         call    *%eax
353 2:
354         popl    %fs
355         popl    %ds                     /* restore previous data segment */
356         popl    %edx
357         popl    %ecx
358         popl    %eax
359         movl    %ebp, %esp
360         popl    %ebp
361         iret
362
363         /*
364          * For now just have one ipiq IPI, but what we really want is
365          * to have one for each source cpu so the APICs don't get stalled
366          * backlogging the requests.
367          */
368         .text
369         SUPERALIGN_TEXT
370         .globl Xipiq
371 Xipiq:
372         PUSH_FRAME
373         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
374         FAKE_MCOUNT(15*4(%esp))
375
376         incl    PCPU(cnt) + V_IPI
377         movl    PCPU(curthread),%ebx
378         cmpl    $TDPRI_CRIT,TD_PRI(%ebx)
379         jge     1f
380         subl    $8,%esp                 /* make same as interrupt frame */
381         pushl   %esp                    /* pass frame by reference */
382         incl    PCPU(intr_nesting_level)
383         addl    $TDPRI_CRIT,TD_PRI(%ebx)
384         call    lwkt_process_ipiq_frame
385         subl    $TDPRI_CRIT,TD_PRI(%ebx)
386         decl    PCPU(intr_nesting_level)
387         addl    $12,%esp                /* pop frame ref + 8 dummy bytes */
388         pushl   $0                      /* CPL for frame (REMOVED) */
389         MEXITCOUNT
390         jmp     doreti
391 1:
392         orl     $RQF_IPIQ,PCPU(reqflags)
393         MEXITCOUNT
394         POP_FRAME
395         iret
396
/*
 * LAPIC timer interrupt.  Process it directly via
 * lapic_timer_process_frame unless the current thread is in a critical
 * section or has a non-zero nest count, in which case just flag
 * RQF_TIMER and return.
 */
397         .text
398         SUPERALIGN_TEXT
399         .globl Xtimer
400 Xtimer:
401         PUSH_FRAME
402         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
403         FAKE_MCOUNT(15*4(%esp))
404
405         incl    PCPU(cnt) + V_TIMER
406         movl    PCPU(curthread),%ebx
407         cmpl    $TDPRI_CRIT,TD_PRI(%ebx)
408         jge     1f
409         testl   $-1,TD_NEST_COUNT(%ebx)
410         jne     1f
411         subl    $8,%esp                 /* make same as interrupt frame */
412         pushl   %esp                    /* pass frame by reference */
413         incl    PCPU(intr_nesting_level)
414         addl    $TDPRI_CRIT,TD_PRI(%ebx)
415         call    lapic_timer_process_frame
416         subl    $TDPRI_CRIT,TD_PRI(%ebx)
417         decl    PCPU(intr_nesting_level)
418         addl    $12,%esp                /* pop frame ref + 8 dummy bytes */
419         pushl   $0                      /* CPL for frame (REMOVED) */
420         MEXITCOUNT
421         jmp     doreti
422 1:
423         orl     $RQF_TIMER,PCPU(reqflags)
424         MEXITCOUNT
425         POP_FRAME
426         iret
427
428 #ifdef APIC_IO
429
430 MCOUNT_LABEL(bintr)
        /* FAST vectors: one per IRQ 0-23 */
431         FAST_INTR(0,apic_fastintr0)
432         FAST_INTR(1,apic_fastintr1)
433         FAST_INTR(2,apic_fastintr2)
434         FAST_INTR(3,apic_fastintr3)
435         FAST_INTR(4,apic_fastintr4)
436         FAST_INTR(5,apic_fastintr5)
437         FAST_INTR(6,apic_fastintr6)
438         FAST_INTR(7,apic_fastintr7)
439         FAST_INTR(8,apic_fastintr8)
440         FAST_INTR(9,apic_fastintr9)
441         FAST_INTR(10,apic_fastintr10)
442         FAST_INTR(11,apic_fastintr11)
443         FAST_INTR(12,apic_fastintr12)
444         FAST_INTR(13,apic_fastintr13)
445         FAST_INTR(14,apic_fastintr14)
446         FAST_INTR(15,apic_fastintr15)
447         FAST_INTR(16,apic_fastintr16)
448         FAST_INTR(17,apic_fastintr17)
449         FAST_INTR(18,apic_fastintr18)
450         FAST_INTR(19,apic_fastintr19)
451         FAST_INTR(20,apic_fastintr20)
452         FAST_INTR(21,apic_fastintr21)
453         FAST_INTR(22,apic_fastintr22)
454         FAST_INTR(23,apic_fastintr23)
455         
456         /* YYY what is this garbage? */
457
        /* SLOW vectors: one per IRQ 0-23, no extra ipending work */
458         SLOW_INTR(0,apic_slowintr0,)
459         SLOW_INTR(1,apic_slowintr1,)
460         SLOW_INTR(2,apic_slowintr2,)
461         SLOW_INTR(3,apic_slowintr3,)
462         SLOW_INTR(4,apic_slowintr4,)
463         SLOW_INTR(5,apic_slowintr5,)
464         SLOW_INTR(6,apic_slowintr6,)
465         SLOW_INTR(7,apic_slowintr7,)
466         SLOW_INTR(8,apic_slowintr8,)
467         SLOW_INTR(9,apic_slowintr9,)
468         SLOW_INTR(10,apic_slowintr10,)
469         SLOW_INTR(11,apic_slowintr11,)
470         SLOW_INTR(12,apic_slowintr12,)
471         SLOW_INTR(13,apic_slowintr13,)
472         SLOW_INTR(14,apic_slowintr14,)
473         SLOW_INTR(15,apic_slowintr15,)
474         SLOW_INTR(16,apic_slowintr16,)
475         SLOW_INTR(17,apic_slowintr17,)
476         SLOW_INTR(18,apic_slowintr18,)
477         SLOW_INTR(19,apic_slowintr19,)
478         SLOW_INTR(20,apic_slowintr20,)
479         SLOW_INTR(21,apic_slowintr21,)
480         SLOW_INTR(22,apic_slowintr22,)
481         SLOW_INTR(23,apic_slowintr23,)
482
        /* catch-all vectors for IRQs whose other vector is in use */
483         WRONGINTR(0,apic_wrongintr0)
484         WRONGINTR(1,apic_wrongintr1)
485         WRONGINTR(2,apic_wrongintr2)
486         WRONGINTR(3,apic_wrongintr3)
487         WRONGINTR(4,apic_wrongintr4)
488         WRONGINTR(5,apic_wrongintr5)
489         WRONGINTR(6,apic_wrongintr6)
490         WRONGINTR(7,apic_wrongintr7)
491         WRONGINTR(8,apic_wrongintr8)
492         WRONGINTR(9,apic_wrongintr9)
493         WRONGINTR(10,apic_wrongintr10)
494         WRONGINTR(11,apic_wrongintr11)
495         WRONGINTR(12,apic_wrongintr12)
496         WRONGINTR(13,apic_wrongintr13)
497         WRONGINTR(14,apic_wrongintr14)
498         WRONGINTR(15,apic_wrongintr15)
499         WRONGINTR(16,apic_wrongintr16)
500         WRONGINTR(17,apic_wrongintr17)
501         WRONGINTR(18,apic_wrongintr18)
502         WRONGINTR(19,apic_wrongintr19)
503         WRONGINTR(20,apic_wrongintr20)
504         WRONGINTR(21,apic_wrongintr21)
505         WRONGINTR(22,apic_wrongintr22)
506         WRONGINTR(23,apic_wrongintr23)
507 MCOUNT_LABEL(eintr)
508
509 #endif
510
511         .data
512
513 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
514         .globl stopped_cpus, started_cpus
/* bitmask of cpus that have acknowledged an Xcpustop IPI */
515 stopped_cpus:
516         .long   0
/* bitmask of cpus that have been given permission to restart */
517 started_cpus:
518         .long   0
519
520         .globl CNAME(cpustop_restartfunc)
/* optional one-shot function pointer, run by cpu 0 on restart */
521 CNAME(cpustop_restartfunc):
522         .long 0
523                 
524         .text
525