/*
 * AMD64 - Sync AMD64 support from Jordan Gordeev's svn repository and
 * [dragonfly.git] / sys / platform / pc64 / icu / icu_vector.s
 */
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
 * $DragonFly: src/sys/platform/pc64/icu/icu_vector.s,v 1.1 2008/08/29 17:07:16 dillon Exp $
 */
/*
 * WARNING!  SMP builds can use the ICU now so this code must be MP safe.
 */

/* kernel build option: AUTO_EOI_1 / AUTO_EOI_2 */
#include "opt_auto_eoi.h"

#include <machine/asmacros.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <machine_base/icu/icu.h>
#include <bus/isa/i386/isa.h>

/* assym.s supplies the TF_*/TD_* structure offsets used below */
#include "assym.s"
#include "icu_ipl.h"

#ifndef APIC_IO

#define ICU_IMR_OFFSET          1       /* IO_ICU{1,2} + 1 */

#define ICU_EOI                 0x20    /* XXX - define elsewhere */

/*
 * Selectors for the 16-IRQ space: IRQ_LBIT is the bit within a 32-bit
 * pending word, IRQ_BIT/IRQ_BYTE locate the bit within the per-ICU
 * 8-bit mask bytes of icu_imen (8 IRQs per ICU).
 */
#define IRQ_LBIT(irq_num)       (1 << (irq_num))
#define IRQ_BIT(irq_num)        (1 << ((irq_num) % 8))
#define IRQ_BYTE(irq_num)       ((irq_num) >> 3)

#ifdef AUTO_EOI_1
#define ENABLE_ICU1             /* use auto-EOI to reduce i/o */
#define OUTB_ICU1
#else
#define ENABLE_ICU1                                                     \
        movb    $ICU_EOI,%al ;  /* as soon as possible send EOI ... */  \
        OUTB_ICU1 ;             /* ... to clear in service bit */       \

#define OUTB_ICU1                                                       \
        outb    %al,$IO_ICU1 ;                                          \

#endif

#ifdef AUTO_EOI_2
/*
 * The data sheet says no auto-EOI on slave, but it sometimes works.
 */
#define ENABLE_ICU1_AND_2       ENABLE_ICU1
#else
#define ENABLE_ICU1_AND_2                                               \
        movb    $ICU_EOI,%al ;  /* as above */                          \
        outb    %al,$IO_ICU2 ;  /* but do second icu first ... */       \
        OUTB_ICU1 ;     /* ... then first icu (if !AUTO_EOI_1) */       \

#endif

/*
 * Macro helpers
 */

/*
 * Build the trap frame expected by doreti.  PUSH_FRAME (asmacros.h)
 * saves the general registers (15 regs + space for 4 extras); the four
 * extra slots are then zeroed because no hardware error code or trap
 * number is supplied for interrupt vectors.  cld establishes the
 * direction-flag convention expected by kernel C code.  Clobbers %eax
 * transitively only via the caller's later code, not here.
 */
#define ICU_PUSH_FRAME                                                  \
        PUSH_FRAME ;            /* 15 regs + space for 4 extras */      \
        movl $0,TF_TRAPNO(%rsp) ;                                       \
        movl $0,TF_ADDR(%rsp) ;                                         \
        movl $0,TF_FLAGS(%rsp) ;                                        \
        movl $0,TF_ERR(%rsp) ;                                          \
        cld ;                                                           \

/*
 * Mask irq_num at the given ICU: set its bit in the shadow mask byte
 * in icu_imen and write the updated byte to the ICU's interrupt mask
 * register (IMR).  ICU_IMASK_LOCK/UNLOCK (icu_ipl.h) bracket the
 * read-modify-write for MP safety.  Clobbers %al and flags.
 */
#define MASK_IRQ(icu, irq_num)                                          \
        ICU_IMASK_LOCK ;                                                \
        movb    icu_imen + IRQ_BYTE(irq_num),%al ;                      \
        orb     $IRQ_BIT(irq_num),%al ;                                 \
        movb    %al,icu_imen + IRQ_BYTE(irq_num) ;                      \
        outb    %al,$icu+ICU_IMR_OFFSET ;                               \
        ICU_IMASK_UNLOCK ;                                              \

/*
 * Conditionally unmask irq_num: only when %eax == 0 (e.g. the fast
 * handler returned 0, requesting an unmask) clear the IRQ's bit in
 * icu_imen and write the byte back to the ICU's IMR; a non-zero %eax
 * leaves the source masked.  Clobbers %al and flags.
 */
#define UNMASK_IRQ(icu, irq_num)                                        \
        cmpl    $0,%eax ;                                               \
        jnz     8f ;                                                    \
        ICU_IMASK_LOCK ;                                                \
        movb    icu_imen + IRQ_BYTE(irq_num),%al ;                      \
        andb    $~IRQ_BIT(irq_num),%al ;                                \
        movb    %al,icu_imen + IRQ_BYTE(irq_num) ;                      \
        outb    %al,$icu+ICU_IMR_OFFSET ;                               \
        ICU_IMASK_UNLOCK ;                                              \
8: ;                                                                    \

/*
 * Fast interrupt call handlers run in the following sequence:
 *
 *      - Push the trap frame required by doreti.
 *      - Mask the interrupt and reenable its source.
 *      - If we cannot take the interrupt set its fpending bit and
 *        doreti.
 *      - If we can take the interrupt clear its fpending bit,
 *        call the handler, then unmask the interrupt and doreti.
 *
 *      YYY can cache gd base pointer instead of using hidden %fs
 *      prefixes.
 */

#define FAST_INTR(irq_num, vec_name, icu, enable_icus)                  \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        ICU_PUSH_FRAME ;                                                \
        FAKE_MCOUNT(15*8(%rsp)) ;       /* %rip slot above 15 saves */  \
        MASK_IRQ(icu, irq_num) ;                                        \
        enable_icus ;                                                   \
        movq    PCPU(curthread),%rbx ;                                  \
        testl   $-1,TD_NEST_COUNT(%rbx) ;  /* nested? -> pend it */    \
        jne     1f ;                                                    \
        cmpl    $TDPRI_CRIT,TD_PRI(%rbx) ; /* in critical section? */   \
        jl      2f ;                                                    \
1: ;                                                                    \
        /* set pending bit and return, leave interrupt masked */        \
        orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
        orl     $RQF_INTPEND, PCPU(reqflags) ;                          \
        jmp     5f ;                                                    \
2: ;                                                                    \
        /* clear pending bit, run handler */                            \
        andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
        pushq   $irq_num ;                                              \
        movq    %rsp,%rdi ;             /* rdi = call argument */       \
        call    ithread_fast_handler ;  /* returns 0 to unmask int */   \
        addq    $8,%rsp ;               /* intr frame -> trap frame */  \
        UNMASK_IRQ(icu, irq_num) ;                                      \
5: ;                                                                    \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \

/*
 * Slow interrupt call handlers run in the following sequence:
 *
 *      - Push the trap frame required by doreti.
 *      - Mask the interrupt and reenable its source.
 *      - If we cannot take the interrupt set its ipending bit and
 *        doreti.  In addition to checking for a critical section
 *        and cpl mask we also check to see if the thread is still
 *        running.
 *      - If we can take the interrupt clear its ipending bit
 *        and schedule its thread.  Leave interrupts masked and doreti.
 *
 *      sched_ithd() is called with interrupts enabled and outside of a
 *      critical section (so it can preempt us).
 *
 *      YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
 *
 *      Note that intr_nesting_level is not bumped during sched_ithd because
 *      blocking allocations are allowed in the preemption case.
 *
 *      YYY can cache gd base pointer instead of using hidden %fs
 *      prefixes.
 */

#define SLOW_INTR(irq_num, vec_name, icu, enable_icus)                  \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        ICU_PUSH_FRAME ;                                                \
        FAKE_MCOUNT(15*8(%rsp)) ;       /* %rip slot above 15 saves */  \
        MASK_IRQ(icu, irq_num) ;                                        \
        incl    PCPU(cnt) + V_INTR ;    /* statistics counter */        \
        enable_icus ;                                                   \
        movq    PCPU(curthread),%rbx ;                                  \
        testl   $-1,TD_NEST_COUNT(%rbx) ;  /* nested? -> pend it */    \
        jne     1f ;                                                    \
        cmpl    $TDPRI_CRIT,TD_PRI(%rbx) ; /* in critical section? */   \
        jl      2f ;                                                    \
1: ;                                                                    \
        /* set the pending bit and return, leave interrupt masked */    \
        orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
        orl     $RQF_INTPEND, PCPU(reqflags) ;                          \
        jmp     5f ;                                                    \
2: ;                                                                    \
        /* set running bit, clear pending bit, run handler */           \
        andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
        incl    TD_NEST_COUNT(%rbx) ;   /* prevent recursive sched */   \
        sti ;                                                           \
        movq    $irq_num,%rdi ; /* %rdi = argument to call */           \
        call    sched_ithd ;                                            \
        cli ;                                                           \
        decl    TD_NEST_COUNT(%rbx) ;                                   \
5: ;                                                                    \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \

/*
 * Unmask a slow interrupt.  This function is used by interrupt threads
 * after they have descheduled themselves to reenable interrupts and
 * possibly cause a reschedule to occur.
 */

#define INTR_UNMASK(irq_num, vec_name, icu)                             \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        pushq   %rbp ;   /* frame for ddb backtrace */                  \
        movq    %rsp, %rbp ;                                            \
        xorl    %eax, %eax ;    /* %eax = 0 selects the unmask path */  \
        UNMASK_IRQ(icu, irq_num) ;                                      \
        popq    %rbp ;                                                  \
        ret ;                                                           \

MCOUNT_LABEL(bintr)
        /* fast handlers: IRQs 0-7 on the master ICU, 8-15 on the slave */
        FAST_INTR(0,icu_fastintr0, IO_ICU1, ENABLE_ICU1)
        FAST_INTR(1,icu_fastintr1, IO_ICU1, ENABLE_ICU1)
        FAST_INTR(2,icu_fastintr2, IO_ICU1, ENABLE_ICU1)
        FAST_INTR(3,icu_fastintr3, IO_ICU1, ENABLE_ICU1)
        FAST_INTR(4,icu_fastintr4, IO_ICU1, ENABLE_ICU1)
        FAST_INTR(5,icu_fastintr5, IO_ICU1, ENABLE_ICU1)
        FAST_INTR(6,icu_fastintr6, IO_ICU1, ENABLE_ICU1)
        FAST_INTR(7,icu_fastintr7, IO_ICU1, ENABLE_ICU1)
        FAST_INTR(8,icu_fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
        FAST_INTR(9,icu_fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
        FAST_INTR(10,icu_fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
        FAST_INTR(11,icu_fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
        FAST_INTR(12,icu_fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
        FAST_INTR(13,icu_fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
        FAST_INTR(14,icu_fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
        FAST_INTR(15,icu_fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)

        /* slow (threaded) handlers, same master/slave split */
        SLOW_INTR(0,icu_slowintr0, IO_ICU1, ENABLE_ICU1)
        SLOW_INTR(1,icu_slowintr1, IO_ICU1, ENABLE_ICU1)
        SLOW_INTR(2,icu_slowintr2, IO_ICU1, ENABLE_ICU1)
        SLOW_INTR(3,icu_slowintr3, IO_ICU1, ENABLE_ICU1)
        SLOW_INTR(4,icu_slowintr4, IO_ICU1, ENABLE_ICU1)
        SLOW_INTR(5,icu_slowintr5, IO_ICU1, ENABLE_ICU1)
        SLOW_INTR(6,icu_slowintr6, IO_ICU1, ENABLE_ICU1)
        SLOW_INTR(7,icu_slowintr7, IO_ICU1, ENABLE_ICU1)
        SLOW_INTR(8,icu_slowintr8, IO_ICU2, ENABLE_ICU1_AND_2)
        SLOW_INTR(9,icu_slowintr9, IO_ICU2, ENABLE_ICU1_AND_2)
        SLOW_INTR(10,icu_slowintr10, IO_ICU2, ENABLE_ICU1_AND_2)
        SLOW_INTR(11,icu_slowintr11, IO_ICU2, ENABLE_ICU1_AND_2)
        SLOW_INTR(12,icu_slowintr12, IO_ICU2, ENABLE_ICU1_AND_2)
        SLOW_INTR(13,icu_slowintr13, IO_ICU2, ENABLE_ICU1_AND_2)
        SLOW_INTR(14,icu_slowintr14, IO_ICU2, ENABLE_ICU1_AND_2)
        SLOW_INTR(15,icu_slowintr15, IO_ICU2, ENABLE_ICU1_AND_2)

MCOUNT_LABEL(eintr)

        .data

        .text

#endif