MachIntrABI: intr_{config,cpuid} -> legacy_intr_{config,cpuid}
[dragonfly.git] / sys / platform / vkernel64 / platform / machintr.c
1 /*
2  * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/platform/vkernel/platform/machintr.c,v 1.17 2008/04/30 16:59:45 dillon Exp $
35  */
36
37 #include <sys/types.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/machintr.h>
41 #include <sys/errno.h>
42 #include <sys/mman.h>
43 #include <sys/globaldata.h>
44 #include <sys/interrupt.h>
45 #include <stdio.h>
46 #include <signal.h>
47 #include <machine/globaldata.h>
48 #include <machine/md_var.h>
49 #include <sys/thread2.h>
50
/*
 * Interrupt Subsystem ABI
 *
 * Forward declarations for the no-op handlers wired into the
 * MachIntrABI dispatch structure below.  The vkernel has no interrupt
 * controller hardware to program, so these are all stubs.
 */

static void dummy_intr_disable(int);
static void dummy_intr_enable(int);
static void dummy_intr_setup(int, int);
static void dummy_intr_teardown(int);
static int dummy_legacy_intr_cpuid(int);
static void dummy_finalize(void);
static void dummy_intrcleanup(void);
static void dummy_stabilize(void);
63
/*
 * Machine interrupt ABI for the vkernel: generic model with no-op
 * hardware hooks, since "interrupts" arrive as host signals rather
 * than through a PIC/APIC (see cpu_disable_intr() below).
 */
struct machintr_abi MachIntrABI = {
	MACHINTR_GENERIC,		/* abi type (positional initializer) */
	.intr_disable = dummy_intr_disable,
	.intr_enable =  dummy_intr_enable,
	.intr_setup =   dummy_intr_setup,
	.intr_teardown = dummy_intr_teardown,
	.legacy_intr_cpuid = dummy_legacy_intr_cpuid,

	.finalize =     dummy_finalize,
	.cleanup =      dummy_intrcleanup,
	.stabilize =    dummy_stabilize
};
76
/*
 * No-op: no interrupt controller exists to mask an individual source.
 */
static void
dummy_intr_disable(int intr)
{
}
81
/*
 * No-op: no interrupt controller exists to unmask an individual source.
 */
static void
dummy_intr_enable(int intr)
{
}
86
/*
 * No-op: no per-interrupt hardware configuration is needed.
 */
static void
dummy_intr_setup(int intr, int flags)
{
}
91
/*
 * No-op: nothing was set up, so nothing to tear down.
 */
static void
dummy_intr_teardown(int intr)
{
}
96
/*
 * No-op finalize hook for the interrupt ABI.
 */
static void
dummy_finalize(void)
{
}
101
/*
 * No-op cleanup hook for the interrupt ABI.
 */
static void
dummy_intrcleanup(void)
{
}
106
/*
 * No-op stabilize hook for the interrupt ABI.
 */
static void
dummy_stabilize(void)
{
}
111
/*
 * Legacy interrupt routing: every irq is serviced on cpu 0.
 * The irq number is deliberately ignored.
 */
static int
dummy_legacy_intr_cpuid(int irq)
{
	(void)irq;
	return (0);
}
117
/*
 * Process pending interrupts
 *
 * Drains all work recorded in gd_reqflags: inter-processor messages
 * (RQF_IPIQ) and pending soft/hard interrupts (RQF_INTPEND).  Loops
 * until no request flags remain, with each pass performed inside a
 * critical section.
 */
void
splz(void)
{
	struct mdglobaldata *gd = mdcpu;
	thread_t td = gd->mi.gd_curthread;
	int irq;

	while (gd->mi.gd_reqflags & (RQF_IPIQ|RQF_INTPEND)) {
		crit_enter_quick(td);
#ifdef SMP
		if (gd->mi.gd_reqflags & RQF_IPIQ) {
			/*
			 * Clear the flag before draining so a message that
			 * arrives mid-drain re-sets it and forces another
			 * iteration of the outer loop.
			 */
			atomic_clear_int(&gd->mi.gd_reqflags, RQF_IPIQ);
			lwkt_process_ipiq();
		}
#endif
		if (gd->mi.gd_reqflags & RQF_INTPEND) {
			atomic_clear_int(&gd->mi.gd_reqflags, RQF_INTPEND);
			/*
			 * Soft interrupts: gd_spending bit N corresponds to
			 * irq (N + FIRST_SOFTINT).  ffs() returns a 1-based
			 * bit index, hence the --irq adjustment.
			 */
			while ((irq = ffs(gd->gd_spending)) != 0) {
				--irq;
				atomic_clear_int(&gd->gd_spending, 1 << irq);
				irq += FIRST_SOFTINT;
				sched_ithd_soft(irq);
			}
			/*
			 * Hard interrupts: gd_fpending bits map directly to
			 * irq numbers (no bias).
			 */
			while ((irq = ffs(gd->gd_fpending)) != 0) {
				--irq;
				atomic_clear_int(&gd->gd_fpending, 1 << irq);
				sched_ithd_hard_virtual(irq);
			}
		}
		crit_exit_noyield(td);
	}
}
153
/*
 * Allows an unprotected signal handler or mailbox to signal an interrupt
 *
 * For sched_ithd_hard_virtual() to properly preempt via lwkt_schedule() we
 * cannot enter a critical section here.  We use td_nest_count instead.
 *
 * If the current thread is inside a critical section, or we are already
 * nested in this path, the interrupt is only marked pending (gd_fpending
 * bit plus RQF_INTPEND) and splz() dispatches it later.  Otherwise the
 * handler is dispatched immediately.
 */
void
signalintr(int intr)
{
	struct mdglobaldata *gd = mdcpu;
	thread_t td = gd->mi.gd_curthread;

	if (td->td_critcount || td->td_nest_count) {
		/* defer: record the pending bit, splz() will pick it up */
		atomic_set_int_nonlocked(&gd->gd_fpending, 1 << intr);
		atomic_set_int(&gd->mi.gd_reqflags, RQF_INTPEND);
	} else {
		/* dispatch now; nest_count guards against re-entry */
		++td->td_nest_count;
		atomic_clear_int(&gd->gd_fpending, 1 << intr);
		sched_ithd_hard_virtual(intr);
		--td->td_nest_count;
	}
}
176
/*
 * Must block any signal normally handled as maskable interrupt.
 */
void
cpu_disable_intr(void)
{
	int intrmask;

	intrmask = sigmask(SIGALRM);
	intrmask |= sigmask(SIGIO);
	intrmask |= sigmask(SIGUSR1);
	sigblock(intrmask);
}
185
/*
 * Re-enable "interrupts" by clearing the process signal mask entirely.
 */
void
cpu_enable_intr(void)
{
	sigsetmask(0);
}
191
/*
 * Block every signal the vkernel uses, not just the maskable
 * interrupt sources.
 */
void
cpu_mask_all_signals(void)
{
	int allmask;

	allmask = sigmask(SIGALRM) | sigmask(SIGIO) | sigmask(SIGQUIT);
	allmask |= sigmask(SIGUSR1) | sigmask(SIGTERM);
	allmask |= sigmask(SIGWINCH) | sigmask(SIGUSR2);
	sigblock(allmask);
}
199
/*
 * Undo cpu_mask_all_signals() by clearing the process signal mask.
 */
void
cpu_unmask_all_signals(void)
{
	sigsetmask(0);
}
205
/*
 * Invalidate the mapping for a single page.  The vkernel delegates
 * this to the host via madvise() with the DragonFly-specific
 * MADV_INVAL advice.
 */
void
cpu_invlpg(void *addr)
{
	madvise(addr, PAGE_SIZE, MADV_INVAL);
}
211
/*
 * Invalidate the entire kernel virtual address range by applying
 * MADV_INVAL over [KvaStart, KvaEnd).
 */
void
cpu_invltlb(void)
{
	madvise((void *)KvaStart, KvaEnd - KvaStart, MADV_INVAL);
}