Finish migrating the cpl into the thread structure.
[dragonfly.git] / sys / platform / pc32 / isa / ipl_funcs.c
CommitLineData
984263bc
MD
/*-
 * Copyright (c) 1997 Bruce Evans.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/isa/ipl_funcs.c,v 1.32.2.5 2002/12/17 18:04:02 sam Exp $
 * $DragonFly: src/sys/platform/pc32/isa/ipl_funcs.c,v 1.4 2003/06/22 08:54:22 dillon Exp $
 */
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/sysctl.h>
f1d1c3fa 34#include <sys/proc.h>
984263bc
MD
35#include <machine/ipl.h>
36#include <machine/globals.h>
f1d1c3fa 37#include <machine/pcb.h>
984263bc
MD
38#include <i386/isa/intr_machdep.h>
39
/*
 * Bits in the ipending bitmap variable must be set atomically because
 * ipending may be manipulated by interrupts or other cpu's without holding
 * any locks.
 *
 * Note: setbits uses a locked or, making simple cases MP safe.
 *
 * Each generated function sets the requested pending bits and then raises
 * gd_reqpri to TDPRI_CRIT so the dispatcher knows a software interrupt
 * needs servicing as soon as the critical section is left.
 */
#define DO_SETBITS(name, var, bits)					\
void name(void)								\
{									\
	atomic_set_int(var, bits);	/* locked or; MP safe */	\
	mycpu->gd_reqpri = TDPRI_CRIT;					\
}
53
54DO_SETBITS(setdelayed, &ipending, loadandclear(&idelayed))
55
56DO_SETBITS(setsoftcamnet,&ipending, SWI_CAMNET_PENDING)
57DO_SETBITS(setsoftcambio,&ipending, SWI_CAMBIO_PENDING)
58DO_SETBITS(setsoftclock, &ipending, SWI_CLOCK_PENDING)
59DO_SETBITS(setsoftnet, &ipending, SWI_NET_PENDING)
60DO_SETBITS(setsofttty, &ipending, SWI_TTY_PENDING)
61DO_SETBITS(setsoftvm, &ipending, SWI_VM_PENDING)
62DO_SETBITS(setsofttq, &ipending, SWI_TQ_PENDING)
63DO_SETBITS(setsoftcrypto,&ipending, SWI_CRYPTO_PENDING)
64
65DO_SETBITS(schedsoftcamnet, &idelayed, SWI_CAMNET_PENDING)
66DO_SETBITS(schedsoftcambio, &idelayed, SWI_CAMBIO_PENDING)
67DO_SETBITS(schedsoftnet, &idelayed, SWI_NET_PENDING)
68DO_SETBITS(schedsofttty, &idelayed, SWI_TTY_PENDING)
69DO_SETBITS(schedsoftvm, &idelayed, SWI_VM_PENDING)
70DO_SETBITS(schedsofttq, &idelayed, SWI_TQ_PENDING)
71
72unsigned
73softclockpending(void)
74{
75 return (ipending & SWI_CLOCK_PENDING);
76}
77
/*
 * Support for SPL assertions.
 *
 * When compiled with INVARIANT_SUPPORT a NAMEassert() function is
 * generated for each spl level; it verifies that the current cpl
 * (curthread->td_cpl) covers the requested mask and reports a failure
 * according to the kern.splassertmode sysctl/tunable.
 */

#ifdef INVARIANT_SUPPORT

#define	SPLASSERT_IGNORE	0	/* failures are silently dropped */
#define	SPLASSERT_LOG		1	/* failures are printf'd */
#define	SPLASSERT_PANIC		2	/* failures panic the system */

static int splassertmode = SPLASSERT_LOG;
SYSCTL_INT(_kern, OID_AUTO, splassertmode, CTLFLAG_RW,
	&splassertmode, 0, "Set the mode of SPLASSERT");
TUNABLE_INT("kern.splassertmode", &splassertmode);

/*
 * splassertfail - dispatch an assertion failure per splassertmode.
 *
 * str is a format string taking (msg, name, level); it is either logged
 * or passed to panic() depending on the mode.
 */
static void
splassertfail(char *str, const char *msg, char *name, int level)
{
	switch (splassertmode) {
	case SPLASSERT_IGNORE:
		break;
	case SPLASSERT_LOG:
		printf(str, msg, name, level);
		printf("\n");
		break;
	case SPLASSERT_PANIC:
		panic(str, msg, name, level);
		break;
	}
}

/*
 * Generate NAMEassert().  The "+ 3" skips the "spl" prefix of the
 * stringified NAME so only the level name is reported.
 */
#define	GENSPLASSERT(NAME, MODIFIER)					\
void									\
NAME##assert(const char *msg)						\
{									\
	if ((curthread->td_cpl & (MODIFIER)) != (MODIFIER))		\
		splassertfail("%s: not %s, curthread->td_cpl == %#x",	\
		    msg, __XSTRING(NAME) + 3, curthread->td_cpl);	\
}
#else
#define	GENSPLASSERT(NAME, MODIFIER)
#endif
120
/************************************************************************
 *			GENERAL SPL CODE				*
 ************************************************************************
 *
 *  Implement splXXX(), spl0(), splx(), and splq().  splXXX() disables a
 *  set of interrupts (e.g. splbio() disables interrupts relating to
 *  device I/O) and returns the previous interrupt mask.  splx() restores
 *  the previous interrupt mask, spl0() is a special case which enables
 *  all interrupts and is typically used inside i386/i386 swtch.s and
 *  fork_trampoline.  splq() is a generic version of splXXX().
 *
 *  The SPL routines mess around with the 'cpl' global, which masks
 *  interrupts.  Interrupts are not *actually* masked.  What happens is
 *  that if an interrupt masked by the cpl occurs, the appropriate bit
 *  in 'ipending' is set and the interrupt is deferred.  When we clear
 *  bits in the cpl we must check to see if any ipending interrupts have
 *  been unmasked and issue them synchronously, which is what the splz()
 *  call does.
 *
 *  Because the cpl is often saved and restored in a nested fashion, cpl
 *  modifications are only allowed in the SMP case when the MP lock is held
 *  to prevent multiple processes from tripping over each other's masks.
 *  The cpl is saved when you do a context switch (mi_switch()) and restored
 *  when your process gets cpu again.
 *
 *  An interrupt routine is allowed to modify the cpl as long as it restores
 *  it prior to returning (thus the interrupted mainline code doesn't notice
 *  anything amiss).  For the SMP case, the interrupt routine must hold
 *  the MP lock for any cpl manipulation.
 *
 *  Likewise, due to the deterministic nature of cpl modifications, we do
 *  NOT need to use locked instructions to modify it.
 */
154
155#ifndef SMP
156
157#define GENSPL(NAME, OP, MODIFIER, PC) \
158GENSPLASSERT(NAME, MODIFIER) \
159unsigned NAME(void) \
160{ \
161 unsigned x; \
162 \
8f41e33b
MD
163 x = curthread->td_cpl; \
164 curthread->td_cpl OP MODIFIER; \
984263bc
MD
165 return (x); \
166}
167
168void
169spl0(void)
170{
8f41e33b 171 curthread->td_cpl = 0;
f1d1c3fa 172 if (ipending && curthread->td_pri < TDPRI_CRIT)
984263bc
MD
173 splz();
174}
175
176void
177splx(unsigned ipl)
178{
8f41e33b 179 curthread->td_cpl = ipl;
f1d1c3fa 180 if ((ipending & ~ipl) && curthread->td_pri < TDPRI_CRIT)
984263bc
MD
181 splz();
182}
183
184intrmask_t
185splq(intrmask_t mask)
186{
8f41e33b
MD
187 intrmask_t tmp = curthread->td_cpl;
188 curthread->td_cpl |= mask;
984263bc
MD
189 return (tmp);
190}
191
192#else /* !SMP */
193
194#include <machine/smp.h>
195#include <machine/smptests.h>
196
197/*
198 * SMP CASE
199 *
200 * Mostly the same as the non-SMP case now, but it didn't used to be
201 * this clean.
202 */
203
204#define GENSPL(NAME, OP, MODIFIER, PC) \
205GENSPLASSERT(NAME, MODIFIER) \
206unsigned NAME(void) \
207{ \
208 unsigned x; \
209 \
8f41e33b
MD
210 x = curthread->td_cpl; \
211 curthread->td_cpl OP MODIFIER; \
984263bc
MD
212 \
213 return (x); \
214}
215
216/*
217 * spl0() - unmask all interrupts
218 *
219 * The MP lock must be held on entry
220 * This routine may only be called from mainline code.
221 */
222void
223spl0(void)
224{
225 KASSERT(inside_intr == 0, ("spl0: called from interrupt"));
8f41e33b 226 curthread->td_cpl = 0;
f1d1c3fa 227 if (ipending && curthread->td_pri < TDPRI_CRIT)
984263bc
MD
228 splz();
229}
230
231/*
232 * splx() - restore previous interrupt mask
233 *
234 * The MP lock must be held on entry
235 */
236
237void
238splx(unsigned ipl)
239{
8f41e33b
MD
240 curthread->td_cpl = ipl;
241 if (inside_intr == 0 && (ipending & ~curthread->td_cpl) != 0 &&
f1d1c3fa 242 curthread->td_pri < TDPRI_CRIT) {
984263bc 243 splz();
f1d1c3fa 244 }
984263bc
MD
245}
246
247
248/*
249 * splq() - blocks specified interrupts
250 *
251 * The MP lock must be held on entry
252 */
253intrmask_t
254splq(intrmask_t mask)
255{
8f41e33b
MD
256 intrmask_t tmp = curthread->td_cpl;
257 curthread->td_cpl |= mask;
984263bc
MD
258 return (tmp);
259}
260
261#endif /* !SMP */
262
263/* Finally, generate the actual spl*() functions */
264
265/* NAME: OP: MODIFIER: PC: */
266GENSPL(splbio, |=, bio_imask, 2)
267GENSPL(splcam, |=, cam_imask, 7)
268GENSPL(splclock, =, HWI_MASK | SWI_MASK, 3)
269GENSPL(splhigh, =, HWI_MASK | SWI_MASK, 4)
270GENSPL(splimp, |=, net_imask, 5)
271GENSPL(splnet, |=, SWI_NET_MASK, 6)
272GENSPL(splsoftcam, |=, SWI_CAMBIO_MASK | SWI_CAMNET_MASK, 8)
273GENSPL(splsoftcambio, |=, SWI_CAMBIO_MASK, 9)
274GENSPL(splsoftcamnet, |=, SWI_CAMNET_MASK, 10)
275GENSPL(splsoftclock, =, SWI_CLOCK_MASK, 11)
276GENSPL(splsofttty, |=, SWI_TTY_MASK, 12)
277GENSPL(splsoftvm, |=, SWI_VM_MASK, 16)
278GENSPL(splsofttq, |=, SWI_TQ_MASK, 17)
279GENSPL(splstatclock, |=, stat_imask, 13)
280GENSPL(spltty, |=, tty_imask, 14)
281GENSPL(splvm, |=, net_imask | bio_imask | cam_imask, 15)
282GENSPL(splcrypto, |=, net_imask | SWI_NET_MASK | SWI_CRYPTO_MASK,16)