Fix a number of interrupt related issues.
[dragonfly.git] / sys / platform / pc32 / isa / ipl_funcs.c
/*-
 * Copyright (c) 1997 Bruce Evans.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/isa/ipl_funcs.c,v 1.32.2.5 2002/12/17 18:04:02 sam Exp $
 * $DragonFly: src/sys/platform/pc32/isa/ipl_funcs.c,v 1.8 2003/10/02 22:26:59 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <machine/ipl.h>
#include <machine/globaldata.h>
#include <machine/pcb.h>
#include <i386/isa/intr_machdep.h>

/*
 * Bits in the ipending bitmap variable must be set atomically because
 * ipending may be manipulated by interrupts or other cpus without holding
 * any locks.
 *
 * Note: setbits uses a locked or, making simple cases MP safe.
 */
#define DO_SETBITS(name, var, bits) \
void name(void) \
{ \
	struct mdglobaldata *gd = mdcpu; \
	atomic_set_int_nonlocked(var, bits); \
	atomic_set_int_nonlocked(&gd->mi.gd_reqflags, RQF_INTPEND); \
} \

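/*
 * For illustration only (not compiled): DO_SETBITS(setsoftclock, ...)
 * below expands to roughly the following, so each set*()/sched*()
 * routine marks its pending bit and flags the cpu for attention:
 *
 *	void setsoftclock(void)
 *	{
 *		struct mdglobaldata *gd = mdcpu;
 *		atomic_set_int_nonlocked(&gd->gd_ipending, SWI_CLOCK_PENDING);
 *		atomic_set_int_nonlocked(&gd->mi.gd_reqflags, RQF_INTPEND);
 *	}
 */
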
DO_SETBITS(setdelayed, &gd->gd_ipending, loadandclear(&gd->gd_idelayed))

DO_SETBITS(setsoftcamnet,&gd->gd_ipending, SWI_CAMNET_PENDING)
DO_SETBITS(setsoftcambio,&gd->gd_ipending, SWI_CAMBIO_PENDING)
DO_SETBITS(setsoftclock, &gd->gd_ipending, SWI_CLOCK_PENDING)
DO_SETBITS(setsoftnet,   &gd->gd_ipending, SWI_NET_PENDING)
DO_SETBITS(setsofttty,   &gd->gd_ipending, SWI_TTY_PENDING)
DO_SETBITS(setsoftvm,    &gd->gd_ipending, SWI_VM_PENDING)
DO_SETBITS(setsofttq,    &gd->gd_ipending, SWI_TQ_PENDING)
DO_SETBITS(setsoftcrypto,&gd->gd_ipending, SWI_CRYPTO_PENDING)

DO_SETBITS(schedsoftcamnet, &gd->gd_idelayed, SWI_CAMNET_PENDING)
DO_SETBITS(schedsoftcambio, &gd->gd_idelayed, SWI_CAMBIO_PENDING)
DO_SETBITS(schedsoftnet, &gd->gd_idelayed, SWI_NET_PENDING)
DO_SETBITS(schedsofttty, &gd->gd_idelayed, SWI_TTY_PENDING)
DO_SETBITS(schedsoftvm, &gd->gd_idelayed, SWI_VM_PENDING)
DO_SETBITS(schedsofttq, &gd->gd_idelayed, SWI_TQ_PENDING)
/* YYY schedsoft what? */

unsigned
softclockpending(void)
{
	return ((mdcpu->gd_ipending | mdcpu->gd_fpending) & SWI_CLOCK_PENDING);
}

/*
 * Support for SPL assertions.
 */

#ifdef INVARIANT_SUPPORT

#define	SPLASSERT_IGNORE	0
#define	SPLASSERT_LOG		1
#define	SPLASSERT_PANIC		2

static int splassertmode = SPLASSERT_LOG;
SYSCTL_INT(_kern, OID_AUTO, splassertmode, CTLFLAG_RW,
	&splassertmode, 0, "Set the mode of SPLASSERT");
TUNABLE_INT("kern.splassertmode", &splassertmode);
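/*
 * The assertion mode can be selected at boot as a loader tunable or at
 * runtime via sysctl; for example (illustrative, default is LOG):
 *
 *	kern.splassertmode=2		(in /boot/loader.conf, panic on failure)
 *	sysctl kern.splassertmode=0	(at runtime, ignore failures)
 */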

static void
splassertfail(char *str, const char *msg, char *name, int level)
{
	switch (splassertmode) {
	case SPLASSERT_IGNORE:
		break;
	case SPLASSERT_LOG:
		printf(str, msg, name, level);
		printf("\n");
		break;
	case SPLASSERT_PANIC:
		panic(str, msg, name, level);
		break;
	}
}

#define GENSPLASSERT(NAME, MODIFIER) \
void \
NAME##assert(const char *msg) \
{ \
	if ((curthread->td_cpl & (MODIFIER)) != (MODIFIER)) \
		splassertfail("%s: not %s, curthread->td_cpl == %#x", \
		    msg, __XSTRING(NAME) + 3, curthread->td_cpl); \
}
#else
#define GENSPLASSERT(NAME, MODIFIER)
#endif

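/*
 * Illustrative note: GENSPL() below invokes GENSPLASSERT(), so when the
 * kernel is built with INVARIANT_SUPPORT each splXXX() gains a matching
 * splXXXassert() routine, e.g. (hypothetical caller):
 *
 *	splbioassert("bufwait");
 *
 * which logs or panics via splassertfail() if bio_imask is not fully
 * masked in curthread->td_cpl.
 */
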
/************************************************************************
 *			GENERAL SPL CODE				*
 ************************************************************************
 *
 *  Implement splXXX(), spl0(), splx(), and splq().  splXXX() disables a
 *  set of interrupts (e.g. splbio() disables interrupts relating to
 *  device I/O) and returns the previous interrupt mask.  splx() restores
 *  the previous interrupt mask, spl0() is a special case which enables
 *  all interrupts and is typically used inside i386/i386 swtch.s and
 *  fork_trampoline.  splq() is a generic version of splXXX().
 *
 *  The SPL routines mess around with the 'cpl' global, which masks
 *  interrupts.  Interrupts are not *actually* masked.  What happens is
 *  that if an interrupt masked by the cpl occurs, the appropriate bit
 *  in '*pending' is set and the interrupt is deferred.  When we clear
 *  bits in the cpl we must check to see if any *pending interrupts have
 *  been unmasked and issue them synchronously, which is what the splz()
 *  call does.
 *
 *  Because the cpl is often saved and restored in a nested fashion, cpl
 *  modifications are only allowed in the SMP case when the MP lock is held
 *  to prevent multiple processes from tripping over each other's masks.
 *  The cpl is saved when you do a context switch (mi_switch()) and restored
 *  when your process gets cpu again.
 *
 *  An interrupt routine is allowed to modify the cpl as long as it restores
 *  it prior to returning (thus the interrupted mainline code doesn't notice
 *  anything amiss).  For the SMP case, the interrupt routine must hold
 *  the MP lock for any cpl manipulation.
 *
 *  Likewise, due to the deterministic nature of cpl modifications, we do
 *  NOT need to use locked instructions to modify it.
 */

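/*
 * Illustrative sketch of the usual caller-side pattern (not part of this
 * file): save the returned mask, do the protected work, then restore it:
 *
 *	unsigned s;
 *
 *	s = splbio();
 *	... manipulate buffer queues ...
 *	splx(s);
 *
 * splx() calls splz() if lowering the mask exposed pending interrupts and
 * we are not inside a critical section.
 */
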
#define GENSPL(NAME, OP, MODIFIER, PC) \
GENSPLASSERT(NAME, MODIFIER) \
unsigned NAME(void) \
{ \
	unsigned x; \
	struct thread *td = curthread; \
 \
	x = td->td_cpl; \
	td->td_cpl OP MODIFIER; \
	return (x); \
}

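/*
 * For illustration only (not compiled): GENSPL(splbio, |=, bio_imask, 2)
 * below expands (plus its GENSPLASSERT() portion) to roughly:
 *
 *	unsigned splbio(void)
 *	{
 *		unsigned x;
 *		struct thread *td = curthread;
 *
 *		x = td->td_cpl;
 *		td->td_cpl |= bio_imask;
 *		return (x);
 *	}
 *
 * i.e. the old mask is returned for a later splx() and the requested bits
 * are or'd into (or assigned to) the per-thread cpl.
 */
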
/*
 * Note: we do not have to check td->td_nest_count in these functions, only
 * whether we are in a critical section or not.
 */
void
spl0(void)
{
	struct mdglobaldata *gd = mdcpu;
	struct thread *td = gd->mi.gd_curthread;

	td->td_cpl = 0;
	if ((gd->gd_ipending || gd->gd_fpending) && td->td_pri < TDPRI_CRIT)
		splz();
}

void
splx(unsigned ipl)
{
	struct mdglobaldata *gd = mdcpu;
	struct thread *td = gd->mi.gd_curthread;

	td->td_cpl = ipl;
	if (((gd->gd_ipending | gd->gd_fpending) & ~ipl) &&
	    td->td_pri < TDPRI_CRIT) {
		splz();
	}
}

intrmask_t
splq(intrmask_t mask)
{
	struct mdglobaldata *gd = mdcpu;
	struct thread *td = gd->mi.gd_curthread;
	intrmask_t tmp;

	tmp = td->td_cpl;
	td->td_cpl |= mask;
	return (tmp);
}

/* Finally, generate the actual spl*() functions */

/*    NAME:		OP:	MODIFIER:				PC: */
GENSPL(splbio, |=, bio_imask, 2)
GENSPL(splcam, |=, cam_imask, 7)
GENSPL(splclock, =, HWI_MASK | SWI_MASK, 3)
GENSPL(splhigh, =, HWI_MASK | SWI_MASK, 4)
GENSPL(splimp, |=, net_imask, 5)
GENSPL(splnet, |=, SWI_NET_MASK, 6)
GENSPL(splsoftcam, |=, SWI_CAMBIO_MASK | SWI_CAMNET_MASK, 8)
GENSPL(splsoftcambio, |=, SWI_CAMBIO_MASK, 9)
GENSPL(splsoftcamnet, |=, SWI_CAMNET_MASK, 10)
GENSPL(splsoftclock, =, SWI_CLOCK_MASK, 11)
GENSPL(splsofttty, |=, SWI_TTY_MASK, 12)
GENSPL(splsoftvm, |=, SWI_VM_MASK, 16)
GENSPL(splsofttq, |=, SWI_TQ_MASK, 17)
GENSPL(splstatclock, |=, stat_imask, 13)
GENSPL(spltty, |=, tty_imask, 14)
GENSPL(splvm, |=, net_imask | bio_imask | cam_imask, 15)
GENSPL(splcrypto, |=, net_imask | SWI_NET_MASK | SWI_CRYPTO_MASK, 16)