intr: Don't embed struct thread in intr_info
sys/kern/kern_intr.c
/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/serialize.h>
#include <sys/interrupt.h>
#include <sys/bus.h>
#include <sys/machintr.h>

#include <machine/frame.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

struct intr_info;

typedef struct intrec {
    struct intrec *next;
    struct intr_info *info;
    inthand2_t *handler;
    void *argument;
    char *name;
    int intr;
    int intr_flags;
    struct lwkt_serialize *serializer;
} *intrec_t;

struct intr_info {
    intrec_t i_reclist;
    struct thread *i_thread;	/* don't embed struct thread */
    struct random_softc i_random;
    int i_running;
    long i_count;		/* interrupts dispatched */
    int i_mplock_required;
    int i_fast;
    int i_slow;
    int i_state;
    int i_errorticks;
    unsigned long i_straycount;
    int i_cpuid;
    int i_intr;
};
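
/*
 * Handler records are chained off i_reclist, one chain per (cpu, vector)
 * pair.  Illustrative sketch of the lookup convention used throughout
 * this file:
 *
 *	struct intr_info *info = &intr_info_ary[cpuid][intr];
 *	for (rec = info->i_reclist; rec; rec = rec->next)
 *		...
 */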
static struct intr_info intr_info_ary[MAXCPU][MAX_INTS];
static struct intr_info *swi_info_ary[MAX_SOFTINTS];

static int max_installed_hard_intr[MAXCPU];

#define EMERGENCY_INTR_POLLING_FREQ_MAX 20000

/*
 * Assert that callers into interrupt handlers don't return with
 * dangling tokens, spinlocks, or mp locks.
 */
#ifdef INVARIANTS

#define TD_INVARIANTS_DECLARE	\
	int spincount;		\
	lwkt_tokref_t curstop

#define TD_INVARIANTS_GET(td)					\
	do {							\
	    spincount = (td)->td_gd->gd_spinlocks;		\
	    curstop = (td)->td_toks_stop;			\
	} while(0)

#define TD_INVARIANTS_TEST(td, name)				\
	do {							\
	    KASSERT(spincount == (td)->td_gd->gd_spinlocks,	\
		    ("spincount mismatch after interrupt handler %s", \
		    name));					\
	    KASSERT(curstop == (td)->td_toks_stop,		\
		    ("token count mismatch after interrupt handler %s", \
		    name));					\
	} while(0)

#else

/* !INVARIANTS */

#define TD_INVARIANTS_DECLARE
#define TD_INVARIANTS_GET(td)
#define TD_INVARIANTS_TEST(td, name)

#endif /* !INVARIANTS */

static int sysctl_emergency_freq(SYSCTL_HANDLER_ARGS);
static int sysctl_emergency_enable(SYSCTL_HANDLER_ARGS);
static void emergency_intr_timer_callback(systimer_t, int, struct intrframe *);
static void ithread_handler(void *arg);
static void ithread_emergency(void *arg);
static void report_stray_interrupt(struct intr_info *info, const char *func);
static void int_moveto_destcpu(int *, int);
static void int_moveto_origcpu(int, int);
static void sched_ithd_intern(struct intr_info *info);

static struct systimer emergency_intr_timer[MAXCPU];
static struct thread emergency_intr_thread[MAXCPU];

#define ISTATE_NOTHREAD		0
#define ISTATE_NORMAL		1
#define ISTATE_LIVELOCKED	2

static int livelock_limit = 40000;
static int livelock_lowater = 20000;
static int livelock_debug = -1;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
	CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_lowater,
	CTLFLAG_RW, &livelock_lowater, 0, "Livelock low-water mark restore");
SYSCTL_INT(_kern, OID_AUTO, livelock_debug,
	CTLFLAG_RW, &livelock_debug, 0, "Livelock debug intr#");

static int emergency_intr_enable = 0;	/* emergency interrupt polling */
TUNABLE_INT("kern.emergency_intr_enable", &emergency_intr_enable);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_enable, CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_emergency_enable, "I", "Emergency Interrupt Poll Enable");

static int emergency_intr_freq = 10;	/* emergency polling frequency */
TUNABLE_INT("kern.emergency_intr_freq", &emergency_intr_freq);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_freq, CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_emergency_freq, "I", "Emergency Interrupt Poll Frequency");

/*
 * Sysctl support routines
 */
static int
sysctl_emergency_enable(SYSCTL_HANDLER_ARGS)
{
    int error, enabled, cpuid, freq;

    enabled = emergency_intr_enable;
    error = sysctl_handle_int(oidp, &enabled, 0, req);
    if (error || req->newptr == NULL)
	return error;
    emergency_intr_enable = enabled;
    if (emergency_intr_enable)
	freq = emergency_intr_freq;
    else
	freq = 1;

    for (cpuid = 0; cpuid < ncpus; ++cpuid)
	systimer_adjust_periodic(&emergency_intr_timer[cpuid], freq);
    return 0;
}

static int
sysctl_emergency_freq(SYSCTL_HANDLER_ARGS)
{
    int error, phz, cpuid, freq;

    phz = emergency_intr_freq;
    error = sysctl_handle_int(oidp, &phz, 0, req);
    if (error || req->newptr == NULL)
	return error;
    if (phz <= 0)
	return EINVAL;
    else if (phz > EMERGENCY_INTR_POLLING_FREQ_MAX)
	phz = EMERGENCY_INTR_POLLING_FREQ_MAX;

    emergency_intr_freq = phz;
    if (emergency_intr_enable)
	freq = emergency_intr_freq;
    else
	freq = 1;

    for (cpuid = 0; cpuid < ncpus; ++cpuid)
	systimer_adjust_periodic(&emergency_intr_timer[cpuid], freq);
    return 0;
}
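
/*
 * Example (illustrative): emergency polling is normally driven from
 * userland through the tunables/sysctls declared above, e.g. in
 * /boot/loader.conf or via sysctl(8):
 *
 *	kern.emergency_intr_enable=1
 *	kern.emergency_intr_freq=100
 *
 * Both handlers simply clamp the new value and re-program the per-cpu
 * emergency systimer (1hz when polling is disabled).
 */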
/*
 * Register an SWI or INTerrupt handler.
 */
void *
register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
	struct lwkt_serialize *serializer, int cpuid)
{
    if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
	panic("register_swi: bad intr %d", intr);

    if (cpuid < 0)
	cpuid = intr % ncpus;
    return(register_int(intr, handler, arg, name, serializer, 0, cpuid));
}

void *
register_swi_mp(int intr, inthand2_t *handler, void *arg, const char *name,
	struct lwkt_serialize *serializer, int cpuid)
{
    if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
	panic("register_swi_mp: bad intr %d", intr);

    if (cpuid < 0)
	cpuid = intr % ncpus;
    return(register_int(intr, handler, arg, name, serializer,
	INTR_MPSAFE, cpuid));
}
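
/*
 * Usage sketch (hypothetical; SWI_MYDRV and mydrv_swi are stand-ins for
 * a real softint vector in [FIRST_SOFTINT, MAX_INTS) and its handler):
 *
 *	static void mydrv_swi(void *arg, void *frame);
 *
 *	void *id = register_swi(SWI_MYDRV, mydrv_swi, sc, "mydrvswi",
 *				NULL, -1);
 *	...
 *	unregister_swi(id, SWI_MYDRV, -1);
 *
 * A negative cpuid lets the wrappers above target intr % ncpus.
 */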
void *
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
	struct lwkt_serialize *serializer, int intr_flags, int cpuid)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec;
    int orig_cpuid;

    KKASSERT(cpuid >= 0 && cpuid < ncpus);

    if (intr < 0 || intr >= MAX_INTS)
	panic("register_int: bad intr %d", intr);
    if (name == NULL)
	name = "???";
    info = &intr_info_ary[cpuid][intr];

    /*
     * Construct an interrupt handler record
     */
    rec = kmalloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
    rec->name = kmalloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
    strcpy(rec->name, name);

    rec->info = info;
    rec->handler = handler;
    rec->argument = arg;
    rec->intr = intr;
    rec->intr_flags = intr_flags;
    rec->next = NULL;
    rec->serializer = serializer;

    int_moveto_destcpu(&orig_cpuid, cpuid);

    /*
     * Create an emergency polling thread and set up a systimer to wake
     * it up.
     */
    if (emergency_intr_thread[cpuid].td_kstack == NULL) {
	lwkt_create(ithread_emergency, NULL, NULL,
		    &emergency_intr_thread[cpuid],
		    TDF_NOSTART | TDF_INTTHREAD, cpuid, "ithreadE %d",
		    cpuid);
	systimer_init_periodic_nq(&emergency_intr_timer[cpuid],
		    emergency_intr_timer_callback,
		    &emergency_intr_thread[cpuid],
		    (emergency_intr_enable ? emergency_intr_freq : 1));
    }

    /*
     * Create an interrupt thread if necessary, leave it in an unscheduled
     * state.
     */
    if (info->i_state == ISTATE_NOTHREAD) {
	info->i_state = ISTATE_NORMAL;
	info->i_thread = kmalloc(sizeof(struct thread), M_DEVBUF,
				 M_INTWAIT | M_ZERO);
	lwkt_create(ithread_handler, (void *)(intptr_t)intr, NULL,
		    info->i_thread, TDF_NOSTART | TDF_INTTHREAD, cpuid,
		    "ithread%d %d", intr, cpuid);
	if (intr >= FIRST_SOFTINT)
	    lwkt_setpri(info->i_thread, TDPRI_SOFT_NORM);
	else
	    lwkt_setpri(info->i_thread, TDPRI_INT_MED);
	info->i_thread->td_preemptable = lwkt_preempt;
    }

    list = &info->i_reclist;

    /*
     * Keep track of how many fast and slow interrupts we have.
     * Set i_mplock_required if any handler in the chain requires
     * the MP lock to operate.
     */
    if ((intr_flags & INTR_MPSAFE) == 0)
	info->i_mplock_required = 1;
    if (intr_flags & INTR_CLOCK)
	++info->i_fast;
    else
	++info->i_slow;

    /*
     * Enable random number generation keying off of this interrupt.
     */
    if ((intr_flags & INTR_NOENTROPY) == 0 && info->i_random.sc_enabled == 0) {
	info->i_random.sc_enabled = 1;
	info->i_random.sc_intr = intr;
    }

    /*
     * Add the record to the interrupt list.
     */
    crit_enter();
    while (*list != NULL)
	list = &(*list)->next;
    *list = rec;
    crit_exit();

    /*
     * Update max_installed_hard_intr to make the emergency intr poll
     * a bit more efficient.
     */
    if (intr < FIRST_SOFTINT) {
	if (max_installed_hard_intr[cpuid] <= intr)
	    max_installed_hard_intr[cpuid] = intr + 1;
    }

    if (intr >= FIRST_SOFTINT)
	swi_info_ary[intr - FIRST_SOFTINT] = info;

    /*
     * Setup the machine level interrupt vector
     */
    if (intr < FIRST_SOFTINT && info->i_slow + info->i_fast == 1)
	machintr_intr_setup(intr, intr_flags);

    int_moveto_origcpu(orig_cpuid, cpuid);

    return(rec);
}
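
/*
 * Usage sketch (hypothetical names): a driver attach routine registering
 * an MPSAFE hard-interrupt handler protected by its own serializer:
 *
 *	void *id = register_int(irq, mydrv_intr, sc, "mydrv0",
 *				&sc->sc_serialize, INTR_MPSAFE, cpuid);
 *
 * cpuid must already be valid (0 <= cpuid < ncpus); the returned cookie
 * is what unregister_int() takes on detach.
 */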
void
unregister_swi(void *id, int intr, int cpuid)
{
    if (cpuid < 0)
	cpuid = intr % ncpus;

    unregister_int(id, cpuid);
}

void
unregister_int(void *id, int cpuid)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec;
    int intr, orig_cpuid;

    KKASSERT(cpuid >= 0 && cpuid < ncpus);

    intr = ((intrec_t)id)->intr;

    if (intr < 0 || intr >= MAX_INTS)
	panic("unregister_int: bad intr %d", intr);

    info = &intr_info_ary[cpuid][intr];

    int_moveto_destcpu(&orig_cpuid, cpuid);

    /*
     * Remove the interrupt descriptor, adjust the descriptor count,
     * and teardown the machine level vector if this was the last interrupt.
     */
    crit_enter();
    list = &info->i_reclist;
    while ((rec = *list) != NULL) {
	if (rec == id)
	    break;
	list = &rec->next;
    }
    if (rec) {
	intrec_t rec0;

	*list = rec->next;
	if (rec->intr_flags & INTR_CLOCK)
	    --info->i_fast;
	else
	    --info->i_slow;
	if (intr < FIRST_SOFTINT && info->i_fast + info->i_slow == 0)
	    machintr_intr_teardown(intr);

	/*
	 * Clear i_mplock_required if no handlers in the chain require the
	 * MP lock.
	 */
	for (rec0 = info->i_reclist; rec0; rec0 = rec0->next) {
	    if ((rec0->intr_flags & INTR_MPSAFE) == 0)
		break;
	}
	if (rec0 == NULL)
	    info->i_mplock_required = 0;
    }

    if (intr >= FIRST_SOFTINT && info->i_reclist == NULL)
	swi_info_ary[intr - FIRST_SOFTINT] = NULL;

    crit_exit();

    int_moveto_origcpu(orig_cpuid, cpuid);

    /*
     * Free the record.
     */
    if (rec != NULL) {
	kfree(rec->name, M_DEVBUF);
	kfree(rec, M_DEVBUF);
    } else {
	kprintf("warning: unregister_int: int %d handler for %s not found\n",
		intr, ((intrec_t)id)->name);
    }
}

long
get_interrupt_counter(int intr, int cpuid)
{
    struct intr_info *info;

    KKASSERT(cpuid >= 0 && cpuid < ncpus);

    if (intr < 0 || intr >= MAX_INTS)
	panic("get_interrupt_counter: bad intr %d", intr);
    info = &intr_info_ary[cpuid][intr];
    return(info->i_count);
}

void
register_randintr(int intr)
{
    struct intr_info *info;
    int cpuid;

    if (intr < 0 || intr >= MAX_INTS)
	panic("register_randintr: bad intr %d", intr);

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
	info = &intr_info_ary[cpuid][intr];
	info->i_random.sc_intr = intr;
	info->i_random.sc_enabled = 1;
    }
}

void
unregister_randintr(int intr)
{
    struct intr_info *info;
    int cpuid;

    if (intr < 0 || intr >= MAX_INTS)
	panic("unregister_randintr: bad intr %d", intr);

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
	info = &intr_info_ary[cpuid][intr];
	info->i_random.sc_enabled = -1;
    }
}

int
next_registered_randintr(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
	panic("next_registered_randintr: bad intr %d", intr);

    while (intr < MAX_INTS) {
	int cpuid;

	for (cpuid = 0; cpuid < ncpus; ++cpuid) {
	    info = &intr_info_ary[cpuid][intr];
	    if (info->i_random.sc_enabled > 0)
		return intr;
	}
	++intr;
    }
    return intr;
}
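
/*
 * Example (illustrative): a consumer can enumerate every vector that is
 * currently harvesting entropy on at least one cpu:
 *
 *	int intr;
 *	for (intr = 0; intr < MAX_INTS; ++intr) {
 *		intr = next_registered_randintr(intr);
 *		if (intr == MAX_INTS)
 *			break;
 *		... intr is feeding the entropy pool ...
 *	}
 */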
/*
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its i_running bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).  We do NOT attempt to
 * reschedule interrupts whose i_running bit is already set because
 * this would prematurely wakeup a livelock-limited interrupt thread.
 *
 * i_running is only tested/set on the same cpu as the interrupt thread.
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
 */
static void
sched_ithd_remote(void *arg)
{
    sched_ithd_intern(arg);
}

static void
sched_ithd_intern(struct intr_info *info)
{
    ++info->i_count;
    if (info->i_state != ISTATE_NOTHREAD) {
	if (info->i_reclist == NULL) {
	    report_stray_interrupt(info, "sched_ithd");
	} else {
	    if (info->i_thread->td_gd == mycpu) {
		if (info->i_running == 0) {
		    info->i_running = 1;
		    if (info->i_state != ISTATE_LIVELOCKED)
			lwkt_schedule(info->i_thread); /* MIGHT PREEMPT */
		}
	    } else {
		lwkt_send_ipiq(info->i_thread->td_gd, sched_ithd_remote, info);
	    }
	}
    } else {
	report_stray_interrupt(info, "sched_ithd");
    }
}

void
sched_ithd_soft(int intr)
{
    struct intr_info *info;

    KKASSERT(intr >= FIRST_SOFTINT && intr < MAX_INTS);

    info = swi_info_ary[intr - FIRST_SOFTINT];
    if (info != NULL) {
	sched_ithd_intern(info);
    } else {
	kprintf("unregistered softint %d got scheduled on cpu%d\n",
		intr, mycpuid);
    }
}

void
sched_ithd_hard(int intr)
{
    KKASSERT(intr >= 0 && intr < MAX_HARDINTS);
    sched_ithd_intern(&intr_info_ary[mycpuid][intr]);
}

#ifdef _KERNEL_VIRTUAL

void
sched_ithd_hard_virtual(int intr)
{
    KKASSERT(intr >= 0 && intr < MAX_HARDINTS);
    sched_ithd_intern(&intr_info_ary[0][intr]);
}

void *
register_int_virtual(int intr, inthand2_t *handler, void *arg, const char *name,
	struct lwkt_serialize *serializer, int intr_flags)
{
    return register_int(intr, handler, arg, name, serializer, intr_flags, 0);
}

void
unregister_int_virtual(void *id)
{
    unregister_int(id, 0);
}

#endif /* _KERNEL_VIRTUAL */

static void
report_stray_interrupt(struct intr_info *info, const char *func)
{
    ++info->i_straycount;
    if (info->i_straycount < 10) {
	if (info->i_errorticks == ticks)
	    return;
	info->i_errorticks = ticks;
	kprintf("%s: stray interrupt %d on cpu%d\n",
		func, info->i_intr, mycpuid);
    } else if (info->i_straycount == 10) {
	kprintf("%s: %ld stray interrupts %d on cpu%d - "
		"there will be no further reports\n", func,
		info->i_straycount, info->i_intr, mycpuid);
    }
}

/*
 * This is run from a periodic SYSTIMER (and thus must be MP safe, the BGL
 * might not be held).
 */
static void
ithread_livelock_wakeup(systimer_t st, int in_ipi __unused,
	struct intrframe *frame __unused)
{
    struct intr_info *info;

    info = &intr_info_ary[mycpuid][(int)(intptr_t)st->data];
    if (info->i_state != ISTATE_NOTHREAD)
	lwkt_schedule(info->i_thread);
}

/*
 * Schedule ithread within fast intr handler
 *
 * XXX Protect sched_ithd_hard() call with gd_intr_nesting_level?
 * Interrupts aren't enabled, but still...
 */
static __inline void
ithread_fast_sched(int intr, thread_t td)
{
    ++td->td_nest_count;

    /*
     * We are already in critical section, exit it now to
     * allow preemption.
     */
    crit_exit_quick(td);
    sched_ithd_hard(intr);
    crit_enter_quick(td);

    --td->td_nest_count;
}

/*
 * This function is called directly from the ICU or APIC vector code assembly
 * to process an interrupt.  The critical section and interrupt deferral
 * checks have already been done but the function is entered WITHOUT
 * a critical section held.  The BGL may or may not be held.
 *
 * Must return non-zero if we do not want the vector code to re-enable
 * the interrupt (which we don't if we have to schedule the interrupt).
 */
int ithread_fast_handler(struct intrframe *frame);

int
ithread_fast_handler(struct intrframe *frame)
{
    int intr;
    struct intr_info *info;
    struct intrec **list;
    int must_schedule;
    int got_mplock;
    TD_INVARIANTS_DECLARE;
    intrec_t rec, nrec;
    globaldata_t gd;
    thread_t td;

    intr = frame->if_vec;
    gd = mycpu;
    td = curthread;

    /* We must be in critical section. */
    KKASSERT(td->td_critcount);

    info = &intr_info_ary[mycpuid][intr];

    /*
     * If we are not processing any FAST interrupts, just schedule the thing.
     */
    if (info->i_fast == 0) {
	++gd->gd_cnt.v_intr;
	ithread_fast_sched(intr, td);
	return(1);
    }

    /*
     * This should not normally occur since interrupts ought to be
     * masked if the ithread has been scheduled or is running.
     */
    if (info->i_running)
	return(1);

    /*
     * Bump the interrupt nesting level to process any FAST interrupts.
     * Obtain the MP lock as necessary.  If the MP lock cannot be obtained,
     * schedule the interrupt thread to deal with the issue instead.
     *
     * To reduce overhead, just leave the MP lock held once it has been
     * obtained.
     */
    ++gd->gd_intr_nesting_level;
    ++gd->gd_cnt.v_intr;
    must_schedule = info->i_slow;
    got_mplock = 0;

    TD_INVARIANTS_GET(td);
    list = &info->i_reclist;

    for (rec = *list; rec; rec = nrec) {
	/* rec may be invalid after call */
	nrec = rec->next;

	if (rec->intr_flags & INTR_CLOCK) {
	    if ((rec->intr_flags & INTR_MPSAFE) == 0 && got_mplock == 0) {
		if (try_mplock() == 0) {
		    /* Couldn't get the MP lock; just schedule it. */
		    must_schedule = 1;
		    break;
		}
		got_mplock = 1;
	    }
	    if (rec->serializer) {
		must_schedule += lwkt_serialize_handler_try(
					rec->serializer, rec->handler,
					rec->argument, frame);
	    } else {
		rec->handler(rec->argument, frame);
	    }
	    TD_INVARIANTS_TEST(td, rec->name);
	}
    }

    /*
     * Cleanup
     */
    --gd->gd_intr_nesting_level;
    if (got_mplock)
	rel_mplock();

    /*
     * If we had a problem, or mixed fast and slow interrupt handlers are
     * registered, schedule the ithread to catch the missed records (it
     * will just re-run all of them).  A return value of 0 indicates that
     * all handlers have been run and the interrupt can be re-enabled, and
     * a non-zero return indicates that the interrupt thread controls
     * re-enablement.
     */
    if (must_schedule > 0)
	ithread_fast_sched(intr, td);
    else if (must_schedule == 0)
	++info->i_count;
    return(must_schedule);
}

/*
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and with no MP
 * lock held.
 *
 * The i_running state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd_hard() is called.  The HW interrupt
 * remains disabled until all routines have run.  We then call ithread_done()
 * to reenable the HW interrupt and deschedule us until the next interrupt.
 *
 * We are responsible for atomically checking i_running, and ithread_done()
 * is responsible for atomically checking for platform-specific delayed
 * interrupts.  i_running for our irq is only set in the context of our cpu,
 * so a critical section is a sufficient interlock.
 */
#define LIVELOCK_TIMEFRAME(freq)	((freq) >> 2)	/* 1/4 second */

static void
ithread_handler(void *arg)
{
    struct intr_info *info;
    int use_limit;
    __uint32_t lseconds;
    int intr, cpuid = mycpuid;
    int mpheld;
    struct intrec **list;
    intrec_t rec, nrec;
    globaldata_t gd;
    struct systimer ill_timer;	/* enforced freq. timer */
    u_int ill_count;		/* interrupt livelock counter */
    TD_INVARIANTS_DECLARE;

    ill_count = 0;
    intr = (int)(intptr_t)arg;
    info = &intr_info_ary[cpuid][intr];
    list = &info->i_reclist;

    /*
     * The loop must be entered with one critical section held.  The thread
     * does not hold the mplock on startup.
     */
    gd = mycpu;
    lseconds = gd->gd_time_seconds;
    crit_enter_gd(gd);
    mpheld = 0;

    for (;;) {
	/*
	 * The chain is only considered MPSAFE if all its interrupt handlers
	 * are MPSAFE.  However, if intr_mpsafe has been turned off we
	 * always operate with the BGL.
	 */
	if (info->i_mplock_required != mpheld) {
	    if (info->i_mplock_required) {
		KKASSERT(mpheld == 0);
		get_mplock();
		mpheld = 1;
	    } else {
		KKASSERT(mpheld != 0);
		rel_mplock();
		mpheld = 0;
	    }
	}

	TD_INVARIANTS_GET(gd->gd_curthread);

	/*
	 * If an interrupt is pending, clear i_running and execute the
	 * handlers.  Note that certain types of interrupts can re-trigger
	 * and set i_running again.
	 *
	 * Each handler is run in a critical section.  Note that we run both
	 * FAST and SLOW designated service routines.
	 */
	if (info->i_running) {
	    ++ill_count;
	    info->i_running = 0;

	    if (*list == NULL)
		report_stray_interrupt(info, "ithread_handler");

	    for (rec = *list; rec; rec = nrec) {
		/* rec may be invalid after call */
		nrec = rec->next;
		if (rec->serializer) {
		    lwkt_serialize_handler_call(rec->serializer, rec->handler,
						rec->argument, NULL);
		} else {
		    rec->handler(rec->argument, NULL);
		}
		TD_INVARIANTS_TEST(gd->gd_curthread, rec->name);
	    }
	}

	/*
	 * This is our interrupt hook to add rate randomness to the random
	 * number generator.
	 */
	if (info->i_random.sc_enabled > 0)
	    add_interrupt_randomness(intr);

	/*
	 * Unmask the interrupt to allow it to trigger again.  This only
	 * applies to certain types of interrupts (typ level interrupts).
	 * This can result in the interrupt retriggering, but the retrigger
	 * will not be processed until we cycle our critical section.
	 *
	 * Only unmask interrupts while handlers are installed.  It is
	 * possible to hit a situation where no handlers are installed
	 * due to a device driver livelocking and then tearing down its
	 * interrupt on close (the parallel bus being a good example).
	 */
	if (intr < FIRST_SOFTINT && *list)
	    machintr_intr_enable(intr);

	/*
	 * Do a quick exit/enter to catch any higher-priority interrupt
	 * sources, such as the statclock, so thread time accounting
	 * will still work.  This may also cause an interrupt to re-trigger.
	 */
	crit_exit_gd(gd);
	crit_enter_gd(gd);

	/*
	 * LIVELOCK STATE MACHINE
	 */
	switch(info->i_state) {
	case ISTATE_NORMAL:
	    /*
	     * Reset the count each second.
	     */
	    if (lseconds != gd->gd_time_seconds) {
		lseconds = gd->gd_time_seconds;
		ill_count = 0;
	    }

	    /*
	     * If we did not exceed the frequency limit, we are done.
	     * If the interrupt has not retriggered we deschedule ourselves.
	     */
	    if (ill_count <= livelock_limit) {
		if (info->i_running == 0) {
		    lwkt_deschedule_self(gd->gd_curthread);
		    lwkt_switch();
		}
		break;
	    }

	    /*
	     * Otherwise we are livelocked.  Set up a periodic systimer
	     * to wake the thread up at the limit frequency.
	     */
	    kprintf("intr %d on cpu%d at %d/%d hz, livelocked limit engaged!\n",
		    intr, cpuid, ill_count, livelock_limit);
	    info->i_state = ISTATE_LIVELOCKED;
	    if ((use_limit = livelock_limit) < 100)
		use_limit = 100;
	    else if (use_limit > 500000)
		use_limit = 500000;
	    systimer_init_periodic_nq(&ill_timer, ithread_livelock_wakeup,
				      (void *)(intptr_t)intr, use_limit);
	    /* fall through */
	case ISTATE_LIVELOCKED:
	    /*
	     * Wait for our periodic timer to go off.  Since the interrupt
	     * has re-armed it can still set i_running, but it will not
	     * reschedule us while we are in a livelocked state.
	     */
	    lwkt_deschedule_self(gd->gd_curthread);
	    lwkt_switch();

	    /*
	     * Check once a second to see if the livelock condition no
	     * longer applies.
	     */
	    if (lseconds != gd->gd_time_seconds) {
		lseconds = gd->gd_time_seconds;
		if (ill_count < livelock_lowater) {
		    info->i_state = ISTATE_NORMAL;
		    systimer_del(&ill_timer);
		    kprintf("intr %d on cpu%d at %d/%d hz, livelock removed\n",
			    intr, cpuid, ill_count, livelock_lowater);
		} else if (livelock_debug == intr ||
			   (bootverbose && cold)) {
		    kprintf("intr %d on cpu%d at %d/%d hz, in livelock\n",
			    intr, cpuid, ill_count, livelock_lowater);
		}
		ill_count = 0;
	    }
	    break;
	}
    }
    /* NOT REACHED */
}
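
/*
 * Tuning note (illustrative): the livelock state machine above is
 * governed by the sysctls declared near the top of this file:
 *
 *	sysctl kern.livelock_limit=40000	# engage above this rate
 *	sysctl kern.livelock_lowater=20000	# disengage below this rate
 *	sysctl kern.livelock_debug=<intr>	# verbose for one vector
 *
 * A vector exceeding livelock_limit interrupts per second is throttled
 * to the systimer frequency until its rate drops below livelock_lowater.
 */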
/*
 * Emergency interrupt polling thread.  The thread begins execution
 * outside a critical section with the BGL held.
 *
 * If emergency interrupt polling is enabled, this thread will
 * execute all system interrupts not marked INTR_NOPOLL at the
 * specified polling frequency.
 *
 * WARNING! This thread runs *ALL* interrupt service routines that
 * are not marked INTR_NOPOLL, which basically means everything except
 * the 8254 clock interrupt and the ATA interrupt.  It has very high
 * overhead and should only be used in situations where the machine
 * cannot otherwise be made to work.  Due to the severe performance
 * degradation, it should not be enabled on production machines.
 */
static void
ithread_emergency(void *arg __unused)
{
    globaldata_t gd = mycpu;
    struct intr_info *info;
    intrec_t rec, nrec;
    int intr, cpuid = mycpuid;
    TD_INVARIANTS_DECLARE;

    get_mplock();
    crit_enter_gd(gd);
    TD_INVARIANTS_GET(gd->gd_curthread);

    for (;;) {
	for (intr = 0; intr < max_installed_hard_intr[cpuid]; ++intr) {
	    info = &intr_info_ary[cpuid][intr];
	    for (rec = info->i_reclist; rec; rec = nrec) {
		/* rec may be invalid after call */
		nrec = rec->next;
		if ((rec->intr_flags & INTR_NOPOLL) == 0) {
		    if (rec->serializer) {
			lwkt_serialize_handler_try(rec->serializer,
						rec->handler, rec->argument, NULL);
		    } else {
			rec->handler(rec->argument, NULL);
		    }
		    TD_INVARIANTS_TEST(gd->gd_curthread, rec->name);
		}
	    }
	}
	lwkt_deschedule_self(gd->gd_curthread);
	lwkt_switch();
    }
    /* NOT REACHED */
}

/*
 * Systimer callback - schedule the emergency interrupt poll thread
 * if emergency polling is enabled.
 */
static
void
emergency_intr_timer_callback(systimer_t info, int in_ipi __unused,
	struct intrframe *frame __unused)
{
    if (emergency_intr_enable)
	lwkt_schedule(info->data);
}

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however
 * is machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */

static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
    struct intr_info *info;
    intrec_t rec;
    int error = 0;
    int len;
    int intr, cpuid;
    char buf[64];

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
	for (intr = 0; error == 0 && intr < MAX_INTS; ++intr) {
	    info = &intr_info_ary[cpuid][intr];

	    len = 0;
	    buf[0] = 0;
	    for (rec = info->i_reclist; rec; rec = rec->next) {
		ksnprintf(buf + len, sizeof(buf) - len, "%s%s",
			  (len ? "/" : ""), rec->name);
		len += strlen(buf + len);
	    }
	    if (len == 0) {
		ksnprintf(buf, sizeof(buf), "irq%d", intr);
		len = strlen(buf);
	    }
	    error = SYSCTL_OUT(req, buf, len + 1);
	}
    }
    return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
	NULL, 0, sysctl_intrnames, "", "Interrupt Names");
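
/*
 * Example (userland, illustrative; error checks omitted): systat-style
 * consumers fetch the names with sysctlbyname(3).  The buffer holds one
 * NUL-terminated string per (cpu, vector) slot, in the same order that
 * hw.intrcnt_all emits its counters:
 *
 *	size_t len = 0;
 *	sysctlbyname("hw.intrnames", NULL, &len, NULL, 0);
 *	char *buf = malloc(len);
 *	sysctlbyname("hw.intrnames", buf, &len, NULL, 0);
 */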
static int
sysctl_intrcnt_all(SYSCTL_HANDLER_ARGS)
{
    struct intr_info *info;
    int error = 0;
    int intr, cpuid;

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
	for (intr = 0; intr < MAX_INTS; ++intr) {
	    info = &intr_info_ary[cpuid][intr];

	    error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
	    if (error)
		goto failed;
	}
    }
failed:
    return(error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt_all, CTLTYPE_OPAQUE | CTLFLAG_RD,
	NULL, 0, sysctl_intrcnt_all, "", "Interrupt Counts");

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
	NULL, 0, sysctl_intrcnt_all, "", "Interrupt Counts");

static void
int_moveto_destcpu(int *orig_cpuid0, int cpuid)
{
    int orig_cpuid = mycpuid;

    if (cpuid != orig_cpuid)
	lwkt_migratecpu(cpuid);

    *orig_cpuid0 = orig_cpuid;
}

static void
int_moveto_origcpu(int orig_cpuid, int cpuid)
{
    if (cpuid != orig_cpuid)
	lwkt_migratecpu(orig_cpuid);
}

static void
intr_init(void *dummy __unused)
{
    int cpuid;

    kprintf("Initialize MI interrupts\n");

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
	int intr;

	for (intr = 0; intr < MAX_INTS; ++intr) {
	    struct intr_info *info = &intr_info_ary[cpuid][intr];

	    info->i_cpuid = cpuid;
	    info->i_intr = intr;
	}
    }
}
SYSINIT(intr_init, SI_BOOT2_FINISH_PIC, SI_ORDER_ANY, intr_init, NULL);