kernel - Major signal path adjustments to fix races, tsleep race fixes, +more
[dragonfly.git] / sys / kern / kern_intr.c
/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/serialize.h>
#include <sys/interrupt.h>
#include <sys/bus.h>
#include <sys/machintr.h>

#include <machine/frame.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

struct intr_info;

typedef struct intrec {
        struct intrec *next;
        struct intr_info *info;
        inthand2_t *handler;
        void *argument;
        char *name;
        int intr;
        int intr_flags;
        struct lwkt_serialize *serializer;
} *intrec_t;

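/*
 * Per-interrupt bookkeeping: the registered handler chain, the ithread
 * servicing it, livelock state, and statistics.
 */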
struct intr_info {
        intrec_t i_reclist;
        struct thread i_thread;
        struct random_softc i_random;
        int i_running;
        long i_count;                   /* interrupts dispatched */
        int i_mplock_required;
        int i_fast;
        int i_slow;
        int i_state;
        int i_errorticks;
        unsigned long i_straycount;
} intr_info_ary[MAX_INTS];

int max_installed_hard_intr;
int max_installed_soft_intr;

#define EMERGENCY_INTR_POLLING_FREQ_MAX 20000

/*
 * Assert that callers into interrupt handlers don't return with
 * dangling tokens, spinlocks, or mp locks.
 */
#ifdef INVARIANTS

#define TD_INVARIANTS_DECLARE   \
        int spincount;          \
        lwkt_tokref_t curstop

#define TD_INVARIANTS_GET(td)                                   \
        do {                                                    \
                spincount = (td)->td_gd->gd_spinlocks_wr;       \
                curstop = (td)->td_toks_stop;                   \
        } while(0)

#define TD_INVARIANTS_TEST(td, name)                                    \
        do {                                                            \
                KASSERT(spincount == (td)->td_gd->gd_spinlocks_wr,      \
                        ("spincount mismatch after interrupt handler %s", \
                        name));                                         \
                KASSERT(curstop == (td)->td_toks_stop,                  \
                        ("token count mismatch after interrupt handler %s", \
                        name));                                         \
        } while(0)

#else

/* !INVARIANTS */

#define TD_INVARIANTS_DECLARE
#define TD_INVARIANTS_GET(td)
#define TD_INVARIANTS_TEST(td, name)

#endif /* INVARIANTS */

static int sysctl_emergency_freq(SYSCTL_HANDLER_ARGS);
static int sysctl_emergency_enable(SYSCTL_HANDLER_ARGS);
static void emergency_intr_timer_callback(systimer_t, int, struct intrframe *);
static void ithread_handler(void *arg);
static void ithread_emergency(void *arg);
static void report_stray_interrupt(int intr, struct intr_info *info);
static void int_moveto_destcpu(int *, int);
static void int_moveto_origcpu(int, int);

int intr_info_size = NELEM(intr_info_ary);

static struct systimer emergency_intr_timer;
static struct thread emergency_intr_thread;

#define ISTATE_NOTHREAD         0
#define ISTATE_NORMAL           1
#define ISTATE_LIVELOCKED       2

static int livelock_limit = 40000;
static int livelock_lowater = 20000;
static int livelock_debug = -1;

SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
        CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_lowater,
        CTLFLAG_RW, &livelock_lowater, 0, "Livelock low-water mark restore");
SYSCTL_INT(_kern, OID_AUTO, livelock_debug,
        CTLFLAG_RW, &livelock_debug, 0, "Livelock debug intr#");

static int emergency_intr_enable = 0;   /* emergency interrupt polling */
TUNABLE_INT("kern.emergency_intr_enable", &emergency_intr_enable);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_enable, CTLTYPE_INT | CTLFLAG_RW,
        0, 0, sysctl_emergency_enable, "I", "Emergency Interrupt Poll Enable");

static int emergency_intr_freq = 10;    /* emergency polling frequency */
TUNABLE_INT("kern.emergency_intr_freq", &emergency_intr_freq);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_freq, CTLTYPE_INT | CTLFLAG_RW,
        0, 0, sysctl_emergency_freq, "I", "Emergency Interrupt Poll Frequency");

/*
 * Sysctl support routines
 */
static int
sysctl_emergency_enable(SYSCTL_HANDLER_ARGS)
{
        int error, enabled;

        enabled = emergency_intr_enable;
        error = sysctl_handle_int(oidp, &enabled, 0, req);
        if (error || req->newptr == NULL)
                return error;
        emergency_intr_enable = enabled;
        if (emergency_intr_enable) {
                systimer_adjust_periodic(&emergency_intr_timer,
                                         emergency_intr_freq);
        } else {
                systimer_adjust_periodic(&emergency_intr_timer, 1);
        }
        return 0;
}

static int
sysctl_emergency_freq(SYSCTL_HANDLER_ARGS)
{
        int error, phz;

        phz = emergency_intr_freq;
        error = sysctl_handle_int(oidp, &phz, 0, req);
        if (error || req->newptr == NULL)
                return error;
        if (phz <= 0)
                return EINVAL;
        else if (phz > EMERGENCY_INTR_POLLING_FREQ_MAX)
                phz = EMERGENCY_INTR_POLLING_FREQ_MAX;

        emergency_intr_freq = phz;
        if (emergency_intr_enable) {
                systimer_adjust_periodic(&emergency_intr_timer,
                                         emergency_intr_freq);
        } else {
                systimer_adjust_periodic(&emergency_intr_timer, 1);
        }
        return 0;
}

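/*
 * Example (illustrative only): the tunables/sysctls declared above can
 * be set from userland, e.g.:
 *
 *      sysctl kern.emergency_intr_enable=1
 *      sysctl kern.emergency_intr_freq=100
 */
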
/*
 * Register an SWI or INTerrupt handler.
 */
void *
register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
                struct lwkt_serialize *serializer, int cpuid)
{
        if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
                panic("register_swi: bad intr %d", intr);

        if (cpuid < 0)
                cpuid = intr % ncpus;
        return(register_int(intr, handler, arg, name, serializer, 0, cpuid));
}
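
/*
 * Example (hypothetical driver code; SWI_MYDEV, mydev_swi and sc are
 * illustrative names, not part of this file):
 *
 *      sc->swi_cookie = register_swi(SWI_MYDEV, mydev_swi, sc, "mydev",
 *                                    &sc->serializer, -1);
 *      ...
 *      unregister_swi(sc->swi_cookie, SWI_MYDEV, -1);
 */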
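
/*
 * Register an MPSAFE SWI handler: identical to register_swi() except the
 * handler is flagged INTR_MPSAFE and runs without the Big Giant Lock.
 */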
void *
register_swi_mp(int intr, inthand2_t *handler, void *arg, const char *name,
                struct lwkt_serialize *serializer, int cpuid)
{
        if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
                panic("register_swi_mp: bad intr %d", intr);

        if (cpuid < 0)
                cpuid = intr % ncpus;
        return(register_int(intr, handler, arg, name, serializer,
                            INTR_MPSAFE, cpuid));
}
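
/*
 * Register a hardware or software interrupt handler on the target cpu.
 * The caller is temporarily migrated to that cpu, the handler record is
 * appended to the vector's chain, and the machine-level vector is set up
 * when the first handler is installed on a hardware interrupt.
 */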
void *
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
                struct lwkt_serialize *serializer, int intr_flags, int cpuid)
{
        struct intr_info *info;
        struct intrec **list;
        intrec_t rec;
        int orig_cpuid;

        KKASSERT(cpuid >= 0 && cpuid < ncpus);

        if (intr < 0 || intr >= MAX_INTS)
                panic("register_int: bad intr %d", intr);
        if (name == NULL)
                name = "???";
        info = &intr_info_ary[intr];

        /*
         * Construct an interrupt handler record
         */
        rec = kmalloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
        rec->name = kmalloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
        strcpy(rec->name, name);

        rec->info = info;
        rec->handler = handler;
        rec->argument = arg;
        rec->intr = intr;
        rec->intr_flags = intr_flags;
        rec->next = NULL;
        rec->serializer = serializer;

        /*
         * Create an emergency polling thread and set up a systimer to wake
         * it up.
         */
        if (emergency_intr_thread.td_kstack == NULL) {
                lwkt_create(ithread_emergency, NULL, NULL,
                            &emergency_intr_thread,
                            TDF_NOSTART | TDF_INTTHREAD,
                            ncpus - 1, "ithread emerg");
                systimer_init_periodic_nq(&emergency_intr_timer,
                            emergency_intr_timer_callback,
                            &emergency_intr_thread,
                            (emergency_intr_enable ? emergency_intr_freq : 1));
        }

        int_moveto_destcpu(&orig_cpuid, cpuid);

        /*
         * Create an interrupt thread if necessary, leave it in an unscheduled
         * state.
         */
        if (info->i_state == ISTATE_NOTHREAD) {
                info->i_state = ISTATE_NORMAL;
                lwkt_create(ithread_handler, (void *)(intptr_t)intr, NULL,
                            &info->i_thread, TDF_NOSTART | TDF_INTTHREAD,
                            cpuid, "ithread %d", intr);
                if (intr >= FIRST_SOFTINT)
                        lwkt_setpri(&info->i_thread, TDPRI_SOFT_NORM);
                else
                        lwkt_setpri(&info->i_thread, TDPRI_INT_MED);
                info->i_thread.td_preemptable = lwkt_preempt;
        }

        list = &info->i_reclist;

        /*
         * Keep track of how many fast and slow interrupts we have.
         * Set i_mplock_required if any handler in the chain requires
         * the MP lock to operate.
         */
        if ((intr_flags & INTR_MPSAFE) == 0)
                info->i_mplock_required = 1;
        if (intr_flags & INTR_CLOCK)
                ++info->i_fast;
        else
                ++info->i_slow;

        /*
         * Enable random number generation keying off of this interrupt.
         */
        if ((intr_flags & INTR_NOENTROPY) == 0 &&
            info->i_random.sc_enabled == 0) {
                info->i_random.sc_enabled = 1;
                info->i_random.sc_intr = intr;
        }

        /*
         * Add the record to the interrupt list.
         */
        crit_enter();
        while (*list != NULL)
                list = &(*list)->next;
        *list = rec;
        crit_exit();

        /*
         * Update max_installed_hard_intr to make the emergency intr poll
         * a bit more efficient.
         */
        if (intr < FIRST_SOFTINT) {
                if (max_installed_hard_intr <= intr)
                        max_installed_hard_intr = intr + 1;
        } else {
                if (max_installed_soft_intr <= intr)
                        max_installed_soft_intr = intr + 1;
        }

        /*
         * Set up the machine level interrupt vector
         */
        if (intr < FIRST_SOFTINT && info->i_slow + info->i_fast == 1)
                machintr_intr_setup(intr, intr_flags);

        int_moveto_origcpu(orig_cpuid, cpuid);

        return(rec);
}
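
/*
 * Deregister an SWI handler.  A negative cpuid selects the same default
 * cpu that register_swi() chose.
 */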
void
unregister_swi(void *id, int intr, int cpuid)
{
        if (cpuid < 0)
                cpuid = intr % ncpus;

        unregister_int(id, cpuid);
}
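
/*
 * Deregister an interrupt handler.  'id' is the opaque cookie returned
 * by register_int().
 */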
void
unregister_int(void *id, int cpuid)
{
        struct intr_info *info;
        struct intrec **list;
        intrec_t rec;
        int intr, orig_cpuid;

        KKASSERT(cpuid >= 0 && cpuid < ncpus);

        intr = ((intrec_t)id)->intr;

        if (intr < 0 || intr >= MAX_INTS)
                panic("unregister_int: bad intr %d", intr);

        info = &intr_info_ary[intr];

        int_moveto_destcpu(&orig_cpuid, cpuid);

        /*
         * Remove the interrupt descriptor, adjust the descriptor count,
         * and tear down the machine level vector if this was the last
         * interrupt.
         */
        crit_enter();
        list = &info->i_reclist;
        while ((rec = *list) != NULL) {
                if (rec == id)
                        break;
                list = &rec->next;
        }
        if (rec) {
                intrec_t rec0;

                *list = rec->next;
                if (rec->intr_flags & INTR_CLOCK)
                        --info->i_fast;
                else
                        --info->i_slow;
                if (intr < FIRST_SOFTINT && info->i_fast + info->i_slow == 0)
                        machintr_intr_teardown(intr);

                /*
                 * Clear i_mplock_required if no handlers in the chain
                 * require the MP lock.
                 */
                for (rec0 = info->i_reclist; rec0; rec0 = rec0->next) {
                        if ((rec0->intr_flags & INTR_MPSAFE) == 0)
                                break;
                }
                if (rec0 == NULL)
                        info->i_mplock_required = 0;
        }

        crit_exit();

        int_moveto_origcpu(orig_cpuid, cpuid);

        /*
         * Free the record.
         */
        if (rec != NULL) {
                kfree(rec->name, M_DEVBUF);
                kfree(rec, M_DEVBUF);
        } else {
                kprintf("warning: unregister_int: int %d handler for %s not found\n",
                        intr, ((intrec_t)id)->name);
        }
}
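
/*
 * Return the name of the handler registered on a vector, "mux" if more
 * than one handler shares it, or NULL if none are installed.
 */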
const char *
get_registered_name(int intr)
{
        intrec_t rec;

        if (intr < 0 || intr >= MAX_INTS)
                panic("get_registered_name: bad intr %d", intr);

        if ((rec = intr_info_ary[intr].i_reclist) == NULL)
                return(NULL);
        else if (rec->next)
                return("mux");
        else
                return(rec->name);
}

int
count_registered_ints(int intr)
{
        struct intr_info *info;

        if (intr < 0 || intr >= MAX_INTS)
                panic("count_registered_ints: bad intr %d", intr);
        info = &intr_info_ary[intr];
        return(info->i_fast + info->i_slow);
}

long
get_interrupt_counter(int intr)
{
        struct intr_info *info;

        if (intr < 0 || intr >= MAX_INTS)
                panic("get_interrupt_counter: bad intr %d", intr);
        info = &intr_info_ary[intr];
        return(info->i_count);
}

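/*
 * Adjust the LWKT priority of an SWI's interrupt thread.
 */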
void
swi_setpriority(int intr, int pri)
{
        struct intr_info *info;

        if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
                panic("swi_setpriority: bad intr %d", intr);
        info = &intr_info_ary[intr];
        if (info->i_state != ISTATE_NOTHREAD)
                lwkt_setpri(&info->i_thread, pri);
}

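/*
 * Enable or disable entropy harvesting from an interrupt.  Deregistration
 * sets sc_enabled to -1 so a later register_int() on the same vector does
 * not silently re-enable it.
 */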
void
register_randintr(int intr)
{
        struct intr_info *info;

        if (intr < 0 || intr >= MAX_INTS)
                panic("register_randintr: bad intr %d", intr);
        info = &intr_info_ary[intr];
        info->i_random.sc_intr = intr;
        info->i_random.sc_enabled = 1;
}

void
unregister_randintr(int intr)
{
        struct intr_info *info;

        if (intr < 0 || intr >= MAX_INTS)
                panic("unregister_randintr: bad intr %d", intr);
        info = &intr_info_ary[intr];
        info->i_random.sc_enabled = -1;
}

int
next_registered_randintr(int intr)
{
        struct intr_info *info;

        if (intr < 0 || intr >= MAX_INTS)
                panic("next_registered_randintr: bad intr %d", intr);
        while (intr < MAX_INTS) {
                info = &intr_info_ary[intr];
                if (info->i_random.sc_enabled > 0)
                        break;
                ++intr;
        }
        return(intr);
}

/*
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its i_running bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).  We do NOT attempt to
 * reschedule interrupts whose i_running bit is already set because
 * this would prematurely wakeup a livelock-limited interrupt thread.
 *
 * i_running is only tested/set on the same cpu as the interrupt thread.
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
 */
#ifdef SMP

static void
sched_ithd_remote(void *arg)
{
        sched_ithd((int)(intptr_t)arg);
}

#endif

void
sched_ithd(int intr)
{
        struct intr_info *info;

        info = &intr_info_ary[intr];

        ++info->i_count;
        if (info->i_state != ISTATE_NOTHREAD) {
                if (info->i_reclist == NULL) {
                        report_stray_interrupt(intr, info);
                } else {
#ifdef SMP
                        if (info->i_thread.td_gd == mycpu) {
                                if (info->i_running == 0) {
                                        info->i_running = 1;
                                        if (info->i_state != ISTATE_LIVELOCKED)
                                                lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
                                }
                        } else {
                                lwkt_send_ipiq(info->i_thread.td_gd,
                                        sched_ithd_remote, (void *)(intptr_t)intr);
                        }
#else
                        if (info->i_running == 0) {
                                info->i_running = 1;
                                if (info->i_state != ISTATE_LIVELOCKED)
                                        lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
                        }
#endif
                }
        } else {
                report_stray_interrupt(intr, info);
        }
}

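/*
 * Report a stray interrupt, rate-limited to one message per tick and
 * silenced completely after the tenth occurrence.
 */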
static void
report_stray_interrupt(int intr, struct intr_info *info)
{
        ++info->i_straycount;
        if (info->i_straycount < 10) {
                if (info->i_errorticks == ticks)
                        return;
                info->i_errorticks = ticks;
                kprintf("sched_ithd: stray interrupt %d on cpu %d\n",
                        intr, mycpuid);
        } else if (info->i_straycount == 10) {
                kprintf("sched_ithd: %ld stray interrupts from intr %d on "
                        "cpu %d - there will be no further reports\n",
                        info->i_straycount, intr, mycpuid);
        }
}

/*
 * This is run from a periodic SYSTIMER (and thus must be MP safe, the BGL
 * might not be held).
 */
static void
ithread_livelock_wakeup(systimer_t st, int in_ipi __unused,
    struct intrframe *frame __unused)
{
        struct intr_info *info;

        info = &intr_info_ary[(int)(intptr_t)st->data];
        if (info->i_state != ISTATE_NOTHREAD)
                lwkt_schedule(&info->i_thread);
}

/*
 * Schedule the ithread from within a fast interrupt handler.
 *
 * XXX Protect the sched_ithd() call with gd_intr_nesting_level?
 * Interrupts aren't enabled, but still...
 */
static __inline void
ithread_fast_sched(int intr, thread_t td)
{
        ++td->td_nest_count;

        /*
         * We are already in a critical section; exit it now to
         * allow preemption.
         */
        crit_exit_quick(td);
        sched_ithd(intr);
        crit_enter_quick(td);

        --td->td_nest_count;
}

/*
 * This function is called directly from the ICU or APIC vector code assembly
 * to process an interrupt.  The critical section and interrupt deferral
 * checks have already been done but the function is entered WITHOUT
 * a critical section held.  The BGL may or may not be held.
 *
 * Must return non-zero if we do not want the vector code to re-enable
 * the interrupt (which we don't if we have to schedule the interrupt).
 */
int ithread_fast_handler(struct intrframe *frame);

int
ithread_fast_handler(struct intrframe *frame)
{
        int intr;
        struct intr_info *info;
        struct intrec **list;
        int must_schedule;
#ifdef SMP
        int got_mplock;
#endif
        TD_INVARIANTS_DECLARE;
        intrec_t rec, nrec;
        globaldata_t gd;
        thread_t td;

        intr = frame->if_vec;
        gd = mycpu;
        td = curthread;

        /* We must be in a critical section. */
        KKASSERT(td->td_critcount);

        info = &intr_info_ary[intr];

        /*
         * If we are not processing any FAST interrupts, just schedule the
         * thing.
         */
        if (info->i_fast == 0) {
                ++gd->gd_cnt.v_intr;
                ithread_fast_sched(intr, td);
                return(1);
        }

        /*
         * This should not normally occur since interrupts ought to be
         * masked if the ithread has been scheduled or is running.
         */
        if (info->i_running)
                return(1);

        /*
         * Bump the interrupt nesting level to process any FAST interrupts.
         * Obtain the MP lock as necessary.  If the MP lock cannot be
         * obtained, schedule the interrupt thread to deal with the issue
         * instead.
         *
         * To reduce overhead, just leave the MP lock held once it has been
         * obtained.
         */
        ++gd->gd_intr_nesting_level;
        ++gd->gd_cnt.v_intr;
        must_schedule = info->i_slow;
#ifdef SMP
        got_mplock = 0;
#endif

        TD_INVARIANTS_GET(td);
        list = &info->i_reclist;

        for (rec = *list; rec; rec = nrec) {
                /* rec may be invalid after call */
                nrec = rec->next;

                if (rec->intr_flags & INTR_CLOCK) {
#ifdef SMP
                        if ((rec->intr_flags & INTR_MPSAFE) == 0 &&
                            got_mplock == 0) {
                                if (try_mplock() == 0) {
                                        /* Couldn't get the MP lock; just schedule it. */
                                        must_schedule = 1;
                                        break;
                                }
                                got_mplock = 1;
                        }
#endif
                        if (rec->serializer) {
                                must_schedule += lwkt_serialize_handler_try(
                                                rec->serializer, rec->handler,
                                                rec->argument, frame);
                        } else {
                                rec->handler(rec->argument, frame);
                        }
                        TD_INVARIANTS_TEST(td, rec->name);
                }
        }

        /*
         * Cleanup
         */
        --gd->gd_intr_nesting_level;
#ifdef SMP
        if (got_mplock)
                rel_mplock();
#endif

        /*
         * If we had a problem, or mixed fast and slow interrupt handlers are
         * registered, schedule the ithread to catch the missed records (it
         * will just re-run all of them).  A return value of 0 indicates that
         * all handlers have been run and the interrupt can be re-enabled, and
         * a non-zero return indicates that the interrupt thread controls
         * re-enablement.
         */
        if (must_schedule > 0)
                ithread_fast_sched(intr, td);
        else if (must_schedule == 0)
                ++info->i_count;
        return(must_schedule);
}

/*
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and with no
 * MP lock held.
 *
 * The i_running state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd() is called.  The HW interrupt
 * remains disabled until all routines have run.  We then call
 * ithread_done() to reenable the HW interrupt and deschedule us until the
 * next interrupt.
 *
 * We are responsible for atomically checking i_running and ithread_done()
 * is responsible for atomically checking for platform-specific delayed
 * interrupts.  i_running for our irq is only set in the context of our cpu,
 * so a critical section is a sufficient interlock.
 */
#define LIVELOCK_TIMEFRAME(freq)        ((freq) >> 2)   /* 1/4 second */

static void
ithread_handler(void *arg)
{
        struct intr_info *info;
        int use_limit;
        __uint32_t lseconds;
        int intr;
        int mpheld;
        struct intrec **list;
        intrec_t rec, nrec;
        globaldata_t gd;
        struct systimer ill_timer;      /* enforced freq. timer */
        u_int ill_count;                /* interrupt livelock counter */
        TD_INVARIANTS_DECLARE;

        ill_count = 0;
        intr = (int)(intptr_t)arg;
        info = &intr_info_ary[intr];
        list = &info->i_reclist;

        /*
         * The loop must be entered with one critical section held.  The
         * thread does not hold the mplock on startup.
         */
        gd = mycpu;
        lseconds = gd->gd_time_seconds;
        crit_enter_gd(gd);
        mpheld = 0;

        for (;;) {
                /*
                 * The chain is only considered MPSAFE if all its interrupt
                 * handlers are MPSAFE.  However, if intr_mpsafe has been
                 * turned off we always operate with the BGL.
                 */
#ifdef SMP
                if (info->i_mplock_required != mpheld) {
                        if (info->i_mplock_required) {
                                KKASSERT(mpheld == 0);
                                get_mplock();
                                mpheld = 1;
                        } else {
                                KKASSERT(mpheld != 0);
                                rel_mplock();
                                mpheld = 0;
                        }
                }
#endif

                TD_INVARIANTS_GET(gd->gd_curthread);

                /*
                 * If an interrupt is pending, clear i_running and execute the
                 * handlers.  Note that certain types of interrupts can
                 * re-trigger and set i_running again.
                 *
                 * Each handler is run in a critical section.  Note that we
                 * run both FAST and SLOW designated service routines.
                 */
                if (info->i_running) {
                        ++ill_count;
                        info->i_running = 0;

                        if (*list == NULL)
                                report_stray_interrupt(intr, info);

                        for (rec = *list; rec; rec = nrec) {
                                /* rec may be invalid after call */
                                nrec = rec->next;
                                if (rec->serializer) {
                                        lwkt_serialize_handler_call(
                                                rec->serializer, rec->handler,
                                                rec->argument, NULL);
                                } else {
                                        rec->handler(rec->argument, NULL);
                                }
                                TD_INVARIANTS_TEST(gd->gd_curthread, rec->name);
                        }
                }

                /*
                 * This is our interrupt hook to add rate randomness to the
                 * random number generator.
                 */
                if (info->i_random.sc_enabled > 0)
                        add_interrupt_randomness(intr);

                /*
                 * Unmask the interrupt to allow it to trigger again.  This
                 * only applies to certain types of interrupts (typically
                 * level-triggered interrupts).  This can result in the
                 * interrupt retriggering, but the retrigger will not be
                 * processed until we cycle our critical section.
                 *
                 * Only unmask interrupts while handlers are installed.  It is
                 * possible to hit a situation where no handlers are installed
                 * due to a device driver livelocking and then tearing down its
                 * interrupt on close (the parallel bus being a good example).
                 */
                if (intr < FIRST_SOFTINT && *list)
                        machintr_intr_enable(intr);

                /*
                 * Do a quick exit/enter to catch any higher-priority interrupt
                 * sources, such as the statclock, so thread time accounting
                 * will still work.  This may also cause an interrupt to
                 * re-trigger.
                 */
                crit_exit_gd(gd);
                crit_enter_gd(gd);

                /*
                 * LIVELOCK STATE MACHINE
                 */
                switch(info->i_state) {
                case ISTATE_NORMAL:
                        /*
                         * Reset the count each second.
                         */
                        if (lseconds != gd->gd_time_seconds) {
                                lseconds = gd->gd_time_seconds;
                                ill_count = 0;
                        }

                        /*
                         * If we did not exceed the frequency limit, we are
                         * done.  If the interrupt has not retriggered we
                         * deschedule ourselves.
                         */
                        if (ill_count <= livelock_limit) {
                                if (info->i_running == 0) {
                                        lwkt_deschedule_self(gd->gd_curthread);
                                        lwkt_switch();
                                }
                                break;
                        }

                        /*
                         * Otherwise we are livelocked.  Set up a periodic
                         * systimer to wake the thread up at the limit
                         * frequency.
                         */
                        kprintf("intr %d at %d/%d hz, livelocked limit engaged!\n",
                                intr, ill_count, livelock_limit);
                        info->i_state = ISTATE_LIVELOCKED;
                        if ((use_limit = livelock_limit) < 100)
                                use_limit = 100;
                        else if (use_limit > 500000)
                                use_limit = 500000;
                        systimer_init_periodic_nq(&ill_timer,
                                ithread_livelock_wakeup,
                                (void *)(intptr_t)intr, use_limit);
                        /* fall through */
                case ISTATE_LIVELOCKED:
                        /*
                         * Wait for our periodic timer to go off.  Since the
                         * interrupt has re-armed it can still set i_running,
                         * but it will not reschedule us while we are in a
                         * livelocked state.
                         */
                        lwkt_deschedule_self(gd->gd_curthread);
                        lwkt_switch();

                        /*
                         * Check once a second to see if the livelock
                         * condition no longer applies.
                         */
                        if (lseconds != gd->gd_time_seconds) {
                                lseconds = gd->gd_time_seconds;
                                if (ill_count < livelock_lowater) {
                                        info->i_state = ISTATE_NORMAL;
                                        systimer_del(&ill_timer);
                                        kprintf("intr %d at %d/%d hz, livelock removed\n",
                                                intr, ill_count, livelock_lowater);
                                } else if (livelock_debug == intr ||
                                           (bootverbose && cold)) {
                                        kprintf("intr %d at %d/%d hz, in livelock\n",
                                                intr, ill_count, livelock_lowater);
                                }
                                ill_count = 0;
                        }
                        break;
                }
        }
        /* NOT REACHED */
}

/*
 * Emergency interrupt polling thread.  The thread begins execution
 * outside a critical section with the BGL held.
 *
 * If emergency interrupt polling is enabled, this thread will
 * execute all system interrupts not marked INTR_NOPOLL at the
 * specified polling frequency.
 *
 * WARNING! This thread runs *ALL* interrupt service routines that
 * are not marked INTR_NOPOLL, which basically means everything except
 * the 8254 clock interrupt and the ATA interrupt.  It has very high
 * overhead and should only be used in situations where the machine
 * cannot otherwise be made to work.  Due to the severe performance
 * degradation, it should not be enabled on production machines.
 */
static void
ithread_emergency(void *arg __unused)
{
        globaldata_t gd = mycpu;
        struct intr_info *info;
        intrec_t rec, nrec;
        int intr;
        TD_INVARIANTS_DECLARE;

        get_mplock();
        crit_enter_gd(gd);
        TD_INVARIANTS_GET(gd->gd_curthread);

        for (;;) {
                for (intr = 0; intr < max_installed_hard_intr; ++intr) {
                        info = &intr_info_ary[intr];
                        for (rec = info->i_reclist; rec; rec = nrec) {
                                /* rec may be invalid after call */
                                nrec = rec->next;
                                if ((rec->intr_flags & INTR_NOPOLL) == 0) {
                                        if (rec->serializer) {
                                                lwkt_serialize_handler_try(
                                                        rec->serializer,
                                                        rec->handler,
                                                        rec->argument, NULL);
                                        } else {
                                                rec->handler(rec->argument, NULL);
                                        }
                                        TD_INVARIANTS_TEST(gd->gd_curthread,
                                                           rec->name);
                                }
                        }
                }
                lwkt_deschedule_self(gd->gd_curthread);
                lwkt_switch();
        }
        /* NOT REACHED */
}

/*
 * Systimer callback - schedule the emergency interrupt poll thread
 * if emergency polling is enabled.
 */
static
void
emergency_intr_timer_callback(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
        if (emergency_intr_enable)
                lwkt_schedule(info->data);
}

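/*
 * Return the cpu the interrupt's ithread is bound to, or -1 if no
 * ithread has been created for the vector.
 */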
int
ithread_cpuid(int intr)
{
        const struct intr_info *info;

        KKASSERT(intr >= 0 && intr < MAX_INTS);
        info = &intr_info_ary[intr];

        if (info->i_state == ISTATE_NOTHREAD)
                return -1;
        return info->i_thread.td_gd->gd_cpuid;
}

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however
 * is machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */

static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
        struct intr_info *info;
        intrec_t rec;
        int error = 0;
        int len;
        int intr;
        char buf[64];

        for (intr = 0; error == 0 && intr < MAX_INTS; ++intr) {
                info = &intr_info_ary[intr];

                len = 0;
                buf[0] = 0;
                for (rec = info->i_reclist; rec; rec = rec->next) {
                        ksnprintf(buf + len, sizeof(buf) - len, "%s%s",
                                  (len ? "/" : ""), rec->name);
                        len += strlen(buf + len);
                }
                if (len == 0) {
                        ksnprintf(buf, sizeof(buf), "irq%d", intr);
                        len = strlen(buf);
                }
                error = SYSCTL_OUT(req, buf, len + 1);
        }
        return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
        NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
        struct intr_info *info;
        int error = 0;
        int intr;

        for (intr = 0; intr < max_installed_hard_intr; ++intr) {
                info = &intr_info_ary[intr];

                error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
                if (error)
                        goto failed;
        }
        for (intr = FIRST_SOFTINT; intr < max_installed_soft_intr; ++intr) {
                info = &intr_info_ary[intr];

                error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
                if (error)
                        goto failed;
        }
failed:
        return(error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
        NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

static int
sysctl_intrcnt_all(SYSCTL_HANDLER_ARGS)
{
        struct intr_info *info;
        int error = 0;
        int intr;

        for (intr = 0; intr < MAX_INTS; ++intr) {
                info = &intr_info_ary[intr];

                error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
                if (error)
                        goto failed;
        }
failed:
        return(error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt_all, CTLTYPE_OPAQUE | CTLFLAG_RD,
        NULL, 0, sysctl_intrcnt_all, "", "Interrupt Counts");

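/*
 * Helpers to temporarily migrate the calling thread to the interrupt's
 * target cpu and back, so the handler chain is always manipulated from
 * the cpu that owns it.
 */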
static void
int_moveto_destcpu(int *orig_cpuid0, int cpuid)
{
        int orig_cpuid = mycpuid;

        if (cpuid != orig_cpuid)
                lwkt_migratecpu(cpuid);

        *orig_cpuid0 = orig_cpuid;
}

static void
int_moveto_origcpu(int orig_cpuid, int cpuid)
{
        if (cpuid != orig_cpuid)
                lwkt_migratecpu(orig_cpuid);
}