/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 * $DragonFly: src/sys/kern/kern_intr.c,v 1.21 2005/06/06 15:02:27 dillon Exp $
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/thread2.h>
#include <sys/random.h>

#include <machine/ipl.h>

#include <sys/interrupt.h>

typedef struct intrec {
    struct intrec *next;
    inthand2_t *handler;
    intrmask_t *maskptr;        /* LEGACY */
    void       *argument;
    const char *name;
    int        intr;
} intrec_t;

static intrec_t *intlists[NHWI+NSWI];
static thread_t ithreads[NHWI+NSWI];
static struct thread ithread_ary[NHWI+NSWI];
static struct random_softc irandom_ary[NHWI+NSWI];
static int irunning[NHWI+NSWI];
static u_int ill_count[NHWI+NSWI];      /* interrupt livelock counter */
static u_int ill_ticks[NHWI+NSWI];      /* track elapsed to calculate freq */
static u_int ill_delta[NHWI+NSWI];      /* track elapsed to calculate freq */
static int ill_state[NHWI+NSWI];        /* current state */
static struct systimer ill_timer[NHWI+NSWI];    /* enforced freq. timer */
static struct systimer ill_rtimer[NHWI+NSWI];   /* recovery timer */
static intrmask_t dummy_intr_mask;

#define LIVELOCK_NONE           0
#define LIVELOCK_LIMITED        1

static int livelock_limit = 50000;
static int livelock_fallback = 20000;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
    CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_fallback,
    CTLFLAG_RW, &livelock_fallback, 0, "Livelock interrupt fallback rate");

static void ithread_handler(void *arg);

/*
 * Register an SWI or INTerrupt handler.
 *
 * Note that maskptr exists to support legacy spl handling and is not intended
 * to be permanent (because spls are not compatible with BGL removal).
 */
thread_t
register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
    intrmask_t *maskptr)
{
    if (intr < NHWI || intr >= NHWI + NSWI)
        panic("register_swi: bad intr %d", intr);
    return(register_int(intr, handler, arg, name, maskptr));
}

thread_t
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
    intrmask_t *maskptr)
{
    intrec_t **list;
    intrec_t *rec;
    thread_t td;

    if (intr < 0 || intr >= NHWI + NSWI)
        panic("register_int: bad intr %d", intr);
    if (maskptr == NULL)
        maskptr = &dummy_intr_mask;

    rec = malloc(sizeof(intrec_t), M_DEVBUF, M_NOWAIT);
    if (rec == NULL)
        panic("register_int: malloc failed");
    rec->handler = handler;
    rec->maskptr = maskptr;
    rec->argument = arg;
    rec->name = name;
    rec->intr = intr;
    rec->next = NULL;

    list = &intlists[intr];

    /*
     * Create an interrupt thread if necessary; leave it in an unscheduled
     * state.
     */
    if ((td = ithreads[intr]) == NULL) {
        lwkt_create((void *)ithread_handler, (void *)intr, &ithreads[intr],
            &ithread_ary[intr], TDF_STOPREQ|TDF_INTTHREAD, -1,
            "ithread %d", intr);
        td = ithreads[intr];
        if (intr >= NHWI && intr < NHWI + NSWI)
            lwkt_setpri(td, TDPRI_SOFT_NORM);
        else
            lwkt_setpri(td, TDPRI_INT_MED);
    }

    /*
     * Add the record to the interrupt list.
     */
    crit_enter();       /* token */
    while (*list != NULL)
        list = &(*list)->next;
    *list = rec;
    crit_exit();
    return(td);
}
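
/*
 * Usage sketch (illustrative only; the "foo" names below are hypothetical
 * and do not exist in the tree).  A driver typically registers its handler
 * at attach time and removes it at detach time.  Passing a NULL maskptr
 * selects the dummy mask; the maskptr argument only matters for legacy
 * spl-based drivers.  register_int() returns the interrupt thread.
 *
 *      static void
 *      foo_intr(void *arg)
 *      {
 *          struct foo_softc *sc = arg;
 *          ...service and acknowledge the device...
 *      }
 *
 *      sc->sc_ithread = register_int(irq, foo_intr, sc, "foo", NULL);
 *      ...
 *      unregister_int(irq, foo_intr);
 */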

void
unregister_swi(int intr, inthand2_t *handler)
{
    if (intr < NHWI || intr >= NHWI + NSWI)
        panic("unregister_swi: bad intr %d", intr);
    unregister_int(intr, handler);
}

void
unregister_int(int intr, inthand2_t handler)
{
    intrec_t **list;
    intrec_t *rec;

    if (intr < 0 || intr >= NHWI + NSWI)
        panic("unregister_int: bad intr %d", intr);
    list = &intlists[intr];
    crit_enter();
    while ((rec = *list) != NULL) {
        if (rec->handler == (void *)handler) {
            *list = rec->next;
            break;
        }
        list = &rec->next;
    }
    crit_exit();
    if (rec != NULL) {
        free(rec, M_DEVBUF);
    } else {
        printf("warning: unregister_int: int %d handler %p not found\n",
            intr, handler);
    }
}

void
swi_setpriority(int intr, int pri)
{
    struct thread *td;

    if (intr < NHWI || intr >= NHWI + NSWI)
        panic("swi_setpriority: bad intr %d", intr);
    if ((td = ithreads[intr]) != NULL)
        lwkt_setpri(td, pri);
}

void
register_randintr(int intr)
{
    struct random_softc *sc = &irandom_ary[intr];
    sc->sc_intr = intr;
    sc->sc_enabled = 1;
}

void
unregister_randintr(int intr)
{
    struct random_softc *sc = &irandom_ary[intr];
    sc->sc_enabled = 0;
}
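
/*
 * Usage note (sketch, names illustrative): a driver whose interrupt timing
 * should feed the entropy pool simply pairs these calls with its
 * (un)register_int() calls; the interrupt thread then invokes
 * add_interrupt_randomness() on each pass while sc_enabled is set.
 *
 *      register_int(irq, foo_intr, sc, "foo", NULL);
 *      register_randintr(irq);
 */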

/*
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its irunning[] bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
 */
static void
sched_ithd_remote(void *arg)
{
    sched_ithd((int)arg);
}

void
sched_ithd(int intr)
{
    thread_t td;

    if ((td = ithreads[intr]) != NULL) {
        if (intlists[intr] == NULL) {
            printf("sched_ithd: stray interrupt %d\n", intr);
        } else {
            if (td->td_gd == mycpu) {
                irunning[intr] = 1;
                lwkt_schedule(td);      /* preemption handled internally */
            } else {
                lwkt_send_ipiq(td->td_gd, sched_ithd_remote, (void *)intr);
            }
        }
    } else {
        printf("sched_ithd: stray interrupt %d\n", intr);
    }
}
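
/*
 * For reference, the machine-dependent interrupt vector code is expected
 * to interact with this file roughly as follows (a sketch only; the real
 * code is platform specific and lives with the MD interrupt support):
 *
 *      hardware interrupt fires:
 *          mask the source in the interrupt controller
 *          sched_ithd(intr);           schedule the interrupt thread
 *
 *      ithread_handler() runs the registered handlers and then calls
 *      ithread_done(intr), which unmasks the source and deschedules the
 *      thread, or re-runs it if the platform recorded another pending
 *      interrupt in the meantime.
 */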

/*
 * This is run from a periodic SYSTIMER (and thus must be MP safe, the BGL
 * might not be held).
 */
static void
ithread_livelock_wakeup(systimer_t info)
{
    int intr = (int)info->data;
    thread_t td;

    if ((td = ithreads[intr]) != NULL)
        lwkt_schedule(td);
}


/*
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and with the BGL
 * held.
 *
 * The irunning state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is masked and sched_ithd() is called to schedule the interrupt
 * thread.  The HW interrupt remains disabled until all registered handlers
 * have run.  We then call ithread_done() to reenable the HW interrupt and
 * deschedule us until the next interrupt.
 *
 * We are responsible for atomically checking irunning[], and ithread_done()
 * is responsible for atomically checking for platform-specific delayed
 * interrupts.  irunning[] for our irq is only set in the context of our cpu,
 * so a critical section is a sufficient interlock.
 */
#define LIVELOCK_TIMEFRAME(freq)        ((freq) >> 2)   /* 1/4 second */

static void
ithread_handler(void *arg)
{
    int intr = (int)arg;
    int freq;
    u_int bticks;
    u_int cputicks;
    intrec_t **list = &intlists[intr];
    intrec_t *rec;
    intrec_t *nrec;
    struct random_softc *sc = &irandom_ary[intr];
    globaldata_t gd = mycpu;

    /*
     * The loop must be entered with one critical section held.
     */
    crit_enter_gd(gd);

    for (;;) {
        /*
         * We can get woken up by the livelock periodic code too; run the
         * handlers only if there is a real interrupt pending.  XXX
         *
         * Clear irunning[] prior to running the handlers to interlock
         * against new events occurring during processing of existing events.
         *
         * For now run each handler in a critical section.
         */
        irunning[intr] = 0;
        for (rec = *list; rec; rec = nrec) {
            nrec = rec->next;
            rec->handler(rec->argument);
        }

        /*
         * Do a quick exit/enter to catch any higher-priority
         * interrupt sources and so user/system/interrupt statistics
         * work for interrupt threads.
         */
        crit_exit_gd(gd);
        crit_enter_gd(gd);

        /*
         * This is our interrupt hook to add rate randomness to the random
         * number generator.
         */
        if (sc->sc_enabled)
            add_interrupt_randomness(intr);

        /*
         * This is our livelock test.  If we hit the rate limit we
         * limit ourselves to X interrupts/sec until the rate
         * falls below 50% of that value, then we unlimit again.
         *
         * XXX calling cputimer_count() is expensive but a livelock may
         * prevent other interrupts from occurring so we cannot use ticks.
         */
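        /*
         * Worked example (numbers assume the default sysctl settings and
         * are illustrative only): over a 1/4 second LIVELOCK_TIMEFRAME
         * window, 15000 interrupts give
         *
         *      freq = 15000 * sys_cputimer->freq / (sys_cputimer->freq / 4)
         *           = 60000 hz
         *
         * which exceeds the 50000 hz livelock_limit, so the interrupt is
         * throttled.  While limited, each pass sleeps on a oneshot timer of
         * 1 + 1000000 / livelock_fallback us (roughly 51 us at the default
         * 20000 hz), capping the rate near livelock_fallback until the
         * measured rate drops below half of it.
         */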
        cputicks = sys_cputimer->count();
        ++ill_count[intr];
        bticks = cputicks - ill_ticks[intr];
        ill_ticks[intr] = cputicks;
        if (bticks > sys_cputimer->freq)
            bticks = sys_cputimer->freq;

        switch(ill_state[intr]) {
        case LIVELOCK_NONE:
            ill_delta[intr] += bticks;
            if (ill_delta[intr] < LIVELOCK_TIMEFRAME(sys_cputimer->freq))
                break;
            freq = (int64_t)ill_count[intr] * sys_cputimer->freq /
                ill_delta[intr];
            ill_delta[intr] = 0;
            ill_count[intr] = 0;
            if (freq < livelock_limit)
                break;
            printf("intr %d at %d hz, livelocked! limiting at %d hz\n",
                intr, freq, livelock_fallback);
            ill_state[intr] = LIVELOCK_LIMITED;
            bticks = 0;
            /* force periodic check to avoid stale removal (if ints stop) */
            systimer_init_periodic(&ill_rtimer[intr], ithread_livelock_wakeup,
                (void *)intr, 1);
            /* fall through */
        case LIVELOCK_LIMITED:
            /*
             * Delay (us) before rearming the interrupt
             */
            systimer_init_oneshot(&ill_timer[intr], ithread_livelock_wakeup,
                (void *)intr, 1 + 1000000 / livelock_fallback);
            lwkt_deschedule_self(curthread);
            lwkt_switch();

            /* in case we were woken up by something else */
            systimer_del(&ill_timer[intr]);

            /*
             * Calculate interrupt rate (note that due to our delay it
             * will not exceed livelock_fallback).
             */
            ill_delta[intr] += bticks;
            if (ill_delta[intr] < LIVELOCK_TIMEFRAME(sys_cputimer->freq))
                break;
            freq = (int64_t)ill_count[intr] * sys_cputimer->freq /
                ill_delta[intr];
            ill_delta[intr] = 0;
            ill_count[intr] = 0;
            if (freq < (livelock_fallback >> 1)) {
                printf("intr %d at %d hz, removing livelock limit\n",
                    intr, freq);
                ill_state[intr] = LIVELOCK_NONE;
                systimer_del(&ill_rtimer[intr]);
            }
            break;
        }

        /*
         * There are two races here.  irunning[] is set by sched_ithd()
         * in the context of our cpu and is critical-section safe.  We
         * are responsible for checking it.  ipending is not critical
         * section safe and must be handled by the platform specific
         * ithread_done() routine.
         */
        if (irunning[intr] == 0)
            ithread_done(intr);
        /* must be in critical section on loop */
    }
    /* not reached */
}

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt, however,
 * is machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
    return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
        req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
    return (sysctl_handle_opaque(oidp, intrcnt,
        (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
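
/*
 * Example (userland sketch, not part of this file): the exported data can
 * be read with sysctlbyname(3), sizing the buffer at run time.  The counter
 * element type is machine dependent; u_long is assumed below, and the usual
 * libc headers are taken as given.  Illustrative only.
 *
 *      size_t len;
 *      size_t i;
 *      u_long *cnt;
 *
 *      if (sysctlbyname("hw.intrcnt", NULL, &len, NULL, 0) == 0 &&
 *          (cnt = malloc(len)) != NULL &&
 *          sysctlbyname("hw.intrcnt", cnt, &len, NULL, 0) == 0) {
 *              for (i = 0; i < len / sizeof(*cnt); ++i)
 *                      printf("intr %d: %lu\n", (int)i, cnt[i]);
 *      }
 */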