/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 * $DragonFly: src/sys/kern/kern_intr.c,v 1.24 2005/10/13 00:02:22 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/thread2.h>
#include <sys/random.h>
#include <sys/serialize.h>
#include <sys/bus.h>

#include <machine/ipl.h>
#include <machine/frame.h>

#include <sys/interrupt.h>

typedef struct intrec {
    struct intrec *next;
    inthand2_t	*handler;
    void	*argument;
    char	*name;
    int		intr;
    int		intr_flags;
    struct lwkt_serialize *serializer;
} *intrec_t;

struct intr_info {
    intrec_t	i_reclist;
    struct thread i_thread;
    struct random_softc i_random;
    int		i_running;
    long	i_count;
    int		i_fast;
    int		i_slow;
    int		i_valid_thread;
} intr_info_ary[NHWI + NSWI];

int intr_info_size = sizeof(intr_info_ary) / sizeof(intr_info_ary[0]);
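
/*
 * Slots 0..NHWI-1 of intr_info_ary describe hardware interrupts and slots
 * NHWI..NHWI+NSWI-1 describe software interrupts, matching the range
 * checks in register_swi() and register_int() below.
 */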

#define LIVELOCK_NONE		0
#define LIVELOCK_LIMITED	1

static int livelock_limit = 50000;
static int livelock_fallback = 20000;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
	CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_fallback,
	CTLFLAG_RW, &livelock_fallback, 0, "Livelock interrupt fallback rate");
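
/*
 * Both thresholds are runtime tunable; for example (illustrative):
 *
 *	sysctl kern.livelock_limit=30000
 *	sysctl kern.livelock_fallback=10000
 */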

static void ithread_handler(void *arg);

/*
 * Register an SWI or INTerrupt handler.
 */
void *
register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
		struct lwkt_serialize *serializer)
{
    if (intr < NHWI || intr >= NHWI + NSWI)
	panic("register_swi: bad intr %d", intr);
    return(register_int(intr, handler, arg, name, serializer, 0));
}

void *
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
		struct lwkt_serialize *serializer, int intr_flags)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec;

    if (intr < 0 || intr >= NHWI + NSWI)
	panic("register_int: bad intr %d", intr);
    if (name == NULL)
	name = "???";
    info = &intr_info_ary[intr];

    rec = malloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
    rec->name = malloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
    strcpy(rec->name, name);

    rec->handler = handler;
    rec->argument = arg;
    rec->intr = intr;
    rec->intr_flags = intr_flags;
    rec->next = NULL;
    rec->serializer = serializer;

    list = &info->i_reclist;

    /*
     * Keep track of how many fast and slow interrupts we have.
     */
    if (intr_flags & INTR_FAST)
	++info->i_fast;
    else
	++info->i_slow;

    /*
     * Create an interrupt thread if necessary, leave it in an unscheduled
     * state.
     */
    if (info->i_valid_thread == 0) {
	info->i_valid_thread = 1;
	lwkt_create((void *)ithread_handler, (void *)intr, NULL,
		    &info->i_thread, TDF_STOPREQ | TDF_INTTHREAD, -1,
		    "ithread %d", intr);
	if (intr >= NHWI && intr < NHWI + NSWI)
	    lwkt_setpri(&info->i_thread, TDPRI_SOFT_NORM);
	else
	    lwkt_setpri(&info->i_thread, TDPRI_INT_MED);
	info->i_thread.td_preemptable = lwkt_preempt;
    }

    /*
     * Add the record to the interrupt list.
     */
    crit_enter();	/* token */
    while (*list != NULL)
	list = &(*list)->next;
    *list = rec;
    crit_exit();
    return(rec);
}
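
/*
 * Illustrative usage (not part of this file): a hypothetical driver
 * "mydev" with softc pointer sc and interrupt routine mydev_intr()
 * might register and later remove a normal (threaded) handler like so:
 *
 *	void *id;
 *
 *	id = register_int(irq, mydev_intr, sc, "mydev", NULL, 0);
 *	...
 *	unregister_int(id);
 */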

int
unregister_swi(void *id)
{
    return(unregister_int(id));
}

int
unregister_int(void *id)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec;
    int intr;

    intr = ((intrec_t)id)->intr;

    if (intr < 0 || intr >= NHWI + NSWI)
	panic("unregister_int: bad intr %d", intr);

    info = &intr_info_ary[intr];

    /*
     * Remove the interrupt descriptor.
     */
    crit_enter();
    list = &info->i_reclist;
    while ((rec = *list) != NULL) {
	if (rec == id) {
	    *list = rec->next;
	    break;
	}
	list = &rec->next;
    }
    crit_exit();

    /*
     * Free it, adjust interrupt type counts.
     */
    if (rec != NULL) {
	if (rec->intr_flags & INTR_FAST)
	    --info->i_fast;
	else
	    --info->i_slow;
	free(rec->name, M_DEVBUF);
	free(rec, M_DEVBUF);
    } else {
	printf("warning: unregister_int: int %d handler for %s not found\n",
	       intr, ((intrec_t)id)->name);
    }

    /*
     * Return the number of interrupt vectors still registered on this intr.
     */
    return(info->i_fast + info->i_slow);
}

int
get_registered_intr(void *id)
{
    return(((intrec_t)id)->intr);
}

const char *
get_registered_name(int intr)
{
    intrec_t rec;

    if (intr < 0 || intr >= NHWI + NSWI)
	panic("get_registered_name: bad intr %d", intr);

    if ((rec = intr_info_ary[intr].i_reclist) == NULL)
	return(NULL);
    else if (rec->next)
	return("mux");
    else
	return(rec->name);
}

int
count_registered_ints(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= NHWI + NSWI)
	panic("count_registered_ints: bad intr %d", intr);
    info = &intr_info_ary[intr];
    return(info->i_fast + info->i_slow);
}

long
get_interrupt_counter(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= NHWI + NSWI)
	panic("get_interrupt_counter: bad intr %d", intr);
    info = &intr_info_ary[intr];
    return(info->i_count);
}
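
/*
 * Illustrative only: a subsystem could estimate an interrupt rate from
 * two get_interrupt_counter() samples taken roughly one second apart
 * (the tsleep() usage here is an assumption, not part of this API):
 *
 *	long c0, rate;
 *
 *	c0 = get_interrupt_counter(irq);
 *	tsleep(&c0, 0, "irqrate", hz);
 *	rate = get_interrupt_counter(irq) - c0;
 */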

void
swi_setpriority(int intr, int pri)
{
    struct intr_info *info;

    if (intr < NHWI || intr >= NHWI + NSWI)
	panic("swi_setpriority: bad intr %d", intr);
    info = &intr_info_ary[intr];
    if (info->i_valid_thread)
	lwkt_setpri(&info->i_thread, pri);
}

void
register_randintr(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= NHWI + NSWI)
	panic("register_randintr: bad intr %d", intr);
    info = &intr_info_ary[intr];
    info->i_random.sc_intr = intr;
    info->i_random.sc_enabled = 1;
}

void
unregister_randintr(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= NHWI + NSWI)
	panic("unregister_randintr: bad intr %d", intr);
    info = &intr_info_ary[intr];
    info->i_random.sc_enabled = 0;
}
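
/*
 * Illustrative only: a driver that wants its interrupt arrival times to
 * feed the entropy pool would typically call register_randintr(irq)
 * after registering its handler, and unregister_randintr(irq) on detach.
 */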

/*
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its i_running bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
 */
static void
sched_ithd_remote(void *arg)
{
    sched_ithd((int)arg);
}

void
sched_ithd(int intr)
{
    struct intr_info *info;

    info = &intr_info_ary[intr];

    ++info->i_count;
    if (info->i_valid_thread) {
	if (info->i_reclist == NULL) {
	    printf("sched_ithd: stray interrupt %d\n", intr);
	} else {
	    if (info->i_thread.td_gd == mycpu) {
		info->i_running = 1;
		/* preemption handled internally */
		lwkt_schedule(&info->i_thread);
	    } else {
		lwkt_send_ipiq(info->i_thread.td_gd,
			       sched_ithd_remote, (void *)intr);
	    }
	}
    } else {
	printf("sched_ithd: stray interrupt %d\n", intr);
    }
}

/*
 * This is run from a periodic SYSTIMER (and thus must be MP safe, the BGL
 * might not be held).
 */
static void
ithread_livelock_wakeup(systimer_t st)
{
    struct intr_info *info;

    info = &intr_info_ary[(int)st->data];
    if (info->i_valid_thread)
	lwkt_schedule(&info->i_thread);
}

/*
 * This function is called directly from the ICU or APIC vector code assembly
 * to process an interrupt.  The critical section and interrupt deferral
 * checks have already been done but the function is entered WITHOUT
 * a critical section held.  The BGL may or may not be held.
 *
 * Must return non-zero if we do not want the vector code to re-enable
 * the interrupt (which we don't if we have to schedule the interrupt).
 */
int ithread_fast_handler(struct intrframe frame);

int
ithread_fast_handler(struct intrframe frame)
{
    int intr;
    struct intr_info *info;
    struct intrec **list;
    int must_schedule;
#ifdef SMP
    int got_mplock;
#endif
    intrec_t rec, next_rec;
    globaldata_t gd;

    intr = frame.if_vec;
    gd = mycpu;

    info = &intr_info_ary[intr];

    /*
     * If we are not processing any FAST interrupts, just schedule the thing.
     * (since we aren't in a critical section, this can result in a
     * preemption)
     */
    if (info->i_fast == 0) {
	sched_ithd(intr);
	return(1);
    }

    /*
     * This should not normally occur since interrupts ought to be
     * masked if the ithread has been scheduled or is running.
     */
    if (info->i_running)
	return(1);

    /*
     * Bump the interrupt nesting level to process any FAST interrupts.
     * Obtain the MP lock as necessary.  If the MP lock cannot be obtained,
     * schedule the interrupt thread to deal with the issue instead.
     *
     * To reduce overhead, just leave the MP lock held once it has been
     * obtained.
     */
    crit_enter_gd(gd);
    ++gd->gd_intr_nesting_level;
    ++gd->gd_cnt.v_intr;
    must_schedule = info->i_slow;
#ifdef SMP
    got_mplock = 0;
#endif

    list = &info->i_reclist;
    for (rec = *list; rec; rec = next_rec) {
	next_rec = rec->next;	/* rec may be invalid after call */

	if (rec->intr_flags & INTR_FAST) {
#ifdef SMP
	    if ((rec->intr_flags & INTR_MPSAFE) == 0 && got_mplock == 0) {
		if (try_mplock() == 0) {
		    /*
		     * XXX forward to the cpu holding the MP lock
		     */
		    must_schedule = 1;
		    break;
		}
		got_mplock = 1;
	    }
#endif
	    if (rec->serializer) {
		must_schedule += lwkt_serialize_handler_try(
				    rec->serializer, rec->handler,
				    rec->argument, &frame);
	    } else {
		rec->handler(rec->argument, &frame);
	    }
	}
    }

    /*
     * Cleanup.
     */
    --gd->gd_intr_nesting_level;
#ifdef SMP
    if (got_mplock)
	rel_mplock();
#endif
    crit_exit_gd(gd);

    /*
     * If we had a problem, schedule the thread to catch the missed
     * records (it will just re-run all of them).  A return value of 0
     * indicates that all handlers have been run and the interrupt can
     * be re-enabled, and a non-zero return indicates that the interrupt
     * thread controls re-enablement.
     */
    if (must_schedule)
	sched_ithd(intr);
    else
	++info->i_count;
    return(must_schedule);
}
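
/*
 * Illustrative only: to have a handler run directly from
 * ithread_fast_handler() rather than in the ithread, a driver would
 * register with INTR_FAST, typically together with a serializer
 * (mynic_intr and sc are hypothetical):
 *
 *	register_int(irq, mynic_intr, sc, "mynic",
 *		     &sc->sc_serializer, INTR_FAST);
 */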

#if 0

6:	;							\
	/* could not get the MP lock, forward the interrupt */	\
	movl	mp_lock, %eax ;		/* check race */	\
	cmpl	$MP_FREE_LOCK,%eax ;				\
	je	2b ;						\
	incl	PCPU(cnt)+V_FORWARDED_INTS ;			\
	subl	$12,%esp ;					\
	movl	$irq_num,8(%esp) ;				\
	movl	$forward_fastint_remote,4(%esp) ;		\
	movl	%eax,(%esp) ;					\
	call	lwkt_send_ipiq_bycpu ;				\
	addl	$12,%esp ;					\
	jmp	5f ;

#endif

/*
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and with the BGL
 * held.
 *
 * The i_running state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd() is called.  The HW interrupt
 * remains disabled until all routines have run.  We then call
 * ithread_done() to reenable the HW interrupt and deschedule us until the
 * next interrupt.
 *
 * We are responsible for atomically checking i_running and ithread_done()
 * is responsible for atomically checking for platform-specific delayed
 * interrupts.  i_running for our irq is only set in the context of our cpu,
 * so a critical section is a sufficient interlock.
 */
#define LIVELOCK_TIMEFRAME(freq)	((freq) >> 2)	/* 1/4 second */

static void
ithread_handler(void *arg)
{
    struct intr_info *info;
    u_int cputicks;
    u_int bticks;
    int intr;
    int freq;
    struct intrec **list;
    intrec_t rec, nrec;
    globaldata_t gd = mycpu;
    struct systimer ill_timer;	/* enforced freq. timer */
    struct systimer ill_rtimer;	/* recovery timer */
    u_int ill_count = 0;	/* interrupt livelock counter */
    u_int ill_ticks = 0;	/* track elapsed to calculate freq */
    u_int ill_delta = 0;	/* track elapsed to calculate freq */
    int ill_state = 0;		/* current state */

    intr = (int)arg;
    info = &intr_info_ary[intr];
    list = &info->i_reclist;

    /*
     * The loop must be entered with one critical section held.
     */
    crit_enter_gd(gd);

    for (;;) {
	/*
	 * We can get woken up by the livelock periodic code too; run the
	 * handlers only if there is a real interrupt pending.  XXX
	 *
	 * Clear i_running prior to running the handlers to interlock
	 * against new events occurring during processing of existing
	 * events.
	 *
	 * Run each handler in a critical section.  Note that we run both
	 * FAST and SLOW designated service routines.
	 */
	info->i_running = 0;
	for (rec = *list; rec; rec = nrec) {
	    nrec = rec->next;
	    if (rec->serializer) {
		lwkt_serialize_handler_call(rec->serializer,
					    rec->handler, rec->argument, NULL);
	    } else {
		rec->handler(rec->argument, NULL);
	    }
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources and so user/system/interrupt statistics
	 * work for interrupt threads.
	 */
	crit_exit_gd(gd);
	crit_enter_gd(gd);

	/*
	 * This is our interrupt hook to add rate randomness to the random
	 * number generator.
	 */
	if (info->i_random.sc_enabled)
	    add_interrupt_randomness(intr);

	/*
	 * This is our livelock test.  If we hit the rate limit we
	 * limit ourselves to X interrupts/sec until the rate
	 * falls below 50% of that value, then we unlimit again.
	 *
	 * XXX calling cputimer_count() is expensive but a livelock may
	 * prevent other interrupts from occurring so we cannot use ticks.
	 */
	cputicks = sys_cputimer->count();
	++ill_count;
	bticks = cputicks - ill_ticks;
	ill_ticks = cputicks;
	if (bticks > sys_cputimer->freq)
	    bticks = sys_cputimer->freq;

	switch(ill_state) {
	case LIVELOCK_NONE:
	    ill_delta += bticks;
	    if (ill_delta < LIVELOCK_TIMEFRAME(sys_cputimer->freq))
		break;
	    freq = (int64_t)ill_count * sys_cputimer->freq / ill_delta;
	    ill_delta = 0;
	    ill_count = 0;
	    if (freq < livelock_limit)
		break;
	    printf("intr %d at %d hz, livelocked! limiting at %d hz\n",
		   intr, freq, livelock_fallback);
	    ill_state = LIVELOCK_LIMITED;
	    bticks = 0;
	    /* force periodic check to avoid stale removal (if ints stop) */
	    systimer_init_periodic(&ill_rtimer, ithread_livelock_wakeup,
				   (void *)intr, 1);
	    /* fall through */
	case LIVELOCK_LIMITED:
	    /*
	     * Delay (us) before rearming the interrupt.
	     */
	    systimer_init_oneshot(&ill_timer, ithread_livelock_wakeup,
				  (void *)intr, 1 + 1000000 / livelock_fallback);
	    lwkt_deschedule_self(curthread);
	    lwkt_switch();

	    /* in case we were woken up by something else */
	    systimer_del(&ill_timer);

	    /*
	     * Calculate interrupt rate (note that due to our delay it
	     * will not exceed livelock_fallback).
	     */
	    ill_delta += bticks;
	    if (ill_delta < LIVELOCK_TIMEFRAME(sys_cputimer->freq))
		break;
	    freq = (int64_t)ill_count * sys_cputimer->freq / ill_delta;
	    ill_delta = 0;
	    ill_count = 0;
	    if (freq < (livelock_fallback >> 1)) {
		printf("intr %d at %d hz, removing livelock limit\n",
		       intr, freq);
		ill_state = LIVELOCK_NONE;
		systimer_del(&ill_rtimer);
	    }
	    break;
	}

	/*
	 * There are two races here.  i_running is set by sched_ithd()
	 * in the context of our cpu and is critical-section safe.  We
	 * are responsible for checking it.  ipending is not critical
	 * section safe and must be handled by the platform specific
	 * ithread_done() routine.
	 */
	if (info->i_running == 0)
	    ithread_done(intr);
	/* must be in critical section on loop */
    }
    /* not reached */
}
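
/*
 * Worked example of the livelock math above (illustrative): with the
 * default livelock_fallback of 20000 hz the oneshot delay programmed
 * into ill_timer is 1 + 1000000 / 20000 = 51 microseconds, so the
 * interrupt is rearmed just under 20000 times per second.  The limit
 * is removed once the measured rate falls below livelock_fallback >> 1
 * (10000 hz).
 */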

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */

static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
    struct intr_info *info;
    intrec_t rec;
    int error = 0;
    int len;
    int intr;
    char buf[64];

    for (intr = 0; error == 0 && intr < NHWI + NSWI; ++intr) {
	info = &intr_info_ary[intr];

	len = 0;
	buf[0] = 0;
	for (rec = info->i_reclist; rec; rec = rec->next) {
	    snprintf(buf + len, sizeof(buf) - len, "%s%s",
		     (len ? "/" : ""), rec->name);
	    len += strlen(buf + len);
	}
	if (len == 0) {
	    snprintf(buf, sizeof(buf), "irq%d", intr);
	    len = strlen(buf);
	}
	error = SYSCTL_OUT(req, buf, len + 1);
    }
    return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
	NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
    struct intr_info *info;
    int error = 0;
    int intr;

    for (intr = 0; intr < NHWI + NSWI; ++intr) {
	info = &intr_info_ary[intr];

	error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
	if (error)
	    break;
    }
    return(error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
	NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
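
/*
 * Illustrative only: userland readers such as systat treat hw.intrcnt
 * as an opaque array of longs (one i_count per interrupt) and
 * hw.intrnames as a packed list of NUL-terminated name strings, e.g.:
 *
 *	sysctl -o hw.intrnames
 */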