2 * Copyright (c) 1996, by Steve Passe
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * $FreeBSD: src/sys/i386/i386/mpapic.c,v 1.37.2.7 2003/01/25 02:31:47 peter Exp $
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
32 #include <sys/machintr.h>
33 #include <machine/globaldata.h>
34 #include <machine/smp.h>
35 #include <machine/md_var.h>
36 #include <machine/pmap.h>
37 #include <machine_base/apic/mpapic.h>
38 #include <machine_base/apic/ioapic_abi.h>
39 #include <machine/segments.h>
40 #include <sys/thread2.h>
42 #include <machine/intr_machdep.h>
/*
 * I/O APIC bookkeeping types and the global LAPIC register window.
 * NOTE(review): sampled listing -- struct opening/closing braces and
 * some fields are missing from view.  io_link appears to be the list
 * linkage of struct ioapic_info; ioc_list/ioc_intsrc are fields of
 * struct ioapic_conf (see uses via ioapic_conf.ioc_list below).
 */
46 #define IOAPIC_COUNT_MAX 16
/* Low 4 bits only: the I/O APIC ID register field is 4 bits wide. */
47 #define IOAPIC_ID_MASK (IOAPIC_COUNT_MAX - 1)
56 TAILQ_ENTRY(ioapic_info) io_link;
58 TAILQ_HEAD(ioapic_info_list, ioapic_info);
/* One recorded interrupt-source override: trigger/polarity per ISA IRQ. */
60 struct ioapic_intsrc {
62 enum intr_trigger int_trig;
63 enum intr_polarity int_pola;
67 struct ioapic_info_list ioc_list;
68 struct ioapic_intsrc ioc_intsrc[16]; /* XXX magic number */
/* Uncached mapping of the local APIC registers (set up in lapic_map()). */
71 volatile lapic_t *lapic;
/* LAPIC timer internals (file-local). */
73 static void lapic_timer_calibrate(void);
74 static void lapic_timer_set_divisor(int);
75 static void lapic_timer_fixup_handler(void *);
76 static void lapic_timer_restart_handler(void *);
/* Entry points called from the Xtimer interrupt path (assembly side). */
78 void lapic_timer_process(void);
79 void lapic_timer_process_frame(struct intrframe *);
80 void lapic_timer_always(struct intrframe *);
/* Tunable: hw.lapic_timer_enable -- use the LAPIC timer as the
 * system interrupt cputimer (default on). */
82 static int lapic_timer_enable = 1;
83 TUNABLE_INT("hw.lapic_timer_enable", &lapic_timer_enable);
/* cputimer_intr method implementations registered below. */
85 static void lapic_timer_intr_reload(struct cputimer_intr *, sysclock_t);
86 static void lapic_timer_intr_enable(struct cputimer_intr *);
87 static void lapic_timer_intr_restart(struct cputimer_intr *);
88 static void lapic_timer_intr_pmfixup(struct cputimer_intr *);
90 static int lapic_unused_apic_id(int);
/* I/O APIC discovery/programming helpers. */
92 static void ioapic_setup(const struct ioapic_info *);
93 static int ioapic_alloc_apic_id(int);
94 static void ioapic_set_apic_id(const struct ioapic_info *);
95 static void ioapic_gsi_setup(int);
96 static const struct ioapic_info *
97 ioapic_gsi_search(int);
98 static void ioapic_pin_prog(void *, int, int,
99 enum intr_trigger, enum intr_polarity, uint32_t);
/*
 * LAPIC timer registered as an interrupt cputimer.  freq is filled in
 * later by lapic_timer_calibrate().
 */
101 static struct cputimer_intr lapic_cputimer_intr = {
103 .reload = lapic_timer_intr_reload,
104 .enable = lapic_timer_intr_enable,
105 .config = cputimer_intr_default_config,
106 .restart = lapic_timer_intr_restart,
107 .pmfixup = lapic_timer_intr_pmfixup,
108 .initclock = cputimer_intr_default_initclock,
109 .next = SLIST_ENTRY_INITIALIZER,
111 .type = CPUTIMER_INTR_LAPIC,
112 .prio = CPUTIMER_INTR_PRIO_LAPIC,
113 .caps = CPUTIMER_INTR_CAP_NONE
/*
 * Index into lapic_timer_divisors[] chosen by calibration; -1 until
 * lapic_timer_calibrate() has run.
 */
116 static int lapic_timer_divisor_idx = -1;
/* Divide-configuration values tried in order during calibration. */
117 static const uint32_t lapic_timer_divisors[] = {
118 APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
119 APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128, APIC_TDCR_1
121 #define APIC_TIMER_NDIVISORS (int)(NELEM(lapic_timer_divisors))
/* Global I/O APIC configuration state (list of I/O APICs + intsrc overrides). */
123 static struct ioapic_conf ioapic_conf;
/*
 * lapic_init(bsp) -- enable the local APIC on the calling CPU and
 * program its local vector table.  On the BSP this also installs the
 * shared IDT vectors (spurious, TLB shootdown, IPIQ, timer, cpustop)
 * and registers/calibrates the LAPIC timer as a cputimer.
 * NOTE(review): sampled listing -- several lines (locals, some braces
 * and if/else headers) are missing from view.
 */
133 * Enable LAPIC, configure interrupts.
136 lapic_init(boolean_t bsp)
144 * Since IDT is shared between BSP and APs, these vectors
145 * only need to be installed once; we do it on BSP.
148 /* Install a 'Spurious INTerrupt' vector */
149 setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
150 SDT_SYSIGT, SEL_KPL, 0);
152 /* Install an inter-CPU IPI for TLB invalidation */
153 setidt(XINVLTLB_OFFSET, Xinvltlb,
154 SDT_SYSIGT, SEL_KPL, 0);
156 /* Install an inter-CPU IPI for IPIQ messaging */
157 setidt(XIPIQ_OFFSET, Xipiq,
158 SDT_SYSIGT, SEL_KPL, 0);
160 /* Install a timer vector */
161 setidt(XTIMER_OFFSET, Xtimer,
162 SDT_SYSIGT, SEL_KPL, 0);
164 /* Install an inter-CPU IPI for CPU stop/restart */
165 setidt(XCPUSTOP_OFFSET, Xcpustop,
166 SDT_SYSIGT, SEL_KPL, 0);
170 * Setup LINT0 as ExtINT on the BSP. This is theoretically an
171 * aggregate interrupt input from the 8259. The INTA cycle
172 * will be routed to the external controller (the 8259) which
173 * is expected to supply the vector.
175 * Must be setup edge triggered, active high.
177 * Disable LINT0 on BSP, if I/O APIC is enabled.
179 * Disable LINT0 on the APs. It doesn't matter what delivery
180 * mode we use because we leave it masked.
182 temp = lapic->lvt_lint0;
183 temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
184 APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
/* NOTE(review): the branch headers selecting between the three cases
 * below (BSP+no-ioapic / BSP+ioapic / AP) are not visible here. */
186 temp |= APIC_LVT_DM_EXTINT;
188 temp |= APIC_LVT_MASKED;
190 temp |= APIC_LVT_DM_FIXED | APIC_LVT_MASKED;
192 lapic->lvt_lint0 = temp;
195 * Setup LINT1 as NMI.
197 * Must be setup edge trigger, active high.
199 * Enable LINT1 on BSP, if I/O APIC is enabled.
201 * Disable LINT1 on the APs.
203 temp = lapic->lvt_lint1;
204 temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
205 APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
206 temp |= APIC_LVT_MASKED | APIC_LVT_DM_NMI;
207 if (bsp && apic_io_enable)
208 temp &= ~APIC_LVT_MASKED;
209 lapic->lvt_lint1 = temp;
212 * Mask the LAPIC error interrupt, LAPIC performance counter
215 lapic->lvt_error = lapic->lvt_error | APIC_LVT_MASKED;
216 lapic->lvt_pcint = lapic->lvt_pcint | APIC_LVT_MASKED;
219 * Set LAPIC timer vector and mask the LAPIC timer interrupt.
221 timer = lapic->lvt_timer;
222 timer &= ~APIC_LVTT_VECTOR;
223 timer |= XTIMER_OFFSET;
224 timer |= APIC_LVTT_MASKED;
225 lapic->lvt_timer = timer;
228 * Set the Task Priority Register as needed. At the moment allow
229 * interrupts on all cpus (the APs will remain CLId until they are
230 * ready to deal). We could disable all but IPIs by setting
231 * temp |= TPR_IPI for cpu != 0.
234 temp &= ~APIC_TPR_PRIO; /* clear priority field */
235 #ifdef SMP /* APIC-IO */
236 if (!apic_io_enable) {
239 * If we are NOT running the IO APICs, the LAPIC will only be used
240 * for IPIs. Set the TPR to prevent any unintentional interrupts.
243 #ifdef SMP /* APIC-IO */
/* Spurious-vector register: enable the LAPIC via the SVR. */
252 temp |= APIC_SVR_ENABLE; /* enable the LAPIC */
253 temp &= ~APIC_SVR_FOCUS_DISABLE; /* enable lopri focus processor */
256 * Set the spurious interrupt vector. The low 4 bits of the vector
/* Hardware requires the spurious vector's low nibble to be all 1s. */
259 if ((XSPURIOUSINT_OFFSET & 0x0F) != 0x0F)
260 panic("bad XSPURIOUSINT_OFFSET: 0x%08x", XSPURIOUSINT_OFFSET);
261 temp &= ~APIC_SVR_VECTOR;
262 temp |= XSPURIOUSINT_OFFSET;
267 * Pump out a few EOIs to clean out interrupts that got through
268 * before we were able to set the TPR.
/* BSP only (presumably): calibrate, then register/select the LAPIC
 * cputimer if enabled; APs just inherit the calibrated divisor. */
275 lapic_timer_calibrate();
276 if (lapic_timer_enable) {
277 cputimer_intr_register(&lapic_cputimer_intr);
278 cputimer_intr_select(&lapic_cputimer_intr, 0);
281 lapic_timer_set_divisor(lapic_timer_divisor_idx);
285 apic_dump("apic_initialize()");
/*
 * lapic_timer_set_divisor(idx) -- program the timer divide-configuration
 * register from the calibration table; idx must be a valid table index.
 */
289 lapic_timer_set_divisor(int divisor_idx)
291 KKASSERT(divisor_idx >= 0 && divisor_idx < APIC_TIMER_NDIVISORS);
292 lapic->dcr_timer = lapic_timer_divisors[divisor_idx];
/*
 * lapic_timer_oneshot(count) -- clear periodic mode in the timer LVT,
 * then arm a one-shot countdown of 'count' ticks.
 */
296 lapic_timer_oneshot(u_int count)
300 value = lapic->lvt_timer;
301 value &= ~APIC_LVTT_PERIODIC;
302 lapic->lvt_timer = value;
303 lapic->icr_timer = count;
/*
 * lapic_timer_oneshot_quick(count) -- rearm the countdown only; assumes
 * the LVT is already configured for one-shot mode (hot path).
 */
307 lapic_timer_oneshot_quick(u_int count)
309 lapic->icr_timer = count;
/*
 * lapic_timer_calibrate() -- find a timer divisor under which the LAPIC
 * timer does not wrap over the measurement interval, and derive the
 * timer frequency from the observed countdown.
 * NOTE(review): the delay between arming the timer and reading
 * ccr_timer is in lines not visible here.
 */
313 lapic_timer_calibrate(void)
317 /* Try to calibrate the local APIC timer. */
318 for (lapic_timer_divisor_idx = 0;
319 lapic_timer_divisor_idx < APIC_TIMER_NDIVISORS;
320 lapic_timer_divisor_idx++) {
321 lapic_timer_set_divisor(lapic_timer_divisor_idx);
322 lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
/* Ticks consumed during the measurement window. */
324 value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
/* A full wrap (value == MAX_COUNT) means this divisor is too fast. */
325 if (value != APIC_TIMER_MAX_COUNT)
328 if (lapic_timer_divisor_idx >= APIC_TIMER_NDIVISORS)
329 panic("lapic: no proper timer divisor?!\n");
/* /2 suggests the measurement window was half a second -- TODO confirm. */
330 lapic_cputimer_intr.freq = value / 2;
332 kprintf("lapic: divisor index %d, frequency %u Hz\n",
333 lapic_timer_divisor_idx, lapic_cputimer_intr.freq);
/*
 * lapic_timer_process_oncpu(gd, frame) -- common timer-interrupt body:
 * mark the per-cpu timer idle and dispatch any pending systimers.
 */
337 lapic_timer_process_oncpu(struct globaldata *gd, struct intrframe *frame)
341 gd->gd_timer_running = 0;
343 count = sys_cputimer->count();
344 if (TAILQ_FIRST(&gd->gd_systimerq) != NULL)
345 systimer_intr(&count, 0, frame);
/* Frameless variant (no interrupt frame available). */
349 lapic_timer_process(void)
351 lapic_timer_process_oncpu(mycpu, NULL);
/* Variant invoked with the interrupt frame from Xtimer. */
355 lapic_timer_process_frame(struct intrframe *frame)
357 lapic_timer_process_oncpu(mycpu, frame);
/*
 * lapic_timer_always(frame) -- MANUAL DEBUG watchdog run from every
 * LAPIC timer tick: scribbles per-cpu progress into VGA text memory
 * and panics if a cpu's ticks stop advancing or drift from the others.
 * Compiled-out by default (the enclosing #if is not visible here).
 */
361 * This manual debugging code is called unconditionally from Xtimer
362 * (the lapic timer interrupt) whether the current thread is in a
363 * critical section or not) and can be useful in tracking down lockups.
365 * NOTE: MANUAL DEBUG CODE
368 static int saveticks[SMP_MAXCPU];
369 static int savecounts[SMP_MAXCPU];
373 lapic_timer_always(struct intrframe *frame)
376 globaldata_t gd = mycpu;
377 int cpu = gd->gd_cpuid;
/* Direct-map address of VGA text memory; one 80-column row per cpu. */
383 gptr = (short *)0xFFFFFFFF800b8000 + 80 * cpu;
/* Spin the first character's low byte as a visible heartbeat. */
384 *gptr = ((*gptr + 1) & 0x00FF) | 0x0700;
387 ksnprintf(buf, sizeof(buf), " %p %16s %d %16s ",
388 (void *)frame->if_rip, gd->gd_curthread->td_comm, ticks,
390 for (i = 0; buf[i]; ++i) {
391 gptr[i] = 0x0700 | (unsigned char)buf[i];
/* Reset the stall counter whenever global 'ticks' advances. */
395 if (saveticks[gd->gd_cpuid] != ticks) {
396 saveticks[gd->gd_cpuid] = ticks;
397 savecounts[gd->gd_cpuid] = 0;
399 ++savecounts[gd->gd_cpuid];
/* ~2000 timer ticks with no 'ticks' progress => this cpu is stuck. */
400 if (savecounts[gd->gd_cpuid] > 2000 && panicstr == NULL) {
401 panic("cpud %d panicing on ticks failure",
/* Cross-check: panic if any other cpu's view of ticks drifts >10. */
404 for (i = 0; i < ncpus; ++i) {
406 if (saveticks[i] && panicstr == NULL) {
407 delta = saveticks[i] - ticks;
408 if (delta < -10 || delta > 10) {
409 panic("cpu %d panicing on cpu %d watchdog",
/*
 * lapic_timer_intr_reload(cti, reload) -- cputimer reload method.
 * Converts 'reload' from sys_cputimer units to LAPIC timer ticks and
 * rearms the one-shot.  If the timer is already running, only shorten
 * it (never lengthen), so the earliest deadline wins.
 * NOTE(review): clamping of tiny/overlarge reload values happens in
 * lines not visible here.
 */
419 lapic_timer_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
421 struct globaldata *gd = mycpu;
423 reload = (int64_t)reload * cti->freq / sys_cputimer->freq;
427 if (gd->gd_timer_running) {
428 if (reload < lapic->ccr_timer)
429 lapic_timer_oneshot_quick(reload);
431 gd->gd_timer_running = 1;
432 lapic_timer_oneshot_quick(reload);
/*
 * lapic_timer_intr_enable(cti) -- cputimer enable method: unmask the
 * timer LVT in one-shot mode, then apply the AMD C1E fixup locally.
 */
437 lapic_timer_intr_enable(struct cputimer_intr *cti __unused)
441 timer = lapic->lvt_timer;
442 timer &= ~(APIC_LVTT_MASKED | APIC_LVTT_PERIODIC);
443 lapic->lvt_timer = timer;
445 lapic_timer_fixup_handler(NULL);
/*
 * lapic_timer_fixup_handler(arg) -- detect and disable AMD C1E, which
 * stops the LAPIC timer in deep halt.  Clears C1eOnCmpHalt/SmiOnCmpHalt
 * in the Interrupt Pending Message MSR (0xc0010055) per AMD BKDG #32559.
 * 'arg', when non-NULL, appears to receive a started/handled flag --
 * writes to it are in lines not visible here; verify against callers.
 */
449 lapic_timer_fixup_handler(void *arg)
456 if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
458 * Detect the presence of C1E capability mostly on latest
459 * dual-cores (or future) k8 family. This feature renders
460 * the local APIC timer dead, so we disable it by reading
461 * the Interrupt Pending Message register and clearing both
462 * C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
465 * "BIOS and Kernel Developer's Guide for AMD NPT
466 * Family 0Fh Processors"
467 * #32559 revision 3.00
/* Family 0Fh, model >= 0x40 (K8 revision F and later). */
469 if ((cpu_id & 0x00000f00) == 0x00000f00 &&
470 (cpu_id & 0x0fff0000) >= 0x00040000) {
473 msr = rdmsr(0xc0010055);
/* Bits 27|28 set => C1E active; clear them. */
474 if (msr & 0x18000000) {
475 struct globaldata *gd = mycpu;
477 kprintf("cpu%d: AMD C1E detected\n",
479 wrmsr(0xc0010055, msr & ~0x18000000ULL);
482 * We are kinda stalled;
/* Kick the timer with a tiny one-shot so it resumes ticking. */
485 gd->gd_timer_running = 1;
486 lapic_timer_oneshot_quick(2);
/*
 * lapic_timer_restart_handler(dummy) -- per-cpu IPI target: re-apply the
 * C1E fixup and, if it did not already restart the timer (the 'started'
 * check is in lines not visible here), kick a tiny one-shot.
 */
496 lapic_timer_restart_handler(void *dummy __unused)
500 lapic_timer_fixup_handler(&started);
502 struct globaldata *gd = mycpu;
504 gd->gd_timer_running = 1;
505 lapic_timer_oneshot_quick(2);
510 * This function is called only by ACPI-CA code currently:
511 * - AMD C1E fixup. AMD C1E only seems to happen after ACPI
512 * module controls PM. So once ACPI-CA is attached, we try
513 * to apply the fixup to prevent LAPIC timer from hanging.
/* Broadcast the C1E fixup to all active cpus via IPI. */
516 lapic_timer_intr_pmfixup(struct cputimer_intr *cti __unused)
518 lwkt_send_ipiq_mask(smp_active_mask,
519 lapic_timer_fixup_handler, NULL);
/* Broadcast a full timer restart to all active cpus via IPI. */
523 lapic_timer_intr_restart(struct cputimer_intr *cti __unused)
525 lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_restart_handler, NULL);
/*
 * apic_dump(str) -- debug helper: print selected local APIC registers
 * for the current cpu, tagged with caller-supplied string 'str'.
 */
530 * dump contents of local APIC registers
535 kprintf("SMP: CPU%d %s:\n", mycpu->gd_cpuid, str);
536 kprintf(" lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
537 lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
/*
 * apic_ipi(dest_type, vector, delivery_mode) -- send an IPI described
 * by the ICR shorthand 'dest_type'.  Spins (processing incoming IPIQs,
 * in lines not visible here) while a previous IPI is still pending in
 * the ICR delivery-status bit, to avoid cross-cpu IPI deadlock.
 */
541 * Inter Processor Interrupt functions.
545 * Send APIC IPI 'vector' to 'destType' via 'deliveryMode'.
547 * destType is 1 of: APIC_DEST_SELF, APIC_DEST_ALLISELF, APIC_DEST_ALLESELF
548 * vector is any valid SYSTEM INT vector
549 * delivery_mode is 1 of: APIC_DELMODE_FIXED, APIC_DELMODE_LOWPRIO
551 * A backlog of requests can create a deadlock between cpus. To avoid this
552 * we have to be able to accept IPIs at the same time we are trying to send
553 * them. The critical section prevents us from attempting to send additional
554 * IPIs reentrantly, but also prevents IPIQ processing so we have to call
555 * lwkt_process_ipiq() manually. It's rather messy and expensive for this
556 * to occur but fortunately it does not happen too often.
559 apic_ipi(int dest_type, int vector, int delivery_mode)
564 if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
565 unsigned long rflags = read_rflags();
567 DEBUG_PUSH_INFO("apic_ipi");
568 while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
/* Restore the caller's interrupt-enable state after the spin. */
572 write_rflags(rflags);
/* Preserve reserved ICR bits, then merge destination/mode/vector. */
575 icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) | dest_type |
576 delivery_mode | vector;
/* Writing icr_lo triggers the send. */
577 lapic->icr_lo = icr_lo;
/*
 * single_apic_ipi(cpu, vector, delivery_mode) -- send an IPI to one
 * target cpu using physical destination mode.  Like apic_ipi(), waits
 * out any pending send first to avoid deadlock.
 */
583 single_apic_ipi(int cpu, int vector, int delivery_mode)
589 if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
590 unsigned long rflags = read_rflags();
592 DEBUG_PUSH_INFO("single_apic_ipi");
593 while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
597 write_rflags(rflags);
/* Program the destination APIC ID into ICR_HI bits 24-31. */
599 icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
600 icr_hi |= (CPU_TO_ID(cpu) << 24);
601 lapic->icr_hi = icr_hi;
/* Build ICR_LO with an explicit destination field, then send. */
604 icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK)
605 | APIC_DEST_DESTFLD | delivery_mode | vector;
608 lapic->icr_lo = icr_lo;
/*
 * single_apic_ipi_passive(cpu, vector, delivery_mode) -- non-blocking
 * variant: bail out (returning 0, per the comment) when the ICR is
 * busy instead of spinning.  Explicitly marked NOT WORKING YET.
 * NOTE(review): uses APIC_RESV2_MASK here where single_apic_ipi() uses
 * APIC_ICRLO_RESV_MASK -- possibly intentional, verify against headers.
 */
615 * Returns 0 if the apic is busy, 1 if we were able to queue the request.
617 * NOT WORKING YET! The code as-is may end up not queueing an IPI at all
618 * to the target, and the scheduler does not 'poll' for IPI messages.
621 single_apic_ipi_passive(int cpu, int vector, int delivery_mode)
627 if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
631 icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
632 icr_hi |= (CPU_TO_ID(cpu) << 24);
633 lapic->icr_hi = icr_hi;
636 icr_lo = (lapic->icr_lo & APIC_RESV2_MASK)
637 | APIC_DEST_DESTFLD | delivery_mode | vector;
640 lapic->icr_lo = icr_lo;
/*
 * selected_apic_ipi(target, vector, delivery_mode) -- send an IPI to
 * each cpu in the 'target' mask, one single_apic_ipi() at a time
 * (the enclosing loop header is in lines not visible here).
 */
648 * Send APIC IPI 'vector' to 'target's via 'delivery_mode'.
650 * target is a bitmask of destination cpus. Vector is any
651 * valid system INT vector. Delivery mode may be either
652 * APIC_DELMODE_FIXED or APIC_DELMODE_LOWPRIO.
655 selected_apic_ipi(cpumask_t target, int vector, int delivery_mode)
/* Peel off the lowest set cpu bit and IPI it. */
659 int n = BSFCPUMASK(target);
660 target &= ~CPUMASK(n);
661 single_apic_ipi(n, vector, delivery_mode);
/* Calibrated LAPIC timer frequency accessor (Hz in timer ticks). */
667 * Timer code, in development...
668 * - suggested by rgrimes@gndrsh.aac.dev.com
671 get_apic_timer_frequency(void)
673 return(lapic_cputimer_intr.freq);
677 * Load a 'downcount time' in uSeconds.
/*
 * set_apic_timer(us) -- arm a one-shot countdown of 'us' microseconds,
 * rounding the tick count up so the delay is never short.
 */
680 set_apic_timer(int us)
685 * When we reach here, lapic timer's frequency
686 * must have been calculated as well as the
687 * divisor (lapic->dcr_timer is setup during the
688 * divisor calculation).
690 KKASSERT(lapic_cputimer_intr.freq != 0 &&
691 lapic_timer_divisor_idx >= 0);
693 count = ((us * (int64_t)lapic_cputimer_intr.freq) + 999999) / 1000000;
694 lapic_timer_oneshot(count);
699 * Read remaining time in timer.
702 read_apic_timer(void)
705 /** XXX FIXME: we need to return the actual remaining time,
706 * for now we just return the remaining count.
709 return lapic->ccr_timer;
715 * Spin-style delay, set delay time in uS, spin till it drains.
/* NOTE(review): the function header for this spin-delay is not visible
 * in this listing (historically u_sleep(count)). */
720 set_apic_timer(count);
721 while (read_apic_timer())
/*
 * lapic_unused_apic_id(start) -- scan APIC IDs from 'start' for one not
 * assigned to any cpu (ID_TO_CPU == -1); returns NAPICID on exhaustion
 * (return statements are in lines not visible here).
 */
726 lapic_unused_apic_id(int start)
730 for (i = start; i < NAPICID; ++i) {
731 if (ID_TO_CPU(i) == -1)
/*
 * lapic_map(lapic_addr) -- map the LAPIC register page uncached into
 * kernel VA and record it in the global 'lapic' pointer.
 */
738 lapic_map(vm_offset_t lapic_addr)
740 lapic = pmap_mapdev_uncacheable(lapic_addr, sizeof(struct LAPIC));
742 kprintf("lapic: at 0x%08lx\n", lapic_addr);
/* Priority-ordered list of registered LAPIC enumerators (MP table, ACPI...). */
745 static TAILQ_HEAD(, lapic_enumerator) lapic_enumerators =
746 TAILQ_HEAD_INITIALIZER(lapic_enumerators);
/*
 * lapic_config() -- probe enumerators in priority order and let the
 * first one that succeeds enumerate the LAPICs; panic if none do.
 * (Function header is in lines not visible here.)
 */
751 struct lapic_enumerator *e;
754 for (i = 0; i < NAPICID; ++i)
757 TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
758 error = e->lapic_probe(e);
763 panic("can't config lapic\n");
765 e->lapic_enumerate(e);
/*
 * lapic_enumerator_register(ne) -- insert 'ne' into the enumerator list
 * keeping it sorted by ascending lapic_prio (lower value = earlier).
 */
769 lapic_enumerator_register(struct lapic_enumerator *ne)
771 struct lapic_enumerator *e;
773 TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
774 if (e->lapic_prio < ne->lapic_prio) {
775 TAILQ_INSERT_BEFORE(e, ne, lapic_link);
779 TAILQ_INSERT_TAIL(&lapic_enumerators, ne, lapic_link);
/* Priority-ordered list of registered I/O APIC enumerators. */
782 static TAILQ_HEAD(, ioapic_enumerator) ioapic_enumerators =
783 TAILQ_HEAD_INITIALIZER(ioapic_enumerators);
/*
 * ioapic_config() -- discover and initialize all I/O APICs: probe the
 * enumerators, allocate APIC IDs, warn about GSI holes, program each
 * I/O APIC and switch the machine interrupt ABI over to it.
 * (Function header and several branch headers are not visible here.)
 */
788 struct ioapic_enumerator *e;
789 struct ioapic_info *info;
790 int start_apic_id = 0;
794 TAILQ_INIT(&ioapic_conf.ioc_list);
795 /* XXX magic number */
796 for (i = 0; i < 16; ++i)
797 ioapic_conf.ioc_intsrc[i].int_gsi = -1;
/* Probe enumerators; first success wins, none => panic or no-ioapic. */
799 TAILQ_FOREACH(e, &ioapic_enumerators, ioapic_link) {
800 error = e->ioapic_probe(e);
806 panic("can't config I/O APIC\n");
808 kprintf("no I/O APIC\n");
819 * Switch to I/O APIC MachIntrABI and reconfigure
820 * the default IDT entries.
822 MachIntrABI = MachIntrABI_IOAPIC;
823 MachIntrABI.setdefault();
825 e->ioapic_enumerate(e);
/* Count discovered I/O APICs; enforce the 16-unit limit. */
831 TAILQ_FOREACH(info, &ioapic_conf.ioc_list, io_link)
834 if (i > IOAPIC_COUNT_MAX) /* XXX magic number */
835 panic("ioapic_config: more than 16 I/O APIC\n");
/* First pass: allocate a unique APIC ID per I/O APIC. */
840 TAILQ_FOREACH(info, &ioapic_conf.ioc_list, io_link) {
843 apic_id = ioapic_alloc_apic_id(start_apic_id);
844 if (apic_id == NAPICID) {
845 kprintf("IOAPIC: can't alloc APIC ID for "
846 "%dth I/O APIC\n", info->io_idx);
849 info->io_apic_id = apic_id;
851 start_apic_id = apic_id + 1;
/* Fallback: xAPIC model lets I/O APIC IDs overlap LAPIC IDs. */
855 * xAPIC allows I/O APIC's APIC ID to be same
856 * as the LAPIC's APIC ID
858 kprintf("IOAPIC: use xAPIC model to alloc APIC ID "
861 TAILQ_FOREACH(info, &ioapic_conf.ioc_list, io_link)
862 info->io_apic_id = info->io_idx;
866 * Warning about any GSI holes
/* List is GSI-sorted (see ioapic_add); adjacent entries must abut. */
868 TAILQ_FOREACH(info, &ioapic_conf.ioc_list, io_link) {
869 const struct ioapic_info *prev_info;
871 prev_info = TAILQ_PREV(info, ioapic_info_list, io_link);
872 if (prev_info != NULL) {
873 if (info->io_gsi_base !=
874 prev_info->io_gsi_base + prev_info->io_npin) {
875 kprintf("IOAPIC: warning gsi hole "
877 prev_info->io_gsi_base +
879 info->io_gsi_base - 1);
885 TAILQ_FOREACH(info, &ioapic_conf.ioc_list, io_link) {
886 kprintf("IOAPIC: idx %d, apic id %d, "
887 "gsi base %d, npin %d\n",
/* Program every discovered I/O APIC, then fix up the IRQ map. */
898 TAILQ_FOREACH(info, &ioapic_conf.ioc_list, io_link)
900 ioapic_abi_fixup_irqmap();
904 MachIntrABI.cleanup();
/*
 * ioapic_enumerator_register(ne) -- insert 'ne' keeping the list
 * sorted by ascending ioapic_prio; mirrors lapic_enumerator_register().
 */
910 ioapic_enumerator_register(struct ioapic_enumerator *ne)
912 struct ioapic_enumerator *e;
914 TAILQ_FOREACH(e, &ioapic_enumerators, ioapic_link) {
915 if (e->ioapic_prio < ne->ioapic_prio) {
916 TAILQ_INSERT_BEFORE(e, ne, ioapic_link);
920 TAILQ_INSERT_TAIL(&ioapic_enumerators, ne, ioapic_link);
/*
 * ioapic_add(addr, gsi_base, npin) -- register a newly discovered
 * I/O APIC.  Panics on a GSI range overlap or duplicate register
 * address, then inserts the record keeping ioc_list sorted by
 * ascending GSI base.
 */
924 ioapic_add(void *addr, int gsi_base, int npin)
926 struct ioapic_info *info, *ninfo;
929 gsi_end = gsi_base + npin - 1;
/* Reject any overlap of [gsi_base, gsi_end] with an existing unit. */
930 TAILQ_FOREACH(info, &ioapic_conf.ioc_list, io_link) {
931 if ((gsi_base >= info->io_gsi_base &&
932 gsi_base < info->io_gsi_base + info->io_npin) ||
933 (gsi_end >= info->io_gsi_base &&
934 gsi_end < info->io_gsi_base + info->io_npin)) {
935 panic("ioapic_add: overlapped gsi, base %d npin %d, "
936 "hit base %d, npin %d\n", gsi_base, npin,
937 info->io_gsi_base, info->io_npin);
939 if (info->io_addr == addr)
940 panic("ioapic_add: duplicated addr %p\n", addr);
943 ninfo = kmalloc(sizeof(*ninfo), M_DEVBUF, M_WAITOK | M_ZERO);
944 ninfo->io_addr = addr;
945 ninfo->io_npin = npin;
946 ninfo->io_gsi_base = gsi_base;
/* -1 = APIC ID not assigned yet (done later in ioapic_config). */
947 ninfo->io_apic_id = -1;
950 * Create IOAPIC list in ascending order of GSI base
/* Walk backwards; insert after the first entry with a smaller base. */
952 TAILQ_FOREACH_REVERSE(info, &ioapic_conf.ioc_list,
953 ioapic_info_list, io_link) {
954 if (ninfo->io_gsi_base > info->io_gsi_base) {
955 TAILQ_INSERT_AFTER(&ioapic_conf.ioc_list,
956 info, ninfo, io_link);
/* Smallest GSI base seen so far: becomes the new list head. */
961 TAILQ_INSERT_HEAD(&ioapic_conf.ioc_list, ninfo, io_link);
/*
 * ioapic_intsrc(irq, gsi, trig, pola) -- record an interrupt source
 * override (ISA irq -> GSI remap with trigger/polarity).  Warns when a
 * previously recorded override for the same irq disagrees, then stores
 * the latest values.
 */
965 ioapic_intsrc(int irq, int gsi, enum intr_trigger trig, enum intr_polarity pola)
967 struct ioapic_intsrc *int_src;
970 int_src = &ioapic_conf.ioc_intsrc[irq];
/* NOTE(review): the guard rejecting the irq!=0 -> gsi 0 "mixed mode"
 * case is in lines not visible here. */
973 /* Don't allow mixed mode */
974 kprintf("IOAPIC: warning intsrc irq %d -> gsi 0\n", irq);
/* int_gsi != -1 means this irq already has a recorded override. */
978 if (int_src->int_gsi != -1) {
979 if (int_src->int_gsi != gsi) {
980 kprintf("IOAPIC: warning intsrc irq %d, gsi "
981 "%d -> %d\n", irq, int_src->int_gsi, gsi);
983 if (int_src->int_trig != trig) {
984 kprintf("IOAPIC: warning intsrc irq %d, trig "
986 intr_str_trigger(int_src->int_trig),
987 intr_str_trigger(trig));
989 if (int_src->int_pola != pola) {
990 kprintf("IOAPIC: warning intsrc irq %d, pola "
992 intr_str_polarity(int_src->int_pola),
993 intr_str_polarity(pola));
/* Latest override wins. */
996 int_src->int_gsi = gsi;
997 int_src->int_trig = trig;
998 int_src->int_pola = pola;
/*
 * ioapic_set_apic_id(info) -- write info->io_apic_id into the I/O
 * APIC's ID register, then read it back and verify.  Only the low
 * 4 bits are compared since the hardware ID field is 4 bits wide.
 */
1002 ioapic_set_apic_id(const struct ioapic_info *info)
1007 id = ioapic_read(info->io_addr, IOAPIC_ID);
/* ID lives in bits 24-31 of the IOAPIC_ID register. */
1009 id &= ~APIC_ID_MASK;
1010 id |= (info->io_apic_id << 24);
1012 ioapic_write(info->io_addr, IOAPIC_ID, id);
/* Read-back verification. */
1017 id = ioapic_read(info->io_addr, IOAPIC_ID);
1018 apic_id = (id & APIC_ID_MASK) >> 24;
1021 * I/O APIC ID is a 4bits field
1023 if ((apic_id & IOAPIC_ID_MASK) !=
1024 (info->io_apic_id & IOAPIC_ID_MASK)) {
1025 panic("ioapic_set_apic_id: can't set apic id to %d, "
1026 "currently set to %d\n", info->io_apic_id, apic_id);
/*
 * ioapic_gsi_setup(gsi) -- program one GSI's redirection entry.
 * GSI 0 (the ExtINT pin, presumably -- the branch header is not
 * visible) is set up as an external-interrupt passthrough; otherwise
 * the trigger/polarity come from a recorded intsrc override, or from
 * the ISA-vs-PCI defaults, and the ABI irq map is updated.
 */
1031 ioapic_gsi_setup(int gsi)
1033 enum intr_trigger trig;
1034 enum intr_polarity pola;
/* ExtINT passthrough path (vector 0). */
1040 ioapic_extpin_setup(ioapic_gsi_ioaddr(gsi),
1041 ioapic_gsi_pin(gsi), 0);
/* Look for an interrupt source override targeting this GSI. */
1046 for (irq = 0; irq < 16; ++irq) {
1047 const struct ioapic_intsrc *int_src =
1048 &ioapic_conf.ioc_intsrc[irq];
1050 if (gsi == int_src->int_gsi) {
1051 trig = int_src->int_trig;
1052 pola = int_src->int_pola;
/* No override: ISA default edge/high ... */
1059 trig = INTR_TRIGGER_EDGE;
1060 pola = INTR_POLARITY_HIGH;
/* ... or PCI default level/low. */
1062 trig = INTR_TRIGGER_LEVEL;
1063 pola = INTR_POLARITY_LOW;
1068 ioapic_abi_set_irqmap(irq, gsi, trig, pola);
/* Register window address of the I/O APIC owning 'gsi'. */
1072 ioapic_gsi_ioaddr(int gsi)
1074 const struct ioapic_info *info;
1076 info = ioapic_gsi_search(gsi);
1077 return info->io_addr;
/* Pin index of 'gsi' within its owning I/O APIC. */
1081 ioapic_gsi_pin(int gsi)
1083 const struct ioapic_info *info;
1085 info = ioapic_gsi_search(gsi);
1086 return gsi - info->io_gsi_base;
/*
 * ioapic_gsi_search(gsi) -- find the I/O APIC whose GSI range contains
 * 'gsi'; panics if no I/O APIC covers it.
 */
1089 static const struct ioapic_info *
1090 ioapic_gsi_search(int gsi)
1092 const struct ioapic_info *info;
1094 TAILQ_FOREACH(info, &ioapic_conf.ioc_list, io_link) {
1095 if (gsi >= info->io_gsi_base &&
1096 gsi < info->io_gsi_base + info->io_npin)
1099 panic("ioapic_gsi_search: no I/O APIC\n");
/*
 * ioapic_gsi(idx, pin) -- translate (I/O APIC index, pin) to a GSI;
 * out-of-range pin handling is in lines not visible here.
 */
1103 ioapic_gsi(int idx, int pin)
1105 const struct ioapic_info *info;
1107 TAILQ_FOREACH(info, &ioapic_conf.ioc_list, io_link) {
1108 if (info->io_idx == idx)
1113 if (pin >= info->io_npin)
1115 return info->io_gsi_base + pin;
/*
 * ioapic_extpin_setup(addr, pin, vec) -- program 'pin' as an ExtINT
 * passthrough (8259-supplied vector), conforming trigger/polarity.
 */
1119 ioapic_extpin_setup(void *addr, int pin, int vec)
1121 ioapic_pin_prog(addr, pin, vec,
1122 INTR_TRIGGER_CONFORM, INTR_POLARITY_CONFORM, IOART_DELEXINT);
/* Return the GSI used for the ExtINT pin (body not visible here). */
1126 ioapic_extpin_gsi(void)
/*
 * ioapic_pin_setup(addr, pin, vec, trig, pola) -- program a pin for a
 * fixed-delivery interrupt.  First clears the entry as edge/high to
 * flush a possibly stuck Remote-IRR, then programs the real mode.
 */
1132 ioapic_pin_setup(void *addr, int pin, int vec,
1133 enum intr_trigger trig, enum intr_polarity pola)
1136 * Always clear an I/O APIC pin before [re]programming it. This is
1137 * particularly important if the pin is set up for a level interrupt
1138 * as the IOART_REM_IRR bit might be set. When we reprogram the
1139 * vector any EOI from pending ints on this pin could be lost and
1140 * IRR might never get reset.
1142 * To fix this problem, clear the vector and make sure it is
1143 * programmed as an edge interrupt. This should theoretically
1144 * clear IRR so we can later, safely program it as a level
1147 ioapic_pin_prog(addr, pin, vec, INTR_TRIGGER_EDGE, INTR_POLARITY_HIGH,
1149 ioapic_pin_prog(addr, pin, vec, trig, pola, IOART_DELFIXED);
/*
 * ioapic_pin_prog(addr, pin, vec, trig, pola, del_mode) -- write one
 * redirection-table entry: masked, physical destination (cpu0), with
 * the requested trigger/polarity.  ExtINT entries force edge/high and
 * conforming trig/pola arguments; fixed entries must specify both.
 */
1153 ioapic_pin_prog(void *addr, int pin, int vec,
1154 enum intr_trigger trig, enum intr_polarity pola, uint32_t del_mode)
1156 uint32_t flags, target;
1159 KKASSERT(del_mode == IOART_DELEXINT || del_mode == IOART_DELFIXED);
/* Each redirection entry is two 32-bit registers starting here. */
1161 select = IOAPIC_REDTBL0 + (2 * pin);
1163 flags = ioapic_read(addr, select) & IOART_RESV;
/* Entry starts masked; unmasking is done elsewhere when enabled. */
1164 flags |= IOART_INTMSET | IOART_DESTPHY;
1169 * We only support limited I/O APIC mixed mode,
1170 * so even for ExtINT, we still use "fixed"
1173 flags |= IOART_DELFIXED;
1176 if (del_mode == IOART_DELEXINT) {
1177 KKASSERT(trig == INTR_TRIGGER_CONFORM &&
1178 pola == INTR_POLARITY_CONFORM);
1179 flags |= IOART_TRGREDG | IOART_INTAHI;
/* Fixed delivery: trigger and polarity must be explicit. */
1182 case INTR_TRIGGER_EDGE:
1183 flags |= IOART_TRGREDG;
1186 case INTR_TRIGGER_LEVEL:
1187 flags |= IOART_TRGRLVL;
1190 case INTR_TRIGGER_CONFORM:
1191 panic("ioapic_pin_prog: trig conform is not "
1195 case INTR_POLARITY_HIGH:
1196 flags |= IOART_INTAHI;
1199 case INTR_POLARITY_LOW:
1200 flags |= IOART_INTALO;
1203 case INTR_POLARITY_CONFORM:
1204 panic("ioapic_pin_prog: pola conform is not "
/* Route to cpu0's APIC ID in the high destination register. */
1209 target = ioapic_read(addr, select + 1) & IOART_HI_DEST_RESV;
1210 target |= (CPU_TO_ID(0) << IOART_HI_DEST_SHIFT) &
1213 ioapic_write(addr, select, flags | vec);
1214 ioapic_write(addr, select + 1, target);
/*
 * ioapic_setup(info) -- initialize one I/O APIC: assign its APIC ID,
 * then program every pin via ioapic_gsi_setup().
 */
1218 ioapic_setup(const struct ioapic_info *info)
1222 ioapic_set_apic_id(info);
1224 for (i = 0; i < info->io_npin; ++i)
1225 ioapic_gsi_setup(info->io_gsi_base + i);
/*
 * ioapic_alloc_apic_id(start) -- find an APIC ID, at or above 'start',
 * that is unused by any LAPIC and whose low 4 bits collide with no
 * already-assigned I/O APIC ID (the hardware ID field is 4 bits).
 * Returns NAPICID when no unused LAPIC ID remains.  The retry loop
 * wrapper around this body is in lines not visible here.
 */
1229 ioapic_alloc_apic_id(int start)
1232 const struct ioapic_info *info;
1233 int apic_id, apic_id16;
1235 apic_id = lapic_unused_apic_id(start);
1236 if (apic_id == NAPICID) {
1237 kprintf("IOAPIC: can't find unused APIC ID\n");
1240 apic_id16 = apic_id & IOAPIC_ID_MASK;
1243 * Check against other I/O APIC's APIC ID's lower 4bits.
1245 * The new APIC ID will have to be different from others
1246 * in the lower 4bits, no matter whether xAPIC is used
1249 TAILQ_FOREACH(info, &ioapic_conf.ioc_list, io_link) {
/* Skip units whose ID has not been assigned yet. */
1250 if (info->io_apic_id == -1) {
1254 if ((info->io_apic_id & IOAPIC_ID_MASK) == apic_id16)
/* Collision on the low nibble: advance and retry the search. */
1260 kprintf("IOAPIC: APIC ID %d has same lower 4bits as "
1261 "%dth I/O APIC, keep searching...\n",
1262 apic_id, info->io_idx);
1264 start = apic_id + 1;
1266 panic("ioapic_unused_apic_id: never reached\n");