2 * Copyright (c) 1996, by Steve Passe
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * $FreeBSD: src/sys/i386/i386/mpapic.c,v 1.37.2.7 2003/01/25 02:31:47 peter Exp $
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
32 #include <sys/machintr.h>
33 #include <machine/globaldata.h>
34 #include <machine/smp.h>
35 #include <machine/md_var.h>
36 #include <machine/pmap.h>
37 #include <machine/specialreg.h>
38 #include <machine_base/apic/lapic.h>
39 #include <machine_base/apic/ioapic.h>
40 #include <machine_base/apic/ioapic_abi.h>
41 #include <machine_base/apic/apicvar.h>
42 #include <machine_base/icu/icu_var.h>
43 #include <machine/segments.h>
44 #include <sys/thread2.h>
46 #include <machine/cputypes.h>
47 #include <machine/intr_machdep.h>
/*
 * Memory-mapped local APIC register window; established by lapic_map().
 * volatile because every access is a hardware register access.
 */
51 volatile lapic_t *lapic;

/* LAPIC timer calibration and power-management fixup helpers (below). */
53 static void lapic_timer_calibrate(void);
54 static void lapic_timer_set_divisor(int);
55 static void lapic_timer_fixup_handler(void *);
56 static void lapic_timer_restart_handler(void *);

/* Entry points invoked from the Xtimer interrupt path (assembly side). */
58 void lapic_timer_process(void);
59 void lapic_timer_process_frame(struct intrframe *);
60 void lapic_timer_always(struct intrframe *);

/* Tunable: set hw.lapic_timer_enable=0 to skip using the LAPIC cputimer. */
62 static int lapic_timer_enable = 1;
63 TUNABLE_INT("hw.lapic_timer_enable", &lapic_timer_enable);

/* cputimer_intr method implementations for lapic_cputimer_intr (below). */
65 static void lapic_timer_intr_reload(struct cputimer_intr *, sysclock_t);
66 static void lapic_timer_intr_enable(struct cputimer_intr *);
67 static void lapic_timer_intr_restart(struct cputimer_intr *);
68 static void lapic_timer_intr_pmfixup(struct cputimer_intr *);
/*
 * Interrupt cputimer backed by the LAPIC one-shot timer.  Registered
 * with the cputimer framework on the BSP once lapic_timer_calibrate()
 * has filled in .freq.
 * NOTE(review): some initializer fields appear elided in this view.
 */
70 static struct cputimer_intr lapic_cputimer_intr = {
72 .reload = lapic_timer_intr_reload,
73 .enable = lapic_timer_intr_enable,
74 .config = cputimer_intr_default_config,
75 .restart = lapic_timer_intr_restart,
76 .pmfixup = lapic_timer_intr_pmfixup,
77 .initclock = cputimer_intr_default_initclock,
78 .next = SLIST_ENTRY_INITIALIZER,
80 .type = CPUTIMER_INTR_LAPIC,
81 .prio = CPUTIMER_INTR_PRIO_LAPIC,
82 .caps = CPUTIMER_INTR_CAP_NONE
/*
 * Divide-configuration values for the LAPIC timer, tried in order by
 * lapic_timer_calibrate().  lapic_timer_divisor_idx stays -1 until
 * calibration picks a working entry.
 */
85 static int lapic_timer_divisor_idx = -1;
86 static const uint32_t lapic_timer_divisors[] = {
87 APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
88 APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128, APIC_TDCR_1
90 #define APIC_TIMER_NDIVISORS (int)(NELEM(lapic_timer_divisors))
93 * APIC ID <-> CPU ID mapping structures.
/*
 * apic_id_to_cpu_id entries are -1 when unassigned (see lapic_config()
 * and lapic_unused_apic_id()); cpu_id_to_apic_id presumably follows the
 * same convention -- its initialization is not visible here.
 */
95 int cpu_id_to_apic_id[NAPICID];
96 int apic_id_to_cpu_id[NAPICID];
100 * Enable LAPIC, configure interrupts.
/*
 * lapic_init:
 *	Bring the local APIC on the calling cpu into a known state and
 *	hook it into the interrupt/timer machinery.  'bsp' is TRUE on the
 *	boot processor, which additionally installs the shared IDT
 *	vectors and registers/calibrates the LAPIC cputimer.
 *
 * NOTE(review): a number of lines (locals, braces, conditionals) are
 * elided in this view; comments describe only the visible code.
 */
103 lapic_init(boolean_t bsp)
111 * Since IDT is shared between BSP and APs, these vectors
112 * only need to be installed once; we do it on BSP.
/* AMD family >= 0xf: HyperTransport interrupt-routing fixup. */
115 if (cpu_vendor_id == CPU_VENDOR_AMD &&
116 CPUID_TO_FAMILY(cpu_id) >= 0xf) {
120 * Set the LINTEN bit in the HyperTransport
121 * Transaction Control Register.
123 * This will cause EXTINT and NMI interrupts
124 * routed over the hypertransport bus to be
125 * fed into the LAPIC LINT0/LINT1. If the bit
126 * isn't set, the interrupts will go to the
127 * general cpu INTR/NMI pins. On a dual-core
128 * cpu the interrupt winds up going to BOTH cpus.
129 * The first cpu that does the interrupt ack
130 * cycle will get the correct interrupt. The
131 * second cpu that does it will get a spurious
132 * interrupt vector (typically IRQ 7).
/*
 * PCI config mechanism #1 address composition for the HT Transaction
 * Control register; 'tcr' is presumably read back via inl(0xcfc) in
 * elided code -- TODO confirm against the full source.
 */
135 (1 << 31) | /* enable */
136 (0 << 16) | /* bus */
137 (0x18 << 11) | /* dev (cpu + 0x18) */
138 (0 << 8) | /* func */
/* Only set LINTEN (bit 16) when it is not already set. */
142 if ((tcr & 0x00010000) == 0) {
143 kprintf("LAPIC: AMD LINTEN on\n");
144 outl(0xcfc, tcr|0x00010000);
149 /* Install a 'Spurious INTerrupt' vector */
150 setidt_global(XSPURIOUSINT_OFFSET, Xspuriousint,
151 SDT_SYSIGT, SEL_KPL, 0);
153 /* Install a timer vector */
154 setidt_global(XTIMER_OFFSET, Xtimer,
155 SDT_SYSIGT, SEL_KPL, 0);
158 /* Install an inter-CPU IPI for TLB invalidation */
159 setidt_global(XINVLTLB_OFFSET, Xinvltlb,
160 SDT_SYSIGT, SEL_KPL, 0);
162 /* Install an inter-CPU IPI for IPIQ messaging */
163 setidt_global(XIPIQ_OFFSET, Xipiq,
164 SDT_SYSIGT, SEL_KPL, 0);
166 /* Install an inter-CPU IPI for CPU stop/restart */
167 setidt_global(XCPUSTOP_OFFSET, Xcpustop,
168 SDT_SYSIGT, SEL_KPL, 0);
173 * Setup LINT0 as ExtINT on the BSP. This is theoretically an
174 * aggregate interrupt input from the 8259. The INTA cycle
175 * will be routed to the external controller (the 8259) which
176 * is expected to supply the vector.
178 * Must be setup edge triggered, active high.
180 * Disable LINT0 on BSP, if I/O APIC is enabled.
182 * Disable LINT0 on the APs. It doesn't matter what delivery
183 * mode we use because we leave it masked.
185 temp = lapic->lvt_lint0;
186 temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
187 APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
/* The bsp/ioapic conditionals selecting among the next three are elided. */
189 temp |= APIC_LVT_DM_EXTINT;
191 temp |= APIC_LVT_MASKED;
193 temp |= APIC_LVT_DM_FIXED | APIC_LVT_MASKED;
195 lapic->lvt_lint0 = temp;
198 * Setup LINT1 as NMI.
200 * Must be setup edge trigger, active high.
202 * Enable LINT1 on BSP, if I/O APIC is enabled.
204 * Disable LINT1 on the APs.
206 temp = lapic->lvt_lint1;
207 temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
208 APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
209 temp |= APIC_LVT_MASKED | APIC_LVT_DM_NMI;
210 if (bsp && ioapic_enable)
211 temp &= ~APIC_LVT_MASKED;
212 lapic->lvt_lint1 = temp;
215 * Mask the LAPIC error interrupt, LAPIC performance counter
218 lapic->lvt_error = lapic->lvt_error | APIC_LVT_MASKED;
219 lapic->lvt_pcint = lapic->lvt_pcint | APIC_LVT_MASKED;
222 * Set LAPIC timer vector and mask the LAPIC timer interrupt.
224 timer = lapic->lvt_timer;
225 timer &= ~APIC_LVTT_VECTOR;
226 timer |= XTIMER_OFFSET;
227 timer |= APIC_LVTT_MASKED;
228 lapic->lvt_timer = timer;
231 * Set the Task Priority Register as needed. At the moment allow
232 * interrupts on all cpus (the APs will remain CLId until they are
236 temp &= ~APIC_TPR_PRIO; /* clear priority field */
/* Spurious-interrupt vector register: software-enable the LAPIC. */
243 temp |= APIC_SVR_ENABLE; /* enable the LAPIC */
244 temp &= ~APIC_SVR_FOCUS_DISABLE; /* enable lopri focus processor */
247 * Set the spurious interrupt vector. The low 4 bits of the vector
250 if ((XSPURIOUSINT_OFFSET & 0x0F) != 0x0F)
251 panic("bad XSPURIOUSINT_OFFSET: 0x%08x", XSPURIOUSINT_OFFSET);
252 temp &= ~APIC_SVR_VECTOR;
253 temp |= XSPURIOUSINT_OFFSET;
258 * Pump out a few EOIs to clean out interrupts that got through
259 * before we were able to set the TPR.
/* BSP only: calibrate, then register/select the LAPIC as interrupt cputimer. */
266 lapic_timer_calibrate();
267 if (lapic_timer_enable) {
268 cputimer_intr_register(&lapic_cputimer_intr);
269 cputimer_intr_select(&lapic_cputimer_intr, 0);
/* APs reuse the divisor the BSP's calibration selected. */
272 lapic_timer_set_divisor(lapic_timer_divisor_idx);
276 apic_dump("apic_initialize()");
/*
 * lapic_timer_set_divisor:
 *	Program the LAPIC timer Divide Configuration Register from the
 *	divisor table; divisor_idx must be a valid table index.
 */
280 lapic_timer_set_divisor(int divisor_idx)
282 KKASSERT(divisor_idx >= 0 && divisor_idx < APIC_TIMER_NDIVISORS);
283 lapic->dcr_timer = lapic_timer_divisors[divisor_idx];
/*
 * lapic_timer_oneshot:
 *	Arm the LAPIC timer in one-shot mode (clears the periodic bit in
 *	the timer LVT) with the given initial count.
 */
287 lapic_timer_oneshot(u_int count)
291 value = lapic->lvt_timer;
292 value &= ~APIC_LVTT_PERIODIC;
293 lapic->lvt_timer = value;
294 lapic->icr_timer = count;
/*
 * lapic_timer_oneshot_quick:
 *	Reload only the initial count; assumes one-shot mode is already
 *	configured in the timer LVT.
 */
298 lapic_timer_oneshot_quick(u_int count)
300 lapic->icr_timer = count;
/*
 * lapic_timer_calibrate:
 *	Pick the smallest divisor for which a full-range one-shot does
 *	not expire within the measurement window, then derive the timer
 *	frequency from the observed countdown.
 * NOTE(review): the timed delay between arming the one-shot and
 * reading ccr_timer is elided in this view; 'freq = value / 2'
 * implies a fixed window length -- confirm against the full source.
 */
304 lapic_timer_calibrate(void)
308 /* Try to calibrate the local APIC timer. */
309 for (lapic_timer_divisor_idx = 0;
310 lapic_timer_divisor_idx < APIC_TIMER_NDIVISORS;
311 lapic_timer_divisor_idx++) {
312 lapic_timer_set_divisor(lapic_timer_divisor_idx);
313 lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
/* Ticks consumed; value == MAX_COUNT means the counter hit 0 (expired). */
315 value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
316 if (value != APIC_TIMER_MAX_COUNT)
319 if (lapic_timer_divisor_idx >= APIC_TIMER_NDIVISORS)
320 panic("lapic: no proper timer divisor?!\n");
321 lapic_cputimer_intr.freq = value / 2;
323 kprintf("lapic: divisor index %d, frequency %u Hz\n",
324 lapic_timer_divisor_idx, lapic_cputimer_intr.freq);
/*
 * lapic_timer_process_oncpu:
 *	Common LAPIC timer interrupt processing for cpu 'gd': mark the
 *	pending one-shot as consumed and run any queued systimers.
 *	'frame' may be NULL when no interrupt frame is available.
 */
328 lapic_timer_process_oncpu(struct globaldata *gd, struct intrframe *frame)
332 gd->gd_timer_running = 0;
334 count = sys_cputimer->count();
335 if (TAILQ_FIRST(&gd->gd_systimerq) != NULL)
336 systimer_intr(&count, 0, frame);
/* LAPIC timer entry without an interrupt frame. */
340 lapic_timer_process(void)
342 lapic_timer_process_oncpu(mycpu, NULL);
/* LAPIC timer entry with the interrupt frame supplied by Xtimer. */
346 lapic_timer_process_frame(struct intrframe *frame)
348 lapic_timer_process_oncpu(mycpu, frame);
352 * This manual debugging code is called unconditionally from Xtimer
353 * (the lapic timer interrupt) whether the current thread is in a
354 * critical section or not) and can be useful in tracking down lockups.
356 * NOTE: MANUAL DEBUG CODE
/* Per-cpu watchdog state: last observed 'ticks' and repeat counter. */
359 static int saveticks[SMP_MAXCPU];
360 static int savecounts[SMP_MAXCPU];

/*
 * lapic_timer_always:
 *	Debug hook run on every LAPIC timer interrupt.  Scribbles a
 *	per-cpu heartbeat into the VGA text buffer (phys 0xb8000 via the
 *	kernel direct map) and panics if 'ticks' stops advancing.
 */
364 lapic_timer_always(struct intrframe *frame)
367 globaldata_t gd = mycpu;
368 int cpu = gd->gd_cpuid;
/* One 80-column text row per cpu; low byte of the first cell cycles. */
374 gptr = (short *)0xFFFFFFFF800b8000 + 80 * cpu;
375 *gptr = ((*gptr + 1) & 0x00FF) | 0x0700;
378 ksnprintf(buf, sizeof(buf), " %p %16s %d %16s ",
379 (void *)frame->if_rip, gd->gd_curthread->td_comm, ticks,
/* Blast the formatted status line to the screen, attribute 0x07. */
381 for (i = 0; buf[i]; ++i) {
382 gptr[i] = 0x0700 | (unsigned char)buf[i];
/* Watchdog: panic if ticks hasn't advanced for >2000 lapic interrupts. */
386 if (saveticks[gd->gd_cpuid] != ticks) {
387 saveticks[gd->gd_cpuid] = ticks;
388 savecounts[gd->gd_cpuid] = 0;
390 ++savecounts[gd->gd_cpuid];
391 if (savecounts[gd->gd_cpuid] > 2000 && panicstr == NULL) {
392 panic("cpud %d panicing on ticks failure",
/* Cross-cpu check: panic if another cpu's ticks view drifts > 10. */
395 for (i = 0; i < ncpus; ++i) {
397 if (saveticks[i] && panicstr == NULL) {
398 delta = saveticks[i] - ticks;
399 if (delta < -10 || delta > 10) {
400 panic("cpu %d panicing on cpu %d watchdog",
/*
 * lapic_timer_intr_reload:
 *	cputimer 'reload' method: convert the sysclock reload count to
 *	lapic timer ticks and (re)arm the per-cpu one-shot.  An already
 *	pending one-shot is only ever shortened, never lengthened.
 */
410 lapic_timer_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
412 struct globaldata *gd = mycpu;
/* Rescale from the system cputimer's frequency to the lapic timer's. */
414 reload = (int64_t)reload * cti->freq / sys_cputimer->freq;
418 if (gd->gd_timer_running) {
/* Pending shot: rearm only if the new deadline is sooner. */
419 if (reload < lapic->ccr_timer)
420 lapic_timer_oneshot_quick(reload);
422 gd->gd_timer_running = 1;
423 lapic_timer_oneshot_quick(reload);
/*
 * lapic_timer_intr_enable:
 *	cputimer 'enable' method: unmask the timer LVT in one-shot mode
 *	and apply vendor-specific fixups (e.g. AMD C1E) immediately.
 */
428 lapic_timer_intr_enable(struct cputimer_intr *cti __unused)
432 timer = lapic->lvt_timer;
433 timer &= ~(APIC_LVTT_MASKED | APIC_LVTT_PERIODIC);
434 lapic->lvt_timer = timer;
436 lapic_timer_fixup_handler(NULL);
/*
 * lapic_timer_fixup_handler:
 *	Apply fixups for cpus whose power management can stop the LAPIC
 *	timer.  'arg', when non-NULL, presumably points to an int set
 *	when a fixup was applied (see lapic_timer_restart_handler()) --
 *	the lines consuming 'arg' are elided in this view.
 */
440 lapic_timer_fixup_handler(void *arg)
447 if (cpu_vendor_id == CPU_VENDOR_AMD) {
449 * Detect the presence of C1E capability mostly on latest
450 * dual-cores (or future) k8 family. This feature renders
451 * the local APIC timer dead, so we disable it by reading
452 * the Interrupt Pending Message register and clearing both
453 * C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
456 * "BIOS and Kernel Developer's Guide for AMD NPT
457 * Family 0Fh Processors"
458 * #32559 revision 3.00
/* cpuid signature check narrowing to the affected k8 revisions. */
460 if ((cpu_id & 0x00000f00) == 0x00000f00 &&
461 (cpu_id & 0x0fff0000) >= 0x00040000) {
/* MSR 0xc0010055: Interrupt Pending Message register (see BKDG above). */
464 msr = rdmsr(0xc0010055);
465 if (msr & 0x18000000) {
466 struct globaldata *gd = mycpu;
468 kprintf("cpu%d: AMD C1E detected\n",
/* Clear C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27). */
470 wrmsr(0xc0010055, msr & ~0x18000000ULL);
473 * We are kinda stalled;
/* Kick the timer with a tiny one-shot to get it running again. */
476 gd->gd_timer_running = 1;
477 lapic_timer_oneshot_quick(2);
/*
 * lapic_timer_restart_handler:
 *	Per-cpu handler: re-apply fixups and restart the one-shot timer.
 *	'started' is presumably set by lapic_timer_fixup_handler() and
 *	guards the restart below -- TODO confirm, its declaration and
 *	the surrounding conditional are elided in this view.
 */
487 lapic_timer_restart_handler(void *dummy __unused)
491 lapic_timer_fixup_handler(&started);
493 struct globaldata *gd = mycpu;
495 gd->gd_timer_running = 1;
496 lapic_timer_oneshot_quick(2);
501 * This function is called only by ACPI-CA code currently:
502 * - AMD C1E fixup. AMD C1E only seems to happen after ACPI
503 * module controls PM. So once ACPI-CA is attached, we try
504 * to apply the fixup to prevent LAPIC timer from hanging.
/*
 * cputimer 'pmfixup' method: run the fixup handler on every active
 * cpu via IPI; the direct call below is presumably the non-SMP or
 * local-cpu path (the selecting conditional is elided).
 */
507 lapic_timer_intr_pmfixup(struct cputimer_intr *cti __unused)
510 lwkt_send_ipiq_mask(smp_active_mask,
511 lapic_timer_fixup_handler, NULL);
513 lapic_timer_fixup_handler(NULL);
/*
 * cputimer 'restart' method: restart the one-shot timer on every
 * active cpu via IPI; the direct call below is presumably the
 * non-SMP or local-cpu path (the selecting conditional is elided).
 */
518 lapic_timer_intr_restart(struct cputimer_intr *cti __unused)
521 lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_restart_handler, NULL);
523 lapic_timer_restart_handler(NULL);
529 * dump contents of local APIC registers
/*
 * NOTE(review): the function signature (apic_dump, judging from the
 * call in lapic_init()) and 'str' parameter declaration are elided
 * in this view.
 */
534 kprintf("SMP: CPU%d %s:\n", mycpu->gd_cpuid, str);
535 kprintf(" lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
536 lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
542 * Inter Processor Interrupt functions.
546 * Send APIC IPI 'vector' to 'destType' via 'deliveryMode'.
548 * destType is 1 of: APIC_DEST_SELF, APIC_DEST_ALLISELF, APIC_DEST_ALLESELF
549 * vector is any valid SYSTEM INT vector
550 * delivery_mode is 1 of: APIC_DELMODE_FIXED, APIC_DELMODE_LOWPRIO
554 * We now implement a per-cpu interlock (gd->gd_npoll) to prevent more than
555 * one IPI from being sent to any given cpu at a time. Thus we no longer
556 * have to process incoming IPIs while waiting for the status to clear.
557 * No deadlock should be possible.
559 * We now physically disable interrupts for the lapic ICR operation. If
560 * we do not do this then it looks like an EOI sent to the lapic (which
561 * occurs even with a critical section) can interfere with the command
562 * register ready status and cause an IPI to be lost.
564 * e.g. an interrupt can occur, issue the EOI, IRET, and cause the command
565 * register to busy just before we write to icr_lo, resulting in a lost
566 * issuance. This only appears to occur on Intel cpus and is not
567 * documented. It could simply be that cpus are so fast these days that
568 * it was always an issue, but is only now rearing its ugly head. This
/*
 * apic_ipi:
 *	Issue an IPI to a destination shorthand (see comments above).
 *	rflags are saved/restored around the ICR busy-wait and write;
 *	the cpu_disable_intr() call is presumably in elided lines.
 */
572 apic_ipi(int dest_type, int vector, int delivery_mode)
574 unsigned long rflags;
577 rflags = read_rflags();
/* Spin until any previous IPI leaves the 'send pending' state. */
579 while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
/* Preserve reserved ICR bits, then merge shorthand/mode/vector. */
582 icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) | dest_type |
583 delivery_mode | vector;
584 lapic->icr_lo = icr_lo;
585 write_rflags(rflags);
/*
 * single_apic_ipi:
 *	Send an IPI to one target cpu: program the destination APIC id
 *	into ICR_HI, then issue via ICR_LO.  Interrupts are physically
 *	disabled for the ICR operation (see commentary above apic_ipi();
 *	the cpu_disable_intr() call is presumably in elided lines).
 */
591 single_apic_ipi(int cpu, int vector, int delivery_mode)
593 unsigned long rflags;
597 rflags = read_rflags();
/* Wait for any previous IPI to leave the 'send pending' state. */
599 while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
/* Destination field: physical APIC id in ICR_HI bits 24-31. */
602 icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
603 icr_hi |= (CPUID_TO_APICID(cpu) << 24);
604 lapic->icr_hi = icr_hi;
/* Writing ICR_LO triggers the send. */
607 icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) |
608 APIC_DEST_DESTFLD | delivery_mode | vector;
611 lapic->icr_lo = icr_lo;
612 write_rflags(rflags);
618 * Returns 0 if the apic is busy, 1 if we were able to queue the request.
620 * NOT WORKING YET! The code as-is may end up not queueing an IPI at all
621 * to the target, and the scheduler does not 'poll' for IPI messages.
/*
 * Non-blocking variant of single_apic_ipi(): bails out instead of
 * spinning when the ICR still shows a send pending.
 */
624 single_apic_ipi_passive(int cpu, int vector, int delivery_mode)
630 if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
634 icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
635 icr_hi |= (CPUID_TO_APICID(cpu) << 24);
636 lapic->icr_hi = icr_hi;
/*
 * NOTE(review): uses APIC_RESV2_MASK here where single_apic_ipi()
 * uses APIC_ICRLO_RESV_MASK -- verify this difference is intentional.
 */
639 icr_lo = (lapic->icr_lo & APIC_RESV2_MASK)
640 | APIC_DEST_DESTFLD | delivery_mode | vector;
643 lapic->icr_lo = icr_lo;
651 * Send APIC IPI 'vector' to 'target's via 'delivery_mode'.
653 * target is a bitmask of destination cpus. Vector is any
654 * valid system INT vector. Delivery mode may be either
655 * APIC_DELMODE_FIXED or APIC_DELMODE_LOWPRIO.
658 selected_apic_ipi(cpumask_t target, int vector, int delivery_mode)
/* Peel one cpu at a time off the mask and send a directed IPI to each. */
662 int n = BSFCPUMASK(target);
663 target &= ~CPUMASK(n);
664 single_apic_ipi(n, vector, delivery_mode);
672 * Timer code, in development...
673 * - suggested by rgrimes@gndrsh.aac.dev.com
/* Return the calibrated LAPIC timer frequency in Hz (0 if uncalibrated). */
676 get_apic_timer_frequency(void)
678 return(lapic_cputimer_intr.freq);
682 * Load a 'downcount time' in uSeconds.
/*
 * set_apic_timer:
 *	Arm a one-shot for 'us' microseconds, rounding the tick count
 *	up; requires prior calibration (asserted below).
 */
685 set_apic_timer(int us)
690 * When we reach here, lapic timer's frequency
691 * must have been calculated as well as the
692 * divisor (lapic->dcr_timer is setup during the
693 * divisor calculation).
695 KKASSERT(lapic_cputimer_intr.freq != 0 &&
696 lapic_timer_divisor_idx >= 0);
/* Round up so short delays still get at least the requested time. */
698 count = ((us * (int64_t)lapic_cputimer_intr.freq) + 999999) / 1000000;
699 lapic_timer_oneshot(count);
704 * Read remaining time in timer.
/* Returns the LAPIC timer current-count register (raw ticks, not time). */
707 read_apic_timer(void)
710 /** XXX FIXME: we need to return the actual remaining time,
711 * for now we just return the remaining count.
714 return lapic->ccr_timer;
720 * Spin-style delay, set delay time in uS, spin till it drains.
/*
 * NOTE(review): the function signature and 'count' parameter are
 * elided in this view; busy-waits until the armed one-shot drains.
 */
725 set_apic_timer(count);
726 while (read_apic_timer())
/*
 * lapic_unused_apic_id:
 *	Return the first APIC id >= 'start' not mapped to any cpu
 *	(the not-found return path is elided in this view --
 *	presumably NAPICID; TODO confirm).
 */
731 lapic_unused_apic_id(int start)
735 for (i = start; i < NAPICID; ++i) {
736 if (APICID_TO_CPUID(i) == -1)
/* Map the local APIC register window uncached at physical 'lapic_addr'. */
743 lapic_map(vm_paddr_t lapic_addr)
745 lapic = pmap_mapdev_uncacheable(lapic_addr, sizeof(struct LAPIC));
/*
 * Registered LAPIC enumerators, kept in descending lapic_prio order
 * by lapic_enumerator_register().
 */
748 static TAILQ_HEAD(, lapic_enumerator) lapic_enumerators =
749 TAILQ_HEAD_INITIALIZER(lapic_enumerators);
/*
 * lapic_config:
 *	Probe the registered enumerators in priority order, use the
 *	first that succeeds to enumerate cpus, then clamp the AP count
 *	to the hw.ap_max tunable.  NOTE(review): the function signature
 *	and several lines (loop break, error return, clamp body) are
 *	elided in this view.
 */
754 struct lapic_enumerator *e;
755 int error, i, ap_max;
757 KKASSERT(lapic_enable);
/* Reset the APIC-id -> cpu-id map before enumeration. */
759 for (i = 0; i < NAPICID; ++i)
760 APICID_TO_CPUID(i) = -1;
762 TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
763 error = e->lapic_probe(e);
768 kprintf("LAPIC: Can't find LAPIC\n");
772 e->lapic_enumerate(e);
775 TUNABLE_INT_FETCH("hw.ap_max", &ap_max);
776 if (ap_max > MAXCPU - 1)
780 kprintf("LAPIC: Warning use only %d out of %d "
/*
 * lapic_enumerator_register:
 *	Insert an enumerator into the global list, keeping the list
 *	ordered by descending lapic_prio (higher priority probed first).
 */
790 lapic_enumerator_register(struct lapic_enumerator *ne)
792 struct lapic_enumerator *e;
794 TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
795 if (e->lapic_prio < ne->lapic_prio) {
796 TAILQ_INSERT_BEFORE(e, ne, lapic_link);
/* Lowest priority (or empty list): append at the tail. */
800 TAILQ_INSERT_TAIL(&lapic_enumerators, ne, lapic_link);
/* Record a bidirectional cpu id <-> APIC id mapping. */
804 lapic_set_cpuid(int cpu_id, int apic_id)
806 CPUID_TO_APICID(cpu_id) = apic_id;
807 APICID_TO_CPUID(apic_id) = cpu_id;
/*
 * lapic_fixup_noioapic:
 *	When the I/O APIC is not used, unmask LINT0 (the ExtINT path
 *	from the 8259) and mask LINT1 on the BSP, reversing the
 *	ioapic-enabled configuration done in lapic_init().
 */
811 lapic_fixup_noioapic(void)
815 /* Only allowed on BSP */
816 KKASSERT(mycpuid == 0);
817 KKASSERT(!ioapic_enable);
819 temp = lapic->lvt_lint0;
820 temp &= ~APIC_LVT_MASKED;
821 lapic->lvt_lint0 = temp;
823 temp = lapic->lvt_lint1;
824 temp |= APIC_LVT_MASKED;
825 lapic->lvt_lint1 = temp;
829 lapic_sysinit(void *dummy __unused)
834 error = lapic_config();
840 /* Initialize BSP's local APIC */
842 } else if (ioapic_enable) {
844 icu_reinit_noioapic();
847 SYSINIT(lapic, SI_BOOT2_LAPIC, SI_ORDER_FIRST, lapic_sysinit, NULL)