i386: Allow UP kernel to use LAPIC timer and I/O APIC
[dragonfly.git] / sys / platform / pc32 / apic / lapic.c
1/*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: src/sys/i386/i386/mpapic.c,v 1.37.2.7 2003/01/25 02:31:47 peter Exp $
26 */
27
28#include <sys/param.h>
29#include <sys/systm.h>
30#include <sys/kernel.h>
31#include <sys/bus.h>
32#include <sys/machintr.h>
33#include <machine/globaldata.h>
34#include <machine/smp.h>
35#include <machine/cputypes.h>
36#include <machine/md_var.h>
37#include <machine/pmap.h>
38#include <machine_base/apic/lapic.h>
39#include <machine_base/apic/ioapic.h>
40#include <machine_base/apic/ioapic_abi.h>
41#include <machine_base/icu/icu_var.h>
42#include <machine/segments.h>
43#include <sys/thread2.h>
44
45#include <machine/intr_machdep.h>
46
47extern int naps;
48
49volatile lapic_t *lapic;
50
51static void lapic_timer_calibrate(void);
52static void lapic_timer_set_divisor(int);
53static void lapic_timer_fixup_handler(void *);
54static void lapic_timer_restart_handler(void *);
55
56void lapic_timer_process(void);
57void lapic_timer_process_frame(struct intrframe *);
58
59static int lapic_timer_enable = 1;
60TUNABLE_INT("hw.lapic_timer_enable", &lapic_timer_enable);
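/*
 * Usage note (sketch): hw.lapic_timer_enable is a boot-time tunable, so it
 * would normally be set from the loader, e.g.
 *
 *	set hw.lapic_timer_enable=0
 *
 * at the loader prompt (or the equivalent loader.conf entry) to keep the
 * existing clock interrupt source instead of switching to the LAPIC timer.
 */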
61
62static void lapic_timer_intr_reload(struct cputimer_intr *, sysclock_t);
63static void lapic_timer_intr_enable(struct cputimer_intr *);
64static void lapic_timer_intr_restart(struct cputimer_intr *);
65static void lapic_timer_intr_pmfixup(struct cputimer_intr *);
66
67static struct cputimer_intr lapic_cputimer_intr = {
68 .freq = 0,
69 .reload = lapic_timer_intr_reload,
70 .enable = lapic_timer_intr_enable,
71 .config = cputimer_intr_default_config,
72 .restart = lapic_timer_intr_restart,
73 .pmfixup = lapic_timer_intr_pmfixup,
74 .initclock = cputimer_intr_default_initclock,
75 .next = SLIST_ENTRY_INITIALIZER,
76 .name = "lapic",
77 .type = CPUTIMER_INTR_LAPIC,
78 .prio = CPUTIMER_INTR_PRIO_LAPIC,
79 .caps = CPUTIMER_INTR_CAP_NONE
80};
81
82static int lapic_timer_divisor_idx = -1;
83static const uint32_t lapic_timer_divisors[] = {
84 APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
85 APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128, APIC_TDCR_1
86};
87#define APIC_TIMER_NDIVISORS (int)(NELEM(lapic_timer_divisors))
88
89/*
90 * APIC ID <-> CPU ID mapping structures.
91 */
92int cpu_id_to_apic_id[NAPICID];
93int apic_id_to_cpu_id[NAPICID];
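/*
 * The CPUID_TO_APICID() and APICID_TO_CPUID() macros used throughout this
 * file presumably resolve to these two arrays, giving O(1) translation in
 * both directions; lapic_set_cpuid() below fills in both sides of a pair.
 */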
94int lapic_enable = 1;
95
96/*
97 * Enable LAPIC, configure interrupts.
98 */
99void
100lapic_init(boolean_t bsp)
101{
102 uint32_t timer;
103 u_int temp;
104
105 /*
106 * Install vectors
107 *
108 * Since IDT is shared between BSP and APs, these vectors
109 * only need to be installed once; we do it on BSP.
110 */
111 if (bsp) {
112 /* Install a 'Spurious INTerrupt' vector */
113 setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
114 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
115
116 /* Install a timer vector */
117 setidt(XTIMER_OFFSET, Xtimer,
118 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
119
120#ifdef SMP
121 /* Install an inter-CPU IPI for TLB invalidation */
122 setidt(XINVLTLB_OFFSET, Xinvltlb,
123 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
124
125 /* Install an inter-CPU IPI for IPIQ messaging */
126 setidt(XIPIQ_OFFSET, Xipiq,
127 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
128
129 /* Install an inter-CPU IPI for CPU stop/restart */
130 setidt(XCPUSTOP_OFFSET, Xcpustop,
131 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
132#endif
133 }
134
135 /*
136 * Setup LINT0 as ExtINT on the BSP. This is theoretically an
137 * aggregate interrupt input from the 8259. The INTA cycle
138 * will be routed to the external controller (the 8259) which
139 * is expected to supply the vector.
140 *
141 * Must be set up edge-triggered, active high.
142 *
143 * Disable LINT0 on BSP, if I/O APIC is enabled.
144 *
145 * Disable LINT0 on the APs. It doesn't matter what delivery
146 * mode we use because we leave it masked.
147 */
148 temp = lapic->lvt_lint0;
149 temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
150 APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
151 if (bsp) {
152 temp |= APIC_LVT_DM_EXTINT;
153 if (ioapic_enable)
154 temp |= APIC_LVT_MASKED;
155 } else {
156 temp |= APIC_LVT_DM_FIXED | APIC_LVT_MASKED;
157 }
158 lapic->lvt_lint0 = temp;
159
160 /*
161 * Setup LINT1 as NMI.
162 *
163 * Must be set up edge-triggered, active high.
164 *
165 * Enable LINT1 on BSP, if I/O APIC is enabled.
166 *
167 * Disable LINT1 on the APs.
168 */
169 temp = lapic->lvt_lint1;
170 temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
171 APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
172 temp |= APIC_LVT_MASKED | APIC_LVT_DM_NMI;
173 if (bsp && ioapic_enable)
174 temp &= ~APIC_LVT_MASKED;
175 lapic->lvt_lint1 = temp;
176
177 /*
178 * Mask the LAPIC error interrupt and the LAPIC performance
179 * counter interrupt.
180 */
181 lapic->lvt_error = lapic->lvt_error | APIC_LVT_MASKED;
182 lapic->lvt_pcint = lapic->lvt_pcint | APIC_LVT_MASKED;
183
184 /*
185 * Set LAPIC timer vector and mask the LAPIC timer interrupt.
186 */
187 timer = lapic->lvt_timer;
188 timer &= ~APIC_LVTT_VECTOR;
189 timer |= XTIMER_OFFSET;
190 timer |= APIC_LVTT_MASKED;
191 lapic->lvt_timer = timer;
192
193 /*
194 * Set the Task Priority Register as needed.  For now allow
195 * interrupts on all cpus (the APs keep interrupts disabled via
196 * CLI until they are ready to handle them).
197 */
198 temp = lapic->tpr;
199 temp &= ~APIC_TPR_PRIO; /* clear priority field */
200 lapic->tpr = temp;
201
202 /*
203 * Enable the LAPIC
204 */
205 temp = lapic->svr;
206 temp |= APIC_SVR_ENABLE; /* enable the LAPIC */
207 temp &= ~APIC_SVR_FOCUS_DISABLE; /* enable lopri focus processor */
208
209 /*
210 * Set the spurious interrupt vector. The low 4 bits of the vector
211 * must be 1111.
212 */
213 if ((XSPURIOUSINT_OFFSET & 0x0F) != 0x0F)
214 panic("bad XSPURIOUSINT_OFFSET: 0x%08x", XSPURIOUSINT_OFFSET);
215 temp &= ~APIC_SVR_VECTOR;
216 temp |= XSPURIOUSINT_OFFSET;
217
218 lapic->svr = temp;
219
220 /*
221 * Pump out a few EOIs to clean out interrupts that got through
222 * before we were able to set the TPR.
223 */
224 lapic->eoi = 0;
225 lapic->eoi = 0;
226 lapic->eoi = 0;
227
228 if (bsp) {
229 lapic_timer_calibrate();
230 if (lapic_timer_enable) {
231 cputimer_intr_register(&lapic_cputimer_intr);
232 cputimer_intr_select(&lapic_cputimer_intr, 0);
233 }
234 } else {
235 lapic_timer_set_divisor(lapic_timer_divisor_idx);
236 }
237
238 if (bootverbose)
239 apic_dump("apic_initialize()");
240}
241
242static void
243lapic_timer_set_divisor(int divisor_idx)
244{
245 KKASSERT(divisor_idx >= 0 && divisor_idx < APIC_TIMER_NDIVISORS);
246 lapic->dcr_timer = lapic_timer_divisors[divisor_idx];
247}
248
249static void
250lapic_timer_oneshot(u_int count)
251{
252 uint32_t value;
253
254 value = lapic->lvt_timer;
255 value &= ~APIC_LVTT_PERIODIC;
256 lapic->lvt_timer = value;
257 lapic->icr_timer = count;
258}
259
260static void
261lapic_timer_oneshot_quick(u_int count)
262{
263 lapic->icr_timer = count;
264}
265
266static void
267lapic_timer_calibrate(void)
268{
269 sysclock_t value;
270
271 /* Try to calibrate the local APIC timer. */
272 for (lapic_timer_divisor_idx = 0;
273 lapic_timer_divisor_idx < APIC_TIMER_NDIVISORS;
274 lapic_timer_divisor_idx++) {
275 lapic_timer_set_divisor(lapic_timer_divisor_idx);
276 lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
277 DELAY(2000000);
278 value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
279 if (value != APIC_TIMER_MAX_COUNT)
280 break;
281 }
282 if (lapic_timer_divisor_idx >= APIC_TIMER_NDIVISORS)
283 panic("lapic: no proper timer divisor?!\n");
284 lapic_cputimer_intr.freq = value / 2;
285
286 kprintf("lapic: divisor index %d, frequency %u Hz\n",
287 lapic_timer_divisor_idx, lapic_cputimer_intr.freq);
288}
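/*
 * Worked example of the calibration above (illustrative numbers only):
 * with divisor index 0 selected, suppose the count register drops by
 * 200,000,000 during the 2,000,000us DELAY().  Then value is
 * 200,000,000 ticks per 2 seconds, and value / 2 records a LAPIC timer
 * frequency of 100,000,000 Hz in lapic_cputimer_intr.freq.
 */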
289
290static void
291lapic_timer_process_oncpu(struct globaldata *gd, struct intrframe *frame)
292{
293 sysclock_t count;
294
295 gd->gd_timer_running = 0;
296
297 count = sys_cputimer->count();
298 if (TAILQ_FIRST(&gd->gd_systimerq) != NULL)
299 systimer_intr(&count, 0, frame);
300}
301
302void
303lapic_timer_process(void)
304{
305 lapic_timer_process_oncpu(mycpu, NULL);
306}
307
308void
309lapic_timer_process_frame(struct intrframe *frame)
310{
311 lapic_timer_process_oncpu(mycpu, frame);
312}
313
314static void
315lapic_timer_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
316{
317 struct globaldata *gd = mycpu;
318
319 reload = (int64_t)reload * cti->freq / sys_cputimer->freq;
320 if (reload < 2)
321 reload = 2;
322
323 if (gd->gd_timer_running) {
324 if (reload < lapic->ccr_timer)
325 lapic_timer_oneshot_quick(reload);
326 } else {
327 gd->gd_timer_running = 1;
328 lapic_timer_oneshot_quick(reload);
329 }
330}
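/*
 * Sketch of the unit conversion above, again with made-up numbers: a
 * reload of 10,000 sys_cputimer ticks at sys_cputimer->freq == 1,000,000 Hz
 * is 10ms; at a LAPIC timer frequency (cti->freq) of 100,000,000 Hz that
 * becomes 1,000,000 LAPIC ticks written into icr_timer.
 */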
331
332static void
333lapic_timer_intr_enable(struct cputimer_intr *cti __unused)
334{
335 uint32_t timer;
336
337 timer = lapic->lvt_timer;
338 timer &= ~(APIC_LVTT_MASKED | APIC_LVTT_PERIODIC);
339 lapic->lvt_timer = timer;
340
341 lapic_timer_fixup_handler(NULL);
342}
343
344static void
345lapic_timer_fixup_handler(void *arg)
346{
347 int *started = arg;
348
349 if (started != NULL)
350 *started = 0;
351
352 if (cpu_vendor_id == CPU_VENDOR_AMD) {
353 /*
354 * Detect the C1E capability found mostly on recent dual-core
355 * (and later) K8 family parts.  C1E renders the local APIC
356 * timer dead, so we disable C1E by reading the Interrupt
357 * Pending Message register and clearing both
358 * C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
359 *
360 * Reference:
361 * "BIOS and Kernel Developer's Guide for AMD NPT
362 * Family 0Fh Processors"
363 * #32559 revision 3.00
364 */
365 if ((cpu_id & 0x00000f00) == 0x00000f00 &&
366 (cpu_id & 0x0fff0000) >= 0x00040000) {
367 uint64_t msr;
368
369 msr = rdmsr(0xc0010055);
370 if (msr & 0x18000000) {
371 struct globaldata *gd = mycpu;
372
373 kprintf("cpu%d: AMD C1E detected\n",
374 gd->gd_cpuid);
375 wrmsr(0xc0010055, msr & ~0x18000000ULL);
376
377 /*
378 * We are kinda stalled;
379 * kick start again.
380 */
381 gd->gd_timer_running = 1;
382 lapic_timer_oneshot_quick(2);
383
384 if (started != NULL)
385 *started = 1;
386 }
387 }
388 }
389}
390
391static void
392lapic_timer_restart_handler(void *dummy __unused)
393{
394 int started;
395
396 lapic_timer_fixup_handler(&started);
397 if (!started) {
398 struct globaldata *gd = mycpu;
399
400 gd->gd_timer_running = 1;
401 lapic_timer_oneshot_quick(2);
402 }
403}
404
405/*
406 * This function is called only by ACPI-CA code currently:
407 * - AMD C1E fixup.  AMD C1E only seems to kick in once the ACPI
408 * module has taken control of PM, so as soon as ACPI-CA is
409 * attached we apply the fixup to keep the LAPIC timer from hanging.
410 */
411static void
412lapic_timer_intr_pmfixup(struct cputimer_intr *cti __unused)
413{
414#ifdef SMP
415 lwkt_send_ipiq_mask(smp_active_mask,
416 lapic_timer_fixup_handler, NULL);
417#else
418 lapic_timer_fixup_handler(NULL);
419#endif
420}
421
422static void
423lapic_timer_intr_restart(struct cputimer_intr *cti __unused)
424{
425#ifdef SMP
426 lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_restart_handler, NULL);
427#else
428 lapic_timer_restart_handler(NULL);
429#endif
430}
431
432
433/*
434 * dump contents of local APIC registers
435 */
436void
437apic_dump(char* str)
438{
439 kprintf("SMP: CPU%d %s:\n", mycpu->gd_cpuid, str);
440 kprintf(" lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
441 lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
442}
443
444#ifdef SMP
445
446/*
447 * Inter Processor Interrupt functions.
448 */
449
450/*
451 * Send APIC IPI 'vector' to 'dest_type' via 'delivery_mode'.
452 *
453 * dest_type is one of: APIC_DEST_SELF, APIC_DEST_ALLISELF, APIC_DEST_ALLESELF
454 * vector is any valid system INT vector
455 * delivery_mode is one of: APIC_DELMODE_FIXED, APIC_DELMODE_LOWPRIO
456 *
457 * A backlog of requests can create a deadlock between cpus. To avoid this
458 * we have to be able to accept IPIs at the same time we are trying to send
459 * them. The critical section prevents us from attempting to send additional
460 * IPIs reentrantly, but also prevents IPIQ processing so we have to call
461 * lwkt_process_ipiq() manually. It's rather messy and expensive for this
462 * to occur but fortunately it does not happen too often.
463 */
464int
465apic_ipi(int dest_type, int vector, int delivery_mode)
466{
467 u_long icr_lo;
468
469 crit_enter();
470 if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
471 unsigned int eflags = read_eflags();
472 cpu_enable_intr();
473 DEBUG_PUSH_INFO("apic_ipi");
474 while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
475 lwkt_process_ipiq();
476 }
477 DEBUG_POP_INFO();
478 write_eflags(eflags);
479 }
480
481 icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) | dest_type |
482 delivery_mode | vector;
483 lapic->icr_lo = icr_lo;
484 crit_exit();
485 return 0;
486}
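/*
 * Hypothetical call site, for illustration only: broadcasting the stop
 * vector to every other cpu might look like
 *
 *	apic_ipi(APIC_DEST_ALLESELF, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
 *
 * i.e. the "all excluding self" destination shorthand with fixed delivery,
 * using one of the IPI vectors installed in lapic_init().
 */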
487
488void
489single_apic_ipi(int cpu, int vector, int delivery_mode)
490{
491 u_long icr_lo;
492 u_long icr_hi;
493
494 crit_enter();
495 if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
496 unsigned int eflags = read_eflags();
497 cpu_enable_intr();
498 DEBUG_PUSH_INFO("single_apic_ipi");
499 while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
500 lwkt_process_ipiq();
501 }
502 DEBUG_POP_INFO();
503 write_eflags(eflags);
504 }
505 icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
506 icr_hi |= (CPUID_TO_APICID(cpu) << 24);
507 lapic->icr_hi = icr_hi;
508
509 /* build ICR_LOW */
510 icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK)
511 | APIC_DEST_DESTFLD | delivery_mode | vector;
512
513 /* write APIC ICR */
514 lapic->icr_lo = icr_lo;
515 crit_exit();
516}
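/*
 * For reference: icr_hi bits 24-31 map to bits 56-63 of the 64-bit ICR,
 * the physical destination field, which is why the target's APIC ID is
 * shifted left by 24 before being written above.
 */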
517
518#if 0
519
520/*
521 * Returns 0 if the apic is busy, 1 if we were able to queue the request.
522 *
523 * NOT WORKING YET! The code as-is may end up not queueing an IPI at all
524 * to the target, and the scheduler does not 'poll' for IPI messages.
525 */
526int
527single_apic_ipi_passive(int cpu, int vector, int delivery_mode)
528{
529 u_long icr_lo;
530 u_long icr_hi;
531
532 crit_enter();
533 if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
534 crit_exit();
535 return(0);
536 }
537 icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
538 icr_hi |= (CPUID_TO_APICID(cpu) << 24);
539 lapic->icr_hi = icr_hi;
540
541 /* build ICR_LOW */
542 icr_lo = (lapic->icr_lo & APIC_RESV2_MASK)
543 | APIC_DEST_DESTFLD | delivery_mode | vector;
544
545 /* write APIC ICR */
546 lapic->icr_lo = icr_lo;
547 crit_exit();
548 return(1);
549}
550
551#endif
552
553/*
554 * Send APIC IPI 'vector' to the cpus in 'target' via 'delivery_mode'.
555 *
556 * target is a bitmask of destination cpus. Vector is any
557 * valid system INT vector. Delivery mode may be either
558 * APIC_DELMODE_FIXED or APIC_DELMODE_LOWPRIO.
559 */
560void
561selected_apic_ipi(cpumask_t target, int vector, int delivery_mode)
562{
563 crit_enter();
564 while (target) {
565 int n = BSFCPUMASK(target);
566 target &= ~CPUMASK(n);
567 single_apic_ipi(n, vector, delivery_mode);
568 }
569 crit_exit();
570}
571
572#endif /* SMP */
573
574/*
575 * Timer code, in development...
576 * - suggested by rgrimes@gndrsh.aac.dev.com
577 */
578int
579get_apic_timer_frequency(void)
580{
581 return(lapic_cputimer_intr.freq);
582}
583
584/*
585 * Load a 'downcount time' in uSeconds.
586 */
587void
588set_apic_timer(int us)
589{
590 u_int count;
591
592 /*
593 * By the time we get here the lapic timer's frequency
594 * and its divisor must have been calculated
595 * (lapic.dcr_timer is set up during the
596 * divisor calculation).
597 */
598 KKASSERT(lapic_cputimer_intr.freq != 0 &&
599 lapic_timer_divisor_idx >= 0);
600
601 count = ((us * (int64_t)lapic_cputimer_intr.freq) + 999999) / 1000000;
602 lapic_timer_oneshot(count);
603}
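/*
 * Example (illustrative numbers): with lapic_cputimer_intr.freq at
 * 100,000,000 Hz, set_apic_timer(50) computes
 * (50 * 100,000,000 + 999999) / 1000000 == 5000 timer ticks; the +999999
 * rounds any fractional tick up so the delay never undershoots.
 */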
604
605
606/*
607 * Read remaining time in timer.
608 */
609int
610read_apic_timer(void)
611{
612#if 0
613 /** XXX FIXME: we need to return the actual remaining time,
614 * for now we just return the remaining count.
615 */
616#else
617 return lapic->ccr_timer;
618#endif
619}
620
621
622/*
623 * Spin-style delay, set delay time in uS, spin till it drains.
624 */
625void
626u_sleep(int count)
627{
628 set_apic_timer(count);
629 while (read_apic_timer())
630 /* spin */ ;
631}
632
633int
634lapic_unused_apic_id(int start)
635{
636 int i;
637
638 for (i = start; i < NAPICID; ++i) {
639 if (APICID_TO_CPUID(i) == -1)
640 return i;
641 }
642 return NAPICID;
643}
644
645void
646lapic_map(vm_offset_t lapic_addr)
647{
648 lapic = pmap_mapdev_uncacheable(lapic_addr, sizeof(struct LAPIC));
649
650 kprintf("lapic: at %p\n", (void *)lapic_addr);
651}
652
653static TAILQ_HEAD(, lapic_enumerator) lapic_enumerators =
654 TAILQ_HEAD_INITIALIZER(lapic_enumerators);
655
656int
657lapic_config(void)
658{
659 struct lapic_enumerator *e;
660 int error, i, ap_max;
661
662 KKASSERT(lapic_enable);
663
664 for (i = 0; i < NAPICID; ++i)
665 APICID_TO_CPUID(i) = -1;
666
667 TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
668 error = e->lapic_probe(e);
669 if (!error)
670 break;
671 }
672 if (e == NULL) {
673 kprintf("LAPIC: Can't find LAPIC\n");
674 return ENXIO;
675 }
676
677 e->lapic_enumerate(e);
678
679 ap_max = MAXCPU - 1;
680 TUNABLE_INT_FETCH("hw.ap_max", &ap_max);
681 if (ap_max > MAXCPU - 1)
682 ap_max = MAXCPU - 1;
683
684 if (naps > ap_max) {
685 kprintf("LAPIC: Warning: only using %d of %d "
686 "available APs\n", ap_max, naps);
687 naps = ap_max;
688 }
689
690 return 0;
691}
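/*
 * Usage note (sketch): hw.ap_max is fetched as a loader tunable above, so
 * capping a machine to e.g. two APs would typically be done with
 * hw.ap_max="2" in loader.conf; values above MAXCPU - 1 are clamped.
 */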
692
693void
694lapic_enumerator_register(struct lapic_enumerator *ne)
695{
696 struct lapic_enumerator *e;
697
698 TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
699 if (e->lapic_prio < ne->lapic_prio) {
700 TAILQ_INSERT_BEFORE(e, ne, lapic_link);
701 return;
702 }
703 }
704 TAILQ_INSERT_TAIL(&lapic_enumerators, ne, lapic_link);
705}
706
707void
708lapic_set_cpuid(int cpu_id, int apic_id)
709{
710 CPUID_TO_APICID(cpu_id) = apic_id;
711 APICID_TO_CPUID(apic_id) = cpu_id;
712}
713
714void
715lapic_fixup_noioapic(void)
716{
717 u_int temp;
718
719 /* Only allowed on BSP */
720 KKASSERT(mycpuid == 0);
721 KKASSERT(!ioapic_enable);
722
723 temp = lapic->lvt_lint0;
724 temp &= ~APIC_LVT_MASKED;
725 lapic->lvt_lint0 = temp;
726
727 temp = lapic->lvt_lint1;
728 temp |= APIC_LVT_MASKED;
729 lapic->lvt_lint1 = temp;
730}
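/*
 * Intent (inferred from the code and the commit message): when no I/O APIC
 * is used, the 8259 ExtINT path through LINT0 must stay unmasked on the
 * BSP so device interrupts keep flowing, while the LINT1/NMI input is
 * left masked.
 */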
731
732static void
733lapic_sysinit(void *dummy __unused)
734{
735 if (lapic_enable) {
736 int error;
737
738 error = lapic_config();
739 if (error)
740 lapic_enable = 0;
741 }
742
743 if (lapic_enable) {
744 /* Initialize BSP's local APIC */
745 lapic_init(TRUE);
746 } else if (ioapic_enable) {
747 ioapic_enable = 0;
748 icu_reinit_noioapic();
749 }
750}
751SYSINIT(lapic, SI_BOOT2_LAPIC, SI_ORDER_FIRST, lapic_sysinit, NULL)