X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/blobdiff_plain/7295dc4694612795b7574965e1527509404c9c0f..759fc5332c2b97c3a48b5aefb94b6cab57de07a4:/sys/platform/pc64/icu/icu_abi.c diff --git a/sys/platform/pc64/icu/icu_abi.c b/sys/platform/pc64/icu/icu_abi.c index 95ead43a9f..db6e8f39c4 100644 --- a/sys/platform/pc64/icu/icu_abi.c +++ b/sys/platform/pc64/icu/icu_abi.c @@ -44,172 +44,545 @@ #include #include #include +#include #include #include #include -#include +#include #include +#include +#include #include -#include "icu.h" -#include "icu_ipl.h" +#include -#ifndef APIC_IO - -extern void ICU_INTREN(int); -extern void ICU_INTRDIS(int); +#include +#include +#include extern inthand_t - IDTVEC(icu_fastintr0), IDTVEC(icu_fastintr1), - IDTVEC(icu_fastintr2), IDTVEC(icu_fastintr3), - IDTVEC(icu_fastintr4), IDTVEC(icu_fastintr5), - IDTVEC(icu_fastintr6), IDTVEC(icu_fastintr7), - IDTVEC(icu_fastintr8), IDTVEC(icu_fastintr9), - IDTVEC(icu_fastintr10), IDTVEC(icu_fastintr11), - IDTVEC(icu_fastintr12), IDTVEC(icu_fastintr13), - IDTVEC(icu_fastintr14), IDTVEC(icu_fastintr15); - -static int icu_vectorctl(int, int, int); -static int icu_setvar(int, const void *); -static int icu_getvar(int, void *); -static void icu_finalize(void); -static void icu_cleanup(void); - -static inthand_t *icu_fastintr[ICU_HWI_VECTORS] = { - &IDTVEC(icu_fastintr0), &IDTVEC(icu_fastintr1), - &IDTVEC(icu_fastintr2), &IDTVEC(icu_fastintr3), - &IDTVEC(icu_fastintr4), &IDTVEC(icu_fastintr5), - &IDTVEC(icu_fastintr6), &IDTVEC(icu_fastintr7), - &IDTVEC(icu_fastintr8), &IDTVEC(icu_fastintr9), - &IDTVEC(icu_fastintr10), &IDTVEC(icu_fastintr11), - &IDTVEC(icu_fastintr12), &IDTVEC(icu_fastintr13), - &IDTVEC(icu_fastintr14), &IDTVEC(icu_fastintr15) + IDTVEC(icu_intr0), IDTVEC(icu_intr1), + IDTVEC(icu_intr2), IDTVEC(icu_intr3), + IDTVEC(icu_intr4), IDTVEC(icu_intr5), + IDTVEC(icu_intr6), IDTVEC(icu_intr7), + IDTVEC(icu_intr8), IDTVEC(icu_intr9), + IDTVEC(icu_intr10), IDTVEC(icu_intr11), + IDTVEC(icu_intr12), IDTVEC(icu_intr13), + IDTVEC(icu_intr14), IDTVEC(icu_intr15); + +static inthand_t *icu_intr[ICU_HWI_VECTORS] = { + &IDTVEC(icu_intr0), &IDTVEC(icu_intr1), + &IDTVEC(icu_intr2), &IDTVEC(icu_intr3), + &IDTVEC(icu_intr4), &IDTVEC(icu_intr5), + &IDTVEC(icu_intr6), &IDTVEC(icu_intr7), + &IDTVEC(icu_intr8), &IDTVEC(icu_intr9), + &IDTVEC(icu_intr10), &IDTVEC(icu_intr11), + &IDTVEC(icu_intr12), &IDTVEC(icu_intr13), + &IDTVEC(icu_intr14), &IDTVEC(icu_intr15) }; -struct machintr_abi MachIntrABI = { - MACHINTR_ICU, - .intrdis = ICU_INTRDIS, - .intren = ICU_INTREN, - .vectorctl =icu_vectorctl, - .setvar = icu_setvar, - .getvar = icu_getvar, - .finalize = icu_finalize, - .cleanup = icu_cleanup +static struct icu_irqmap { + int im_type; /* ICU_IMT_ */ + enum intr_trigger im_trig; + int im_msi_base; +} icu_irqmaps[MAXCPU][IDT_HWI_VECTORS]; + +static struct lwkt_token icu_irqmap_tok = + LWKT_TOKEN_INITIALIZER(icu_irqmap_token); + +#define ICU_IMT_UNUSED 0 /* KEEP THIS */ +#define ICU_IMT_RESERVED 1 +#define ICU_IMT_LINE 2 +#define ICU_IMT_SYSCALL 3 +#define ICU_IMT_MSI 4 + +#define ICU_IMT_ISHWI(map) ((map)->im_type != ICU_IMT_RESERVED && \ + (map)->im_type != ICU_IMT_SYSCALL) + +extern void ICU_INTREN(int); +extern void ICU_INTRDIS(int); + +extern int imcr_present; + +static void icu_abi_intr_enable(int); +static void icu_abi_intr_disable(int); +static void icu_abi_intr_setup(int, int); +static void icu_abi_intr_teardown(int); +static void icu_abi_intr_config(int, enum intr_trigger, enum intr_polarity); +static int icu_abi_intr_cpuid(int); 
+ +static int icu_abi_msi_alloc(int [], int, int); +static void icu_abi_msi_release(const int [], int, int); +static void icu_abi_msi_map(int, uint64_t *, uint32_t *, int); + +static void icu_abi_finalize(void); +static void icu_abi_cleanup(void); +static void icu_abi_setdefault(void); +static void icu_abi_stabilize(void); +static void icu_abi_initmap(void); +static void icu_abi_rman_setup(struct rman *); + +struct machintr_abi MachIntrABI_ICU = { + MACHINTR_ICU, + .intr_disable = icu_abi_intr_disable, + .intr_enable = icu_abi_intr_enable, + .intr_setup = icu_abi_intr_setup, + .intr_teardown = icu_abi_intr_teardown, + .intr_config = icu_abi_intr_config, + .intr_cpuid = icu_abi_intr_cpuid, + + .msi_alloc = icu_abi_msi_alloc, + .msi_release = icu_abi_msi_release, + .msi_map = icu_abi_msi_map, + + .finalize = icu_abi_finalize, + .cleanup = icu_abi_cleanup, + .setdefault = icu_abi_setdefault, + .stabilize = icu_abi_stabilize, + .initmap = icu_abi_initmap, + .rman_setup = icu_abi_rman_setup }; -static int icu_imcr_present; +static int icu_abi_msi_start; /* NOTE: for testing only */ /* * WARNING! SMP builds can use the ICU now so this code must be MP safe. */ -static -int -icu_setvar(int varid, const void *buf) + +static void +icu_abi_intr_enable(int irq) { - int error = 0; - - switch(varid) { - case MACHINTR_VAR_IMCR_PRESENT: - icu_imcr_present = *(const int *)buf; - break; - default: - error = ENOENT; - break; - } - return (error); + const struct icu_irqmap *map; + + KASSERT(irq >= 0 && irq < IDT_HWI_VECTORS, + ("icu enable, invalid irq %d\n", irq)); + + map = &icu_irqmaps[mycpuid][irq]; + KASSERT(ICU_IMT_ISHWI(map), + ("icu enable, not hwi irq %d, type %d, cpu%d\n", + irq, map->im_type, mycpuid)); + if (map->im_type != ICU_IMT_LINE) + return; + + ICU_INTREN(irq); } -static -int -icu_getvar(int varid, void *buf) +static void +icu_abi_intr_disable(int irq) { - int error = 0; - - switch(varid) { - case MACHINTR_VAR_IMCR_PRESENT: - *(int *)buf = icu_imcr_present; - break; - default: - error = ENOENT; - break; - } - return (error); + const struct icu_irqmap *map; + + KASSERT(irq >= 0 && irq < IDT_HWI_VECTORS, + ("icu disable, invalid irq %d\n", irq)); + + map = &icu_irqmaps[mycpuid][irq]; + KASSERT(ICU_IMT_ISHWI(map), + ("icu disable, not hwi irq %d, type %d, cpu%d\n", + irq, map->im_type, mycpuid)); + if (map->im_type != ICU_IMT_LINE) + return; + + ICU_INTRDIS(irq); } /* * Called before interrupts are physically enabled */ static void -icu_finalize(void) +icu_abi_stabilize(void) { - int intr; - - for (intr = 0; intr < ICU_HWI_VECTORS; ++intr) { - machintr_intrdis(intr); - } - machintr_intren(ICU_IRQ_SLAVE); - - /* - * If an IMCR is present, programming bit 0 disconnects the 8259 - * from the BSP. The 8259 may still be connected to LINT0 on the BSP's - * LAPIC. - * - * If we are running SMP the LAPIC is active, try to use virtual wire - * mode so we can use other interrupt sources within the LAPIC in - * addition to the 8259. - */ - if (icu_imcr_present) { -#if defined(SMP) - outb(0x22, 0x70); - outb(0x23, 0x01); -#endif - } + int intr; + + for (intr = 0; intr < ICU_HWI_VECTORS; ++intr) + ICU_INTRDIS(intr); + ICU_INTREN(ICU_IRQ_SLAVE); } /* * Called after interrupts physically enabled but before the * critical section is released. */ -static -void -icu_cleanup(void) +static void +icu_abi_cleanup(void) +{ + bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending)); +} + +/* + * Called after stablize and cleanup; critical section is not + * held and interrupts are not physically disabled. 
+ */ +static void +icu_abi_finalize(void) +{ + KKASSERT(MachIntrABI.type == MACHINTR_ICU); + KKASSERT(!ioapic_enable); + + /* + * If an IMCR is present, programming bit 0 disconnects the 8259 + * from the BSP. The 8259 may still be connected to LINT0 on the + * BSP's LAPIC. + * + * If we are running SMP the LAPIC is active, try to use virtual + * wire mode so we can use other interrupt sources within the LAPIC + * in addition to the 8259. + */ + if (imcr_present) { + outb(0x22, 0x70); + outb(0x23, 0x01); + } +} + +static void +icu_abi_intr_setup(int intr, int flags) +{ + const struct icu_irqmap *map; + register_t ef; + + KASSERT(intr >= 0 && intr < IDT_HWI_VECTORS, + ("icu setup, invalid irq %d\n", intr)); + + map = &icu_irqmaps[mycpuid][intr]; + KASSERT(ICU_IMT_ISHWI(map), + ("icu setup, not hwi irq %d, type %d, cpu%d\n", + intr, map->im_type, mycpuid)); + if (map->im_type != ICU_IMT_LINE) + return; + + ef = read_rflags(); + cpu_disable_intr(); + + ICU_INTREN(intr); + + write_rflags(ef); +} + +static void +icu_abi_intr_teardown(int intr) +{ + const struct icu_irqmap *map; + register_t ef; + + KASSERT(intr >= 0 && intr < IDT_HWI_VECTORS, + ("icu teardown, invalid irq %d\n", intr)); + + map = &icu_irqmaps[mycpuid][intr]; + KASSERT(ICU_IMT_ISHWI(map), + ("icu teardown, not hwi irq %d, type %d, cpu%d\n", + intr, map->im_type, mycpuid)); + if (map->im_type != ICU_IMT_LINE) + return; + + ef = read_rflags(); + cpu_disable_intr(); + + ICU_INTRDIS(intr); + + write_rflags(ef); +} + +static void +icu_abi_setdefault(void) +{ + int intr; + + for (intr = 0; intr < ICU_HWI_VECTORS; ++intr) { + if (intr == ICU_IRQ_SLAVE) + continue; + setidt_global(IDT_OFFSET + intr, icu_intr[intr], + SDT_SYSIGT, SEL_KPL, 0); + } +} + +static void +icu_abi_initmap(void) +{ + int cpu; + + kgetenv_int("hw.icu.msi_start", &icu_abi_msi_start); + icu_abi_msi_start &= ~0x1f; /* MUST be 32 aligned */ + + /* + * NOTE: ncpus is not ready yet + */ + for (cpu = 0; cpu < MAXCPU; ++cpu) { + int i; + + if (cpu != 0) { + for (i = 0; i < ICU_HWI_VECTORS; ++i) + icu_irqmaps[cpu][i].im_type = ICU_IMT_RESERVED; + } else { + for (i = 0; i < ICU_HWI_VECTORS; ++i) + icu_irqmaps[cpu][i].im_type = ICU_IMT_LINE; + icu_irqmaps[cpu][ICU_IRQ_SLAVE].im_type = + ICU_IMT_RESERVED; + + if (elcr_found) { + for (i = 0; i < ICU_HWI_VECTORS; ++i) { + icu_irqmaps[cpu][i].im_trig = + elcr_read_trigger(i); + } + } else { + /* + * NOTE: Trigger mode does not matter at all + */ + for (i = 0; i < ICU_HWI_VECTORS; ++i) { + icu_irqmaps[cpu][i].im_trig = + INTR_TRIGGER_EDGE; + } + } + } + + for (i = 0; i < IDT_HWI_VECTORS; ++i) + icu_irqmaps[cpu][i].im_msi_base = -1; + + icu_irqmaps[cpu][IDT_OFFSET_SYSCALL - IDT_OFFSET].im_type = + ICU_IMT_SYSCALL; + } +} + +static void +icu_abi_intr_config(int irq, enum intr_trigger trig, + enum intr_polarity pola __unused) +{ + struct icu_irqmap *map; + + KKASSERT(trig == INTR_TRIGGER_EDGE || trig == INTR_TRIGGER_LEVEL); + + KKASSERT(irq >= 0 && irq < IDT_HWI_VECTORS); + map = &icu_irqmaps[0][irq]; + + KKASSERT(map->im_type == ICU_IMT_LINE); + + /* TODO: Check whether it is configured or not */ + + if (trig == map->im_trig) + return; + + if (bootverbose) { + kprintf("ICU: irq %d, %s -> %s\n", irq, + intr_str_trigger(map->im_trig), + intr_str_trigger(trig)); + } + map->im_trig = trig; + + if (!elcr_found) { + if (bootverbose) + kprintf("ICU: no ELCR, skip irq %d config\n", irq); + return; + } + elcr_write_trigger(irq, map->im_trig); +} + +static int +icu_abi_intr_cpuid(int irq __unused) +{ + return 0; +} + +static void 
+icu_abi_rman_setup(struct rman *rm) { - mdcpu->gd_fpending = 0; + int start, end, i; + + KASSERT(rm->rm_cpuid >= 0 && rm->rm_cpuid < MAXCPU, + ("invalid rman cpuid %d", rm->rm_cpuid)); + + start = end = -1; + for (i = 0; i < IDT_HWI_VECTORS; ++i) { + const struct icu_irqmap *map = &icu_irqmaps[rm->rm_cpuid][i]; + + if (start < 0) { + if (ICU_IMT_ISHWI(map)) + start = end = i; + } else { + if (ICU_IMT_ISHWI(map)) { + end = i; + } else { + KKASSERT(end >= 0); + if (bootverbose) { + kprintf("ICU: rman cpu%d %d - %d\n", + rm->rm_cpuid, start, end); + } + if (rman_manage_region(rm, start, end)) { + panic("rman_manage_region" + "(cpu%d %d - %d)", rm->rm_cpuid, + start, end); + } + start = end = -1; + } + } + } + if (start >= 0) { + KKASSERT(end >= 0); + if (bootverbose) { + kprintf("ICU: rman cpu%d %d - %d\n", + rm->rm_cpuid, start, end); + } + if (rman_manage_region(rm, start, end)) { + panic("rman_manage_region(cpu%d %d - %d)", + rm->rm_cpuid, start, end); + } + } } +static int +icu_abi_msi_alloc(int intrs[], int count, int cpuid) +{ + int i, error; + + KASSERT(cpuid >= 0 && cpuid < ncpus, + ("invalid cpuid %d", cpuid)); + + KASSERT(count > 0 && count <= 32, ("invalid count %d\n", count)); + KASSERT((count & (count - 1)) == 0, + ("count %d is not power of 2\n", count)); + + lwkt_gettoken(&icu_irqmap_tok); + + /* + * NOTE: + * Since IDT_OFFSET is 32, which is the maximum valid 'count', + * we do not need to find out the first properly aligned + * interrupt vector. + */ + + error = EMSGSIZE; + for (i = icu_abi_msi_start; i < IDT_HWI_VECTORS; i += count) { + int j; + + if (icu_irqmaps[cpuid][i].im_type != ICU_IMT_UNUSED) + continue; + + for (j = 1; j < count; ++j) { + if (icu_irqmaps[cpuid][i + j].im_type != ICU_IMT_UNUSED) + break; + } + if (j != count) + continue; + + for (j = 0; j < count; ++j) { + struct icu_irqmap *map; + int intr = i + j; + + map = &icu_irqmaps[cpuid][intr]; + KASSERT(map->im_msi_base < 0, + ("intr %d, stale MSI-base %d\n", + intr, map->im_msi_base)); + + map->im_type = ICU_IMT_MSI; + map->im_msi_base = i; -static -int -icu_vectorctl(int op, int intr, int flags) + intrs[j] = intr; + msi_setup(intr, cpuid); + + if (bootverbose) { + kprintf("alloc MSI intr %d on cpu%d\n", + intr, cpuid); + } + + } + error = 0; + break; + } + + lwkt_reltoken(&icu_irqmap_tok); + + return error; +} + +static void +icu_abi_msi_release(const int intrs[], int count, int cpuid) { - int error; - register_t ef; - - if (intr < 0 || intr >= ICU_HWI_VECTORS || intr == ICU_IRQ_SLAVE) - return (EINVAL); - - ef = read_rflags(); - cpu_disable_intr(); - error = 0; - - switch(op) { - case MACHINTR_VECTOR_SETUP: - setidt(IDT_OFFSET + intr, icu_fastintr[intr], SDT_SYSIGT, SEL_KPL, 0); - machintr_intren(intr); - break; - case MACHINTR_VECTOR_TEARDOWN: - case MACHINTR_VECTOR_SETDEFAULT: - setidt(IDT_OFFSET + intr, icu_fastintr[intr], SDT_SYSIGT, SEL_KPL, 0); - machintr_intrdis(intr); - break; - default: - error = EOPNOTSUPP; - break; - } - write_rflags(ef); - return (error); + int i, msi_base = -1, intr_next = -1, mask; + + KASSERT(cpuid >= 0 && cpuid < ncpus, + ("invalid cpuid %d", cpuid)); + + KASSERT(count > 0 && count <= 32, ("invalid count %d\n", count)); + + mask = count - 1; + KASSERT((count & mask) == 0, ("count %d is not power of 2\n", count)); + + lwkt_gettoken(&icu_irqmap_tok); + + for (i = 0; i < count; ++i) { + struct icu_irqmap *map; + int intr = intrs[i]; + + KASSERT(intr >= 0 && intr < IDT_HWI_VECTORS, + ("invalid intr %d\n", intr)); + + map = &icu_irqmaps[cpuid][intr]; + KASSERT(map->im_type == 
ICU_IMT_MSI, + ("try release non-MSI intr %d, type %d\n", + intr, map->im_type)); + KASSERT(map->im_msi_base >= 0 && map->im_msi_base <= intr, + ("intr %d, invalid MSI-base %d\n", intr, map->im_msi_base)); + KASSERT((map->im_msi_base & mask) == 0, + ("intr %d, MSI-base %d is not proper aligned %d\n", + intr, map->im_msi_base, count)); + + if (msi_base < 0) { + msi_base = map->im_msi_base; + } else { + KASSERT(map->im_msi_base == msi_base, + ("intr %d, inconsistent MSI-base, " + "was %d, now %d\n", + intr, msi_base, map->im_msi_base)); + } + + if (intr_next < intr) + intr_next = intr; + + map->im_type = ICU_IMT_UNUSED; + map->im_msi_base = -1; + + if (bootverbose) + kprintf("release MSI intr %d on cpu%d\n", intr, cpuid); + } + + KKASSERT(intr_next > 0); + KKASSERT(msi_base >= 0); + + ++intr_next; + if (intr_next < IDT_HWI_VECTORS) { + const struct icu_irqmap *map = &icu_irqmaps[cpuid][intr_next]; + + if (map->im_type == ICU_IMT_MSI) { + KASSERT(map->im_msi_base != msi_base, + ("more than %d MSI was allocated\n", count)); + } + } + + lwkt_reltoken(&icu_irqmap_tok); } -#endif +static void +icu_abi_msi_map(int intr, uint64_t *addr, uint32_t *data, int cpuid) +{ + const struct icu_irqmap *map; + + KASSERT(cpuid >= 0 && cpuid < ncpus, + ("invalid cpuid %d", cpuid)); + + KASSERT(intr >= 0 && intr < IDT_HWI_VECTORS, + ("invalid intr %d\n", intr)); + + lwkt_gettoken(&icu_irqmap_tok); + + map = &icu_irqmaps[cpuid][intr]; + KASSERT(map->im_type == ICU_IMT_MSI, + ("try map non-MSI intr %d, type %d\n", intr, map->im_type)); + KASSERT(map->im_msi_base >= 0 && map->im_msi_base <= intr, + ("intr %d, invalid MSI-base %d\n", intr, map->im_msi_base)); + + msi_map(map->im_msi_base, addr, data, cpuid); + + if (bootverbose) + kprintf("map MSI intr %d on cpu%d\n", intr, cpuid); + + lwkt_reltoken(&icu_irqmap_tok); +}
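
The new icu_abi_msi_alloc() above relies on the fact that MSI blocks are power-of-2 sized (1..32) and must start on a base vector aligned to the block size, so a scan that steps by 'count' from an aligned starting vector can never miss a valid placement. The following is a minimal standalone sketch of that search strategy only — it is not DragonFly kernel code, and every name in it (toy_msi_alloc, toy_map, TOY_NVECS, etc.) is made up for illustration.

/*
 * Illustration of the aligned, power-of-2 block search used by
 * icu_abi_msi_alloc(): scan in steps of 'count' from an aligned
 * start, claim the first block whose slots are all unused.
 */
#include <stdio.h>

#define TOY_NVECS   64
#define TOY_UNUSED  0
#define TOY_MSI     1

static int toy_map[TOY_NVECS];          /* 0 = free, 1 = allocated MSI */

/* Returns the aligned base vector of a free block, or -1 on failure. */
static int
toy_msi_alloc(int count, int start)
{
	int i, j;

	start &= ~(count - 1);          /* keep the scan block-aligned */
	for (i = start; i + count <= TOY_NVECS; i += count) {
		for (j = 0; j < count; ++j) {
			if (toy_map[i + j] != TOY_UNUSED)
				break;
		}
		if (j != count)
			continue;       /* block partially used; next one */
		for (j = 0; j < count; ++j)
			toy_map[i + j] = TOY_MSI;
		return i;
	}
	return -1;
}

int
main(void)
{
	printf("4-vector block at %d\n", toy_msi_alloc(4, 0));
	printf("8-vector block at %d\n", toy_msi_alloc(8, 0));
	return 0;
}

Because both the start vector and the step are multiples of 'count', alignment holds for every candidate base, which is why the real icu_abi_msi_alloc() notes that it never has to search for "the first properly aligned interrupt vector" separately.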