/* _NVRM_COPYRIGHT_BEGIN_
 *
 * Copyright 2001-2002 by NVIDIA Corporation. All rights reserved. All
 * information contained herein is proprietary and confidential to NVIDIA
 * Corporation. Any use, reproduction, or disclosure without the written
 * permission of NVIDIA Corporation is prohibited.
 *
 * _NVRM_COPYRIGHT_END_
 */

#include "nv-misc.h"
#include "os-interface.h"
#include "nv.h"
#include "nv-freebsd.h"

#if defined(NVCPU_X86) && defined(NV_USE_OS_VM86_INT10CALL)
#include <machine/vm86.h>
#endif

MALLOC_DEFINE(M_NV_STACK, "nvstack", "NVidia stack");

static nv_stack_t *__nvidia_init_sp = NULL;

devclass_t nvidia_devclass;
nv_state_t nvidia_ctl_state;

int nvidia_attach(device_t dev)
{
    int status;
    U032 i;
    struct nvidia_softc *sc;
    nv_state_t *nv;

    sc = device_get_softc(dev);
    nv = sc->nv_state;

    nv->os_state       = sc;
    nv->flags          = 0;
    nv->bus            = pci_get_bus(dev);
    nv->slot           = pci_get_slot(dev);
    nv->vendor_id      = pci_get_vendor(dev);
    nv->device_id      = pci_get_device(dev);
    nv->interrupt_line = pci_get_irq(dev);
    nv->handle         = dev;

    callout_init(&sc->timer_ch);

    for (i = 0; i < NV_GPU_NUM_BARS; i++) {
        if (sc->BAR_recs[i] != NULL) {
            nv->bars[i].address = rman_get_start(sc->BAR_recs[i]);
            nv->bars[i].size = rman_get_size(sc->BAR_recs[i]);
        }
    }

    nv->fb = &nv->bars[NV_GPU_BAR_INDEX_FB];
    nv->regs = &nv->bars[NV_GPU_BAR_INDEX_REGS];

    pci_enable_io(dev, SYS_RES_MEMORY);

    if ((status = rm_is_supported_device(sc->attach_sp, nv)) != RM_OK) {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: The NVIDIA GPU %02x:%02x (PCI ID: %04x:%04x) installed\n"
            "NVRM: in this system is not supported by the %s NVIDIA FreeBSD\n"
            "NVRM: graphics driver release. Please see 'Appendix A -\n"
            "NVRM: Supported NVIDIA GPU Products' in this release's README,\n"
            "NVRM: available on the FreeBSD graphics driver download page at\n"
            "NVRM: www.nvidia.com.\n",
            nv->bus, nv->slot, nv->vendor_id, nv->device_id,
            NV_VERSION_STRING);
        return ENXIO;
    }

    if ((status = nvidia_dev_attach(sc)) != 0)
        return status;

    if ((status = nvidia_ctl_attach()) != 0)
        return status;

    nv_sysctl_init(nv);

    return 0;
}

int nvidia_detach(device_t dev)
{
    int status;
    struct nvidia_softc *sc;

    sc = device_get_softc(dev);
    nv_sysctl_exit(sc->nv_state);

    status = nvidia_dev_detach(sc);
    if (status) {
        device_printf(dev, "NVRM: NVIDIA driver DEV detach failed.\n");
        goto fail;
    }

    status = nvidia_ctl_detach();
    if (status) {
        device_printf(dev, "NVRM: NVIDIA driver CTL detach failed.\n");
        goto fail;
    }

fail:
    /* XXX Fix me? (state) */
    return status;
}
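/*
 * ACPI power management entry points: suspend and resume forward the
 * request to the resource manager via rm_power_management(); suspend
 * additionally refuses to run unless the ACPI driver is attached.
 */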
#ifdef NV_SUPPORT_ACPI_PM
int nvidia_suspend(device_t dev)
{
    nv_stack_t *sp;
    struct nvidia_softc *sc;
    nv_state_t *nv;
    int status = RM_ERROR;

    /* Only if ACPI is running */
    if (devclass_get_softc(devclass_find("ACPI"), 0) == NULL)
        return ENODEV;

    NV_UMA_ZONE_ALLOC_STACK(sp);
    if (sp == NULL)
        return ENOMEM;

    sc = device_get_softc(dev);
    nv = sc->nv_state;

    NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, TRUE);
    status = rm_power_management(sp, nv, 0, NV_PM_ACPI_STANDBY);

    NV_UMA_ZONE_FREE_STACK(sp);

    return status;
}

int nvidia_resume(device_t dev)
{
    nv_stack_t *sp;
    struct nvidia_softc *sc;
    nv_state_t *nv;
    int status = RM_ERROR;

    NV_UMA_ZONE_ALLOC_STACK(sp);
    if (sp == NULL)
        return ENOMEM;

    sc = device_get_softc(dev);
    nv = sc->nv_state;

    NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, TRUE);
    status = rm_power_management(sp, nv, 0, NV_PM_ACPI_RESUME);

    NV_UMA_ZONE_FREE_STACK(sp);

    return status;
}
#endif /* NV_SUPPORT_ACPI_PM */

int nvidia_alloc_hardware(device_t dev)
{
    int error = 0;
    struct nvidia_softc *sc;
    U032 flags, i;

    sc = device_get_softc(dev);
    sc->dev = dev;

    flags = 0; /* not RF_ACTIVE */
    for (i = 0; i < NV_GPU_NUM_BARS && sc->BAR_rids[i] != 0; i++) {
        struct resource *res;
        res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->BAR_rids[i], flags);
        if (res == NULL) {
            /*
             * The most likely reason for this failure is that the SBIOS failed
             * to assign a valid address range to this BAR; FreeBSD is unable to
             * correct the problem and fails this BUS resource allocation. We
             * trust the kernel with BAR validation at this point, but later try
             * to catch cases where the X server "corrects" "invalid" BARs.
             *
             * Please refer to nvidia_pci_check_config_space() in nvidia_pci.c
             * for additional information.
             */
            device_printf(dev,
                "NVRM: NVIDIA MEM resource alloc failed, BAR%d @ 0x%02x.\n",
                i, sc->nv_state->bars[i].offset);
            error = ENXIO;
            goto fail;
        }
        sc->BAR_recs[i] = res;
    }

    flags = RF_SHAREABLE | RF_ACTIVE;
    sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, flags);
    if (sc->irq == NULL) {
        device_printf(dev, "NVRM: NVIDIA IRQ resource alloc failed.\n");
        error = ENXIO;
        goto fail;
    }

fail:
    return (error);
}

void nvidia_free_hardware(device_t dev)
{
    struct nvidia_softc *sc;
    U032 i;

    sc = device_get_softc(dev);

    for (i = 0; i < NV_GPU_NUM_BARS && sc->BAR_recs[i] != NULL; i++)
        bus_release_resource(dev, SYS_RES_MEMORY, sc->BAR_rids[i], sc->BAR_recs[i]);
    if (sc->irq != NULL)
        bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
    if (sc->iop != NULL)
        bus_release_resource(dev, SYS_RES_IOPORT, sc->iop_rid, sc->iop);
}

void nvidia_intr(void *xsc)
{
    struct nvidia_softc *sc;
    nv_state_t *nv;
    U032 run_bottom_half = 0;
    nv_stack_t *sp;

    sc = (struct nvidia_softc *) xsc;
    nv = sc->nv_state;

    sp = sc->isr_sp;
    if (sp == NULL)
        return;

    NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, FALSE);

    rm_isr(sp, nv, &run_bottom_half);
    if (run_bottom_half) {
        /* We're not executing in an HW ISR context */
        rm_isr_bh(sp, nv);
    }
}

int nvidia_get_card_info(struct nv_ioctl_card_info *ci)
{
    unsigned int i;
    struct nvidia_softc *sc;
    nv_state_t *nv;
    /*
     * Clients supporting versioning will pass version magic in the first
     * card information field.
     */
    struct nv_ioctl_rm_api_old_version *av = (void *) ci;
    int status = 0;

    switch (av->magic) {
        case NV_RM_API_OLD_VERSION_MAGIC_OVERRIDE_REQ:
        case NV_RM_API_OLD_VERSION_MAGIC_LAX_REQ:
        case NV_RM_API_OLD_VERSION_MAGIC_REQ:
            /*
             * the client is using the old major-minor-patch API
             * version check; reject it.
             */
            nv_printf(NV_DBG_ERRORS,
                "NVRM: API mismatch: the client has the version %d.%d-%d, but\n"
                "NVRM: this kernel module has the version %s. Please\n"
                "NVRM: make sure that this kernel module and all NVIDIA driver\n"
                "NVRM: components have the same version.\n",
                av->major, av->minor, av->patch, NV_VERSION_STRING);
            status = -EINVAL;
            break;

        case NV_RM_API_OLD_VERSION_MAGIC_IGNORE:
            /*
             * the client is telling us to ignore the old version
             * scheme; it will do a version check via
             * NV_ESC_CHECK_VERSION_STR
             */
            break;

        default:
            status = -EINVAL;
            break;
    }

    /* clear the card information structure for all NV_MAX_DEVICES entries */
    memset(ci, 0, sizeof(*ci) * NV_MAX_DEVICES);

    for (i = 0; i < NV_MAX_DEVICES; i++) {
        sc = devclass_get_softc(nvidia_devclass, i);
        if (!sc)
            continue;
        nv = sc->nv_state;

        ci[i].flags = (NV_IOCTL_CARD_INFO_FLAG_PRESENT |
                       NV_IOCTL_CARD_INFO_FLAG_NEED_MSYNC);
        ci[i].bus = nv->bus;
        ci[i].slot = nv->slot;
        ci[i].vendor_id = nv->vendor_id;
        ci[i].device_id = nv->device_id;
        ci[i].interrupt_line = nv->interrupt_line;
        ci[i].fb_address = nv->fb->address;
        ci[i].fb_size = nv->fb->size;
        ci[i].reg_address = nv->regs->address;
        ci[i].reg_size = nv->regs->size;
    }

    return status;
}
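/*
 * Main ioctl dispatcher for the control and per-GPU device nodes. Commands
 * wrapped in NV_ESC_IOCTL_XFER_CMD carry the real command number and a
 * user-space argument pointer in an nv_ioctl_xfer_t; their argument buffers
 * are bounced through a temporary kernel allocation and copied back out
 * when the command completes.
 */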
int nvidia_handle_ioctl(
    struct cdev *dev,
    u_long cmd,
    caddr_t data,
    int fflag,
    d_thread_t *td
)
{
    struct nvidia_softc *sc;
    nv_state_t *nv;
    int unit = minor(dev);
    nv_stack_t *sp;
    void *args;
    nv_ioctl_xfer_t *xfer = NULL;
    int status;
    int nr, size;

    if (unit == CDEV_CTL_MINOR) {
        nv = &nvidia_ctl_state;
        sc = nv->os_state;
    } else {
        sc = devclass_get_softc(nvidia_devclass, unit);
        if (!sc)
            return ENXIO;
        nv = sc->nv_state;
    }

    sp = sc->api_sp;

    size = __NV_IOC_SIZE(cmd);
    nr = __NV_IOC_NR(cmd);

    args = (void *)data;

    if (nr == NV_ESC_IOCTL_XFER_CMD) {
        if (__NV_IOC_SIZE(cmd) != sizeof(nv_ioctl_xfer_t))
            return EINVAL;

        xfer = args;
        size = xfer->size;

        if (size > NV_ABSOLUTE_MAX_IOCTL_SIZE)
            return EINVAL;

        args = malloc(size, M_NVIDIA, M_WAITOK);
        if (args == NULL)
            return ENOMEM;

        if (copyin(NvP64_VALUE(xfer->ptr), args, size) != 0) {
            free(args, M_NVIDIA);
            return EFAULT;
        }

        nr = xfer->cmd;
    }

    NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, TRUE);

    switch (nr) {
        case NV_ESC_CHECK_VERSION_STR:
            status = (rm_perform_version_check(sp, args) == RM_OK)
                ? 0 : EINVAL;
            break;

        case NV_ESC_CARD_INFO:
            status = nvidia_get_card_info(args);
            break;

        default:
            status = rm_ioctl(sp, nv, __TD_FDT(td), nr, args)
                ? 0 : EINVAL;
            break;
    }

    if (args != (void *)data) {
        if (copyout(args, NvP64_VALUE(xfer->ptr), size) != 0)
            status = EFAULT;
        free(args, M_NVIDIA);
    }

    return status;
}

int nvidia_open_ctl(
    struct cdev *dev,
    d_thread_t *td
)
{
    struct nvidia_softc *sc;
    nv_state_t *nv = &nvidia_ctl_state;
    BOOL new_filep = FALSE;
    struct nvidia_filep *filep;

    sc = nv->os_state;

    STAILQ_FOREACH(filep, &sc->filep_queue, queue) {
        if (filep->fd_table == __TD_FDT(td))
            break;
    }

    if (filep == NULL) {
        filep = kmalloc(sizeof(nvidia_filep_t), M_NVIDIA,
                M_NOWAIT | M_ZERO | M_NULLOK);
        if (filep == NULL)
            return ENOMEM;
        filep->fd_table = __TD_FDT(td);
        filep->fd_refcnt = 0;
        new_filep = TRUE;
    }

    if (sc->refcnt == 0) {
        NV_UMA_ZONE_ALLOC_STACK(sc->api_sp);
        if (sc->api_sp == NULL) {
            if (new_filep)
                free(filep, M_NVIDIA);
            return ENOMEM;
        }

        STAILQ_INIT(&sc->event_queue);
        nv->flags |= (NV_FLAG_OPEN | NV_FLAG_CONTROL);
    }

    /* Only take the new references once the open can no longer fail. */
    filep->fd_refcnt++;
    if (new_filep)
        STAILQ_INSERT_HEAD(&sc->filep_queue, filep, queue);

    sc->refcnt++;

    return 0;
}

int nvidia_close_ctl(
    struct cdev *dev,
    d_thread_t *td
)
{
    struct nvidia_softc *sc;
    nv_state_t *nv = &nvidia_ctl_state;
    struct nvidia_event *et;
    struct nvidia_filep *filep;
    nv_stack_t *sp;

    sc = nv->os_state;

    STAILQ_FOREACH(filep, &sc->filep_queue, queue) {
        if (filep->fd_table == __TD_FDT(td))
            break;
    }
    if (filep == NULL)
        return EINVAL;

    sp = sc->api_sp;

    if (--filep->fd_refcnt == 0) {
        rm_free_unused_clients(sp, nv, __TD_FDT(td));
        STAILQ_REMOVE(&sc->filep_queue, filep, nvidia_filep, queue);
        free(filep, M_NVIDIA);
    }

    if (--sc->refcnt == 0) {
        while ((et = STAILQ_FIRST(&sc->event_queue))) {
            STAILQ_REMOVE(&sc->event_queue, et, nvidia_event, queue);
            free(et, M_NVIDIA);
        }

        NV_UMA_ZONE_FREE_STACK(sp);
        nv->flags &= ~NV_FLAG_OPEN;
    }

    return 0;
}

int nvidia_open_dev(
    struct nvidia_softc *sc,
    struct cdev *dev,
    d_thread_t *td
)
{
    int status = ENOMEM;
    nv_state_t *nv = sc->nv_state;
    BOOL new_filep = FALSE;
    struct nvidia_filep *filep;
    nv_stack_t *sp = NULL;

    STAILQ_FOREACH(filep, &sc->filep_queue, queue) {
        if (filep->fd_table == __TD_FDT(td))
            break;
    }

    if (filep == NULL) {
        filep = kmalloc(sizeof(nvidia_filep_t), M_NVIDIA,
                M_NOWAIT | M_ZERO | M_NULLOK);
        if (filep == NULL)
            return ENOMEM;
        filep->fd_table = __TD_FDT(td);
        filep->fd_refcnt = 0;
        new_filep = TRUE;
    }

    if (sc->refcnt == 0) {
        NV_UMA_ZONE_ALLOC_STACK(sc->api_sp);
        if (sc->api_sp == NULL)
            goto failed;

        NV_UMA_ZONE_ALLOC_STACK(sc->pci_cfgchk_sp);
        if (sc->pci_cfgchk_sp == NULL)
            goto failed;

        NV_UMA_ZONE_ALLOC_STACK(sc->isr_sp);
        if (sc->isr_sp == NULL)
            goto failed;

        NV_UMA_ZONE_ALLOC_STACK(sc->timer_sp);
        if (sc->timer_sp == NULL)
            goto failed;
    }

    sp = sc->api_sp;
    NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, TRUE);

    if (sc->refcnt == 0) {
        if (!rm_init_adapter(sp, nv)) {
            device_printf(sc->dev, "NVRM: rm_init_adapter() failed!\n");
            status = EIO;
            goto failed;
        }

        STAILQ_INIT(&sc->event_queue);
        nv->flags |= NV_FLAG_OPEN;
    }

    filep->fd_refcnt++;
    if (new_filep)
        STAILQ_INSERT_HEAD(&sc->filep_queue, filep, queue);

    sc->refcnt++;

    return 0;

failed:
    if (sc->refcnt == 0) {
        if (sc->timer_sp != NULL)
            NV_UMA_ZONE_FREE_STACK(sc->timer_sp);
        if (sc->isr_sp != NULL)
            NV_UMA_ZONE_FREE_STACK(sc->isr_sp);
        if (sc->pci_cfgchk_sp != NULL)
            NV_UMA_ZONE_FREE_STACK(sc->pci_cfgchk_sp);
        if (sc->api_sp != NULL)
            NV_UMA_ZONE_FREE_STACK(sc->api_sp);
    }

    if (new_filep)
        free(filep, M_NVIDIA);

    return status;
}
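/*
 * Per-GPU device close: drop this file's reference, free its unused RM
 * clients and, on the last close, disable and shut down the adapter,
 * drain the event queue and release the per-device stacks.
 */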
int nvidia_close_dev(
    struct nvidia_softc *sc,
    struct cdev *dev,
    d_thread_t *td
)
{
    nv_state_t *nv = sc->nv_state;
    struct nvidia_event *et;
    struct nvidia_filep *filep;
    nv_stack_t *sp;

    STAILQ_FOREACH(filep, &sc->filep_queue, queue) {
        if (filep->fd_table == __TD_FDT(td))
            break;
    }
    if (filep == NULL)
        return EINVAL;

    sp = sc->api_sp;

    NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, TRUE);

    if (--filep->fd_refcnt == 0) {
        rm_free_unused_clients(sp, nv, __TD_FDT(td));
        STAILQ_REMOVE(&sc->filep_queue, filep, nvidia_filep, queue);
        free(filep, M_NVIDIA);
    }

    if (--sc->refcnt == 0) {
        rm_disable_adapter(sp, nv);
        rm_shutdown_adapter(sp, nv);

        while ((et = STAILQ_FIRST(&sc->event_queue))) {
            STAILQ_REMOVE(&sc->event_queue, et, nvidia_event, queue);
            free(et, M_NVIDIA);
        }

        NV_UMA_ZONE_FREE_STACK(sc->timer_sp);
        NV_UMA_ZONE_FREE_STACK(sc->isr_sp);
        NV_UMA_ZONE_FREE_STACK(sc->pci_cfgchk_sp);
        NV_UMA_ZONE_FREE_STACK(sc->api_sp);

        nv->flags &= ~NV_FLAG_OPEN;
    }

    return 0;
}

int nvidia_modevent(
    module_t mod,
    int what,
    void *arg
)
{
    nv_state_t *nv;
    struct nvidia_softc *sc;
    nv_stack_t *sp;

    switch (what) {
        case MOD_LOAD:
            /*
             * The module load event. Our KLD has just been loaded and is
             * ready to initialize. We set up the core resource manager in
             * this routine; further initialization takes place at attach
             * time.
             */
            sc = &nvidia_ctl_sc;

            NV_UMA_ZONE_ALLOC_STACK(sp);
            if (sp == NULL) {
                return ENOMEM;
            }

            bzero(sc, sizeof(nvidia_softc_t));
            STAILQ_INIT(&sc->filep_queue);

            if (!rm_init_rm(sp)) {
                kprintf("NVRM: rm_init_rm() failed!\n");
                NV_UMA_ZONE_FREE_STACK(sp);
                return EIO;
            }

            __nvidia_init_sp = sp;

            spin_init(&sc->rm_lock);
            lockinit(&sc->api_lock, "nvapi", 0, LK_CANRECURSE);

            nvidia_ctl_state.os_state = sc;
            sc->nv_state = (void *)&nvidia_ctl_state;

            nvidia_sysctl_init();
            nvidia_linux_init();

            break;

        case MOD_UNLOAD:
            /*
             * Check if the control device is still open and reject the
             * unload request if it is. This event can occur even when the
             * module usage count is non-zero!
             */
            nv = &nvidia_ctl_state;
            sc = nv->os_state;

            nv_lock_api(nv);

            if (sc->refcnt != 0) { /* XXX Fix me? (refcnt) */
                nv_unlock_api(nv);
                return EBUSY;
            }

            nv_unlock_api(nv);

            spin_uninit(&sc->rm_lock);
            lockuninit(&sc->api_lock);

            sp = __nvidia_init_sp;
            rm_shutdown_rm(sp);

            NV_UMA_ZONE_FREE_STACK(sp);

            nvidia_sysctl_exit();
            nvidia_linux_exit();

            break;

        default:
            break;
    }

    return 0;
}

#ifdef NV_SUPPORT_OS_AGP
S032 nv_os_agp_init(
    nv_stack_t *sp,
    nv_state_t *nv,
    void **base,
    U032 *limit
)
{
    void *bitmap;
    struct nvidia_softc *sc = nv->os_state;
    struct agp_info ai;

    U032 mode = 0;
    U032 fw = 0;
    U032 sba = 0;
    U032 rate = (8 | 4 | 2 | 1);
    U032 size = 0;

    sc->agp_dev = agp_find_device();
    if (!sc->agp_dev) {
        kprintf("NVRM: agp_find_device failed, chipset unsupported?\n");
        return -ENODEV;
    }

    if (agp_acquire(sc->agp_dev) != 0)
        return -EBUSY;

    agp_get_info(sc->agp_dev, &ai);
    mode = ai.ai_mode;

    if (os_set_mem_range(ai.ai_aperture_base, ai.ai_aperture_size,
            NV_MEMORY_WRITECOMBINED) != RM_OK) {
        /*
         * Failure to set a write-combining range for the AGP aperture is
         * not necessarily a fatal error condition; we can't tell at this
         * point, however, so we abort to prevent performance and stability
         * problems that may be hard to track down otherwise.
         */
        agp_release(sc->agp_dev);
        return -EIO;
    }

    rm_read_registry_dword(sp, NULL, "NVreg", "ReqAGPRate", &rate);
    rm_read_registry_dword(sp, NULL, "NVreg", "EnableAGPFW", &fw);
    rm_read_registry_dword(sp, NULL, "NVreg", "EnableAGPSBA", &sba);

    if (AGP_MODE_GET_MODE_3(mode))
        rate = (rate >> 2) & 3;

    mode = AGP_MODE_SET_RATE(mode, AGP_MODE_GET_RATE(mode) & rate);
    mode |= 1 /* avoid 0x mode request */;

    if (AGP_MODE_GET_RATE(mode) & 2)
        mode = AGP_MODE_SET_RATE(mode, AGP_MODE_GET_RATE(mode) & ~1);
    if (AGP_MODE_GET_RATE(mode) & 4)
        mode = AGP_MODE_SET_RATE(mode, AGP_MODE_GET_RATE(mode) & ~2);

    mode = AGP_MODE_SET_FW(mode, fw);
    mode = AGP_MODE_SET_SBA(mode, sba);

    if (agp_enable(sc->agp_dev, mode) != 0) {
        agp_release(sc->agp_dev);
        os_unset_mem_range(ai.ai_aperture_base, ai.ai_aperture_size);
        return -EIO;
    }

    size = ai.ai_aperture_size / RM_PAGE_SIZE / 8;
    if (os_alloc_mem((void **)&bitmap, size) != RM_OK) {
        agp_release(sc->agp_dev);
        os_unset_mem_range(ai.ai_aperture_base, ai.ai_aperture_size);
        return -EIO;
    }

    os_mem_set(bitmap, 0xff, size);

    if (rm_set_agp_bitmap(sp, nv, bitmap) != RM_OK) {
        agp_release(sc->agp_dev);
        os_free_mem(bitmap);
        os_unset_mem_range(ai.ai_aperture_base, ai.ai_aperture_size);
        return -EIO;
    }

    *base = (void *) ai.ai_aperture_base;
    *limit = (U032) ai.ai_aperture_size - 1;

    return 0;
}

S032 nv_os_agp_teardown(
    nv_stack_t *sp,
    nv_state_t *nv
)
{
    struct nvidia_softc *sc = nv->os_state;
    void *bitmap;

    if (agp_release(sc->agp_dev) != 0)
        return -EBUSY;

    rm_clear_agp_bitmap(sp, nv, &bitmap);
    os_free_mem(bitmap);

    os_unset_mem_range(nv->agp.address, nv->agp.size);

    return 0;
}
#endif /* NV_SUPPORT_OS_AGP */

RM_STATUS NV_API_CALL nv_agp_init(
    nv_state_t *nv,
    void **base,
    void *limit,
    U032 config
)
{
    RM_STATUS status = RM_ERROR;
    nv_stack_t *sp;

    if (NV_AGP_ENABLED(nv))
        return RM_ERR_STATE_IN_USE;

    if (config == NVOS_AGP_CONFIG_DISABLE_AGP) {
        /*
         * Match the behavior on Linux; don't consider the attempt
         * to initialize AGP as 'disabled' an error.
         */
        nv->agp_config = NVOS_AGP_CONFIG_DISABLE_AGP;
        nv->agp_status = NV_AGP_STATUS_DISABLED;
        return RM_OK;
    }

    NV_UMA_ZONE_ALLOC_STACK(sp);
    if (sp == NULL)
        return RM_ERR_NO_FREE_MEM;

#ifdef NV_SUPPORT_OS_AGP
    if ((config & NVOS_AGP_CONFIG_OSAGP) != 0) {
        if (nv_os_agp_init(sp, nv, base, limit) == 0) {
            /*
             * If the operating system AGP GART driver successfully
             * configured its backend, apply chipset overrides.
             */
            rm_update_agp_config(sp, nv);

            NV_UMA_ZONE_FREE_STACK(sp);

            nv->agp_config = NVOS_AGP_CONFIG_OSAGP;
            nv->agp_status = NV_AGP_STATUS_ENABLED;

            return RM_OK;
        }
    }
#endif /* NV_SUPPORT_OS_AGP */

    if ((config & NVOS_AGP_CONFIG_NVAGP) == 0) {
        status = RM_ERR_NOT_SUPPORTED;
        goto failed;
    }

    if (devclass_get_softc(devclass_find("agp"), 0) != NULL) {
        /*
         * Make sure we don't try to use the internal GART driver when
         * the OS AGPGART driver (agp.ko) is attached. While that may
         * be perfectly fine on most systems, it is known to break on
         * some.
         * -------------------------------------------------------------
         * DON'T REDISTRIBUTE THE DRIVER WITH THIS SANITY CHECK REMOVED!
         * -------------------------------------------------------------
         */
        kprintf("NVRM: detected agp.ko, aborting NVIDIA AGP setup!\n");
        goto failed;
    }

    status = rm_init_agp(sp, nv);
    if (status == RM_OK) {
        NV_UMA_ZONE_FREE_STACK(sp);

        nv->agp_config = NVOS_AGP_CONFIG_NVAGP;
        nv->agp_status = NV_AGP_STATUS_ENABLED;

        return status;
    }

failed:
    NV_UMA_ZONE_FREE_STACK(sp);

    nv->agp_config = NVOS_AGP_CONFIG_DISABLE_AGP;
    nv->agp_status = NV_AGP_STATUS_FAILED;

    return status;
}

RM_STATUS NV_API_CALL nv_agp_teardown(nv_state_t *nv)
{
    RM_STATUS status = RM_ERR_NOT_SUPPORTED;
    nv_stack_t *sp;

    if (!NV_AGP_ENABLED(nv))
        return status;

    NV_UMA_ZONE_ALLOC_STACK(sp);
    if (sp == NULL)
        return RM_ERR_NO_FREE_MEM;

#ifdef NV_SUPPORT_OS_AGP
    if (NV_OSAGP_ENABLED(nv))
        status = (nv_os_agp_teardown(sp, nv) == 0) ? RM_OK : RM_ERROR;
#endif
    if (NV_NVAGP_ENABLED(nv))
        status = rm_teardown_agp(sp, nv);

    NV_UMA_ZONE_FREE_STACK(sp);

    nv->agp_config = NVOS_AGP_CONFIG_DISABLE_AGP;
    nv->agp_status = NV_AGP_STATUS_DISABLED;

    return status;
}

S032 NV_API_CALL nv_no_incoherent_mappings(void)
{
#if !defined(NVCPU_X86_64) && !defined(NV_SUPPORT_OS_AGP)
    return 1;
#else
    /*
     * XXX We can't modify FreeBSD/amd64's cached direct mapping
     * and thus can't provide coherent mappings. The driver
     * will attempt to work around this problem, but AGP support
     * may be unavailable on some newer systems.
     *
     * The FreeBSD AGP GART driver also doesn't currently update
     * the kernel mappings of system memory mapped into the AGP
     * aperture.
     */
    return 0;
#endif
}

void NV_API_CALL nv_lock_rm(nv_state_t *nv)
{
    /*
     * With SMPng, the "giant" kernel lock is gone. That means that we're
     * in a more complex environment locking-wise, but since the necessary
     * locking primitives are available to us, we can handle it.
     *
     * With mtx_lock_spin we acquire a spin mutex and locally disable all
     * interrupts on the current processor.
     */
    struct nvidia_softc *sc = nv->os_state;
    spin_lock_wr(&sc->rm_lock);
}

void NV_API_CALL nv_unlock_rm(nv_state_t *nv)
{
    struct nvidia_softc *sc = nv->os_state;
    spin_unlock_wr(&sc->rm_lock);
}

void nv_lock_api(nv_state_t *nv)
{
    struct nvidia_softc *sc = nv->os_state;
    lockmgr(&sc->api_lock, LK_EXCLUSIVE|LK_CANRECURSE);
}

void nv_unlock_api(nv_state_t *nv)
{
    struct nvidia_softc *sc = nv->os_state;
    lockmgr(&sc->api_lock, LK_RELEASE);
}

void NV_API_CALL nv_post_event(
    nv_state_t *nv,
    nv_event_t *event,
    U032 hObject,
    U032 index
)
{
    struct nvidia_softc *sc;
    struct nvidia_event *et;

    et = kmalloc(sizeof(nvidia_event_t), M_NVIDIA,
            M_NOWAIT | M_ZERO | M_NULLOK);
    if (et == NULL)
        return;

    et->event = *event;
    et->event.hObject = hObject;
    et->event.index = index;

    nv_lock_rm(nv);
    sc = nv->os_state;
    STAILQ_INSERT_TAIL(&sc->event_queue, et, queue);
    nv_unlock_rm(nv);

    selwakeup(&sc->rsel);
}

S032 NV_API_CALL nv_get_event(
    nv_state_t *nv,
    void *file,
    nv_event_t *event,
    U032 *pending
)
{
    struct nvidia_softc *sc = nv->os_state;
    struct nvidia_event *et, *_et;

    nv_lock_rm(nv);

    STAILQ_FOREACH(et, &sc->event_queue, queue) {
        if (et->event.file == file)
            break;
    }

    if (et != NULL) {
        *event = et->event;

        STAILQ_REMOVE(&sc->event_queue, et, nvidia_event, queue);

        STAILQ_FOREACH(_et, &sc->event_queue, queue) {
            if (_et->event.file == file)
                break;
        }
        *pending = (_et != NULL);

        nv_unlock_rm(nv);

        /* will attempt to acquire a blockable sleep lock */
        free(et, M_NVIDIA);

        return RM_OK;
    }

    nv_unlock_rm(nv);

    return RM_ERROR; /* RM polling? */
}
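/*
 * Translate a physical address belonging to one of our allocations back
 * into a kernel virtual address by walking the per-device allocation list
 * and comparing vtophys() of each page of the contiguous kernel mapping.
 */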
void* NV_API_CALL nv_alloc_kernel_mapping(
    nv_state_t *nv,
    NvU64 address,
    U032 size,
    void **private
)
{
    struct nvidia_alloc *at;
    struct nvidia_softc *sc = nv->os_state;
    vm_offset_t offset, linear;

    offset = (vm_offset_t) address & PAGE_MASK;
    address &= ~PAGE_MASK;

    SLIST_FOREACH(at, &sc->alloc_list, list) {
        linear = at->address;
        do {
            if (vtophys(linear) == (vm_offset_t) address)
                return (void *)(linear + offset);
            linear += PAGE_SIZE;
        } while (linear < (at->address + at->size));
    }

    return NULL;
}

S032 NV_API_CALL nv_free_kernel_mapping(
    nv_state_t *nv,
    void *address,
    void *private
)
{
    /* There's nothing to be done here. */
    return RM_OK;
}

S032 nv_alloc_contig_pages(
    nv_state_t *nv,
    U032 count,
    U032 cache_type,
    NvU64 *pte_array,
    void **private
)
{
    struct nvidia_alloc *at;
    struct nvidia_softc *sc = nv->os_state;
    void *address;
    U032 size = count * PAGE_SIZE;
    int status;

    if (os_alloc_contig_pages(&address, size) != RM_OK)
        return -ENOMEM;

    at = kmalloc(sizeof(struct nvidia_alloc), M_NVIDIA, M_WAITOK | M_ZERO);
    if (!at) {
        os_free_contig_pages(address, size);
        return -ENOMEM;
    }

    if (cache_type != NV_MEMORY_CACHED) {
        status = pmap_change_attr((vm_offset_t)address, size,
                PAT_UNCACHEABLE);
        if (status != 0) {
            free(at, M_NVIDIA);
            os_free_contig_pages(address, size);
            return status;
        }
    }

    at->alloc_type_contiguous = 1;
    at->cache_type = cache_type;
    at->size = size;
    at->address = (vm_offset_t)address;
    at->pte_array = pte_array;

    pte_array[0] = vtophys(at->address);
    *private = at;

    SLIST_INSERT_HEAD(&sc->alloc_list, at, list);

    return 0;
}

S032 nv_free_contig_pages(
    nv_state_t *nv,
    void *private
)
{
    struct nvidia_alloc *at = private;
    struct nvidia_softc *sc = nv->os_state;

    SLIST_REMOVE(&sc->alloc_list, at, nvidia_alloc, list);

    if (at->cache_type != NV_MEMORY_CACHED)
        pmap_change_attr(at->address, at->size, PAT_WRITE_BACK);

    os_free_contig_pages((void *)at->address, at->size);
    free(at, M_NVIDIA);

    return 0;
}

S032 nv_alloc_system_pages(
    nv_state_t *nv,
    U032 count,
    U032 cache_type,
    NvU64 *pte_array,
    void **private
)
{
    struct nvidia_alloc *at;
    struct nvidia_softc *sc = nv->os_state;
    void *address;
    u_int32_t i, size;
    int status;

    size = count * PAGE_SIZE;

    at = kmalloc(sizeof(struct nvidia_alloc), M_NVIDIA, M_WAITOK | M_ZERO);
    if (!at) {
        return -ENOMEM;
    }

    address = kmalloc(size, M_NVIDIA, M_WAITOK | M_ZERO);
    if (!address) {
        free(at, M_NVIDIA);
        return -ENOMEM;
    }

    if (cache_type != NV_MEMORY_CACHED) {
        status = pmap_change_attr((vm_offset_t)address, size,
                PAT_UNCACHEABLE);
        if (status != 0) {
            free(at, M_NVIDIA);
            free(address, M_NVIDIA);
            return status;
        }
    }

    at->alloc_type_contiguous = 0;
    at->cache_type = cache_type;
    at->size = size;
    at->address = (vm_offset_t)address;
    at->pte_array = pte_array;

    for (i = 0; i < count; i++) {
        pte_array[i] = (NvU64)vtophys(at->address + (i * PAGE_SIZE));
        vm_page_wire(PHYS_TO_VM_PAGE(pte_array[i]));
    }

    *private = at;
    SLIST_INSERT_HEAD(&sc->alloc_list, at, list);

    return 0;
}

S032 nv_free_system_pages(
    nv_state_t *nv,
    void *private
)
{
    struct nvidia_alloc *at = private;
    struct nvidia_softc *sc = nv->os_state;
    u_int32_t i, count;

    count = at->size / PAGE_SIZE;
    SLIST_REMOVE(&sc->alloc_list, at, nvidia_alloc, list);

    for (i = 0; i < count; i++) {
        vm_page_unwire(PHYS_TO_VM_PAGE(at->pte_array[i]), 0);
    }

    if (at->cache_type != NV_MEMORY_CACHED)
        pmap_change_attr(at->address, at->size, PAT_WRITE_BACK);

    free((void *)at->address, M_NVIDIA);
    free(at, M_NVIDIA);

    return 0;
}

#ifdef NV_SUPPORT_OS_AGP
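/*
 * AGP allocation helpers built on the agp(4) driver: memory is allocated
 * with agp_alloc_memory() and bound into the aperture at the offset
 * selected by the resource manager.
 */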
S032 nv_alloc_agp_pages(
    nv_state_t *nv,
    U032 count,
    U032 offset,
    void **private
)
{
    void *handle;
    struct nvidia_softc *sc = nv->os_state;

    handle = agp_alloc_memory(sc->agp_dev, 0, count << PAGE_SHIFT);
    if (!handle) {
        /*
         * This is very unlikely to happen; the system's memory resources
         * would have to be nearly exhausted.
         */
        return -ENOMEM;
    }

    if (agp_bind_memory(sc->agp_dev, handle, offset) != 0) {
        /*
         * This shouldn't happen; we claimed the AGP backend and are thus
         * using it exclusively. The resource manager manages AGP offsets
         * internally, so we wouldn't have been called had we run out of
         * AGP aperture space.
         */
        os_dbg_breakpoint();

        agp_free_memory(sc->agp_dev, handle);
        return -ENOMEM;
    }

    *private = handle;

    return 0;
}

S032 nv_free_agp_pages(
    nv_state_t *nv,
    U032 count,
    void *private,
    U032 *offset
)
{
    void *handle = private;
    struct nvidia_softc *sc = nv->os_state;
    struct agp_memory_info info;

    agp_memory_info(sc->agp_dev, handle, &info);
    *offset = info.ami_offset;

    if (agp_unbind_memory(sc->agp_dev, handle) != 0) {
        /*
         * This is the only place where previously bound AGP memory would
         * be freed. If we fail to unbind this memory now, something very
         * wrong must have happened.
         */
        os_dbg_breakpoint();
    }

    agp_free_memory(sc->agp_dev, handle);

    return 0;
}
#endif /* NV_SUPPORT_OS_AGP */

RM_STATUS NV_API_CALL nv_alloc_pages(
    nv_state_t *nv,
    U032 count,
    U032 alloc_type_agp,
    U032 alloc_type_contiguous,
    U032 cache_type,
    NvU64 *pte_array,
    void **private
)
{
    U032 offset;
    RM_STATUS status = RM_ERR_NO_FREE_MEM;
    nv_stack_t *sp = NULL;

    if (alloc_type_agp) {
        if (!NV_AGP_ENABLED(nv))
            return RM_ERR_NOT_SUPPORTED;

        NV_UMA_ZONE_ALLOC_STACK(sp);
        if (sp == NULL)
            return RM_ERR_NO_FREE_MEM;

#ifdef NV_SUPPORT_OS_AGP
        if (NV_OSAGP_ENABLED(nv)) {
            status = rm_alloc_agp_bitmap(sp, nv, count, &offset);
            if (status != RM_OK)
                goto failed;

            if (nv_alloc_agp_pages(nv, count, (offset << PAGE_SHIFT),
                    private) != 0) {
                rm_free_agp_bitmap(sp, nv, count, offset);
                goto failed;
            }

            NV_UMA_ZONE_FREE_STACK(sp);

            pte_array[0] = (nv->agp.address + (offset << PAGE_SHIFT));
            return RM_OK;
        }
#endif /* NV_SUPPORT_OS_AGP */
        if (NV_NVAGP_ENABLED(nv)) {
            status = rm_alloc_agp_pages(sp, nv, count, private, &offset);
            if (status != RM_OK)
                goto failed;

            NV_UMA_ZONE_FREE_STACK(sp);

            pte_array[0] = (nv->agp.address + (offset << PAGE_SHIFT));
            return RM_OK;
        }
    } else {
        /* XXX Fix me! (PAT) */
        if (cache_type == NV_MEMORY_WRITECOMBINED) {
            status = RM_ERR_NOT_SUPPORTED;
            goto failed;
        }

        if (!alloc_type_contiguous) {
            if (nv_alloc_system_pages(nv, count, cache_type, pte_array,
                    private))
                goto failed;
        } else {
            if (nv_alloc_contig_pages(nv, count, cache_type, pte_array,
                    private))
                goto failed;
        }

        return RM_OK;
    }

failed:
    if (sp != NULL)
        NV_UMA_ZONE_FREE_STACK(sp);

    return status;
}

RM_STATUS NV_API_CALL nv_free_pages(
    nv_state_t *nv,
    U032 count,
    U032 alloc_type_agp,
    U032 alloc_type_contiguous,
    U032 cache_type,
    void *private
)
{
    RM_STATUS status = RM_ERROR;
    nv_stack_t *sp = NULL;

    if (alloc_type_agp) {
        if (!NV_AGP_ENABLED(nv))
            return RM_ERR_NOT_SUPPORTED;

        NV_UMA_ZONE_ALLOC_STACK(sp);
        if (sp == NULL)
            return RM_ERR_NO_FREE_MEM;

#ifdef NV_SUPPORT_OS_AGP
        if (NV_OSAGP_ENABLED(nv)) {
            U032 offset;

            if (nv_free_agp_pages(nv, count, private, &offset) != 0)
                goto failed;

            rm_free_agp_bitmap(sp, nv, count, (offset >> PAGE_SHIFT));
            NV_UMA_ZONE_FREE_STACK(sp);

            return RM_OK;
        }
#endif /* NV_SUPPORT_OS_AGP */
        if (NV_NVAGP_ENABLED(nv)) {
            if (rm_free_agp_pages(sp, nv, private) != RM_OK)
                goto failed;
        }

        NV_UMA_ZONE_FREE_STACK(sp);
    } else {
        if (!alloc_type_contiguous) {
            if (nv_free_system_pages(nv, private))
                goto failed;
        } else {
            if (nv_free_contig_pages(nv, private))
                goto failed;
        }
    }

    return RM_OK;

failed:
    if (sp != NULL)
        NV_UMA_ZONE_FREE_STACK(sp);

    return status;
}

NvU64 NV_API_CALL nv_dma_to_mmap_token(
    nv_state_t *nv,
    NvU64 address
)
{
    struct nvidia_alloc *at;
    struct nvidia_softc *sc = nv->os_state;
    vm_offset_t offset, linear;
    uint32_t i;

    offset = (vm_offset_t)address & PAGE_MASK;
    address &= ~PAGE_MASK;

    /*
     * XXX FreeBSD doesn't currently allow the use of physical
     * addresses as mmap(2) tokens; a linear address range
     * derived from the allocation's contiguous kernel mapping
     * is used instead.
     */
    SLIST_FOREACH(at, &sc->alloc_list, list) {
        for (i = 0; i < (at->size / PAGE_SIZE); i++) {
            if ((!at->alloc_type_contiguous &&
                    (address == (NvU64)(NvUPtr)at->pte_array[i])) ||
                (address == (NvU64)(NvUPtr)at->pte_array[0] +
                    (i * PAGE_SIZE))) {
                linear = at->address + (i * PAGE_SIZE);
                return NV_VM_TO_MMAP_OFFSET(linear + offset);
            }
        }
    }

    return 0;
}

NvU64 NV_API_CALL nv_get_kern_phys_address(NvU64 address)
{
    vm_offset_t va = (vm_offset_t) address;

#if defined(NVCPU_X86_64)
    if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
        return DMAP_TO_PHYS(va);
#endif

    if (va < KERNBASE) {
        os_dbg_breakpoint();
        return 0;
    }

    return vtophys(va);
}

NvU64 NV_API_CALL nv_get_user_phys_address(NvU64 address)
{
    struct vmspace *vm;
    vm_offset_t va = (vm_offset_t) address;

    if (va >= KERNBASE) {
        os_dbg_breakpoint();
        return 0;
    }

    /* if (vm_fault_quick((caddr_t) va, VM_PROT_WRITE)) return 0; */

    vm = curproc->p_vmspace;
    return pmap_extract(vmspace_pmap(vm), va);
}

int nvidia_mmap_dev(
    struct nvidia_softc *sc,
    vm_offset_t offset,
    vm_offset_t *physical
)
{
    struct nvidia_alloc *at;
    nv_state_t *nv = sc->nv_state;
    nv_stack_t *sp;

    sp = sc->api_sp;

    NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, TRUE);

    /*
     * Offsets that fall into the frame buffer, register or AGP
     * apertures are physical addresses and mapped into userspace
     * directly.
     */
    if (IS_FB_OFFSET(nv, offset, PAGE_SIZE) ||
            IS_BC_OFFSET(nv, offset, PAGE_SIZE)) {
        *physical = offset;
        return 0;
    }

    if (IS_REG_OFFSET(nv, offset, PAGE_SIZE)) {
        *physical = offset;
        return 0;
    }

    if (IS_AGP_OFFSET(nv, offset, PAGE_SIZE)) {
        *physical = offset;
        return 0;
    }

    offset = NV_MMAP_TO_VM_OFFSET(offset);

    SLIST_FOREACH(at, &sc->alloc_list, list) {
        if (offset >= at->address && offset < at->address + at->size) {
            *physical = vtophys(offset);
            return 0;
        }
    }

    return -1;
}

void nvidia_rc_timer(void *data)
{
    nv_state_t *nv = data;
    struct nvidia_softc *sc = nv->os_state;
    nv_stack_t *sp;

    sp = sc->timer_sp;

    NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, FALSE);

    /*
     * We need this timer to trigger again one second from
     * now; reset the timeout.
     */
    rm_run_rc_callback(sp, nv);

    callout_reset(&sc->timer_ch, hz, nvidia_rc_timer, (void *) nv);
}

int NV_API_CALL nv_start_rc_timer(
    nv_state_t *nv
)
{
    struct nvidia_softc *sc = nv->os_state;

    if (nv->rc_timer_enabled != 0)
        return -EIO;

    callout_reset(&sc->timer_ch, hz, nvidia_rc_timer, (void *) nv);
    nv->rc_timer_enabled = 1;

    return 0;
}

int NV_API_CALL nv_stop_rc_timer(
    nv_state_t *nv
)
{
    struct nvidia_softc *sc = nv->os_state;

    if (nv->rc_timer_enabled == 0)
        return -EIO;

    callout_stop(&sc->timer_ch);
    nv->rc_timer_enabled = 0;

    return 0;
}

void NV_API_CALL nv_set_dma_address_size(
    nv_state_t *nv,
    U032 phys_addr_bits
)
{
}

void* NV_API_CALL nv_get_adapter_state(
    U016 bus,
    U016 slot
)
{
    unsigned int i;
    struct nvidia_softc *sc;
    nv_state_t *nv;

    for (i = 0; i < NV_MAX_DEVICES; i++) {
        sc = devclass_get_softc(nvidia_devclass, i);
        if (!sc)
            continue;
        nv = sc->nv_state;

        if (nv->bus == bus && nv->slot == slot)
            return (void *) nv;
    }

    if (bus == 255 && slot == 255) {
        nv = &nvidia_ctl_state;
        return (void *) nv;
    }

    return NULL;
}

void NV_API_CALL nv_verify_pci_config(
    nv_state_t *nv,
    BOOL check_the_bars
)
{
    nv_stack_t *sp;
    struct nvidia_softc *sc = nv->os_state;

    sp = sc->pci_cfgchk_sp;
    NV_PCI_CHECK_CONFIG_SPACE(sp, nv, check_the_bars, FALSE, FALSE);
}

void NV_API_CALL nv_acpi_methods_init(U032 *handlesPresent)
{
    *handlesPresent = 0;
}

void NV_API_CALL nv_acpi_methods_uninit(void)
{
    return;
}

RM_STATUS NV_API_CALL nv_acpi_method(
    U032 acpi_method,
    U032 function,
    U032 subFunction,
    void *inParams,
    U016 inParamSize,
    U032 *outStatus,
    void *outData,
    U016 *outDataSize
)
{
    return RM_ERR_NOT_SUPPORTED;
}