1 /* _NVRM_COPYRIGHT_BEGIN_
3 * Copyright 2001-2002 by NVIDIA Corporation. All rights reserved. All
4 * information contained herein is proprietary and confidential to NVIDIA
5 * Corporation. Any use, reproduction, or disclosure without the written
6 * permission of NVIDIA Corporation is prohibited.
12 #include "os-interface.h"
14 #include "nv-freebsd.h"
16 #if defined(NVCPU_X86) && defined(NV_USE_OS_VM86_INT10CALL)
17 #include <machine/vm86.h>
/*
 * NOTE(review): this file is a sampled extract of the original driver source;
 * the embedded line numbers jump, i.e. many lines are missing. Definitions
 * below are fragments and must not be compiled as-is.
 */
/* malloc(9) tag for the driver's nv_stack_t allocations. */
20 MALLOC_DEFINE(M_NV_STACK, "nvstack", "NVidia stack");
/* RM stack allocated at module load time; released again at unload. */
21 static nv_stack_t *__nvidia_init_sp = NULL;
/* devclass used for per-unit softc lookups via devclass_get_softc(). */
23 devclass_t nvidia_devclass;
/* State object backing the control device (no physical GPU attached). */
24 nv_state_t nvidia_ctl_state;
/*
 * newbus attach entry point (fragment): caches PCI identity and BAR
 * ranges into the per-device nv_state_t, verifies the GPU is supported
 * by the RM, then attaches the per-device and control cdevs.
 * NOTE(review): lines are missing from this extract (braces, declarations
 * of nv/i/status, error-path returns) — do not treat as complete.
 */
26 int nvidia_attach(device_t dev)
30 struct nvidia_softc *sc;
33 sc = device_get_softc(dev);
/* Cache PCI bus/slot/IDs/IRQ for later ioctl reporting (nvidia_get_card_info). */
38 nv->bus = pci_get_bus(dev);
39 nv->slot = pci_get_slot(dev);
40 nv->vendor_id = pci_get_vendor(dev);
41 nv->device_id = pci_get_device(dev);
42 nv->interrupt_line = pci_get_irq(dev);
44 callout_init(&sc->timer_ch);
/* Record the physical address/size of each BAR resource allocated earlier. */
46 for (i = 0; i < NV_GPU_NUM_BARS; i++) {
47 if (sc->BAR_recs[i] != NULL) {
48 nv->bars[i].address = rman_get_start(sc->BAR_recs[i]);
49 nv->bars[i].size = rman_get_size(sc->BAR_recs[i]);
53 nv->fb = &nv->bars[NV_GPU_BAR_INDEX_FB];
54 nv->regs = &nv->bars[NV_GPU_BAR_INDEX_REGS];
56 pci_enable_io(dev, SYS_RES_MEMORY);
/* Reject GPUs this driver release does not support, with download pointer. */
58 if ((status = rm_is_supported_device(sc->attach_sp, nv)) != RM_OK) {
59 nv_printf(NV_DBG_ERRORS,
60 "NVRM: The NVIDIA GPU %02x:%02x (PCI ID: %04x:%04x) installed\n"
61 "NVRM: in this system is not supported by the %s NVIDIA FreeBSD\n"
62 "NVRM: graphics driver release. Please see 'Appendix A -\n"
63 "NVRM: Supported NVIDIA GPU Products' in this release's README,\n"
64 "NVRM: available on the FreeBSD graphics driver download page at\n"
65 "NVRM: www.nvidia.com.\n",
66 nv->bus, nv->slot, nv->vendor_id, nv->device_id, NV_VERSION_STRING);
70 if ((status = nvidia_dev_attach(sc)) != 0)
73 if ((status = nvidia_ctl_attach()) != 0)
/*
 * newbus detach entry point (fragment): tears down sysctl state, then
 * detaches the device and control cdevs, logging (but apparently not
 * unwinding — TODO confirm against full source) individual failures.
 */
80 int nvidia_detach(device_t dev)
83 struct nvidia_softc *sc;
85 sc = device_get_softc(dev);
86 nv_sysctl_exit(sc->nv_state);
88 status = nvidia_dev_detach(sc);
90 device_printf(dev, "NVRM: NVIDIA driver DEV detach failed.\n");
94 status = nvidia_ctl_detach();
96 device_printf(dev, "NVRM: NVIDIA driver CTL detach failed.\n");
101 /* XXX Fix me? (state) */
106 #ifdef NV_SUPPORT_ACPI_PM
/*
 * ACPI suspend hook (fragment): refuses to run unless the ACPI driver is
 * attached, then asks the RM to enter standby on a freshly allocated stack.
 */
107 int nvidia_suspend(device_t dev)
110 struct nvidia_softc *sc;
112 int status = RM_ERROR;
114 /* Only proceed if the ACPI subsystem is actually running. */
115 if (devclass_get_softc(devclass_find("ACPI"), 0) == NULL)
118 NV_UMA_ZONE_ALLOC_STACK(sp);
122 sc = device_get_softc(dev);
125 NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, TRUE);
126 status = rm_power_management(sp, nv, 0, NV_PM_ACPI_STANDBY);
128 NV_UMA_ZONE_FREE_STACK(sp);
/*
 * ACPI resume hook (fragment): mirror image of suspend; restores config
 * space and asks the RM to resume the adapter.
 */
133 int nvidia_resume(device_t dev)
136 struct nvidia_softc *sc;
138 int status = RM_ERROR;
140 NV_UMA_ZONE_ALLOC_STACK(sp);
144 sc = device_get_softc(dev);
147 NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, TRUE);
148 status = rm_power_management(sp, nv, 0, NV_PM_ACPI_RESUME);
150 NV_UMA_ZONE_FREE_STACK(sp);
154 #endif /* NV_SUPPORT_ACPI_PM */
/*
 * Allocates the device's bus resources (fragment): memory BARs are claimed
 * without RF_ACTIVE (no mapping yet), the IRQ is claimed shared+active.
 */
157 int nvidia_alloc_hardware(device_t dev)
160 struct nvidia_softc *sc;
163 sc = device_get_softc(dev);
/* BARs are deliberately not activated here; mapping happens on demand. */
166 flags = 0; /* not RF_ACTIVE */
167 for (i = 0; i < NV_GPU_NUM_BARS && sc->BAR_rids[i] != 0; i++) {
168 struct resource *res;
169 res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->BAR_rids[i], flags);
172 * The most likely reason for this failure is that the SBIOS failed
173 * to assign a valid address range to this BAR; FreeBSD is unable to
174 * correct the problem and fails this BUS resource allocation. We
175 * trust the kernel with BAR validation at this point, but later try
176 * to catch cases where the X server "corrects" "invalid" BAR's.
178 * Please see nvidia_pci_check_config_space() in nvidia_pci.c for
179 * additional information.
182 "NVRM: NVIDIA MEM resource alloc failed, BAR%d @ 0x%02x.\n",
183 i, sc->nv_state->bars[i].offset);
187 sc->BAR_recs[i] = res;
190 flags = RF_SHAREABLE | RF_ACTIVE;
191 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, flags);
192 if (sc->irq == NULL) {
193 device_printf(dev, "NVRM: NVIDIA IRQ resource alloc failed.\n");
/*
 * Releases everything nvidia_alloc_hardware() claimed: memory BARs, the
 * IRQ, and an I/O-port resource (iop — allocated elsewhere, not visible
 * in this extract).
 */
202 void nvidia_free_hardware(device_t dev)
204 struct nvidia_softc *sc;
207 sc = device_get_softc(dev);
209 for (i = 0; i < NV_GPU_NUM_BARS && sc->BAR_recs[i] != NULL; i++)
210 bus_release_resource(dev, SYS_RES_MEMORY, sc->BAR_rids[i], sc->BAR_recs[i]);
212 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
214 bus_release_resource(dev, SYS_RES_IOPORT, sc->iop_rid, sc->iop);
/*
 * Interrupt handler (fragment): verifies PCI config space, dispatches to
 * the RM ISR, and — when rm_isr() requests it — schedules bottom-half
 * work (scheduling code not visible in this extract).
 */
217 void nvidia_intr(void *xsc)
219 struct nvidia_softc *sc;
221 U032 run_bottom_half = 0;
224 sc = (struct nvidia_softc *) xsc;
232 NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, FALSE);
233 rm_isr(sp, nv, &run_bottom_half);
235 if (run_bottom_half) {
236 /* We're not executing in an HW ISR context */
/*
 * Fills the caller-supplied card-info array (fragment): first interprets
 * legacy version magics that old clients pass in the first slot, then
 * reports identity/BAR data for every attached GPU.
 */
241 int nvidia_get_card_info(struct nv_ioctl_card_info *ci)
244 struct nvidia_softc *sc;
248 * Clients supporting versioning will pass version magic in the first
249 * card information field.
251 struct nv_ioctl_rm_api_old_version *av = (void *) ci;
255 case NV_RM_API_OLD_VERSION_MAGIC_OVERRIDE_REQ:
256 case NV_RM_API_OLD_VERSION_MAGIC_LAX_REQ:
257 case NV_RM_API_OLD_VERSION_MAGIC_REQ:
259 * the client is using the old major-minor-patch API
260 * version check; reject it.
262 nv_printf(NV_DBG_ERRORS,
263 "NVRM: API mismatch: the client has the version %d.%d-%d, but\n"
264 "NVRM: this kernel module has the version %s. Please\n"
265 "NVRM: make sure that this kernel module and all NVIDIA driver\n"
266 "NVRM: components have the same version.\n",
267 av->major, av->minor, av->patch,
272 case NV_RM_API_OLD_VERSION_MAGIC_IGNORE:
274 * the client is telling us to ignore the old version
275 * scheme; it will do a version check via
276 * NV_ESC_CHECK_VERSION_STR
284 /* clear card information structure */
/*
 * NOTE(review): sizeof(ci) is the size of the POINTER, not of the array
 * being filled below (ci[i] up to NV_MAX_DEVICES). This looks like it
 * should be sizeof(*ci) * NV_MAX_DEVICES — confirm against full source.
 */
285 memset(ci, 0, sizeof(ci));
287 for (i = 0; i < NV_MAX_DEVICES; i++) {
288 sc = devclass_get_softc(nvidia_devclass, i);
293 ci[i].flags = (NV_IOCTL_CARD_INFO_FLAG_PRESENT |
294 NV_IOCTL_CARD_INFO_FLAG_NEED_MSYNC);
296 ci[i].slot = nv->slot;
297 ci[i].vendor_id = nv->vendor_id;
298 ci[i].device_id = nv->device_id;
299 ci[i].interrupt_line = nv->interrupt_line;
300 ci[i].fb_address = nv->fb->address;
301 ci[i].fb_size = nv->fb->size;
302 ci[i].reg_address = nv->regs->address;
303 ci[i].reg_size = nv->regs->size;
/*
 * ioctl dispatcher (fragment): routes the control minor vs. per-GPU
 * minors, unpacks NV_ESC_IOCTL_XFER_CMD indirection (user pointer +
 * size in an nv_ioctl_xfer_t), bounds-checks and copies in the argument
 * buffer, then dispatches on the ioctl NR. Copies results back out and
 * frees the staging buffer when one was allocated.
 */
309 int nvidia_handle_ioctl(
317 struct nvidia_softc *sc;
319 int unit = minor(dev);
322 nv_ioctl_xfer_t *xfer = NULL;
326 if (unit == CDEV_CTL_MINOR) {
327 nv = &nvidia_ctl_state;
330 sc = devclass_get_softc(nvidia_devclass, unit);
338 size = __NV_IOC_SIZE(cmd);
339 nr = __NV_IOC_NR(cmd);
/* Indirect transfer command: real size/pointer live in the xfer struct. */
343 if (nr == NV_ESC_IOCTL_XFER_CMD) {
344 if (__NV_IOC_SIZE(cmd) != sizeof(nv_ioctl_xfer_t))
/* Cap user-controlled size before the malloc below. */
350 if (size > NV_ABSOLUTE_MAX_IOCTL_SIZE)
353 args = malloc(size, M_NVIDIA, M_WAITOK);
357 if (copyin(NvP64_VALUE(xfer->ptr), args, size) != 0) {
358 free(args, M_NVIDIA);
365 NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, TRUE);
368 case NV_ESC_CHECK_VERSION_STR:
369 status = (rm_perform_version_check(sp, args) == RM_OK)
373 case NV_ESC_CARD_INFO:
374 status = nvidia_get_card_info(args);
378 status = rm_ioctl(sp, nv, __TD_FDT(td), nr, args)
/* Only copy out / free when a separate staging buffer was allocated. */
383 if (args != (void *)data) {
384 if (copyout(args, NvP64_VALUE(xfer->ptr), size) != 0)
386 free(args, M_NVIDIA);
/*
 * NOTE(review): the function header line is missing from this extract —
 * presumably nvidia_open_ctl(). Opens the control device: looks up (or
 * lazily creates) the per-fd-table filep record, and on the first open
 * allocates the API stack, initializes the event queue and marks the
 * control state open.
 */
397 struct nvidia_softc *sc;
398 nv_state_t *nv = &nvidia_ctl_state;
399 BOOL new_filep = FALSE;
400 struct nvidia_filep *filep;
/* One filep per opening fd table; reuse if this process already opened us. */
404 STAILQ_FOREACH(filep, &sc->filep_queue, queue) {
405 if (filep->fd_table == __TD_FDT(td))
410 filep = kmalloc(sizeof(nvidia_filep_t), M_NVIDIA, M_NOWAIT | M_ZERO | M_NULLOK);
413 filep->fd_table = __TD_FDT(td);
414 filep->fd_refcnt = 0;
415 STAILQ_INSERT_HEAD(&sc->filep_queue, filep, queue);
/* First opener sets up shared control-device state. */
421 if (sc->refcnt == 0) {
422 NV_UMA_ZONE_ALLOC_STACK(sc->api_sp);
423 if (sc->api_sp == NULL) {
425 free(filep, M_NVIDIA);
429 STAILQ_INIT(&sc->event_queue);
430 nv->flags |= (NV_FLAG_OPEN | NV_FLAG_CONTROL);
/*
 * Close of the control device (fragment): drops the per-fd-table filep
 * reference (releasing RM clients when it hits zero) and, on last close,
 * drains the event queue, frees the API stack and clears NV_FLAG_OPEN.
 */
438 int nvidia_close_ctl(
443 struct nvidia_softc *sc;
444 nv_state_t *nv = &nvidia_ctl_state;
445 struct nvidia_event *et;
446 struct nvidia_filep *filep;
451 STAILQ_FOREACH(filep, &sc->filep_queue, queue) {
452 if (filep->fd_table == __TD_FDT(td))
461 if (--filep->fd_refcnt == 0) {
462 rm_free_unused_clients(sp, nv, __TD_FDT(td));
463 STAILQ_REMOVE(&sc->filep_queue, filep, nvidia_filep, queue);
464 free(filep, M_NVIDIA);
467 if (--sc->refcnt == 0) {
/* Drain and free any events that were never collected by a client. */
468 while ((et = STAILQ_FIRST(&sc->event_queue))) {
469 STAILQ_REMOVE(&sc->event_queue, et, nvidia_event, queue);
473 NV_UMA_ZONE_FREE_STACK(sp);
475 nv->flags &= ~NV_FLAG_OPEN;
/*
 * NOTE(review): the function header is missing from this extract —
 * presumably nvidia_open_dev(). First open of a GPU device allocates the
 * four RM stacks (api/pci-cfgchk/isr/timer) and initializes the adapter;
 * failures unwind through the cleanup tail at the bottom.
 */
482 struct nvidia_softc *sc,
488 nv_state_t *nv = sc->nv_state;
489 BOOL new_filep = FALSE;
490 struct nvidia_filep *filep;
491 nv_stack_t *sp = NULL;
493 STAILQ_FOREACH(filep, &sc->filep_queue, queue) {
494 if (filep->fd_table == __TD_FDT(td))
499 filep = kmalloc(sizeof(nvidia_filep_t), M_NVIDIA, M_NOWAIT | M_ZERO | M_NULLOK);
502 filep->fd_table = __TD_FDT(td);
503 filep->fd_refcnt = 0;
/* First opener: allocate all per-device RM stacks up front. */
507 if (sc->refcnt == 0) {
508 NV_UMA_ZONE_ALLOC_STACK(sc->api_sp);
509 if (sc->api_sp == NULL)
512 NV_UMA_ZONE_ALLOC_STACK(sc->pci_cfgchk_sp);
513 if (sc->pci_cfgchk_sp == NULL)
516 NV_UMA_ZONE_ALLOC_STACK(sc->isr_sp);
517 if (sc->isr_sp == NULL)
520 NV_UMA_ZONE_ALLOC_STACK(sc->timer_sp);
521 if (sc->timer_sp == NULL)
526 NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, TRUE);
528 if (sc->refcnt == 0) {
529 if (!rm_init_adapter(sp, nv)) {
530 device_printf(sc->dev, "NVRM: rm_init_adapter() failed!\n");
535 STAILQ_INIT(&sc->event_queue);
536 nv->flags |= NV_FLAG_OPEN;
541 STAILQ_INSERT_HEAD(&sc->filep_queue, filep, queue);
/* Error-unwind tail: free any stacks allocated by this (failed) first open. */
548 if (sc->refcnt == 0) {
549 if (sc->timer_sp != NULL)
550 NV_UMA_ZONE_FREE_STACK(sc->timer_sp);
551 if (sc->isr_sp != NULL)
552 NV_UMA_ZONE_FREE_STACK(sc->isr_sp);
553 if (sc->pci_cfgchk_sp != NULL)
554 NV_UMA_ZONE_FREE_STACK(sc->pci_cfgchk_sp);
555 if (sc->api_sp != NULL)
556 NV_UMA_ZONE_FREE_STACK(sc->api_sp);
560 free(filep, M_NVIDIA);
/*
 * Close of a GPU device (fragment): drops the per-fd-table reference and,
 * on last close, disables and shuts down the adapter, drains the event
 * queue, frees all four RM stacks and clears NV_FLAG_OPEN.
 */
565 int nvidia_close_dev(
566 struct nvidia_softc *sc,
571 nv_state_t *nv = sc->nv_state;
572 struct nvidia_event *et;
573 struct nvidia_filep *filep;
576 STAILQ_FOREACH(filep, &sc->filep_queue, queue) {
577 if (filep->fd_table == __TD_FDT(td))
586 NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, TRUE);
588 if (--filep->fd_refcnt == 0) {
589 rm_free_unused_clients(sp, nv, __TD_FDT(td));
590 STAILQ_REMOVE(&sc->filep_queue, filep, nvidia_filep, queue);
591 free(filep, M_NVIDIA);
594 if (--sc->refcnt == 0) {
595 rm_disable_adapter(sp, nv);
596 rm_shutdown_adapter(sp, nv);
598 while ((et = STAILQ_FIRST(&sc->event_queue))) {
599 STAILQ_REMOVE(&sc->event_queue, et, nvidia_event, queue);
603 NV_UMA_ZONE_FREE_STACK(sc->timer_sp);
604 NV_UMA_ZONE_FREE_STACK(sc->isr_sp);
605 NV_UMA_ZONE_FREE_STACK(sc->pci_cfgchk_sp);
606 NV_UMA_ZONE_FREE_STACK(sc->api_sp);
608 nv->flags &= ~NV_FLAG_OPEN;
/*
 * NOTE(review): this span covers the KLD module event handler (header not
 * visible in this extract — presumably nvidia_modevent() or similar), with
 * both the MOD_LOAD and MOD_UNLOAD arms sampled.
 */
622 struct nvidia_softc *sc;
628 * The module load event. Our KLD has just been loaded and is
629 * ready to initialize. We setup the core resource manager in
630 * this routine, further initialization takes place at attach
635 NV_UMA_ZONE_ALLOC_STACK(sp);
640 bzero(sc, sizeof(nvidia_softc_t));
641 STAILQ_INIT(&sc->filep_queue);
643 if (!rm_init_rm(sp)) {
644 kprintf("NVRM: rm_init_rm() failed!\n");
645 NV_UMA_ZONE_FREE_STACK(sp);
/* Stash the init stack; it is retrieved and freed again on unload below. */
649 __nvidia_init_sp = sp;
651 spin_init(&sc->rm_lock);
652 lockinit(&sc->api_lock, "nvapi", 0, LK_CANRECURSE);
654 nvidia_ctl_state.os_state = sc;
655 sc->nv_state = (void *)&nvidia_ctl_state;
657 nvidia_sysctl_init();
664 * Check if the control device is still open and reject the
665 * unload request if it is. This event can occur even when the
666 * module usage count is non-zero!
668 nv = &nvidia_ctl_state;
673 if (sc->refcnt != 0) { /* XXX Fix me? (refcnt) */
680 spin_uninit(&sc->rm_lock);
681 lockuninit(&sc->api_lock);
683 sp = __nvidia_init_sp;
686 NV_UMA_ZONE_FREE_STACK(sp);
688 nvidia_sysctl_exit();
701 #ifdef NV_SUPPORT_OS_AGP
/*
 * OS AGP GART backend init (fragment): acquires the kernel agp(4) device,
 * sets a write-combining range over the aperture, negotiates the AGP mode
 * (rate/FW/SBA, with NVreg registry overrides), then hands an allocation
 * bitmap to the RM. Returns aperture base/limit to the caller.
 */
710 struct nvidia_softc *sc = nv->os_state;
/* Default: allow all rates (1x|2x|4x|8x) unless the registry narrows it. */
716 U032 rate = (8 | 4 | 2 | 1);
719 sc->agp_dev = agp_find_device();
721 kprintf("NVRM: agp_find_device failed, chipset unsupported?\n");
725 if (agp_acquire(sc->agp_dev) != 0)
728 agp_get_info(sc->agp_dev, &ai);
731 if (os_set_mem_range(ai.ai_aperture_base, ai.ai_aperture_size,
732 NV_MEMORY_WRITECOMBINED) != RM_OK) {
734 * Failure to set a write-combining range for the AGP aperture is
735 * not necessarily a fatal error condition; we don't know at this
736 * point, however, and abort to prevent performance and stability
737 * problems that may be hard to track down otherwise.
739 agp_release(sc->agp_dev);
743 rm_read_registry_dword(sp, NULL, "NVreg", "ReqAGPRate", &rate);
744 rm_read_registry_dword(sp, NULL, "NVreg", "EnableAGPFW", &fw);
745 rm_read_registry_dword(sp, NULL, "NVreg", "EnableAGPSBA", &sba);
/* AGP 3.0 encodes 4x/8x in the low two rate bits; remap the request. */
747 if (AGP_MODE_GET_MODE_3(mode))
748 rate = (rate >> 2) & 3;
749 mode = AGP_MODE_SET_RATE(mode, AGP_MODE_GET_RATE(mode) & rate);
750 mode |= 1 /* avoid 0x mode request */;
/* Keep only the highest remaining rate bit. */
752 if (AGP_MODE_GET_RATE(mode) & 2)
753 mode = AGP_MODE_SET_RATE(mode, AGP_MODE_GET_RATE(mode) & ~1);
754 if (AGP_MODE_GET_RATE(mode) & 4)
755 mode = AGP_MODE_SET_RATE(mode, AGP_MODE_GET_RATE(mode) & ~2);
757 mode = AGP_MODE_SET_FW(mode, fw);
758 mode = AGP_MODE_SET_SBA(mode, sba);
760 if (agp_enable(sc->agp_dev, mode) != 0) {
761 agp_release(sc->agp_dev);
762 os_unset_mem_range(ai.ai_aperture_base, ai.ai_aperture_size);
/* One bitmap bit per aperture page. */
766 size = ai.ai_aperture_size / RM_PAGE_SIZE / 8;
768 if (os_alloc_mem((void **)&bitmap, size) != RM_OK) {
769 agp_release(sc->agp_dev);
770 os_unset_mem_range(ai.ai_aperture_base, ai.ai_aperture_size);
774 os_mem_set(bitmap, 0xff, size);
776 if (rm_set_agp_bitmap(sp, nv, bitmap) != RM_OK) {
777 agp_release(sc->agp_dev);
779 os_unset_mem_range(ai.ai_aperture_base, ai.ai_aperture_size);
783 *base = (void *) ai.ai_aperture_base;
784 *limit = (U032) ai.ai_aperture_size - 1;
/*
 * OS AGP GART backend teardown (fragment): releases the agp(4) device,
 * retrieves and (presumably — lines missing) frees the bitmap, and drops
 * the write-combining range over the aperture.
 */
789 S032 nv_os_agp_teardown(
794 struct nvidia_softc *sc = nv->os_state;
797 if (agp_release(sc->agp_dev) != 0)
800 rm_clear_agp_bitmap(sp, nv, &bitmap);
803 os_unset_mem_range(nv->agp.address, nv->agp.size);
807 #endif /* NV_SUPPORT_OS_AGP */
/*
 * AGP initialization entry point (fragment): tries the OS GART backend
 * first (when compiled in and requested), then falls back to the internal
 * NVIDIA GART driver — but refuses the latter if agp.ko is attached.
 * Records the resulting agp_config/agp_status on the nv state.
 */
809 RM_STATUS NV_API_CALL nv_agp_init(
816 RM_STATUS status = RM_ERROR;
819 if (NV_AGP_ENABLED(nv))
820 return RM_ERR_STATE_IN_USE;
822 if (config == NVOS_AGP_CONFIG_DISABLE_AGP) {
824 * Match the behavior on Linux, don't consider the attempt
825 * to initialize AGP as 'disabled' an error.
827 nv->agp_config = NVOS_AGP_CONFIG_DISABLE_AGP;
828 nv->agp_status = NV_AGP_STATUS_DISABLED;
832 NV_UMA_ZONE_ALLOC_STACK(sp);
834 return RM_ERR_NO_FREE_MEM;
836 #ifdef NV_SUPPORT_OS_AGP
837 if ((config & NVOS_AGP_CONFIG_OSAGP) != 0) {
838 if (nv_os_agp_init(sp, nv, base, limit) == 0) {
840 * If the operating system AGP GART driver successfully
841 * configured its backend, apply chipset overrides.
843 rm_update_agp_config(sp, nv);
844 NV_UMA_ZONE_FREE_STACK(sp);
846 nv->agp_config = NVOS_AGP_CONFIG_OSAGP;
847 nv->agp_status = NV_AGP_STATUS_ENABLED;
852 #endif /* NV_SUPPORT_OS_AGP */
854 if ((config & NVOS_AGP_CONFIG_NVAGP) == 0) {
855 status = RM_ERR_NOT_SUPPORTED;
859 if (devclass_get_softc(devclass_find("agp"), 0) != NULL) {
861 * Make sure we don't try to use the internal GART driver when
862 * the OS AGPGART driver (agp.ko) is attached. While that may
863 * be perfectly fine on most systems, it is known to break on
865 * -------------------------------------------------------------
866 * DON'T REDISTRIBUTE THE DRIVER WITH THIS SANITY CHECK REMOVED!
867 * -------------------------------------------------------------
869 kprintf("NVRM: detected agp.ko, aborting NVIDIA AGP setup!\n");
873 status = rm_init_agp(sp, nv);
874 if (status == RM_OK) {
875 NV_UMA_ZONE_FREE_STACK(sp);
877 nv->agp_config = NVOS_AGP_CONFIG_NVAGP;
878 nv->agp_status = NV_AGP_STATUS_ENABLED;
/* Common failure tail: record AGP as failed/disabled. */
884 NV_UMA_ZONE_FREE_STACK(sp);
886 nv->agp_config = NVOS_AGP_CONFIG_DISABLE_AGP;
887 nv->agp_status = NV_AGP_STATUS_FAILED;
/*
 * AGP teardown entry point (fragment): dispatches to the OS or internal
 * GART teardown depending on how AGP was brought up, then marks AGP
 * disabled on the nv state.
 */
892 RM_STATUS NV_API_CALL nv_agp_teardown(nv_state_t *nv)
894 RM_STATUS status = RM_ERR_NOT_SUPPORTED;
897 if (!NV_AGP_ENABLED(nv))
900 NV_UMA_ZONE_ALLOC_STACK(sp);
902 return RM_ERR_NO_FREE_MEM;
904 #ifdef NV_SUPPORT_OS_AGP
905 if (NV_OSAGP_ENABLED(nv))
906 status = (nv_os_agp_teardown(sp, nv) == 0)
909 if (NV_NVAGP_ENABLED(nv))
910 status = rm_teardown_agp(sp, nv);
912 NV_UMA_ZONE_FREE_STACK(sp);
914 nv->agp_config = NVOS_AGP_CONFIG_DISABLE_AGP;
915 nv->agp_status = NV_AGP_STATUS_DISABLED;
/*
 * Reports whether the platform can guarantee cache-coherent mappings
 * (fragment): compiled-in answer differs on amd64, where the kernel's
 * cached direct map cannot be retargeted.
 */
920 S032 NV_API_CALL nv_no_incoherent_mappings(void)
922 #if !defined(NVCPU_X86_64) && !defined(NV_SUPPORT_OS_AGP)
926 * XXX We can't modify FreeBSD/amd64's cached direct mapping
927 * and thus can't provide coherent mappings. The driver
928 * will attempt to work around this problem, but AGP support
929 * may be unavailable on some newer systems.
931 * The FreeBSD AGP GART driver also doesn't currently update
932 * the kernel mappings of system memory mapped into the AGP
/*
 * RM spinlock + API sleep-lock helpers. The RM lock is a spinlock
 * (interrupt-safe, short hold times); the API lock is a recursive
 * lockmgr lock serializing RM API entry.
 */
939 void NV_API_CALL nv_lock_rm(nv_state_t *nv)
942 * With SMPng, the "giant" kernel lock is gone. That means that we're
943 * in a more complex environment locking-wise, but since the necessary
944 * locking primitives are available to us, we can handle it.
946 * With mtx_lock_spin we acquire a spin mutex and locally disable all
947 * interrupts on the current processor.
949 struct nvidia_softc *sc = nv->os_state;
950 spin_lock_wr(&sc->rm_lock);
953 void NV_API_CALL nv_unlock_rm(nv_state_t *nv)
955 struct nvidia_softc *sc = nv->os_state;
956 spin_unlock_wr(&sc->rm_lock);
/* Exclusive, recursive API lock (LK_CANRECURSE set at lockinit). */
959 void nv_lock_api(nv_state_t *nv)
961 struct nvidia_softc *sc = nv->os_state;
962 lockmgr(&sc->api_lock, LK_EXCLUSIVE|LK_CANRECURSE);
965 void nv_unlock_api(nv_state_t *nv)
967 struct nvidia_softc *sc = nv->os_state;
968 lockmgr(&sc->api_lock, LK_RELEASE);
/*
 * Event queue (fragment): nv_post_event() enqueues an RM event and wakes
 * select()/poll() sleepers; nv_get_event() dequeues the first event for
 * the given file and reports whether more are pending.
 */
972 void NV_API_CALL nv_post_event(
979 struct nvidia_softc *sc;
980 struct nvidia_event *et;
/* M_NOWAIT: may be called from a context that cannot sleep. */
982 et = kmalloc(sizeof(nvidia_event_t), M_NVIDIA, M_NOWAIT | M_ZERO | M_NULLOK);
987 et->event.hObject = hObject;
988 et->event.index = index;
993 STAILQ_INSERT_TAIL(&sc->event_queue, et, queue);
997 selwakeup(&sc->rsel);
1000 S032 NV_API_CALL nv_get_event(
1007 struct nvidia_softc *sc = nv->os_state;
1008 struct nvidia_event *et, *_et;
1012 STAILQ_FOREACH(et, &sc->event_queue, queue) {
1013 if (et->event.file == file)
1020 STAILQ_REMOVE(&sc->event_queue, et, nvidia_event, queue);
/* Scan the remainder to tell the caller whether more events await. */
1022 STAILQ_FOREACH(_et, &sc->event_queue, queue) {
1023 if (_et->event.file == file)
1027 *pending = (_et != NULL);
1041 void* NV_API_CALL nv_alloc_kernel_mapping(
1048 struct nvidia_alloc *at;
1049 struct nvidia_softc *sc = nv->os_state;
1050 vm_offset_t offset, linear;
1052 offset = (vm_offset_t) address & PAGE_MASK;
1053 address &= ~PAGE_MASK;
1055 SLIST_FOREACH(at, &sc->alloc_list, list) {
1056 linear = at->address;
1058 if (vtophys(linear) == (vm_offset_t) address)
1059 return (void *)(linear + offset);
1060 linear += PAGE_SIZE;
1061 } while (linear < (at->address + at->size));
1067 S032 NV_API_CALL nv_free_kernel_mapping(
1074 /* There's nothing to be done here. */
/*
 * Contiguous system-memory allocation (fragment): grabs physically
 * contiguous pages, optionally remaps them uncacheable via
 * pmap_change_attr(), records the allocation on sc->alloc_list and
 * reports the base physical address through pte_array[0].
 */
1078 S032 nv_alloc_contig_pages(
1086 struct nvidia_alloc *at;
1087 struct nvidia_softc *sc = nv->os_state;
1089 U032 size = count * PAGE_SIZE;
1092 if (os_alloc_contig_pages(&address, size) != RM_OK)
1095 at = kmalloc(sizeof(struct nvidia_alloc), M_NVIDIA, M_WAITOK | M_ZERO);
1097 os_free_contig_pages(address, size);
1101 if (cache_type != NV_MEMORY_CACHED) {
1102 status = pmap_change_attr((vm_offset_t)address, size, PAT_UNCACHEABLE);
1105 os_free_contig_pages(address, size);
1110 at->alloc_type_contiguous = 1;
1112 at->cache_type = cache_type;
1114 at->address = (vm_offset_t)address;
1115 at->pte_array = pte_array;
/* Contiguous: a single base physical address suffices. */
1117 pte_array[0] = vtophys(at->address);
1120 SLIST_INSERT_HEAD(&sc->alloc_list, at, list);
/*
 * Contiguous free (fragment): unlinks the record, restores write-back
 * caching if it was changed, and returns the pages.
 */
1125 S032 nv_free_contig_pages(
1130 struct nvidia_alloc *at = private;
1131 struct nvidia_softc *sc = nv->os_state;
1133 SLIST_REMOVE(&sc->alloc_list, at, nvidia_alloc, list);
1135 if (at->cache_type != NV_MEMORY_CACHED)
1136 pmap_change_attr(at->address, at->size, PAT_WRITE_BACK);
1138 os_free_contig_pages((void *)at->address, at->size);
/*
 * Non-contiguous system-memory allocation (fragment): kmalloc's the
 * buffer, optionally makes it uncacheable, then records and wires each
 * backing page, exporting per-page physical addresses via pte_array[].
 */
1144 S032 nv_alloc_system_pages(
1152 struct nvidia_alloc *at;
1153 struct nvidia_softc *sc = nv->os_state;
1158 size = count * PAGE_SIZE;
1159 at = kmalloc(sizeof(struct nvidia_alloc), M_NVIDIA, M_WAITOK | M_ZERO);
1164 address = kmalloc(size, M_NVIDIA, M_WAITOK | M_ZERO);
1170 if (cache_type != NV_MEMORY_CACHED) {
1171 status = pmap_change_attr((vm_offset_t)address, size, PAT_UNCACHEABLE);
1174 free(address, M_NVIDIA);
1179 at->alloc_type_contiguous = 0;
1181 at->cache_type = cache_type;
1183 at->address = (vm_offset_t)address;
1184 at->pte_array = pte_array;
/* Wire every page so it cannot be paged out while the GPU references it. */
1186 for (i = 0; i < count; i++) {
1187 pte_array[i] = (NvU64)vtophys(at->address + (i * PAGE_SIZE));
1188 vm_page_wire(PHYS_TO_VM_PAGE(pte_array[i]));
1192 SLIST_INSERT_HEAD(&sc->alloc_list, at, list);
/*
 * Non-contiguous free (fragment): unwires each page, restores caching,
 * and releases the buffer.
 */
1197 S032 nv_free_system_pages(
1202 struct nvidia_alloc *at = private;
1203 struct nvidia_softc *sc = nv->os_state;
1206 count = at->size / PAGE_SIZE;
1207 SLIST_REMOVE(&sc->alloc_list, at, nvidia_alloc, list);
1209 for (i = 0; i < count; i++) {
1210 vm_page_unwire(PHYS_TO_VM_PAGE(at->pte_array[i]), 0);
1213 if (at->cache_type != NV_MEMORY_CACHED)
1214 pmap_change_attr(at->address, at->size, PAT_WRITE_BACK);
1216 free((void *)at->address, M_NVIDIA);
1222 #ifdef NV_SUPPORT_OS_AGP
/*
 * AGP memory allocation via the kernel agp(4) backend (fragment):
 * allocates and binds AGP memory at the RM-chosen aperture offset.
 */
1223 S032 nv_alloc_agp_pages(
1231 struct nvidia_softc *sc = nv->os_state;
1233 handle = agp_alloc_memory(sc->agp_dev, 0, count << PAGE_SHIFT);
1236 * This is very unlikely to happen, the system's memory resources
1237 * would have to be nearly exhausted.
1242 if (agp_bind_memory(sc->agp_dev, handle, offset) != 0) {
1244 * This shouldn't happen, we claimed the AGP backend and are thus
1245 * using it exclusively; the resource manager manages AGP offsets
1246 * internally, we wouldn't have been called had we run out of AGP
1249 os_dbg_breakpoint();
1251 agp_free_memory(sc->agp_dev, handle);
/*
 * AGP memory release (fragment): reports the bound offset back to the
 * caller, then unbinds and frees the agp(4) memory handle.
 */
1259 S032 nv_free_agp_pages(
1266 void *handle = private;
1267 struct nvidia_softc *sc = nv->os_state;
1268 struct agp_memory_info info;
1270 agp_memory_info(sc->agp_dev, handle, &info);
1271 *offset = info.ami_offset;
1273 if (agp_unbind_memory(sc->agp_dev, handle) != 0) {
1275 * This is the only place where previously bound AGP memory would
1276 * be freed. If we fail to unbind this memory now, something very
1277 * wrong must have happened.
1279 os_dbg_breakpoint();
1282 agp_free_memory(sc->agp_dev, handle);
1285 #endif /* NV_SUPPORT_OS_AGP */
/* Unimplemented on this platform: page aliasing is not supported. */
1287 RM_STATUS NV_API_CALL nv_alias_pages(
1297 return RM_ERR_NOT_SUPPORTED;
/* Unimplemented on this platform: guest PFN lists (virtualization). */
1300 RM_STATUS NV_API_CALL nv_guest_pfn_list(
1303 unsigned int pfn_count,
1304 unsigned int offset_index,
1305 unsigned int *user_pfn_list
1308 return RM_ERR_NOT_SUPPORTED;
/*
 * Top-level page allocation dispatcher (fragment): routes an allocation
 * request to the OS AGP backend, the internal NVIDIA GART, or plain
 * system memory (contiguous / non-contiguous) based on the alloc_type_*
 * flags and the active AGP configuration.
 */
1311 RM_STATUS NV_API_CALL nv_alloc_pages(
1314 U032 alloc_type_agp,
1315 U032 alloc_type_contiguous,
1322 RM_STATUS status = RM_ERR_NO_FREE_MEM;
1323 nv_stack_t *sp = NULL;
1325 if (alloc_type_agp) {
1326 if (!NV_AGP_ENABLED(nv))
1327 return RM_ERR_NOT_SUPPORTED;
1329 NV_UMA_ZONE_ALLOC_STACK(sp);
1331 return RM_ERR_NO_FREE_MEM;
1333 #ifdef NV_SUPPORT_OS_AGP
1334 if (NV_OSAGP_ENABLED(nv)) {
/* Reserve aperture pages in the RM bitmap, then bind memory there. */
1335 status = rm_alloc_agp_bitmap(sp, nv, count, &offset);
1336 if (status != RM_OK)
1339 if (nv_alloc_agp_pages(nv, count, (offset << PAGE_SHIFT),
1341 rm_free_agp_bitmap(sp, nv, count, offset);
1345 NV_UMA_ZONE_FREE_STACK(sp);
1347 pte_array[0] = (nv->agp.address + (offset << PAGE_SHIFT));
1350 #endif /* NV_SUPPORT_OS_AGP */
1352 if (NV_NVAGP_ENABLED(nv)) {
1353 status = rm_alloc_agp_pages(sp, nv, count, private, &offset);
1354 if (status != RM_OK)
1357 NV_UMA_ZONE_FREE_STACK(sp);
1359 pte_array[0] = (nv->agp.address + (offset << PAGE_SHIFT));
/* System memory path: write-combining is not wired up yet (no PAT). */
1363 /* XXX Fix me! (PAT) */
1364 if (cache_type == NV_MEMORY_WRITECOMBINED) {
1365 status = RM_ERR_NOT_SUPPORTED;
1369 if (!alloc_type_contiguous) {
1370 if (nv_alloc_system_pages(nv, count, cache_type, pte_array, private))
1373 if (nv_alloc_contig_pages(nv, count, cache_type, pte_array, private))
1382 NV_UMA_ZONE_FREE_STACK(sp);
/*
 * Top-level page free dispatcher (fragment): mirror of nv_alloc_pages();
 * routes to the OS AGP backend, the internal GART, or the system-memory
 * free paths depending on how the allocation was made.
 */
1387 RM_STATUS NV_API_CALL nv_free_pages(
1390 U032 alloc_type_agp,
1391 U032 alloc_type_contiguous,
1396 RM_STATUS status = RM_ERROR;
1397 nv_stack_t *sp = NULL;
1399 if (alloc_type_agp) {
1400 if (!NV_AGP_ENABLED(nv))
1401 return RM_ERR_NOT_SUPPORTED;
1403 NV_UMA_ZONE_ALLOC_STACK(sp);
1405 return RM_ERR_NO_FREE_MEM;
1407 #ifdef NV_SUPPORT_OS_AGP
1408 if (NV_OSAGP_ENABLED(nv)) {
1411 if (nv_free_agp_pages(nv, count, private, &offset) != 0)
/* Return the aperture pages to the RM bitmap (offset is in bytes here). */
1414 rm_free_agp_bitmap(sp, nv, count, (offset >> PAGE_SHIFT));
1415 NV_UMA_ZONE_FREE_STACK(sp);
1419 #endif /* NV_SUPPORT_OS_AGP */
1421 if (NV_NVAGP_ENABLED(nv)) {
1422 if (rm_free_agp_pages(sp, nv, private) != RM_OK)
1426 NV_UMA_ZONE_FREE_STACK(sp);
1428 if (!alloc_type_contiguous) {
1429 if (nv_free_system_pages(nv, private))
1432 if (nv_free_contig_pages(nv, private))
1441 NV_UMA_ZONE_FREE_STACK(sp);
/*
 * Maps a DMA (physical) address to an mmap(2) token (fragment): FreeBSD
 * cannot hand physical addresses to userspace directly, so the token is
 * derived from the allocation's kernel-linear mapping instead.
 */
1446 NvU64 NV_API_CALL nv_dma_to_mmap_token(
1451 struct nvidia_alloc *at;
1452 struct nvidia_softc *sc = nv->os_state;
1453 vm_offset_t offset, linear;
1456 offset = (vm_offset_t)address & PAGE_MASK;
1457 address &= ~PAGE_MASK;
1460 * XXX FreeBSD doesn't currently allow the use of physical
1461 * addresses as mmap(2) tokens, a linear address range
1462 * derived from the allocation's contiguous kernel mapping
/* Non-contiguous allocs match per-page PTEs; contiguous match base+i. */
1465 SLIST_FOREACH(at, &sc->alloc_list, list) {
1466 for (i = 0; i < (at->size / PAGE_SIZE); i++) {
1467 if ((!at->alloc_type_contiguous &&
1468 (address == (NvU64)(NvUPtr)at->pte_array[i]))
1469 || (address == (NvU64)(NvUPtr)at->pte_array[0] + (i * PAGE_SIZE))) {
1470 linear = at->address + (i * PAGE_SIZE);
1471 return NV_VM_TO_MMAP_OFFSET(linear + offset);
/*
 * Kernel virtual -> physical translation (fragment): uses the amd64
 * direct map when possible, rejects obvious userspace addresses.
 */
1480 NvU64 NV_API_CALL nv_get_kern_phys_address(NvU64 address)
1482 vm_offset_t va = (vm_offset_t) address;
1484 #if defined(NVCPU_X86_64)
1485 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
1486 return DMAP_TO_PHYS(va);
/* A kernel-VA query below KERNBASE indicates a caller bug. */
1489 if (va < KERNBASE) {
1490 os_dbg_breakpoint();
/*
 * User virtual -> physical translation (fragment): the symmetric check —
 * a user-VA query at/above KERNBASE indicates a caller bug. Resolves via
 * the current process's pmap.
 */
1497 NvU64 NV_API_CALL nv_get_user_phys_address(NvU64 address)
1500 vm_offset_t va = (vm_offset_t) address;
1502 if (va >= KERNBASE) {
1503 os_dbg_breakpoint();
1507 /* if (vm_fault_quick((caddr_t) va, VM_PROT_WRITE))
1510 vm = curproc->p_vmspace;
1511 return pmap_extract(vmspace_pmap(vm), va);
/*
 * mmap handler (fragment): offsets inside the FB/BC, register, or AGP
 * apertures map straight through as physical addresses; anything else is
 * treated as an mmap token for one of the driver's system allocations.
 */
1515 int nvidia_mmap_dev(
1516 struct nvidia_softc *sc,
1518 vm_offset_t *physical
1521 struct nvidia_alloc *at;
1522 nv_state_t *nv = sc->nv_state;
1527 NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, TRUE);
1530 * Offsets that fall into the frame buffer, registry or AGP
1531 * apertures are physical addresses and mapped into userspace
1534 if (IS_FB_OFFSET(nv, offset, PAGE_SIZE) ||
1535 IS_BC_OFFSET(nv, offset, PAGE_SIZE)) {
1540 if (IS_REG_OFFSET(nv, offset, PAGE_SIZE)) {
1545 if (IS_AGP_OFFSET(nv, offset, PAGE_SIZE)) {
/* Otherwise decode the token back into a kernel-linear address. */
1550 offset = NV_MMAP_TO_VM_OFFSET(offset);
1552 SLIST_FOREACH(at, &sc->alloc_list, list) {
1553 if (offset >= at->address &&
1554 offset < at->address + at->size) {
1555 *physical = vtophys(offset);
/*
 * Robust-channel (RC) watchdog: callout handler fires the RM callback
 * once a second and re-arms itself; start/stop toggle the callout and
 * the rc_timer_enabled flag.
 */
1563 void nvidia_rc_timer(void *data)
1565 nv_state_t *nv = data;
1566 struct nvidia_softc *sc = nv->os_state;
1571 NV_PCI_CHECK_CONFIG_SPACE(sp, nv, TRUE, TRUE, FALSE);
1574 * We need this timer to trigger again one second from
1575 * now, reset the timeout.
1577 rm_run_rc_callback(sp, nv);
1579 callout_reset(&sc->timer_ch, hz, nvidia_rc_timer, (void *) nv);
1582 int NV_API_CALL nv_start_rc_timer(
1586 struct nvidia_softc *sc = nv->os_state;
/* Idempotent: refuse to double-start. */
1588 if (nv->rc_timer_enabled != 0)
1591 callout_reset(&sc->timer_ch, hz, nvidia_rc_timer, (void *) nv);
1592 nv->rc_timer_enabled = 1;
1597 int NV_API_CALL nv_stop_rc_timer(
1601 struct nvidia_softc *sc = nv->os_state;
1603 if (nv->rc_timer_enabled == 0)
1606 callout_stop(&sc->timer_ch);
1607 nv->rc_timer_enabled = 0;
/* No-op on this platform: DMA address width is not configurable here. */
1612 void NV_API_CALL nv_set_dma_address_size(
/*
 * Looks up the nv_state_t for a PCI bus/slot pair (fragment); the magic
 * pair 255/255 selects the control-device state.
 */
1619 void* NV_API_CALL nv_get_adapter_state(
1625 struct nvidia_softc *sc;
1628 for (i = 0; i < NV_MAX_DEVICES; i++) {
1629 sc = devclass_get_softc(nvidia_devclass, i);
1634 if (nv->bus == bus && nv->slot == slot)
1638 if (bus == 255 && slot == 255) {
1639 nv = &nvidia_ctl_state;
/*
 * RM-callable config-space verification hook: runs the check on the
 * dedicated pci_cfgchk stack (allocated at first device open).
 */
1646 void NV_API_CALL nv_verify_pci_config(
1652 struct nvidia_softc *sc = nv->os_state;
1654 sp = sc->pci_cfgchk_sp;
1656 NV_PCI_CHECK_CONFIG_SPACE(sp, nv, check_the_bars, FALSE, FALSE);
/* ACPI method support is not implemented on this platform. */
1659 void NV_API_CALL nv_acpi_methods_init(U032 *handlesPresent)
1661 *handlesPresent = 0;
1664 void NV_API_CALL nv_acpi_methods_uninit(void)
1669 RM_STATUS NV_API_CALL nv_acpi_method(
1680 return RM_ERR_NOT_SUPPORTED;
1683 void* NV_API_CALL nv_get_smu_state(void)