From: Simon 'corecode' Schubert
Date: Wed, 9 Jan 2008 01:57:41 +0000 (+0100)
Subject: Initial DragonFly port.
X-Git-Url: https://gitweb.dragonflybsd.org/~corecode/nvidia.git/commitdiff_plain/bb9101fa6ce827e422a28974d760ef2af615445f

Initial DragonFly port.
---

diff --git a/src/Makefile b/src/Makefile
index 94be475..60e1908 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -11,7 +11,7 @@ KMODDIR?= /boot/modules
 .endif
 
 SRCS= nvidia_ctl.c nvidia_dev.c nvidia_linux.c nvidia_os.c nvidia_os_pci.c nvidia_os_registry.c nvidia_pci.c nvidia_subr.c nvidia_sysctl.c nvidia_i2c.c
-SRCS+= device_if.h bus_if.h pci_if.h vnode_if.h
+SRCS+= device_if.h bus_if.h pci_if.h
 
 CFLAGS+= -I${NVIDIA_ROOT}/src -DNV_VERSION_STRING=\"169.07\"
 CFLAGS+= -D__KERNEL__ -DNVRM -UDEBUG -U_DEBUG -DNDEBUG -O
diff --git a/src/nv-freebsd.h b/src/nv-freebsd.h
index be19aee..3e258de 100644
--- a/src/nv-freebsd.h
+++ b/src/nv-freebsd.h
@@ -21,6 +21,7 @@
 
 #include
 
+#ifdef __FreeBSD__
 #if __FreeBSD_version >= 700000
 #error This driver does not support FreeBSD 7.x/-CURRENT!
 #endif
@@ -30,6 +31,11 @@
 #if __FreeBSD_version < 503000
 #error This driver requires FreeBSD 5.3 or later!
 #endif
+#elif defined(__DragonFly__)
+/* Happy */
+#else
+#error Unknown operating system
+#endif
 
 #include
 #include
@@ -47,10 +53,18 @@
 #include
 #include
 
+#ifdef __FreeBSD__
 #include
+#else
+#include
+#endif
 #include
 #include
+#ifdef __FreeBSD__
 #include
+#else
+#include
+#endif
 #include
 #include
@@ -74,29 +88,44 @@
 #include
 #include
 #include
+#ifdef __FreeBSD__
 #include
+#endif
+#ifdef __FreeBSD__
 #include
+#else
+#include
+#endif
 #include
 
 #if defined(NVCPU_X86_64)
 #define NV_MMAP_TO_VM_OFFSET(_off) ((_off) | 0xfffff00000000000)
 #define NV_VM_TO_MMAP_OFFSET(_off) ((_off) & 0x00000fffffffffff)
 #else
-#define NV_MMAP_TO_VM_OFFSET(_off) ((_off) + VM_MIN_KERNEL_ADDRESS)
-#define NV_VM_TO_MMAP_OFFSET(_off) ((_off) - VM_MIN_KERNEL_ADDRESS)
+#define NV_MMAP_TO_VM_OFFSET(_off) ((_off) + KERNBASE)
+#define NV_VM_TO_MMAP_OFFSET(_off) ((_off) - KERNBASE)
 #endif
 
+#ifdef __FreeBSD__
 #include
 #include
 #include
 #include
+#else
+#include
+#include
+#endif
 #include
 #include
+#ifdef __FreeBSD__
 #include
 #include
 #include
+#else
+#include
+#endif
 
 #if defined(NVCPU_X86) && defined(PAE)
 #error This driver does not support PAE enabled kernels!
@@ -116,6 +145,8 @@
 
 #if __FreeBSD_version >= 601100
 #define __NV_ITHREAD() (curthread->td_pflags & TDP_ITHREAD)
+#elif defined(__DragonFly__)
+#define __NV_ITHREAD() (curthread->td_flags & TDF_INTTHREAD)
 #else
 #define __NV_ITHREAD() (curthread->td_ithd != NULL)
 #endif
@@ -146,7 +177,7 @@ void os_free_contig_pages(void *, U032);
  * can safely unset this flag.
  */
 
-#define NV_SUPPORT_LINUX_COMPAT
+#undef NV_SUPPORT_LINUX_COMPAT
 
 /*
  * Enable/Disable support for ACPI Power Management. This is untested and
@@ -222,15 +253,15 @@ struct nvidia_softc {
     struct sysctl_ctx_list sysctl_ctx;
     struct selinfo rsel;
 
-    struct callout_handle timer_ch;
+    struct callout timer_ch;
 
     /* list of allocations */
     SLIST_HEAD(alloc_list, nvidia_alloc) alloc_list;
 
     uint32_t refcnt;
 
-    struct mtx rm_mtx;
-    struct sx api_sx;
+    struct spinlock rm_lock;
+    struct lock api_lock;
 } nvidia_softc_t;
@@ -274,24 +305,37 @@ extern nv_parm_t nv_parms[];
 #define __NV_IOC_TYPE(_cmd) (((_cmd) >> 8) & 0xff)
 #define __NV_IOC_NR(_cmd) (((_cmd) >> 0) & 0xff)
 
-extern uma_zone_t nvidia_stack_t_zone;
+MALLOC_DECLARE(M_NV_STACK);
 
 #define NV_UMA_ZONE_ALLOC_STACK(ptr) \
 { \
-    (ptr) = uma_zalloc(nvidia_stack_t_zone, M_WAITOK); \
-    if ((ptr) != NULL) \
-    { \
-        (ptr)->size = sizeof((ptr)->stack); \
-        (ptr)->top = (ptr)->stack + (ptr)->size; \
-    } \
+    (ptr) = kmalloc(sizeof(nv_stack_t), M_NV_STACK, M_WAITOK); \
+    (ptr)->size = sizeof((ptr)->stack); \
+    (ptr)->top = (ptr)->stack + (ptr)->size; \
 }
 
 #define NV_UMA_ZONE_FREE_STACK(ptr) \
 { \
-    uma_zfree(nvidia_stack_t_zone, (ptr)); \
+    if ((ptr) != NULL) \
+        kfree((ptr), M_NV_STACK); \
     (ptr) = NULL; \
 }
 
+typedef struct thread d_thread_t;
+#define malloc kmalloc
+#define sprintf ksprintf
+__inline static void
+free(void *addr, struct malloc_type *type)
+{
+    if (addr != NULL)
+        kfree(addr, type);
+}
+
+#define pte_load(p) (*(p))
+#define pte_store(p, v) (*(p) = (v))
+#define pte_clear(p) pte_store((p), 0)
+void pmap_invalidate_range(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva);
+
 /* nvidia_dev.c */
 int nvidia_dev_attach (struct nvidia_softc *);
 int nvidia_dev_detach (struct nvidia_softc *);
diff --git a/src/nvidia_ctl.c b/src/nvidia_ctl.c
index ee4cd62..fd617d7 100644
--- a/src/nvidia_ctl.c
+++ b/src/nvidia_ctl.c
@@ -18,14 +18,12 @@ static d_close_t nvidia_ctl_close;
 static d_ioctl_t nvidia_ctl_ioctl;
 static d_poll_t nvidia_ctl_poll;
 
-static struct cdevsw nvidia_ctl_cdevsw = {
+static struct dev_ops nvidia_ctl_ops = {
+    { "nvidiactl", CDEV_MAJOR, D_TRACKCLOSE },
     .d_open = nvidia_ctl_open,
     .d_close = nvidia_ctl_close,
     .d_ioctl = nvidia_ctl_ioctl,
     .d_poll = nvidia_ctl_poll,
-    .d_name = "nvidiactl",
-    .d_version = D_VERSION,
-    .d_flags = D_TRACKCLOSE|D_NEEDGIANT
 };
 
 static struct cdev *nvidia_ctl_cdev = NULL;
@@ -34,47 +32,39 @@ struct nvidia_softc nvidia_ctl_sc;
 static int nvidia_count = 0;
 
 int nvidia_ctl_open(
-    struct cdev *dev,
-    int oflags,
-    int devtype,
-    d_thread_t *td
+    struct dev_open_args *ap
 )
 {
     int status;
     nv_state_t *nv = &nvidia_ctl_state;
 
     nv_lock_api(nv);
-    status = nvidia_open_ctl(dev, td);
+    status = nvidia_open_ctl(ap->a_head.a_dev, curthread);
     nv_unlock_api(nv);
 
     return status;
 }
 
 int nvidia_ctl_close(
-    struct cdev *dev,
-    int fflag,
-    int devtype,
-    d_thread_t *td
+    struct dev_close_args *ap
 )
 {
     int status;
     nv_state_t *nv = &nvidia_ctl_state;
 
     nv_lock_api(nv);
-    status = nvidia_close_ctl(dev, td);
+    status = nvidia_close_ctl(ap->a_head.a_dev, curthread);
     nv_unlock_api(nv);
 
     return status;
 }
 
 int nvidia_ctl_ioctl(
-    struct cdev *dev,
-    u_long cmd,
-    caddr_t data,
-    int fflag,
-    d_thread_t *td
+    struct dev_ioctl_args *ap
 )
 {
+    u_long cmd = ap->a_cmd;
+    caddr_t data = ap->a_data;
     int status = 0;
     nv_state_t *nv = &nvidia_ctl_state;
     struct nvidia_softc *sc;
@@ -105,7 +95,7 @@ int nvidia_ctl_ioctl(
             break;
 
         default:
-            status = nvidia_handle_ioctl(dev, cmd, data, fflag, td);
+            status = nvidia_handle_ioctl(ap->a_head.a_dev, cmd, data, ap->a_fflag, curthread);
     }
 
     nv_unlock_api(nv);
@@ -114,9 +104,7 @@ int nvidia_ctl_ioctl(
 }
 
 int nvidia_ctl_poll(
-    struct cdev *dev,
-    int events,
-    d_thread_t *td
+    struct dev_poll_args *ap
 )
 {
     nv_state_t *nv;
@@ -129,16 +117,16 @@ int nvidia_ctl_poll(
 
     nv_lock_rm(nv);
     STAILQ_FOREACH(et, &sc->event_queue, queue) {
-        if (et->event.file == __TD_FDT(td))
+        if (et->event.file == __TD_FDT(curthread))
             break;
     }
 
     if (et == NULL) {
         nv_unlock_rm(nv);
-        selrecord(td, &sc->rsel);
+        selrecord(curthread, &sc->rsel);
     } else {
         nv_unlock_rm(nv);
-        return (events & (POLLIN | POLLPRI | POLLRDNORM));
+        return (ap->a_events & (POLLIN | POLLPRI | POLLRDNORM));
     }
 
     return 0;
@@ -151,10 +139,11 @@ int nvidia_ctl_attach(void)
          * This routine is called from nvidia_attach(), multiple times
         * when more than one device is installed.
          */
-        nvidia_ctl_cdev = make_dev(&nvidia_ctl_cdevsw,
+        dev_ops_add(&nvidia_ctl_ops, -1, CDEV_CTL_MINOR);
+        nvidia_ctl_cdev = make_dev(&nvidia_ctl_ops,
                 CDEV_CTL_MINOR, UID_ROOT, GID_WHEEL, 0666,
-                "%s", nvidia_ctl_cdevsw.d_name);
+                "%s", nvidia_ctl_ops.head.name);
     }
 
     nvidia_count++;
@@ -173,6 +162,7 @@ int nvidia_ctl_detach(void)
          * Like nvidia_ctl_attach(), nvidia_ctl_detach() will also be
         * called more than once with multiple devices.
          */
+        dev_ops_remove(&nvidia_ctl_ops, -1, CDEV_CTL_MINOR);
         destroy_dev(nvidia_ctl_cdev);
     }
diff --git a/src/nvidia_dev.c b/src/nvidia_dev.c
index e7248da..7c2f13f 100644
--- a/src/nvidia_dev.c
+++ b/src/nvidia_dev.c
@@ -19,24 +19,21 @@ static d_ioctl_t nvidia_dev_ioctl;
 static d_poll_t nvidia_dev_poll;
 static d_mmap_t nvidia_dev_mmap;
 
-static struct cdevsw nvidia_dev_cdevsw = {
+static struct dev_ops nvidia_dev_ops = {
+    { "nvidia", CDEV_MAJOR, D_MEM|D_TRACKCLOSE },
     .d_open = nvidia_dev_open,
     .d_close = nvidia_dev_close,
     .d_ioctl = nvidia_dev_ioctl,
     .d_poll = nvidia_dev_poll,
     .d_mmap = nvidia_dev_mmap,
-    .d_name = "nvidia",
-    .d_version = D_VERSION,
-    .d_flags = D_MEM|D_TRACKCLOSE|D_NEEDGIANT
 };
 
 int nvidia_dev_open(
-    struct cdev *dev,
-    int oflags,
-    int devtype,
-    d_thread_t *td
+    struct dev_open_args *ap
 )
 {
+    struct cdev *dev = ap->a_head.a_dev;
+    d_thread_t *td = curthread;
     int status;
     struct nvidia_softc *sc;
     nv_state_t *nv;
@@ -56,12 +53,11 @@ int nvidia_dev_open(
 }
 
 int nvidia_dev_close(
-    struct cdev *dev,
-    int fflag,
-    int devtype,
-    d_thread_t *td
+    struct dev_close_args *ap
 )
 {
+    struct cdev *dev = ap->a_head.a_dev;
+    d_thread_t *td = curthread;
     int status;
     struct nvidia_softc *sc;
     nv_state_t *nv;
@@ -78,13 +74,14 @@ int nvidia_dev_close(
 }
 
 int nvidia_dev_ioctl(
-    struct cdev *dev,
-    u_long cmd,
-    caddr_t data,
-    int fflag,
-    d_thread_t *td
+    struct dev_ioctl_args *ap
 )
 {
+    struct cdev *dev = ap->a_head.a_dev;
+    u_long cmd = ap->a_cmd;
+    caddr_t data = ap->a_data;
+    int fflag = ap->a_fflag;
+    d_thread_t *td = curthread;
     int status;
     struct nvidia_softc *sc;
     nv_state_t *nv;
@@ -104,11 +101,12 @@ int nvidia_dev_ioctl(
 }
 
 int nvidia_dev_poll(
-    struct cdev *dev,
-    int events,
-    d_thread_t *td
+    struct dev_poll_args *ap
 )
 {
+    struct cdev *dev = ap->a_head.a_dev;
+    int events = ap->a_events;
+    d_thread_t *td = curthread;
     struct nvidia_softc *sc;
     nv_state_t *nv;
     struct nvidia_event *et;
@@ -136,12 +134,11 @@ int nvidia_dev_poll(
 }
 
 int nvidia_dev_mmap(
-    struct cdev *dev,
-    vm_offset_t offset,
-    vm_offset_t *address,
-    int nprot
+    struct dev_mmap_args *ap
 )
 {
+    struct cdev *dev = ap->a_head.a_dev;
+    vm_offset_t offset = ap->a_offset;
     int status;
     struct nvidia_softc *sc;
     vm_offset_t physical;
@@ -156,17 +153,18 @@ int nvidia_dev_mmap(
     nv_unlock_api(nv);
 
     if (status != -1)
-        *address = physical;
+        ap->a_result = physical;
     return status;
 }
 
 int nvidia_dev_attach(struct nvidia_softc *sc)
 {
-    sc->cdev = make_dev(&nvidia_dev_cdevsw,
+    dev_ops_add(&nvidia_dev_ops, -1, device_get_unit(sc->dev));
+    sc->cdev = make_dev(&nvidia_dev_ops,
            device_get_unit(sc->dev),
            UID_ROOT, GID_WHEEL, 0666,
-           "%s%d", nvidia_dev_cdevsw.d_name,
+           "%s%d", nvidia_dev_ops.head.name,
            device_get_unit(sc->dev));
 
     return 0;
@@ -174,6 +172,7 @@ int nvidia_dev_attach(struct nvidia_softc *sc)
 
 int nvidia_dev_detach(struct nvidia_softc *sc)
 {
+    dev_ops_remove(&nvidia_dev_ops, -1, device_get_unit(sc->dev));
     destroy_dev(sc->cdev);
     return 0;
 }
diff --git a/src/nvidia_os.c b/src/nvidia_os.c
index b755376..6f21cce 100644
--- a/src/nvidia_os.c
+++ b/src/nvidia_os.c
@@ -14,6 +14,22 @@
 
 #include "nv-freebsd.h"
 
+/* DragonFly compat */
+#include
+
+void
+pmap_invalidate_range(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
+{
+    struct pmap_inval_info info;
+    vm_offset_t va;
+
+    pmap_inval_init(&info);
+    for (va = sva; va < eva; va += PAGE_SIZE)
+        pmap_inval_add(&info, pmap, va);
+    pmap_inval_flush(&info);
+}
+
+
 /*
  * The NVIDIA kernel module's malloc identifier, needed for both tracking
  * and actual allocation/freeing purposes. M_NVIDIA is declared elsewhere
@@ -54,7 +70,7 @@ RM_STATUS NV_API_CALL os_alloc_mem(
 )
 {
     /* XXX Fix me? (malloc flags) */
-    *address = malloc(size, M_NVIDIA, M_NOWAIT | M_ZERO);
+    *address = malloc(size, M_NVIDIA, M_NOWAIT | M_ZERO | M_NULLOK);
 
     return *address ? RM_OK : RM_ERROR;
 }
@@ -94,7 +110,7 @@ RM_STATUS NV_API_CALL os_delay(U032 MilliSeconds)
 
     if (ticks > 0) {
         do {
-            tsleep((void *)os_delay, PUSER | PCATCH, "delay", ticks);
+            tsleep((void *)os_delay, PCATCH, "delay", ticks);
             getmicrotime(&tv_aux);
             if (NV_TIMERCMP(&tv_aux, &tv_end, <)) {
                 /* tv_aux = tv_end - tv_aux */
@@ -143,7 +159,7 @@ RM_STATUS NV_API_CALL os_kill_process(
         return RM_ERR_OPERATING_SYSTEM;
     }
 
-    psignal(p, sig);
+    ksignal(p, sig);
     return RM_OK;
 }
@@ -256,7 +272,7 @@ void* NV_API_CALL os_map_kernel_space(
 
     size = NV_ALIGN_UP(size, PAGE_SIZE);
 
-    va = kmem_alloc_nofault(kernel_map, size);
+    va = kmem_alloc_nofault(&kernel_map, size);
     vm = (void *)va;
 
     if (vm != NULL) {
@@ -265,7 +281,7 @@ void* NV_API_CALL os_map_kernel_space(
             pte_store(ptep, start | PG_RW | PG_V | PG_G | cache_bits);
             start += PAGE_SIZE;
         }
-        pmap_invalidate_range(kernel_pmap, va, tva);
+        pmap_invalidate_range(&kernel_pmap, va, tva);
     }
 
     return vm;
@@ -284,8 +300,8 @@ void NV_API_CALL os_unmap_kernel_space(
     if (va != 0) {
         for (tva = va; tva < (va + size); tva += PAGE_SIZE)
             pte_clear(vtopte(tva));
-        pmap_invalidate_range(kernel_pmap, va, tva);
-        kmem_free(kernel_map, va, size);
+        pmap_invalidate_range(&kernel_pmap, va, tva);
+        kmem_free(&kernel_map, va, size);
     }
 }
@@ -407,14 +423,14 @@ int NV_API_CALL nv_printf(
 )
 {
     char *message = nv_error_string;
-    va_list arglist;
+    __va_list arglist;
     int chars_written = 0;
 
     if (debuglevel >= ((cur_debuglevel >> 4) & 3)) {
-        va_start(arglist, format);
-        chars_written = vsprintf(message, format, arglist);
-        va_end(arglist);
-        printf("%s", message);
+        __va_start(arglist, format);
+        chars_written = kvsprintf(message, format, arglist);
+        __va_end(arglist);
+        kprintf("%s", message);
     }
 
     return chars_written;
@@ -427,12 +443,12 @@ int NV_API_CALL nv_snprintf(
     ...
 )
 {
-    va_list arglist;
+    __va_list arglist;
     int chars_written;
 
-    va_start(arglist, fmt);
-    chars_written = vsnprintf(buf, size, fmt, arglist);
-    va_end(arglist);
+    __va_start(arglist, fmt);
+    chars_written = kvsnprintf(buf, size, fmt, arglist);
+    __va_end(arglist);
 
     return chars_written;
 }
@@ -444,10 +460,10 @@ void NV_API_CALL nv_os_log(
 )
 {
     int l;
-    sprintf(nv_error_string, "NVRM: ");
+    ksprintf(nv_error_string, "NVRM: ");
     l = strlen(nv_error_string);
-    vsnprintf(nv_error_string + l, MAX_ERROR_STRING - l, fmt, ap);
-    printf("%s", nv_error_string);
+    kvsnprintf(nv_error_string + l, MAX_ERROR_STRING - l, fmt, ap);
+    kprintf("%s", nv_error_string);
 }
 
 S032 NV_API_CALL os_mem_cmp(
@@ -480,7 +496,7 @@ RM_STATUS NV_API_CALL os_memcpy_from_user(
     U032 length
 )
 {
-    if (src < (void *) VM_MAXUSER_ADDRESS)
+    if (src < (void *) VM_MAX_USER_ADDRESS)
         return copyin(src, dst, length) ? RM_ERR_INVALID_POINTER : RM_OK;
 
     return os_mem_copy(dst, src, length) ? RM_ERR_INVALID_POINTER : RM_OK;
@@ -492,7 +508,7 @@ RM_STATUS NV_API_CALL os_memcpy_to_user(
     U032 length
 )
 {
-    if (dst < (void *) VM_MAXUSER_ADDRESS)
+    if (dst < (void *) VM_MAX_USER_ADDRESS)
         return copyout(src, dst, length) ? RM_ERR_INVALID_POINTER : RM_OK;
 
     return os_mem_copy(dst, src, length) ? RM_ERR_INVALID_POINTER : RM_OK;
@@ -559,7 +575,7 @@ NvU64 NV_API_CALL os_get_system_memory_size(void)
 
 U032 NV_API_CALL os_get_cpu_count(void)
 {
-    return mp_ncpus;
+    return ncpus;
 }
 
 RM_STATUS NV_API_CALL os_flush_cpu_cache(void)
@@ -589,8 +605,7 @@ RM_STATUS NV_API_CALL os_clear_smp_barrier(void)
 
 struct os_mutex {
     nv_stack_t *sp;
-    struct mtx mutex_mtx;
-    struct cv mutex_cv;
+    struct spinlock lock;
     int refcnt;
 };
@@ -610,8 +625,7 @@ RM_STATUS NV_API_CALL os_alloc_sema(void **semaphore)
         return status;
     }
 
-    mtx_init(&mtx->mutex_mtx, "os.mutex_mtx", NULL, MTX_DEF | MTX_RECURSE);
-    cv_init(&mtx->mutex_cv, "os.mutex_cv");
+    spin_init(&mtx->lock);
 
     mtx->sp = sp;
     mtx->refcnt = 1;
@@ -629,8 +643,7 @@ RM_STATUS NV_API_CALL os_free_sema(void *semaphore)
     sp = mtx->sp;
     NV_UMA_ZONE_FREE_STACK(sp);
 
-    mtx_destroy(&mtx->mutex_mtx);
-    cv_destroy(&mtx->mutex_cv);
+    spin_uninit(&mtx->lock);
 
     os_free_mem(semaphore);
 
@@ -641,13 +654,13 @@ RM_STATUS NV_API_CALL os_acquire_sema(void *semaphore)
 {
     struct os_mutex *mtx = semaphore;
 
-    mtx_lock(&mtx->mutex_mtx);
+    spin_lock_wr(&mtx->lock);
     if (mtx->refcnt > 0)
         rm_disable_interrupts(mtx->sp);
     mtx->refcnt--;
     if (mtx->refcnt < 0)
-        cv_wait(&mtx->mutex_cv, &mtx->mutex_mtx);
-    mtx_unlock(&mtx->mutex_mtx);
+        msleep(mtx, &mtx->lock, 0, "nvsemaq", 0);
+    spin_unlock_wr(&mtx->lock);
 
     return RM_OK;
 }
@@ -656,14 +669,14 @@ BOOL NV_API_CALL os_cond_acquire_sema(void *semaphore)
 {
     struct os_mutex *mtx = semaphore;
 
-    mtx_lock(&mtx->mutex_mtx);
+    spin_lock_wr(&mtx->lock);
     if (mtx->refcnt < 1) {
-        mtx_unlock(&mtx->mutex_mtx);
+        spin_unlock_wr(&mtx->lock);
         return FALSE;
     } else {
         rm_disable_interrupts(mtx->sp);
         mtx->refcnt--;
-        mtx_unlock(&mtx->mutex_mtx);
+        spin_unlock_wr(&mtx->lock);
     }
 
     return TRUE;
@@ -673,13 +686,13 @@ RM_STATUS NV_API_CALL os_release_sema(void *semaphore)
 {
     struct os_mutex *mtx = semaphore;
 
-    mtx_lock(&mtx->mutex_mtx);
+    spin_lock_wr(&mtx->lock);
     if (mtx->refcnt < 0)
-        cv_signal(&mtx->mutex_cv);
+        wakeup_one(mtx);
     else
         rm_enable_interrupts(mtx->sp);
     mtx->refcnt++;
-    mtx_unlock(&mtx->mutex_mtx);
+    spin_unlock_wr(&mtx->lock);
 
     return RM_OK;
 }
@@ -701,13 +714,7 @@ BOOL NV_API_CALL os_pat_supported(void)
 
 void* NV_API_CALL NV_STACKWATCH_CALLBACK os_get_stack_start(void *stack_pointer)
 {
-    struct thread *td;
-#if defined(NVCPU_X86_64)
-    __asm __volatile__("movq %%gs:0,%0" : "=r" (td));
__volatile__("movq %%gs:0,%0" : "=r" (td)); -#elif defined(NVCPU_X86) - __asm __volatile__("movl %%fs:0,%0" : "=r" (td)); -#endif - return (void *)cpu_getstack(td); + return (curthread->td_sp); } NvU64 NV_API_CALL os_get_current_pdpte(U032 address) diff --git a/src/nvidia_os_registry.c b/src/nvidia_os_registry.c index 31328e0..d420bf9 100644 --- a/src/nvidia_os_registry.c +++ b/src/nvidia_os_registry.c @@ -16,6 +16,30 @@ #include "nv-freebsd.h" #include "nv-reg.h" +static char * +strsep(char **strp, const char *delim) +{ + char *start = *strp; + char *p; + + for (p = start; *p != 0; p++) { + int found = 0; + const char *q; + + for (q = delim; !found && *q != 0; q++) + found = *p == *q; + if (found) + break; + } + + if (*p != 0) + *strp = p + 1; + else + *strp = NULL; + + return (start); +} + void nvidia_update_registry(char *new_option_string) { nv_parm_t *entry; @@ -30,7 +54,7 @@ void nvidia_update_registry(char *new_option_string) if (sp == NULL) return; - option_string = strdup(new_option_string, M_NVIDIA); + option_string = kstrdup(new_option_string, M_NVIDIA); ptr = mod = option_string; while (*ptr != '\0') { diff --git a/src/nvidia_pci.c b/src/nvidia_pci.c index 85334bc..1a7b333 100644 --- a/src/nvidia_pci.c +++ b/src/nvidia_pci.c @@ -59,10 +59,12 @@ int nvidia_pci_setup_intr(device_t dev) sc = device_get_softc(dev); /* XXX Revisit! (INTR_FAST, INTR_MPSAFE) */ - flags = INTR_TYPE_AV; + flags = 0; #if __FreeBSD_version >= 700031 status = bus_setup_intr(dev, sc->irq, flags, NULL, nvidia_intr, sc, &sc->irq_ih); +#elif defined(__DragonFly__) + status = bus_setup_intr(dev, sc->irq, flags, nvidia_intr, sc, &sc->irq_ih, NULL); #else status = bus_setup_intr(dev, sc->irq, flags, nvidia_intr, sc, &sc->irq_ih); #endif @@ -267,8 +269,8 @@ int nvidia_pci_attach(device_t dev) goto fail; } - mtx_init(&sc->rm_mtx, "dev.rm_mtx", NULL, MTX_SPIN | MTX_RECURSE); - sx_init(&sc->api_sx, "dev.api_sx"); + spin_init(&sc->rm_lock); + lockinit(&sc->api_lock, "nvapi", 0, LK_CANRECURSE); return 0; @@ -307,8 +309,8 @@ int nvidia_pci_detach(device_t dev) nv_unlock_api(nv); - mtx_destroy(&sc->rm_mtx); - sx_destroy(&sc->api_sx); + spin_uninit(&sc->rm_lock); + lockuninit(&sc->api_lock); status = nvidia_pci_teardown_intr(dev); if (status) diff --git a/src/nvidia_subr.c b/src/nvidia_subr.c index 592be4e..6d1af9c 100644 --- a/src/nvidia_subr.c +++ b/src/nvidia_subr.c @@ -17,7 +17,7 @@ #include #endif -uma_zone_t nvidia_stack_t_zone; +MALLOC_DEFINE(M_NV_STACK, "nvstack", "NVidia stack"); static nv_stack_t *__nvidia_init_sp = NULL; devclass_t nvidia_devclass; @@ -41,6 +41,7 @@ int nvidia_attach(device_t dev) nv->device_id = pci_get_device(dev); nv->interrupt_line = pci_get_irq(dev); nv->handle = dev; + callout_init(&sc->timer_ch); for (i = 0; i < NV_GPU_NUM_BARS; i++) { if (sc->BAR_recs[i] != NULL) { @@ -344,7 +345,7 @@ int nvidia_open_ctl( } if (filep == NULL) { - filep = malloc(sizeof(nvidia_filep_t), M_NVIDIA, M_NOWAIT | M_ZERO); + filep = kmalloc(sizeof(nvidia_filep_t), M_NVIDIA, M_NOWAIT | M_ZERO | M_NULLOK); if (filep == NULL) return ENOMEM; filep->fd_table = __TD_FDT(td); @@ -433,7 +434,7 @@ int nvidia_open_dev( } if (filep == NULL) { - filep = malloc(sizeof(nvidia_filep_t), M_NVIDIA, M_NOWAIT | M_ZERO); + filep = kmalloc(sizeof(nvidia_filep_t), M_NVIDIA, M_NOWAIT | M_ZERO | M_NULLOK); if (filep == NULL) return ENOMEM; filep->fd_table = __TD_FDT(td); @@ -569,14 +570,8 @@ int nvidia_modevent( */ sc = &nvidia_ctl_sc; - nvidia_stack_t_zone = uma_zcreate("nv_stack_t", sizeof(nv_stack_t), - NULL, NULL, NULL, NULL, 
UMA_ALIGN_PTR, 0); - if (nvidia_stack_t_zone == NULL) - return ENOMEM; - NV_UMA_ZONE_ALLOC_STACK(sp); if (sp == NULL) { - uma_zdestroy(nvidia_stack_t_zone); return ENOMEM; } @@ -584,16 +579,15 @@ int nvidia_modevent( STAILQ_INIT(&sc->filep_queue); if (!rm_init_rm(sp)) { - printf("NVRM: rm_init_rm() failed!\n"); + kprintf("NVRM: rm_init_rm() failed!\n"); NV_UMA_ZONE_FREE_STACK(sp); - uma_zdestroy(nvidia_stack_t_zone); return EIO; } __nvidia_init_sp = sp; - mtx_init(&sc->rm_mtx, "ctl.rm_mtx", NULL, MTX_SPIN | MTX_RECURSE); - sx_init(&sc->api_sx, "ctl.api_sx"); + spin_init(&sc->rm_lock); + lockinit(&sc->api_lock, "nvapi", 0, LK_CANRECURSE); nvidia_ctl_state.os_state = sc; sc->nv_state = (void *)&nvidia_ctl_state; @@ -621,8 +615,8 @@ int nvidia_modevent( nv_unlock_api(nv); - mtx_destroy(&sc->rm_mtx); - sx_destroy(&sc->api_sx); + spin_uninit(&sc->rm_lock); + lockuninit(&sc->api_lock); sp = __nvidia_init_sp; rm_shutdown_rm(sp); @@ -632,8 +626,6 @@ int nvidia_modevent( nvidia_sysctl_exit(); nvidia_linux_exit(); - uma_zdestroy(nvidia_stack_t_zone); - break; default: @@ -664,7 +656,7 @@ S032 nv_os_agp_init( sc->agp_dev = agp_find_device(); if (!sc->agp_dev) { - printf("NVRM: agp_find_device failed, chipset unsupported?\n"); + kprintf("NVRM: agp_find_device failed, chipset unsupported?\n"); return -ENODEV; } @@ -812,7 +804,7 @@ RM_STATUS NV_API_CALL nv_agp_init( * DON'T REDISTRIBUTE THE DRIVER WITH THIS SANITY CHECK REMOVED! * ------------------------------------------------------------- */ - printf("NVRM: detected agp.ko, aborting NVIDIA AGP setup!\n"); + kprintf("NVRM: detected agp.ko, aborting NVIDIA AGP setup!\n"); goto failed; } @@ -893,25 +885,25 @@ void NV_API_CALL nv_lock_rm(nv_state_t *nv) * interrupts on the current processor. */ struct nvidia_softc *sc = nv->os_state; - mtx_lock_spin(&sc->rm_mtx); + spin_lock_wr(&sc->rm_lock); } void NV_API_CALL nv_unlock_rm(nv_state_t *nv) { struct nvidia_softc *sc = nv->os_state; - mtx_unlock_spin(&sc->rm_mtx); + spin_unlock_wr(&sc->rm_lock); } void nv_lock_api(nv_state_t *nv) { struct nvidia_softc *sc = nv->os_state; - sx_xlock(&sc->api_sx); + lockmgr(&sc->api_lock, LK_EXCLUSIVE|LK_CANRECURSE); } void nv_unlock_api(nv_state_t *nv) { struct nvidia_softc *sc = nv->os_state; - sx_xunlock(&sc->api_sx); + lockmgr(&sc->api_lock, LK_RELEASE); } @@ -925,7 +917,7 @@ void NV_API_CALL nv_post_event( struct nvidia_softc *sc; struct nvidia_event *et; - et = malloc(sizeof(nvidia_event_t), M_NVIDIA, M_NOWAIT | M_ZERO); + et = kmalloc(sizeof(nvidia_event_t), M_NVIDIA, M_NOWAIT | M_ZERO | M_NULLOK); if (et == NULL) return; @@ -1037,7 +1029,7 @@ S032 nv_alloc_contig_pages( return -ENOMEM; /* XXX: Fix me? 
(cache_type) */ - at = malloc(sizeof(struct nvidia_alloc), M_NVIDIA, M_WAITOK | M_ZERO); + at = kmalloc(sizeof(struct nvidia_alloc), M_NVIDIA, M_WAITOK | M_ZERO); if (!at) { os_free_contig_pages(address, size); return -ENOMEM; @@ -1089,12 +1081,12 @@ S032 nv_alloc_system_pages( u_int32_t i, size; size = count * PAGE_SIZE; - at = malloc(sizeof(struct nvidia_alloc), M_NVIDIA, M_WAITOK | M_ZERO); + at = kmalloc(sizeof(struct nvidia_alloc), M_NVIDIA, M_WAITOK | M_ZERO); if (!at) { return -ENOMEM; } - address = malloc(size, M_NVIDIA, M_WAITOK | M_ZERO); + address = kmalloc(size, M_NVIDIA, M_WAITOK | M_ZERO); if (!address) { free(at, M_NVIDIA); return -ENOMEM; @@ -1107,7 +1099,7 @@ S032 nv_alloc_system_pages( pt_entry_t *ptep = vtopte(tva); pte_store(ptep, pte_load(ptep) | PG_N); /* PWT, PCD */ } - pmap_invalidate_range(kernel_pmap, va, tva); + pmap_invalidate_range(&kernel_pmap, va, tva); } #endif @@ -1119,9 +1111,7 @@ S032 nv_alloc_system_pages( for (i = 0; i < count; i++) { pte_array[i] = (NvU64)vtophys(at->address + (i * PAGE_SIZE)); - vm_page_lock_queues(); vm_page_wire(PHYS_TO_VM_PAGE(pte_array[i])); - vm_page_unlock_queues(); } *private = at; @@ -1143,9 +1133,7 @@ S032 nv_free_system_pages( SLIST_REMOVE(&sc->alloc_list, at, nvidia_alloc, list); for (i = 0; i < count; i++) { - vm_page_lock_queues(); vm_page_unwire(PHYS_TO_VM_PAGE(at->pte_array[i]), 0); - vm_page_unlock_queues(); } free((void *)at->address, M_NVIDIA); @@ -1398,7 +1386,7 @@ NvU64 NV_API_CALL nv_get_kern_phys_address(NvU64 address) return DMAP_TO_PHYS(va); #endif - if (va < VM_MIN_KERNEL_ADDRESS) { + if (va < KERNBASE) { os_dbg_breakpoint(); return 0; } @@ -1411,7 +1399,7 @@ NvU64 NV_API_CALL nv_get_user_phys_address(NvU64 address) struct vmspace *vm; vm_offset_t va = (vm_offset_t) address; - if (va >= VM_MIN_KERNEL_ADDRESS) { + if (va >= KERNBASE) { os_dbg_breakpoint(); return 0; } @@ -1488,7 +1476,7 @@ void nvidia_rc_timer(void *data) */ rm_run_rc_callback(sp, nv); - sc->timer_ch = timeout(nvidia_rc_timer, (void *) nv, hz); + callout_reset(&sc->timer_ch, hz, nvidia_rc_timer, (void *) nv); } int NV_API_CALL nv_start_rc_timer( @@ -1500,7 +1488,7 @@ int NV_API_CALL nv_start_rc_timer( if (nv->rc_timer_enabled != 0) return -EIO; - sc->timer_ch = timeout(nvidia_rc_timer, (void *) nv, hz); + callout_reset(&sc->timer_ch, hz, nvidia_rc_timer, (void *) nv); nv->rc_timer_enabled = 1; return 0; @@ -1515,7 +1503,7 @@ int NV_API_CALL nv_stop_rc_timer( if (nv->rc_timer_enabled == 0) return -EIO; - untimeout(nvidia_rc_timer, (void *) nv, sc->timer_ch); + callout_stop(&sc->timer_ch); nv->rc_timer_enabled = 0; return 0;