/* _NVRM_COPYRIGHT_BEGIN_
 *
 * Copyright 2001-2002 by NVIDIA Corporation.  All rights reserved.  All
 * information contained herein is proprietary and confidential to NVIDIA
 * Corporation.  Any use, reproduction, or disclosure without the written
 * permission of NVIDIA Corporation is prohibited.
 *
 * _NVRM_COPYRIGHT_END_
 */

#include "nv-misc.h"
#include "os-interface.h"
#include "nv.h"
#include "nv-freebsd.h"

static d_open_t  nvidia_dev_open;
static d_close_t nvidia_dev_close;
static d_ioctl_t nvidia_dev_ioctl;
static d_poll_t  nvidia_dev_poll;
static d_mmap_t  nvidia_dev_mmap;

/*
 * Character-device switch for the per-GPU /dev/nvidiaN nodes.
 * D_TRACKCLOSE ensures every close() reaches nvidia_dev_close so the
 * resource manager can tear down per-open state.
 */
static struct dev_ops nvidia_dev_ops = {
    { "nvidia", CDEV_MAJOR, D_MEM|D_TRACKCLOSE },
    .d_open =  nvidia_dev_open,
    .d_close = nvidia_dev_close,
    .d_ioctl = nvidia_dev_ioctl,
    .d_poll =  nvidia_dev_poll,
    .d_mmap =  nvidia_dev_mmap,
};

/*
 * open(2) entry point for /dev/nvidiaN.
 *
 * Looks up the softc for the device unit, then forwards to the
 * OS-independent nvidia_open_dev() under the API lock.
 * Returns ENXIO if no softc is registered for this unit (device gone),
 * otherwise whatever status nvidia_open_dev() reports.
 */
int nvidia_dev_open(
    struct dev_open_args *ap
)
{
    struct cdev *dev = ap->a_head.a_dev;
    d_thread_t *td = curthread;
    int status;
    struct nvidia_softc *sc;
    nv_state_t *nv;
    int unit = minor(dev);

    sc = devclass_get_softc(nvidia_devclass, unit);
    if (!sc)
        return ENXIO;
    nv = sc->nv_state;

    nv_lock_api(nv);
    status = nvidia_open_dev(sc, dev, td);
    nv_unlock_api(nv);

    return status;
}

/*
 * close(2) entry point; forwards to nvidia_close_dev() under the API lock.
 *
 * Guard against a missing softc (mirrors nvidia_dev_open); the original
 * code dereferenced sc unconditionally, which would fault if the unit had
 * been detached out from under an open descriptor.
 */
int nvidia_dev_close(
    struct dev_close_args *ap
)
{
    struct cdev *dev = ap->a_head.a_dev;
    d_thread_t *td = curthread;
    int status;
    struct nvidia_softc *sc;
    nv_state_t *nv;
    int unit = minor(dev);

    sc = devclass_get_softc(nvidia_devclass, unit);
    if (!sc)
        return ENXIO;
    nv = sc->nv_state;

    nv_lock_api(nv);
    status = nvidia_close_dev(sc, dev, td);
    nv_unlock_api(nv);

    return status;
}

/*
 * ioctl(2) entry point.
 *
 * Rejects requests whose ioctl type field is not NV_IOCTL_MAGIC with
 * ENOTTY (the conventional "not my ioctl" errno), then dispatches to
 * nvidia_handle_ioctl() under the API lock.  ENXIO if the unit's softc
 * has disappeared (guard added for consistency with nvidia_dev_open).
 */
int nvidia_dev_ioctl(
    struct dev_ioctl_args *ap
)
{
    struct cdev *dev = ap->a_head.a_dev;
    u_long cmd = ap->a_cmd;
    caddr_t data = ap->a_data;
    int fflag = ap->a_fflag;
    d_thread_t *td = curthread;
    int status;
    struct nvidia_softc *sc;
    nv_state_t *nv;
    int unit = minor(dev);

    if (__NV_IOC_TYPE(cmd) != NV_IOCTL_MAGIC)
        return ENOTTY;

    sc = devclass_get_softc(nvidia_devclass, unit);
    if (!sc)
        return ENXIO;
    nv = sc->nv_state;

    nv_lock_api(nv);
    status = nvidia_handle_ioctl(dev, cmd, data, fflag, td);
    nv_unlock_api(nv);

    return status;
}

/*
 * poll(2)/select(2) entry point.
 *
 * Scans the softc's event queue (under the RM lock) for an event posted
 * to this file descriptor.  If none is pending, registers the caller with
 * selrecord() so it is woken when an event arrives and reports no ready
 * events; otherwise reports the requested read-side events as ready.
 * ENXIO if the unit's softc has disappeared (guard added for consistency
 * with nvidia_dev_open).
 */
int nvidia_dev_poll(
    struct dev_poll_args *ap
)
{
    struct cdev *dev = ap->a_head.a_dev;
    int events = ap->a_events;
    d_thread_t *td = curthread;
    struct nvidia_softc *sc;
    nv_state_t *nv;
    struct nvidia_event *et;
    int unit = minor(dev);

    sc = devclass_get_softc(nvidia_devclass, unit);
    if (!sc)
        return ENXIO;
    nv = sc->nv_state;

    nv_lock_rm(nv);

    STAILQ_FOREACH(et, &sc->event_queue, queue) {
        if (et->event.file == __TD_FDT(td))
            break;
    }

    if (et == NULL) {
        /* No event pending for this fd: arm select/poll wakeup. */
        nv_unlock_rm(nv);

        selrecord(td, &sc->rsel);
        ap->a_events = 0;
    } else {
        nv_unlock_rm(nv);
        ap->a_events = (events & (POLLIN | POLLPRI | POLLRDNORM));
    }

    return 0;
}

/*
 * mmap entry point: translates a device offset into a physical page.
 *
 * nvidia_mmap_dev() resolves the offset under the API lock; on success
 * (status != -1) the physical address is converted to a page number for
 * the VM system via atop().  ENXIO if the unit's softc has disappeared
 * (guard added for consistency with nvidia_dev_open).
 */
int nvidia_dev_mmap(
    struct dev_mmap_args *ap
)
{
    struct cdev *dev = ap->a_head.a_dev;
    vm_offset_t offset = ap->a_offset;
    int status;
    struct nvidia_softc *sc;
    vm_offset_t physical;
    nv_state_t *nv;
    int unit = minor(dev);

    sc = devclass_get_softc(nvidia_devclass, unit);
    if (!sc)
        return ENXIO;
    nv = sc->nv_state;

    nv_lock_api(nv);
    status = nvidia_mmap_dev(sc, offset, &physical);
    nv_unlock_api(nv);

    if (status != -1)
        ap->a_result = atop(physical);
    return status;
}

/*
 * Create the /dev/nvidiaN node for this softc's unit.  On DragonFly
 * releases before 200205 the dev_ops table must also be registered
 * explicitly with dev_ops_add().
 */
int nvidia_dev_attach(struct nvidia_softc *sc)
{
#if __DragonFly_version < 200205
    dev_ops_add(&nvidia_dev_ops, -1, device_get_unit(sc->dev));
#endif
    sc->cdev = make_dev(&nvidia_dev_ops,
            device_get_unit(sc->dev),
            UID_ROOT, GID_WHEEL, 0666,
            "%s%d", nvidia_dev_ops.head.name,
            device_get_unit(sc->dev));

    return 0;
}

/*
 * Destroy the /dev/nvidiaN node created by nvidia_dev_attach(), undoing
 * the pre-200205 dev_ops registration first where applicable.
 */
int nvidia_dev_detach(struct nvidia_softc *sc)
{
#if __DragonFly_version < 200205
    dev_ops_remove(&nvidia_dev_ops, -1, device_get_unit(sc->dev));
#endif
    destroy_dev(sc->cdev);

    return 0;
}