From e8a91b3cefd66c1680879e24e150f5e98d3a8c20 Mon Sep 17 00:00:00 2001 From: =?utf8?q?Fran=C3=A7ois=20Tigeot?= Date: Tue, 23 Jul 2013 19:18:48 +0200 Subject: [PATCH] drm2: Handle locking * Locally define PROC_LOCK and PROC_UNLOCK to nothing * irq_lock must be a lwkt serializer * Remove Giant usage Was it even needed on FreeBSD ? --- sys/dev/drm2/drmP.h | 44 ++++----- sys/dev/drm2/drm_crtc.c | 136 +++++++++++++-------------- sys/dev/drm2/drm_crtc.h | 2 +- sys/dev/drm2/drm_crtc_helper.c | 4 +- sys/dev/drm2/drm_dp_iic_helper.c | 5 - sys/dev/drm2/drm_drv.c | 38 ++++---- sys/dev/drm2/drm_fb_helper.c | 6 +- sys/dev/drm2/drm_fops.c | 16 ++-- sys/dev/drm2/drm_gem_names.c | 42 ++++----- sys/dev/drm2/drm_gem_names.h | 3 +- sys/dev/drm2/drm_irq.c | 73 ++++++-------- sys/dev/drm2/drm_mm.c | 26 ++--- sys/dev/drm2/drm_mm.h | 2 +- sys/dev/drm2/drm_pci.c | 5 +- sys/dev/drm2/i915/i915_debug.c | 64 ++++++------- sys/dev/drm2/i915/i915_dma.c | 50 +++++----- sys/dev/drm2/i915/i915_drv.c | 30 +++--- sys/dev/drm2/i915/i915_drv.h | 16 ++-- sys/dev/drm2/i915/i915_gem.c | 59 ++++++------ sys/dev/drm2/i915/i915_irq.c | 98 +++++++++---------- sys/dev/drm2/i915/intel_display.c | 44 ++++----- sys/dev/drm2/i915/intel_dp.c | 4 +- sys/dev/drm2/i915/intel_fb.c | 4 +- sys/dev/drm2/i915/intel_iic.c | 20 ++-- sys/dev/drm2/i915/intel_overlay.c | 14 +-- sys/dev/drm2/i915/intel_ringbuffer.c | 18 ++-- sys/dev/drm2/i915/intel_ringbuffer.h | 2 +- sys/dev/drm2/i915/intel_sprite.c | 8 +- sys/dev/drm2/i915/intel_tv.c | 8 +- 29 files changed, 412 insertions(+), 429 deletions(-) diff --git a/sys/dev/drm2/drmP.h b/sys/dev/drm2/drmP.h index 8819fd758e..6e8ff68680 100644 --- a/sys/dev/drm2/drmP.h +++ b/sys/dev/drm2/drmP.h @@ -53,6 +53,8 @@ struct drm_file; #include #include #include +#include +#include #include #include #include @@ -193,11 +195,11 @@ SYSCTL_DECL(_hw_drm); #define DRM_CURPROC curthread #define DRM_STRUCTPROC struct thread -#define DRM_SPINTYPE struct mtx -#define DRM_SPININIT(l,name) mtx_init(l, 
name, NULL, MTX_DEF) -#define DRM_SPINUNINIT(l) mtx_destroy(l) -#define DRM_SPINLOCK(l) mtx_lock(l) -#define DRM_SPINUNLOCK(u) mtx_unlock(u) +#define DRM_SPINTYPE struct spinlock +#define DRM_SPININIT(l,name) spin_init(l) +#define DRM_SPINUNINIT(l) spin_uninit(l) +#define DRM_SPINLOCK(l) spin_lock(l) +#define DRM_SPINUNLOCK(u) spin_unlock(u) #define DRM_SPINLOCK_IRQSAVE(l, irqflags) do { \ mtx_lock(l); \ (void)irqflags; \ @@ -205,13 +207,13 @@ SYSCTL_DECL(_hw_drm); #define DRM_SPINUNLOCK_IRQRESTORE(u, irqflags) mtx_unlock(u) #define DRM_SPINLOCK_ASSERT(l) mtx_assert(l, MA_OWNED) #define DRM_CURRENTPID curthread->td_proc->p_pid -#define DRM_LOCK(dev) sx_xlock(&(dev)->dev_struct_lock) -#define DRM_UNLOCK(dev) sx_xunlock(&(dev)->dev_struct_lock) +#define DRM_LOCK(dev) lockmgr(&(dev)->dev_struct_lock, LK_EXCLUSIVE); +#define DRM_UNLOCK(dev) lockmgr(&(dev)->dev_struct_lock, LK_RELEASE); #define DRM_LOCK_SLEEP(dev, chan, flags, msg, timeout) \ - (sx_sleep((chan), &(dev)->dev_struct_lock, (flags), (msg), (timeout))) + (lksleep((chan), &(dev)->dev_struct_lock, (flags), (msg), (timeout))) #if defined(INVARIANTS) -#define DRM_LOCK_ASSERT(dev) sx_assert(&(dev)->dev_struct_lock, SA_XLOCKED) -#define DRM_UNLOCK_ASSERT(dev) sx_assert(&(dev)->dev_struct_lock, SA_UNLOCKED) +#define DRM_LOCK_ASSERT(dev) KKASSERT(lockstatus(&(dev)->dev_struct_lock, curthread) != 0); +#define DRM_UNLOCK_ASSERT(dev) KKASSERT(lockstatus(&(dev)->dev_struct_lock, curthread) == 0); #else #define DRM_LOCK_ASSERT(d) #define DRM_UNLOCK_ASSERT(d) @@ -331,12 +333,12 @@ do { \ /* Returns -errno to shared code */ #define DRM_WAIT_ON( ret, queue, timeout, condition ) \ for ( ret = 0 ; !ret && !(condition) ; ) { \ - DRM_UNLOCK(dev); \ - mtx_lock(&dev->irq_lock); \ + DRM_UNLOCK(dev); \ + lwkt_serialize_enter(&dev->irq_lock); \ if (!(condition)) \ ret = -mtx_sleep(&(queue), &dev->irq_lock, \ PCATCH, "drmwtq", (timeout)); \ - mtx_unlock(&dev->irq_lock); \ + lwkt_serialize_exit(&dev->irq_lock); \ DRM_LOCK(dev); \ } @@ 
-829,10 +831,10 @@ struct drm_device { int flags; /* Flags to open(2) */ /* Locks */ - struct mtx dma_lock; /* protects dev->dma */ - struct mtx irq_lock; /* protects irq condition checks */ - struct mtx dev_lock; /* protects everything else */ - struct sx dev_struct_lock; + struct spinlock dma_lock; /* protects dev->dma */ + struct lwkt_serialize irq_lock; /* protects irq condition checks */ + struct lock dev_lock; /* protects everything else */ + struct lock dev_struct_lock; DRM_SPINTYPE drw_lock; /* Usage Counters */ @@ -906,8 +908,8 @@ struct drm_device { atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */ - struct mtx vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ - struct mtx vbl_lock; + struct lock vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ + struct lock vbl_lock; atomic_t *vblank_refcount; /* number of users of vblank interruptsper crtc */ u32 *last_vblank; /* protected by dev->vbl_lock, used */ /* for wraparound handling */ @@ -920,12 +922,12 @@ struct drm_device { u32 max_vblank_count; /**< size of vblank counter register */ struct list_head vblank_event_list; - struct mtx event_lock; + struct lock event_lock; struct drm_mode_config mode_config; /**< Current mode config */ /* GEM part */ - struct sx object_name_lock; + struct lock object_name_lock; struct drm_gem_names object_names; void *mm_private; diff --git a/sys/dev/drm2/drm_crtc.c b/sys/dev/drm2/drm_crtc.c index 78c2fb24c0..31fad974ee 100644 --- a/sys/dev/drm2/drm_crtc.c +++ b/sys/dev/drm2/drm_crtc.c @@ -364,7 +364,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, crtc->dev = dev; crtc->funcs = funcs; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); ret = 
drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); if (ret) goto out; @@ -372,7 +372,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, list_add_tail(&crtc->head, &dev->mode_config.crtc_list); dev->mode_config.num_crtc++; out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -465,7 +465,7 @@ int drm_connector_init(struct drm_device *dev, { int ret; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR); if (ret) @@ -491,7 +491,7 @@ int drm_connector_init(struct drm_device *dev, dev->mode_config.dpms_property, 0); out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -519,11 +519,11 @@ void drm_connector_cleanup(struct drm_connector *connector) list_for_each_entry_safe(mode, t, &connector->user_modes, head) drm_mode_remove(connector, mode); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); drm_mode_object_put(dev, &connector->base); list_del(&connector->head); dev->mode_config.num_connector--; - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); } int drm_encoder_init(struct drm_device *dev, @@ -533,7 +533,7 @@ int drm_encoder_init(struct drm_device *dev, { int ret; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); if (ret) @@ -547,7 +547,7 @@ int drm_encoder_init(struct drm_device *dev, dev->mode_config.num_encoder++; out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -556,11 +556,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); drm_mode_object_put(dev, 
&encoder->base); list_del(&encoder->head); dev->mode_config.num_encoder--; - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); } int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, @@ -571,7 +571,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, { int ret; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE); if (ret) @@ -598,7 +598,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, } out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -607,7 +607,7 @@ void drm_plane_cleanup(struct drm_plane *plane) { struct drm_device *dev = plane->dev; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); kfree(plane->format_types, DRM_MEM_KMS); drm_mode_object_put(dev, &plane->base); /* if not added to a list, it must be a private plane */ @@ -615,7 +615,7 @@ void drm_plane_cleanup(struct drm_plane *plane) list_del(&plane->head); dev->mode_config.num_plane--; } - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); } /** @@ -878,7 +878,7 @@ int drm_mode_create_dirty_info_property(struct drm_device *dev) */ void drm_mode_config_init(struct drm_device *dev) { - sx_init(&dev->mode_config.mutex, "kmslk"); + lockinit(&dev->mode_config.lock, "kmslk", 0, LK_CANRECURSE); INIT_LIST_HEAD(&dev->mode_config.fb_list); INIT_LIST_HEAD(&dev->mode_config.crtc_list); INIT_LIST_HEAD(&dev->mode_config.connector_list); @@ -888,9 +888,9 @@ void drm_mode_config_init(struct drm_device *dev) INIT_LIST_HEAD(&dev->mode_config.plane_list); drm_gem_names_init(&dev->mode_config.crtc_names); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); drm_mode_create_standard_connector_properties(dev); - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, 
LK_RELEASE); /* Just to be sure */ dev->mode_config.num_fb = 0; @@ -1114,7 +1114,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); /* * For the non-control nodes we need to limit the list of resources @@ -1277,7 +1277,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data, card_res->count_connectors, card_res->count_encoders); out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -1309,7 +1309,7 @@ int drm_mode_getcrtc(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, crtc_resp->crtc_id, DRM_MODE_OBJECT_CRTC); @@ -1337,7 +1337,7 @@ int drm_mode_getcrtc(struct drm_device *dev, } out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -1384,7 +1384,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); @@ -1487,7 +1487,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, out_resp->count_encoders = encoders_count; out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -1502,7 +1502,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, enc_resp->encoder_id, DRM_MODE_OBJECT_ENCODER); if (!obj) { @@ -1521,7 +1521,7 @@ int 
drm_mode_getencoder(struct drm_device *dev, void *data, enc_resp->possible_clones = encoder->possible_clones; out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -1548,7 +1548,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); config = &dev->mode_config; /* @@ -1571,7 +1571,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data, plane_resp->count_planes = config->num_plane; out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -1599,7 +1599,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, plane_resp->plane_id, DRM_MODE_OBJECT_PLANE); if (!obj) { @@ -1639,7 +1639,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data, plane_resp->count_format_types = plane->format_count; out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -1670,7 +1670,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); /* * First, find the plane, crtc, and fb objects. 
If not available, @@ -1769,7 +1769,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data, } out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -1813,7 +1813,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX) return (ERANGE); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, crtc_req->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { @@ -1934,7 +1934,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, out: kfree(connector_set, DRM_MEM_KMS); drm_mode_destroy(dev, mode); - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -1952,7 +1952,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, if (!req->flags) return (EINVAL); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id); @@ -1980,7 +1980,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, } } out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -2061,7 +2061,7 @@ int drm_mode_addfb(struct drm_device *dev, if ((config->min_height > r.height) || (r.height > config->max_height)) return (EINVAL); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); ret = -dev->mode_config.funcs->fb_create(dev, file_priv, &r, &fb); if (ret != 0) { @@ -2074,7 +2074,7 @@ int drm_mode_addfb(struct drm_device *dev, DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -2192,7 +2192,7 @@ int drm_mode_addfb2(struct drm_device *dev, return ret; } - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); /* TODO check 
buffer is sufficiently large */ /* TODO setup destructor callback */ @@ -2208,7 +2208,7 @@ int drm_mode_addfb2(struct drm_device *dev, DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return (ret); } @@ -2242,7 +2242,7 @@ int drm_mode_rmfb(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB); /* TODO check that we really get a framebuffer back. */ if (!obj) { @@ -2267,7 +2267,7 @@ int drm_mode_rmfb(struct drm_device *dev, fb->funcs->destroy(fb); out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -2299,7 +2299,7 @@ int drm_mode_getfb(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { ret = EINVAL; @@ -2315,7 +2315,7 @@ int drm_mode_getfb(struct drm_device *dev, fb->funcs->create_handle(fb, file_priv, &r->handle); out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -2334,7 +2334,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { ret = EINVAL; @@ -2382,7 +2382,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, out_err2: kfree(clips, DRM_MEM_KMS); out_err1: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -2410,12 +2410,12 @@ void drm_fb_release(struct drm_file *priv) #endif struct drm_framebuffer *fb, *tfb; - 
sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { list_del(&fb->filp_head); fb->funcs->destroy(fb); } - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); } /** @@ -2529,7 +2529,7 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); if (!obj) { @@ -2553,7 +2553,7 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev, drm_mode_attachmode(dev, connector, mode); out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -2583,7 +2583,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); if (!obj) { @@ -2600,7 +2600,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev, ret = drm_mode_detachmode(dev, connector, &mode); out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -2803,7 +2803,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); if (!obj) { ret = -EINVAL; @@ -2883,7 +2883,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, out_resp->count_enum_blobs = blob_count; } done: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -2933,7 +2933,7 @@ int 
drm_mode_getblob_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); if (!obj) { ret = -EINVAL; @@ -2951,7 +2951,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev, out_resp->length = blob->length; done: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -2995,7 +2995,7 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); if (!obj) { @@ -3052,7 +3052,7 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev, if (!ret) drm_connector_property_set_value(connector, property, out_resp->value); out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -3108,7 +3108,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { ret = -EINVAL; @@ -3144,7 +3144,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -3162,7 +3162,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, crtc_lut->crtc_id, 
DRM_MODE_OBJECT_CRTC); if (!obj) { ret = -EINVAL; @@ -3195,7 +3195,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev, goto out; } out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -3220,7 +3220,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data, page_flip->reserved != 0) return (EINVAL); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) goto out; @@ -3257,13 +3257,13 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data, if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { ret = ENOMEM; - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); if (file_priv->event_space < sizeof e->event) { - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); goto out; } file_priv->event_space -= sizeof e->event; - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); e = kmalloc(sizeof *e, DRM_MEM_KMS, M_WAITOK | M_ZERO); @@ -3279,15 +3279,15 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data, ret = -crtc->funcs->page_flip(crtc, fb, e); if (ret != 0) { if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); file_priv->event_space += sizeof e->event; - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); kfree(e, DRM_MEM_KMS); } } out: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return (ret); } diff --git a/sys/dev/drm2/drm_crtc.h b/sys/dev/drm2/drm_crtc.h index 97158abcd5..4aaa3a00e7 100644 --- a/sys/dev/drm2/drm_crtc.h +++ b/sys/dev/drm2/drm_crtc.h @@ -640,7 +640,7 @@ struct drm_mode_group { * */ struct drm_mode_config { - struct sx mutex; /* protects configuration (mode lists etc.) */ + struct lock lock; /* protects configuration (mode lists etc.) 
*/ struct drm_gem_names crtc_names; /* use this idr for all IDs, fb, crtc, connector, modes */ /* this is limited to one for now */ int num_fb; diff --git a/sys/dev/drm2/drm_crtc_helper.c b/sys/dev/drm2/drm_crtc_helper.c index 85569098a9..3737075620 100644 --- a/sys/dev/drm2/drm_crtc_helper.c +++ b/sys/dev/drm2/drm_crtc_helper.c @@ -939,7 +939,7 @@ static void output_poll_execute(void *ctx, int pending) dev = ctx; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { /* if this is HPD or polled don't check it - @@ -968,7 +968,7 @@ static void output_poll_execute(void *ctx, int pending) changed = true; } - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); if (changed) { #if 0 diff --git a/sys/dev/drm2/drm_dp_iic_helper.c b/sys/dev/drm2/drm_dp_iic_helper.c index f3aa3657a5..7b40681a12 100644 --- a/sys/dev/drm2/drm_dp_iic_helper.c +++ b/sys/dev/drm2/drm_dp_iic_helper.c @@ -241,12 +241,9 @@ iic_dp_aux_add_bus(device_t dev, const char *name, int idx, error; static int dp_bus_counter; - mtx_lock(&Giant); - idx = atomic_fetchadd_int(&dp_bus_counter, 1); ibus = device_add_child(dev, "drm_iic_dp_aux", idx); if (ibus == NULL) { - mtx_unlock(&Giant); DRM_ERROR("drm_iic_dp_aux bus %d creation error\n", idx); return (-ENXIO); } @@ -254,7 +251,6 @@ iic_dp_aux_add_bus(device_t dev, const char *name, error = device_probe_and_attach(ibus); if (error != 0) { device_delete_child(dev, ibus); - mtx_unlock(&Giant); DRM_ERROR("drm_iic_dp_aux bus %d attach failed, %d\n", idx, error); return (-error); @@ -269,7 +265,6 @@ iic_dp_aux_add_bus(device_t dev, const char *name, *bus = ibus; *adapter = data->port; } - mtx_unlock(&Giant); return (error); } diff --git a/sys/dev/drm2/drm_drv.c b/sys/dev/drm2/drm_drv.c index e60079fec6..79c402e6cf 100644 --- a/sys/dev/drm2/drm_drv.c +++ b/sys/dev/drm2/drm_drv.c @@ -283,12 +283,12 @@ int drm_attach(device_t kdev, 
drm_pci_id_list_t *idlist) dev->irq = (int) rman_get_start(dev->irqr); } - mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF); - mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF); - mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF); - mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF); - mtx_init(&dev->event_lock, "drmev", NULL, MTX_DEF); - sx_init(&dev->dev_struct_lock, "drmslk"); + lockinit(&dev->dev_lock, "drmdev", 0, LK_CANRECURSE); + lwkt_serialize_init(&dev->irq_lock); + lockinit(&dev->vbl_lock, "drmvbl", 0, LK_CANRECURSE); + DRM_SPININIT(&dev->drw_lock, "drmdrw"); + lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE); + lockinit(&dev->dev_struct_lock, "drmslk", 0, LK_CANRECURSE); id_entry = drm_find_description(dev->pci_vendor, dev->pci_device, idlist); @@ -576,12 +576,11 @@ error: if (dev->devnode != NULL) destroy_dev(dev->devnode); - mtx_destroy(&dev->drw_lock); - mtx_destroy(&dev->vbl_lock); - mtx_destroy(&dev->irq_lock); - mtx_destroy(&dev->dev_lock); - mtx_destroy(&dev->event_lock); - sx_destroy(&dev->dev_struct_lock); + DRM_SPINUNINIT(&dev->drw_lock); + lockuninit(&dev->vbl_lock); + lockuninit(&dev->dev_lock); + lockuninit(&dev->event_lock); + lockuninit(&dev->dev_struct_lock); return retcode; } @@ -647,12 +646,11 @@ static void drm_unload(struct drm_device *dev) if (pci_disable_busmaster(dev->device)) DRM_ERROR("Request to disable bus-master failed.\n"); - mtx_destroy(&dev->drw_lock); - mtx_destroy(&dev->vbl_lock); - mtx_destroy(&dev->irq_lock); - mtx_destroy(&dev->dev_lock); - mtx_destroy(&dev->event_lock); - sx_destroy(&dev->dev_struct_lock); + DRM_SPINUNINIT(&dev->drw_lock); + lockuninit(&dev->vbl_lock); + lockuninit(&dev->dev_lock); + lockuninit(&dev->event_lock); + lockuninit(&dev->dev_struct_lock); } int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -697,9 +695,7 @@ drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) if (retcode == 0) { atomic_inc(&dev->counts[_DRM_STAT_OPENS]); DRM_LOCK(dev); - 
mtx_lock(&Giant); device_busy(dev->device); - mtx_unlock(&Giant); if (!dev->open_count++) retcode = drm_firstopen(dev); DRM_UNLOCK(dev); @@ -790,9 +786,7 @@ void drm_close(void *data) */ atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); - mtx_lock(&Giant); device_unbusy(dev->device); - mtx_unlock(&Giant); if (--dev->open_count == 0) { retcode = drm_lastclose(dev); } diff --git a/sys/dev/drm2/drm_fb_helper.c b/sys/dev/drm2/drm_fb_helper.c index 746bbc2866..2ffe74d3f3 100644 --- a/sys/dev/drm2/drm_fb_helper.c +++ b/sys/dev/drm2/drm_fb_helper.c @@ -1538,7 +1538,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) if (!fb_helper->fb) return 0; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (crtc->fb) crtcs_bound = true; @@ -1548,7 +1548,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) if (!bound && crtcs_bound) { fb_helper->delayed_hotplug = true; - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return 0; } DRM_DEBUG_KMS("\n"); @@ -1560,7 +1560,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height); drm_setup_crtcs(fb_helper); - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); } diff --git a/sys/dev/drm2/drm_fops.c b/sys/dev/drm2/drm_fops.c index c523c92c61..580681b17b 100644 --- a/sys/dev/drm2/drm_fops.c +++ b/sys/dev/drm2/drm_fops.c @@ -130,27 +130,27 @@ drm_read(struct cdev *kdev, struct uio *uio, int ioflag) return (EINVAL); } dev = drm_get_device_from_kdev(kdev); - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); while (list_empty(&file_priv->event_list)) { if ((ioflag & O_NONBLOCK) != 0) { error = EAGAIN; goto out; } - error = msleep(&file_priv->event_space, &dev->event_lock, + error = 
lksleep(&file_priv->event_space, &dev->event_lock, PCATCH, "drmrea", 0); if (error != 0) goto out; } while (drm_dequeue_event(dev, file_priv, uio, &e)) { - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); error = uiomove(e->event, e->event->length, uio); e->destroy(e); if (error != 0) return (error); - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); } out: - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); return (error); } @@ -162,7 +162,7 @@ drm_event_wakeup(struct drm_pending_event *e) file_priv = e->file_priv; dev = file_priv->dev; - mtx_assert(&dev->event_lock, MA_OWNED); + KKASSERT(lockstatus(&dev->event_lock, curthread) != 0); wakeup(&file_priv->event_space); selwakeup(&file_priv->event_poll); @@ -183,7 +183,7 @@ drm_poll(struct cdev *kdev, int events, struct thread *td) dev = drm_get_device_from_kdev(kdev); revents = 0; - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); if ((events & (POLLIN | POLLRDNORM)) != 0) { if (list_empty(&file_priv->event_list)) { selrecord(td, &file_priv->event_poll); @@ -191,6 +191,6 @@ drm_poll(struct cdev *kdev, int events, struct thread *td) revents |= events & (POLLIN | POLLRDNORM); } } - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); return (revents); } diff --git a/sys/dev/drm2/drm_gem_names.c b/sys/dev/drm2/drm_gem_names.c index cdd94b4039..72003dc6dd 100644 --- a/sys/dev/drm2/drm_gem_names.c +++ b/sys/dev/drm2/drm_gem_names.c @@ -34,6 +34,7 @@ #include #include #include +#include #include @@ -49,7 +50,7 @@ drm_gem_names_init(struct drm_gem_names *names) names->unr = new_unrhdr(1, INT_MAX, NULL); /* XXXKIB */ names->names_hash = hashinit(1000 /* XXXKIB */, M_GEM_NAMES, &names->hash_mask); - mtx_init(&names->lock, "drmnames", NULL, MTX_DEF); + lockinit(&names->lock, "drmnames", 0, LK_CANRECURSE); } void @@ -58,15 +59,15 @@ drm_gem_names_fini(struct drm_gem_names *names) struct drm_gem_name *np; int i; - 
mtx_lock(&names->lock); + lockmgr(&names->lock, LK_EXCLUSIVE); for (i = 0; i <= names->hash_mask; i++) { while ((np = LIST_FIRST(&names->names_hash[i])) != NULL) { drm_gem_names_delete_name(names, np); - mtx_lock(&names->lock); + lockmgr(&names->lock, LK_EXCLUSIVE); } } - mtx_unlock(&names->lock); - mtx_destroy(&names->lock); + lockmgr(&names->lock, LK_RELEASE); + lockuninit(&names->lock); hashdestroy(names->names_hash, M_GEM_NAMES, names->hash_mask); delete_unrhdr(names->unr); } @@ -84,16 +85,16 @@ drm_gem_name_ref(struct drm_gem_names *names, uint32_t name, { struct drm_gem_name *n; - mtx_lock(&names->lock); + lockmgr(&names->lock, LK_EXCLUSIVE); LIST_FOREACH(n, gem_name_hash_index(names, name), link) { if (n->name == name) { if (ref != NULL) ref(n->ptr); - mtx_unlock(&names->lock); + lockmgr(&names->lock, LK_RELEASE); return (n->ptr); } } - mtx_unlock(&names->lock); + lockmgr(&names->lock, LK_RELEASE); return (NULL); } @@ -132,31 +133,30 @@ drm_gem_name_create(struct drm_gem_names *names, void *p, uint32_t *name) struct drm_gem_name *np; np = kmalloc(sizeof(struct drm_gem_name), M_GEM_NAMES, M_WAITOK); - mtx_lock(&names->lock); + lockmgr(&names->lock, LK_EXCLUSIVE); if (*name != 0) { - mtx_unlock(&names->lock); + lockmgr(&names->lock, LK_RELEASE); return (EALREADY); } np->name = alloc_unr(names->unr); if (np->name == -1) { - mtx_unlock(&names->lock); + lockmgr(&names->lock, LK_RELEASE); free(np, M_GEM_NAMES); return (ENOMEM); } *name = np->name; np->ptr = p; LIST_INSERT_HEAD(gem_name_hash_index(names, np->name), np, link); - mtx_unlock(&names->lock); + lockmgr(&names->lock, LK_RELEASE); return (0); } static void drm_gem_names_delete_name(struct drm_gem_names *names, struct drm_gem_name *np) { - - mtx_assert(&names->lock, MA_OWNED); + KKASSERT(lockstatus(&names->lock, curthread) != 0); LIST_REMOVE(np, link); - mtx_unlock(&names->lock); + lockmgr(&names->lock, LK_RELEASE); free_unr(names->unr, np->name); kfree(np, M_GEM_NAMES); } @@ -167,7 +167,7 @@ 
drm_gem_names_remove(struct drm_gem_names *names, uint32_t name) struct drm_gem_name *n; void *res; - mtx_lock(&names->lock); + lockmgr(&names->lock, LK_EXCLUSIVE); LIST_FOREACH(n, gem_name_hash_index(names, name), link) { if (n->name == name) { res = n->ptr; @@ -175,7 +175,7 @@ drm_gem_names_remove(struct drm_gem_names *names, uint32_t name) return (res); } } - mtx_unlock(&names->lock); + lockmgr(&names->lock, LK_RELEASE); return (NULL); } @@ -189,7 +189,7 @@ drm_gem_names_foreach(struct drm_gem_names *names, bzero(&marker, sizeof(marker)); marker.name = -1; - mtx_lock(&names->lock); + lockmgr(&names->lock, LK_EXCLUSIVE); for (i = 0; i <= names->hash_mask; i++) { for (np = LIST_FIRST(&names->names_hash[i]); np != NULL; ) { if (np->name == -1) { @@ -197,14 +197,14 @@ drm_gem_names_foreach(struct drm_gem_names *names, continue; } LIST_INSERT_AFTER(np, &marker, link); - mtx_unlock(&names->lock); + lockmgr(&names->lock, LK_RELEASE); fres = f(np->name, np->ptr, arg); - mtx_lock(&names->lock); + lockmgr(&names->lock, LK_EXCLUSIVE); np = LIST_NEXT(&marker, link); LIST_REMOVE(&marker, link); if (fres) break; } } - mtx_unlock(&names->lock); + lockmgr(&names->lock, LK_RELEASE); } diff --git a/sys/dev/drm2/drm_gem_names.h b/sys/dev/drm2/drm_gem_names.h index aedf12a026..0c5ba3735f 100644 --- a/sys/dev/drm2/drm_gem_names.h +++ b/sys/dev/drm2/drm_gem_names.h @@ -35,7 +35,6 @@ #include #include -#include #include struct drm_gem_name { @@ -45,7 +44,7 @@ struct drm_gem_name { }; struct drm_gem_names { - struct mtx lock; + struct lock lock; LIST_HEAD(drm_gem_names_head, drm_gem_name) *names_hash; u_long hash_mask; struct unrhdr *unr; diff --git a/sys/dev/drm2/drm_irq.c b/sys/dev/drm2/drm_irq.c index 766edcbe82..4b9c4bacc9 100644 --- a/sys/dev/drm2/drm_irq.c +++ b/sys/dev/drm2/drm_irq.c @@ -70,16 +70,6 @@ int drm_irq_by_busid(struct drm_device *dev, void *data, return 0; } -static void -drm_irq_handler_wrap(void *arg) -{ - struct drm_device *dev = arg; - - mtx_lock(&dev->irq_lock); 
- dev->driver->irq_handler(arg); - mtx_unlock(&dev->irq_lock); -} - int drm_irq_install(struct drm_device *dev) { @@ -105,11 +95,8 @@ drm_irq_install(struct drm_device *dev) DRM_UNLOCK(dev); /* Install handler */ - retcode = bus_setup_intr(dev->device, dev->irqr, - INTR_TYPE_TTY | INTR_MPSAFE, NULL, - (dev->driver->driver_features & DRIVER_LOCKLESS_IRQ) != 0 ? - drm_irq_handler_wrap : dev->driver->irq_handler, - dev, &dev->irqh); + retcode = bus_setup_intr(dev->device, dev->irqr, INTR_MPSAFE, + dev->driver->irq_handler, dev, &dev->irqh, &dev->irq_lock); if (retcode != 0) goto err; @@ -140,14 +127,14 @@ int drm_irq_uninstall(struct drm_device *dev) * Wake up any waiters so they don't hang. */ if (dev->num_crtcs) { - mtx_lock(&dev->vbl_lock); + lockmgr(&dev->vbl_lock, LK_EXCLUSIVE); for (i = 0; i < dev->num_crtcs; i++) { wakeup(&dev->_vblank_count[i]); dev->vblank_enabled[i] = 0; dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i); } - mtx_unlock(&dev->vbl_lock); + lockmgr(&dev->vbl_lock, LK_RELEASE); } DRM_DEBUG("irq=%d\n", dev->irq); @@ -259,7 +246,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) * so no updates of timestamps or count can happen after we've * disabled. Needed to prevent races in case of delayed irq's. */ - mtx_lock(&dev->vblank_time_lock); + lockmgr(&dev->vblank_time_lock, LK_EXCLUSIVE); dev->driver->disable_vblank(dev, crtc); dev->vblank_enabled[crtc] = 0; @@ -307,7 +294,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) /* Invalidate all timestamps while vblank irq's are off. 
*/ clear_vblank_timestamps(dev, crtc); - mtx_unlock(&dev->vblank_time_lock); + lockmgr(&dev->vblank_time_lock, LK_RELEASE); } static void vblank_disable_fn(void * arg) @@ -319,13 +306,13 @@ static void vblank_disable_fn(void * arg) return; for (i = 0; i < dev->num_crtcs; i++) { - mtx_lock(&dev->vbl_lock); + lockmgr(&dev->vbl_lock, LK_EXCLUSIVE); if (atomic_read(&dev->vblank_refcount[i]) == 0 && dev->vblank_enabled[i]) { DRM_DEBUG("disabling vblank on crtc %d\n", i); vblank_disable_and_save(dev, i); } - mtx_unlock(&dev->vbl_lock); + lockmgr(&dev->vbl_lock, LK_RELEASE); } } @@ -358,7 +345,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) #if 0 mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF); #endif - mtx_init(&dev->vblank_time_lock, "drmvtl", NULL, MTX_DEF); + lockinit(&dev->vblank_time_lock, "drmvtl", 0, LK_CANRECURSE); dev->num_crtcs = num_crtcs; @@ -782,10 +769,10 @@ int drm_vblank_get(struct drm_device *dev, int crtc) { int ret = 0; - mtx_lock(&dev->vbl_lock); + lockmgr(&dev->vbl_lock, LK_EXCLUSIVE); /* Going from 0->1 means we have to enable interrupts again */ if (atomic_fetchadd_int(&dev->vblank_refcount[crtc], 1) == 0) { - mtx_lock(&dev->vblank_time_lock); + lockmgr(&dev->vblank_time_lock, LK_EXCLUSIVE); if (!dev->vblank_enabled[crtc]) { /* Enable vblank irqs under vblank_time_lock protection. 
* All vblank count & timestamp updates are held off @@ -803,14 +790,14 @@ int drm_vblank_get(struct drm_device *dev, int crtc) drm_update_vblank_count(dev, crtc); } } - mtx_unlock(&dev->vblank_time_lock); + lockmgr(&dev->vblank_time_lock, LK_RELEASE); } else { if (!dev->vblank_enabled[crtc]) { atomic_dec(&dev->vblank_refcount[crtc]); ret = EINVAL; } } - mtx_unlock(&dev->vbl_lock); + lockmgr(&dev->vbl_lock, LK_RELEASE); return ret; } @@ -842,9 +829,9 @@ void drm_vblank_off(struct drm_device *dev, int crtc) struct timeval now; unsigned int seq; - mtx_lock(&dev->vbl_lock); + lockmgr(&dev->vbl_lock, LK_EXCLUSIVE); vblank_disable_and_save(dev, crtc); - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); wakeup(&dev->_vblank_count[crtc]); /* Send any queued vblank events, lest the natives grow disquiet */ @@ -864,8 +851,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc) drm_event_wakeup(&e->base); } - mtx_unlock(&dev->event_lock); - mtx_unlock(&dev->vbl_lock); + lockmgr(&dev->event_lock, LK_RELEASE); + lockmgr(&dev->vbl_lock, LK_RELEASE); } /** @@ -900,9 +887,9 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc) { if (dev->vblank_inmodeset[crtc]) { - mtx_lock(&dev->vbl_lock); + lockmgr(&dev->vbl_lock, LK_EXCLUSIVE); dev->vblank_disable_allowed = 1; - mtx_unlock(&dev->vbl_lock); + lockmgr(&dev->vbl_lock, LK_RELEASE); if (dev->vblank_inmodeset[crtc] & 0x2) drm_vblank_put(dev, crtc); @@ -982,7 +969,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, e->base.file_priv = file_priv; e->base.destroy = drm_vblank_event_destroy; - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); if (file_priv->event_space < sizeof e->event) { ret = EBUSY; @@ -1016,12 +1003,12 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, vblwait->reply.sequence = vblwait->request.sequence; } - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); return 0; err_unlock: - 
mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); kfree(e, DRM_MEM_VBLANK); drm_vblank_put(dev, pipe); return ret; @@ -1104,7 +1091,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, } dev->last_vblank_wait[crtc] = vblwait->request.sequence; - mtx_lock(&dev->vblank_time_lock); + lockmgr(&dev->vblank_time_lock, LK_EXCLUSIVE); while (((drm_vblank_count(dev, crtc) - vblwait->request.sequence) > (1 << 23)) && dev->irq_enabled) { /* @@ -1115,12 +1102,12 @@ int drm_wait_vblank(struct drm_device *dev, void *data, * application when crtc is disabled or irq * uninstalled anyway. */ - ret = msleep(&dev->_vblank_count[crtc], &dev->vblank_time_lock, + ret = lksleep(&dev->_vblank_count[crtc], &dev->vblank_time_lock, PCATCH, "drmvbl", 3 * hz); if (ret != 0) break; } - mtx_unlock(&dev->vblank_time_lock); + lockmgr(&dev->vblank_time_lock, LK_RELEASE); if (ret != EINTR) { struct timeval now; long reply_seq; @@ -1144,7 +1131,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc) seq = drm_vblank_count_and_time(dev, crtc, &now); - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { if (e->pipe != crtc) @@ -1160,7 +1147,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc) drm_event_wakeup(&e->base); } - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); } /** @@ -1184,11 +1171,11 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) * vblank enable/disable, as this would cause inconsistent * or corrupted timestamps and vblank counts. */ - mtx_lock(&dev->vblank_time_lock); + lockmgr(&dev->vblank_time_lock, LK_EXCLUSIVE); /* Vblank irq handling disabled. Nothing to do. 
*/ if (!dev->vblank_enabled[crtc]) { - mtx_unlock(&dev->vblank_time_lock); + lockmgr(&dev->vblank_time_lock, LK_RELEASE); return false; } @@ -1229,6 +1216,6 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) wakeup(&dev->_vblank_count[crtc]); drm_handle_vblank_events(dev, crtc); - mtx_unlock(&dev->vblank_time_lock); + lockmgr(&dev->vblank_time_lock, LK_RELEASE); return true; } diff --git a/sys/dev/drm2/drm_mm.c b/sys/dev/drm2/drm_mm.c index 4c1e60d029..0348bf2596 100644 --- a/sys/dev/drm2/drm_mm.c +++ b/sys/dev/drm2/drm_mm.c @@ -54,7 +54,7 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) (atomic ? M_NOWAIT : M_WAITOK)); if (unlikely(child == NULL)) { - mtx_lock(&mm->unused_lock); + spin_lock(&mm->unused_spin); if (list_empty(&mm->unused_nodes)) child = NULL; else { @@ -64,7 +64,7 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) list_del(&child->node_list); --mm->num_unused; } - mtx_unlock(&mm->unused_lock); + spin_unlock(&mm->unused_spin); } return child; } @@ -73,21 +73,21 @@ int drm_mm_pre_get(struct drm_mm *mm) { struct drm_mm_node *node; - mtx_lock(&mm->unused_lock); + spin_lock(&mm->unused_spin); while (mm->num_unused < MM_UNUSED_TARGET) { - mtx_unlock(&mm->unused_lock); + spin_unlock(&mm->unused_spin); node = kmalloc(sizeof(*node), DRM_MEM_MM, M_WAITOK); - mtx_lock(&mm->unused_lock); + spin_lock(&mm->unused_spin); if (unlikely(node == NULL)) { int ret = (mm->num_unused < 2) ? 
-ENOMEM : 0; - mtx_unlock(&mm->unused_lock); + spin_unlock(&mm->unused_spin); return ret; } ++mm->num_unused; list_add_tail(&node->node_list, &mm->unused_nodes); } - mtx_unlock(&mm->unused_lock); + spin_unlock(&mm->unused_spin); return 0; } @@ -293,13 +293,13 @@ void drm_mm_put_block(struct drm_mm_node *node) drm_mm_remove_node(node); - mtx_lock(&mm->unused_lock); + spin_lock(&mm->unused_spin); if (mm->num_unused < MM_UNUSED_TARGET) { list_add(&node->node_list, &mm->unused_nodes); ++mm->num_unused; } else kfree(node, DRM_MEM_MM); - mtx_unlock(&mm->unused_lock); + spin_unlock(&mm->unused_spin); } static int check_free_hole(unsigned long start, unsigned long end, @@ -521,7 +521,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) INIT_LIST_HEAD(&mm->unused_nodes); mm->num_unused = 0; mm->scanned_blocks = 0; - mtx_init(&mm->unused_lock, "drm_unused", NULL, MTX_DEF); + spin_init(&mm->unused_spin); INIT_LIST_HEAD(&mm->head_node.node_list); INIT_LIST_HEAD(&mm->head_node.hole_stack); @@ -546,15 +546,15 @@ void drm_mm_takedown(struct drm_mm * mm) return; } - mtx_lock(&mm->unused_lock); + spin_lock(&mm->unused_spin); list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) { list_del(&entry->node_list); kfree(entry, DRM_MEM_MM); --mm->num_unused; } - mtx_unlock(&mm->unused_lock); + spin_unlock(&mm->unused_spin); - mtx_destroy(&mm->unused_lock); + spin_uninit(&mm->unused_spin); KASSERT(mm->num_unused == 0, ("num_unused != 0")); } diff --git a/sys/dev/drm2/drm_mm.h b/sys/dev/drm2/drm_mm.h index f765855f33..68cba36cf5 100644 --- a/sys/dev/drm2/drm_mm.h +++ b/sys/dev/drm2/drm_mm.h @@ -57,7 +57,7 @@ struct drm_mm { struct drm_mm_node head_node; struct list_head unused_nodes; int num_unused; - struct mtx unused_lock; + struct spinlock unused_spin; unsigned int scan_check_range : 1; unsigned scan_alignment; unsigned long scan_size; diff --git a/sys/dev/drm2/drm_pci.c b/sys/dev/drm2/drm_pci.c index 92e53abdaa..e30e379b48 100644 --- 
a/sys/dev/drm2/drm_pci.c +++ b/sys/dev/drm2/drm_pci.c @@ -71,9 +71,10 @@ drm_pci_alloc(struct drm_device *dev, size_t size, return NULL; /* Make sure we aren't holding mutexes here */ - mtx_assert(&dev->dma_lock, MA_NOTOWNED); +/* FIXME: Is it ok to remove the next 3 lines ? */ +/* mtx_assert(&dev->dma_lock, MA_NOTOWNED); if (mtx_owned(&dev->dma_lock)) - DRM_ERROR("called while holding dma_lock\n"); + DRM_ERROR("called while holding dma_lock\n"); */ ret = bus_dma_tag_create(NULL, align, 0, /* tag, align, boundary */ maxaddr, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */ diff --git a/sys/dev/drm2/i915/i915_debug.c b/sys/dev/drm2/i915/i915_debug.c index a3b35aefcb..e685960ba2 100644 --- a/sys/dev/drm2/i915/i915_debug.c +++ b/sys/dev/drm2/i915/i915_debug.c @@ -161,7 +161,7 @@ i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data) size_t total_obj_size, total_gtt_size; int count; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); switch (list) { @@ -225,7 +225,7 @@ i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data) size_t size, mappable_size; struct drm_i915_gem_object *obj; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); sbuf_printf(m, "%u objects, %zu bytes\n", dev_priv->mm.object_count, @@ -288,7 +288,7 @@ i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void* data) size_t total_obj_size, total_gtt_size; int count; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); total_obj_size = total_gtt_size = count = 0; @@ -324,7 +324,7 @@ i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data) pipe = pipe_name(crtc->pipe); plane = plane_name(crtc->plane); - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); work = crtc->unpin_work; if (work == NULL) { sbuf_printf(m, "No 
flip due on pipe %c (plane %c)\n", @@ -354,7 +354,7 @@ i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data) sbuf_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); } } - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); } return (0); @@ -367,7 +367,7 @@ i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data) struct drm_i915_gem_request *gem_request; int count; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); count = 0; @@ -431,7 +431,7 @@ i915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *data) drm_i915_private_t *dev_priv = dev->dev_private; int i; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); for (i = 0; i < I915_NUM_RINGS; i++) i915_ring_seqno_info(m, &dev_priv->rings[i]); @@ -446,7 +446,7 @@ i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data) drm_i915_private_t *dev_priv = dev->dev_private; int i, pipe; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); if (!HAS_PCH_SPLIT(dev)) { @@ -501,7 +501,7 @@ i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data) drm_i915_private_t *dev_priv = dev->dev_private; int i; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); sbuf_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); @@ -548,7 +548,7 @@ i915_ringbuffer_data(struct drm_device *dev, struct sbuf *m, void *data) drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); ring = &dev_priv->rings[(uintptr_t)data]; if (!ring->obj) { @@ -576,7 +576,7 @@ i915_ringbuffer_info(struct drm_device 
*dev, struct sbuf *m, void *data) if (ring->size == 0) return (0); - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); sbuf_printf(m, "Ring %s:\n", ring->name); @@ -709,7 +709,7 @@ static int i915_error_state(struct drm_device *dev, struct sbuf *m, struct drm_i915_error_state *error; int i, j, page, offset, elt; - mtx_lock(&dev_priv->error_lock); + lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE); if (!dev_priv->first_error) { sbuf_printf(m, "no error state collected\n"); goto out; @@ -800,7 +800,7 @@ static int i915_error_state(struct drm_device *dev, struct sbuf *m, intel_display_print_error_state(m, dev, error->display); out: - mtx_unlock(&dev_priv->error_lock); + lockmgr(&dev_priv->error_lock, LK_RELEASE); return (0); } @@ -811,7 +811,7 @@ i915_rstdby_delays(struct drm_device *dev, struct sbuf *m, void *unused) drm_i915_private_t *dev_priv = dev->dev_private; u16 crstanddelay; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); crstanddelay = I915_READ16(CRSTANDVID); DRM_UNLOCK(dev); @@ -847,7 +847,7 @@ i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused) int max_freq; /* RPSTAT1 is in the GT power well */ - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); gen6_gt_force_wake_get(dev_priv); @@ -910,7 +910,7 @@ i915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *unused) u32 delayfreq; int i; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); for (i = 0; i < 16; i++) { delayfreq = I915_READ(PXVFREQ_BASE + i * 4); @@ -934,7 +934,7 @@ i915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unused) u32 inttoext; int i; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); 
for (i = 1; i <= 32; i++) { inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); @@ -953,7 +953,7 @@ ironlake_drpc_info(struct drm_device *dev, struct sbuf *m) u32 rstdbyctl; u16 crstandvid; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); rgvmodectl = I915_READ(MEMMODECTL); rstdbyctl = I915_READ(RSTDBYCTL); @@ -1016,12 +1016,12 @@ gen6_drpc_info(struct drm_device *dev, struct sbuf *m) unsigned forcewake_count; int count=0; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); - mtx_lock(&dev_priv->gt_lock); + lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE); forcewake_count = dev_priv->forcewake_count; - mtx_unlock(&dev_priv->gt_lock); + lockmgr(&dev_priv->gt_lock, LK_RELEASE); if (forcewake_count) { sbuf_printf(m, "RC information inaccurate because userspace " @@ -1165,7 +1165,7 @@ static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m, return (0); } - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); sbuf_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); @@ -1201,7 +1201,7 @@ i915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused) return (0); } - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); temp = i915_mch_val(dev_priv); chipset = i915_chipset_val(dev_priv); @@ -1221,7 +1221,7 @@ i915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused) { drm_i915_private_t *dev_priv = dev->dev_private; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); sbuf_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); DRM_UNLOCK(dev); @@ -1236,7 +1236,7 @@ i915_opregion(struct drm_device *dev, struct sbuf *m, void *unused) drm_i915_private_t *dev_priv = dev->dev_private; struct 
intel_opregion *opregion = &dev_priv->opregion; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); if (opregion->header) seq_write(m, opregion->header, OPREGION_SIZE); @@ -1253,7 +1253,7 @@ i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data) struct intel_fbdev *ifbdev; struct intel_framebuffer *fb; - if (sx_xlock_sig(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL)) return (EINTR); ifbdev = dev_priv->fbdev; @@ -1299,7 +1299,7 @@ i915_context_status(struct drm_device *dev, struct sbuf *m, void *data) return (0); dev_priv = dev->dev_private; - ret = sx_xlock_sig(&dev->mode_config.mutex); + ret = lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE|LK_SLEEPFAIL); if (ret != 0) return (EINTR); @@ -1315,7 +1315,7 @@ i915_context_status(struct drm_device *dev, struct sbuf *m, void *data) sbuf_printf(m, "\n"); } - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return (0); } @@ -1328,9 +1328,9 @@ i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m, unsigned forcewake_count; dev_priv = dev->dev_private; - mtx_lock(&dev_priv->gt_lock); + lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE); forcewake_count = dev_priv->forcewake_count; - mtx_unlock(&dev_priv->gt_lock); + lockmgr(&dev_priv->gt_lock, LK_RELEASE); sbuf_printf(m, "forcewake count = %u\n", forcewake_count); @@ -1370,7 +1370,7 @@ i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data) int ret; dev_priv = dev->dev_private; - ret = sx_xlock_sig(&dev->dev_struct_lock); + ret = lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL); if (ret != 0) return (EINTR); @@ -1414,7 +1414,7 @@ i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data) dev_priv = dev->dev_private; - ret = sx_xlock_sig(&dev->dev_struct_lock); + ret = lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL); if (ret != 0) return (EINTR); if 
(INTEL_INFO(dev)->gen == 6) diff --git a/sys/dev/drm2/i915/i915_dma.c b/sys/dev/drm2/i915/i915_dma.c index a321ea8f92..22440d80eb 100644 --- a/sys/dev/drm2/i915/i915_dma.c +++ b/sys/dev/drm2/i915/i915_dma.c @@ -43,8 +43,8 @@ static struct drm_i915_private *i915_mch_dev; * - dev_priv->fmax * - dev_priv->gpu_busy */ -static struct mtx mchdev_lock; -MTX_SYSINIT(mchdev, &mchdev_lock, "mchdev", MTX_DEF); +static struct lock mchdev_lock; +LOCK_SYSINIT(mchdev, &mchdev_lock, "mchdev", LK_CANRECURSE); static void i915_pineview_get_mem_freq(struct drm_device *dev); static void i915_ironlake_get_mem_freq(struct drm_device *dev); @@ -1233,10 +1233,10 @@ i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->tq = taskqueue_create("915", M_WAITOK, taskqueue_thread_enqueue, &dev_priv->tq); taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq"); - mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF); - mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF); - mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF); - mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF); + lockinit(&dev_priv->gt_lock, "915gt", 0, LK_CANRECURSE); + lockinit(&dev_priv->error_lock, "915err", 0, LK_CANRECURSE); + lockinit(&dev_priv->error_completion_lock, "915cmp", 0, LK_CANRECURSE); + lockinit(&dev_priv->rps_lock, "915rps", 0, LK_CANRECURSE); dev_priv->has_gem = 1; intel_irq_init(dev); @@ -1265,7 +1265,7 @@ i915_driver_load(struct drm_device *dev, unsigned long flags) else if (IS_GEN5(dev)) i915_ironlake_get_mem_freq(dev); - mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF); + lockinit(&dev_priv->irq_lock, "userirq", 0, LK_CANRECURSE); if (IS_IVYBRIDGE(dev)) dev_priv->num_pipe = 3; @@ -1300,10 +1300,10 @@ i915_driver_load(struct drm_device *dev, unsigned long flags) i915_hangcheck_elapsed, dev); if (IS_GEN5(dev)) { - mtx_lock(&mchdev_lock); + lockmgr(&mchdev_lock, LK_EXCLUSIVE); i915_mch_dev = dev_priv; dev_priv->mchdev_lock = &mchdev_lock; - 
mtx_unlock(&mchdev_lock); + lockmgr(&mchdev_lock, LK_RELEASE); } return (0); @@ -1374,7 +1374,7 @@ i915_driver_unload_int(struct drm_device *dev, bool locked) i915_gem_unload(dev); - mtx_destroy(&dev_priv->irq_lock); + lockuninit(&dev_priv->irq_lock); if (dev_priv->tq != NULL) taskqueue_free(dev_priv->tq); @@ -1383,9 +1383,9 @@ i915_driver_unload_int(struct drm_device *dev, bool locked) drm_rmmap(dev, dev_priv->mmio_map); intel_teardown_gmbus(dev); - mtx_destroy(&dev_priv->error_lock); - mtx_destroy(&dev_priv->error_completion_lock); - mtx_destroy(&dev_priv->rps_lock); + lockuninit(&dev_priv->error_lock); + lockuninit(&dev_priv->error_completion_lock); + lockuninit(&dev_priv->rps_lock); drm_free(dev->dev_private, sizeof(drm_i915_private_t), DRM_MEM_DRIVER); @@ -1407,7 +1407,7 @@ i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) i915_file_priv = kmalloc(sizeof(*i915_file_priv), DRM_MEM_FILES, M_WAITOK | M_ZERO); - mtx_init(&i915_file_priv->mm.lck, "915fp", NULL, MTX_DEF); + lockinit(&i915_file_priv->mm.lck, "915fp", 0, LK_CANRECURSE); INIT_LIST_HEAD(&i915_file_priv->mm.request_list); file_priv->driver_priv = i915_file_priv; @@ -1442,7 +1442,7 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) { struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; - mtx_destroy(&i915_file_priv->mm.lck); + lockuninit(&i915_file_priv->mm.lck); drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES); } @@ -1951,7 +1951,7 @@ unsigned long i915_read_mch_val(void) struct drm_i915_private *dev_priv; unsigned long chipset_val, graphics_val, ret = 0; - mtx_lock(&mchdev_lock); + lockmgr(&mchdev_lock, LK_EXCLUSIVE); if (!i915_mch_dev) goto out_unlock; dev_priv = i915_mch_dev; @@ -1962,7 +1962,7 @@ unsigned long i915_read_mch_val(void) ret = chipset_val + graphics_val; out_unlock: - mtx_unlock(&mchdev_lock); + lockmgr(&mchdev_lock, LK_RELEASE); return ret; } @@ -1977,7 +1977,7 @@ bool i915_gpu_raise(void) struct 
drm_i915_private *dev_priv; bool ret = true; - mtx_lock(&mchdev_lock); + lockmgr(&mchdev_lock, LK_EXCLUSIVE); if (!i915_mch_dev) { ret = false; goto out_unlock; @@ -1988,7 +1988,7 @@ bool i915_gpu_raise(void) dev_priv->max_delay--; out_unlock: - mtx_unlock(&mchdev_lock); + lockmgr(&mchdev_lock, LK_RELEASE); return ret; } @@ -2004,7 +2004,7 @@ bool i915_gpu_lower(void) struct drm_i915_private *dev_priv; bool ret = true; - mtx_lock(&mchdev_lock); + lockmgr(&mchdev_lock, LK_EXCLUSIVE); if (!i915_mch_dev) { ret = false; goto out_unlock; @@ -2015,7 +2015,7 @@ bool i915_gpu_lower(void) dev_priv->max_delay++; out_unlock: - mtx_unlock(&mchdev_lock); + lockmgr(&mchdev_lock, LK_RELEASE); return ret; } @@ -2030,7 +2030,7 @@ bool i915_gpu_busy(void) struct drm_i915_private *dev_priv; bool ret = false; - mtx_lock(&mchdev_lock); + lockmgr(&mchdev_lock, LK_EXCLUSIVE); if (!i915_mch_dev) goto out_unlock; dev_priv = i915_mch_dev; @@ -2038,7 +2038,7 @@ bool i915_gpu_busy(void) ret = dev_priv->busy; out_unlock: - mtx_unlock(&mchdev_lock); + lockmgr(&mchdev_lock, LK_RELEASE); return ret; } @@ -2054,7 +2054,7 @@ bool i915_gpu_turbo_disable(void) struct drm_i915_private *dev_priv; bool ret = true; - mtx_lock(&mchdev_lock); + lockmgr(&mchdev_lock, LK_EXCLUSIVE); if (!i915_mch_dev) { ret = false; goto out_unlock; @@ -2067,7 +2067,7 @@ bool i915_gpu_turbo_disable(void) ret = false; out_unlock: - mtx_unlock(&mchdev_lock); + lockmgr(&mchdev_lock, LK_RELEASE); return ret; } diff --git a/sys/dev/drm2/i915/i915_drv.c b/sys/dev/drm2/i915/i915_drv.c index 696c87c4f8..c0e4aeb13d 100644 --- a/sys/dev/drm2/i915/i915_drv.c +++ b/sys/dev/drm2/i915/i915_drv.c @@ -308,15 +308,15 @@ static int i915_drm_thaw(struct drm_device *dev) ironlake_init_pch_refclk(dev); DRM_UNLOCK(dev); - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); drm_mode_config_reset(dev); - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); drm_irq_install(dev); - 
sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); /* Resume the modeset for every activated CRTC */ drm_helper_resume_force_mode(dev); - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); if (IS_IRONLAKE_M(dev)) ironlake_enable_rc6(dev); @@ -515,10 +515,10 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { - mtx_lock(&dev_priv->gt_lock); + lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE); if (dev_priv->forcewake_count++ == 0) dev_priv->display.force_wake_get(dev_priv); - mtx_unlock(&dev_priv->gt_lock); + lockmgr(&dev_priv->gt_lock, LK_RELEASE); } static void @@ -555,10 +555,10 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { - mtx_lock(&dev_priv->gt_lock); + lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE); if (--dev_priv->forcewake_count == 0) dev_priv->display.force_wake_put(dev_priv); - mtx_unlock(&dev_priv->gt_lock); + lockmgr(&dev_priv->gt_lock, LK_RELEASE); } int @@ -671,7 +671,7 @@ gen6_do_reset(struct drm_device *dev, u8 flags) /* Hold gt_lock across reset to prevent any register access * with forcewake not set correctly */ - mtx_lock(&dev_priv->gt_lock); + lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE); /* Reset the chip */ @@ -695,7 +695,7 @@ gen6_do_reset(struct drm_device *dev, u8 flags) /* Restore fifo count */ dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); - mtx_unlock(&dev_priv->gt_lock); + lockmgr(&dev_priv->gt_lock, LK_RELEASE); return (ret); } @@ -713,7 +713,7 @@ i915_reset(struct drm_device *dev, u8 flags) if (!i915_try_reset) return (0); - if (!sx_try_xlock(&dev->dev_struct_lock)) + if (!lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT)) return (-EBUSY); i915_gem_reset(dev); @@ -768,9 +768,9 @@ i915_reset(struct drm_device *dev, u8 flags) DRM_UNLOCK(dev); if (need_display) { - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); drm_helper_resume_force_mode(dev); - sx_xunlock(&dev->mode_config.mutex); + 
lockmgr(&dev->mode_config.lock, LK_RELEASE); } return (0); @@ -780,13 +780,13 @@ i915_reset(struct drm_device *dev, u8 flags) u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ u##x val = 0; \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ - mtx_lock(&dev_priv->gt_lock); \ + lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE); \ if (dev_priv->forcewake_count == 0) \ dev_priv->display.force_wake_get(dev_priv); \ val = DRM_READ##y(dev_priv->mmio_map, reg); \ if (dev_priv->forcewake_count == 0) \ dev_priv->display.force_wake_put(dev_priv); \ - mtx_unlock(&dev_priv->gt_lock); \ + lockmgr(&dev_priv->gt_lock, LK_RELEASE); \ } else { \ val = DRM_READ##y(dev_priv->mmio_map, reg); \ } \ diff --git a/sys/dev/drm2/i915/i915_drv.h b/sys/dev/drm2/i915/i915_drv.h index 45b5e4ef28..b2b0a9911e 100644 --- a/sys/dev/drm2/i915/i915_drv.h +++ b/sys/dev/drm2/i915/i915_drv.h @@ -247,7 +247,7 @@ typedef struct drm_i915_private { device_t *bbbus; /** gmbus_sx protects against concurrent usage of the single hw gmbus * controller on different i2c buses. */ - struct sx gmbus_sx; + struct lock gmbus_lock; int has_gem; int relative_constants_mode; @@ -261,7 +261,7 @@ typedef struct drm_i915_private { /** forcewake_count is protected by gt_lock */ unsigned forcewake_count; /** gt_lock is also taken in irq contexts. 
*/ - struct mtx gt_lock; + struct lock gt_lock; drm_i915_sarea_t *sarea_priv; /* drm_i915_ring_buffer_t ring; */ @@ -293,7 +293,7 @@ typedef struct drm_i915_private { u32 irq_mask; u32 gt_irq_mask; u32 pch_irq_mask; - struct mtx irq_lock; + struct lock irq_lock; u32 hotplug_supported_mask; @@ -675,7 +675,7 @@ typedef struct drm_i915_private { int mch_res_rid; struct resource *mch_res; - struct mtx rps_lock; + struct lock rps_lock; u32 pm_iir; struct task rps_task; @@ -694,7 +694,7 @@ typedef struct drm_i915_private { int c_m; int r_t; u8 corr; - struct mtx *mchdev_lock; + struct lock *mchdev_lock; enum no_fbc_reason no_fbc_reason; @@ -710,9 +710,9 @@ typedef struct drm_i915_private { struct task error_task; struct task hotplug_task; int error_completion; - struct mtx error_completion_lock; + struct lock error_completion_lock; struct drm_i915_error_state *first_error; - struct mtx error_lock; + struct lock error_lock; struct callout hangcheck_timer; unsigned long last_gpu_reset; @@ -923,7 +923,7 @@ struct drm_i915_gem_request { struct drm_i915_file_private { struct { struct list_head request_list; - struct mtx lck; + struct lock lck; } mm; }; diff --git a/sys/dev/drm2/i915/i915_gem.c b/sys/dev/drm2/i915/i915_gem.c index c1bf54aea8..0d916a2cf7 100644 --- a/sys/dev/drm2/i915/i915_gem.c +++ b/sys/dev/drm2/i915/i915_gem.c @@ -126,21 +126,21 @@ i915_gem_wait_for_error(struct drm_device *dev) if (!atomic_load_acq_int(&dev_priv->mm.wedged)) return (0); - mtx_lock(&dev_priv->error_completion_lock); + lockmgr(&dev_priv->error_completion_lock, LK_EXCLUSIVE); while (dev_priv->error_completion == 0) { - ret = -msleep(&dev_priv->error_completion, + ret = -lksleep(&dev_priv->error_completion, &dev_priv->error_completion_lock, PCATCH, "915wco", 0); if (ret != 0) { - mtx_unlock(&dev_priv->error_completion_lock); + lockmgr(&dev_priv->error_completion_lock, LK_RELEASE); return (ret); } } - mtx_unlock(&dev_priv->error_completion_lock); + lockmgr(&dev_priv->error_completion_lock, 
LK_RELEASE); if (atomic_read(&dev_priv->mm.wedged)) { - mtx_lock(&dev_priv->error_completion_lock); + lockmgr(&dev_priv->error_completion_lock, LK_EXCLUSIVE); dev_priv->error_completion++; - mtx_unlock(&dev_priv->error_completion_lock); + lockmgr(&dev_priv->error_completion_lock, LK_RELEASE); } return (0); } @@ -160,7 +160,7 @@ i915_mutex_lock_interruptible(struct drm_device *dev) * interruptible shall it be. might indeed be if dev_lock is * changed to sx */ - ret = sx_xlock_sig(&dev->dev_struct_lock); + ret = lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL); if (ret != 0) return (-ret); @@ -746,25 +746,25 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) ring = NULL; seqno = 0; - mtx_lock(&file_priv->mm.lck); + lockmgr(&file_priv->mm.lck, LK_EXCLUSIVE); list_for_each_entry(request, &file_priv->mm.request_list, client_list) { if (time_after_eq(request->emitted_jiffies, recent_enough)) break; ring = request->ring; seqno = request->seqno; } - mtx_unlock(&file_priv->mm.lck); + lockmgr(&file_priv->mm.lck, LK_RELEASE); if (seqno == 0) return (0); ret = 0; - mtx_lock(&ring->irq_lock); + lockmgr(&ring->irq_lock, LK_EXCLUSIVE); if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { if (ring->irq_get(ring)) { while (ret == 0 && !(i915_seqno_passed(ring->get_seqno(ring), seqno) || atomic_read(&dev_priv->mm.wedged))) - ret = -msleep(ring, &ring->irq_lock, PCATCH, + ret = -lksleep(ring, &ring->irq_lock, PCATCH, "915thr", 0); ring->irq_put(ring); if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) @@ -775,7 +775,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) ret = -EBUSY; } } - mtx_unlock(&ring->irq_lock); + lockmgr(&ring->irq_lock, LK_RELEASE); if (ret == 0) taskqueue_enqueue_timeout(dev_priv->tq, @@ -1241,6 +1241,9 @@ unlock: return (ret); } +#define PROC_LOCK(p) +#define PROC_UNLOCK(p) + int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file) @@ -2541,9 +2544,9 @@ i915_wait_request(struct 
intel_ring_buffer *ring, uint32_t seqno, bool do_retire if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) { /* Give the error handler a chance to run. */ - mtx_lock(&dev_priv->error_completion_lock); + lockmgr(&dev_priv->error_completion_lock, LK_EXCLUSIVE); recovery_complete = (&dev_priv->error_completion) > 0; - mtx_unlock(&dev_priv->error_completion_lock); + lockmgr(&dev_priv->error_completion_lock, LK_RELEASE); return (recovery_complete ? -EIO : -EAGAIN); } @@ -2575,19 +2578,19 @@ i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno, bool do_retire } ring->waiting_seqno = seqno; - mtx_lock(&ring->irq_lock); + lockmgr(&ring->irq_lock, LK_EXCLUSIVE); if (ring->irq_get(ring)) { flags = dev_priv->mm.interruptible ? PCATCH : 0; while (!i915_seqno_passed(ring->get_seqno(ring), seqno) && !atomic_load_acq_int(&dev_priv->mm.wedged) && ret == 0) { - ret = -msleep(ring, &ring->irq_lock, flags, + ret = -lksleep(ring, &ring->irq_lock, flags, "915gwr", 0); } ring->irq_put(ring); - mtx_unlock(&ring->irq_lock); + lockmgr(&ring->irq_lock, LK_RELEASE); } else { - mtx_unlock(&ring->irq_lock); + lockmgr(&ring->irq_lock, LK_RELEASE); if (_intel_wait_for(ring->dev, i915_seqno_passed(ring->get_seqno(ring), seqno) || atomic_load_acq_int(&dev_priv->mm.wedged), 3000, @@ -2665,11 +2668,11 @@ i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file, if (file != NULL) { file_priv = file->driver_priv; - mtx_lock(&file_priv->mm.lck); + lockmgr(&file_priv->mm.lck, LK_EXCLUSIVE); request->file_priv = file_priv; list_add_tail(&request->client_list, &file_priv->mm.request_list); - mtx_unlock(&file_priv->mm.lck); + lockmgr(&file_priv->mm.lck, LK_RELEASE); } ring->outstanding_lazy_request = 0; @@ -2696,12 +2699,12 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) DRM_LOCK_ASSERT(request->ring->dev); - mtx_lock(&file_priv->mm.lck); + lockmgr(&file_priv->mm.lck, LK_EXCLUSIVE); if (request->file_priv != NULL) { list_del(&request->client_list); 
request->file_priv = NULL; } - mtx_unlock(&file_priv->mm.lck); + lockmgr(&file_priv->mm.lck, LK_RELEASE); } void @@ -2716,7 +2719,7 @@ i915_gem_release(struct drm_device *dev, struct drm_file *file) * later retire_requests won't dereference our soon-to-be-gone * file_priv. */ - mtx_lock(&file_priv->mm.lck); + lockmgr(&file_priv->mm.lck, LK_EXCLUSIVE); while (!list_empty(&file_priv->mm.request_list)) { request = list_first_entry(&file_priv->mm.request_list, struct drm_i915_gem_request, @@ -2724,7 +2727,7 @@ i915_gem_release(struct drm_device *dev, struct drm_file *file) list_del(&request->client_list); request->file_priv = NULL; } - mtx_unlock(&file_priv->mm.lck); + lockmgr(&file_priv->mm.lck, LK_RELEASE); } static void @@ -2872,9 +2875,9 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) if (ring->trace_irq_seqno && i915_seqno_passed(seqno, ring->trace_irq_seqno)) { - mtx_lock(&ring->irq_lock); + lockmgr(&ring->irq_lock, LK_EXCLUSIVE); ring->irq_put(ring); - mtx_unlock(&ring->irq_lock); + lockmgr(&ring->irq_lock, LK_RELEASE); ring->trace_irq_seqno = 0; } } @@ -3383,7 +3386,7 @@ i915_gem_retire_task_handler(void *arg, int pending) dev = dev_priv->dev; /* Come back later if the device is busy... 
 */ - if (!sx_try_xlock(&dev->dev_struct_lock)) { + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT)) { taskqueue_enqueue_timeout(dev_priv->tq, &dev_priv->mm.retire_task, hz); return; @@ -3650,7 +3653,7 @@ i915_gem_lowmem(void *arg) dev = arg; dev_priv = dev->dev_private; - if (!sx_try_xlock(&dev->dev_struct_lock)) + if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT)) return; rescan: diff --git a/sys/dev/drm2/i915/i915_irq.c b/sys/dev/drm2/i915/i915_irq.c index e62580f05d..e78672d7b4 100644 --- a/sys/dev/drm2/i915/i915_irq.c +++ b/sys/dev/drm2/i915/i915_irq.c @@ -118,7 +118,7 @@ void intel_enable_asle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - mtx_lock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); if (HAS_PCH_SPLIT(dev)) ironlake_enable_display_irq(dev_priv, DE_GSE); @@ -130,7 +130,7 @@ void intel_enable_asle(struct drm_device *dev) PIPE_LEGACY_BLC_EVENT_ENABLE); } - mtx_unlock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_RELEASE); } /** @@ -316,14 +316,14 @@ i915_hotplug_work_func(void *context, int pending) mode_config = &dev->mode_config; - sx_xlock(&mode_config->mutex); + lockmgr(&mode_config->lock, LK_EXCLUSIVE); DRM_DEBUG_KMS("running encoder hotplug functions\n"); list_for_each_entry(encoder, &mode_config->encoder_list, base.head) if (encoder->hot_plug) encoder->hot_plug(encoder); - sx_xunlock(&mode_config->mutex); + lockmgr(&mode_config->lock, LK_RELEASE); /* Just fire off a uevent and let userspace tell us what to do */ #if 0 @@ -373,10 +373,10 @@ static void notify_ring(struct drm_device *dev, seqno = ring->get_seqno(ring); - mtx_lock(&ring->irq_lock); + lockmgr(&ring->irq_lock, LK_EXCLUSIVE); ring->irq_seqno = seqno; wakeup(ring); - mtx_unlock(&ring->irq_lock); + lockmgr(&ring->irq_lock, LK_RELEASE); if (i915_enable_hangcheck) { dev_priv->hangcheck_count = 0; @@ -397,12 +397,12 @@ gen6_pm_rps_work_func(void *arg, int pending) dev = dev_priv->dev; new_delay =
dev_priv->cur_delay; - mtx_lock(&dev_priv->rps_lock); + lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE); pm_iir = dev_priv->pm_iir; dev_priv->pm_iir = 0; pm_imr = I915_READ(GEN6_PMIMR); I915_WRITE(GEN6_PMIMR, 0); - mtx_unlock(&dev_priv->rps_lock); + lockmgr(&dev_priv->rps_lock, LK_RELEASE); if (!pm_iir) return; @@ -562,13 +562,13 @@ ivybridge_irq_handler(void *arg) } if (pm_iir & GEN6_PM_DEFERRED_EVENTS) { - mtx_lock(&dev_priv->rps_lock); + lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE); if ((dev_priv->pm_iir & pm_iir) != 0) kprintf("Missed a PM interrupt\n"); dev_priv->pm_iir |= pm_iir; I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); POSTING_READ(GEN6_PMIMR); - mtx_unlock(&dev_priv->rps_lock); + lockmgr(&dev_priv->rps_lock, LK_RELEASE); taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task); } @@ -677,13 +677,13 @@ ironlake_irq_handler(void *arg) } if (pm_iir & GEN6_PM_DEFERRED_EVENTS) { - mtx_lock(&dev_priv->rps_lock); + lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE); if ((dev_priv->pm_iir & pm_iir) != 0) kprintf("Missed a PM interrupt\n"); dev_priv->pm_iir |= pm_iir; I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); POSTING_READ(GEN6_PMIMR); - mtx_unlock(&dev_priv->rps_lock); + lockmgr(&dev_priv->rps_lock, LK_RELEASE); taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task); } @@ -720,10 +720,10 @@ i915_error_work_func(void *context, int pending) atomic_store_rel_int(&dev_priv->mm.wedged, 0); /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */ } - mtx_lock(&dev_priv->error_completion_lock); + lockmgr(&dev_priv->error_completion_lock, LK_EXCLUSIVE); dev_priv->error_completion++; wakeup(&dev_priv->error_completion); - mtx_unlock(&dev_priv->error_completion_lock); + lockmgr(&dev_priv->error_completion_lock, LK_RELEASE); } } @@ -854,27 +854,27 @@ void i915_handle_error(struct drm_device *dev, bool wedged) i915_report_and_clear_eir(dev); if (wedged) { - mtx_lock(&dev_priv->error_completion_lock); + lockmgr(&dev_priv->error_completion_lock, LK_EXCLUSIVE); 
dev_priv->error_completion = 0; dev_priv->mm.wedged = 1; /* unlock acts as rel barrier for store to wedged */ - mtx_unlock(&dev_priv->error_completion_lock); + lockmgr(&dev_priv->error_completion_lock, LK_RELEASE); /* * Wakeup waiting processes so they don't hang */ - mtx_lock(&dev_priv->rings[RCS].irq_lock); + lockmgr(&dev_priv->rings[RCS].irq_lock, LK_EXCLUSIVE); wakeup(&dev_priv->rings[RCS]); - mtx_unlock(&dev_priv->rings[RCS].irq_lock); + lockmgr(&dev_priv->rings[RCS].irq_lock, LK_RELEASE); if (HAS_BSD(dev)) { - mtx_lock(&dev_priv->rings[VCS].irq_lock); + lockmgr(&dev_priv->rings[VCS].irq_lock, LK_EXCLUSIVE); wakeup(&dev_priv->rings[VCS]); - mtx_unlock(&dev_priv->rings[VCS].irq_lock); + lockmgr(&dev_priv->rings[VCS].irq_lock, LK_RELEASE); } if (HAS_BLT(dev)) { - mtx_lock(&dev_priv->rings[BCS].irq_lock); + lockmgr(&dev_priv->rings[BCS].irq_lock, LK_EXCLUSIVE); wakeup(&dev_priv->rings[BCS]); - mtx_unlock(&dev_priv->rings[BCS].irq_lock); + lockmgr(&dev_priv->rings[BCS].irq_lock, LK_RELEASE); } } @@ -894,12 +894,12 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) if (intel_crtc == NULL) return; - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); work = intel_crtc->unpin_work; if (work == NULL || work->pending || !work->enable_stall_check) { /* Either the pending flip IRQ arrived, or we're too early. Don't check */ - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); return; } @@ -915,7 +915,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) crtc->x * crtc->fb->bits_per_pixel/8); } - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); if (stall_detected) { DRM_DEBUG("Pageflip stall detected\n"); @@ -956,7 +956,7 @@ i915_driver_irq_handler(void *arg) * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). 
*/ - mtx_lock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) i915_handle_error(dev, false); @@ -975,7 +975,7 @@ i915_driver_irq_handler(void *arg) irq_received = 1; } } - mtx_unlock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_RELEASE); if (!irq_received) break; @@ -1138,18 +1138,18 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) #endif ret = 0; - mtx_lock(&ring->irq_lock); + lockmgr(&ring->irq_lock, LK_EXCLUSIVE); if (ring->irq_get(ring)) { DRM_UNLOCK(dev); while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) { - ret = -msleep(ring, &ring->irq_lock, PCATCH, + ret = -lksleep(ring, &ring->irq_lock, PCATCH, "915wtq", 3 * hz); } ring->irq_put(ring); - mtx_unlock(&ring->irq_lock); + lockmgr(&ring->irq_lock, LK_RELEASE); DRM_LOCK(dev); } else { - mtx_unlock(&ring->irq_lock); + lockmgr(&ring->irq_lock, LK_RELEASE); if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr, 3000, 1, "915wir")) ret = -EBUSY; @@ -1218,7 +1218,7 @@ i915_enable_vblank(struct drm_device *dev, int pipe) if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; - mtx_lock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_ENABLE); @@ -1229,7 +1229,7 @@ i915_enable_vblank(struct drm_device *dev, int pipe) /* maintain vblank delivery even in deep C-states */ if (dev_priv->info->gen == 3) I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16); - mtx_unlock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_RELEASE); return 0; } @@ -1242,10 +1242,10 @@ ironlake_enable_vblank(struct drm_device *dev, int pipe) if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; - mtx_lock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); - mtx_unlock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_RELEASE); return 0; } @@ -1258,10 +1258,10 @@ ivybridge_enable_vblank(struct drm_device *dev, int pipe) if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; - mtx_lock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); ironlake_enable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); - mtx_unlock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_RELEASE); return 0; } @@ -1275,7 +1275,7 @@ i915_disable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - mtx_lock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); if (dev_priv->info->gen == 3) I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS); @@ -1283,7 +1283,7 @@ i915_disable_vblank(struct drm_device *dev, int pipe) i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE | PIPE_START_VBLANK_INTERRUPT_ENABLE); - mtx_unlock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_RELEASE); } static void @@ -1291,10 +1291,10 @@ ironlake_disable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - mtx_lock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); ironlake_disable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); - mtx_unlock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_RELEASE); } static void @@ -1302,10 +1302,10 @@ ivybridge_disable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - mtx_lock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); - mtx_unlock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_RELEASE); } /* Set the vblank monitor pipe @@ -2170,9 +2170,9 @@ i915_capture_error_state(struct drm_device *dev) struct drm_i915_error_state *error; int i, pipe; - mtx_lock(&dev_priv->error_lock); + lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE); error = dev_priv->first_error; - mtx_unlock(&dev_priv->error_lock); + lockmgr(&dev_priv->error_lock, LK_RELEASE); if (error != NULL) return; @@ -2234,12 +2234,12 @@ i915_capture_error_state(struct drm_device *dev) error->overlay = intel_overlay_capture_error_state(dev); error->display = intel_display_capture_error_state(dev); - mtx_lock(&dev_priv->error_lock); + lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE); if (dev_priv->first_error == NULL) { dev_priv->first_error = error; error = NULL; } - mtx_unlock(&dev_priv->error_lock); + lockmgr(&dev_priv->error_lock, LK_RELEASE); if (error != NULL) i915_error_state_free(dev, error); @@ -2251,10 +2251,10 @@ i915_destroy_error_state(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_error_state *error; - mtx_lock(&dev_priv->error_lock); + lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE); error = dev_priv->first_error; dev_priv->first_error = NULL; - mtx_unlock(&dev_priv->error_lock); + lockmgr(&dev_priv->error_lock, LK_RELEASE); if (error != NULL) i915_error_state_free(dev, error); diff --git a/sys/dev/drm2/i915/intel_display.c b/sys/dev/drm2/i915/intel_display.c index 1557fd9802..8c18edddc9 100644 --- a/sys/dev/drm2/i915/intel_display.c +++ b/sys/dev/drm2/i915/intel_display.c @@ -2259,13 +2259,13 @@ intel_finish_fb(struct drm_framebuffer *old_fb) bool was_interruptible = dev_priv->mm.interruptible; int ret; - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); while (!atomic_read(&dev_priv->mm.wedged) && atomic_read(&obj->pending_flip) != 0) { - msleep(&obj->pending_flip, &dev->event_lock, + 
lksleep(&obj->pending_flip, &dev->event_lock, 0, "915flp", 0); } - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the @@ -2946,10 +2946,10 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) obj = to_intel_framebuffer(crtc->fb)->obj; dev = crtc->dev; dev_priv = dev->dev_private; - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); while (atomic_read(&obj->pending_flip) != 0) - msleep(&obj->pending_flip, &dev->event_lock, 0, "915wfl", 0); - mtx_unlock(&dev->event_lock); + lksleep(&obj->pending_flip, &dev->event_lock, 0, "915wfl", 0); + lockmgr(&dev->event_lock, LK_RELEASE); } static bool intel_crtc_driving_pch(struct drm_crtc *crtc) @@ -7239,10 +7239,10 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_unpin_work *work; - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); work = intel_crtc->unpin_work; intel_crtc->unpin_work = NULL; - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); if (work) { taskqueue_cancel(dev_priv->tq, &work->task, NULL); @@ -7287,10 +7287,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev, microtime(&tnow); - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); work = intel_crtc->unpin_work; if (work == NULL || !work->pending) { - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); return; } @@ -7334,7 +7334,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev, atomic_clear_int(&obj->pending_flip, 1 << intel_crtc->plane); if (atomic_read(&obj->pending_flip) == 0) wakeup(&obj->pending_flip); - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); taskqueue_enqueue(dev_priv->tq, &work->task); } @@ -7361,14 +7361,14 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) 
struct intel_crtc *intel_crtc = to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); if (intel_crtc->unpin_work) { if ((++intel_crtc->unpin_work->pending) > 1) DRM_ERROR("Prepared flip multiple times\n"); } else { DRM_DEBUG("preparing flip with no unpin work?\n"); } - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); } static int intel_gen2_queue_flip(struct drm_device *dev, @@ -7593,9 +7593,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, goto free_work; /* We borrow the event spin lock for protecting unpin_work */ - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); if (intel_crtc->unpin_work) { - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); kfree(work, DRM_MEM_KMS); drm_vblank_put(dev, intel_crtc->pipe); @@ -7603,7 +7603,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return -EBUSY; } intel_crtc->unpin_work = work; - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; @@ -7639,9 +7639,9 @@ cleanup_pending: drm_gem_object_unreference(&obj->base); DRM_UNLOCK(dev); - mtx_lock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); intel_crtc->unpin_work = NULL; - mtx_unlock(&dev->event_lock); + lockmgr(&dev->event_lock, LK_RELEASE); drm_vblank_put(dev, intel_crtc->pipe); free_work: @@ -8221,9 +8221,9 @@ void gen6_disable_rps(struct drm_device *dev) * register (PMIMR) to mask PM interrupts. The only risk is in leaving * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. 
*/ - mtx_lock(&dev_priv->rps_lock); + lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE); dev_priv->pm_iir = 0; - mtx_unlock(&dev_priv->rps_lock); + lockmgr(&dev_priv->rps_lock, LK_RELEASE); I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); } @@ -8480,11 +8480,11 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_EI_EXPIRED); - mtx_lock(&dev_priv->rps_lock); + lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE); if (dev_priv->pm_iir != 0) kprintf("pm_iir %x\n", dev_priv->pm_iir); I915_WRITE(GEN6_PMIMR, 0); - mtx_unlock(&dev_priv->rps_lock); + lockmgr(&dev_priv->rps_lock, LK_RELEASE); /* enable all PM interrupts */ I915_WRITE(GEN6_PMINTRMSK, 0); diff --git a/sys/dev/drm2/i915/intel_dp.c b/sys/dev/drm2/i915/intel_dp.c index 2ae06eda35..736dcfd435 100644 --- a/sys/dev/drm2/i915/intel_dp.c +++ b/sys/dev/drm2/i915/intel_dp.c @@ -1060,9 +1060,9 @@ static void ironlake_panel_vdd_work(void *arg, int pending __unused) struct intel_dp *intel_dp = arg; struct drm_device *dev = intel_dp->base.base.dev; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); ironlake_panel_vdd_off_sync(intel_dp); - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); } static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) diff --git a/sys/dev/drm2/i915/intel_fb.c b/sys/dev/drm2/i915/intel_fb.c index 56a764be50..7257a4c319 100644 --- a/sys/dev/drm2/i915/intel_fb.c +++ b/sys/dev/drm2/i915/intel_fb.c @@ -255,7 +255,7 @@ void intel_fb_restore_mode(struct drm_device *dev) struct drm_mode_config *config = &dev->mode_config; struct drm_plane *plane; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); if (ret) @@ -265,5 +265,5 @@ void intel_fb_restore_mode(struct drm_device *dev) list_for_each_entry(plane, &config->plane_list, head) 
plane->funcs->disable_plane(plane); - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); } diff --git a/sys/dev/drm2/i915/intel_iic.c b/sys/dev/drm2/i915/intel_iic.c index ac02ce8f1b..95e124f839 100644 --- a/sys/dev/drm2/i915/intel_iic.c +++ b/sys/dev/drm2/i915/intel_iic.c @@ -56,6 +56,8 @@ * $FreeBSD: src/sys/dev/drm2/i915/intel_iic.c,v 1.1 2012/05/22 11:07:44 kib Exp $ */ +#include + #include #include #include @@ -238,7 +240,7 @@ intel_gmbus_transfer(device_t idev, struct iic_msg *msgs, uint32_t nmsgs) dev_priv = sc->drm_dev->dev_private; unit = device_get_unit(idev); - sx_xlock(&dev_priv->gmbus_sx); + lockmgr(&dev_priv->gmbus_lock, LK_EXCLUSIVE); if (sc->force_bit_dev) { error = intel_iic_quirk_xfer(dev_priv->bbbus[unit], msgs, nmsgs); goto out; @@ -331,7 +333,7 @@ done: DRM_INFO("GMBUS timed out waiting for idle\n"); I915_WRITE(GMBUS0 + reg_offset, 0); out: - sx_xunlock(&dev_priv->gmbus_sx); + lockmgr(&dev_priv->gmbus_lock, LK_RELEASE); return (error); clear_err: @@ -600,7 +602,7 @@ intel_setup_gmbus(struct drm_device *dev) int i, ret; dev_priv = dev->dev_private; - sx_init(&dev_priv->gmbus_sx, "gmbus"); + lockinit(&dev_priv->gmbus_lock, "gmbus", 0, LK_CANRECURSE); dev_priv->gmbus_bridge = kmalloc(sizeof(device_t) * GMBUS_NUM_PORTS, DRM_MEM_DRIVER, M_WAITOK | M_ZERO); dev_priv->bbbus_bridge = kmalloc(sizeof(device_t) * GMBUS_NUM_PORTS, @@ -615,7 +617,7 @@ intel_setup_gmbus(struct drm_device *dev) * intel_setup_gmbus() is called from the attach method of the * driver. 
*/ - mtx_lock(&Giant); + get_mplock(); for (i = 0; i < GMBUS_NUM_PORTS; i++) { /* * Initialized bbbus_bridge before gmbus_bridge, since @@ -679,12 +681,12 @@ intel_setup_gmbus(struct drm_device *dev) intel_iic_reset(dev); } - mtx_unlock(&Giant); + rel_mplock(); return (0); err: intel_teardown_gmbus_m(dev, i); - mtx_unlock(&Giant); + rel_mplock(); return (ret); } @@ -703,14 +705,14 @@ intel_teardown_gmbus_m(struct drm_device *dev, int m) dev_priv->gmbus_bridge = NULL; kfree(dev_priv->bbbus_bridge, DRM_MEM_DRIVER); dev_priv->bbbus_bridge = NULL; - sx_destroy(&dev_priv->gmbus_sx); + lockuninit(&dev_priv->gmbus_lock); } void intel_teardown_gmbus(struct drm_device *dev) { - mtx_lock(&Giant); + get_mplock(); intel_teardown_gmbus_m(dev, GMBUS_NUM_PORTS); - mtx_unlock(&Giant); + rel_mplock(); } diff --git a/sys/dev/drm2/i915/intel_overlay.c b/sys/dev/drm2/i915/intel_overlay.c index 48ae405547..a4fabbbbac 100644 --- a/sys/dev/drm2/i915/intel_overlay.c +++ b/sys/dev/drm2/i915/intel_overlay.c @@ -1118,13 +1118,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, } if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) { - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); DRM_LOCK(dev); ret = intel_overlay_switch_off(overlay); DRM_UNLOCK(dev); - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -1147,7 +1147,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, goto out_free; } - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); DRM_LOCK(dev); if (new_bo->tiling_mode) { @@ -1229,7 +1229,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, goto out_unlock; DRM_UNLOCK(dev); - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); kfree(params, DRM_I915_GEM); @@ -1237,7 +1237,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, out_unlock: DRM_UNLOCK(dev); - 
sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); drm_gem_object_unreference_unlocked(&new_bo->base); out_free: kfree(params, DRM_I915_GEM); @@ -1316,7 +1316,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, return -ENODEV; } - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); DRM_LOCK(dev); ret = -EINVAL; @@ -1382,7 +1382,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, ret = 0; out_unlock: DRM_UNLOCK(dev); - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } diff --git a/sys/dev/drm2/i915/intel_ringbuffer.c b/sys/dev/drm2/i915/intel_ringbuffer.c index 594bab1647..a5cf917224 100644 --- a/sys/dev/drm2/i915/intel_ringbuffer.c +++ b/sys/dev/drm2/i915/intel_ringbuffer.c @@ -52,10 +52,10 @@ i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno) { if (ring->trace_irq_seqno == 0) { - mtx_lock(&ring->irq_lock); + lockmgr(&ring->irq_lock, LK_EXCLUSIVE); if (ring->irq_get(ring)) ring->trace_irq_seqno = seqno; - mtx_unlock(&ring->irq_lock); + lockmgr(&ring->irq_lock, LK_RELEASE); } } @@ -733,7 +733,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring) if (!dev->irq_enabled) return false; - mtx_assert(&ring->irq_lock, MA_OWNED); + KKASSERT(lockstatus(&ring->irq_lock, curthread) != 0); if (ring->irq_refcount++ == 0) { if (HAS_PCH_SPLIT(dev)) ironlake_enable_irq(dev_priv, @@ -751,7 +751,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring) struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; - mtx_assert(&ring->irq_lock, MA_OWNED); + KKASSERT(lockstatus(&ring->irq_lock, curthread) != 0); if (--ring->irq_refcount == 0) { if (HAS_PCH_SPLIT(dev)) ironlake_disable_irq(dev_priv, @@ -844,7 +844,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, uint32_t gflag, uint32_t rflag gen6_gt_force_wake_get(dev_priv); - mtx_assert(&ring->irq_lock, MA_OWNED); + KKASSERT(lockstatus(&ring->irq_lock, 
curthread) != 0); if (ring->irq_refcount++ == 0) { ring->irq_mask &= ~rflag; I915_WRITE_IMR(ring, ring->irq_mask); @@ -860,7 +860,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, uint32_t gflag, uint32_t rflag struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; - mtx_assert(&ring->irq_lock, MA_OWNED); + KKASSERT(lockstatus(&ring->irq_lock, curthread) != 0); if (--ring->irq_refcount == 0) { ring->irq_mask |= rflag; I915_WRITE_IMR(ring, ring->irq_mask); @@ -879,7 +879,7 @@ bsd_ring_get_irq(struct intel_ring_buffer *ring) if (!dev->irq_enabled) return false; - mtx_assert(&ring->irq_lock, MA_OWNED); + KKASSERT(lockstatus(&ring->irq_lock, curthread) != 0); if (ring->irq_refcount++ == 0) { if (IS_G4X(dev)) i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT); @@ -895,7 +895,7 @@ bsd_ring_put_irq(struct intel_ring_buffer *ring) struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; - mtx_assert(&ring->irq_lock, MA_OWNED); + KKASSERT(lockstatus(&ring->irq_lock, curthread) != 0); if (--ring->irq_refcount == 0) { if (IS_G4X(dev)) i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT); @@ -1046,7 +1046,7 @@ int intel_init_ring_buffer(struct drm_device *dev, INIT_LIST_HEAD(&ring->request_list); INIT_LIST_HEAD(&ring->gpu_write_list); - mtx_init(&ring->irq_lock, "ringb", NULL, MTX_DEF); + lockinit(&ring->irq_lock, "ringb", 0, LK_CANRECURSE); ring->irq_mask = ~0; if (I915_NEED_GFX_HWS(dev)) { diff --git a/sys/dev/drm2/i915/intel_ringbuffer.h b/sys/dev/drm2/i915/intel_ringbuffer.h index af6a7aeea3..04299f4f1b 100644 --- a/sys/dev/drm2/i915/intel_ringbuffer.h +++ b/sys/dev/drm2/i915/intel_ringbuffer.h @@ -60,7 +60,7 @@ struct intel_ring_buffer { */ u32 last_retired_head; - struct mtx irq_lock; + struct lock irq_lock; uint32_t irq_refcount; uint32_t irq_mask; uint32_t irq_seqno; /* last seq seem at irq time */ diff --git a/sys/dev/drm2/i915/intel_sprite.c b/sys/dev/drm2/i915/intel_sprite.c index fc9cd381be..16cec7dadd 
100644 --- a/sys/dev/drm2/i915/intel_sprite.c +++ b/sys/dev/drm2/i915/intel_sprite.c @@ -566,7 +566,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) return -EINVAL; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE); if (!obj) { @@ -579,7 +579,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, ret = intel_plane->update_colorkey(plane, set); out_unlock: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } @@ -596,7 +596,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data, if (!dev_priv) return -EINVAL; - sx_xlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE); if (!obj) { @@ -609,7 +609,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data, intel_plane->get_colorkey(plane, get); out_unlock: - sx_xunlock(&dev->mode_config.mutex); + lockmgr(&dev->mode_config.lock, LK_RELEASE); return ret; } diff --git a/sys/dev/drm2/i915/intel_tv.c b/sys/dev/drm2/i915/intel_tv.c index c9e7fc58e1..89d99629c4 100644 --- a/sys/dev/drm2/i915/intel_tv.c +++ b/sys/dev/drm2/i915/intel_tv.c @@ -1126,11 +1126,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv, /* Disable TV interrupts around load detect or we'll recurse */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { - mtx_lock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); - mtx_unlock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_RELEASE); } save_tv_dac = tv_dac = I915_READ(TV_DAC); @@ -1189,11 +1189,11 @@ intel_tv_detect_type(struct intel_tv 
*intel_tv, /* Restore interrupt config */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { - mtx_lock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); - mtx_unlock(&dev_priv->irq_lock); + lockmgr(&dev_priv->irq_lock, LK_RELEASE); } return type; -- 2.41.0