/*-
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/** @file drm_lock.c
 * Implementation of the ioctls and other support code for dealing with the
 * hardware lock.
 *
 * The DRM hardware lock is a shared structure between the kernel and userland.
 *
 * On uncontended access where the new context was the last context, the
 * client may take the lock without dropping down into the kernel, using atomic
 * compare-and-set.
 *
 * If the client finds during compare-and-set that it was not the last owner
 * of the lock, it calls the DRM lock ioctl, which may sleep waiting for the
 * lock, and may have side-effects of kernel-managed context switching.
 *
 * When the client releases the lock, if the lock is marked as being contended
 * by another client, then the DRM unlock ioctl is called so that the
 * contending client may be woken up.
 */
#include "dev/drm/drmP.h"
52 int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
54 struct drm_lock *lock = data;
57 if (lock->context == DRM_KERNEL_CONTEXT) {
58 DRM_ERROR("Process %d using kernel context %d\n",
59 DRM_CURRENTPID, lock->context);
63 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
64 lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
67 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) &&
73 if (drm_lock_take(&dev->lock, lock->context)) {
74 dev->lock.file_priv = file_priv;
75 dev->lock.lock_time = jiffies;
76 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
81 tsleep_interlock((void *)&dev->lock.lock_queue, PCATCH);
83 ret = tsleep((void *)&dev->lock.lock_queue,
84 PCATCH | PINTERLOCKED, "drmlk2", 0);
92 DRM_DEBUG("restarting syscall\n");
94 DRM_DEBUG("%d %s\n", lock->context,
95 ret ? "interrupted" : "has lock");
100 /* XXX: Add signal blocking here */
102 if (dev->driver->dma_quiescent != NULL &&
103 (lock->flags & _DRM_LOCK_QUIESCENT))
104 dev->driver->dma_quiescent(dev);
109 int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
111 struct drm_lock *lock = data;
113 DRM_DEBUG("%d (pid %d) requests unlock (0x%08x), flags = 0x%08x\n",
114 lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
117 if (lock->context == DRM_KERNEL_CONTEXT) {
118 DRM_ERROR("Process %d using kernel context %d\n",
119 DRM_CURRENTPID, lock->context);
123 atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
126 drm_lock_transfer(&dev->lock, DRM_KERNEL_CONTEXT);
128 if (drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT)) {
136 int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context)
138 volatile unsigned int *lock = &lock_data->hw_lock->lock;
139 unsigned int old, new;
143 if (old & _DRM_LOCK_HELD)
144 new = old | _DRM_LOCK_CONT;
146 new = context | _DRM_LOCK_HELD;
147 } while (!atomic_cmpset_int(lock, old, new));
149 if (_DRM_LOCKING_CONTEXT(old) == context) {
150 if (old & _DRM_LOCK_HELD) {
151 if (context != DRM_KERNEL_CONTEXT) {
152 DRM_ERROR("%d holds heavyweight lock\n",
158 if (new == (context | _DRM_LOCK_HELD)) {
165 /* This takes a lock forcibly and hands it to context. Should ONLY be used
166 inside *_unlock to give lock to kernel before calling *_dma_schedule. */
167 int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context)
169 volatile unsigned int *lock = &lock_data->hw_lock->lock;
170 unsigned int old, new;
172 lock_data->file_priv = NULL;
175 new = context | _DRM_LOCK_HELD;
176 } while (!atomic_cmpset_int(lock, old, new));
181 int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
183 volatile unsigned int *lock = &lock_data->hw_lock->lock;
184 unsigned int old, new;
186 lock_data->file_priv = NULL;
190 } while (!atomic_cmpset_int(lock, old, new));
192 if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
193 DRM_ERROR("%d freed heavyweight lock held by %d\n",
194 context, _DRM_LOCKING_CONTEXT(old));
197 DRM_WAKEUP_INT((void *)&lock_data->lock_queue);