summaryrefslogtreecommitdiffstats
path: root/sys/dev/drm/drm_lock.c
diff options
context:
space:
mode:
authorrnoland <rnoland@FreeBSD.org>2008-10-03 16:59:11 +0000
committerrnoland <rnoland@FreeBSD.org>2008-10-03 16:59:11 +0000
commitbbc754f502e8dc966e5d05fc4fd4d91d4c136d83 (patch)
tree2d4185c3ec896efd0d1b2735041e6ddeb6a750f1 /sys/dev/drm/drm_lock.c
parentbbe0e181656adf606748968f5803bb8cbc7d83c8 (diff)
downloadFreeBSD-src-bbc754f502e8dc966e5d05fc4fd4d91d4c136d83.zip
FreeBSD-src-bbc754f502e8dc966e5d05fc4fd4d91d4c136d83.tar.gz
resync to git master
This reverts a private patch which is causing issues with many Intel chipsets. I will review that patch and see what we need to do to fix it up later, but for the time being, we will just get these chips working again. This update contains a lot of code cleanup and is post gem merge (no, we don't have gem support). It should prove much easier to read the code now. A lot of thanks goes to vehemens for that work. I have adapted the code to use cdevpriv for tracking per open file data. That alleviates the old ugly hack that we used to try to accomplish the task and helped to clean up the open / close behavior a good bit. This also replaces the hack that was put in place a year or so ago to prevent radeons from locking up with AIGLX enabled. I have had a couple of radeon testers report that it still works as expected, though I no longer have radeon hardware to test with myself. Other various fixes from the linux crew and Intel, many of which are muddled in with the gem merge. Approved by: jhb (mentor) Obtained from: mesa/drm git master MFC after: 2 weeks
Diffstat (limited to 'sys/dev/drm/drm_lock.c')
-rw-r--r--sys/dev/drm/drm_lock.c170
1 files changed, 82 insertions, 88 deletions
diff --git a/sys/dev/drm/drm_lock.c b/sys/dev/drm/drm_lock.c
index 938954c..668bce8 100644
--- a/sys/dev/drm/drm_lock.c
+++ b/sys/dev/drm/drm_lock.c
@@ -52,89 +52,28 @@ __FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
-int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
-{
- unsigned int old, new;
-
- do {
- old = *lock;
- if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT;
- else new = context | _DRM_LOCK_HELD;
- } while (!atomic_cmpset_int(lock, old, new));
-
- if (_DRM_LOCKING_CONTEXT(old) == context) {
- if (old & _DRM_LOCK_HELD) {
- if (context != DRM_KERNEL_CONTEXT) {
- DRM_ERROR("%d holds heavyweight lock\n",
- context);
- }
- return 0;
- }
- }
- if (new == (context | _DRM_LOCK_HELD)) {
- /* Have lock */
- return 1;
- }
- return 0;
-}
-
-/* This takes a lock forcibly and hands it to context. Should ONLY be used
- inside *_unlock to give lock to kernel before calling *_dma_schedule. */
-int drm_lock_transfer(struct drm_device *dev,
- __volatile__ unsigned int *lock, unsigned int context)
-{
- unsigned int old, new;
-
- dev->lock.file_priv = NULL;
- do {
- old = *lock;
- new = context | _DRM_LOCK_HELD;
- } while (!atomic_cmpset_int(lock, old, new));
-
- return 1;
-}
-
-int drm_lock_free(struct drm_device *dev,
- __volatile__ unsigned int *lock, unsigned int context)
-{
- unsigned int old, new;
-
- dev->lock.file_priv = NULL;
- do {
- old = *lock;
- new = 0;
- } while (!atomic_cmpset_int(lock, old, new));
-
- if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
- DRM_ERROR("%d freed heavyweight lock held by %d\n",
- context, _DRM_LOCKING_CONTEXT(old));
- return 1;
- }
- DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
- return 0;
-}
-
int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- drm_lock_t *lock = data;
- int ret = 0;
+ struct drm_lock *lock = data;
+ int ret = 0;
- if (lock->context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
+ if (lock->context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
DRM_CURRENTPID, lock->context);
- return EINVAL;
- }
+ return EINVAL;
+ }
- DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
lock->flags);
- if (dev->driver.use_dma_queue && lock->context < 0)
- return EINVAL;
+ if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) &&
+ lock->context < 0)
+ return EINVAL;
DRM_LOCK();
for (;;) {
- if (drm_lock_take(&dev->lock.hw_lock->lock, lock->context)) {
+ if (drm_lock_take(&dev->lock, lock->context)) {
dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
@@ -142,13 +81,8 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
/* Contention */
-#if defined(__FreeBSD__) && __FreeBSD_version > 500000
ret = mtx_sleep((void *)&dev->lock.lock_queue, &dev->dev_lock,
PZERO | PCATCH, "drmlk2", 0);
-#else
- ret = tsleep((void *)&dev->lock.lock_queue, PZERO | PCATCH,
- "drmlk2", 0);
-#endif
if (ret != 0)
break;
}
@@ -160,34 +94,29 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
/* XXX: Add signal blocking here */
- if (dev->driver.dma_quiescent != NULL &&
+ if (dev->driver->dma_quiescent != NULL &&
(lock->flags & _DRM_LOCK_QUIESCENT))
- dev->driver.dma_quiescent(dev);
+ dev->driver->dma_quiescent(dev);
return 0;
}
int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- drm_lock_t *lock = data;
-
- DRM_DEBUG("%d (pid %d) requests unlock (0x%08x), flags = 0x%08x\n",
- lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
- lock->flags);
+ struct drm_lock *lock = data;
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
DRM_CURRENTPID, lock->context);
return EINVAL;
}
-#if 0
/* Check that the context unlock being requested actually matches
* who currently holds the lock.
*/
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) != lock->context)
return EINVAL;
-#endif
+
DRM_SPINLOCK(&dev->tsk_lock);
if (dev->locked_task_call != NULL) {
dev->locked_task_call(dev);
@@ -198,12 +127,77 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
DRM_LOCK();
- drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+ drm_lock_transfer(&dev->lock, DRM_KERNEL_CONTEXT);
- if (drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) {
+ if (drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
DRM_UNLOCK();
return 0;
}
+
+int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context)
+{
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+ unsigned int old, new;
+
+ do {
+ old = *lock;
+ if (old & _DRM_LOCK_HELD)
+ new = old | _DRM_LOCK_CONT;
+ else
+ new = context | _DRM_LOCK_HELD;
+ } while (!atomic_cmpset_int(lock, old, new));
+
+ if (_DRM_LOCKING_CONTEXT(old) == context) {
+ if (old & _DRM_LOCK_HELD) {
+ if (context != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("%d holds heavyweight lock\n",
+ context);
+ }
+ return 0;
+ }
+ }
+ if (new == (context | _DRM_LOCK_HELD)) {
+ /* Have lock */
+ return 1;
+ }
+ return 0;
+}
+
+/* This takes a lock forcibly and hands it to context. Should ONLY be used
+ inside *_unlock to give lock to kernel before calling *_dma_schedule. */
+int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context)
+{
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+ unsigned int old, new;
+
+ lock_data->file_priv = NULL;
+ do {
+ old = *lock;
+ new = context | _DRM_LOCK_HELD;
+ } while (!atomic_cmpset_int(lock, old, new));
+
+ return 1;
+}
+
+int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
+{
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+ unsigned int old, new;
+
+ lock_data->file_priv = NULL;
+ do {
+ old = *lock;
+ new = 0;
+ } while (!atomic_cmpset_int(lock, old, new));
+
+ if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+ DRM_ERROR("%d freed heavyweight lock held by %d\n",
+ context, _DRM_LOCKING_CONTEXT(old));
+ return 1;
+ }
+ DRM_WAKEUP_INT((void *)&lock_data->lock_queue);
+ return 0;
+}
OpenPOWER on IntegriCloud