author    Matthew Garrett <mjg@redhat.com>    2010-04-30 15:24:17 -0400
committer Dave Airlie <airlied@redhat.com>    2010-05-18 18:21:45 +1000
commit    91700f3cac56a1998a56d41e4459a5cebdb4f752 (patch)
tree      24d82df7ed2b7f23e49963b6aa7ae31adbebe8f2
parent    78930b1c39dd4a5afd5aa873eec11b5bd7079866 (diff)
radeon: Split out ring locking and allocation
We need to handle the ring while we've already locked it, so split out the
allocation and commit functions so that they can be used on their own while
the lock is held.

Signed-off-by: Matthew Garrett <mjg@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
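For context, a rough sketch (not part of this patch) of the call pattern the
split enables: a caller that already holds rdev->cp.mutex, e.g. via an earlier
radeon_ring_lock(), can reserve and flush additional ring space without
dropping and retaking the lock. The function name and the register writes
below are illustrative only.

/* Hypothetical example, not part of this patch: a caller that already
 * holds rdev->cp.mutex (e.g. from an earlier radeon_ring_lock()) and
 * wants to emit more commands without dropping the lock. */
static int example_emit_while_locked(struct radeon_device *rdev)
{
	int r;

	/* Reserve two dwords; radeon_ring_alloc() does no locking, so the
	 * caller must already hold cp.mutex. */
	r = radeon_ring_alloc(rdev, 2);
	if (r)
		return r;

	/* Illustrative register write. */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN);

	/* Pad and flush to the GPU, but keep cp.mutex held. */
	radeon_ring_commit(rdev);
	return 0;
}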
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h       |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c  | 27
2 files changed, 23 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 93ac88eb..e39e2b4 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -471,7 +471,9 @@ int radeon_ib_test(struct radeon_device *rdev);
 extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
 /* Ring access between begin & end cannot sleep */
 void radeon_ring_free_size(struct radeon_device *rdev);
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
 int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev);
 void radeon_ring_unlock_commit(struct radeon_device *rdev);
 void radeon_ring_unlock_undo(struct radeon_device *rdev);
 int radeon_ring_test(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index f6e1e8d..6cc4259 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -258,31 +258,41 @@ void radeon_ring_free_size(struct radeon_device *rdev)
 	}
 }
 
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
 {
 	int r;
 
 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
 	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
-	mutex_lock(&rdev->cp.mutex);
 	while (ndw > (rdev->cp.ring_free_dw - 1)) {
 		radeon_ring_free_size(rdev);
 		if (ndw < rdev->cp.ring_free_dw) {
 			break;
 		}
 		r = radeon_fence_wait_next(rdev);
-		if (r) {
-			mutex_unlock(&rdev->cp.mutex);
+		if (r)
 			return r;
-		}
 	}
 	rdev->cp.count_dw = ndw;
 	rdev->cp.wptr_old = rdev->cp.wptr;
 	return 0;
 }
 
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+{
+	int r;
+
+	mutex_lock(&rdev->cp.mutex);
+	r = radeon_ring_alloc(rdev, ndw);
+	if (r) {
+		mutex_unlock(&rdev->cp.mutex);
+		return r;
+	}
+	return 0;
+}
+
+void radeon_ring_commit(struct radeon_device *rdev)
 {
 	unsigned count_dw_pad;
 	unsigned i;
@@ -295,6 +305,11 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev)
 	}
 	DRM_MEMORYBARRIER();
 	radeon_cp_commit(rdev);
+}
+
+void radeon_ring_unlock_commit(struct radeon_device *rdev)
+{
+	radeon_ring_commit(rdev);
 	mutex_unlock(&rdev->cp.mutex);
 }
OpenPOWER on IntegriCloud