author    Dave Airlie <airlied@redhat.com>  2009-12-08 13:52:41 +1000
committer Dave Airlie <airlied@redhat.com>  2009-12-08 13:52:41 +1000
commit    1bd049fa895f9c6743f38b52ce14775f5a31ea63 (patch)
tree      cb9163ac1c20f7fbdbde42eaab8013d0c3734aed /drivers/gpu/drm/ttm
parent    22763c5cf3690a681551162c15d34d935308c8d7 (diff)
parent    b0a007dc27d8d3ff3db07b3ea997323d9330f770 (diff)
Merge branch 'drm-core-next' into drm-linus
Bring all core drm changes into the 2.6.32 tree and resolve the conflict
that occurs.

Conflicts:
	drivers/gpu/drm/drm_fb_helper.c
Diffstat (limited to 'drivers/gpu/drm/ttm')
 drivers/gpu/drm/ttm/Makefile           |   3
 drivers/gpu/drm/ttm/ttm_bo.c           |   5
 drivers/gpu/drm/ttm/ttm_bo_util.c      |   1
 drivers/gpu/drm/ttm/ttm_execbuf_util.c | 117
 drivers/gpu/drm/ttm/ttm_lock.c         | 311
 drivers/gpu/drm/ttm/ttm_memory.c       |  12
 drivers/gpu/drm/ttm/ttm_object.c       | 452
 drivers/gpu/drm/ttm/ttm_tt.c           |   1
 8 files changed, 895 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b0a9de7..1e138f5 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -3,6 +3,7 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
- ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o
+ ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
+ ttm_object.o ttm_lock.o ttm_execbuf_util.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 87c0625..e13fd23 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -275,9 +275,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
page_flags | TTM_PAGE_FLAG_USER,
glob->dummy_read_page);
- if (unlikely(bo->ttm == NULL))
+ if (unlikely(bo->ttm == NULL)) {
ret = -ENOMEM;
- break;
+ break;
+ }
ret = ttm_tt_set_user(bo->ttm, current,
bo->buffer_start, bo->num_pages);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index c70927e..ceae52f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -369,6 +369,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
#endif
return tmp;
}
+EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long bus_base,
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
new file mode 100644
index 0000000..c285c29
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -0,0 +1,117 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "ttm/ttm_execbuf_util.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
+
+ entry->reserved = false;
+ ttm_bo_unreserve(bo);
+ }
+}
+EXPORT_SYMBOL(ttm_eu_backoff_reservation);
+
+/*
+ * Reserve buffers for validation.
+ *
+ * If a buffer in the list is marked for CPU access, we back off and
+ * wait for that buffer to become free for GPU access.
+ *
+ * If a buffer is reserved for another validation, the validator with
+ * the highest validation sequence backs off and waits for that buffer
+ * to become unreserved. This prevents deadlocks when validating multiple
+ * buffers in different orders.
+ */
+
+int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+{
+ struct ttm_validate_buffer *entry;
+ int ret;
+
+retry:
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+ entry->reserved = false;
+ ret = ttm_bo_reserve(bo, true, false, true, val_seq);
+ if (ret != 0) {
+ ttm_eu_backoff_reservation(list);
+ if (ret == -EAGAIN) {
+ ret = ttm_bo_wait_unreserved(bo, true);
+ if (unlikely(ret != 0))
+ return ret;
+ goto retry;
+ } else
+ return ret;
+ }
+
+ entry->reserved = true;
+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+ ttm_eu_backoff_reservation(list);
+ ret = ttm_bo_wait_cpu(bo, false);
+ if (ret)
+ return ret;
+ goto retry;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_eu_reserve_buffers);
+
+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ struct ttm_bo_driver *driver = bo->bdev->driver;
+ void *old_sync_obj;
+
+ spin_lock(&bo->lock);
+ old_sync_obj = bo->sync_obj;
+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ bo->sync_obj_arg = entry->new_sync_obj_arg;
+ spin_unlock(&bo->lock);
+ ttm_bo_unreserve(bo);
+ entry->reserved = false;
+ if (old_sync_obj)
+ driver->sync_obj_unref(&old_sync_obj);
+ }
+}
+EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
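
For orientation only, here is a minimal sketch of how a driver's command-submission path might use the three helpers introduced above. The validate list, the val_seq counter, the my_fence sync object and the my_driver_validate_and_fire() helper are assumptions made for the illustration; none of them appear in this patch.

	#include "ttm/ttm_execbuf_util.h"

	/*
	 * Hypothetical driver submit path (illustration only).  The caller is
	 * assumed to have filled "validate_list" with struct ttm_validate_buffer
	 * entries, one per buffer object touched by the command stream.
	 */
	static int my_driver_submit(struct list_head *validate_list,
				    uint32_t val_seq, void *my_fence)
	{
		int ret;

		/* Reserve every BO on the list; the helper retries on -EAGAIN. */
		ret = ttm_eu_reserve_buffers(validate_list, val_seq);
		if (unlikely(ret != 0))
			return ret;

		ret = my_driver_validate_and_fire(validate_list);	/* assumed */
		if (unlikely(ret != 0)) {
			/* Drop all reservations taken above on failure. */
			ttm_eu_backoff_reservation(validate_list);
			return ret;
		}

		/* Attach the new fence to every reserved BO and unreserve them. */
		ttm_eu_fence_buffer_objects(validate_list, my_fence);
		return 0;
	}
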
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
new file mode 100644
index 0000000..f619ebc
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -0,0 +1,311 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_lock.h"
+#include "ttm/ttm_module.h"
+#include <asm/atomic.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#define TTM_WRITE_LOCK_PENDING (1 << 0)
+#define TTM_VT_LOCK_PENDING (1 << 1)
+#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
+#define TTM_VT_LOCK (1 << 3)
+#define TTM_SUSPEND_LOCK (1 << 4)
+
+void ttm_lock_init(struct ttm_lock *lock)
+{
+ spin_lock_init(&lock->lock);
+ init_waitqueue_head(&lock->queue);
+ lock->rw = 0;
+ lock->flags = 0;
+ lock->kill_takers = false;
+ lock->signal = SIGKILL;
+}
+EXPORT_SYMBOL(ttm_lock_init);
+
+void ttm_read_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ if (--lock->rw == 0)
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_read_unlock);
+
+static bool __ttm_read_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw >= 0 && lock->flags == 0) {
+ ++lock->rw;
+ locked = true;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+
+ if (interruptible)
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_read_lock(lock));
+ else
+ wait_event(lock->queue, __ttm_read_lock(lock));
+ return ret;
+}
+EXPORT_SYMBOL(ttm_read_lock);
+
+static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
+{
+ bool block = true;
+
+ *locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw >= 0 && lock->flags == 0) {
+ ++lock->rw;
+ block = false;
+ *locked = true;
+ } else if (lock->flags == 0) {
+ block = false;
+ }
+ spin_unlock(&lock->lock);
+
+ return !block;
+}
+
+int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+ bool locked;
+
+ if (interruptible)
+ ret = wait_event_interruptible
+ (lock->queue, __ttm_read_trylock(lock, &locked));
+ else
+ wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
+
+ if (unlikely(ret != 0)) {
+ BUG_ON(locked);
+ return ret;
+ }
+
+ return (locked) ? 0 : -EBUSY;
+}
+
+void ttm_write_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->rw = 0;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_write_unlock);
+
+static bool __ttm_write_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
+ lock->rw = -1;
+ lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+ locked = true;
+ } else {
+ lock->flags |= TTM_WRITE_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+
+ if (interruptible) {
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_write_lock(lock));
+ if (unlikely(ret != 0)) {
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ }
+ } else
+ wait_event(lock->queue, __ttm_write_lock(lock));
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_write_lock);
+
+void ttm_write_lock_downgrade(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->rw = 1;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+
+static int __ttm_vt_unlock(struct ttm_lock *lock)
+{
+ int ret = 0;
+
+ spin_lock(&lock->lock);
+ if (unlikely(!(lock->flags & TTM_VT_LOCK)))
+ ret = -EINVAL;
+ lock->flags &= ~TTM_VT_LOCK;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ printk(KERN_INFO TTM_PFX "vt unlock.\n");
+
+ return ret;
+}
+
+static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
+ int ret;
+
+ *p_base = NULL;
+ ret = __ttm_vt_unlock(lock);
+ BUG_ON(ret != 0);
+}
+
+static bool __ttm_vt_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ lock->flags |= TTM_VT_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_VT_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_vt_lock(struct ttm_lock *lock,
+ bool interruptible,
+ struct ttm_object_file *tfile)
+{
+ int ret = 0;
+
+ if (interruptible) {
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_vt_lock(lock));
+ if (unlikely(ret != 0)) {
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ return ret;
+ }
+ } else
+ wait_event(lock->queue, __ttm_vt_lock(lock));
+
+ /*
+ * Add a base-object, the destructor of which will
+ * make sure the lock is released if the client dies
+ * while holding it.
+ */
+
+ ret = ttm_base_object_init(tfile, &lock->base, false,
+ ttm_lock_type, &ttm_vt_lock_remove, NULL);
+ if (ret)
+ (void)__ttm_vt_unlock(lock);
+ else {
+ lock->vt_holder = tfile;
+ printk(KERN_INFO TTM_PFX "vt lock.\n");
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_vt_lock);
+
+int ttm_vt_unlock(struct ttm_lock *lock)
+{
+ return ttm_ref_object_base_unref(lock->vt_holder,
+ lock->base.hash.key, TTM_REF_USAGE);
+}
+EXPORT_SYMBOL(ttm_vt_unlock);
+
+void ttm_suspend_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_SUSPEND_LOCK;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+
+static bool __ttm_suspend_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
+ lock->flags |= TTM_SUSPEND_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_SUSPEND_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+void ttm_suspend_lock(struct ttm_lock *lock)
+{
+ wait_event(lock->queue, __ttm_suspend_lock(lock));
+}
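
Similarly, a hedged sketch of the intended use of the rw lock this file adds: after ttm_lock_init() at device setup, ordinary ioctls take the shared (read) side while a master-only operation such as a mode switch takes the exclusive (write) side. The my_dev structure and the ioctl bodies are assumptions, not something defined by this patch.

	#include "ttm/ttm_lock.h"

	/* Hypothetical device structure embedding the lock (assumption). */
	struct my_dev {
		struct ttm_lock ttm_lock;
		/* ... */
	};

	static int my_submit_ioctl(struct my_dev *dev)
	{
		int ret;

		/* Shared side: many submitters may hold it at once. */
		ret = ttm_read_lock(&dev->ttm_lock, true);
		if (unlikely(ret != 0))
			return ret;		/* interrupted by a signal */

		/* ... build and submit the command stream ... */

		ttm_read_unlock(&dev->ttm_lock);
		return 0;
	}

	static int my_modeset(struct my_dev *dev)
	{
		/* Exclusive side: waits for all readers to drain first. */
		int ret = ttm_write_lock(&dev->ttm_lock, true);

		if (unlikely(ret != 0))
			return ret;

		/* ... reprogram the hardware ... */

		ttm_write_unlock(&dev->ttm_lock);
		return 0;
	}
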
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 072c281a..8bfde5f 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -274,16 +274,17 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
const struct sysinfo *si)
{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+ struct ttm_mem_zone *zone;
uint64_t mem;
int ret;
- if (unlikely(!zone))
- return -ENOMEM;
-
if (si->totalhigh == 0)
return 0;
+ zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+ if (unlikely(!zone))
+ return -ENOMEM;
+
mem = si->totalram;
mem *= si->mem_unit;
@@ -460,6 +461,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
{
return ttm_mem_global_free_zone(glob, NULL, amount);
}
+EXPORT_SYMBOL(ttm_mem_global_free);
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
@@ -533,6 +535,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
interruptible);
}
+EXPORT_SYMBOL(ttm_mem_global_alloc);
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct page *page,
@@ -588,3 +591,4 @@ size_t ttm_round_pot(size_t size)
}
return 0;
}
+EXPORT_SYMBOL(ttm_round_pot);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
new file mode 100644
index 0000000..1099aba
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -0,0 +1,452 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_ref_object.c
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+/**
+ * struct ttm_object_file
+ *
+ * @tdev: Pointer to the ttm_object_device.
+ *
+ * @lock: Lock that protects the ref_list list and the
+ * ref_hash hash tables.
+ *
+ * @ref_list: List of ttm_ref_objects to be destroyed at
+ * file release.
+ *
+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
+ * for fast lookup of ref objects given a base object.
+ */
+
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_module.h"
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/atomic.h>
+
+struct ttm_object_file {
+ struct ttm_object_device *tdev;
+ rwlock_t lock;
+ struct list_head ref_list;
+ struct drm_open_hash ref_hash[TTM_REF_NUM];
+ struct kref refcount;
+};
+
+/**
+ * struct ttm_object_device
+ *
+ * @object_lock: lock that protects the object_hash hash table.
+ *
+ * @object_hash: hash table for fast lookup of object global names.
+ *
+ * @object_count: Per device object count.
+ *
+ * This is the per-device data structure needed for ttm object management.
+ */
+
+struct ttm_object_device {
+ rwlock_t object_lock;
+ struct drm_open_hash object_hash;
+ atomic_t object_count;
+ struct ttm_mem_global *mem_glob;
+};
+
+/**
+ * struct ttm_ref_object
+ *
+ * @hash: Hash entry for the per-file object reference hash.
+ *
+ * @head: List entry for the per-file list of ref-objects.
+ *
+ * @kref: Ref count.
+ *
+ * @obj: Base object this ref object is referencing.
+ *
+ * @ref_type: Type of ref object.
+ *
+ * This is similar to an idr object, but it also has a hash table entry
+ * that allows lookup with a pointer to the referenced object as a key. In
+ * that way, one can easily detect whether a base object is referenced by
+ * a particular ttm_object_file. It also carries a ref count to avoid creating
+ * multiple ref objects if a ttm_object_file references the same base
+ * object more than once.
+ */
+
+struct ttm_ref_object {
+ struct drm_hash_item hash;
+ struct list_head head;
+ struct kref kref;
+ struct ttm_base_object *obj;
+ enum ttm_ref_type ref_type;
+ struct ttm_object_file *tfile;
+};
+
+static inline struct ttm_object_file *
+ttm_object_file_ref(struct ttm_object_file *tfile)
+{
+ kref_get(&tfile->refcount);
+ return tfile;
+}
+
+static void ttm_object_file_destroy(struct kref *kref)
+{
+ struct ttm_object_file *tfile =
+ container_of(kref, struct ttm_object_file, refcount);
+
+ kfree(tfile);
+}
+
+
+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
+{
+ struct ttm_object_file *tfile = *p_tfile;
+
+ *p_tfile = NULL;
+ kref_put(&tfile->refcount, ttm_object_file_destroy);
+}
+
+
+int ttm_base_object_init(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ bool shareable,
+ enum ttm_object_type object_type,
+ void (*refcount_release) (struct ttm_base_object **),
+ void (*ref_obj_release) (struct ttm_base_object *,
+ enum ttm_ref_type ref_type))
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ int ret;
+
+ base->shareable = shareable;
+ base->tfile = ttm_object_file_ref(tfile);
+ base->refcount_release = refcount_release;
+ base->ref_obj_release = ref_obj_release;
+ base->object_type = object_type;
+ write_lock(&tdev->object_lock);
+ kref_init(&base->refcount);
+ ret = drm_ht_just_insert_please(&tdev->object_hash,
+ &base->hash,
+ (unsigned long)base, 31, 0, 0);
+ write_unlock(&tdev->object_lock);
+ if (unlikely(ret != 0))
+ goto out_err0;
+
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0))
+ goto out_err1;
+
+ ttm_base_object_unref(&base);
+
+ return 0;
+out_err1:
+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+out_err0:
+ return ret;
+}
+EXPORT_SYMBOL(ttm_base_object_init);
+
+static void ttm_release_base(struct kref *kref)
+{
+ struct ttm_base_object *base =
+ container_of(kref, struct ttm_base_object, refcount);
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ write_unlock(&tdev->object_lock);
+ if (base->refcount_release) {
+ ttm_object_file_unref(&base->tfile);
+ base->refcount_release(&base);
+ }
+ write_lock(&tdev->object_lock);
+}
+
+void ttm_base_object_unref(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ *p_base = NULL;
+
+ /*
+ * Need to take the lock here to avoid racing with
+ * users trying to look up the object.
+ */
+
+ write_lock(&tdev->object_lock);
+ (void)kref_put(&base->refcount, &ttm_release_base);
+ write_unlock(&tdev->object_lock);
+}
+EXPORT_SYMBOL(ttm_base_object_unref);
+
+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
+ uint32_t key)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct ttm_base_object *base;
+ struct drm_hash_item *hash;
+ int ret;
+
+ read_lock(&tdev->object_lock);
+ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+
+ if (likely(ret == 0)) {
+ base = drm_hash_entry(hash, struct ttm_base_object, hash);
+ kref_get(&base->refcount);
+ }
+ read_unlock(&tdev->object_lock);
+
+ if (unlikely(ret != 0))
+ return NULL;
+
+ if (tfile != base->tfile && !base->shareable) {
+ printk(KERN_ERR TTM_PFX
+ "Attempted access of non-shareable object.\n");
+ ttm_base_object_unref(&base);
+ return NULL;
+ }
+
+ return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);
+
+int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+ struct drm_hash_item *hash;
+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+ int ret = -EINVAL;
+
+ if (existed != NULL)
+ *existed = true;
+
+ while (ret == -EINVAL) {
+ read_lock(&tfile->lock);
+ ret = drm_ht_find_item(ht, base->hash.key, &hash);
+
+ if (ret == 0) {
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ kref_get(&ref->kref);
+ read_unlock(&tfile->lock);
+ break;
+ }
+
+ read_unlock(&tfile->lock);
+ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
+ false, false);
+ if (unlikely(ret != 0))
+ return ret;
+ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
+ if (unlikely(ref == NULL)) {
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ return -ENOMEM;
+ }
+
+ ref->hash.key = base->hash.key;
+ ref->obj = base;
+ ref->tfile = tfile;
+ ref->ref_type = ref_type;
+ kref_init(&ref->kref);
+
+ write_lock(&tfile->lock);
+ ret = drm_ht_insert_item(ht, &ref->hash);
+
+ if (likely(ret == 0)) {
+ list_add_tail(&ref->head, &tfile->ref_list);
+ kref_get(&base->refcount);
+ write_unlock(&tfile->lock);
+ if (existed != NULL)
+ *existed = false;
+ break;
+ }
+
+ write_unlock(&tfile->lock);
+ BUG_ON(ret != -EINVAL);
+
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ kfree(ref);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_ref_object_add);
+
+static void ttm_ref_object_release(struct kref *kref)
+{
+ struct ttm_ref_object *ref =
+ container_of(kref, struct ttm_ref_object, kref);
+ struct ttm_base_object *base = ref->obj;
+ struct ttm_object_file *tfile = ref->tfile;
+ struct drm_open_hash *ht;
+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+
+ ht = &tfile->ref_hash[ref->ref_type];
+ (void)drm_ht_remove_item(ht, &ref->hash);
+ list_del(&ref->head);
+ write_unlock(&tfile->lock);
+
+ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
+ base->ref_obj_release(base, ref->ref_type);
+
+ ttm_base_object_unref(&ref->obj);
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ kfree(ref);
+ write_lock(&tfile->lock);
+}
+
+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ unsigned long key, enum ttm_ref_type ref_type)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+ struct drm_hash_item *hash;
+ int ret;
+
+ write_lock(&tfile->lock);
+ ret = drm_ht_find_item(ht, key, &hash);
+ if (unlikely(ret != 0)) {
+ write_unlock(&tfile->lock);
+ return -EINVAL;
+ }
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ kref_put(&ref->kref, ttm_ref_object_release);
+ write_unlock(&tfile->lock);
+ return 0;
+}
+EXPORT_SYMBOL(ttm_ref_object_base_unref);
+
+void ttm_object_file_release(struct ttm_object_file **p_tfile)
+{
+ struct ttm_ref_object *ref;
+ struct list_head *list;
+ unsigned int i;
+ struct ttm_object_file *tfile = *p_tfile;
+
+ *p_tfile = NULL;
+ write_lock(&tfile->lock);
+
+ /*
+ * Since we release the lock within the loop, we have to
+ * restart it from the beginning each time.
+ */
+
+ while (!list_empty(&tfile->ref_list)) {
+ list = tfile->ref_list.next;
+ ref = list_entry(list, struct ttm_ref_object, head);
+ ttm_ref_object_release(&ref->kref);
+ }
+
+ for (i = 0; i < TTM_REF_NUM; ++i)
+ drm_ht_remove(&tfile->ref_hash[i]);
+
+ write_unlock(&tfile->lock);
+ ttm_object_file_unref(&tfile);
+}
+EXPORT_SYMBOL(ttm_object_file_release);
+
+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
+ unsigned int hash_order)
+{
+ struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
+ unsigned int i;
+ unsigned int j = 0;
+ int ret;
+
+ if (unlikely(tfile == NULL))
+ return NULL;
+
+ rwlock_init(&tfile->lock);
+ tfile->tdev = tdev;
+ kref_init(&tfile->refcount);
+ INIT_LIST_HEAD(&tfile->ref_list);
+
+ for (i = 0; i < TTM_REF_NUM; ++i) {
+ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
+ if (ret) {
+ j = i;
+ goto out_err;
+ }
+ }
+
+ return tfile;
+out_err:
+ for (i = 0; i < j; ++i)
+ drm_ht_remove(&tfile->ref_hash[i]);
+
+ kfree(tfile);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_object_file_init);
+
+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
+ *mem_glob,
+ unsigned int hash_order)
+{
+ struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
+ int ret;
+
+ if (unlikely(tdev == NULL))
+ return NULL;
+
+ tdev->mem_glob = mem_glob;
+ rwlock_init(&tdev->object_lock);
+ atomic_set(&tdev->object_count, 0);
+ ret = drm_ht_create(&tdev->object_hash, hash_order);
+
+ if (likely(ret == 0))
+ return tdev;
+
+ kfree(tdev);
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_object_device_init);
+
+void ttm_object_device_release(struct ttm_object_device **p_tdev)
+{
+ struct ttm_object_device *tdev = *p_tdev;
+
+ *p_tdev = NULL;
+
+ write_lock(&tdev->object_lock);
+ drm_ht_remove(&tdev->object_hash);
+ write_unlock(&tdev->object_lock);
+
+ kfree(tdev);
+}
+EXPORT_SYMBOL(ttm_object_device_release);
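
Finally, a sketch of the base/ref-object usage this file enables: a driver embeds a struct ttm_base_object in its own object, registers it with ttm_base_object_init(), hands the hash key back to user space as a handle, and resolves handles with ttm_base_object_lookup(). The my_obj structure, my_obj_release() and the choice of ttm_driver_type0 are illustrative assumptions only, not part of this patch.

	#include "ttm/ttm_object.h"
	#include <linux/slab.h>

	/* Hypothetical driver object embedding a base object (assumption). */
	struct my_obj {
		struct ttm_base_object base;
		/* ... driver payload ... */
	};

	static void my_obj_release(struct ttm_base_object **p_base)
	{
		struct my_obj *obj = container_of(*p_base, struct my_obj, base);

		*p_base = NULL;
		kfree(obj);
	}

	static int my_obj_create(struct ttm_object_file *tfile, uint32_t *handle)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		int ret;

		if (unlikely(obj == NULL))
			return -ENOMEM;

		/*
		 * Registers the object in the device hash and adds a
		 * TTM_REF_USAGE reference for tfile; that reference is what
		 * keeps the object alive until the file is released.
		 */
		ret = ttm_base_object_init(tfile, &obj->base, true /* shareable */,
					   ttm_driver_type0, &my_obj_release, NULL);
		if (unlikely(ret != 0)) {
			kfree(obj);
			return ret;
		}

		*handle = obj->base.hash.key;	/* user-visible name */
		return 0;
	}

	static struct my_obj *my_obj_lookup(struct ttm_object_file *tfile,
					    uint32_t handle)
	{
		struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

		if (unlikely(base == NULL))
			return NULL;

		/*
		 * A real driver would also check base->object_type here.  The
		 * caller must later drop the reference with
		 * ttm_base_object_unref().
		 */
		return container_of(base, struct my_obj, base);
	}
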
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7bcb89f..9c2b1cc 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -192,6 +192,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
ttm->state = tt_unbound;
return 0;
}
+EXPORT_SYMBOL(ttm_tt_populate);
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,