Diffstat (limited to 'sys/dev/drm2')
-rw-r--r--  sys/dev/drm2/drmP.h                      9
-rw-r--r--  sys/dev/drm2/drm_drv.c                  21
-rw-r--r--  sys/dev/drm2/drm_gem.c                   6
-rw-r--r--  sys/dev/drm2/ttm/ttm_agp_backend.c     145
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo.c             1820
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo_api.h          740
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo_driver.h      1018
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo_manager.c      157
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo_util.c         658
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo_vm.c           492
-rw-r--r--  sys/dev/drm2/ttm/ttm_execbuf_util.c    230
-rw-r--r--  sys/dev/drm2/ttm/ttm_execbuf_util.h    109
-rw-r--r--  sys/dev/drm2/ttm/ttm_lock.c            340
-rw-r--r--  sys/dev/drm2/ttm/ttm_lock.h            249
-rw-r--r--  sys/dev/drm2/ttm/ttm_memory.c          471
-rw-r--r--  sys/dev/drm2/ttm/ttm_memory.h          149
-rw-r--r--  sys/dev/drm2/ttm/ttm_module.h           37
-rw-r--r--  sys/dev/drm2/ttm/ttm_object.c          455
-rw-r--r--  sys/dev/drm2/ttm/ttm_object.h          271
-rw-r--r--  sys/dev/drm2/ttm/ttm_page_alloc.c      894
-rw-r--r--  sys/dev/drm2/ttm/ttm_page_alloc.h      103
-rw-r--r--  sys/dev/drm2/ttm/ttm_page_alloc_dma.c 1134
-rw-r--r--  sys/dev/drm2/ttm/ttm_placement.h        93
-rw-r--r--  sys/dev/drm2/ttm/ttm_tt.c              370
24 files changed, 9963 insertions, 8 deletions
diff --git a/sys/dev/drm2/drmP.h b/sys/dev/drm2/drmP.h
index ecd77e2..06de553 100644
--- a/sys/dev/drm2/drmP.h
+++ b/sys/dev/drm2/drmP.h
@@ -906,6 +906,7 @@ struct drm_device {
struct drm_minor *control; /**< Control node for card */
struct drm_minor *primary; /**< render type primary screen head */
+ void *drm_ttm_bo;
struct unrhdr *drw_unrhdr;
/* RB tree of drawable infos */
RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head;
@@ -1302,10 +1303,14 @@ void drm_gem_release(struct drm_device *dev, struct drm_file *file_priv);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
-int drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
- struct vm_object **obj_res, int nprot);
+int drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
+ vm_size_t size, struct vm_object **obj_res, int nprot);
void drm_gem_pager_dtr(void *obj);
+struct ttm_bo_device;
+int ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset,
+ vm_size_t size, struct vm_object **obj_res, int nprot);
+
void drm_device_lock_mtx(struct drm_device *dev);
void drm_device_unlock_mtx(struct drm_device *dev);
int drm_device_sleep_mtx(struct drm_device *dev, void *chan, int flags,
diff --git a/sys/dev/drm2/drm_drv.c b/sys/dev/drm2/drm_drv.c
index 483aff5..c45bda1 100644
--- a/sys/dev/drm2/drm_drv.c
+++ b/sys/dev/drm2/drm_drv.c
@@ -58,6 +58,8 @@ static int drm_load(struct drm_device *dev);
static void drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
drm_pci_id_list_t *idlist);
+static int drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset,
+ vm_size_t size, struct vm_object **obj_res, int nprot);
static int
drm_modevent(module_t mod, int type, void *data)
@@ -187,7 +189,7 @@ static struct cdevsw drm_cdevsw = {
.d_ioctl = drm_ioctl,
.d_poll = drm_poll,
.d_mmap = drm_mmap,
- .d_mmap_single = drm_gem_mmap_single,
+ .d_mmap_single = drm_mmap_single,
.d_name = "drm",
.d_flags = D_TRACKCLOSE
};
@@ -955,6 +957,23 @@ drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
return (0);
}
+static int
+drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
+ struct vm_object **obj_res, int nprot)
+{
+ struct drm_device *dev;
+
+ dev = drm_get_device_from_kdev(kdev);
+ if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
+ return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
+ } else if (dev->drm_ttm_bo != NULL) {
+ return (ttm_bo_mmap_single(dev->drm_ttm_bo, offset, size,
+ obj_res, nprot));
+ } else {
+ return (ENODEV);
+ }
+}
+
#if DRM_LINUX
#include <sys/sysproto.h>
diff --git a/sys/dev/drm2/drm_gem.c b/sys/dev/drm2/drm_gem.c
index f2c3e08..a792839 100644
--- a/sys/dev/drm2/drm_gem.c
+++ b/sys/dev/drm2/drm_gem.c
@@ -441,16 +441,12 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
}
int
-drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
+drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size,
struct vm_object **obj_res, int nprot)
{
- struct drm_device *dev;
struct drm_gem_object *gem_obj;
struct vm_object *vm_obj;
- dev = drm_get_device_from_kdev(kdev);
- if ((dev->driver->driver_features & DRIVER_GEM) == 0)
- return (ENODEV);
DRM_LOCK(dev);
gem_obj = drm_gem_object_from_offset(dev, *offset);
if (gem_obj == NULL) {
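
The drmP.h, drm_drv.c and drm_gem.c changes above funnel the cdevsw d_mmap_single entry point through one dispatcher: devices advertising DRIVER_GEM go to drm_gem_mmap_single(), while TTM drivers are expected to publish their struct ttm_bo_device through the new dev->drm_ttm_bo pointer so the request reaches ttm_bo_mmap_single(). A minimal sketch of how a TTM-based driver might wire that up at load time follows; the softc layout, mydrv_bo_driver and DRM_FILE_PAGE_OFFSET are illustrative assumptions, not part of this change.

/*
 * Hypothetical load-time hook: publish the TTM device so that the
 * generic drm_mmap_single() can route mmap requests to TTM.
 */
extern struct ttm_bo_driver mydrv_bo_driver;	/* driver-supplied hooks */

struct mydrv_softc {
	struct ttm_bo_device	bdev;
	/* ... */
};

static int
mydrv_ttm_init(struct drm_device *dev, struct mydrv_softc *sc,
    struct ttm_bo_global *glob)
{
	int ret;

	ret = ttm_bo_device_init(&sc->bdev, glob, &mydrv_bo_driver,
	    DRM_FILE_PAGE_OFFSET, false);
	if (ret != 0)
		return (ret);

	/* Seen by drm_mmap_single() on the next d_mmap_single call. */
	dev->drm_ttm_bo = &sc->bdev;
	return (0);
}
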
diff --git a/sys/dev/drm2/ttm/ttm_agp_backend.c b/sys/dev/drm2/ttm/ttm_agp_backend.c
new file mode 100644
index 0000000..48f2193
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_agp_backend.c
@@ -0,0 +1,145 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ * Keith Packard.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_page_alloc.h>
+#ifdef TTM_HAS_AGP
+#include <dev/drm2/ttm/ttm_placement.h>
+
+struct ttm_agp_backend {
+ struct ttm_tt ttm;
+ struct agp_memory *mem;
+ device_t bridge;
+};
+
+MALLOC_DEFINE(M_TTM_AGP, "ttm_agp", "TTM AGP Backend");
+
+static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+ struct drm_mm_node *node = bo_mem->mm_node;
+ struct agp_memory *mem;
+ int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+ unsigned i;
+
+ mem = agp_alloc_memory(agp_be->bridge, AGP_USER_MEMORY, ttm->num_pages);
+ if (unlikely(mem == NULL))
+ return -ENOMEM;
+
+ mem->page_count = 0;
+ for (i = 0; i < ttm->num_pages; i++) {
+ vm_page_t page = ttm->pages[i];
+
+ if (!page)
+ page = ttm->dummy_read_page;
+
+ mem->pages[mem->page_count++] = page;
+ }
+ agp_be->mem = mem;
+
+ mem->is_flushed = 1;
+ mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
+
+ ret = agp_bind_memory(mem, node->start);
+ if (ret)
+ pr_err("AGP Bind memory failed\n");
+
+ return ret;
+}
+
+static int ttm_agp_unbind(struct ttm_tt *ttm)
+{
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+
+ if (agp_be->mem) {
+ if (agp_be->mem->is_bound)
+ return agp_unbind_memory(agp_be->mem);
+ agp_free_memory(agp_be->mem);
+ agp_be->mem = NULL;
+ }
+ return 0;
+}
+
+static void ttm_agp_destroy(struct ttm_tt *ttm)
+{
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+
+ if (agp_be->mem)
+ ttm_agp_unbind(ttm);
+ ttm_tt_fini(ttm);
+ free(agp_be, M_TTM_AGP);
+}
+
+static struct ttm_backend_func ttm_agp_func = {
+ .bind = ttm_agp_bind,
+ .unbind = ttm_agp_unbind,
+ .destroy = ttm_agp_destroy,
+};
+
+struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+ device_t bridge,
+ unsigned long size, uint32_t page_flags,
+ vm_page_t dummy_read_page)
+{
+ struct ttm_agp_backend *agp_be;
+
+ agp_be = malloc(sizeof(*agp_be), M_TTM_AGP, M_WAITOK | M_ZERO);
+
+ agp_be->mem = NULL;
+ agp_be->bridge = bridge;
+ agp_be->ttm.func = &ttm_agp_func;
+
+ if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ return NULL;
+ }
+
+ return &agp_be->ttm;
+}
+
+int ttm_agp_tt_populate(struct ttm_tt *ttm)
+{
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ return ttm_pool_populate(ttm);
+}
+
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+#endif
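
The AGP backend above supplies the ttm_backend_func bind/unbind/destroy hooks plus the ttm_agp_tt_create() constructor. A driver would normally select it from its ttm_tt_create hook when the device sits behind an AGP bridge; here is a sketch under that assumption, where mydrv_device, MYDRV_IS_AGP and mydrv_tt_create_system() are illustrative names rather than part of this commit.

struct mydrv_device {
	struct ttm_bo_device	bdev;
	device_t		agp_bridge;
	uint32_t		flags;
#define	MYDRV_IS_AGP	0x1
};

/* Driver-specific system-memory ttm_tt constructor (illustrative). */
struct ttm_tt *mydrv_tt_create_system(struct ttm_bo_device *,
    unsigned long, uint32_t, vm_page_t);

/*
 * Driver ttm_tt_create hook: use the AGP backend when a bridge is
 * present, otherwise fall back to a plain system-memory ttm_tt.
 */
static struct ttm_tt *
mydrv_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
    uint32_t page_flags, vm_page_t dummy_read_page)
{
	struct mydrv_device *mdev;

	mdev = container_of(bdev, struct mydrv_device, bdev);
	if (mdev->flags & MYDRV_IS_AGP)
		return (ttm_agp_tt_create(bdev, mdev->agp_bridge, size,
		    page_flags, dummy_read_page));
	return (mydrv_tt_create_system(bdev, size, page_flags,
	    dummy_read_page));
}
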
diff --git a/sys/dev/drm2/ttm/ttm_bo.c b/sys/dev/drm2/ttm/ttm_bo.c
new file mode 100644
index 0000000..12e5131
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_bo.c
@@ -0,0 +1,1820 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+
+#define TTM_ASSERT_LOCKED(param)
+#define TTM_DEBUG(fmt, arg...)
+#define TTM_BO_HASH_ORDER 13
+
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
+static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);
+
+MALLOC_DEFINE(M_TTM_BO, "ttm_bo", "TTM Buffer Objects");
+
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+ int i;
+
+ for (i = 0; i <= TTM_PL_PRIV5; i++)
+ if (flags & (1 << i)) {
+ *mem_type = i;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
+ printf(" has_type: %d\n", man->has_type);
+ printf(" use_type: %d\n", man->use_type);
+ printf(" flags: 0x%08X\n", man->flags);
+ printf(" gpu_offset: 0x%08lX\n", man->gpu_offset);
+ printf(" size: %ju\n", (uintmax_t)man->size);
+ printf(" available_caching: 0x%08X\n", man->available_caching);
+ printf(" default_caching: 0x%08X\n", man->default_caching);
+ if (mem_type != TTM_PL_SYSTEM)
+ (*man->func->debug)(man, TTM_PFX);
+}
+
+static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ int i, ret, mem_type;
+
+ printf("No space for %p (%lu pages, %luK, %luM)\n",
+ bo, bo->mem.num_pages, bo->mem.size >> 10,
+ bo->mem.size >> 20);
+ for (i = 0; i < placement->num_placement; i++) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
+ if (ret)
+ return;
+ printf(" placement[%d]=0x%08X (%d)\n",
+ i, placement->placement[i], mem_type);
+ ttm_mem_type_debug(bo->bdev, mem_type);
+ }
+}
+
+#if 0
+static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
+ char *buffer)
+{
+
+ return snprintf(buffer, PAGE_SIZE, "%lu\n",
+ (unsigned long) atomic_read(&glob->bo_count));
+}
+#endif
+
+static inline uint32_t ttm_bo_type_flags(unsigned type)
+{
+ return 1 << (type);
+}
+
+static void ttm_bo_release_list(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ size_t acc_size = bo->acc_size;
+
+ MPASS(atomic_read(&bo->list_kref) == 0);
+ MPASS(atomic_read(&bo->kref) == 0);
+ MPASS(atomic_read(&bo->cpu_writers) == 0);
+ MPASS(bo->sync_obj == NULL);
+ MPASS(bo->mem.mm_node == NULL);
+ MPASS(list_empty(&bo->lru));
+ MPASS(list_empty(&bo->ddestroy));
+
+ if (bo->ttm)
+ ttm_tt_destroy(bo->ttm);
+ atomic_dec(&bo->glob->bo_count);
+ if (bo->destroy)
+ bo->destroy(bo);
+ else {
+ free(bo, M_TTM_BO);
+ }
+ ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
+}
+
+int
+ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo, bool interruptible)
+{
+ const char *wmsg;
+ int flags, ret;
+
+ ret = 0;
+ if (interruptible) {
+ flags = PCATCH;
+ wmsg = "ttbowi";
+ } else {
+ flags = 0;
+ wmsg = "ttbowu";
+ }
+ while (!ttm_bo_is_reserved(bo)) {
+ ret = -msleep(bo, &bo->glob->lru_lock, flags, wmsg, 0);
+ if (ret != 0)
+ break;
+ }
+ return (ret);
+}
+
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man;
+
+ MPASS(ttm_bo_is_reserved(bo));
+
+ if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+
+ MPASS(list_empty(&bo->lru));
+
+ man = &bdev->man[bo->mem.mem_type];
+ list_add_tail(&bo->lru, &man->lru);
+ refcount_acquire(&bo->list_kref);
+
+ if (bo->ttm != NULL) {
+ list_add_tail(&bo->swap, &bo->glob->swap_lru);
+ refcount_acquire(&bo->list_kref);
+ }
+ }
+}
+
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+{
+ int put_count = 0;
+
+ if (!list_empty(&bo->swap)) {
+ list_del_init(&bo->swap);
+ ++put_count;
+ }
+ if (!list_empty(&bo->lru)) {
+ list_del_init(&bo->lru);
+ ++put_count;
+ }
+
+ /*
+ * TODO: Add a driver hook to delete from
+ * driver-specific LRU's here.
+ */
+
+ return put_count;
+}
+
+int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence, uint32_t sequence)
+{
+ int ret;
+
+ while (unlikely(atomic_read(&bo->reserved) != 0)) {
+ /**
+ * Deadlock avoidance for multi-bo reserving.
+ */
+ if (use_sequence && bo->seq_valid) {
+ /**
+ * We've already reserved this one.
+ */
+ if (unlikely(sequence == bo->val_seq))
+ return -EDEADLK;
+ /**
+ * Already reserved by a thread that will not back
+ * off for us. We need to back off.
+ */
+ if (unlikely(sequence - bo->val_seq < (1 << 31)))
+ return -EAGAIN;
+ }
+
+ if (no_wait)
+ return -EBUSY;
+
+ ret = ttm_bo_wait_unreserved_locked(bo, interruptible);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ atomic_set(&bo->reserved, 1);
+ if (use_sequence) {
+ /**
+ * Wake up waiters that may need to recheck for deadlock,
+ * if we decreased the sequence number.
+ */
+ if (unlikely((bo->val_seq - sequence < (1 << 31))
+ || !bo->seq_valid))
+ wakeup(bo);
+
+ bo->val_seq = sequence;
+ bo->seq_valid = true;
+ } else {
+ bo->seq_valid = false;
+ }
+
+ return 0;
+}
+
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+ bool never_free)
+{
+ u_int old;
+
+ old = atomic_fetchadd_int(&bo->list_kref, -count);
+ if (old <= count) {
+ if (never_free)
+ panic("ttm_bo_ref_buf");
+ ttm_bo_release_list(bo);
+ }
+}
+
+int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence, uint32_t sequence)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ int put_count = 0;
+ int ret;
+
+ mtx_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
+ sequence);
+ if (likely(ret == 0))
+ put_count = ttm_bo_del_from_lru(bo);
+ mtx_unlock(&glob->lru_lock);
+
+ ttm_bo_list_ref_sub(bo, put_count, true);
+
+ return ret;
+}
+
+void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+{
+ ttm_bo_add_to_lru(bo);
+ atomic_set(&bo->reserved, 0);
+ wakeup(bo);
+}
+
+void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_global *glob = bo->glob;
+
+ mtx_lock(&glob->lru_lock);
+ ttm_bo_unreserve_locked(bo);
+ mtx_unlock(&glob->lru_lock);
+}
+
+/*
+ * Call bo->mutex locked.
+ */
+static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_global *glob = bo->glob;
+ int ret = 0;
+ uint32_t page_flags = 0;
+
+ TTM_ASSERT_LOCKED(&bo->mutex);
+ bo->ttm = NULL;
+
+ if (bdev->need_dma32)
+ page_flags |= TTM_PAGE_FLAG_DMA32;
+
+ switch (bo->type) {
+ case ttm_bo_type_device:
+ if (zero_alloc)
+ page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ case ttm_bo_type_kernel:
+ bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags, glob->dummy_read_page);
+ if (unlikely(bo->ttm == NULL))
+ ret = -ENOMEM;
+ break;
+ case ttm_bo_type_sg:
+ bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags | TTM_PAGE_FLAG_SG,
+ glob->dummy_read_page);
+ if (unlikely(bo->ttm == NULL)) {
+ ret = -ENOMEM;
+ break;
+ }
+ bo->ttm->sg = bo->sg;
+ break;
+ default:
+ printf("[TTM] Illegal buffer object type\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem,
+ bool evict, bool interruptible,
+ bool no_wait_gpu)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
+ bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
+ struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
+ struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
+ int ret = 0;
+
+ if (old_is_pci || new_is_pci ||
+ ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+ ret = ttm_mem_io_lock(old_man, true);
+ if (unlikely(ret != 0))
+ goto out_err;
+ ttm_bo_unmap_virtual_locked(bo);
+ ttm_mem_io_unlock(old_man);
+ }
+
+ /*
+ * Create and bind a ttm if required.
+ */
+
+ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
+ if (bo->ttm == NULL) {
+ bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+ ret = ttm_bo_add_ttm(bo, zero);
+ if (ret)
+ goto out_err;
+ }
+
+ ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
+ if (ret)
+ goto out_err;
+
+ if (mem->mem_type != TTM_PL_SYSTEM) {
+ ret = ttm_tt_bind(bo->ttm, mem);
+ if (ret)
+ goto out_err;
+ }
+
+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+ if (bdev->driver->move_notify)
+ bdev->driver->move_notify(bo, mem);
+ bo->mem = *mem;
+ mem->mm_node = NULL;
+ goto moved;
+ }
+ }
+
+ if (bdev->driver->move_notify)
+ bdev->driver->move_notify(bo, mem);
+
+ if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+ !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
+ else if (bdev->driver->move)
+ ret = bdev->driver->move(bo, evict, interruptible,
+ no_wait_gpu, mem);
+ else
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+
+ if (ret) {
+ if (bdev->driver->move_notify) {
+ struct ttm_mem_reg tmp_mem = *mem;
+ *mem = bo->mem;
+ bo->mem = tmp_mem;
+ bdev->driver->move_notify(bo, mem);
+ bo->mem = *mem;
+ }
+
+ goto out_err;
+ }
+
+moved:
+ if (bo->evicted) {
+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+ if (ret)
+ printf("[TTM] Can not flush read caches\n");
+ bo->evicted = false;
+ }
+
+ if (bo->mem.mm_node) {
+ bo->offset = (bo->mem.start << PAGE_SHIFT) +
+ bdev->man[bo->mem.mem_type].gpu_offset;
+ bo->cur_placement = bo->mem.placement;
+ } else
+ bo->offset = 0;
+
+ return 0;
+
+out_err:
+ new_man = &bdev->man[bo->mem.mem_type];
+ if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
+ ttm_tt_unbind(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * Call bo::reserved.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver specific hooks to release
+ * driver private resources.
+ * Will release the bo::reserved lock.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+ if (bo->bdev->driver->move_notify)
+ bo->bdev->driver->move_notify(bo, NULL);
+
+ if (bo->ttm) {
+ ttm_tt_unbind(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
+ ttm_bo_mem_put(bo, &bo->mem);
+
+ atomic_set(&bo->reserved, 0);
+ wakeup(bo);
+
+ /*
+ * Since the final reference to this bo may not be dropped by
+ * the current task we have to put a memory barrier here to make
+ * sure the changes done in this function are always visible.
+ *
+ * This function only needs protection against the final kref_put.
+ */
+ mb();
+}
+
+static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_global *glob = bo->glob;
+ struct ttm_bo_driver *driver = bdev->driver;
+ void *sync_obj = NULL;
+ int put_count;
+ int ret;
+
+ mtx_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
+ mtx_lock(&bdev->fence_lock);
+ (void) ttm_bo_wait(bo, false, false, true);
+ if (!ret && !bo->sync_obj) {
+ mtx_unlock(&bdev->fence_lock);
+ put_count = ttm_bo_del_from_lru(bo);
+
+ mtx_unlock(&glob->lru_lock);
+ ttm_bo_cleanup_memtype_use(bo);
+
+ ttm_bo_list_ref_sub(bo, put_count, true);
+
+ return;
+ }
+ if (bo->sync_obj)
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ mtx_unlock(&bdev->fence_lock);
+
+ if (!ret) {
+ atomic_set(&bo->reserved, 0);
+ wakeup(bo);
+ }
+
+ refcount_acquire(&bo->list_kref);
+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+ mtx_unlock(&glob->lru_lock);
+
+ if (sync_obj) {
+ driver->sync_obj_flush(sync_obj);
+ driver->sync_obj_unref(&sync_obj);
+ }
+ taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
+ ((hz / 100) < 1) ? 1 : hz / 100);
+}
+
+/**
+ * function ttm_bo_cleanup_refs_and_unlock
+ * If bo idle, remove from delayed- and lru lists, and unref.
+ * If not idle, do nothing.
+ *
+ * Must be called with lru_lock and reservation held, this function
+ * will drop both before returning.
+ *
+ * @interruptible Any sleeps should occur interruptibly.
+ * @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
+ */
+
+static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait_gpu)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+ struct ttm_bo_global *glob = bo->glob;
+ int put_count;
+ int ret;
+
+ mtx_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, true);
+
+ if (ret && !no_wait_gpu) {
+ void *sync_obj;
+
+ /*
+ * Take a reference to the fence and unreserve,
+ * at this point the buffer should be dead, so
+ * no new sync objects can be attached.
+ */
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ mtx_unlock(&bdev->fence_lock);
+
+ atomic_set(&bo->reserved, 0);
+ wakeup(bo);
+ mtx_unlock(&glob->lru_lock);
+
+ ret = driver->sync_obj_wait(sync_obj, false, interruptible);
+ driver->sync_obj_unref(&sync_obj);
+ if (ret)
+ return ret;
+
+ /*
+ * remove sync_obj with ttm_bo_wait, the wait should be
+ * finished, and no new wait object should have been added.
+ */
+ mtx_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, true);
+ mtx_unlock(&bdev->fence_lock);
+ if (ret)
+ return ret;
+
+ mtx_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
+ /*
+ * We raced, and lost, someone else holds the reservation now,
+ * and is probably busy in ttm_bo_cleanup_memtype_use.
+ *
+ * Even if it's not the case, because we finished waiting any
+ * delayed destruction would succeed, so just return success
+ * here.
+ */
+ if (ret) {
+ mtx_unlock(&glob->lru_lock);
+ return 0;
+ }
+ } else
+ mtx_unlock(&bdev->fence_lock);
+
+ if (ret || unlikely(list_empty(&bo->ddestroy))) {
+ atomic_set(&bo->reserved, 0);
+ wakeup(bo);
+ mtx_unlock(&glob->lru_lock);
+ return ret;
+ }
+
+ put_count = ttm_bo_del_from_lru(bo);
+ list_del_init(&bo->ddestroy);
+ ++put_count;
+
+ mtx_unlock(&glob->lru_lock);
+ ttm_bo_cleanup_memtype_use(bo);
+
+ ttm_bo_list_ref_sub(bo, put_count, true);
+
+ return 0;
+}
+
+/**
+ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
+ * encountered buffers.
+ */
+
+static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+{
+ struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_buffer_object *entry = NULL;
+ int ret = 0;
+
+ mtx_lock(&glob->lru_lock);
+ if (list_empty(&bdev->ddestroy))
+ goto out_unlock;
+
+ entry = list_first_entry(&bdev->ddestroy,
+ struct ttm_buffer_object, ddestroy);
+ refcount_acquire(&entry->list_kref);
+
+ for (;;) {
+ struct ttm_buffer_object *nentry = NULL;
+
+ if (entry->ddestroy.next != &bdev->ddestroy) {
+ nentry = list_first_entry(&entry->ddestroy,
+ struct ttm_buffer_object, ddestroy);
+ refcount_acquire(&nentry->list_kref);
+ }
+
+ ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+ if (!ret)
+ ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
+ !remove_all);
+ else
+ mtx_unlock(&glob->lru_lock);
+
+ if (refcount_release(&entry->list_kref))
+ ttm_bo_release_list(entry);
+ entry = nentry;
+
+ if (ret || !entry)
+ goto out;
+
+ mtx_lock(&glob->lru_lock);
+ if (list_empty(&entry->ddestroy))
+ break;
+ }
+
+out_unlock:
+ mtx_unlock(&glob->lru_lock);
+out:
+ if (entry && refcount_release(&entry->list_kref))
+ ttm_bo_release_list(entry);
+ return ret;
+}
+
+static void ttm_bo_delayed_workqueue(void *arg, int pending __unused)
+{
+ struct ttm_bo_device *bdev = arg;
+
+ if (ttm_bo_delayed_delete(bdev, false)) {
+ taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
+ ((hz / 100) < 1) ? 1 : hz / 100);
+ }
+}
+
+static void ttm_bo_release(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+ rw_wlock(&bdev->vm_lock);
+ if (likely(bo->vm_node != NULL)) {
+ RB_REMOVE(ttm_bo_device_buffer_objects,
+ &bdev->addr_space_rb, bo);
+ drm_mm_put_block(bo->vm_node);
+ bo->vm_node = NULL;
+ }
+ rw_wunlock(&bdev->vm_lock);
+ ttm_mem_io_lock(man, false);
+ ttm_mem_io_free_vm(bo);
+ ttm_mem_io_unlock(man);
+ ttm_bo_cleanup_refs_or_queue(bo);
+ if (refcount_release(&bo->list_kref))
+ ttm_bo_release_list(bo);
+}
+
+void ttm_bo_unref(struct ttm_buffer_object **p_bo)
+{
+ struct ttm_buffer_object *bo = *p_bo;
+
+ *p_bo = NULL;
+ if (refcount_release(&bo->kref))
+ ttm_bo_release(bo);
+}
+
+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+{
+ int pending;
+
+ taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, &pending);
+ if (pending)
+ taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
+ return (pending);
+}
+
+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+{
+ if (resched) {
+ taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
+ ((hz / 100) < 1) ? 1 : hz / 100);
+ }
+}
+
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+ bool no_wait_gpu)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_reg evict_mem;
+ struct ttm_placement placement;
+ int ret = 0;
+
+ mtx_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+ mtx_unlock(&bdev->fence_lock);
+
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTART) {
+ printf("[TTM] Failed to expire sync object before buffer eviction\n");
+ }
+ goto out;
+ }
+
+ MPASS(ttm_bo_is_reserved(bo));
+
+ evict_mem = bo->mem;
+ evict_mem.mm_node = NULL;
+ evict_mem.bus.io_reserved_vm = false;
+ evict_mem.bus.io_reserved_count = 0;
+
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ placement.num_placement = 0;
+ placement.num_busy_placement = 0;
+ bdev->driver->evict_flags(bo, &placement);
+ ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+ no_wait_gpu);
+ if (ret) {
+ if (ret != -ERESTART) {
+ printf("[TTM] Failed to find memory space for buffer 0x%p eviction\n",
+ bo);
+ ttm_bo_mem_space_debug(bo, &placement);
+ }
+ goto out;
+ }
+
+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
+ no_wait_gpu);
+ if (ret) {
+ if (ret != -ERESTART)
+ printf("[TTM] Buffer eviction failed\n");
+ ttm_bo_mem_put(bo, &evict_mem);
+ goto out;
+ }
+ bo->evicted = true;
+out:
+ return ret;
+}
+
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+ uint32_t mem_type,
+ bool interruptible,
+ bool no_wait_gpu)
+{
+ struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct ttm_buffer_object *bo;
+ int ret = -EBUSY, put_count;
+
+ mtx_lock(&glob->lru_lock);
+ list_for_each_entry(bo, &man->lru, lru) {
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ if (!ret)
+ break;
+ }
+
+ if (ret) {
+ mtx_unlock(&glob->lru_lock);
+ return ret;
+ }
+
+ refcount_acquire(&bo->list_kref);
+
+ if (!list_empty(&bo->ddestroy)) {
+ ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+ no_wait_gpu);
+ if (refcount_release(&bo->list_kref))
+ ttm_bo_release_list(bo);
+ return ret;
+ }
+
+ put_count = ttm_bo_del_from_lru(bo);
+ mtx_unlock(&glob->lru_lock);
+
+ MPASS(ret == 0);
+
+ ttm_bo_list_ref_sub(bo, put_count, true);
+
+ ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
+ ttm_bo_unreserve(bo);
+
+ if (refcount_release(&bo->list_kref))
+ ttm_bo_release_list(bo);
+ return ret;
+}
+
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
+
+ if (mem->mm_node)
+ (*man->func->put_node)(man, mem);
+}
+
+/**
+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
+ * space, or we've evicted everything and there isn't enough space.
+ */
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+ uint32_t mem_type,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible,
+ bool no_wait_gpu)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ int ret;
+
+ do {
+ ret = (*man->func->get_node)(man, bo, placement, mem);
+ if (unlikely(ret != 0))
+ return ret;
+ if (mem->mm_node)
+ break;
+ ret = ttm_mem_evict_first(bdev, mem_type,
+ interruptible, no_wait_gpu);
+ if (unlikely(ret != 0))
+ return ret;
+ } while (1);
+ if (mem->mm_node == NULL)
+ return -ENOMEM;
+ mem->mem_type = mem_type;
+ return 0;
+}
+
+static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
+ uint32_t cur_placement,
+ uint32_t proposed_placement)
+{
+ uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
+ uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
+
+ /**
+ * Keep current caching if possible.
+ */
+
+ if ((cur_placement & caching) != 0)
+ result |= (cur_placement & caching);
+ else if ((man->default_caching & caching) != 0)
+ result |= man->default_caching;
+ else if ((TTM_PL_FLAG_CACHED & caching) != 0)
+ result |= TTM_PL_FLAG_CACHED;
+ else if ((TTM_PL_FLAG_WC & caching) != 0)
+ result |= TTM_PL_FLAG_WC;
+ else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
+ result |= TTM_PL_FLAG_UNCACHED;
+
+ return result;
+}
+
+static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
+ uint32_t mem_type,
+ uint32_t proposed_placement,
+ uint32_t *masked_placement)
+{
+ uint32_t cur_flags = ttm_bo_type_flags(mem_type);
+
+ if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
+ return false;
+
+ if ((proposed_placement & man->available_caching) == 0)
+ return false;
+
+ cur_flags |= (proposed_placement & man->available_caching);
+
+ *masked_placement = cur_flags;
+ return true;
+}
+
+/**
+ * Creates space for memory region @mem according to its type.
+ *
+ * This function first searches for free space in compatible memory types in
+ * the priority order defined by the driver. If free space isn't found, then
+ * ttm_bo_mem_force_space is attempted in priority order to evict and find
+ * space.
+ */
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible,
+ bool no_wait_gpu)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ uint32_t cur_flags = 0;
+ bool type_found = false;
+ bool type_ok = false;
+ bool has_erestartsys = false;
+ int i, ret;
+
+ mem->mm_node = NULL;
+ for (i = 0; i < placement->num_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
+ if (ret)
+ return ret;
+ man = &bdev->man[mem_type];
+
+ type_ok = ttm_bo_mt_compatible(man,
+ mem_type,
+ placement->placement[i],
+ &cur_flags);
+
+ if (!type_ok)
+ continue;
+
+ cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+ cur_flags);
+ /*
+ * Use the access and other non-mapping-related flag bits from
+ * the memory placement flags to the current flags
+ */
+ ttm_flag_masked(&cur_flags, placement->placement[i],
+ ~TTM_PL_MASK_MEMTYPE);
+
+ if (mem_type == TTM_PL_SYSTEM)
+ break;
+
+ if (man->has_type && man->use_type) {
+ type_found = true;
+ ret = (*man->func->get_node)(man, bo, placement, mem);
+ if (unlikely(ret))
+ return ret;
+ }
+ if (mem->mm_node)
+ break;
+ }
+
+ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
+ mem->mem_type = mem_type;
+ mem->placement = cur_flags;
+ return 0;
+ }
+
+ if (!type_found)
+ return -EINVAL;
+
+ for (i = 0; i < placement->num_busy_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+ &mem_type);
+ if (ret)
+ return ret;
+ man = &bdev->man[mem_type];
+ if (!man->has_type)
+ continue;
+ if (!ttm_bo_mt_compatible(man,
+ mem_type,
+ placement->busy_placement[i],
+ &cur_flags))
+ continue;
+
+ cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+ cur_flags);
+ /*
+ * Use the access and other non-mapping-related flag bits from
+ * the memory placement flags to the current flags
+ */
+ ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+ ~TTM_PL_MASK_MEMTYPE);
+
+
+ if (mem_type == TTM_PL_SYSTEM) {
+ mem->mem_type = mem_type;
+ mem->placement = cur_flags;
+ mem->mm_node = NULL;
+ return 0;
+ }
+
+ ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+ interruptible, no_wait_gpu);
+ if (ret == 0 && mem->mm_node) {
+ mem->placement = cur_flags;
+ return 0;
+ }
+ if (ret == -ERESTART)
+ has_erestartsys = true;
+ }
+ ret = (has_erestartsys) ? -ERESTART : -ENOMEM;
+ return ret;
+}
+
+static
+int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ bool interruptible,
+ bool no_wait_gpu)
+{
+ int ret = 0;
+ struct ttm_mem_reg mem;
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ MPASS(ttm_bo_is_reserved(bo));
+
+ /*
+ * FIXME: It's possible to pipeline buffer moves.
+ * Have the driver move function wait for idle when necessary,
+ * instead of doing it here.
+ */
+ mtx_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+ mtx_unlock(&bdev->fence_lock);
+ if (ret)
+ return ret;
+ mem.num_pages = bo->num_pages;
+ mem.size = mem.num_pages << PAGE_SHIFT;
+ mem.page_alignment = bo->mem.page_alignment;
+ mem.bus.io_reserved_vm = false;
+ mem.bus.io_reserved_count = 0;
+ /*
+ * Determine where to move the buffer.
+ */
+ ret = ttm_bo_mem_space(bo, placement, &mem,
+ interruptible, no_wait_gpu);
+ if (ret)
+ goto out_unlock;
+ ret = ttm_bo_handle_move_mem(bo, &mem, false,
+ interruptible, no_wait_gpu);
+out_unlock:
+ if (ret && mem.mm_node)
+ ttm_bo_mem_put(bo, &mem);
+ return ret;
+}
+
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ int i;
+
+ if (mem->mm_node && placement->lpfn != 0 &&
+ (mem->start < placement->fpfn ||
+ mem->start + mem->num_pages > placement->lpfn))
+ return -1;
+
+ for (i = 0; i < placement->num_placement; i++) {
+ if ((placement->placement[i] & mem->placement &
+ TTM_PL_MASK_CACHING) &&
+ (placement->placement[i] & mem->placement &
+ TTM_PL_MASK_MEM))
+ return i;
+ }
+ return -1;
+}
+
+int ttm_bo_validate(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ bool interruptible,
+ bool no_wait_gpu)
+{
+ int ret;
+
+ MPASS(ttm_bo_is_reserved(bo));
+ /* Check that range is valid */
+ if (placement->lpfn || placement->fpfn)
+ if (placement->fpfn > placement->lpfn ||
+ (placement->lpfn - placement->fpfn) < bo->num_pages)
+ return -EINVAL;
+ /*
+ * Check whether we need to move buffer.
+ */
+ ret = ttm_bo_mem_compat(placement, &bo->mem);
+ if (ret < 0) {
+ ret = ttm_bo_move_buffer(bo, placement, interruptible,
+ no_wait_gpu);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * Use the access and other non-mapping-related flag bits from
+ * the compatible memory placement flags to the active flags
+ */
+ ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+ ~TTM_PL_MASK_MEMTYPE);
+ }
+ /*
+ * We might need to add a TTM.
+ */
+ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+ ret = ttm_bo_add_ttm(bo, true);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ MPASS(!((placement->fpfn || placement->lpfn) &&
+ (bo->mem.num_pages > (placement->lpfn - placement->fpfn))));
+
+ return 0;
+}
+
+int ttm_bo_init(struct ttm_bo_device *bdev,
+ struct ttm_buffer_object *bo,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ bool interruptible,
+ struct vm_object *persistent_swap_storage,
+ size_t acc_size,
+ struct sg_table *sg,
+ void (*destroy) (struct ttm_buffer_object *))
+{
+ int ret = 0;
+ unsigned long num_pages;
+ struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+ if (ret) {
+ printf("[TTM] Out of kernel memory\n");
+ if (destroy)
+ (*destroy)(bo);
+ else
+ free(bo, M_TTM_BO);
+ return -ENOMEM;
+ }
+
+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (num_pages == 0) {
+ printf("[TTM] Illegal buffer object size\n");
+ if (destroy)
+ (*destroy)(bo);
+ else
+ free(bo, M_TTM_BO);
+ ttm_mem_global_free(mem_glob, acc_size);
+ return -EINVAL;
+ }
+ bo->destroy = destroy;
+
+ refcount_init(&bo->kref, 1);
+ refcount_init(&bo->list_kref, 1);
+ atomic_set(&bo->cpu_writers, 0);
+ atomic_set(&bo->reserved, 1);
+ INIT_LIST_HEAD(&bo->lru);
+ INIT_LIST_HEAD(&bo->ddestroy);
+ INIT_LIST_HEAD(&bo->swap);
+ INIT_LIST_HEAD(&bo->io_reserve_lru);
+ bo->bdev = bdev;
+ bo->glob = bdev->glob;
+ bo->type = type;
+ bo->num_pages = num_pages;
+ bo->mem.size = num_pages << PAGE_SHIFT;
+ bo->mem.mem_type = TTM_PL_SYSTEM;
+ bo->mem.num_pages = bo->num_pages;
+ bo->mem.mm_node = NULL;
+ bo->mem.page_alignment = page_alignment;
+ bo->mem.bus.io_reserved_vm = false;
+ bo->mem.bus.io_reserved_count = 0;
+ bo->priv_flags = 0;
+ bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
+ bo->seq_valid = false;
+ bo->persistent_swap_storage = persistent_swap_storage;
+ bo->acc_size = acc_size;
+ bo->sg = sg;
+ atomic_inc(&bo->glob->bo_count);
+
+ ret = ttm_bo_check_placement(bo, placement);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ /*
+ * For ttm_bo_type_device buffers, allocate
+ * address space from the device.
+ */
+ if (bo->type == ttm_bo_type_device ||
+ bo->type == ttm_bo_type_sg) {
+ ret = ttm_bo_setup_vm(bo);
+ if (ret)
+ goto out_err;
+ }
+
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
+ if (ret)
+ goto out_err;
+
+ ttm_bo_unreserve(bo);
+ return 0;
+
+out_err:
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
+
+ return ret;
+}
+
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size)
+{
+ unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+ size_t size = 0;
+
+ size += ttm_round_pot(struct_size);
+ size += PAGE_ALIGN(npages * sizeof(void *));
+ size += ttm_round_pot(sizeof(struct ttm_tt));
+ return size;
+}
+
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size)
+{
+ unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+ size_t size = 0;
+
+ size += ttm_round_pot(struct_size);
+ size += PAGE_ALIGN(npages * sizeof(void *));
+ size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
+ size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+ return size;
+}
+
+int ttm_bo_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ bool interruptible,
+ struct vm_object *persistent_swap_storage,
+ struct ttm_buffer_object **p_bo)
+{
+ struct ttm_buffer_object *bo;
+ size_t acc_size;
+ int ret;
+
+ bo = malloc(sizeof(*bo), M_TTM_BO, M_WAITOK | M_ZERO);
+ acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
+ ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
+ interruptible, persistent_swap_storage, acc_size,
+ NULL, NULL);
+ if (likely(ret == 0))
+ *p_bo = bo;
+
+ return ret;
+}
+
+static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
+ unsigned mem_type, bool allow_errors)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct ttm_bo_global *glob = bdev->glob;
+ int ret;
+
+ /*
+ * Can't use standard list traversal since we're unlocking.
+ */
+
+ mtx_lock(&glob->lru_lock);
+ while (!list_empty(&man->lru)) {
+ mtx_unlock(&glob->lru_lock);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+ if (ret) {
+ if (allow_errors) {
+ return ret;
+ } else {
+ printf("[TTM] Cleanup eviction failed\n");
+ }
+ }
+ mtx_lock(&glob->lru_lock);
+ }
+ mtx_unlock(&glob->lru_lock);
+ return 0;
+}
+
+int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+{
+ struct ttm_mem_type_manager *man;
+ int ret = -EINVAL;
+
+ if (mem_type >= TTM_NUM_MEM_TYPES) {
+ printf("[TTM] Illegal memory type %d\n", mem_type);
+ return ret;
+ }
+ man = &bdev->man[mem_type];
+
+ if (!man->has_type) {
+ printf("[TTM] Trying to take down uninitialized memory manager type %u\n",
+ mem_type);
+ return ret;
+ }
+
+ man->use_type = false;
+ man->has_type = false;
+
+ ret = 0;
+ if (mem_type > 0) {
+ ttm_bo_force_list_clean(bdev, mem_type, false);
+
+ ret = (*man->func->takedown)(man);
+ }
+
+ return ret;
+}
+
+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
+ if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
+ printf("[TTM] Illegal memory manager memory type %u\n", mem_type);
+ return -EINVAL;
+ }
+
+ if (!man->has_type) {
+ printf("[TTM] Memory type %u has not been initialized\n", mem_type);
+ return 0;
+ }
+
+ return ttm_bo_force_list_clean(bdev, mem_type, true);
+}
+
+int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+ unsigned long p_size)
+{
+ int ret = -EINVAL;
+ struct ttm_mem_type_manager *man;
+
+ MPASS(type < TTM_NUM_MEM_TYPES);
+ man = &bdev->man[type];
+ MPASS(!man->has_type);
+ man->io_reserve_fastpath = true;
+ man->use_io_reserve_lru = false;
+ sx_init(&man->io_reserve_mutex, "ttmman");
+ INIT_LIST_HEAD(&man->io_reserve_lru);
+
+ ret = bdev->driver->init_mem_type(bdev, type, man);
+ if (ret)
+ return ret;
+ man->bdev = bdev;
+
+ ret = 0;
+ if (type != TTM_PL_SYSTEM) {
+ ret = (*man->func->init)(man, p_size);
+ if (ret)
+ return ret;
+ }
+ man->has_type = true;
+ man->use_type = true;
+ man->size = p_size;
+
+ INIT_LIST_HEAD(&man->lru);
+
+ return 0;
+}
+
+static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
+{
+
+ ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
+ vm_page_free(glob->dummy_read_page);
+ free(glob, M_DRM_GLOBAL);
+}
+
+void ttm_bo_global_release(struct drm_global_reference *ref)
+{
+ struct ttm_bo_global *glob = ref->object;
+
+ if (refcount_release(&glob->kobj_ref))
+ ttm_bo_global_kobj_release(glob);
+}
+
+int ttm_bo_global_init(struct drm_global_reference *ref)
+{
+ struct ttm_bo_global_ref *bo_ref =
+ container_of(ref, struct ttm_bo_global_ref, ref);
+ struct ttm_bo_global *glob = ref->object;
+ int ret;
+
+ sx_init(&glob->device_list_mutex, "ttmdlm");
+ mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF);
+ glob->mem_glob = bo_ref->mem_glob;
+ glob->dummy_read_page = vm_page_alloc_contig(NULL, 0,
+ VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ,
+ 1, 0, VM_MAX_ADDRESS, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
+
+ if (unlikely(glob->dummy_read_page == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_drp;
+ }
+
+ INIT_LIST_HEAD(&glob->swap_lru);
+ INIT_LIST_HEAD(&glob->device_list);
+
+ ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
+ ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
+ if (unlikely(ret != 0)) {
+ printf("[TTM] Could not register buffer object swapout\n");
+ goto out_no_shrink;
+ }
+
+ atomic_set(&glob->bo_count, 0);
+
+ refcount_init(&glob->kobj_ref, 1);
+ return (0);
+
+out_no_shrink:
+ vm_page_free(glob->dummy_read_page);
+out_no_drp:
+ free(glob, M_DRM_GLOBAL);
+ return ret;
+}
+
+int ttm_bo_device_release(struct ttm_bo_device *bdev)
+{
+ int ret = 0;
+ unsigned i = TTM_NUM_MEM_TYPES;
+ struct ttm_mem_type_manager *man;
+ struct ttm_bo_global *glob = bdev->glob;
+
+ while (i--) {
+ man = &bdev->man[i];
+ if (man->has_type) {
+ man->use_type = false;
+ if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
+ ret = -EBUSY;
+ printf("[TTM] DRM memory manager type %d is not clean\n",
+ i);
+ }
+ man->has_type = false;
+ }
+ }
+
+ sx_xlock(&glob->device_list_mutex);
+ list_del(&bdev->device_list);
+ sx_xunlock(&glob->device_list_mutex);
+
+ if (taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, NULL))
+ taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
+
+ while (ttm_bo_delayed_delete(bdev, true))
+ ;
+
+ mtx_lock(&glob->lru_lock);
+ if (list_empty(&bdev->ddestroy))
+ TTM_DEBUG("Delayed destroy list was clean\n");
+
+ if (list_empty(&bdev->man[0].lru))
+ TTM_DEBUG("Swap list was clean\n");
+ mtx_unlock(&glob->lru_lock);
+
+ MPASS(drm_mm_clean(&bdev->addr_space_mm));
+ rw_wlock(&bdev->vm_lock);
+ drm_mm_takedown(&bdev->addr_space_mm);
+ rw_wunlock(&bdev->vm_lock);
+
+ return ret;
+}
+
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
+ struct ttm_bo_global *glob,
+ struct ttm_bo_driver *driver,
+ uint64_t file_page_offset,
+ bool need_dma32)
+{
+ int ret = -EINVAL;
+
+ rw_init(&bdev->vm_lock, "ttmvml");
+ bdev->driver = driver;
+
+ memset(bdev->man, 0, sizeof(bdev->man));
+
+ /*
+ * Initialize the system memory buffer type.
+ * Other types need to be driver / IOCTL initialized.
+ */
+ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
+ if (unlikely(ret != 0))
+ goto out_no_sys;
+
+ RB_INIT(&bdev->addr_space_rb);
+ ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
+ if (unlikely(ret != 0))
+ goto out_no_addr_mm;
+
+ TIMEOUT_TASK_INIT(taskqueue_thread, &bdev->wq, 0,
+ ttm_bo_delayed_workqueue, bdev);
+ INIT_LIST_HEAD(&bdev->ddestroy);
+ bdev->dev_mapping = NULL;
+ bdev->glob = glob;
+ bdev->need_dma32 = need_dma32;
+ bdev->val_seq = 0;
+ mtx_init(&bdev->fence_lock, "ttmfence", NULL, MTX_DEF);
+ sx_xlock(&glob->device_list_mutex);
+ list_add_tail(&bdev->device_list, &glob->device_list);
+ sx_xunlock(&glob->device_list_mutex);
+
+ return 0;
+out_no_addr_mm:
+ ttm_bo_clean_mm(bdev, 0);
+out_no_sys:
+ return ret;
+}
+
+/*
+ * buffer object vm functions.
+ */
+
+bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
+ if (mem->mem_type == TTM_PL_SYSTEM)
+ return false;
+
+ if (man->flags & TTM_MEMTYPE_FLAG_CMA)
+ return false;
+
+ if (mem->placement & TTM_PL_FLAG_CACHED)
+ return false;
+ }
+ return true;
+}
+
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ /* off_t offset = (off_t)bo->addr_space_offset;XXXKIB */
+ /* off_t holelen = ((off_t)bo->mem.num_pages) << PAGE_SHIFT;XXXKIB */
+
+ if (!bdev->dev_mapping)
+ return;
+ /* unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); XXXKIB */
+ ttm_mem_io_free_vm(bo);
+}
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+ ttm_mem_io_lock(man, false);
+ ttm_bo_unmap_virtual_locked(bo);
+ ttm_mem_io_unlock(man);
+}
+
+static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ /* The caller acquired bdev->vm_lock. */
+ RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
+}
+
+/**
+ * ttm_bo_setup_vm:
+ *
+ * @bo: the buffer to allocate address space for
+ *
+ * Allocate address space in the drm device so that applications
+ * can mmap the buffer and access the contents. This only
+ * applies to ttm_bo_type_device objects as others are not
+ * placed in the drm device address space.
+ */
+
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ int ret;
+
+retry_pre_get:
+ ret = drm_mm_pre_get(&bdev->addr_space_mm);
+ if (unlikely(ret != 0))
+ return ret;
+
+ rw_wlock(&bdev->vm_lock);
+ bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
+ bo->mem.num_pages, 0, 0);
+
+ if (unlikely(bo->vm_node == NULL)) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
+ bo->mem.num_pages, 0);
+
+ if (unlikely(bo->vm_node == NULL)) {
+ rw_wunlock(&bdev->vm_lock);
+ goto retry_pre_get;
+ }
+
+ ttm_bo_vm_insert_rb(bo);
+ rw_wunlock(&bdev->vm_lock);
+ bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
+
+ return 0;
+out_unlock:
+ rw_wunlock(&bdev->vm_lock);
+ return ret;
+}
+
+int ttm_bo_wait(struct ttm_buffer_object *bo,
+ bool lazy, bool interruptible, bool no_wait)
+{
+ struct ttm_bo_driver *driver = bo->bdev->driver;
+ struct ttm_bo_device *bdev = bo->bdev;
+ void *sync_obj;
+ int ret = 0;
+
+ if (likely(bo->sync_obj == NULL))
+ return 0;
+
+ while (bo->sync_obj) {
+
+ if (driver->sync_obj_signaled(bo->sync_obj)) {
+ void *tmp_obj = bo->sync_obj;
+ bo->sync_obj = NULL;
+ clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+ mtx_unlock(&bdev->fence_lock);
+ driver->sync_obj_unref(&tmp_obj);
+ mtx_lock(&bdev->fence_lock);
+ continue;
+ }
+
+ if (no_wait)
+ return -EBUSY;
+
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ mtx_unlock(&bdev->fence_lock);
+ ret = driver->sync_obj_wait(sync_obj,
+ lazy, interruptible);
+ if (unlikely(ret != 0)) {
+ driver->sync_obj_unref(&sync_obj);
+ mtx_lock(&bdev->fence_lock);
+ return ret;
+ }
+ mtx_lock(&bdev->fence_lock);
+ if (likely(bo->sync_obj == sync_obj)) {
+ void *tmp_obj = bo->sync_obj;
+ bo->sync_obj = NULL;
+ clear_bit(TTM_BO_PRIV_FLAG_MOVING,
+ &bo->priv_flags);
+ mtx_unlock(&bdev->fence_lock);
+ driver->sync_obj_unref(&sync_obj);
+ driver->sync_obj_unref(&tmp_obj);
+ mtx_lock(&bdev->fence_lock);
+ } else {
+ mtx_unlock(&bdev->fence_lock);
+ driver->sync_obj_unref(&sync_obj);
+ mtx_lock(&bdev->fence_lock);
+ }
+ }
+ return 0;
+}
+
+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ int ret = 0;
+
+ /*
+ * Using ttm_bo_reserve makes sure the lru lists are updated.
+ */
+
+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+ if (unlikely(ret != 0))
+ return ret;
+ mtx_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, true, no_wait);
+ mtx_unlock(&bdev->fence_lock);
+ if (likely(ret == 0))
+ atomic_inc(&bo->cpu_writers);
+ ttm_bo_unreserve(bo);
+ return ret;
+}
+
+void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
+{
+ atomic_dec(&bo->cpu_writers);
+}
+
+/**
+ * A buffer object shrink method that tries to swap out the first
+ * buffer object on the bo_global::swap_lru list.
+ */
+
+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+{
+ struct ttm_bo_global *glob =
+ container_of(shrink, struct ttm_bo_global, shrink);
+ struct ttm_buffer_object *bo;
+ int ret = -EBUSY;
+ int put_count;
+ uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
+
+ mtx_lock(&glob->lru_lock);
+ list_for_each_entry(bo, &glob->swap_lru, swap) {
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ if (!ret)
+ break;
+ }
+
+ if (ret) {
+ mtx_unlock(&glob->lru_lock);
+ return ret;
+ }
+
+ refcount_acquire(&bo->list_kref);
+
+ if (!list_empty(&bo->ddestroy)) {
+ ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+ if (refcount_release(&bo->list_kref))
+ ttm_bo_release_list(bo);
+ return ret;
+ }
+
+ put_count = ttm_bo_del_from_lru(bo);
+ mtx_unlock(&glob->lru_lock);
+
+ ttm_bo_list_ref_sub(bo, put_count, true);
+
+ /**
+ * Wait for GPU, then move to system cached.
+ */
+
+ mtx_lock(&bo->bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, false);
+ mtx_unlock(&bo->bdev->fence_lock);
+
+ if (unlikely(ret != 0))
+ goto out;
+
+ if ((bo->mem.placement & swap_placement) != swap_placement) {
+ struct ttm_mem_reg evict_mem;
+
+ evict_mem = bo->mem;
+ evict_mem.mm_node = NULL;
+ evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+ evict_mem.mem_type = TTM_PL_SYSTEM;
+
+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
+ false, false);
+ if (unlikely(ret != 0))
+ goto out;
+ }
+
+ ttm_bo_unmap_virtual(bo);
+
+ /**
+ * Swap out. Buffer will be swapped in again as soon as
+ * anyone tries to access a ttm page.
+ */
+
+ if (bo->bdev->driver->swap_notify)
+ bo->bdev->driver->swap_notify(bo);
+
+ ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
+out:
+
+ /**
+ *
+ * Unreserve without putting on LRU to avoid swapping out an
+ * already swapped buffer.
+ */
+
+ atomic_set(&bo->reserved, 0);
+ wakeup(bo);
+ if (refcount_release(&bo->list_kref))
+ ttm_bo_release_list(bo);
+ return ret;
+}
+
+void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
+{
+ while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
+ ;
+}
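
ttm_bo.c above implements the core object life cycle: ttm_bo_create()/ttm_bo_init() account for and initialize the object, ttm_bo_reserve() takes the deadlock-aware reservation, ttm_bo_validate() moves the object into a requested placement (evicting through the LRU when needed), and ttm_bo_unref() drops the final reference onto the delayed-destroy path. A minimal sketch of that cycle, assuming a bdev already set up with ttm_bo_device_init(); cached system memory is used only to keep the example self-contained.

/*
 * Create a small kernel buffer object, validate it into cached system
 * memory, then drop the reference.  Real drivers would use their own
 * VRAM/GTT placement flags instead.
 */
static int
example_bo_cycle(struct ttm_bo_device *bdev)
{
	static const uint32_t flags[] = {
		TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	};
	struct ttm_placement placement;
	struct ttm_buffer_object *bo;
	int ret;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = flags;
	placement.num_busy_placement = 1;
	placement.busy_placement = flags;

	ret = ttm_bo_create(bdev, 4 * PAGE_SIZE, ttm_bo_type_kernel,
	    &placement, 0, false, NULL, &bo);
	if (ret != 0)
		return (ret);

	/* The reservation must be held across validation. */
	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (ret == 0) {
		ret = ttm_bo_validate(bo, &placement, true, false);
		ttm_bo_unreserve(bo);
	}

	ttm_bo_unref(&bo);	/* drops the last reference */
	return (ret);
}
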
diff --git a/sys/dev/drm2/ttm/ttm_bo_api.h b/sys/dev/drm2/ttm/ttm_bo_api.h
new file mode 100644
index 0000000..4b16ebd
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_bo_api.h
@@ -0,0 +1,740 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+
+#ifndef _TTM_BO_API_H_
+#define _TTM_BO_API_H_
+
+#include <dev/drm2/drmP.h>
+
+struct ttm_bo_device;
+
+struct drm_mm_node;
+
+
+/**
+ * struct ttm_placement
+ *
+ * @fpfn: first valid page frame number to put the object
+ * @lpfn: last valid page frame number to put the object
+ * @num_placement: number of preferred placements
+ * @placement: preferred placements
+ * @num_busy_placement: number of preferred placements when need to evict buffer
+ * @busy_placement: preferred placements when need to evict buffer
+ *
+ * Structure indicating the placement you request for an object.
+ */
+struct ttm_placement {
+ unsigned fpfn;
+ unsigned lpfn;
+ unsigned num_placement;
+ const uint32_t *placement;
+ unsigned num_busy_placement;
+ const uint32_t *busy_placement;
+};
+
+/**
+ * struct ttm_bus_placement
+ *
+ * @addr: mapped virtual address
+ * @base: bus base address
+ * @is_iomem: is this io memory ?
+ * @size: size in byte
+ * @offset: offset from the base address
+ * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
+ * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+ void *addr;
+ unsigned long base;
+ unsigned long size;
+ unsigned long offset;
+ bool is_iomem;
+ bool io_reserved_vm;
+ uint64_t io_reserved_count;
+};
+
+
+/**
+ * struct ttm_mem_reg
+ *
+ * @mm_node: Memory manager node.
+ * @size: Requested size of memory region.
+ * @num_pages: Actual size of memory region in pages.
+ * @page_alignment: Page alignment.
+ * @placement: Placement flags.
+ * @bus: Placement on io bus accessible to the CPU
+ *
+ * Structure indicating the placement and space resources used by a
+ * buffer object.
+ */
+
+struct ttm_mem_reg {
+ void *mm_node;
+ unsigned long start;
+ unsigned long size;
+ unsigned long num_pages;
+ uint32_t page_alignment;
+ uint32_t mem_type;
+ uint32_t placement;
+ struct ttm_bus_placement bus;
+};
+
+/**
+ * enum ttm_bo_type
+ *
+ * @ttm_bo_type_device: These are 'normal' buffers that can
+ * be mmapped by user space. Each of these bos occupy a slot in the
+ * device address space, that can be used for normal vm operations.
+ *
+ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
+ * but they cannot be accessed from user-space. For kernel-only use.
+ *
+ * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
+ * driver.
+ */
+
+enum ttm_bo_type {
+ ttm_bo_type_device,
+ ttm_bo_type_kernel,
+ ttm_bo_type_sg
+};
+
+struct ttm_tt;
+
+/**
+ * struct ttm_buffer_object
+ *
+ * @bdev: Pointer to the buffer object device structure.
+ * @type: The bo type.
+ * @destroy: Destruction function. If NULL, kfree is used.
+ * @num_pages: Actual number of pages.
+ * @addr_space_offset: Address space offset.
+ * @acc_size: Accounted size for this object.
+ * @kref: Reference count of this buffer object. When this refcount reaches
+ * zero, the object is put on the delayed delete list.
+ * @list_kref: List reference count of this buffer object. This member is
+ * used to avoid destruction while the buffer object is still on a list.
+ * Lru lists may keep one refcount, the delayed delete list, and kref != 0
+ * keeps one refcount. When this refcount reaches zero,
+ * the object is destroyed.
+ * @event_queue: Queue for processes waiting on buffer object status change.
+ * @mem: structure describing current placement.
+ * @persistent_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistent shmem object.
+ * @ttm: TTM structure holding system pages.
+ * @evicted: Whether the object was evicted without user-space knowing.
+ * @cpu_writes: For synchronization. Number of cpu writers.
+ * @lru: List head for the lru list.
+ * @ddestroy: List head for the delayed destroy list.
+ * @swap: List head for swap LRU list.
+ * @val_seq: Sequence of the validation holding the @reserved lock.
+ * Used to avoid starvation when many processes compete to validate the
+ * buffer. This member is protected by the bo_device::lru_lock.
+ * @seq_valid: The value of @val_seq is valid. This value is protected by
+ * the bo_device::lru_lock.
+ * @reserved: Deadlock-free lock used for synchronization state transitions.
+ * @sync_obj: Pointer to a synchronization object.
+ * @priv_flags: Flags describing buffer object internal state.
+ * @vm_rb: Rb node for the vm rb tree.
+ * @vm_node: Address space manager node.
+ * @offset: The current GPU offset, which can have different meanings
+ * depending on the memory type. For SYSTEM type memory, it should be 0.
+ * @cur_placement: Hint of current placement.
+ *
+ * Base class for TTM buffer object, that deals with data placement and CPU
+ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
+ * the driver can usually use the placement offset @offset directly as the
+ * GPU virtual address. For drivers implementing multiple
+ * GPU memory manager contexts, the driver should manage the address space
+ * in these contexts separately and use these objects to get the correct
+ * placement and caching for these GPU maps. This makes it possible to use
+ * these objects for even quite elaborate memory management schemes.
+ * The destroy member, together with the API visibility of this object, makes
+ * it possible to derive driver-specific types.
+ */
+
+struct ttm_buffer_object {
+ /**
+ * Members constant at init.
+ */
+
+ struct ttm_bo_global *glob;
+ struct ttm_bo_device *bdev;
+ enum ttm_bo_type type;
+ void (*destroy) (struct ttm_buffer_object *);
+ unsigned long num_pages;
+ uint64_t addr_space_offset;
+ size_t acc_size;
+
+ /**
+ * Members not needing protection.
+ */
+
+ u_int kref;
+ u_int list_kref;
+ /* wait_queue_head_t event_queue; */
+
+ /**
+ * Members protected by the bo::reserved lock.
+ */
+
+ struct ttm_mem_reg mem;
+ struct vm_object *persistent_swap_storage;
+ struct ttm_tt *ttm;
+ bool evicted;
+
+ /**
+ * Members protected by the bo::reserved lock only when written to.
+ */
+
+ atomic_t cpu_writers;
+
+ /**
+ * Members protected by the bdev::lru_lock.
+ */
+
+ struct list_head lru;
+ struct list_head ddestroy;
+ struct list_head swap;
+ struct list_head io_reserve_lru;
+ uint32_t val_seq;
+ bool seq_valid;
+
+ /**
+ * Members protected by the bdev::lru_lock
+ * only when written to.
+ */
+
+ atomic_t reserved;
+
+ /**
+ * Members protected by struct buffer_object_device::fence_lock
+ * In addition, setting sync_obj to anything else
+ * than NULL requires bo::reserved to be held. This allows for
+ * checking NULL while reserved but not holding the mentioned lock.
+ */
+
+ void *sync_obj;
+ unsigned long priv_flags;
+
+ /**
+ * Members protected by the bdev::vm_lock
+ */
+
+ RB_ENTRY(ttm_buffer_object) vm_rb;
+ struct drm_mm_node *vm_node;
+
+
+ /**
+ * Special members that are protected by the reserve lock
+ * and the bo::lock when written to. Can be read with
+ * either of these locks held.
+ */
+
+ unsigned long offset;
+ uint32_t cur_placement;
+
+ struct sg_table *sg;
+};
+
+/**
+ * struct ttm_bo_kmap_obj
+ *
+ * @virtual: The current kernel virtual address.
+ * @page: The page when kmap'ing a single page.
+ * @bo_kmap_type: Type of bo_kmap.
+ *
+ * Object describing a kernel mapping. Since a TTM bo may be located
+ * in various memory types with various caching policies, the
+ * mapping can either be an ioremap, a vmap, a kmap or part of a
+ * premapped region.
+ */
+
+#define TTM_BO_MAP_IOMEM_MASK 0x80
+struct ttm_bo_kmap_obj {
+ void *virtual;
+ struct vm_page *page;
+ struct sf_buf *sf;
+ int num_pages;
+ unsigned long size;
+ enum {
+ ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
+ ttm_bo_map_vmap = 2,
+ ttm_bo_map_kmap = 3,
+ ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
+ } bo_kmap_type;
+ struct ttm_buffer_object *bo;
+};
+
+/**
+ * ttm_bo_reference - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ *
+ * Returns a refcounted pointer to a buffer object.
+ */
+
+static inline struct ttm_buffer_object *
+ttm_bo_reference(struct ttm_buffer_object *bo)
+{
+ refcount_acquire(&bo->kref);
+ return bo;
+}
+
+/**
+ * ttm_bo_wait - wait for buffer idle.
+ *
+ * @bo: The buffer object.
+ * @interruptible: Use interruptible wait.
+ * @no_wait: Return immediately if buffer is busy.
+ *
+ * This function must be called with the bo::mutex held, and makes
+ * sure any previous rendering to the buffer is completed.
+ * Note: It might be necessary to block validations before the
+ * wait by reserving the buffer.
+ * Returns -EBUSY if no_wait is true and the buffer is busy.
+ * Returns -ERESTARTSYS if interrupted by a signal.
+ */
+extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
+ bool interruptible, bool no_wait);
+/**
+ * ttm_bo_validate
+ *
+ * @bo: The buffer object.
+ * @placement: Proposed placement for the buffer object.
+ * @interruptible: Sleep interruptible if sleeping.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ *
+ * Changes placement and caching policy of the buffer object
+ * according to the proposed placement.
+ * Returns
+ * -EINVAL on invalid proposed placement.
+ * -ENOMEM on out-of-memory condition.
+ * -EBUSY if no_wait is true and buffer busy.
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+extern int ttm_bo_validate(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ bool interruptible,
+ bool no_wait_gpu);
+
+/**
+ * ttm_bo_unref
+ *
+ * @bo: The buffer object.
+ *
+ * Unreference and clear a pointer to a buffer object.
+ */
+extern void ttm_bo_unref(struct ttm_buffer_object **bo);
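A short usage sketch (not part of the patch) of the reference/unref pair; the example_* helpers are hypothetical:

/* Illustrative only: stash and later drop a reference to a bo. */
static struct ttm_buffer_object *
example_stash_bo(struct ttm_buffer_object *bo)
{

	/* The returned pointer carries its own reference. */
	return ttm_bo_reference(bo);
}

static void
example_release_bo(struct ttm_buffer_object **bo_p)
{

	/* Drops the reference and clears the caller's pointer. */
	ttm_bo_unref(bo_p);
}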
+
+
+/**
+ * ttm_bo_list_ref_sub
+ *
+ * @bo: The buffer object.
+ * @count: The number of references with which to decrease @bo::list_kref;
+ * @never_free: The refcount should not reach zero with this operation.
+ *
+ * Release @count lru list references to this buffer object.
+ */
+extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+ bool never_free);
+
+/**
+ * ttm_bo_add_to_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Add this bo to the relevant mem type lru and, if it's backed by
+ * system pages (ttms) to the swap list.
+ * This function must be called with struct ttm_bo_global::lru_lock held, and
+ * is typically called immediately prior to unreserving a bo.
+ */
+extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_del_from_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Remove this bo from all lru lists used to lookup and reserve an object.
+ * This function must be called with struct ttm_bo_global::lru_lock held,
+ * and is usually called just immediately after the bo has been reserved to
+ * avoid recursive reservation from lru lists.
+ */
+extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
+
+
+/**
+ * ttm_bo_lock_delayed_workqueue
+ *
+ * Prevent the delayed workqueue from running.
+ * Returns
+ * True if the workqueue was queued at the time
+ */
+extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_unlock_delayed_workqueue
+ *
+ * Allows the delayed workqueue to run.
+ */
+extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
+ int resched);
+
+/**
+ * ttm_bo_synccpu_write_grab
+ *
+ * @bo: The buffer object:
+ * @no_wait: Return immediately if buffer is busy.
+ *
+ * Synchronizes a buffer object for CPU RW access. This means
+ * command submission that affects the buffer will return -EBUSY
+ * until ttm_bo_synccpu_write_release is called.
+ *
+ * Returns
+ * -EBUSY if the buffer is busy and no_wait is true.
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+extern int
+ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
+
+/**
+ * ttm_bo_synccpu_write_release:
+ *
+ * @bo : The buffer object.
+ *
+ * Releases a synccpu lock.
+ */
+extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_acc_size
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo_size: size of the buffer object in bytes.
+ * @struct_size: size of the structure holding buffer object data
+ *
+ * Returns size to account for a buffer object
+ */
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size);
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size);
+
+/**
+ * ttm_bo_init
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @flags: Initial placement flags.
+ * @page_alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep to wait for GPU resources,
+ * sleep interruptible.
+ * @persistent_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistent shmem object. Typically, this would
+ * point to the shmem object backing a GEM object if TTM is used to back a
+ * GEM user interface.
+ * @acc_size: Accounted size for this object.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function,
+ * enables driver-specific objects derived from a ttm_buffer_object.
+ * On successful return, the object kref and list_kref are set to 1.
+ * If a failure occurs, the function will call the @destroy function, or
+ * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
+ * illegal and will likely cause memory corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+
+extern int ttm_bo_init(struct ttm_bo_device *bdev,
+ struct ttm_buffer_object *bo,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ bool interruptible,
+ struct vm_object *persistent_swap_storage,
+ size_t acc_size,
+ struct sg_table *sg,
+ void (*destroy) (struct ttm_buffer_object *));
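To make the init/destroy contract concrete, here is a hedged sketch of a driver object embedding a ttm_buffer_object; example_bo, M_EXAMPLE and the zero page alignment are assumptions of the sketch, not part of this change:

MALLOC_DEFINE(M_EXAMPLE, "example_bo", "example TTM buffer objects");

/* Illustrative only: a driver object embedding a ttm_buffer_object. */
struct example_bo {
	struct ttm_buffer_object base;	/* kept first so the cast below works */
	int driver_private;
};

static void
example_bo_destroy(struct ttm_buffer_object *bo)
{
	struct example_bo *ebo = (struct example_bo *)bo;

	free(ebo, M_EXAMPLE);
}

static int
example_bo_new(struct ttm_bo_device *bdev, unsigned long size,
    struct ttm_placement *placement, struct example_bo **out)
{
	struct example_bo *ebo;
	size_t acc_size;
	int ret;

	ebo = malloc(sizeof(*ebo), M_EXAMPLE, M_WAITOK | M_ZERO);
	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct example_bo));
	/* On failure ttm_bo_init() calls example_bo_destroy() for us. */
	ret = ttm_bo_init(bdev, &ebo->base, size, ttm_bo_type_device,
	    placement, 0 /* page_alignment */, false /* interruptible */,
	    NULL /* persistent_swap_storage */, acc_size, NULL /* sg */,
	    example_bo_destroy);
	if (ret != 0)
		return ret;
	*out = ebo;
	return 0;
}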
+
+/**
+ * ttm_bo_create
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @flags: Initial placement flags.
+ * @page_alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep while waiting for GPU resources,
+ * sleep interruptible.
+ * @persistent_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistent shmem object. Typically, this would
+ * point to the shmem object backing a GEM object if TTM is used to back a
+ * GEM user interface.
+ * @p_bo: On successful completion *p_bo points to the created object.
+ *
+ * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
+ * on that object. The destroy function is set to kfree().
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while waiting for resources.
+ */
+
+extern int ttm_bo_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ bool interruptible,
+ struct vm_object *persistent_swap_storage,
+ struct ttm_buffer_object **p_bo);
+
+/**
+ * ttm_bo_check_placement
+ *
+ * @bo: the buffer object.
+ * @placement: placements
+ *
+ * Performs minimal validity checking on an intended change of
+ * placement flags.
+ * Returns
+ * -EINVAL: Intended change is invalid or not allowed.
+ */
+extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+
+/**
+ * ttm_bo_init_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ * @p_size: size of the managed area in pages.
+ *
+ * Initialize a manager for a given memory type.
+ * Note: if part of driver firstopen, it must be protected from a
+ * potentially racing lastclose.
+ * Returns:
+ * -EINVAL: invalid size or memory type.
+ * -ENOMEM: Not enough memory.
+ * May also return driver-specified errors.
+ */
+
+extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+ unsigned long p_size);
+/**
+ * ttm_bo_clean_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ *
+ * Take down a manager for a given memory type after first walking
+ * the LRU list to evict any buffers left alive.
+ *
+ * Normally, this function is part of lastclose() or unload(), and at that
+ * point there shouldn't be any buffers left created by user-space, since
+ * they should've been removed by the file descriptor release() method.
+ * However, before this function is run, make sure to signal all sync objects,
+ * and verify that the delayed delete queue is empty. The driver must also
+ * make sure that there are no NO_EVICT buffers present in this memory type
+ * when the call is made.
+ *
+ * If this function is part of a VT switch, the caller must make sure that
+ * there are no applications currently validating buffers before this
+ * function is called. The caller can do that by first taking the
+ * struct ttm_bo_device::ttm_lock in write mode.
+ *
+ * Returns:
+ * -EINVAL: invalid or uninitialized memory type.
+ * -EBUSY: There are still buffers left in this memory type.
+ */
+
+extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+
+/**
+ * ttm_bo_evict_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ *
+ * Evicts all buffers on the lru list of the memory type.
+ * This is normally part of a VT switch or an
+ * out-of-memory-space-due-to-fragmentation handler.
+ * The caller must make sure that there are no other processes
+ * currently validating buffers, and can do that by taking the
+ * struct ttm_bo_device::ttm_lock in write mode.
+ *
+ * Returns:
+ * -EINVAL: Invalid or uninitialized memory type.
+ * -ERESTARTSYS: The call was interrupted by a signal while waiting to
+ * evict a buffer.
+ */
+
+extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+
+/**
+ * ttm_kmap_obj_virtual
+ *
+ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
+ * @is_iomem: Pointer to an integer that on return indicates 1 if the
+ * virtual map is io memory, 0 if normal memory.
+ *
+ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
+ * If *is_iomem is 1 on return, the virtual address points to an io memory area
+ * that should strictly be accessed using the iowriteXX() and similar functions.
+ */
+
+static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
+ bool *is_iomem)
+{
+ *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
+ return map->virtual;
+}
+
+/**
+ * ttm_bo_kmap
+ *
+ * @bo: The buffer object.
+ * @start_page: The first page to map.
+ * @num_pages: Number of pages to map.
+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
+ *
+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
+ * used to obtain a virtual address to the data.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
+
+extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
+ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
+
+/**
+ * ttm_bo_kunmap
+ *
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_kmap.
+ */
+
+extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
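A minimal usage sketch of the kmap helpers, assuming the bo is already reserved and idle as the comments above require; memset() stands in for whatever access the caller actually needs:

/* Illustrative only: zero the first page of an already reserved, idle bo. */
static int
example_zero_first_page(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (ret != 0)
		return ret;
	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	/*
	 * A real caller should use proper io accessors when is_iomem is
	 * true; memset() keeps this sketch short.
	 */
	memset(virtual, 0, PAGE_SIZE);
	ttm_bo_kunmap(&map);
	return 0;
}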
+
+/**
+ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
+ *
+ * @vma: vma as input from the fbdev mmap method.
+ * @bo: The bo backing the address space. The address space will
+ * have the same size as the bo, and start at offset 0.
+ *
+ * This function is intended to be called by the fbdev mmap method
+ * if the fbdev address space is to be backed by a bo.
+ */
+
+/* XXXKIB
+extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
+ struct ttm_buffer_object *bo);
+*/
+/**
+ * ttm_bo_mmap - mmap out of the ttm device address space.
+ *
+ * @filp: filp as input from the mmap method.
+ * @vma: vma as input from the mmap method.
+ * @bdev: Pointer to the ttm_bo_device with the address space manager.
+ *
+ * This function is intended to be called by the device mmap method
+ * if the device address space is to be backed by the bo manager.
+ */
+/* XXXKIB
+extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+ struct ttm_bo_device *bdev);
+*/
+/**
+ * ttm_bo_io
+ *
+ * @bdev: Pointer to the struct ttm_bo_device.
+ * @filp: Pointer to the struct file attempting to read / write.
+ * @wbuf: User-space pointer to address of buffer to write. NULL on read.
+ * @rbuf: User-space pointer to address of buffer to read into.
+ * Null on write.
+ * @count: Number of bytes to read / write.
+ * @f_pos: Pointer to current file position.
+ * @write: 1 for write, 0 for read.
+ *
+ * This function implements read / write into ttm buffer objects, and is
+ * intended to be called from the fops::read and fops::write methods.
+ * Returns:
+ * See man (2) write, man(2) read. In particular,
+ * the function may return -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+
+extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+ const char *wbuf, char *rbuf,
+ size_t count, off_t *f_pos, bool write);
+
+extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
+ *
+ * @bo: The buffer object to check.
+ *
+ * This function returns an indication of whether a bo is reserved or not, and
+ * should only be used to print an error when it is unexpectedly unreserved due
+ * to incorrect api usage, since there's no guarantee that the caller is the
+ * one holding the reservation.
+ */
+static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
+{
+ return atomic_read(&bo->reserved);
+}
+
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_bo_driver.h b/sys/dev/drm2/ttm/ttm_bo_driver.h
new file mode 100644
index 0000000..3f08976
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_bo_driver.h
@@ -0,0 +1,1018 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+
+#ifndef _TTM_BO_DRIVER_H_
+#define _TTM_BO_DRIVER_H_
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_bo_api.h>
+#include <dev/drm2/ttm/ttm_memory.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/drm_global.h>
+#include <sys/rwlock.h>
+#include <sys/tree.h>
+
+struct ttm_backend_func {
+ /**
+ * struct ttm_backend_func member bind
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ * @bo_mem: Pointer to a struct ttm_mem_reg describing the
+ * memory type and location for binding.
+ *
+ * Bind the backend pages into the aperture in the location
+ * indicated by @bo_mem. This function should be able to handle
+ * differences between aperture and system page sizes.
+ */
+ int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+ /**
+ * struct ttm_backend_func member unbind
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+ * Unbind previously bound backend pages. This function should be
+ * able to handle differences between aperture and system page sizes.
+ */
+ int (*unbind) (struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_backend_func member destroy
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+ * Destroy the backend. This will be called back from ttm_tt_destroy, so
+ * don't call ttm_tt_destroy from the callback or it will loop forever.
+ */
+ void (*destroy) (struct ttm_tt *ttm);
+};
+
+#define TTM_PAGE_FLAG_WRITE (1 << 3)
+#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
+#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
+#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
+#define TTM_PAGE_FLAG_DMA32 (1 << 7)
+#define TTM_PAGE_FLAG_SG (1 << 8)
+
+enum ttm_caching_state {
+ tt_uncached,
+ tt_wc,
+ tt_cached
+};
+
+/**
+ * struct ttm_tt
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @func: Pointer to a struct ttm_backend_func that describes
+ * the backend methods.
+ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
+ * pointer.
+ * @pages: Array of pages backing the data.
+ * @num_pages: Number of pages in the page array.
+ * @bdev: Pointer to the current struct ttm_bo_device.
+ * @be: Pointer to the ttm backend.
+ * @swap_storage: Pointer to shmem struct file for swap storage.
+ * @caching_state: The current caching state of the pages.
+ * @state: The current binding state of the pages.
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+
+struct ttm_tt {
+ struct ttm_bo_device *bdev;
+ struct ttm_backend_func *func;
+ struct vm_page *dummy_read_page;
+ struct vm_page **pages;
+ uint32_t page_flags;
+ unsigned long num_pages;
+ struct sg_table *sg; /* for SG objects via dma-buf */
+ struct ttm_bo_global *glob;
+ struct vm_object *swap_storage;
+ enum ttm_caching_state caching_state;
+ enum {
+ tt_bound,
+ tt_unbound,
+ tt_unpopulated,
+ } state;
+};
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages
+ * @pages_list: used by some page allocation backend
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+ struct ttm_tt ttm;
+ dma_addr_t *dma_address;
+ struct list_head pages_list;
+};
+
+#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
+#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
+#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
+
+struct ttm_mem_type_manager;
+
+struct ttm_mem_type_manager_func {
+ /**
+ * struct ttm_mem_type_manager member init
+ *
+ * @man: Pointer to a memory type manager.
+ * @p_size: Implementation dependent, but typically the size of the
+ * range to be managed in pages.
+ *
+ * Called to initialize a private range manager. The function is
+ * expected to initialize the man::priv member.
+ * Returns 0 on success, negative error code on failure.
+ */
+ int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+
+ /**
+ * struct ttm_mem_type_manager member takedown
+ *
+ * @man: Pointer to a memory type manager.
+ *
+ * Called to undo the setup done in init. All allocated resources
+ * should be freed.
+ */
+ int (*takedown)(struct ttm_mem_type_manager *man);
+
+ /**
+ * struct ttm_mem_type_manager member get_node
+ *
+ * @man: Pointer to a memory type manager.
+ * @bo: Pointer to the buffer object we're allocating space for.
+ * @placement: Placement details.
+ * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+ *
+ * This function should allocate space in the memory type managed
+ * by @man. Placement details if
+ * applicable are given by @placement. If successful,
+ * @mem::mm_node should be set to a non-null value, and
+ * @mem::start should be set to a value identifying the beginning
+ * of the range allocated, and the function should return zero.
+ * If the memory region cannot accommodate the buffer object, @mem::mm_node
+ * should be set to NULL, and the function should return 0.
+ * If a system error occurred, preventing the request to be fulfilled,
+ * the function should return a negative error code.
+ *
+ * Note that @mem::mm_node will only be dereferenced by
+ * struct ttm_mem_type_manager functions and optionally by the driver,
+ * which has knowledge of the underlying type.
+ *
+ * This function may not be called from within atomic context, so
+ * an implementation can and must use either a mutex or a spinlock to
+ * protect any data structures managing the space.
+ */
+ int (*get_node)(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem);
+
+ /**
+ * struct ttm_mem_type_manager member put_node
+ *
+ * @man: Pointer to a memory type manager.
+ * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+ *
+ * This function frees memory type resources previously allocated
+ * and that are identified by @mem::mm_node and @mem::start. May not
+ * be called from within atomic context.
+ */
+ void (*put_node)(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem);
+
+ /**
+ * struct ttm_mem_type_manager member debug
+ *
+ * @man: Pointer to a memory type manager.
+ * @prefix: Prefix to be used in printout to identify the caller.
+ *
+ * This function is called to print out the state of the memory
+ * type manager to aid debugging of out-of-memory conditions.
+ * It may not be called from within atomic context.
+ */
+ void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
+};
+
+/**
+ * struct ttm_mem_type_manager
+ *
+ * @has_type: The memory type has been initialized.
+ * @use_type: The memory type is enabled.
+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
+ * managed by this memory type.
+ * @gpu_offset: If used, the GPU offset of the first managed page of
+ * fixed memory or the first managed location in an aperture.
+ * @size: Size of the managed region.
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
+ * as defined in ttm_placement_common.h
+ * @default_caching: The default caching policy used for a buffer object
+ * placed in this memory type if the user doesn't provide one.
+ * @func: structure pointer implementing the range manager. See above
+ * @priv: Driver private closure for @func.
+ * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
+ * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
+ * reserved by the TTM vm system.
+ * @io_reserve_lru: Optional lru list for unreserving io mem regions.
+ * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
+ * static information. bdev::driver::io_mem_free is never used.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ * It's set up by the ttm_bo_driver::init_mem_type method.
+ */
+
+
+
+struct ttm_mem_type_manager {
+ struct ttm_bo_device *bdev;
+
+ /*
+ * No protection. Constant from start.
+ */
+
+ bool has_type;
+ bool use_type;
+ uint32_t flags;
+ unsigned long gpu_offset;
+ uint64_t size;
+ uint32_t available_caching;
+ uint32_t default_caching;
+ const struct ttm_mem_type_manager_func *func;
+ void *priv;
+ struct sx io_reserve_mutex;
+ bool use_io_reserve_lru;
+ bool io_reserve_fastpath;
+
+ /*
+ * Protected by @io_reserve_mutex:
+ */
+
+ struct list_head io_reserve_lru;
+
+ /*
+ * Protected by the global->lru_lock.
+ */
+
+ struct list_head lru;
+};
+
+/**
+ * struct ttm_bo_driver
+ *
+ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
+ * @invalidate_caches: Callback to invalidate read caches when a buffer object
+ * has been evicted.
+ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
+ * structure.
+ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
+ * @move: Callback for a driver to hook in accelerated functions to
+ * move a buffer.
+ * If set to NULL, a potentially slow memcpy() move is used.
+ * @sync_obj_signaled: See ttm_fence_api.h
+ * @sync_obj_wait: See ttm_fence_api.h
+ * @sync_obj_flush: See ttm_fence_api.h
+ * @sync_obj_unref: See ttm_fence_api.h
+ * @sync_obj_ref: See ttm_fence_api.h
+ */
+
+struct ttm_bo_driver {
+ /**
+ * ttm_tt_create
+ *
+ * @bdev: pointer to a struct ttm_bo_device:
+ * @size: Size of the data needed backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
+ */
+ struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct vm_page *dummy_read_page);
+
+ /**
+ * ttm_tt_populate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Allocate all backing pages
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+ int (*ttm_tt_populate)(struct ttm_tt *ttm);
+
+ /**
+ * ttm_tt_unpopulate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Free all backing page
+ */
+ void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_bo_driver member invalidate_caches
+ *
+ * @bdev: the buffer object device.
+ * @flags: new placement of the rebound buffer object.
+ *
+ * A previously evicted buffer has been rebound in a
+ * potentially new location. Tell the driver that it might
+ * consider invalidating read (texture) caches on the next command
+ * submission as a consequence.
+ */
+
+ int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
+ int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man);
+ /**
+ * struct ttm_bo_driver member evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags
+ */
+
+ void(*evict_flags) (struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+ /**
+ * struct ttm_bo_driver member move:
+ *
+ * @bo: the buffer to move
+ * @evict: whether this motion is evicting the buffer from
+ * the graphics address space
+ * @interruptible: Use interruptible sleeps if possible when sleeping.
+ * @no_wait: whether this should give up and return -EBUSY
+ * if this move would require sleeping
+ * @new_mem: the new memory region receiving the buffer
+ *
+ * Move a buffer between two memory regions.
+ */
+ int (*move) (struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
+
+ /**
+ * struct ttm_bo_driver_member verify_access
+ *
+ * @bo: Pointer to a buffer object.
+ * @filp: Pointer to a struct file trying to access the object.
+ * FreeBSD: use devfs_get_cdevpriv etc.
+ *
+ * Called from the map / write / read methods to verify that the
+ * caller is permitted to access the buffer object.
+ * This member may be set to NULL, which will refuse this kind of
+ * access for all buffer objects.
+ * This function should return 0 if access is granted, -EPERM otherwise.
+ */
+ int (*verify_access) (struct ttm_buffer_object *bo);
+
+ /**
+ * In case a driver writer dislikes the TTM fence objects,
+ * the driver writer can replace those with sync objects of
+ * his / her own. If it turns out that no driver writer is
+ * using these, I suggest we remove these hooks and plug in
+ * fences directly. The bo driver needs the following functionality:
+ * See the corresponding functions in the fence object API
+ * documentation.
+ */
+
+ bool (*sync_obj_signaled) (void *sync_obj);
+ int (*sync_obj_wait) (void *sync_obj,
+ bool lazy, bool interruptible);
+ int (*sync_obj_flush) (void *sync_obj);
+ void (*sync_obj_unref) (void **sync_obj);
+ void *(*sync_obj_ref) (void *sync_obj);
+
+ /* hook to notify driver about a driver move so it
+ * can do tiling things */
+ void (*move_notify)(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *new_mem);
+ /* notify the driver we are taking a fault on this BO
+ * and have reserved it */
+ int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * notify the driver that we're about to swap out this bo
+ */
+ void (*swap_notify) (struct ttm_buffer_object *bo);
+
+ /**
+ * Driver callback on when mapping io memory (for bo_move_memcpy
+ * for instance). TTM will take care to call io_mem_free whenever
+ * the mapping is no longer used. io_mem_reserve & io_mem_free
+ * are balanced.
+ */
+ int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+ void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+};
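For orientation, a hedged sketch of a minimal driver table follows. The example_* callbacks are hypothetical, and wiring populate/unpopulate to the ttm_pool_* helpers is an assumption based on ttm_page_alloc.h rather than something this header mandates:

/*
 * Illustrative only: a minimal driver table.  All example_* callbacks are
 * hypothetical.  Leaving .move NULL selects the memcpy fallback and leaving
 * .verify_access NULL refuses map / read / write access.
 */
static struct ttm_bo_driver example_bo_driver = {
	.ttm_tt_create = example_ttm_tt_create,
	.ttm_tt_populate = ttm_pool_populate,	/* assumed from ttm_page_alloc.h */
	.ttm_tt_unpopulate = ttm_pool_unpopulate,
	.invalidate_caches = example_invalidate_caches,
	.init_mem_type = example_init_mem_type,
	.evict_flags = example_evict_flags,
	.move = NULL,
	.verify_access = example_verify_access,
	.io_mem_reserve = example_io_mem_reserve,
	.io_mem_free = example_io_mem_free,
};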
+
+/**
+ * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
+ */
+
+struct ttm_bo_global_ref {
+ struct drm_global_reference ref;
+ struct ttm_mem_global *mem_glob;
+};
+
+/**
+ * struct ttm_bo_global - Buffer object driver global data.
+ *
+ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
+ * of unpopulated pages.
+ * @shrink: A shrink callback object used for buffer object swap.
+ * @device_list_mutex: Mutex protecting the device list.
+ * This mutex is held while traversing the device list for pm options.
+ * @lru_lock: Spinlock protecting the bo subsystem lru lists.
+ * @device_list: List of buffer object devices.
+ * @swap_lru: Lru list of buffer objects used for swapping.
+ */
+
+struct ttm_bo_global {
+ u_int kobj_ref;
+
+ /**
+ * Constant after init.
+ */
+
+ struct ttm_mem_global *mem_glob;
+ struct vm_page *dummy_read_page;
+ struct ttm_mem_shrink shrink;
+ struct sx device_list_mutex;
+ struct mtx lru_lock;
+
+ /**
+ * Protected by device_list_mutex.
+ */
+ struct list_head device_list;
+
+ /**
+ * Protected by the lru_lock.
+ */
+ struct list_head swap_lru;
+
+ /**
+ * Internal protection.
+ */
+ atomic_t bo_count;
+};
+
+
+#define TTM_NUM_MEM_TYPES 8
+
+#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
+ idling before CPU mapping */
+#define TTM_BO_PRIV_FLAG_MAX 1
+/**
+ * struct ttm_bo_device - Buffer object driver device-specific data.
+ *
+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
+ * @man: An array of mem_type_managers.
+ * @fence_lock: Protects the synchronizing members on *all* bos belonging
+ * to this device.
+ * @addr_space_mm: Range manager for the device address space.
+ * @lru_lock: Spinlock that protects the buffer+device lru lists and
+ * ddestroy lists.
+ * @val_seq: Current validation sequence.
+ * @dev_mapping: A pointer to the struct address_space representing the
+ * device address space.
+ * @wq: Work queue structure for the delayed delete workqueue.
+ *
+ */
+
+struct ttm_bo_device {
+
+ /*
+ * Constant after bo device init / atomic.
+ */
+ struct list_head device_list;
+ struct ttm_bo_global *glob;
+ struct ttm_bo_driver *driver;
+ struct rwlock vm_lock;
+ struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+ struct mtx fence_lock;
+ /*
+ * Protected by the vm lock.
+ */
+ RB_HEAD(ttm_bo_device_buffer_objects, ttm_buffer_object) addr_space_rb;
+ struct drm_mm addr_space_mm;
+
+ /*
+ * Protected by the global:lru lock.
+ */
+ struct list_head ddestroy;
+ uint32_t val_seq;
+
+ /*
+ * Protected by load / firstopen / lastclose /unload sync.
+ */
+
+ struct address_space *dev_mapping;
+
+ /*
+ * Internal protection.
+ */
+
+ struct timeout_task wq;
+
+ bool need_dma32;
+};
+
+/**
+ * ttm_flag_masked
+ *
+ * @old: Pointer to the result and original value.
+ * @new: New value of bits.
+ * @mask: Mask of bits to change.
+ *
+ * Convenience function to change a number of bits identified by a mask.
+ */
+
+static inline uint32_t
+ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
+{
+ *old ^= (*old ^ new) & mask;
+ return *old;
+}
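A small worked example, assuming the TTM_PL_FLAG_WC and TTM_PL_MASK_CACHING constants from ttm_placement.h: switching only the caching bits of a placement word while leaving the memory-type bits alone.

/*
 * Illustrative only: rewrite just the caching bits of a placement word,
 * leaving the memory-type bits untouched; e.g. VRAM | CACHED becomes
 * VRAM | WC.
 */
static inline uint32_t
example_make_wc(uint32_t placement)
{
	ttm_flag_masked(&placement, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
	return placement;
}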
+
+/**
+ * ttm_tt_init
+ *
+ * @ttm: The struct ttm_tt.
+ * @bdev: pointer to a struct ttm_bo_device:
+ * @size: Size of the data needed backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct vm_page *dummy_read_page);
+extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct vm_page *dummy_read_page);
+
+/**
+ * ttm_tt_fini
+ *
+ * @ttm: the ttm_tt structure.
+ *
+ * Free memory of ttm_tt structure
+ */
+extern void ttm_tt_fini(struct ttm_tt *ttm);
+extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
+
+/**
+ * ttm_ttm_bind:
+ *
+ * @ttm: The struct ttm_tt containing backing pages.
+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
+ *
+ * Bind the pages of @ttm to an aperture location identified by @bo_mem
+ */
+extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+/**
+ * ttm_ttm_destroy:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind, unpopulate and destroy common struct ttm_tt.
+ */
+extern void ttm_tt_destroy(struct ttm_tt *ttm);
+
+/**
+ * ttm_ttm_unbind:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind a struct ttm_tt.
+ */
+extern void ttm_tt_unbind(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_swapin:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Swap in a previously swapped-out ttm_tt.
+ */
+extern int ttm_tt_swapin(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_cache_flush:
+ *
+ * @pages: An array of pointers to struct vm_page to flush.
+ * @num_pages: Number of pages to flush.
+ *
+ * Flush the data of the indicated pages from the cpu caches.
+ * This is used when changing caching attributes of the pages from
+ * cache-coherent.
+ */
+extern void ttm_tt_cache_flush(struct vm_page *pages[], unsigned long num_pages);
+
+/**
+ * ttm_tt_set_placement_caching:
+ *
+ * @ttm: A struct ttm_tt whose backing pages will change caching policy.
+ * @placement: Flag indicating the desired caching policy.
+ *
+ * This function will change caching policy of any default kernel mappings of
+ * the pages backing @ttm. If changing from cached to uncached or
+ * write-combined,
+ * all CPU caches will first be flushed to make sure the data of the pages
+ * hit RAM. This function may be very costly as it involves global TLB
+ * and cache flushes and potential page splitting / combining.
+ */
+extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
+extern int ttm_tt_swapout(struct ttm_tt *ttm,
+ struct vm_object *persistent_swap_storage);
+
+/*
+ * ttm_bo.c
+ */
+
+/**
+ * ttm_mem_reg_is_pci
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @mem: A valid struct ttm_mem_reg.
+ *
+ * Returns true if the memory described by @mem is PCI memory,
+ * false otherwise.
+ */
+extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
+
+/**
+ * ttm_bo_mem_space
+ *
+ * @bo: Pointer to a struct ttm_buffer_object, the data of which
+ * we want to allocate space for.
+ * @proposed_placement: Proposed new placement for the buffer object.
+ * @mem: A struct ttm_mem_reg.
+ * @interruptible: Sleep interruptible when sleeping.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ *
+ * Allocate memory space for the buffer object pointed to by @bo, using
+ * the placement flags in @mem, potentially evicting other idle buffer objects.
+ * This function may sleep while waiting for space to become available.
+ * Returns:
+ * -EBUSY: No space available (only if no_wait == 1).
+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * fragmentation or concurrent allocators.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
+ */
+extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible,
+ bool no_wait_gpu);
+
+extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+
+extern void ttm_bo_global_release(struct drm_global_reference *ref);
+extern int ttm_bo_global_init(struct drm_global_reference *ref);
+
+extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_device_init
+ *
+ * @bdev: A pointer to a struct ttm_bo_device to initialize.
+ * @glob: A pointer to an initialized struct ttm_bo_global.
+ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @file_page_offset: Offset into the device address space that is available
+ * for buffer data. This ensures compatibility with other users of the
+ * address space.
+ *
+ * Initializes a struct ttm_bo_device:
+ * Returns:
+ * !0: Failure.
+ */
+extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
+ struct ttm_bo_global *glob,
+ struct ttm_bo_driver *driver,
+ uint64_t file_page_offset, bool need_dma32);
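A hedged bring-up sketch: the softc layout, DRM_FILE_PAGE_OFFSET, TTM_PL_VRAM and the example_bo_driver table are all assumptions of the example, and obtaining the ttm_bo_global pointer (normally via drm_global_item_ref()) is left out for brevity.

/*
 * Illustrative only: struct example_softc, DRM_FILE_PAGE_OFFSET and
 * example_bo_driver are assumptions of this sketch; TTM_PL_VRAM is
 * assumed from ttm_placement.h.
 */
static int
example_ttm_init(struct example_softc *sc, struct ttm_bo_global *glob)
{
	int ret;

	ret = ttm_bo_device_init(&sc->bdev, glob, &example_bo_driver,
	    DRM_FILE_PAGE_OFFSET, false /* need_dma32 */);
	if (ret != 0)
		return ret;

	/* Set up the VRAM range manager before creating any buffers. */
	return ttm_bo_init_mm(&sc->bdev, TTM_PL_VRAM,
	    sc->vram_size >> PAGE_SHIFT);
}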
+
+/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ */
+extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ *
+ * The caller must take ttm_mem_io_lock before calling this function.
+ */
+extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
+
+extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
+extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
+extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
+ bool interruptible);
+extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
+
+
+/**
+ * ttm_bo_reserve:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Locks a buffer object for validation. (Or prevents other processes from
+ * locking it for validation) and removes it from lru lists, while taking
+ * a number of measures to prevent deadlocks.
+ *
+ * Deadlocks may occur when two processes try to reserve multiple buffers in
+ * different order, either by will or as a result of a buffer being evicted
+ * to make room for a buffer already reserved. (Buffers are reserved before
+ * they are evicted). The following algorithm prevents such deadlocks from
+ * occurring:
+ * 1) Buffers are reserved with the lru spinlock held. Upon successful
+ * reservation they are removed from the lru list. This stops a reserved buffer
+ * from being evicted. However the lru spinlock is released between the time
+ * a buffer is selected for eviction and the time it is reserved.
+ * Therefore a check is made when a buffer is reserved for eviction, that it
+ * is still the first buffer in the lru list, before it is removed from the
+ * list. @check_lru == 1 forces this check. If it fails, the function returns
+ * -EINVAL, and the caller should then choose a new buffer to evict and repeat
+ * the procedure.
+ * 2) Processes attempting to reserve multiple buffers other than for eviction,
+ * (typically execbuf), should first obtain a unique 32-bit
+ * validation sequence number,
+ * and call this function with @use_sequence == 1 and @sequence == the unique
+ * sequence number. If upon call of this function, the buffer object is already
+ * reserved, the validation sequence is checked against the validation
+ * sequence of the process currently reserving the buffer,
+ * and if the current validation sequence is greater than that of the process
+ * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
+ * waiting for the buffer to become unreserved, after which it retries
+ * reserving.
+ * The caller should, when receiving an -EAGAIN error
+ * release all its buffer reservations, wait for @bo to become unreserved, and
+ * then rerun the validation with the same validation sequence. This procedure
+ * will always guarantee that the process with the lowest validation sequence
+ * will eventually succeed, preventing both deadlocks and starvation.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
+ */
+extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence, uint32_t sequence);
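A minimal sketch of the common reserve/validate/unreserve pattern in the non-sequenced case; with use_sequence the caller must additionally handle -EAGAIN by backing off as described above.

/* Illustrative only: the common reserve / validate / unreserve pattern. */
static int
example_move_bo(struct ttm_buffer_object *bo, struct ttm_placement *placement)
{
	int ret;

	/* Plain reservation: no validation sequence, may sleep. */
	ret = ttm_bo_reserve(bo, true /* interruptible */,
	    false /* no_wait */, false /* use_sequence */, 0);
	if (ret != 0)
		return ret;

	ret = ttm_bo_validate(bo, placement, true /* interruptible */,
	    false /* no_wait_gpu */);

	ttm_bo_unreserve(bo);
	return ret;
}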
+
+
+/**
+ * ttm_bo_reserve_locked:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Must be called with struct ttm_bo_global::lru_lock held,
+ * and will not remove reserved buffers from the lru lists.
+ * The function may release the LRU spinlock if it needs to sleep.
+ * Otherwise identical to ttm_bo_reserve.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
+ */
+extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence,
+ uint32_t sequence);
+
+/**
+ * ttm_bo_unreserve
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ */
+extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_unreserve_locked
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ * Needs to be called with struct ttm_bo_global::lru_lock held.
+ */
+extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_wait_unreserved
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Wait for a struct ttm_buffer_object to become unreserved.
+ * This is typically used in the execbuf code to relax cpu-usage when
+ * backing off from a potential deadlock condition.
+ */
+extern int ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo,
+ bool interruptible);
+
+/*
+ * ttm_bo_util.c
+ */
+
+/**
+ * ttm_bo_move_ttm
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Optimized move function for a buffer object with both old and
+ * new placement backed by a TTM. The function will, if successful,
+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
+ * data remains untouched, and it's up to the caller to free the
+ * memory space indicated by @new_mem.
+ * Returns:
+ * !0: Failure.
+ */
+
+extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
+
+/**
+ * ttm_bo_move_memcpy
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Fallback move function for a mappable buffer object in mappable memory.
+ * The function will, if successful,
+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
+ * data remains untouched, and it's up to the caller to free the
+ * memory space indicated by @new_mem.
+ * Returns:
+ * !0: Failure.
+ */
+
+extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
+
+/**
+ * ttm_bo_free_old_node
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Utility function to free an old placement after a successful move.
+ */
+extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_move_accel_cleanup.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @sync_obj: A sync object that signals when moving is complete.
+ * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Accelerated move function to be called when an accelerated move
+ * has been scheduled. The function will create a new temporary buffer object
+ * representing the old placement, and put the sync object on both buffer
+ * objects. After that the newly created buffer object is unref'd to be
+ * destroyed when the move is complete. This will help pipeline
+ * buffer moves.
+ */
+
+extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ void *sync_obj,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
+/**
+ * ttm_io_prot
+ *
+ * @caching_flags: Caching flags.
+ *
+ * Utility function that returns the vm_memattr_t that should be used for
+ * setting up a PTE with the caching model indicated by @caching_flags.
+extern vm_memattr_t ttm_io_prot(uint32_t caching_flags);
+
+extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
+
+#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
+#define TTM_HAS_AGP
+#include <linux/agp_backend.h>
+
+/**
+ * ttm_agp_tt_create
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @bridge: The agp bridge this device is sitting on.
+ * @size: Size of the data needed backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
+ *
+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
+ * for TT memory. This function uses the linux agpgart interface to
+ * bind and unbind memory backing a ttm_tt.
+ */
+extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+ struct agp_bridge_data *bridge,
+ unsigned long size, uint32_t page_flags,
+ struct vm_page *dummy_read_page);
+int ttm_agp_tt_populate(struct ttm_tt *ttm);
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
+#endif
+
+int ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
+ struct ttm_buffer_object *b);
+
+RB_PROTOTYPE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
+ ttm_bo_cmp_rb_tree_items);
+
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_bo_manager.c b/sys/dev/drm2/ttm/ttm_bo_manager.c
new file mode 100644
index 0000000..4686511
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_bo_manager.c
@@ -0,0 +1,157 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+#include <dev/drm2/drm_mm.h>
+
+/**
+ * In this FreeBSD port the range manager lock is a default (sleeping) mutex.
+ * The upstream Linux code uses a spinlock here and notes that a mutex may be
+ * more appropriate if the range manager ends up with very fragmented
+ * allocation patterns.
+ */
+
+struct ttm_range_manager {
+ struct drm_mm mm;
+ struct mtx lock;
+};
+
+MALLOC_DEFINE(M_TTM_RMAN, "ttm_rman", "TTM Range Manager");
+
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
+ struct drm_mm_node *node = NULL;
+ unsigned long lpfn;
+ int ret;
+
+ lpfn = placement->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+ do {
+ ret = drm_mm_pre_get(mm);
+ if (unlikely(ret))
+ return ret;
+
+ mtx_lock(&rman->lock);
+ node = drm_mm_search_free_in_range(mm,
+ mem->num_pages, mem->page_alignment,
+ placement->fpfn, lpfn, 1);
+ if (unlikely(node == NULL)) {
+ mtx_unlock(&rman->lock);
+ return 0;
+ }
+ node = drm_mm_get_block_atomic_range(node, mem->num_pages,
+ mem->page_alignment,
+ placement->fpfn,
+ lpfn);
+ mtx_unlock(&rman->lock);
+ } while (node == NULL);
+
+ mem->mm_node = node;
+ mem->start = node->start;
+ return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+
+ if (mem->mm_node) {
+ mtx_lock(&rman->lock);
+ drm_mm_put_block(mem->mm_node);
+ mtx_unlock(&rman->lock);
+ mem->mm_node = NULL;
+ }
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct ttm_range_manager *rman;
+ int ret;
+
+ rman = malloc(sizeof(*rman), M_TTM_RMAN, M_ZERO | M_WAITOK);
+ ret = drm_mm_init(&rman->mm, 0, p_size);
+ if (ret) {
+ free(rman, M_TTM_RMAN);
+ return ret;
+ }
+
+ mtx_init(&rman->lock, "ttmrman", NULL, MTX_DEF);
+ man->priv = rman;
+ return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
+
+ mtx_lock(&rman->lock);
+ if (drm_mm_clean(mm)) {
+ drm_mm_takedown(mm);
+ mtx_unlock(&rman->lock);
+ mtx_destroy(&rman->lock);
+ free(rman, M_TTM_RMAN);
+ man->priv = NULL;
+ return 0;
+ }
+ mtx_unlock(&rman->lock);
+ return -EBUSY;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+ const char *prefix)
+{
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+
+ mtx_lock(&rman->lock);
+ drm_mm_debug_table(&rman->mm, prefix);
+ mtx_unlock(&rman->lock);
+}
+
+const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
+ ttm_bo_man_init,
+ ttm_bo_man_takedown,
+ ttm_bo_man_get_node,
+ ttm_bo_man_put_node,
+ ttm_bo_man_debug
+};
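Drivers plug this range manager into a memory type from their init_mem_type() callback. A sketch, under the assumption that TTM_PL_VRAM and the ttm_mem_type_manager fields used below match ttm_placement.h and ttm_bo_driver.h from this commit; the flag and caching choices are illustrative only.

static int
mydrv_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
    struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;	/* drm_mm-backed range allocator */
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return (-EINVAL);
	}
	return (0);
}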
diff --git a/sys/dev/drm2/ttm/ttm_bo_util.c b/sys/dev/drm2/ttm/ttm_bo_util.c
new file mode 100644
index 0000000..139d134
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_bo_util.c
@@ -0,0 +1,658 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+#include <sys/sf_buf.h>
+
+void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
+{
+ ttm_bo_mem_put(bo, &bo->mem);
+}
+
+int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+ bool evict,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+{
+ struct ttm_tt *ttm = bo->ttm;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ int ret;
+
+ if (old_mem->mem_type != TTM_PL_SYSTEM) {
+ ttm_tt_unbind(ttm);
+ ttm_bo_free_old_node(bo);
+ ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
+ TTM_PL_MASK_MEM);
+ old_mem->mem_type = TTM_PL_SYSTEM;
+ }
+
+ ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (new_mem->mem_type != TTM_PL_SYSTEM) {
+ ret = ttm_tt_bind(ttm, new_mem);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+
+ return 0;
+}
+
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
+{
+ if (likely(man->io_reserve_fastpath))
+ return 0;
+
+ if (interruptible) {
+ if (sx_xlock_sig(&man->io_reserve_mutex))
+ return (-EINTR);
+ else
+ return (0);
+ }
+
+ sx_xlock(&man->io_reserve_mutex);
+ return 0;
+}
+
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+ if (likely(man->io_reserve_fastpath))
+ return;
+
+ sx_xunlock(&man->io_reserve_mutex);
+}
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+ struct ttm_buffer_object *bo;
+
+ if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+ return -EAGAIN;
+
+ bo = list_first_entry(&man->io_reserve_lru,
+ struct ttm_buffer_object,
+ io_reserve_lru);
+ list_del_init(&bo->io_reserve_lru);
+ ttm_bo_unmap_virtual_locked(bo);
+
+ return 0;
+}
+
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ int ret = 0;
+
+ if (!bdev->driver->io_mem_reserve)
+ return 0;
+ if (likely(man->io_reserve_fastpath))
+ return bdev->driver->io_mem_reserve(bdev, mem);
+
+ if (bdev->driver->io_mem_reserve &&
+ mem->bus.io_reserved_count++ == 0) {
+retry:
+ ret = bdev->driver->io_mem_reserve(bdev, mem);
+ if (ret == -EAGAIN) {
+ ret = ttm_mem_io_evict(man);
+ if (ret == 0)
+ goto retry;
+ }
+ }
+ return ret;
+}
+
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+ if (likely(man->io_reserve_fastpath))
+ return;
+
+ if (bdev->driver->io_mem_reserve &&
+ --mem->bus.io_reserved_count == 0 &&
+ bdev->driver->io_mem_free)
+ bdev->driver->io_mem_free(bdev, mem);
+
+}
+
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+ struct ttm_mem_reg *mem = &bo->mem;
+ int ret;
+
+ if (!mem->bus.io_reserved_vm) {
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[mem->mem_type];
+
+ ret = ttm_mem_io_reserve(bo->bdev, mem);
+ if (unlikely(ret != 0))
+ return ret;
+ mem->bus.io_reserved_vm = true;
+ if (man->use_io_reserve_lru)
+ list_add_tail(&bo->io_reserve_lru,
+ &man->io_reserve_lru);
+ }
+ return 0;
+}
+
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
+{
+ struct ttm_mem_reg *mem = &bo->mem;
+
+ if (mem->bus.io_reserved_vm) {
+ mem->bus.io_reserved_vm = false;
+ list_del_init(&bo->io_reserve_lru);
+ ttm_mem_io_free(bo->bdev, mem);
+ }
+}
+
+static
+int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+ void **virtual)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ int ret;
+ void *addr;
+
+ *virtual = NULL;
+ (void) ttm_mem_io_lock(man, false);
+ ret = ttm_mem_io_reserve(bdev, mem);
+ ttm_mem_io_unlock(man);
+ if (ret || !mem->bus.is_iomem)
+ return ret;
+
+ if (mem->bus.addr) {
+ addr = mem->bus.addr;
+ } else {
+ addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
+ mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
+ VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
+ if (!addr) {
+ (void) ttm_mem_io_lock(man, false);
+ ttm_mem_io_free(bdev, mem);
+ ttm_mem_io_unlock(man);
+ return -ENOMEM;
+ }
+ }
+ *virtual = addr;
+ return 0;
+}
+
+static
+void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+ void *virtual)
+{
+ struct ttm_mem_type_manager *man;
+
+ man = &bdev->man[mem->mem_type];
+
+ if (virtual && mem->bus.addr == NULL)
+ pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
+ (void) ttm_mem_io_lock(man, false);
+ ttm_mem_io_free(bdev, mem);
+ ttm_mem_io_unlock(man);
+}
+
+static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
+{
+ uint32_t *dstP =
+ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
+ uint32_t *srcP =
+ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
+
+ int i;
+ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
+ /* iowrite32(ioread32(srcP++), dstP++); */
+ *dstP++ = *srcP++;
+ return 0;
+}
+
+static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
+ unsigned long page,
+ vm_memattr_t prot)
+{
+ vm_page_t d = ttm->pages[page];
+ void *dst;
+
+ if (!d)
+ return -ENOMEM;
+
+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+
+ /* XXXKIB can't sleep ? */
+ dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
+ if (!dst)
+ return -ENOMEM;
+
+ memcpy(dst, src, PAGE_SIZE);
+
+ pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);
+
+ return 0;
+}
+
+static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
+ unsigned long page,
+ vm_memattr_t prot)
+{
+ vm_page_t s = ttm->pages[page];
+ void *src;
+
+ if (!s)
+ return -ENOMEM;
+
+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
+ src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
+ if (!src)
+ return -ENOMEM;
+
+ memcpy(dst, src, PAGE_SIZE);
+
+ pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);
+
+ return 0;
+}
+
+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+ struct ttm_tt *ttm = bo->ttm;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_mem_reg old_copy = *old_mem;
+ void *old_iomap;
+ void *new_iomap;
+ int ret;
+ unsigned long i;
+ unsigned long page;
+ unsigned long add = 0;
+ int dir;
+
+ ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
+ if (ret)
+ return ret;
+ ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
+ if (ret)
+ goto out;
+
+ if (old_iomap == NULL && new_iomap == NULL)
+ goto out2;
+ if (old_iomap == NULL && ttm == NULL)
+ goto out2;
+
+ if (ttm->state == tt_unpopulated) {
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ret)
+ goto out1;
+ }
+
+ add = 0;
+ dir = 1;
+
+ if ((old_mem->mem_type == new_mem->mem_type) &&
+ (new_mem->start < old_mem->start + old_mem->size)) {
+ dir = -1;
+ add = new_mem->num_pages - 1;
+ }
+
+ for (i = 0; i < new_mem->num_pages; ++i) {
+ page = i * dir + add;
+ if (old_iomap == NULL) {
+ vm_memattr_t prot = ttm_io_prot(old_mem->placement);
+ ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+ prot);
+ } else if (new_iomap == NULL) {
+ vm_memattr_t prot = ttm_io_prot(new_mem->placement);
+ ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+ prot);
+ } else
+ ret = ttm_copy_io_page(new_iomap, old_iomap, page);
+ if (ret)
+ goto out1;
+ }
+ mb();
+out2:
+ old_copy = *old_mem;
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+
+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
+ ttm_tt_unbind(ttm);
+ ttm_tt_destroy(ttm);
+ bo->ttm = NULL;
+ }
+
+out1:
+ ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
+out:
+ ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+ ttm_bo_mem_put(bo, &old_copy);
+ return ret;
+}
+
+MALLOC_DEFINE(M_TTM_TRANSF_OBJ, "ttm_transf_obj", "TTM Transfer Objects");
+
+static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
+{
+ free(bo, M_TTM_TRANSF_OBJ);
+}
+
+/**
+ * ttm_buffer_object_transfer
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
+ * holding the data of @bo with the old placement.
+ *
+ * This is a utility function that may be called after an accelerated move
+ * has been scheduled. A new buffer object is created as a placeholder for
+ * the old data while it's being copied. When that buffer object is idle,
+ * it can be destroyed, releasing the space of the old placement.
+ * Returns:
+ * !0: Failure.
+ */
+
+static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
+ struct ttm_buffer_object **new_obj)
+{
+ struct ttm_buffer_object *fbo;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+
+ fbo = malloc(sizeof(*fbo), M_TTM_TRANSF_OBJ, M_ZERO | M_WAITOK);
+ *fbo = *bo;
+
+ /**
+ * Fix up members that we shouldn't copy directly:
+ * TODO: Explicit member copy would probably be better here.
+ */
+
+ INIT_LIST_HEAD(&fbo->ddestroy);
+ INIT_LIST_HEAD(&fbo->lru);
+ INIT_LIST_HEAD(&fbo->swap);
+ INIT_LIST_HEAD(&fbo->io_reserve_lru);
+ fbo->vm_node = NULL;
+ atomic_set(&fbo->cpu_writers, 0);
+
+ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ refcount_init(&fbo->list_kref, 1);
+ refcount_init(&fbo->kref, 1);
+ fbo->destroy = &ttm_transfered_destroy;
+ fbo->acc_size = 0;
+
+ *new_obj = fbo;
+ return 0;
+}
+
+vm_memattr_t
+ttm_io_prot(uint32_t caching_flags)
+{
+#if defined(__i386__) || defined(__amd64__)
+ if (caching_flags & TTM_PL_FLAG_WC)
+ return (VM_MEMATTR_WRITE_COMBINING);
+ else
+ /*
+ * We do not support i386, look at the linux source
+ * for the reason of the comment.
+ */
+ return (VM_MEMATTR_UNCACHEABLE);
+#else
+#error Port me
+#endif
+}
+
+static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
+ unsigned long offset,
+ unsigned long size,
+ struct ttm_bo_kmap_obj *map)
+{
+ struct ttm_mem_reg *mem = &bo->mem;
+
+ if (bo->mem.bus.addr) {
+ map->bo_kmap_type = ttm_bo_map_premapped;
+ map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
+ } else {
+ map->bo_kmap_type = ttm_bo_map_iomap;
+ map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
+ bo->mem.bus.offset + offset, size,
+ (mem->placement & TTM_PL_FLAG_WC) ?
+ VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
+ map->size = size;
+ }
+ return (!map->virtual) ? -ENOMEM : 0;
+}
+
+static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
+ unsigned long start_page,
+ unsigned long num_pages,
+ struct ttm_bo_kmap_obj *map)
+{
+ struct ttm_mem_reg *mem = &bo->mem;
+ vm_memattr_t prot;
+ struct ttm_tt *ttm = bo->ttm;
+ int i, ret;
+
+ MPASS(ttm != NULL);
+
+ if (ttm->state == tt_unpopulated) {
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ret)
+ return ret;
+ }
+
+ if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+ /*
+ * We're mapping a single page, and the desired
+ * page protection is consistent with the bo.
+ */
+
+ map->bo_kmap_type = ttm_bo_map_kmap;
+ map->page = ttm->pages[start_page];
+ map->sf = sf_buf_alloc(map->page, 0);
+ map->virtual = (void *)sf_buf_kva(map->sf);
+ } else {
+ /*
+ * We need to use vmap to get the desired page protection
+ * or to make the buffer object look contiguous.
+ */
+ prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
+ VM_MEMATTR_WRITE_COMBINING :
+ ttm_io_prot(mem->placement);
+ map->bo_kmap_type = ttm_bo_map_vmap;
+ map->num_pages = num_pages;
+ map->virtual = (void *)kmem_alloc_nofault(kernel_map,
+ num_pages * PAGE_SIZE);
+ if (map->virtual != NULL) {
+ for (i = 0; i < num_pages; i++) {
+ /* XXXKIB hack */
+ pmap_page_set_memattr(ttm->pages[start_page +
+ i], prot);
+ }
+ pmap_qenter((vm_offset_t)map->virtual,
+ &ttm->pages[start_page], num_pages);
+ }
+ }
+ return (!map->virtual) ? -ENOMEM : 0;
+}
+
+int ttm_bo_kmap(struct ttm_buffer_object *bo,
+ unsigned long start_page, unsigned long num_pages,
+ struct ttm_bo_kmap_obj *map)
+{
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[bo->mem.mem_type];
+ unsigned long offset, size;
+ int ret;
+
+ MPASS(list_empty(&bo->swap));
+ map->virtual = NULL;
+ map->bo = bo;
+ if (num_pages > bo->num_pages)
+ return -EINVAL;
+ if (start_page > bo->num_pages)
+ return -EINVAL;
+#if 0
+ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+ return -EPERM;
+#endif
+ (void) ttm_mem_io_lock(man, false);
+ ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+ ttm_mem_io_unlock(man);
+ if (ret)
+ return ret;
+ if (!bo->mem.bus.is_iomem) {
+ return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
+ } else {
+ offset = start_page << PAGE_SHIFT;
+ size = num_pages << PAGE_SHIFT;
+ return ttm_bo_ioremap(bo, offset, size, map);
+ }
+}
+
+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
+{
+ struct ttm_buffer_object *bo = map->bo;
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[bo->mem.mem_type];
+
+ if (!map->virtual)
+ return;
+ switch (map->bo_kmap_type) {
+ case ttm_bo_map_iomap:
+ pmap_unmapdev((vm_offset_t)map->virtual, map->size);
+ break;
+ case ttm_bo_map_vmap:
+ pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
+ kmem_free(kernel_map, (vm_offset_t)map->virtual,
+ map->num_pages * PAGE_SIZE);
+ break;
+ case ttm_bo_map_kmap:
+ sf_buf_free(map->sf);
+ break;
+ case ttm_bo_map_premapped:
+ break;
+ default:
+ MPASS(0);
+ }
+ (void) ttm_mem_io_lock(man, false);
+ ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+ ttm_mem_io_unlock(man);
+ map->virtual = NULL;
+ map->page = NULL;
+ map->sf = NULL;
+}
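To illustrate the mapping API above, a short CPU-access sketch: the caller is assumed to already hold a reservation on the bo, and ttm_kmap_obj_virtual() is the accessor declared in ttm_bo_api.h in this commit; mydrv_poke_page() itself is hypothetical.

static int
mydrv_poke_page(struct ttm_buffer_object *bo, unsigned long page_index,
    uint32_t value)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	uint32_t *p;
	int ret;

	ret = ttm_bo_kmap(bo, page_index, 1, &map);	/* map one page */
	if (ret != 0)
		return (ret);
	p = ttm_kmap_obj_virtual(&map, &is_iomem);
	*p = value;					/* first word of the mapped page */
	ttm_bo_kunmap(&map);
	return (0);
}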
+
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ void *sync_obj,
+ bool evict,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ int ret;
+ struct ttm_buffer_object *ghost_obj;
+ void *tmp_obj = NULL;
+
+ mtx_lock(&bdev->fence_lock);
+ if (bo->sync_obj) {
+ tmp_obj = bo->sync_obj;
+ bo->sync_obj = NULL;
+ }
+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ if (evict) {
+ ret = ttm_bo_wait(bo, false, false, false);
+ mtx_unlock(&bdev->fence_lock);
+ if (tmp_obj)
+ driver->sync_obj_unref(&tmp_obj);
+ if (ret)
+ return ret;
+
+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+ (bo->ttm != NULL)) {
+ ttm_tt_unbind(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
+ ttm_bo_free_old_node(bo);
+ } else {
+ /**
+ * This should help pipeline ordinary buffer moves.
+ *
+ * Hang old buffer memory on a new buffer object,
+ * and leave it to be released when the GPU
+ * operation has completed.
+ */
+
+ set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+
+ /* ttm_buffer_object_transfer accesses bo->sync_obj */
+ ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+ mtx_unlock(&bdev->fence_lock);
+ if (tmp_obj)
+ driver->sync_obj_unref(&tmp_obj);
+
+ if (ret)
+ return ret;
+
+ /**
+ * If we're not moving to fixed memory, the TTM object
+		 * needs to stay alive. Otherwise hang it on the ghost
+ * bo to be unbound and destroyed.
+ */
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
+ ghost_obj->ttm = NULL;
+ else
+ bo->ttm = NULL;
+
+ ttm_bo_unreserve(ghost_obj);
+ ttm_bo_unref(&ghost_obj);
+ }
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+
+ return 0;
+}
diff --git a/sys/dev/drm2/ttm/ttm_bo_vm.c b/sys/dev/drm2/ttm/ttm_bo_vm.c
new file mode 100644
index 0000000..03e2f2b
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_bo_vm.c
@@ -0,0 +1,492 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/*
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_vm.h"
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+#define TTM_BO_VM_NUM_PREFAULT 16
+
+RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
+ ttm_bo_cmp_rb_tree_items);
+
+int
+ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
+ struct ttm_buffer_object *b)
+{
+
+ if (a->vm_node->start < b->vm_node->start) {
+ return (-1);
+ } else if (a->vm_node->start > b->vm_node->start) {
+ return (1);
+ } else {
+ return (0);
+ }
+}
+
+static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
+ unsigned long page_start,
+ unsigned long num_pages)
+{
+ unsigned long cur_offset;
+ struct ttm_buffer_object *bo;
+ struct ttm_buffer_object *best_bo = NULL;
+
+ RB_FOREACH(bo, ttm_bo_device_buffer_objects, &bdev->addr_space_rb) {
+ cur_offset = bo->vm_node->start;
+ if (page_start >= cur_offset) {
+ best_bo = bo;
+ if (page_start == cur_offset)
+ break;
+ }
+ }
+
+ if (unlikely(best_bo == NULL))
+ return NULL;
+
+ if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
+ (page_start + num_pages)))
+ return NULL;
+
+ return best_bo;
+}
+
+static int
+ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
+ int prot, vm_page_t *mres)
+{
+
+ struct ttm_buffer_object *bo = vm_obj->handle;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_tt *ttm = NULL;
+ vm_page_t m, oldm;
+ int ret;
+ int retval = VM_PAGER_OK;
+ struct ttm_mem_type_manager *man =
+ &bdev->man[bo->mem.mem_type];
+
+ vm_object_pip_add(vm_obj, 1);
+ oldm = *mres;
+ if (oldm != NULL) {
+ vm_page_lock(oldm);
+ vm_page_remove(oldm);
+ vm_page_unlock(oldm);
+ *mres = NULL;
+ } else
+ oldm = NULL;
+retry:
+ VM_OBJECT_UNLOCK(vm_obj);
+ m = NULL;
+
+reserve:
+ mtx_lock(&bo->glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
+ mtx_unlock(&bo->glob->lru_lock);
+ if (unlikely(ret != 0)) {
+ if (ret == -EBUSY) {
+ kern_yield(0);
+ goto reserve;
+ }
+ }
+
+ if (bdev->driver->fault_reserve_notify) {
+ ret = bdev->driver->fault_reserve_notify(bo);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ case -ERESTART:
+ case -EINTR:
+ kern_yield(0);
+ goto reserve;
+ default:
+ retval = VM_PAGER_ERROR;
+ goto out_unlock;
+ }
+ }
+
+ /*
+ * Wait for buffer data in transit, due to a pipelined
+ * move.
+ */
+
+ mtx_lock(&bdev->fence_lock);
+ if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
+ ret = ttm_bo_wait(bo, false, true, false);
+ mtx_unlock(&bdev->fence_lock);
+ if (unlikely(ret != 0)) {
+ retval = VM_PAGER_ERROR;
+ goto out_unlock;
+ }
+ } else
+ mtx_unlock(&bdev->fence_lock);
+
+ ret = ttm_mem_io_lock(man, true);
+ if (unlikely(ret != 0)) {
+ retval = VM_PAGER_ERROR;
+ goto out_unlock;
+ }
+ ret = ttm_mem_io_reserve_vm(bo);
+ if (unlikely(ret != 0)) {
+ retval = VM_PAGER_ERROR;
+ goto out_io_unlock;
+ }
+
+ /*
+ * Strictly, we're not allowed to modify vma->vm_page_prot here,
+ * since the mmap_sem is only held in read mode. However, we
+ * modify only the caching bits of vma->vm_page_prot and
+ * consider those bits protected by
+ * the bo->mutex, as we should be the only writers.
+ * There shouldn't really be any readers of these bits except
+ * within vm_insert_mixed()? fork?
+ *
+ * TODO: Add a list of vmas to the bo, and change the
+ * vma->vm_page_prot when the object changes caching policy, with
+ * the correct locks held.
+ */
+ if (!bo->mem.bus.is_iomem) {
+		/* Allocate all pages at once, most common usage */
+ ttm = bo->ttm;
+ if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+ retval = VM_PAGER_ERROR;
+ goto out_io_unlock;
+ }
+ }
+
+ if (bo->mem.bus.is_iomem) {
+ m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
+ bo->mem.bus.offset + offset);
+ pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
+ } else {
+ ttm = bo->ttm;
+ m = ttm->pages[OFF_TO_IDX(offset)];
+ if (unlikely(!m)) {
+ retval = VM_PAGER_ERROR;
+ goto out_io_unlock;
+ }
+ pmap_page_set_memattr(m,
+ (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
+ VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
+ }
+
+ VM_OBJECT_LOCK(vm_obj);
+ if ((m->flags & VPO_BUSY) != 0) {
+ vm_page_sleep(m, "ttmpbs");
+ ttm_mem_io_unlock(man);
+ ttm_bo_unreserve(bo);
+ goto retry;
+ }
+ m->valid = VM_PAGE_BITS_ALL;
+ *mres = m;
+ vm_page_lock(m);
+ vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
+ vm_page_unlock(m);
+ vm_page_busy(m);
+
+ if (oldm != NULL) {
+ vm_page_lock(oldm);
+ vm_page_free(oldm);
+ vm_page_unlock(oldm);
+ }
+
+out_io_unlock1:
+ ttm_mem_io_unlock(man);
+out_unlock1:
+ ttm_bo_unreserve(bo);
+ vm_object_pip_wakeup(vm_obj);
+ return (retval);
+
+out_io_unlock:
+ VM_OBJECT_LOCK(vm_obj);
+ goto out_io_unlock1;
+
+out_unlock:
+ VM_OBJECT_LOCK(vm_obj);
+ goto out_unlock1;
+}
+
+static int
+ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
+ vm_ooffset_t foff, struct ucred *cred, u_short *color)
+{
+ struct ttm_buffer_object *bo = handle;
+
+ *color = 0;
+ (void)ttm_bo_reference(bo);
+ return (0);
+}
+
+static void
+ttm_bo_vm_dtor(void *handle)
+{
+ struct ttm_buffer_object *bo = handle;
+
+ ttm_bo_unref(&bo);
+}
+
+static struct cdev_pager_ops ttm_pager_ops = {
+ .cdev_pg_fault = ttm_bo_vm_fault,
+ .cdev_pg_ctor = ttm_bo_vm_ctor,
+ .cdev_pg_dtor = ttm_bo_vm_dtor
+};
+
+int
+ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
+ struct vm_object **obj_res, int nprot)
+{
+ struct ttm_bo_driver *driver;
+ struct ttm_buffer_object *bo;
+ struct vm_object *vm_obj;
+ int ret;
+
+ rw_wlock(&bdev->vm_lock);
+ bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
+ if (likely(bo != NULL))
+ refcount_acquire(&bo->kref);
+ rw_wunlock(&bdev->vm_lock);
+
+ if (unlikely(bo == NULL)) {
+ printf("[TTM] Could not find buffer object to map\n");
+ return (EINVAL);
+ }
+
+ driver = bo->bdev->driver;
+ if (unlikely(!driver->verify_access)) {
+ ret = EPERM;
+ goto out_unref;
+ }
+ ret = -driver->verify_access(bo);
+ if (unlikely(ret != 0))
+ goto out_unref;
+
+ vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
+ size, nprot, 0, curthread->td_ucred);
+ if (vm_obj == NULL) {
+ ret = EINVAL;
+ goto out_unref;
+ }
+ /*
+ * Note: We're transferring the bo reference to vm_obj->handle here.
+ */
+ *offset = 0;
+ *obj_res = vm_obj;
+ return 0;
+out_unref:
+ ttm_bo_unref(&bo);
+ return ret;
+}
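A hedged sketch of the intended call path: the character-device layer resolves its per-device state and forwards d_mmap_single() requests to ttm_bo_mmap_single(), which hands back a managed-device VM object. mydrv_bdev_from_cdev() is hypothetical; note the positive-errno return convention used above.

static int
mydrv_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_device *bdev = mydrv_bdev_from_cdev(kdev);	/* hypothetical lookup */

	/* ttm_bo_mmap_single() returns 0 or a positive errno (EINVAL, EPERM). */
	return (ttm_bo_mmap_single(bdev, offset, size, obj_res, nprot));
}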
+
+#if 0
+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+{
+ if (vma->vm_pgoff != 0)
+ return -EACCES;
+
+ vma->vm_ops = &ttm_bo_vm_ops;
+ vma->vm_private_data = ttm_bo_reference(bo);
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+ return 0;
+}
+
+ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+ const char __user *wbuf, char __user *rbuf, size_t count,
+ loff_t *f_pos, bool write)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_bo_driver *driver;
+ struct ttm_bo_kmap_obj map;
+ unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
+ unsigned long kmap_offset;
+ unsigned long kmap_end;
+ unsigned long kmap_num;
+ size_t io_size;
+ unsigned int page_offset;
+ char *virtual;
+ int ret;
+ bool no_wait = false;
+ bool dummy;
+
+ read_lock(&bdev->vm_lock);
+ bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
+ if (likely(bo != NULL))
+ ttm_bo_reference(bo);
+ read_unlock(&bdev->vm_lock);
+
+ if (unlikely(bo == NULL))
+ return -EFAULT;
+
+ driver = bo->bdev->driver;
+ if (unlikely(!driver->verify_access)) {
+ ret = -EPERM;
+ goto out_unref;
+ }
+
+ ret = driver->verify_access(bo, filp);
+ if (unlikely(ret != 0))
+ goto out_unref;
+
+ kmap_offset = dev_offset - bo->vm_node->start;
+ if (unlikely(kmap_offset >= bo->num_pages)) {
+ ret = -EFBIG;
+ goto out_unref;
+ }
+
+ page_offset = *f_pos & ~PAGE_MASK;
+ io_size = bo->num_pages - kmap_offset;
+ io_size = (io_size << PAGE_SHIFT) - page_offset;
+ if (count < io_size)
+ io_size = count;
+
+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+ kmap_num = kmap_end - kmap_offset + 1;
+
+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ ret = -EAGAIN;
+ goto out_unref;
+ default:
+ goto out_unref;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unreserve(bo);
+ goto out_unref;
+ }
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ virtual += page_offset;
+
+ if (write)
+ ret = copy_from_user(virtual, wbuf, io_size);
+ else
+ ret = copy_to_user(rbuf, virtual, io_size);
+
+ ttm_bo_kunmap(&map);
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
+
+ if (unlikely(ret != 0))
+ return -EFBIG;
+
+ *f_pos += io_size;
+
+ return io_size;
+out_unref:
+ ttm_bo_unref(&bo);
+ return ret;
+}
+
+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
+ char __user *rbuf, size_t count, loff_t *f_pos,
+ bool write)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_end;
+ unsigned long kmap_num;
+ size_t io_size;
+ unsigned int page_offset;
+ char *virtual;
+ int ret;
+ bool no_wait = false;
+ bool dummy;
+
+ kmap_offset = (*f_pos >> PAGE_SHIFT);
+ if (unlikely(kmap_offset >= bo->num_pages))
+ return -EFBIG;
+
+ page_offset = *f_pos & ~PAGE_MASK;
+ io_size = bo->num_pages - kmap_offset;
+ io_size = (io_size << PAGE_SHIFT) - page_offset;
+ if (count < io_size)
+ io_size = count;
+
+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+ kmap_num = kmap_end - kmap_offset + 1;
+
+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ return -EAGAIN;
+ default:
+ return ret;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unreserve(bo);
+ return ret;
+ }
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ virtual += page_offset;
+
+ if (write)
+ ret = copy_from_user(virtual, wbuf, io_size);
+ else
+ ret = copy_to_user(rbuf, virtual, io_size);
+
+ ttm_bo_kunmap(&map);
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
+
+ if (unlikely(ret != 0))
+ return ret;
+
+ *f_pos += io_size;
+
+ return io_size;
+}
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_execbuf_util.c b/sys/dev/drm2/ttm/ttm_execbuf_util.c
new file mode 100644
index 0000000..ce5cf15
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_execbuf_util.c
@@ -0,0 +1,230 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_execbuf_util.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
+
+ if (entry->removed) {
+ ttm_bo_add_to_lru(bo);
+ entry->removed = false;
+
+ }
+ entry->reserved = false;
+ atomic_set(&bo->reserved, 0);
+ wakeup(bo);
+ }
+}
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
+
+ if (!entry->removed) {
+ entry->put_count = ttm_bo_del_from_lru(bo);
+ entry->removed = true;
+ }
+ }
+}
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+ if (entry->put_count) {
+ ttm_bo_list_ref_sub(bo, entry->put_count, true);
+ entry->put_count = 0;
+ }
+ }
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+ struct ttm_buffer_object *bo)
+{
+ int ret;
+
+ ttm_eu_del_from_lru_locked(list);
+ ret = ttm_bo_wait_unreserved_locked(bo, true);
+ if (unlikely(ret != 0))
+ ttm_eu_backoff_reservation_locked(list);
+ return ret;
+}
+
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+ struct ttm_bo_global *glob;
+
+ if (list_empty(list))
+ return;
+
+ entry = list_first_entry(list, struct ttm_validate_buffer, head);
+ glob = entry->bo->glob;
+ mtx_lock(&glob->lru_lock);
+ ttm_eu_backoff_reservation_locked(list);
+ mtx_unlock(&glob->lru_lock);
+}
+
+/*
+ * Reserve buffers for validation.
+ *
+ * If a buffer in the list is marked for CPU access, we back off and
+ * wait for that buffer to become free for GPU access.
+ *
+ * If a buffer is reserved for another validation, the validator with
+ * the highest validation sequence backs off and waits for that buffer
+ * to become unreserved. This prevents deadlocks when validating multiple
+ * buffers in different orders.
+ */
+
+int ttm_eu_reserve_buffers(struct list_head *list)
+{
+ struct ttm_bo_global *glob;
+ struct ttm_validate_buffer *entry;
+ int ret;
+ uint32_t val_seq;
+
+ if (list_empty(list))
+ return 0;
+
+ list_for_each_entry(entry, list, head) {
+ entry->reserved = false;
+ entry->put_count = 0;
+ entry->removed = false;
+ }
+
+ entry = list_first_entry(list, struct ttm_validate_buffer, head);
+ glob = entry->bo->glob;
+
+ mtx_lock(&glob->lru_lock);
+retry_locked:
+ val_seq = entry->bo->bdev->val_seq++;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+retry_this_bo:
+ ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ ret = ttm_eu_wait_unreserved_locked(list, bo);
+ if (unlikely(ret != 0)) {
+ mtx_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ return ret;
+ }
+ goto retry_this_bo;
+ case -EAGAIN:
+ ttm_eu_backoff_reservation_locked(list);
+ ttm_eu_list_ref_sub(list);
+ ret = ttm_bo_wait_unreserved_locked(bo, true);
+ if (unlikely(ret != 0)) {
+ mtx_unlock(&glob->lru_lock);
+ return ret;
+ }
+ goto retry_locked;
+ default:
+ ttm_eu_backoff_reservation_locked(list);
+ mtx_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ return ret;
+ }
+
+ entry->reserved = true;
+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+ ttm_eu_backoff_reservation_locked(list);
+ mtx_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ return -EBUSY;
+ }
+ }
+
+ ttm_eu_del_from_lru_locked(list);
+ mtx_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+
+ return 0;
+}
+
+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+{
+ struct ttm_validate_buffer *entry;
+ struct ttm_buffer_object *bo;
+ struct ttm_bo_global *glob;
+ struct ttm_bo_device *bdev;
+ struct ttm_bo_driver *driver;
+
+ if (list_empty(list))
+ return;
+
+ bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
+ bdev = bo->bdev;
+ driver = bdev->driver;
+ glob = bo->glob;
+
+ mtx_lock(&glob->lru_lock);
+ mtx_lock(&bdev->fence_lock);
+
+ list_for_each_entry(entry, list, head) {
+ bo = entry->bo;
+ entry->old_sync_obj = bo->sync_obj;
+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ ttm_bo_unreserve_locked(bo);
+ entry->reserved = false;
+ }
+ mtx_unlock(&bdev->fence_lock);
+ mtx_unlock(&glob->lru_lock);
+
+ list_for_each_entry(entry, list, head) {
+ if (entry->old_sync_obj)
+ driver->sync_obj_unref(&entry->old_sync_obj);
+ }
+}
diff --git a/sys/dev/drm2/ttm/ttm_execbuf_util.h b/sys/dev/drm2/ttm/ttm_execbuf_util.h
new file mode 100644
index 0000000..68b0eba
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_execbuf_util.h
@@ -0,0 +1,109 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+
+#ifndef _TTM_EXECBUF_UTIL_H_
+#define _TTM_EXECBUF_UTIL_H_
+
+#include <dev/drm2/ttm/ttm_bo_api.h>
+
+/**
+ * struct ttm_validate_buffer
+ *
+ * @head: list head for thread-private list.
+ * @bo: refcounted buffer object pointer.
+ * @reserved: Indicates whether @bo has been reserved for validation.
+ * @removed: Indicates whether @bo has been removed from lru lists.
+ * @put_count: Number of outstanding references on bo::list_kref.
+ * @old_sync_obj: Pointer to a sync object about to be unreferenced
+ */
+
+struct ttm_validate_buffer {
+ struct list_head head;
+ struct ttm_buffer_object *bo;
+ bool reserved;
+ bool removed;
+ int put_count;
+ void *old_sync_obj;
+};
+
+/**
+ * function ttm_eu_backoff_reservation
+ *
+ * @list: thread private list of ttm_validate_buffer structs.
+ *
+ * Undoes all buffer validation reservations for bos pointed to by
+ * the list entries.
+ */
+
+extern void ttm_eu_backoff_reservation(struct list_head *list);
+
+/**
+ * function ttm_eu_reserve_buffers
+ *
+ * @list: thread private list of ttm_validate_buffer structs.
+ *
+ * Tries to reserve bos pointed to by the list entries for validation.
+ * If the function returns 0, all buffers are marked as "unfenced",
+ * taken off the lru lists and are not synced for write CPU usage.
+ *
+ * If the function detects a deadlock due to multiple threads trying to
+ * reserve the same buffers in reverse order, all threads except one will
+ * back off and retry. This function may sleep while waiting for
+ * CPU write reservations to be cleared, and for other threads to
+ * unreserve their buffers.
+ *
+ * This function may return -ERESTART or -EAGAIN if the calling process
+ * receives a signal while waiting. In that case, no buffers on the list
+ * will be reserved upon return.
+ *
+ * Buffers reserved by this function should be unreserved by
+ * a call to either ttm_eu_backoff_reservation() or
+ * ttm_eu_fence_buffer_objects() when command submission is complete or
+ * has failed.
+ */
+
+extern int ttm_eu_reserve_buffers(struct list_head *list);
+
+/**
+ * function ttm_eu_fence_buffer_objects.
+ *
+ * @list: thread private list of ttm_validate_buffer structs.
+ * @sync_obj: The new sync object for the buffers.
+ *
+ * This function should be called when command submission is complete, and
+ * it will add a new sync object to bos pointed to by entries on @list.
+ * It also unreserves all buffers, putting them on lru lists.
+ *
+ */
+
+extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
+
+#endif
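Putting the three entry points documented above together, a typical command-submission flow looks roughly like this; mydrv_emit_commands() and the fence it produces are hypothetical, and val_list is a thread-private list of struct ttm_validate_buffer entries as described above.

static int
mydrv_submit(struct list_head *val_list)
{
	void *fence;
	int ret;

	ret = ttm_eu_reserve_buffers(val_list);		/* reserve all bos, deadlock-safe */
	if (ret != 0)
		return (ret);

	ret = mydrv_emit_commands(val_list, &fence);	/* build and queue the command stream */
	if (ret != 0) {
		ttm_eu_backoff_reservation(val_list);	/* undo reservations on failure */
		return (ret);
	}

	ttm_eu_fence_buffer_objects(val_list, fence);	/* attach fence, unreserve, back on LRU */
	return (0);
}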
diff --git a/sys/dev/drm2/ttm/ttm_lock.c b/sys/dev/drm2/ttm/ttm_lock.c
new file mode 100644
index 0000000..8ec3041
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_lock.c
@@ -0,0 +1,340 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/*
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/ttm/ttm_lock.h>
+#include <dev/drm2/ttm/ttm_module.h>
+
+#define TTM_WRITE_LOCK_PENDING (1 << 0)
+#define TTM_VT_LOCK_PENDING (1 << 1)
+#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
+#define TTM_VT_LOCK (1 << 3)
+#define TTM_SUSPEND_LOCK (1 << 4)
+
+void ttm_lock_init(struct ttm_lock *lock)
+{
+ mtx_init(&lock->lock, "ttmlk", NULL, MTX_DEF);
+ lock->rw = 0;
+ lock->flags = 0;
+ lock->kill_takers = false;
+ lock->signal = SIGKILL;
+}
+
+static void
+ttm_lock_send_sig(int signo)
+{
+ struct proc *p;
+
+ p = curproc; /* XXXKIB curthread ? */
+ PROC_LOCK(p);
+ kern_psignal(p, signo);
+ PROC_UNLOCK(p);
+}
+
+void ttm_read_unlock(struct ttm_lock *lock)
+{
+ mtx_lock(&lock->lock);
+ if (--lock->rw == 0)
+ wakeup(lock);
+ mtx_unlock(&lock->lock);
+}
+
+static bool __ttm_read_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ if (unlikely(lock->kill_takers)) {
+ ttm_lock_send_sig(lock->signal);
+ return false;
+ }
+ if (lock->rw >= 0 && lock->flags == 0) {
+ ++lock->rw;
+ locked = true;
+ }
+ return locked;
+}
+
+int
+ttm_read_lock(struct ttm_lock *lock, bool interruptible)
+{
+ const char *wmsg;
+ int flags, ret;
+
+ ret = 0;
+ if (interruptible) {
+ flags = PCATCH;
+ wmsg = "ttmri";
+ } else {
+ flags = 0;
+ wmsg = "ttmr";
+ }
+ mtx_lock(&lock->lock);
+ while (!__ttm_read_lock(lock)) {
+ ret = msleep(lock, &lock->lock, flags, wmsg, 0);
+ if (ret != 0)
+ break;
+ }
+	mtx_unlock(&lock->lock);	/* msleep() returns with the mutex held */
+	return (-ret);
+}
+
+static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
+{
+ bool block = true;
+
+ *locked = false;
+
+ if (unlikely(lock->kill_takers)) {
+ ttm_lock_send_sig(lock->signal);
+ return false;
+ }
+ if (lock->rw >= 0 && lock->flags == 0) {
+ ++lock->rw;
+ block = false;
+ *locked = true;
+ } else if (lock->flags == 0) {
+ block = false;
+ }
+
+ return !block;
+}
+
+int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
+{
+ const char *wmsg;
+ int flags, ret;
+ bool locked;
+
+ ret = 0;
+ if (interruptible) {
+ flags = PCATCH;
+ wmsg = "ttmrti";
+ } else {
+ flags = 0;
+ wmsg = "ttmrt";
+ }
+ mtx_lock(&lock->lock);
+ while (!__ttm_read_trylock(lock, &locked)) {
+ ret = msleep(lock, &lock->lock, flags, wmsg, 0);
+ if (ret != 0)
+ break;
+ }
+ MPASS(!locked || ret == 0);
+ mtx_unlock(&lock->lock);
+
+ return (locked) ? 0 : -EBUSY;
+}
+
+void ttm_write_unlock(struct ttm_lock *lock)
+{
+ mtx_lock(&lock->lock);
+ lock->rw = 0;
+ wakeup(lock);
+ mtx_unlock(&lock->lock);
+}
+
+static bool __ttm_write_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ if (unlikely(lock->kill_takers)) {
+ ttm_lock_send_sig(lock->signal);
+ return false;
+ }
+ if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
+ lock->rw = -1;
+ lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+ locked = true;
+ } else {
+ lock->flags |= TTM_WRITE_LOCK_PENDING;
+ }
+ return locked;
+}
+
+int
+ttm_write_lock(struct ttm_lock *lock, bool interruptible)
+{
+ const char *wmsg;
+ int flags, ret;
+
+ ret = 0;
+ if (interruptible) {
+ flags = PCATCH;
+ wmsg = "ttmwi";
+ } else {
+ flags = 0;
+ wmsg = "ttmw";
+ }
+ mtx_lock(&lock->lock);
+ /* XXXKIB: linux uses __ttm_read_lock for uninterruptible sleeps */
+ while (!__ttm_write_lock(lock)) {
+ ret = msleep(lock, &lock->lock, flags, wmsg, 0);
+ if (interruptible && ret != 0) {
+ lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+ wakeup(lock);
+ break;
+ }
+ }
+ mtx_unlock(&lock->lock);
+
+ return (-ret);
+}
+
+void ttm_write_lock_downgrade(struct ttm_lock *lock)
+{
+ mtx_lock(&lock->lock);
+ lock->rw = 1;
+ wakeup(lock);
+ mtx_unlock(&lock->lock);
+}
+
+static int __ttm_vt_unlock(struct ttm_lock *lock)
+{
+ int ret = 0;
+
+ mtx_lock(&lock->lock);
+ if (unlikely(!(lock->flags & TTM_VT_LOCK)))
+ ret = -EINVAL;
+ lock->flags &= ~TTM_VT_LOCK;
+ wakeup(lock);
+ mtx_unlock(&lock->lock);
+
+ return ret;
+}
+
+static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
+ int ret;
+
+ *p_base = NULL;
+ ret = __ttm_vt_unlock(lock);
+ MPASS(ret == 0);
+}
+
+static bool __ttm_vt_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ lock->flags |= TTM_VT_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_VT_LOCK_PENDING;
+ }
+ return locked;
+}
+
+int ttm_vt_lock(struct ttm_lock *lock,
+ bool interruptible,
+ struct ttm_object_file *tfile)
+{
+ const char *wmsg;
+ int flags, ret;
+
+ ret = 0;
+ if (interruptible) {
+ flags = PCATCH;
+ wmsg = "ttmwi";
+ } else {
+ flags = 0;
+ wmsg = "ttmw";
+ }
+ mtx_lock(&lock->lock);
+ while (!__ttm_vt_lock(lock)) {
+ ret = msleep(lock, &lock->lock, flags, wmsg, 0);
+ if (interruptible && ret != 0) {
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ wakeup(lock);
+ break;
+ }
+ }
+	mtx_unlock(&lock->lock);
+
+ /*
+ * Add a base-object, the destructor of which will
+ * make sure the lock is released if the client dies
+ * while holding it.
+ */
+
+ ret = ttm_base_object_init(tfile, &lock->base, false,
+ ttm_lock_type, &ttm_vt_lock_remove, NULL);
+ if (ret)
+ (void)__ttm_vt_unlock(lock);
+ else
+ lock->vt_holder = tfile;
+
+ return (-ret);
+}
+
+int ttm_vt_unlock(struct ttm_lock *lock)
+{
+ return ttm_ref_object_base_unref(lock->vt_holder,
+ lock->base.hash.key, TTM_REF_USAGE);
+}
+
+void ttm_suspend_unlock(struct ttm_lock *lock)
+{
+ mtx_lock(&lock->lock);
+ lock->flags &= ~TTM_SUSPEND_LOCK;
+ wakeup(lock);
+ mtx_unlock(&lock->lock);
+}
+
+static bool __ttm_suspend_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
+ lock->flags |= TTM_SUSPEND_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_SUSPEND_LOCK_PENDING;
+ }
+ return locked;
+}
+
+void ttm_suspend_lock(struct ttm_lock *lock)
+{
+ mtx_lock(&lock->lock);
+ while (!__ttm_suspend_lock(lock))
+ msleep(lock, &lock->lock, 0, "ttms", 0);
+ mtx_unlock(&lock->lock);
+}
diff --git a/sys/dev/drm2/ttm/ttm_lock.h b/sys/dev/drm2/ttm/ttm_lock.h
new file mode 100644
index 0000000..ac8159e
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_lock.h
@@ -0,0 +1,249 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+
+/** @file ttm_lock.h
+ * This file implements a simple replacement for the buffer manager use
+ * of the DRM heavyweight hardware lock.
+ * The lock is a read-write lock. Taking it in read mode and write mode
+ * is relatively fast, and intended for in-kernel use only.
+ *
+ * The vt mode is used only when there is a need to block all
+ * user-space processes from validating buffers.
+ * It's allowed to leave kernel space with the vt lock held.
+ * If a user-space process dies while having the vt-lock,
+ * it will be released during the file descriptor release. The vt lock
+ * excludes write lock and read lock.
+ *
+ * The suspend mode is used to lock out all TTM users when preparing for
+ * and executing suspend operations.
+ *
+ */
+
+#ifndef _TTM_LOCK_H_
+#define _TTM_LOCK_H_
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/ttm/ttm_object.h>
+
+/**
+ * struct ttm_lock
+ *
+ * @base: ttm base object used solely to release the lock if the client
+ * holding the lock dies.
+ * @queue: Queue for processes waiting for lock change-of-status.
+ * @lock: Spinlock protecting some lock members.
+ * @rw: Read-write lock counter. Protected by @lock.
+ * @flags: Lock state. Protected by @lock.
+ * @kill_takers: Boolean whether to kill takers of the lock.
+ * @signal: Signal to send when kill_takers is true.
+ */
+
+struct ttm_lock {
+ struct ttm_base_object base;
+ struct mtx lock;
+ int32_t rw;
+ uint32_t flags;
+ bool kill_takers;
+ int signal;
+ struct ttm_object_file *vt_holder;
+};
+
+
+/**
+ * ttm_lock_init
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * Initializes the lock.
+ */
+extern void ttm_lock_init(struct ttm_lock *lock);
+
+/**
+ * ttm_read_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a read lock.
+ */
+extern void ttm_read_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_read_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in read mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_read_trylock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Tries to take the lock in read mode. If the lock is already held
+ * in write mode, the function will return -EBUSY. If the lock is held
+ * in vt or suspend mode, the function will sleep until these modes
+ * are unlocked.
+ *
+ * Returns:
+ * -EBUSY The lock was already held in write mode.
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_write_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a write lock.
+ */
+extern void ttm_write_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in write mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_lock_downgrade
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Downgrades a write lock to a read lock.
+ */
+extern void ttm_lock_downgrade(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Takes the lock in suspend mode. Excludes read and write mode.
+ */
+extern void ttm_suspend_lock(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a suspend lock
+ */
+extern void ttm_suspend_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_vt_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ * @tfile: Pointer to a struct ttm_object_file to register the lock with.
+ *
+ * Takes the lock in vt mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ * -ENOMEM: Out of memory when locking.
+ */
+extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
+ struct ttm_object_file *tfile);
+
+/**
+ * ttm_vt_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a vt lock.
+ * Returns:
+ * -EINVAL If the lock was not held.
+ */
+extern int ttm_vt_unlock(struct ttm_lock *lock);
+
+extern void ttm_write_lock_downgrade(struct ttm_lock *lock);
+
+/**
+ * ttm_lock_set_kill
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @val: Boolean whether to kill processes taking the lock.
+ * @signal: Signal to send to the process taking the lock.
+ *
+ * The kill-when-taking-lock functionality is used to kill processes that keep
+ * on using the TTM functionality when its resources has been taken down, for
+ * example when the X server exits. A typical sequence would look like this:
+ * - X server takes lock in write mode.
+ * - ttm_lock_set_kill() is called with @val set to true.
+ * - As part of X server exit, TTM resources are taken down.
+ * - X server releases the lock on file release.
+ * - Another dri client wants to render, takes the lock and is killed.
+ *
+ */
+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
+ int signal)
+{
+ lock->kill_takers = val;
+ if (val)
+ lock->signal = signal;
+}
+
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_memory.c b/sys/dev/drm2/ttm/ttm_memory.c
new file mode 100644
index 0000000..ee74d94
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_memory.c
@@ -0,0 +1,471 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_memory.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_page_alloc.h>
+
+#define TTM_MEMORY_ALLOC_RETRIES 4
+
+struct ttm_mem_zone {
+ u_int kobj_ref;
+ struct ttm_mem_global *glob;
+ const char *name;
+ uint64_t zone_mem;
+ uint64_t emer_mem;
+ uint64_t max_mem;
+ uint64_t swap_limit;
+ uint64_t used_mem;
+};
+
+MALLOC_DEFINE(M_TTM_ZONE, "ttm_zone", "TTM Zone");
+
+static void ttm_mem_zone_kobj_release(struct ttm_mem_zone *zone)
+{
+
+	printf("[TTM] Zone %7s: Used memory at exit: %llu kiB\n",
+ zone->name, (unsigned long long)zone->used_mem >> 10);
+ free(zone, M_TTM_ZONE);
+}
+
+#if 0
+/* XXXKIB sysctl */
+static ssize_t ttm_mem_zone_show(struct ttm_mem_zone *zone,
+ struct attribute *attr,
+ char *buffer)
+{
+ uint64_t val = 0;
+
+ mtx_lock(&zone->glob->lock);
+ if (attr == &ttm_mem_sys)
+ val = zone->zone_mem;
+ else if (attr == &ttm_mem_emer)
+ val = zone->emer_mem;
+ else if (attr == &ttm_mem_max)
+ val = zone->max_mem;
+ else if (attr == &ttm_mem_swap)
+ val = zone->swap_limit;
+ else if (attr == &ttm_mem_used)
+ val = zone->used_mem;
+ mtx_unlock(&zone->glob->lock);
+
+ return snprintf(buffer, PAGE_SIZE, "%llu\n",
+ (unsigned long long) val >> 10);
+}
+#endif
+
+static void ttm_check_swapping(struct ttm_mem_global *glob);
+
+#if 0
+/* XXXKIB sysctl */
+static ssize_t ttm_mem_zone_store(struct ttm_mem_zone *zone,
+ struct attribute *attr,
+ const char *buffer,
+ size_t size)
+{
+ int chars;
+ unsigned long val;
+ uint64_t val64;
+
+ chars = sscanf(buffer, "%lu", &val);
+ if (chars == 0)
+ return size;
+
+ val64 = val;
+ val64 <<= 10;
+
+ mtx_lock(&zone->glob->lock);
+ if (val64 > zone->zone_mem)
+ val64 = zone->zone_mem;
+ if (attr == &ttm_mem_emer) {
+ zone->emer_mem = val64;
+ if (zone->max_mem > val64)
+ zone->max_mem = val64;
+ } else if (attr == &ttm_mem_max) {
+ zone->max_mem = val64;
+ if (zone->emer_mem < val64)
+ zone->emer_mem = val64;
+ } else if (attr == &ttm_mem_swap)
+ zone->swap_limit = val64;
+ mtx_unlock(&zone->glob->lock);
+
+ ttm_check_swapping(zone->glob);
+
+ return size;
+}
+#endif
+
+static void ttm_mem_global_kobj_release(struct ttm_mem_global *glob)
+{
+
+ free(glob, M_TTM_ZONE);
+}
+
+static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
+ bool from_wq, uint64_t extra)
+{
+ unsigned int i;
+ struct ttm_mem_zone *zone;
+ uint64_t target;
+
+ for (i = 0; i < glob->num_zones; ++i) {
+ zone = glob->zones[i];
+
+ if (from_wq)
+ target = zone->swap_limit;
+ else if (priv_check(curthread, PRIV_VM_MLOCK) == 0)
+ target = zone->emer_mem;
+ else
+ target = zone->max_mem;
+
+ target = (extra > target) ? 0ULL : target;
+
+ if (zone->used_mem > target)
+ return true;
+ }
+ return false;
+}
+
+/**
+ * At this point we only support a single shrink callback.
+ * Extend this if needed, perhaps using a linked list of callbacks.
+ * Note that this function is reentrant:
+ * many threads may try to swap out at any given time.
+ */
+
+static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
+ uint64_t extra)
+{
+ int ret;
+ struct ttm_mem_shrink *shrink;
+
+ mtx_lock(&glob->lock);
+ if (glob->shrink == NULL)
+ goto out;
+
+ while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
+ shrink = glob->shrink;
+ mtx_unlock(&glob->lock);
+ ret = shrink->do_shrink(shrink);
+ mtx_lock(&glob->lock);
+ if (unlikely(ret != 0))
+ goto out;
+ }
+out:
+ mtx_unlock(&glob->lock);
+}
+
+
+
+static void ttm_shrink_work(void *arg, int pending __unused)
+{
+ struct ttm_mem_global *glob = arg;
+
+ ttm_shrink(glob, true, 0ULL);
+}
+
+static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
+ uint64_t mem)
+{
+ struct ttm_mem_zone *zone;
+
+ zone = malloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);
+
+ zone->name = "kernel";
+ zone->zone_mem = mem;
+ zone->max_mem = mem >> 1;
+ zone->emer_mem = (mem >> 1) + (mem >> 2);
+ zone->swap_limit = zone->max_mem - (mem >> 3);
+ zone->used_mem = 0;
+ zone->glob = glob;
+ glob->zone_kernel = zone;
+ refcount_init(&zone->kobj_ref, 1);
+ glob->zones[glob->num_zones++] = zone;
+ return 0;
+}
+
+static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
+ uint64_t mem)
+{
+ struct ttm_mem_zone *zone;
+
+ zone = malloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);
+
+ /**
+ * No special dma32 zone needed.
+ */
+
+ if (mem <= ((uint64_t) 1ULL << 32)) {
+ free(zone, M_TTM_ZONE);
+ return 0;
+ }
+
+ /*
+ * Limit max dma32 memory to 4GB for now
+ * until we can figure out how big this
+ * zone really is.
+ */
+
+ mem = ((uint64_t) 1ULL << 32);
+ zone->name = "dma32";
+ zone->zone_mem = mem;
+ zone->max_mem = mem >> 1;
+ zone->emer_mem = (mem >> 1) + (mem >> 2);
+ zone->swap_limit = zone->max_mem - (mem >> 3);
+ zone->used_mem = 0;
+ zone->glob = glob;
+ glob->zone_dma32 = zone;
+ refcount_init(&zone->kobj_ref, 1);
+ glob->zones[glob->num_zones++] = zone;
+ return 0;
+}
+
+int ttm_mem_global_init(struct ttm_mem_global *glob)
+{
+	uint64_t mem;
+ int ret;
+ int i;
+ struct ttm_mem_zone *zone;
+
+ mtx_init(&glob->lock, "ttmgz", NULL, MTX_DEF);
+ glob->swap_queue = taskqueue_create("ttm_swap", M_WAITOK,
+ taskqueue_thread_enqueue, &glob->swap_queue);
+ taskqueue_start_threads(&glob->swap_queue, 1, PVM, "ttm swap");
+ TASK_INIT(&glob->work, 0, ttm_shrink_work, glob);
+
+ refcount_init(&glob->kobj_ref, 1);
+
+ mem = physmem * PAGE_SIZE;
+
+ ret = ttm_mem_init_kernel_zone(glob, mem);
+ if (unlikely(ret != 0))
+ goto out_no_zone;
+ ret = ttm_mem_init_dma32_zone(glob, mem);
+ if (unlikely(ret != 0))
+ goto out_no_zone;
+ for (i = 0; i < glob->num_zones; ++i) {
+ zone = glob->zones[i];
+ printf("[TTM] Zone %7s: Available graphics memory: %llu kiB\n",
+ zone->name, (unsigned long long)zone->max_mem >> 10);
+ }
+ ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+ ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+ return 0;
+out_no_zone:
+ ttm_mem_global_release(glob);
+ return ret;
+}
+
+void ttm_mem_global_release(struct ttm_mem_global *glob)
+{
+ unsigned int i;
+ struct ttm_mem_zone *zone;
+
+ /* let the page allocator first stop the shrink work. */
+ ttm_page_alloc_fini();
+ ttm_dma_page_alloc_fini();
+
+ taskqueue_drain(glob->swap_queue, &glob->work);
+ taskqueue_free(glob->swap_queue);
+ glob->swap_queue = NULL;
+ for (i = 0; i < glob->num_zones; ++i) {
+ zone = glob->zones[i];
+ if (refcount_release(&zone->kobj_ref))
+ ttm_mem_zone_kobj_release(zone);
+ }
+ if (refcount_release(&glob->kobj_ref))
+ ttm_mem_global_kobj_release(glob);
+}
+
+static void ttm_check_swapping(struct ttm_mem_global *glob)
+{
+ bool needs_swapping = false;
+ unsigned int i;
+ struct ttm_mem_zone *zone;
+
+ mtx_lock(&glob->lock);
+ for (i = 0; i < glob->num_zones; ++i) {
+ zone = glob->zones[i];
+ if (zone->used_mem > zone->swap_limit) {
+ needs_swapping = true;
+ break;
+ }
+ }
+
+ mtx_unlock(&glob->lock);
+
+ if (unlikely(needs_swapping))
+ taskqueue_enqueue(glob->swap_queue, &glob->work);
+
+}
+
+static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
+ struct ttm_mem_zone *single_zone,
+ uint64_t amount)
+{
+ unsigned int i;
+ struct ttm_mem_zone *zone;
+
+ mtx_lock(&glob->lock);
+ for (i = 0; i < glob->num_zones; ++i) {
+ zone = glob->zones[i];
+ if (single_zone && zone != single_zone)
+ continue;
+ zone->used_mem -= amount;
+ }
+ mtx_unlock(&glob->lock);
+}
+
+void ttm_mem_global_free(struct ttm_mem_global *glob,
+ uint64_t amount)
+{
+ return ttm_mem_global_free_zone(glob, NULL, amount);
+}
+
+static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
+ struct ttm_mem_zone *single_zone,
+ uint64_t amount, bool reserve)
+{
+ uint64_t limit;
+ int ret = -ENOMEM;
+ unsigned int i;
+ struct ttm_mem_zone *zone;
+
+ mtx_lock(&glob->lock);
+ for (i = 0; i < glob->num_zones; ++i) {
+ zone = glob->zones[i];
+ if (single_zone && zone != single_zone)
+ continue;
+
+ limit = (priv_check(curthread, PRIV_VM_MLOCK) == 0) ?
+ zone->emer_mem : zone->max_mem;
+
+ if (zone->used_mem > limit)
+ goto out_unlock;
+ }
+
+ if (reserve) {
+ for (i = 0; i < glob->num_zones; ++i) {
+ zone = glob->zones[i];
+ if (single_zone && zone != single_zone)
+ continue;
+ zone->used_mem += amount;
+ }
+ }
+
+ ret = 0;
+out_unlock:
+ mtx_unlock(&glob->lock);
+ ttm_check_swapping(glob);
+
+ return ret;
+}
+
+
+static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
+ struct ttm_mem_zone *single_zone,
+ uint64_t memory,
+ bool no_wait, bool interruptible)
+{
+ int count = TTM_MEMORY_ALLOC_RETRIES;
+
+ while (unlikely(ttm_mem_global_reserve(glob,
+ single_zone,
+ memory, true)
+ != 0)) {
+ if (no_wait)
+ return -ENOMEM;
+ if (unlikely(count-- == 0))
+ return -ENOMEM;
+ ttm_shrink(glob, false, memory + (memory >> 2) + 16);
+ }
+
+ return 0;
+}
+
+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+ bool no_wait, bool interruptible)
+{
+ /**
+ * Normal allocations of kernel memory are registered in
+ * all zones.
+ */
+
+ return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
+ interruptible);
+}
+
+#define page_to_pfn(pp) OFF_TO_IDX(VM_PAGE_TO_PHYS(pp))
+
+int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
+ struct vm_page *page,
+ bool no_wait, bool interruptible)
+{
+
+ struct ttm_mem_zone *zone = NULL;
+
+ /**
+	 * Page allocations may be registered in a single zone
+ * only if highmem or !dma32.
+ */
+
+ if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
+ zone = glob->zone_kernel;
+ return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
+ interruptible);
+}
+
+void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page)
+{
+ struct ttm_mem_zone *zone = NULL;
+
+ if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
+ zone = glob->zone_kernel;
+ ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+}
+
+
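+/*
+ * Rounding sketch (illustrative values, assuming PAGE_SIZE == 4096):
+ * sub-page sizes are rounded up to the next power of two, larger sizes to a
+ * page boundary, e.g.
+ *
+ *	ttm_round_pot(100)  == 128
+ *	ttm_round_pot(4096) == 4096	(already a power of two)
+ *	ttm_round_pot(5000) == 8192	(PAGE_ALIGN(5000))
+ */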
+size_t ttm_round_pot(size_t size)
+{
+ if ((size & (size - 1)) == 0)
+ return size;
+ else if (size > PAGE_SIZE)
+ return PAGE_ALIGN(size);
+ else {
+ size_t tmp_size = 4;
+
+ while (tmp_size < size)
+ tmp_size <<= 1;
+
+ return tmp_size;
+ }
+ return 0;
+}
diff --git a/sys/dev/drm2/ttm/ttm_memory.h b/sys/dev/drm2/ttm/ttm_memory.h
new file mode 100644
index 0000000..53263c6
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_memory.h
@@ -0,0 +1,149 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/* $FreeBSD$ */
+
+#ifndef TTM_MEMORY_H
+#define TTM_MEMORY_H
+
+/**
+ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
+ *
+ * @do_shrink: The callback function.
+ *
+ * Arguments to the do_shrink functions are intended to be passed using
+ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
+ * and can be accessed using container_of().
+ */
+
+struct ttm_mem_shrink {
+ int (*do_shrink) (struct ttm_mem_shrink *);
+};
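+
+/*
+ * Inheritance sketch (hypothetical driver code, not part of this header):
+ * the argument to do_shrink() is recovered with container_of(), so a driver
+ * embeds struct ttm_mem_shrink in its own shrink object.
+ *
+ *	struct my_shrink {
+ *		struct ttm_mem_shrink shrink;
+ *		struct my_device *dev;
+ *	};
+ *
+ *	static int my_do_shrink(struct ttm_mem_shrink *shrink)
+ *	{
+ *		struct my_shrink *ms =
+ *		    container_of(shrink, struct my_shrink, shrink);
+ *
+ *		return (my_device_swap_out(ms->dev));	// hypothetical helper
+ *	}
+ */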
+
+/**
+ * struct ttm_mem_global - Global memory accounting structure.
+ *
+ * @shrink: A single callback to shrink TTM memory usage. Extend this
+ * to a linked list to be able to handle multiple callbacks when needed.
+ * @swap_queue: A workqueue to handle shrinking in low memory situations. We
+ * need a separate workqueue since it will spend a lot of time waiting
+ * for the GPU, and this will otherwise block other workqueue tasks(?)
+ * At this point we use only a single-threaded workqueue.
+ * @work: The workqueue callback for the shrink queue.
+ * @lock: Lock to protect the @shrink - and the memory accounting members,
+ * that is, essentially the whole structure with some exceptions.
+ * @zones: Array of pointers to accounting zones.
+ * @num_zones: Number of populated entries in the @zones array.
+ * @zone_kernel: Pointer to the kernel zone.
+ * @zone_highmem: Pointer to the highmem zone if there is one.
+ * @zone_dma32: Pointer to the dma32 zone if there is one.
+ *
+ * Note that this structure is not per device. It should be global for all
+ * graphics devices.
+ */
+
+#define TTM_MEM_MAX_ZONES 2
+struct ttm_mem_zone;
+struct ttm_mem_global {
+ u_int kobj_ref;
+ struct ttm_mem_shrink *shrink;
+ struct taskqueue *swap_queue;
+ struct task work;
+ struct mtx lock;
+ struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
+ unsigned int num_zones;
+ struct ttm_mem_zone *zone_kernel;
+ struct ttm_mem_zone *zone_dma32;
+};
+
+/**
+ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
+ *
+ * @shrink: The object to initialize.
+ * @func: The callback function.
+ */
+
+static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
+ int (*func) (struct ttm_mem_shrink *))
+{
+ shrink->do_shrink = func;
+}
+
+/**
+ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
+ *
+ * @glob: The struct ttm_mem_global object to register with.
+ * @shrink: An initialized struct ttm_mem_shrink object to register.
+ *
+ * Returns:
+ * -EBUSY: There's already a callback registered. (May change).
+ */
+
+static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
+ struct ttm_mem_shrink *shrink)
+{
+ mtx_lock(&glob->lock);
+ if (glob->shrink != NULL) {
+ mtx_unlock(&glob->lock);
+ return -EBUSY;
+ }
+ glob->shrink = shrink;
+ mtx_unlock(&glob->lock);
+ return 0;
+}
+
+/**
+ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
+ *
+ * @glob: The struct ttm_mem_global object to unregister from.
+ * @shrink: A previously registered struct ttm_mem_shrink object.
+ *
+ */
+
+static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
+ struct ttm_mem_shrink *shrink)
+{
+ mtx_lock(&glob->lock);
+ MPASS(glob->shrink == shrink);
+ glob->shrink = NULL;
+ mtx_unlock(&glob->lock);
+}
+
+struct vm_page;
+
+extern int ttm_mem_global_init(struct ttm_mem_global *glob);
+extern void ttm_mem_global_release(struct ttm_mem_global *glob);
+extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+ bool no_wait, bool interruptible);
+extern void ttm_mem_global_free(struct ttm_mem_global *glob,
+ uint64_t amount);
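+
+/*
+ * Accounting sketch (hypothetical caller code, not part of this header):
+ * each successful ttm_mem_global_alloc() is balanced by a
+ * ttm_mem_global_free() of the same amount once the backing object is gone.
+ * M_MYOBJ and acc_size are placeholders.
+ *
+ *	ret = ttm_mem_global_alloc(glob, acc_size, false, false);
+ *	if (unlikely(ret != 0))
+ *		return (ret);
+ *	obj = malloc(acc_size, M_MYOBJ, M_WAITOK);
+ *	...
+ *	free(obj, M_MYOBJ);
+ *	ttm_mem_global_free(glob, acc_size);
+ */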
+extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
+ struct vm_page *page,
+ bool no_wait, bool interruptible);
+extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
+ struct vm_page *page);
+extern size_t ttm_round_pot(size_t size);
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_module.h b/sys/dev/drm2/ttm/ttm_module.h
new file mode 100644
index 0000000..b1ce2bc
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_module.h
@@ -0,0 +1,37 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+
+#ifndef _TTM_MODULE_H_
+#define _TTM_MODULE_H_
+
+#define TTM_PFX "[TTM] "
+
+#endif /* _TTM_MODULE_H_ */
diff --git a/sys/dev/drm2/ttm/ttm_object.c b/sys/dev/drm2/ttm/ttm_object.c
new file mode 100644
index 0000000..fd48044
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_object.c
@@ -0,0 +1,455 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_object.c
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/**
+ * struct ttm_object_file
+ *
+ * @tdev: Pointer to the ttm_object_device.
+ *
+ * @lock: Lock that protects the ref_list list and the
+ * ref_hash hash tables.
+ *
+ * @ref_list: List of ttm_ref_objects to be destroyed at
+ * file release.
+ *
+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
+ * for fast lookup of ref objects given a base object.
+ */
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <sys/rwlock.h>
+#include <dev/drm2/ttm/ttm_object.h>
+#include <dev/drm2/ttm/ttm_module.h>
+
+struct ttm_object_file {
+ struct ttm_object_device *tdev;
+ struct rwlock lock;
+ struct list_head ref_list;
+ struct drm_open_hash ref_hash[TTM_REF_NUM];
+ u_int refcount;
+};
+
+/**
+ * struct ttm_object_device
+ *
+ * @object_lock: lock that protects the object_hash hash table.
+ *
+ * @object_hash: hash table for fast lookup of object global names.
+ *
+ * @object_count: Per device object count.
+ *
+ * This is the per-device data structure needed for ttm object management.
+ */
+
+struct ttm_object_device {
+ struct rwlock object_lock;
+ struct drm_open_hash object_hash;
+ atomic_t object_count;
+ struct ttm_mem_global *mem_glob;
+};
+
+/**
+ * struct ttm_ref_object
+ *
+ * @hash: Hash entry for the per-file object reference hash.
+ *
+ * @head: List entry for the per-file list of ref-objects.
+ *
+ * @kref: Ref count.
+ *
+ * @obj: Base object this ref object is referencing.
+ *
+ * @ref_type: Type of ref object.
+ *
+ * This is similar to an idr object, but it also has a hash table entry
+ * that allows lookup with a pointer to the referenced object as a key. In
+ * that way, one can easily detect whether a base object is referenced by
+ * a particular ttm_object_file. It also carries a ref count to avoid creating
+ * multiple ref objects if a ttm_object_file references the same base
+ * object more than once.
+ */
+
+struct ttm_ref_object {
+ struct drm_hash_item hash;
+ struct list_head head;
+ u_int kref;
+ enum ttm_ref_type ref_type;
+ struct ttm_base_object *obj;
+ struct ttm_object_file *tfile;
+};
+
+MALLOC_DEFINE(M_TTM_OBJ_FILE, "ttm_obj_file", "TTM File Objects");
+
+static inline struct ttm_object_file *
+ttm_object_file_ref(struct ttm_object_file *tfile)
+{
+ refcount_acquire(&tfile->refcount);
+ return tfile;
+}
+
+static void ttm_object_file_destroy(struct ttm_object_file *tfile)
+{
+
+ free(tfile, M_TTM_OBJ_FILE);
+}
+
+
+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
+{
+ struct ttm_object_file *tfile = *p_tfile;
+
+ *p_tfile = NULL;
+ if (refcount_release(&tfile->refcount))
+ ttm_object_file_destroy(tfile);
+}
+
+
+int ttm_base_object_init(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ bool shareable,
+ enum ttm_object_type object_type,
+ void (*rcount_release) (struct ttm_base_object **),
+ void (*ref_obj_release) (struct ttm_base_object *,
+ enum ttm_ref_type ref_type))
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ int ret;
+
+ base->shareable = shareable;
+ base->tfile = ttm_object_file_ref(tfile);
+ base->refcount_release = rcount_release;
+ base->ref_obj_release = ref_obj_release;
+ base->object_type = object_type;
+ refcount_init(&base->refcount, 1);
+ rw_init(&tdev->object_lock, "ttmbao");
+ rw_wlock(&tdev->object_lock);
+ ret = drm_ht_just_insert_please(&tdev->object_hash,
+ &base->hash,
+ (unsigned long)base, 31, 0, 0);
+ rw_wunlock(&tdev->object_lock);
+ if (unlikely(ret != 0))
+ goto out_err0;
+
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0))
+ goto out_err1;
+
+ ttm_base_object_unref(&base);
+
+ return 0;
+out_err1:
+ rw_wlock(&tdev->object_lock);
+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ rw_wunlock(&tdev->object_lock);
+out_err0:
+ return ret;
+}
+
+static void ttm_release_base(struct ttm_base_object *base)
+{
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ rw_wunlock(&tdev->object_lock);
+ /*
+ * Note: We don't use synchronize_rcu() here because it's far
+ * too slow. It's up to the user to free the object using
+ * call_rcu() or ttm_base_object_kfree().
+ */
+
+ if (base->refcount_release) {
+ ttm_object_file_unref(&base->tfile);
+ base->refcount_release(&base);
+ }
+ rw_wlock(&tdev->object_lock);
+}
+
+void ttm_base_object_unref(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ *p_base = NULL;
+
+ /*
+ * Need to take the lock here to avoid racing with
+ * users trying to look up the object.
+ */
+
+ rw_wlock(&tdev->object_lock);
+ if (refcount_release(&base->refcount))
+ ttm_release_base(base);
+ rw_wunlock(&tdev->object_lock);
+}
+
+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
+ uint32_t key)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct ttm_base_object *base;
+ struct drm_hash_item *hash;
+ int ret;
+
+ rw_rlock(&tdev->object_lock);
+ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+
+ if (ret == 0) {
+ base = drm_hash_entry(hash, struct ttm_base_object, hash);
+ refcount_acquire(&base->refcount);
+ }
+ rw_runlock(&tdev->object_lock);
+
+ if (unlikely(ret != 0))
+ return NULL;
+
+ if (tfile != base->tfile && !base->shareable) {
+ printf("[TTM] Attempted access of non-shareable object %p\n",
+ base);
+ ttm_base_object_unref(&base);
+ return NULL;
+ }
+
+ return base;
+}
+
+MALLOC_DEFINE(M_TTM_OBJ_REF, "ttm_obj_ref", "TTM Ref Objects");
+
+int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+ struct drm_hash_item *hash;
+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+ int ret = -EINVAL;
+
+ if (existed != NULL)
+ *existed = true;
+
+ while (ret == -EINVAL) {
+ rw_rlock(&tfile->lock);
+ ret = drm_ht_find_item(ht, base->hash.key, &hash);
+
+ if (ret == 0) {
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ refcount_acquire(&ref->kref);
+ rw_runlock(&tfile->lock);
+ break;
+ }
+
+ rw_runlock(&tfile->lock);
+ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
+ false, false);
+ if (unlikely(ret != 0))
+ return ret;
+ ref = malloc(sizeof(*ref), M_TTM_OBJ_REF, M_WAITOK);
+ if (unlikely(ref == NULL)) {
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ return -ENOMEM;
+ }
+
+ ref->hash.key = base->hash.key;
+ ref->obj = base;
+ ref->tfile = tfile;
+ ref->ref_type = ref_type;
+ refcount_init(&ref->kref, 1);
+
+ rw_wlock(&tfile->lock);
+ ret = drm_ht_insert_item(ht, &ref->hash);
+
+ if (ret == 0) {
+ list_add_tail(&ref->head, &tfile->ref_list);
+ refcount_acquire(&base->refcount);
+ rw_wunlock(&tfile->lock);
+ if (existed != NULL)
+ *existed = false;
+ break;
+ }
+
+ rw_wunlock(&tfile->lock);
+ MPASS(ret == -EINVAL);
+
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ free(ref, M_TTM_OBJ_REF);
+ }
+
+ return ret;
+}
+
+static void ttm_ref_object_release(struct ttm_ref_object *ref)
+{
+ struct ttm_base_object *base = ref->obj;
+ struct ttm_object_file *tfile = ref->tfile;
+ struct drm_open_hash *ht;
+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+
+ ht = &tfile->ref_hash[ref->ref_type];
+ (void)drm_ht_remove_item(ht, &ref->hash);
+ list_del(&ref->head);
+ rw_wunlock(&tfile->lock);
+
+ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
+ base->ref_obj_release(base, ref->ref_type);
+
+ ttm_base_object_unref(&ref->obj);
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ free(ref, M_TTM_OBJ_REF);
+ rw_wlock(&tfile->lock);
+}
+
+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ unsigned long key, enum ttm_ref_type ref_type)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+ struct drm_hash_item *hash;
+ int ret;
+
+ rw_wlock(&tfile->lock);
+ ret = drm_ht_find_item(ht, key, &hash);
+ if (unlikely(ret != 0)) {
+ rw_wunlock(&tfile->lock);
+ return -EINVAL;
+ }
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ if (refcount_release(&ref->kref))
+ ttm_ref_object_release(ref);
+ rw_wunlock(&tfile->lock);
+ return 0;
+}
+
+void ttm_object_file_release(struct ttm_object_file **p_tfile)
+{
+ struct ttm_ref_object *ref;
+ struct list_head *list;
+ unsigned int i;
+ struct ttm_object_file *tfile = *p_tfile;
+
+ *p_tfile = NULL;
+ rw_wlock(&tfile->lock);
+
+ /*
+ * Since we release the lock within the loop, we have to
+ * restart it from the beginning each time.
+ */
+
+ while (!list_empty(&tfile->ref_list)) {
+ list = tfile->ref_list.next;
+ ref = list_entry(list, struct ttm_ref_object, head);
+ ttm_ref_object_release(ref);
+ }
+
+ for (i = 0; i < TTM_REF_NUM; ++i)
+ drm_ht_remove(&tfile->ref_hash[i]);
+
+ rw_wunlock(&tfile->lock);
+ ttm_object_file_unref(&tfile);
+}
+
+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
+ unsigned int hash_order)
+{
+ struct ttm_object_file *tfile;
+ unsigned int i;
+ unsigned int j = 0;
+ int ret;
+
+ tfile = malloc(sizeof(*tfile), M_TTM_OBJ_FILE, M_WAITOK);
+ rw_init(&tfile->lock, "ttmfo");
+ tfile->tdev = tdev;
+ refcount_init(&tfile->refcount, 1);
+ INIT_LIST_HEAD(&tfile->ref_list);
+
+ for (i = 0; i < TTM_REF_NUM; ++i) {
+ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
+ if (ret) {
+ j = i;
+ goto out_err;
+ }
+ }
+
+ return tfile;
+out_err:
+ for (i = 0; i < j; ++i)
+ drm_ht_remove(&tfile->ref_hash[i]);
+
+ free(tfile, M_TTM_OBJ_FILE);
+
+ return NULL;
+}
+
+MALLOC_DEFINE(M_TTM_OBJ_DEV, "ttm_obj_dev", "TTM Device Objects");
+
+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
+ *mem_glob,
+ unsigned int hash_order)
+{
+ struct ttm_object_device *tdev;
+ int ret;
+
+ tdev = malloc(sizeof(*tdev), M_TTM_OBJ_DEV, M_WAITOK);
+ tdev->mem_glob = mem_glob;
+ rw_init(&tdev->object_lock, "ttmdo");
+ atomic_set(&tdev->object_count, 0);
+ ret = drm_ht_create(&tdev->object_hash, hash_order);
+
+ if (ret == 0)
+ return tdev;
+
+ free(tdev, M_TTM_OBJ_DEV);
+ return NULL;
+}
+
+void ttm_object_device_release(struct ttm_object_device **p_tdev)
+{
+ struct ttm_object_device *tdev = *p_tdev;
+
+ *p_tdev = NULL;
+
+ rw_wlock(&tdev->object_lock);
+ drm_ht_remove(&tdev->object_hash);
+ rw_wunlock(&tdev->object_lock);
+
+ free(tdev, M_TTM_OBJ_DEV);
+}
diff --git a/sys/dev/drm2/ttm/ttm_object.h b/sys/dev/drm2/ttm/ttm_object.h
new file mode 100644
index 0000000..8a286f1
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_object.h
@@ -0,0 +1,271 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+/** @file ttm_object.h
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+#ifndef _TTM_OBJECT_H_
+#define _TTM_OBJECT_H_
+
+#include <dev/drm2/drm_hashtab.h>
+#include <dev/drm2/ttm/ttm_memory.h>
+
+/**
+ * enum ttm_ref_type
+ *
+ * Describes what type of reference a ref object holds.
+ *
+ * TTM_REF_USAGE is a simple refcount on a base object.
+ *
+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
+ * buffer object.
+ *
+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
+ * buffer object.
+ *
+ */
+
+enum ttm_ref_type {
+ TTM_REF_USAGE,
+ TTM_REF_SYNCCPU_READ,
+ TTM_REF_SYNCCPU_WRITE,
+ TTM_REF_NUM
+};
+
+/**
+ * enum ttm_object_type
+ *
+ * One entry per ttm object type.
+ * Device-specific types should use the
+ * ttm_driver_typex types.
+ */
+
+enum ttm_object_type {
+ ttm_fence_type,
+ ttm_buffer_type,
+ ttm_lock_type,
+ ttm_driver_type0 = 256,
+ ttm_driver_type1,
+ ttm_driver_type2,
+ ttm_driver_type3,
+ ttm_driver_type4,
+ ttm_driver_type5
+};
+
+struct ttm_object_file;
+struct ttm_object_device;
+
+/**
+ * struct ttm_base_object
+ *
+ * @hash: hash entry for the per-device object hash.
+ * @type: derived type this object is base class for.
+ * @shareable: Other ttm_object_files can access this object.
+ *
+ * @tfile: Pointer to ttm_object_file of the creator.
+ * NULL if the object was not created by a user request.
+ * (kernel object).
+ *
+ * @refcount: Number of references to this object, not
+ * including the hash entry. A reference to a base object can
+ * only be held by a ref object.
+ *
+ * @refcount_release: A function to be called when there are
+ * no more references to this object. This function should
+ * destroy the object (or make sure destruction eventually happens),
+ * and when it is called, the object has
+ * already been taken out of the per-device hash. The parameter
+ * "base" should be set to NULL by the function.
+ *
+ * @ref_obj_release: A function to be called when a reference object
+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
+ * This function may, for example, release a lock held by a user-space
+ * process.
+ *
+ * This struct is intended to be used as a base struct for objects that
+ * are visible to user-space. It provides a global name, race-safe
+ * access and refcounting, minimal access control and hooks for unref actions.
+ */
+
+struct ttm_base_object {
+ /* struct rcu_head rhead;XXXKIB */
+ struct drm_hash_item hash;
+ enum ttm_object_type object_type;
+ bool shareable;
+ struct ttm_object_file *tfile;
+ u_int refcount;
+ void (*refcount_release) (struct ttm_base_object **base);
+ void (*ref_obj_release) (struct ttm_base_object *base,
+ enum ttm_ref_type ref_type);
+};
+
+/**
+ * ttm_base_object_init
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @base: The struct ttm_base_object to initialize.
+ * @shareable: This object is shareable with other applications.
+ * (different @tfile pointers.)
+ * @type: The object type.
+ * @refcount_release: See the struct ttm_base_object description.
+ * @ref_obj_release: See the struct ttm_base_object description.
+ *
+ * Initializes a struct ttm_base_object.
+ */
+
+extern int ttm_base_object_init(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release) (struct ttm_base_object
+ **),
+ void (*ref_obj_release) (struct ttm_base_object
+ *,
+ enum ttm_ref_type
+ ref_type));
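+
+/*
+ * Embedding sketch (hypothetical driver code, not part of this header): a
+ * user-visible object embeds struct ttm_base_object and supplies a
+ * refcount_release hook that clears the caller's pointer and frees the
+ * containing structure, as described above. M_MYOBJ is a placeholder
+ * malloc type.
+ *
+ *	struct my_user_object {
+ *		struct ttm_base_object base;
+ *		// driver private data ...
+ *	};
+ *
+ *	static void my_release(struct ttm_base_object **p_base)
+ *	{
+ *		struct ttm_base_object *base = *p_base;
+ *		struct my_user_object *obj =
+ *		    container_of(base, struct my_user_object, base);
+ *
+ *		*p_base = NULL;
+ *		free(obj, M_MYOBJ);
+ *	}
+ *
+ *	ret = ttm_base_object_init(tfile, &obj->base, true,
+ *	    ttm_driver_type0, &my_release, NULL);
+ */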
+
+/**
+ * ttm_base_object_lookup
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * Also verifies that the object is visible to the application, by
+ * comparing the @tfile argument and checking the object shareable flag.
+ */
+
+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
+ *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_unref
+ *
+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
+ *
+ * Decrements the base object refcount and clears the pointer pointed to by
+ * p_base.
+ */
+
+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+
+/**
+ * ttm_ref_object_add.
+ *
+ * @tfile: A struct ttm_object_file representing the application owning the
+ * ref_object.
+ * @base: The base object to reference.
+ * @ref_type: The type of reference.
+ * @existed: Upon completion, indicates that an identical reference object
+ * already existed, and the refcount was upped on that object instead.
+ *
+ * Adding a ref object to a base object is basically like referencing the
+ * base object, but a user-space application holds the reference. When the
+ * file corresponding to @tfile is closed, all its reference objects are
+ * deleted. A reference object can have different types depending on what
+ * it's intended for. It can be refcounting to prevent object destruction.
+ * When user-space takes a lock, it can add a ref object to that lock to
+ * make sure the lock is released if the application dies. A ref object
+ * will hold a single reference on a base object.
+ */
+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed);
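+
+/*
+ * Reference sketch (hypothetical caller code, not part of this header): an
+ * ioctl handing a base object out to user-space adds a TTM_REF_USAGE
+ * reference on behalf of the calling file and then drops its own lookup
+ * reference. "handle" and "existed" are placeholders.
+ *
+ *	base = ttm_base_object_lookup(tfile, handle);
+ *	if (base == NULL)
+ *		return (-EINVAL);
+ *	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
+ *	ttm_base_object_unref(&base);
+ */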
+/**
+ * ttm_ref_object_base_unref
+ *
+ * @key: Key representing the base object.
+ * @ref_type: Ref type of the ref object to be dereferenced.
+ *
+ * Unreference a ref object with type @ref_type
+ * on the base object identified by @key. If there are no duplicate
+ * references, the ref object will be destroyed and the base object
+ * will be unreferenced.
+ */
+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ unsigned long key,
+ enum ttm_ref_type ref_type);
+
+/**
+ * ttm_object_file_init - initialize a struct ttm_object file
+ *
+ * @tdev: A struct ttm_object device this file is initialized on.
+ * @hash_order: Order of the hash table used to hold the reference objects.
+ *
+ * This is typically called by the file_ops::open function.
+ */
+
+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
+ *tdev,
+ unsigned int hash_order);
+
+/**
+ * ttm_object_file_release - release data held by a ttm_object_file
+ *
+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
+ * *p_tfile will be set to NULL by this function.
+ *
+ * Releases all data associated by a ttm_object_file.
+ * Typically called from file_ops::release. The caller must
+ * ensure that there are no concurrent users of tfile.
+ */
+
+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
+
+/**
+ * ttm_object_device_init - initialize a struct ttm_object_device
+ *
+ * @hash_order: Order of hash table used to hash the base objects.
+ *
+ * This function is typically called on device initialization to prepare
+ * data structures needed for ttm base and ref objects.
+ */
+
+extern struct ttm_object_device *ttm_object_device_init
+ (struct ttm_mem_global *mem_glob, unsigned int hash_order);
+
+/**
+ * ttm_object_device_release - release data held by a ttm_object_device
+ *
+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
+ * *p_tdev will be set to NULL by this function.
+ *
+ * Releases all data associated by a ttm_object_device.
+ * Typically called from driver::unload before the destruction of the
+ * device private data structure.
+ */
+
+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
+
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_page_alloc.c b/sys/dev/drm2/ttm/ttm_page_alloc.c
new file mode 100644
index 0000000..ffc8483
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_page_alloc.c
@@ -0,0 +1,894 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Jerome Glisse <jglisse@redhat.com>
+ * Pauli Nieminen <suokkos@gmail.com>
+ */
+/*
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ */
+
+/* simple list based uncached page pool
+ * - Pool collects recently freed pages for reuse
+ * - Use page->lru to keep a free list
+ * - doesn't track currently in use pages
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_page_alloc.h>
+
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
+
+#define VM_ALLOC_DMA32 VM_ALLOC_RESERVED1
+
+#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(vm_page_t))
+#define SMALL_ALLOCATION 16
+#define FREE_ALL_PAGES (~0U)
+/* times are in msecs */
+#define PAGE_FREE_INTERVAL 1000
+
+/**
+ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
+ *
+ * @lock: Protects the shared pool from concurrent access. Must be used with
+ * irqsave/irqrestore variants because the pool allocator may be called from
+ * delayed work.
+ * @fill_lock: Prevent concurrent calls to fill.
+ * @list: Pool of free uc/wc pages for fast reuse.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @npages: Number of pages in pool.
+ */
+struct ttm_page_pool {
+ struct mtx lock;
+ bool fill_lock;
+ bool dma32;
+ struct pglist list;
+ int ttm_page_alloc_flags;
+ unsigned npages;
+ char *name;
+ unsigned long nfrees;
+ unsigned long nrefills;
+};
+
+/**
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is the sysfs store. They won't have immediate effect
+ * anyway, so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+ unsigned alloc_size;
+ unsigned max_size;
+ unsigned small;
+};
+
+#define NUM_POOLS 4
+
+/**
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * Manager is read only object for pool code so it doesn't need locking.
+ *
+ * @free_interval: minimum number of jiffies between freeing pages from pool.
+ * @page_alloc_inited: reference counting for pool allocation.
+ * @work: Work that is used to shrink the pool. Work is only run when there
+ * are some pages to free.
+ * @small_allocation: Limit, in number of pages, below which an allocation is
+ * considered small.
+ *
+ * @pools: All pool objects in use.
+ **/
+struct ttm_pool_manager {
+ unsigned int kobj_ref;
+ eventhandler_tag lowmem_handler;
+ struct ttm_pool_opts options;
+
+ union {
+ struct ttm_page_pool pools[NUM_POOLS];
+ struct {
+ struct ttm_page_pool wc_pool;
+ struct ttm_page_pool uc_pool;
+ struct ttm_page_pool wc_pool_dma32;
+ struct ttm_page_pool uc_pool_dma32;
+ } ;
+ };
+};
+
+MALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager");
+
+static void
+ttm_vm_page_free(vm_page_t m)
+{
+
+ KASSERT(m->object == NULL, ("ttm page %p is owned", m));
+ KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
+ KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
+ m->flags &= ~PG_FICTITIOUS;
+ m->oflags |= VPO_UNMANAGED;
+ vm_page_unwire(m, 0);
+ vm_page_free(m);
+}
+
+static vm_memattr_t
+ttm_caching_state_to_vm(enum ttm_caching_state cstate)
+{
+
+ switch (cstate) {
+ case tt_uncached:
+ return (VM_MEMATTR_UNCACHEABLE);
+ case tt_wc:
+ return (VM_MEMATTR_WRITE_COMBINING);
+ case tt_cached:
+ return (VM_MEMATTR_WRITE_BACK);
+ }
+ panic("caching state %d\n", cstate);
+}
+
+static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
+{
+
+ free(m, M_TTM_POOLMGR);
+}
+
+#if 0
+/* XXXKIB sysctl */
+static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
+ struct attribute *attr, const char *buffer, size_t size)
+{
+ int chars;
+ unsigned val;
+ chars = sscanf(buffer, "%u", &val);
+ if (chars == 0)
+ return size;
+
+ /* Convert kb to number of pages */
+ val = val / (PAGE_SIZE >> 10);
+
+ if (attr == &ttm_page_pool_max)
+ m->options.max_size = val;
+ else if (attr == &ttm_page_pool_small)
+ m->options.small = val;
+ else if (attr == &ttm_page_pool_alloc_size) {
+ if (val > NUM_PAGES_TO_ALLOC*8) {
+ pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ return size;
+ } else if (val > NUM_PAGES_TO_ALLOC) {
+ pr_warn("Setting allocation size to larger than %lu is not recommended\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ }
+ m->options.alloc_size = val;
+ }
+
+ return size;
+}
+
+static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
+ struct attribute *attr, char *buffer)
+{
+ unsigned val = 0;
+
+ if (attr == &ttm_page_pool_max)
+ val = m->options.max_size;
+ else if (attr == &ttm_page_pool_small)
+ val = m->options.small;
+ else if (attr == &ttm_page_pool_alloc_size)
+ val = m->options.alloc_size;
+
+ val = val * (PAGE_SIZE >> 10);
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+#endif
+
+static struct ttm_pool_manager *_manager;
+
+static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
+{
+ vm_page_t m;
+ int i;
+
+ for (i = 0; i < addrinarray; i++) {
+ m = pages[i];
+#ifdef TTM_HAS_AGP
+ unmap_page_from_agp(m);
+#endif
+ pmap_page_set_memattr(m, VM_MEMATTR_WRITE_BACK);
+ }
+ return 0;
+}
+
+static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
+{
+ vm_page_t m;
+ int i;
+
+ for (i = 0; i < addrinarray; i++) {
+ m = pages[i];
+#ifdef TTM_HAS_AGP
+ map_page_into_agp(pages[i]);
+#endif
+ pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
+ }
+ return 0;
+}
+
+static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
+{
+ vm_page_t m;
+ int i;
+
+ for (i = 0; i < addrinarray; i++) {
+ m = pages[i];
+#ifdef TTM_HAS_AGP
+ map_page_into_agp(pages[i]);
+#endif
+ pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
+ }
+ return 0;
+}
+
+/**
+ * Select the right pool for the requested caching state and ttm flags.
+ */
+static struct ttm_page_pool *ttm_get_pool(int flags,
+ enum ttm_caching_state cstate)
+{
+ int pool_index;
+
+ if (cstate == tt_cached)
+ return NULL;
+
+ if (cstate == tt_wc)
+ pool_index = 0x0;
+ else
+ pool_index = 0x1;
+
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ pool_index |= 0x2;
+
+ return &_manager->pools[pool_index];
+}
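+
+/*
+ * Pool index mapping implemented above (tt_cached allocations bypass the
+ * pools and return NULL):
+ *
+ *	wc, !dma32 -> pools[0] (wc_pool)
+ *	uc, !dma32 -> pools[1] (uc_pool)
+ *	wc,  dma32 -> pools[2] (wc_pool_dma32)
+ *	uc,  dma32 -> pools[3] (uc_pool_dma32)
+ */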
+
+/* set memory back to wb and free the pages. */
+static void ttm_pages_put(vm_page_t *pages, unsigned npages)
+{
+ unsigned i;
+
+ /* Our VM handles vm memattr automatically on the page free. */
+ if (set_pages_array_wb(pages, npages))
+ printf("[TTM] Failed to set %d pages to wb!\n", npages);
+ for (i = 0; i < npages; ++i)
+ ttm_vm_page_free(pages[i]);
+}
+
+static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+ unsigned freed_pages)
+{
+ pool->npages -= freed_pages;
+ pool->nfrees += freed_pages;
+}
+
+/**
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * number of pages in one go.
+ *
+ * @pool: to free the pages from
+ * @nr_free: If set to FREE_ALL_PAGES, all pages in the pool are freed.
+ **/
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+{
+ vm_page_t p, p1;
+ vm_page_t *pages_to_free;
+ unsigned freed_pages = 0,
+ npages_to_free = nr_free;
+
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ pages_to_free = malloc(npages_to_free * sizeof(vm_page_t),
+ M_TEMP, M_WAITOK | M_ZERO);
+
+restart:
+ mtx_lock(&pool->lock);
+
+ TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, pageq, p1) {
+ if (freed_pages >= npages_to_free)
+ break;
+
+ pages_to_free[freed_pages++] = p;
+ /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+ if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+ /* remove range of pages from the pool */
+ TAILQ_REMOVE(&pool->list, p, pageq);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ /**
+ * Because changing page caching is costly
+ * we unlock the pool to prevent stalling.
+ */
+ mtx_unlock(&pool->lock);
+
+ ttm_pages_put(pages_to_free, freed_pages);
+ if (likely(nr_free != FREE_ALL_PAGES))
+ nr_free -= freed_pages;
+
+ if (NUM_PAGES_TO_ALLOC >= nr_free)
+ npages_to_free = nr_free;
+ else
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ freed_pages = 0;
+
+ /* free all so restart the processing */
+ if (nr_free)
+ goto restart;
+
+ /* Not allowed to fall through or break because
+ * following context is inside spinlock while we are
+ * outside here.
+ */
+ goto out;
+
+ }
+ }
+
+ /* remove range of pages from the pool */
+ if (freed_pages) {
+ TAILQ_REMOVE(&pool->list, p, pageq);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ nr_free -= freed_pages;
+ }
+
+ mtx_unlock(&pool->lock);
+
+ if (freed_pages)
+ ttm_pages_put(pages_to_free, freed_pages);
+out:
+ free(pages_to_free, M_TEMP);
+ return nr_free;
+}
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_pool_get_num_unused_pages(void)
+{
+ unsigned i;
+ int total = 0;
+ for (i = 0; i < NUM_POOLS; ++i)
+ total += _manager->pools[i].npages;
+
+ return total;
+}
+
+/**
+ * Callback for mm to request pool to reduce number of pages held.
+ */
+static int ttm_pool_mm_shrink(void *arg)
+{
+ static unsigned int start_pool = 0;
+ unsigned i;
+ unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
+ struct ttm_page_pool *pool;
+ int shrink_pages = 100; /* XXXKIB */
+
+ pool_offset = pool_offset % NUM_POOLS;
+ /* select start pool in round robin fashion */
+ for (i = 0; i < NUM_POOLS; ++i) {
+ unsigned nr_free = shrink_pages;
+ if (shrink_pages == 0)
+ break;
+ pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+ shrink_pages = ttm_page_pool_free(pool, nr_free);
+ }
+ /* return estimated number of unused pages in pool */
+ return ttm_pool_get_num_unused_pages();
+}
+
+static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+
+ manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
+ ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
+}
+
+static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+
+ EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
+}
+
+static int ttm_set_pages_caching(vm_page_t *pages,
+ enum ttm_caching_state cstate, unsigned cpages)
+{
+ int r = 0;
+ /* Set page caching */
+ switch (cstate) {
+ case tt_uncached:
+ r = set_pages_array_uc(pages, cpages);
+ if (r)
+ printf("[TTM] Failed to set %d pages to uc!\n", cpages);
+ break;
+ case tt_wc:
+ r = set_pages_array_wc(pages, cpages);
+ if (r)
+ printf("[TTM] Failed to set %d pages to wc!\n", cpages);
+ break;
+ default:
+ break;
+ }
+ return r;
+}
+
+/**
+ * Free the pages that failed to change the caching state. If there are any
+ * pages that have already changed their caching state, put them back in the
+ * pool.
+ */
+static void ttm_handle_caching_state_failure(struct pglist *pages,
+ int ttm_flags, enum ttm_caching_state cstate,
+ vm_page_t *failed_pages, unsigned cpages)
+{
+ unsigned i;
+ /* Failed pages have to be freed */
+ for (i = 0; i < cpages; ++i) {
+ TAILQ_REMOVE(pages, failed_pages[i], pageq);
+ ttm_vm_page_free(failed_pages[i]);
+ }
+}
+
+/**
+ * Allocate new pages with correct caching.
+ *
+ * This function is reentrant if the caller updates count depending on the
+ * number of pages returned in the pages array.
+ */
+static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+ vm_page_t *caching_array;
+ vm_page_t p;
+ int r = 0;
+ unsigned i, cpages, aflags;
+ unsigned max_cpages = min(count,
+ (unsigned)(PAGE_SIZE/sizeof(vm_page_t)));
+
+ aflags = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
+ ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
+ VM_ALLOC_ZERO : 0);
+
+ /* allocate array for page caching change */
+ caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP,
+ M_WAITOK | M_ZERO);
+
+ for (i = 0, cpages = 0; i < count; ++i) {
+ p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
+ (ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
+ VM_MAX_ADDRESS, PAGE_SIZE, 0,
+ ttm_caching_state_to_vm(cstate));
+ if (!p) {
+ printf("[TTM] Unable to get page %u\n", i);
+
+ /* store already allocated pages in the pool after
+ * setting the caching state */
+ if (cpages) {
+ r = ttm_set_pages_caching(caching_array,
+ cstate, cpages);
+ if (r)
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ }
+ r = -ENOMEM;
+ goto out;
+ }
+ p->oflags &= ~VPO_UNMANAGED;
+ p->flags |= PG_FICTITIOUS;
+
+#ifdef CONFIG_HIGHMEM /* KIB: nop */
+ /* gfp flags of highmem page should never be dma32 so we
+ * we should be fine in such case
+ */
+ if (!PageHighMem(p))
+#endif
+ {
+ caching_array[cpages++] = p;
+ if (cpages == max_cpages) {
+
+ r = ttm_set_pages_caching(caching_array,
+ cstate, cpages);
+ if (r) {
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ goto out;
+ }
+ cpages = 0;
+ }
+ }
+
+ TAILQ_INSERT_HEAD(pages, p, pageq);
+ }
+
+ if (cpages) {
+ r = ttm_set_pages_caching(caching_array, cstate, cpages);
+ if (r)
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ }
+out:
+ free(caching_array, M_TEMP);
+
+ return r;
+}
+
+/**
+ * Fill the given pool if there aren't enough pages and the requested number of
+ * pages is small.
+ */
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+ vm_page_t p;
+ int r;
+ unsigned cpages = 0;
+ /**
+ * Only allow one pool fill operation at a time.
+	 * If the pool doesn't have enough pages for the allocation, new pages
+	 * are allocated from outside of the pool.
+ */
+ if (pool->fill_lock)
+ return;
+
+ pool->fill_lock = true;
+
+	/* If the allocation request is small and there are not enough
+	 * pages in the pool, we fill the pool up first. */
+ if (count < _manager->options.small
+ && count > pool->npages) {
+ struct pglist new_pages;
+ unsigned alloc_size = _manager->options.alloc_size;
+
+ /**
+ * Can't change page caching if in irqsave context. We have to
+ * drop the pool->lock.
+ */
+ mtx_unlock(&pool->lock);
+
+ TAILQ_INIT(&new_pages);
+ r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
+ ttm_flags, cstate, alloc_size);
+ mtx_lock(&pool->lock);
+
+ if (!r) {
+ TAILQ_CONCAT(&pool->list, &new_pages, pageq);
+ ++pool->nrefills;
+ pool->npages += alloc_size;
+ } else {
+ printf("[TTM] Failed to fill pool (%p)\n", pool);
+			/* Put any pages allocated before the failure into
+			 * the pool. */
+			TAILQ_FOREACH(p, &new_pages, pageq) {
+ ++cpages;
+ }
+ TAILQ_CONCAT(&pool->list, &new_pages, pageq);
+ pool->npages += cpages;
+ }
+
+ }
+ pool->fill_lock = false;
+}
+
+/**
+ * Cut 'count' number of pages from the pool and put them on the return list.
+ *
+ * @return count of pages still required to fulfill the request.
+ */
+static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+ struct pglist *pages,
+ int ttm_flags,
+ enum ttm_caching_state cstate,
+ unsigned count)
+{
+ vm_page_t p;
+ unsigned i;
+
+ mtx_lock(&pool->lock);
+ ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);
+
+ if (count >= pool->npages) {
+ /* take all pages from the pool */
+ TAILQ_CONCAT(pages, &pool->list, pageq);
+ count -= pool->npages;
+ pool->npages = 0;
+ goto out;
+ }
+ for (i = 0; i < count; i++) {
+ p = TAILQ_FIRST(&pool->list);
+ TAILQ_REMOVE(&pool->list, p, pageq);
+ TAILQ_INSERT_TAIL(pages, p, pageq);
+ }
+ pool->npages -= count;
+ count = 0;
+out:
+ mtx_unlock(&pool->lock);
+ return count;
+}
+
+/* Put all pages in the pages list into the correct pool to wait for reuse */
+static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
+ enum ttm_caching_state cstate)
+{
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ unsigned i;
+
+ if (pool == NULL) {
+ /* No pool for this memory type so free the pages */
+ for (i = 0; i < npages; i++) {
+ if (pages[i]) {
+ ttm_vm_page_free(pages[i]);
+ pages[i] = NULL;
+ }
+ }
+ return;
+ }
+
+ mtx_lock(&pool->lock);
+ for (i = 0; i < npages; i++) {
+ if (pages[i]) {
+ TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
+ pages[i] = NULL;
+ pool->npages++;
+ }
+ }
+ /* Check that we don't go over the pool limit */
+ npages = 0;
+ if (pool->npages > _manager->options.max_size) {
+ npages = pool->npages - _manager->options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC number of pages
+ * to reduce calls to set_memory_wb */
+ if (npages < NUM_PAGES_TO_ALLOC)
+ npages = NUM_PAGES_TO_ALLOC;
+ }
+ mtx_unlock(&pool->lock);
+ if (npages)
+ ttm_page_pool_free(pool, npages);
+}
+
+/*
+ * On success the pages array holds npages correctly
+ * cached pages.
+ */
+static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
+ enum ttm_caching_state cstate)
+{
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct pglist plist;
+ vm_page_t p = NULL;
+ int gfp_flags, aflags;
+ unsigned count;
+ int r;
+
+ aflags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+ ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);
+
+ /* No pool for cached pages */
+ if (pool == NULL) {
+ for (r = 0; r < npages; ++r) {
+ p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
+ (flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
+ VM_MAX_ADDRESS, PAGE_SIZE,
+ 0, ttm_caching_state_to_vm(cstate));
+ if (!p) {
+ printf("[TTM] Unable to allocate page\n");
+ return -ENOMEM;
+ }
+ p->oflags &= ~VPO_UNMANAGED;
+ p->flags |= PG_FICTITIOUS;
+ pages[r] = p;
+ }
+ return 0;
+ }
+
+ /* combine zero flag to pool flags */
+ gfp_flags = flags | pool->ttm_page_alloc_flags;
+
+ /* First we take pages from the pool */
+ TAILQ_INIT(&plist);
+ npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+ count = 0;
+ TAILQ_FOREACH(p, &plist, pageq) {
+ pages[count++] = p;
+ }
+
+ /* clear the pages coming from the pool if requested */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+ TAILQ_FOREACH(p, &plist, pageq) {
+ pmap_zero_page(p);
+ }
+ }
+
+	/* If the pool didn't have enough pages, allocate new ones. */
+	if (npages > 0) {
+		/* ttm_alloc_new_pages doesn't reference the pool so we can
+		 * run multiple requests in parallel.
+		 */
+ TAILQ_INIT(&plist);
+ r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
+ npages);
+ TAILQ_FOREACH(p, &plist, pageq) {
+ pages[count++] = p;
+ }
+ if (r) {
+			/* If there are any pages in the list, put them back
+			 * into the pool. */
+ printf("[TTM] Failed to allocate extra pages for large request\n");
+ ttm_put_pages(pages, count, flags, cstate);
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+ char *name)
+{
+ mtx_init(&pool->lock, "ttmpool", NULL, MTX_DEF);
+ pool->fill_lock = false;
+ TAILQ_INIT(&pool->list);
+ pool->npages = pool->nfrees = 0;
+ pool->ttm_page_alloc_flags = flags;
+ pool->name = name;
+}
+
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+
+ if (_manager != NULL)
+ printf("[TTM] manager != NULL\n");
+ printf("[TTM] Initializing pool allocator\n");
+
+ _manager = malloc(sizeof(*_manager), M_TTM_POOLMGR, M_WAITOK | M_ZERO);
+
+ ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
+ ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
+ ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
+ TTM_PAGE_FLAG_DMA32, "wc dma");
+ ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
+ TTM_PAGE_FLAG_DMA32, "uc dma");
+
+ _manager->options.max_size = max_pages;
+ _manager->options.small = SMALL_ALLOCATION;
+ _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+ refcount_init(&_manager->kobj_ref, 1);
+ ttm_pool_mm_shrink_init(_manager);
+
+ return 0;
+}
+
+void ttm_page_alloc_fini(void)
+{
+ int i;
+
+ printf("[TTM] Finalizing pool allocator\n");
+ ttm_pool_mm_shrink_fini(_manager);
+
+ for (i = 0; i < NUM_POOLS; ++i)
+ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+
+ if (refcount_release(&_manager->kobj_ref))
+ ttm_pool_kobj_release(_manager);
+ _manager = NULL;
+}
+
+int ttm_pool_populate(struct ttm_tt *ttm)
+{
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ unsigned i;
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ ret = ttm_get_pages(&ttm->pages[i], 1,
+ ttm->page_flags,
+ ttm->caching_state);
+ if (ret != 0) {
+ ttm_pool_unpopulate(ttm);
+ return -ENOMEM;
+ }
+
+ ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+ false, false);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return -ENOMEM;
+ }
+ }
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return ret;
+ }
+ }
+
+ ttm->state = tt_unbound;
+ return 0;
+}
+
+void ttm_pool_unpopulate(struct ttm_tt *ttm)
+{
+ unsigned i;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ if (ttm->pages[i]) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ ttm->pages[i]);
+ ttm_put_pages(&ttm->pages[i], 1,
+ ttm->page_flags,
+ ttm->caching_state);
+ }
+ }
+ ttm->state = tt_unpopulated;
+}
+
+#if 0
+/* XXXKIB sysctl */
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ struct ttm_page_pool *p;
+ unsigned i;
+ char *h[] = {"pool", "refills", "pages freed", "size"};
+ if (!_manager) {
+ seq_printf(m, "No pool allocator running.\n");
+ return 0;
+ }
+ seq_printf(m, "%6s %12s %13s %8s\n",
+ h[0], h[1], h[2], h[3]);
+ for (i = 0; i < NUM_POOLS; ++i) {
+ p = &_manager->pools[i];
+
+ seq_printf(m, "%6s %12ld %13ld %8d\n",
+ p->name, p->nrefills,
+ p->nfrees, p->npages);
+ }
+ return 0;
+}
+#endif
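
Note (illustration only, not part of this change): a driver with no special DMA
requirements typically forwards its ttm_bo_driver populate callbacks straight to
the pool allocator above. A minimal sketch, using hypothetical "mydrv" names:

/* Hypothetical driver glue showing how the generic page pool is consumed. */
static int
mydrv_ttm_tt_populate(struct ttm_tt *ttm)
{

	/* Hand the whole ttm to the generic page pool. */
	return (ttm_pool_populate(ttm));
}

static void
mydrv_ttm_tt_unpopulate(struct ttm_tt *ttm)
{

	ttm_pool_unpopulate(ttm);
}

static struct ttm_bo_driver mydrv_bo_driver = {
	/* ... other callbacks omitted ... */
	.ttm_tt_populate = mydrv_ttm_tt_populate,
	.ttm_tt_unpopulate = mydrv_ttm_tt_unpopulate,
};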
diff --git a/sys/dev/drm2/ttm/ttm_page_alloc.h b/sys/dev/drm2/ttm/ttm_page_alloc.h
new file mode 100644
index 0000000..0824e2d
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_page_alloc.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Jerome Glisse <jglisse@redhat.com>
+ */
+/* $FreeBSD$ */
+#ifndef TTM_PAGE_ALLOC
+#define TTM_PAGE_ALLOC
+
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_memory.h>
+
+/**
+ * Initialize pool allocator.
+ */
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+/**
+ * Free pool allocator.
+ */
+void ttm_page_alloc_fini(void);
+
+/**
+ * ttm_pool_populate:
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Add backing pages to all of @ttm
+ */
+extern int ttm_pool_populate(struct ttm_tt *ttm);
+
+/**
+ * ttm_pool_unpopulate:
+ *
+ * @ttm: The struct ttm_tt which to free backing pages.
+ *
+ * Free all pages of @ttm
+ */
+extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
+
+/**
+ * Output the state of pools to debugfs file
+ */
+/* XXXKIB
+extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+*/
+
+#ifdef CONFIG_SWIOTLB
+/**
+ * Initialize pool allocator.
+ */
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+
+/**
+ * Free pool allocator.
+ */
+void ttm_dma_page_alloc_fini(void);
+
+/**
+ * Output the state of pools to debugfs file
+ */
+extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
+
+extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+
+#else
+static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
+ unsigned max_pages)
+{
+ return -ENODEV;
+}
+
+static inline void ttm_dma_page_alloc_fini(void) { return; }
+
+/* XXXKIB
+static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ return 0;
+}
+*/
+#endif
+
+#endif
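
Illustrative usage (not taken from this commit): because the stubbed
ttm_dma_page_alloc_init() above returns -ENODEV when CONFIG_SWIOTLB is not set,
a caller can probe for the coherent DMA pool and fall back to the generic pool.
A hedged sketch with hypothetical names; where these inits are actually invoked
in the real tree may differ:

/*
 * Hypothetical bring-up path: try the per-device DMA pool allocator
 * first; if it is compiled out, rely on the generic page pool only.
 */
static int
mydrv_pools_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	ret = ttm_page_alloc_init(glob, max_pages);
	if (ret != 0)
		return (ret);
	if (ttm_dma_page_alloc_init(glob, max_pages) == -ENODEV)
		printf("mydrv: coherent DMA pool unavailable, "
		    "using the generic page pool only\n");
	return (0);
}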
diff --git a/sys/dev/drm2/ttm/ttm_page_alloc_dma.c b/sys/dev/drm2/ttm/ttm_page_alloc_dma.c
new file mode 100644
index 0000000..a5c6fed
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_page_alloc_dma.c
@@ -0,0 +1,1134 @@
+/*
+ * Copyright 2011 (c) Oracle Corp.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ */
+
+/*
+ * A simple DMA pool loosely based on dmapool.c. It has certain advantages
+ * over the DMA pools:
+ * - Pool collects recently freed pages for reuse (and hooks up to
+ * the shrinker).
+ * - Tracks currently in use pages
+ * - Tracks whether the page is UC, WB or cached (and reverts to WB
+ * when freed).
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_page_alloc.h>
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
+
+#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION 4
+#define FREE_ALL_PAGES (~0U)
+/* times are in msecs */
+#define IS_UNDEFINED (0)
+#define IS_WC (1<<1)
+#define IS_UC (1<<2)
+#define IS_CACHED (1<<3)
+#define IS_DMA32 (1<<4)
+
+enum pool_type {
+ POOL_IS_UNDEFINED,
+ POOL_IS_WC = IS_WC,
+ POOL_IS_UC = IS_UC,
+ POOL_IS_CACHED = IS_CACHED,
+ POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
+ POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
+ POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
+};
+/*
+ * The pool structure. There are usually six pools:
+ * - generic (not restricted to DMA32):
+ * - write combined, uncached, cached.
+ * - dma32 (up to 2^32 - so up 4GB):
+ * - write combined, uncached, cached.
+ * for each 'struct device'. The 'cached' is for pages that are actively used.
+ * The other ones can be shrunk by the shrinker API if necessary.
+ * @pools: The 'struct device->dma_pools' link.
+ * @type: Type of the pool
+ * @lock: Protects the inuse_list and free_list from concurrent access. Must be
+ * used with irqsave/irqrestore variants because the pool allocator may be
+ * called from delayed work.
+ * @inuse_list: Pool of pages that are in use. The order is important: it
+ * matches the order in which the TTM pages were put back.
+ * @free_list: Pool of pages that are free to be used. No order requirements.
+ * @dev: The device that is associated with these pools.
+ * @size: Size used during DMA allocation.
+ * @npages_free: Count of available pages for re-use.
+ * @npages_in_use: Count of pages that are in use.
+ * @nfrees: Stats when pool is shrinking.
+ * @nrefills: Stats when the pool is grown.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @name: Name of the pool.
+ * @dev_name: Name derived from dev - similar to how dev_info works.
+ * Used during shutdown as the dev_info during release is unavailable.
+ */
+struct dma_pool {
+ struct list_head pools; /* The 'struct device->dma_pools link */
+ enum pool_type type;
+ spinlock_t lock;
+ struct list_head inuse_list;
+ struct list_head free_list;
+ struct device *dev;
+ unsigned size;
+ unsigned npages_free;
+ unsigned npages_in_use;
+ unsigned long nfrees; /* Stats when shrunk. */
+ unsigned long nrefills; /* Stats when grown. */
+ gfp_t gfp_flags;
+ char name[13]; /* "cached dma32" */
+ char dev_name[64]; /* Constructed from dev */
+};
+
+/*
+ * The accounting page keeping track of the allocated page along with
+ * the DMA address.
+ * @page_list: The link to the 'page_list' in 'struct dma_pool'.
+ * @vaddr: The virtual address of the page
+ * @dma: The bus address of the page. If the page is not allocated
+ * via the DMA API, it will be -1.
+ */
+struct dma_page {
+ struct list_head page_list;
+ void *vaddr;
+ struct page *p;
+ dma_addr_t dma;
+};
+
+/*
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is the sysfs store. They won't have immediate effect
+ * anyway, so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+ unsigned alloc_size;
+ unsigned max_size;
+ unsigned small;
+};
+
+/*
+ * Contains the list of all of the 'struct device' and their corresponding
+ * DMA pools. Guarded by _manager->lock.
+ * @pools: The link to 'struct ttm_pool_manager->pools'
+ * @dev: The 'struct device' associated with the 'pool'
+ * @pool: The 'struct dma_pool' associated with the 'dev'
+ */
+struct device_pools {
+ struct list_head pools;
+ struct device *dev;
+ struct dma_pool *pool;
+};
+
+/*
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * @lock: Lock used when adding/removing from pools
+ * @pools: List of 'struct device' and 'struct dma_pool' tuples.
+ * @options: Limits for the pool.
+ * @npools: Total amount of pools in existence.
+ * @shrinker: The structure used by [un|]register_shrinker
+ */
+struct ttm_pool_manager {
+ struct mutex lock;
+ struct list_head pools;
+ struct ttm_pool_opts options;
+ unsigned npools;
+ struct shrinker mm_shrink;
+ struct kobject kobj;
+};
+
+static struct ttm_pool_manager *_manager;
+
+static struct attribute ttm_page_pool_max = {
+ .name = "pool_max_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+ .name = "pool_small_allocation",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+ .name = "pool_allocation_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+ &ttm_page_pool_max,
+ &ttm_page_pool_small,
+ &ttm_page_pool_alloc_size,
+ NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ kfree(m);
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t size)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ int chars;
+ unsigned val;
+ chars = sscanf(buffer, "%u", &val);
+ if (chars == 0)
+ return size;
+
+ /* Convert kb to number of pages */
+ val = val / (PAGE_SIZE >> 10);
+
+ if (attr == &ttm_page_pool_max)
+ m->options.max_size = val;
+ else if (attr == &ttm_page_pool_small)
+ m->options.small = val;
+ else if (attr == &ttm_page_pool_alloc_size) {
+ if (val > NUM_PAGES_TO_ALLOC*8) {
+ pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ return size;
+ } else if (val > NUM_PAGES_TO_ALLOC) {
+ pr_warn("Setting allocation size to larger than %lu is not recommended\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ }
+ m->options.alloc_size = val;
+ }
+
+ return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ unsigned val = 0;
+
+ if (attr == &ttm_page_pool_max)
+ val = m->options.max_size;
+ else if (attr == &ttm_page_pool_small)
+ val = m->options.small;
+ else if (attr == &ttm_page_pool_alloc_size)
+ val = m->options.alloc_size;
+
+ val = val * (PAGE_SIZE >> 10);
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+ .show = &ttm_pool_show,
+ .store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+ .release = &ttm_pool_kobj_release,
+ .sysfs_ops = &ttm_pool_sysfs_ops,
+ .default_attrs = ttm_pool_attrs,
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ unmap_page_from_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+#endif /* for !CONFIG_X86 */
+
+static int ttm_set_pages_caching(struct dma_pool *pool,
+ struct page **pages, unsigned cpages)
+{
+ int r = 0;
+ /* Set page caching */
+ if (pool->type & IS_UC) {
+ r = set_pages_array_uc(pages, cpages);
+ if (r)
+ pr_err("%s: Failed to set %d pages to uc!\n",
+ pool->dev_name, cpages);
+ }
+ if (pool->type & IS_WC) {
+ r = set_pages_array_wc(pages, cpages);
+ if (r)
+ pr_err("%s: Failed to set %d pages to wc!\n",
+ pool->dev_name, cpages);
+ }
+ return r;
+}
+
+static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
+{
+ dma_addr_t dma = d_page->dma;
+ dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
+
+ kfree(d_page);
+ d_page = NULL;
+}
+static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
+{
+ struct dma_page *d_page;
+
+ d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
+ if (!d_page)
+ return NULL;
+
+ d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
+ &d_page->dma,
+ pool->gfp_flags);
+ if (d_page->vaddr)
+ d_page->p = virt_to_page(d_page->vaddr);
+ else {
+ kfree(d_page);
+ d_page = NULL;
+ }
+ return d_page;
+}
+static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
+{
+ enum pool_type type = IS_UNDEFINED;
+
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ type |= IS_DMA32;
+ if (cstate == tt_cached)
+ type |= IS_CACHED;
+ else if (cstate == tt_uncached)
+ type |= IS_UC;
+ else
+ type |= IS_WC;
+
+ return type;
+}
+
+static void ttm_pool_update_free_locked(struct dma_pool *pool,
+ unsigned freed_pages)
+{
+ pool->npages_free -= freed_pages;
+ pool->nfrees += freed_pages;
+
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
+ struct page *pages[], unsigned npages)
+{
+ struct dma_page *d_page, *tmp;
+
+ /* Don't set WB on WB page pool. */
+ if (npages && !(pool->type & IS_CACHED) &&
+ set_pages_array_wb(pages, npages))
+ pr_err("%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, npages);
+
+ list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+ }
+}
+
+static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+{
+ /* Don't set WB on WB page pool. */
+ if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
+ pr_err("%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, 1);
+
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+}
+
+/*
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * number of pages in one go.
+ *
+ * @pool: to free the pages from
+ * @nr_free: number of pages to free; pass FREE_ALL_PAGES to drain the pool
+ **/
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+{
+ unsigned long irq_flags;
+ struct dma_page *dma_p, *tmp;
+ struct page **pages_to_free;
+ struct list_head d_pages;
+ unsigned freed_pages = 0,
+ npages_to_free = nr_free;
+
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+#if 0
+ if (nr_free > 1) {
+ pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
+ pool->dev_name, pool->name, current->pid,
+ npages_to_free, nr_free);
+ }
+#endif
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
+
+ if (!pages_to_free) {
+ pr_err("%s: Failed to allocate memory for pool free operation\n",
+ pool->dev_name);
+ return 0;
+ }
+ INIT_LIST_HEAD(&d_pages);
+restart:
+ spin_lock_irqsave(&pool->lock, irq_flags);
+
+	/* We pick the oldest ones off the list */
+ list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
+ page_list) {
+ if (freed_pages >= npages_to_free)
+ break;
+
+ /* Move the dma_page from one list to another. */
+ list_move(&dma_p->page_list, &d_pages);
+
+ pages_to_free[freed_pages++] = dma_p->p;
+ /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+ if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ /**
+ * Because changing page caching is costly
+ * we unlock the pool to prevent stalling.
+ */
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ ttm_dma_pages_put(pool, &d_pages, pages_to_free,
+ freed_pages);
+
+ INIT_LIST_HEAD(&d_pages);
+
+ if (likely(nr_free != FREE_ALL_PAGES))
+ nr_free -= freed_pages;
+
+ if (NUM_PAGES_TO_ALLOC >= nr_free)
+ npages_to_free = nr_free;
+ else
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ freed_pages = 0;
+
+ /* free all so restart the processing */
+ if (nr_free)
+ goto restart;
+
+			/* Not allowed to fall through or break because
+			 * the code that follows runs with the spinlock held,
+			 * while we have dropped it here.
+			 */
+ goto out;
+
+ }
+ }
+
+ /* remove range of pages from the pool */
+ if (freed_pages) {
+ ttm_pool_update_free_locked(pool, freed_pages);
+ nr_free -= freed_pages;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (freed_pages)
+ ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
+out:
+ kfree(pages_to_free);
+ return nr_free;
+}
+
+static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
+{
+ struct device_pools *p;
+ struct dma_pool *pool;
+
+ if (!dev)
+ return;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry_reverse(p, &_manager->pools, pools) {
+ if (p->dev != dev)
+ continue;
+ pool = p->pool;
+ if (pool->type != type)
+ continue;
+
+ list_del(&p->pools);
+ kfree(p);
+ _manager->npools--;
+ break;
+ }
+ list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
+ if (pool->type != type)
+ continue;
+ /* Takes a spinlock.. */
+ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+ WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
+ /* This code path is called after _all_ references to the
+		 * struct device have been dropped - so nobody should be
+ * touching it. In case somebody is trying to _add_ we are
+ * guarded by the mutex. */
+ list_del(&pool->pools);
+ kfree(pool);
+ break;
+ }
+ mutex_unlock(&_manager->lock);
+}
+
+/*
+ * This destructor is run when the 'struct device' is freed,
+ * although the pool might have already been freed earlier.
+ */
+static void ttm_dma_pool_release(struct device *dev, void *res)
+{
+ struct dma_pool *pool = *(struct dma_pool **)res;
+
+ if (pool)
+ ttm_dma_free_pool(dev, pool->type);
+}
+
+static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
+{
+ return *(struct dma_pool **)res == match_data;
+}
+
+static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
+ enum pool_type type)
+{
+ char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
+ enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
+ struct device_pools *sec_pool = NULL;
+ struct dma_pool *pool = NULL, **ptr;
+ unsigned i;
+ int ret = -ENODEV;
+ char *p;
+
+ if (!dev)
+ return NULL;
+
+ ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ ret = -ENOMEM;
+
+ pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!pool)
+ goto err_mem;
+
+ sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!sec_pool)
+ goto err_mem;
+
+ INIT_LIST_HEAD(&sec_pool->pools);
+ sec_pool->dev = dev;
+ sec_pool->pool = pool;
+
+ INIT_LIST_HEAD(&pool->free_list);
+ INIT_LIST_HEAD(&pool->inuse_list);
+ INIT_LIST_HEAD(&pool->pools);
+ spin_lock_init(&pool->lock);
+ pool->dev = dev;
+ pool->npages_free = pool->npages_in_use = 0;
+ pool->nfrees = 0;
+ pool->gfp_flags = flags;
+ pool->size = PAGE_SIZE;
+ pool->type = type;
+ pool->nrefills = 0;
+ p = pool->name;
+ for (i = 0; i < 5; i++) {
+ if (type & t[i]) {
+ p += snprintf(p, sizeof(pool->name) - (p - pool->name),
+ "%s", n[i]);
+ }
+ }
+ *p = 0;
+ /* We copy the name for pr_ calls b/c when dma_pool_destroy is called
+ * - the kobj->name has already been deallocated.*/
+ snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
+ dev_driver_string(dev), dev_name(dev));
+ mutex_lock(&_manager->lock);
+ /* You can get the dma_pool from either the global: */
+ list_add(&sec_pool->pools, &_manager->pools);
+ _manager->npools++;
+ /* or from 'struct device': */
+ list_add(&pool->pools, &dev->dma_pools);
+ mutex_unlock(&_manager->lock);
+
+ *ptr = pool;
+ devres_add(dev, ptr);
+
+ return pool;
+err_mem:
+ devres_free(ptr);
+ kfree(sec_pool);
+ kfree(pool);
+ return ERR_PTR(ret);
+}
+
+static struct dma_pool *ttm_dma_find_pool(struct device *dev,
+ enum pool_type type)
+{
+ struct dma_pool *pool, *tmp, *found = NULL;
+
+ if (type == IS_UNDEFINED)
+ return found;
+
+ /* NB: We iterate on the 'struct dev' which has no spinlock, but
+ * it does have a kref which we have taken. The kref is taken during
+ * graphic driver loading - in the drm_pci_init it calls either
+ * pci_dev_get or pci_register_driver which both end up taking a kref
+ * on 'struct device'.
+ *
+ * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
+ * and calls the dev_res deconstructors: ttm_dma_pool_release. The nice
+ * thing is at that point of time there are no pages associated with the
+ * driver so this function will not be called.
+ */
+ list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
+ if (pool->type != type)
+ continue;
+ found = pool;
+ break;
+ }
+ return found;
+}
+
+/*
+ * Free the pages that failed to change the caching state. If there
+ * are pages that have already changed their caching state, put them back
+ * into the pool.
+ */
+static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
+ struct list_head *d_pages,
+ struct page **failed_pages,
+ unsigned cpages)
+{
+ struct dma_page *d_page, *tmp;
+ struct page *p;
+ unsigned i = 0;
+
+ p = failed_pages[0];
+ if (!p)
+ return;
+ /* Find the failed page. */
+ list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+ if (d_page->p != p)
+ continue;
+ /* .. and then progress over the full list. */
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+ if (++i < cpages)
+ p = failed_pages[i];
+ else
+ break;
+ }
+
+}
+
+/*
+ * Allocate 'count' pages with the pool's caching state and link each
+ * resulting dma_page onto the 'd_pages' list.
+ * We return zero for success, and negative numbers as errors.
+ */
+static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
+ struct list_head *d_pages,
+ unsigned count)
+{
+ struct page **caching_array;
+ struct dma_page *dma_p;
+ struct page *p;
+ int r = 0;
+ unsigned i, cpages;
+ unsigned max_cpages = min(count,
+ (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+ /* allocate array for page caching change */
+ caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+ if (!caching_array) {
+ pr_err("%s: Unable to allocate table for new pages\n",
+ pool->dev_name);
+ return -ENOMEM;
+ }
+
+ if (count > 1) {
+ pr_debug("%s: (%s:%d) Getting %d pages\n",
+ pool->dev_name, pool->name, current->pid, count);
+ }
+
+ for (i = 0, cpages = 0; i < count; ++i) {
+ dma_p = __ttm_dma_alloc_page(pool);
+ if (!dma_p) {
+ pr_err("%s: Unable to get page %u\n",
+ pool->dev_name, i);
+
+ /* store already allocated pages in the pool after
+ * setting the caching state */
+ if (cpages) {
+ r = ttm_set_pages_caching(pool, caching_array,
+ cpages);
+ if (r)
+ ttm_dma_handle_caching_state_failure(
+ pool, d_pages, caching_array,
+ cpages);
+ }
+ r = -ENOMEM;
+ goto out;
+ }
+ p = dma_p->p;
+#ifdef CONFIG_HIGHMEM
+ /* gfp flags of highmem page should never be dma32 so we
+		 * should be fine in such case
+ */
+ if (!PageHighMem(p))
+#endif
+ {
+ caching_array[cpages++] = p;
+ if (cpages == max_cpages) {
+ /* Note: Cannot hold the spinlock */
+ r = ttm_set_pages_caching(pool, caching_array,
+ cpages);
+ if (r) {
+ ttm_dma_handle_caching_state_failure(
+ pool, d_pages, caching_array,
+ cpages);
+ goto out;
+ }
+ cpages = 0;
+ }
+ }
+ list_add(&dma_p->page_list, d_pages);
+ }
+
+ if (cpages) {
+ r = ttm_set_pages_caching(pool, caching_array, cpages);
+ if (r)
+ ttm_dma_handle_caching_state_failure(pool, d_pages,
+ caching_array, cpages);
+ }
+out:
+ kfree(caching_array);
+ return r;
+}
+
+/*
+ * @return the number of pages currently available in the pool.
+ */
+static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
+ unsigned long *irq_flags)
+{
+ unsigned count = _manager->options.small;
+ int r = pool->npages_free;
+
+ if (count > pool->npages_free) {
+ struct list_head d_pages;
+
+ INIT_LIST_HEAD(&d_pages);
+
+ spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+		/* Returns how many more are necessary to fulfill the
+ * request. */
+ r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
+
+ spin_lock_irqsave(&pool->lock, *irq_flags);
+ if (!r) {
+ /* Add the fresh to the end.. */
+ list_splice(&d_pages, &pool->free_list);
+ ++pool->nrefills;
+ pool->npages_free += count;
+ r = count;
+ } else {
+ struct dma_page *d_page;
+ unsigned cpages = 0;
+
+ pr_err("%s: Failed to fill %s pool (r:%d)!\n",
+ pool->dev_name, pool->name, r);
+
+ list_for_each_entry(d_page, &d_pages, page_list) {
+ cpages++;
+ }
+ list_splice_tail(&d_pages, &pool->free_list);
+ pool->npages_free += cpages;
+ r = cpages;
+ }
+ }
+ return r;
+}
+
+/*
+ * @return 0 on success, or -ENOMEM if no page could be obtained.
+ * The populate list is actually a stack (not that it matters as TTM
+ * allocates one page at a time).
+ */
+static int ttm_dma_pool_get_pages(struct dma_pool *pool,
+ struct ttm_dma_tt *ttm_dma,
+ unsigned index)
+{
+ struct dma_page *d_page;
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ unsigned long irq_flags;
+ int count, r = -ENOMEM;
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
+ if (count) {
+ d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
+ ttm->pages[index] = d_page->p;
+ ttm_dma->dma_address[index] = d_page->dma;
+ list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+ r = 0;
+ pool->npages_in_use += 1;
+ pool->npages_free -= 1;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ return r;
+}
+
+/*
+ * On success the ttm pages array holds num_pages correctly cached pages.
+ * On failure a negative error value (-ENOMEM, etc.) is returned.
+ */
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ struct dma_pool *pool;
+ enum pool_type type;
+ unsigned i;
+ gfp_t gfp_flags;
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+ if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
+ gfp_flags = GFP_USER | GFP_DMA32;
+ else
+ gfp_flags = GFP_HIGHUSER;
+ if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ pool = ttm_dma_find_pool(dev, type);
+ if (!pool) {
+ pool = ttm_dma_pool_init(dev, gfp_flags, type);
+ if (IS_ERR_OR_NULL(pool)) {
+ return -ENOMEM;
+ }
+ }
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ for (i = 0; i < ttm->num_pages; ++i) {
+ ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+ if (ret != 0) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return -ENOMEM;
+ }
+
+ ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+ false, false);
+ if (unlikely(ret != 0)) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return -ENOMEM;
+ }
+ }
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return ret;
+ }
+ }
+
+ ttm->state = tt_unbound;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_populate);
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_dma_pool_get_num_unused_pages(void)
+{
+ struct device_pools *p;
+ unsigned total = 0;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools)
+ total += p->pool->npages_free;
+ mutex_unlock(&_manager->lock);
+ return total;
+}
+
+/* Put all pages in the pages list into the correct pool to wait for reuse */
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ struct dma_pool *pool;
+ struct dma_page *d_page, *next;
+ enum pool_type type;
+ bool is_cached = false;
+ unsigned count = 0, i, npages = 0;
+ unsigned long irq_flags;
+
+ type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+ pool = ttm_dma_find_pool(dev, type);
+ if (!pool)
+ return;
+
+ is_cached = (ttm_dma_find_pool(pool->dev,
+ ttm_to_type(ttm->page_flags, tt_cached)) == pool);
+
+ /* make sure pages array match list and count number of pages */
+ list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
+ ttm->pages[count] = d_page->p;
+ count++;
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ pool->npages_in_use -= count;
+ if (is_cached) {
+ pool->nfrees += count;
+ } else {
+ pool->npages_free += count;
+ list_splice(&ttm_dma->pages_list, &pool->free_list);
+ npages = count;
+ if (pool->npages_free > _manager->options.max_size) {
+ npages = pool->npages_free - _manager->options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC number of pages
+ * to reduce calls to set_memory_wb */
+ if (npages < NUM_PAGES_TO_ALLOC)
+ npages = NUM_PAGES_TO_ALLOC;
+ }
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (is_cached) {
+ list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ d_page->p);
+ ttm_dma_page_put(pool, d_page);
+ }
+ } else {
+ for (i = 0; i < count; i++) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ ttm->pages[i]);
+ }
+ }
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ for (i = 0; i < ttm->num_pages; i++) {
+ ttm->pages[i] = NULL;
+ ttm_dma->dma_address[i] = 0;
+ }
+
+ /* shrink pool if necessary (only on !is_cached pools)*/
+ if (npages)
+ ttm_dma_page_pool_free(pool, npages);
+ ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+
+/**
+ * Callback for the mm to request that the pool reduce the number of pages held.
+ */
+static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ static atomic_t start_pool = ATOMIC_INIT(0);
+ unsigned idx = 0;
+ unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned shrink_pages = sc->nr_to_scan;
+ struct device_pools *p;
+
+ if (list_empty(&_manager->pools))
+ return 0;
+
+ mutex_lock(&_manager->lock);
+ pool_offset = pool_offset % _manager->npools;
+ list_for_each_entry(p, &_manager->pools, pools) {
+ unsigned nr_free;
+
+ if (!p->dev)
+ continue;
+ if (shrink_pages == 0)
+ break;
+ /* Do it in round-robin fashion. */
+ if (++idx < pool_offset)
+ continue;
+ nr_free = shrink_pages;
+ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+ pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+ p->pool->dev_name, p->pool->name, current->pid,
+ nr_free, shrink_pages);
+ }
+ mutex_unlock(&_manager->lock);
+ /* return estimated number of unused pages in pool */
+ return ttm_dma_pool_get_num_unused_pages();
+}
+
+static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+ manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+ manager->mm_shrink.seeks = 1;
+ register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+ unregister_shrinker(&manager->mm_shrink);
+}
+
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+ int ret = -ENOMEM;
+
+ WARN_ON(_manager);
+
+ pr_info("Initializing DMA pool allocator\n");
+
+ _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+ if (!_manager)
+ goto err;
+
+ mutex_init(&_manager->lock);
+ INIT_LIST_HEAD(&_manager->pools);
+
+ _manager->options.max_size = max_pages;
+ _manager->options.small = SMALL_ALLOCATION;
+ _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+ /* This takes care of auto-freeing the _manager */
+ ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+ &glob->kobj, "dma_pool");
+ if (unlikely(ret != 0)) {
+ kobject_put(&_manager->kobj);
+ goto err;
+ }
+ ttm_dma_pool_mm_shrink_init(_manager);
+ return 0;
+err:
+ return ret;
+}
+
+void ttm_dma_page_alloc_fini(void)
+{
+ struct device_pools *p, *t;
+
+ pr_info("Finalizing DMA pool allocator\n");
+ ttm_dma_pool_mm_shrink_fini(_manager);
+
+ list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
+ dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
+ current->pid);
+ WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
+ ttm_dma_pool_match, p->pool));
+ ttm_dma_free_pool(p->dev, p->pool->type);
+ }
+ kobject_put(&_manager->kobj);
+ _manager = NULL;
+}
+
+int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ struct device_pools *p;
+ struct dma_pool *pool = NULL;
+ char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
+ "name", "virt", "busaddr"};
+
+ if (!_manager) {
+ seq_printf(m, "No pool allocator running.\n");
+ return 0;
+ }
+ seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
+ h[0], h[1], h[2], h[3], h[4], h[5]);
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools) {
+ struct device *dev = p->dev;
+ if (!dev)
+ continue;
+ pool = p->pool;
+ seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
+ pool->name, pool->nrefills,
+ pool->nfrees, pool->npages_in_use,
+ pool->npages_free,
+ pool->dev_name);
+ }
+ mutex_unlock(&_manager->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
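
For orientation only (not part of the commit): the DMA pools above are keyed on
a 'struct device', so a driver that can use them passes its device into the
populate/unpopulate pair. A minimal, hypothetical sketch:

/*
 * Hypothetical caller: populate a ttm_dma_tt from the per-device DMA
 * pool and tear it down again.  "mydrv" and "dev" are illustrative.
 */
static int
mydrv_dma_tt_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	int ret;

	ret = ttm_dma_populate(ttm_dma, dev);
	if (ret != 0)
		return ret;
	/* ttm_dma->dma_address[i] now holds the bus address of page i. */
	return 0;
}

static void
mydrv_dma_tt_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{

	ttm_dma_unpopulate(ttm_dma, dev);
}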
diff --git a/sys/dev/drm2/ttm/ttm_placement.h b/sys/dev/drm2/ttm/ttm_placement.h
new file mode 100644
index 0000000..20e1b4c
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_placement.h
@@ -0,0 +1,93 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+
+#ifndef _TTM_PLACEMENT_H_
+#define _TTM_PLACEMENT_H_
+/*
+ * Memory regions for data placement.
+ */
+
+#define TTM_PL_SYSTEM 0
+#define TTM_PL_TT 1
+#define TTM_PL_VRAM 2
+#define TTM_PL_PRIV0 3
+#define TTM_PL_PRIV1 4
+#define TTM_PL_PRIV2 5
+#define TTM_PL_PRIV3 6
+#define TTM_PL_PRIV4 7
+#define TTM_PL_PRIV5 8
+#define TTM_PL_SWAPPED 15
+
+#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
+#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
+#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
+#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
+#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
+#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
+#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
+#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
+#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
+#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
+#define TTM_PL_MASK_MEM 0x0000FFFF
+
+/*
+ * Other flags that affects data placement.
+ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
+ * if available.
+ * TTM_PL_FLAG_SHARED means that another application may
+ * reference the buffer.
+ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
+ * be evicted to make room for other buffers.
+ */
+
+#define TTM_PL_FLAG_CACHED (1 << 16)
+#define TTM_PL_FLAG_UNCACHED (1 << 17)
+#define TTM_PL_FLAG_WC (1 << 18)
+#define TTM_PL_FLAG_SHARED (1 << 20)
+#define TTM_PL_FLAG_NO_EVICT (1 << 21)
+
+#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
+ TTM_PL_FLAG_UNCACHED | \
+ TTM_PL_FLAG_WC)
+
+#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
+
+/*
+ * Access flags to be used for CPU- and GPU- mappings.
+ * The idea is that the TTM synchronization mechanism will
+ * allow concurrent READ access and exclusive write access.
+ * Currently GPU- and CPU accesses are exclusive.
+ */
+
+#define TTM_ACCESS_READ (1 << 0)
+#define TTM_ACCESS_WRITE (1 << 1)
+
+#endif
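
A placement entry normally combines exactly one memory-type flag with one
caching flag. An illustrative example follows; the array and struct names are
hypothetical and not part of this commit:

/* Prefer write-combined VRAM, fall back to cached system memory. */
static uint32_t mydrv_placements[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
};

static struct ttm_placement mydrv_placement = {
	.placement = mydrv_placements,
	.num_placement = 2,
	.busy_placement = mydrv_placements,
	.num_busy_placement = 2,
};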
diff --git a/sys/dev/drm2/ttm/ttm_tt.c b/sys/dev/drm2/ttm/ttm_tt.c
new file mode 100644
index 0000000..82547f1
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_tt.c
@@ -0,0 +1,370 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/*
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+#include <dev/drm2/ttm/ttm_page_alloc.h>
+
+MALLOC_DEFINE(M_TTM_PD, "ttm_pd", "TTM Page Directories");
+
+/**
+ * Allocates storage for pointers to the pages that back the ttm.
+ */
+static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
+{
+ ttm->pages = malloc(ttm->num_pages * sizeof(void *),
+ M_TTM_PD, M_WAITOK | M_ZERO);
+}
+
+static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+{
+ ttm->ttm.pages = malloc(ttm->ttm.num_pages * sizeof(void *),
+ M_TTM_PD, M_WAITOK | M_ZERO);
+ ttm->dma_address = malloc(ttm->ttm.num_pages *
+ sizeof(*ttm->dma_address), M_TTM_PD, M_WAITOK);
+}
+
+#if defined(__i386__) || defined(__amd64__)
+static inline int ttm_tt_set_page_caching(vm_page_t p,
+ enum ttm_caching_state c_old,
+ enum ttm_caching_state c_new)
+{
+
+ /* XXXKIB our VM does not need this. */
+#if 0
+ if (c_old != tt_cached) {
+ /* p isn't in the default caching state, set it to
+ * writeback first to free its current memtype. */
+ pmap_page_set_memattr(p, VM_MEMATTR_WRITE_BACK);
+ }
+#endif
+
+ if (c_new == tt_wc)
+ pmap_page_set_memattr(p, VM_MEMATTR_WRITE_COMBINING);
+ else if (c_new == tt_uncached)
+ pmap_page_set_memattr(p, VM_MEMATTR_UNCACHEABLE);
+
+ return (0);
+}
+#else
+static inline int ttm_tt_set_page_caching(vm_page_t p,
+ enum ttm_caching_state c_old,
+ enum ttm_caching_state c_new)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Change caching policy for the linear kernel map
+ * for a range of pages in a ttm.
+ */
+
+static int ttm_tt_set_caching(struct ttm_tt *ttm,
+ enum ttm_caching_state c_state)
+{
+ int i, j;
+ vm_page_t cur_page;
+ int ret;
+
+ if (ttm->caching_state == c_state)
+ return 0;
+
+ if (ttm->state == tt_unpopulated) {
+ /* Change caching but don't populate */
+ ttm->caching_state = c_state;
+ return 0;
+ }
+
+ if (ttm->caching_state == tt_cached)
+ drm_clflush_pages(ttm->pages, ttm->num_pages);
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ cur_page = ttm->pages[i];
+ if (likely(cur_page != NULL)) {
+ ret = ttm_tt_set_page_caching(cur_page,
+ ttm->caching_state,
+ c_state);
+ if (unlikely(ret != 0))
+ goto out_err;
+ }
+ }
+
+ ttm->caching_state = c_state;
+
+ return 0;
+
+out_err:
+ for (j = 0; j < i; ++j) {
+ cur_page = ttm->pages[j];
+ if (cur_page != NULL) {
+ (void)ttm_tt_set_page_caching(cur_page, c_state,
+ ttm->caching_state);
+ }
+ }
+
+ return ret;
+}
+
+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
+{
+ enum ttm_caching_state state;
+
+ if (placement & TTM_PL_FLAG_WC)
+ state = tt_wc;
+ else if (placement & TTM_PL_FLAG_UNCACHED)
+ state = tt_uncached;
+ else
+ state = tt_cached;
+
+ return ttm_tt_set_caching(ttm, state);
+}
+
+void ttm_tt_destroy(struct ttm_tt *ttm)
+{
+ if (unlikely(ttm == NULL))
+ return;
+
+ if (ttm->state == tt_bound) {
+ ttm_tt_unbind(ttm);
+ }
+
+ if (likely(ttm->pages != NULL)) {
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ }
+
+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
+ ttm->swap_storage)
+ vm_object_deallocate(ttm->swap_storage);
+
+ ttm->swap_storage = NULL;
+ ttm->func->destroy(ttm);
+}
+
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ vm_page_t dummy_read_page)
+{
+ ttm->bdev = bdev;
+ ttm->glob = bdev->glob;
+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ttm->caching_state = tt_cached;
+ ttm->page_flags = page_flags;
+ ttm->dummy_read_page = dummy_read_page;
+ ttm->state = tt_unpopulated;
+ ttm->swap_storage = NULL;
+
+ ttm_tt_alloc_page_directory(ttm);
+ if (!ttm->pages) {
+ ttm_tt_destroy(ttm);
+ printf("Failed allocating page table\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void ttm_tt_fini(struct ttm_tt *ttm)
+{
+ free(ttm->pages, M_TTM_PD);
+ ttm->pages = NULL;
+}
+
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ vm_page_t dummy_read_page)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+
+ ttm->bdev = bdev;
+ ttm->glob = bdev->glob;
+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ttm->caching_state = tt_cached;
+ ttm->page_flags = page_flags;
+ ttm->dummy_read_page = dummy_read_page;
+ ttm->state = tt_unpopulated;
+ ttm->swap_storage = NULL;
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ ttm_dma_tt_alloc_page_directory(ttm_dma);
+ if (!ttm->pages || !ttm_dma->dma_address) {
+ ttm_tt_destroy(ttm);
+ printf("Failed allocating page table\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+
+ free(ttm->pages, M_TTM_PD);
+ ttm->pages = NULL;
+ free(ttm_dma->dma_address, M_TTM_PD);
+ ttm_dma->dma_address = NULL;
+}
+
+void ttm_tt_unbind(struct ttm_tt *ttm)
+{
+ int ret;
+
+ if (ttm->state == tt_bound) {
+ ret = ttm->func->unbind(ttm);
+ MPASS(ret == 0);
+ ttm->state = tt_unbound;
+ }
+}
+
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+ int ret = 0;
+
+ if (!ttm)
+ return -EINVAL;
+
+ if (ttm->state == tt_bound)
+ return 0;
+
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ret)
+ return ret;
+
+ ret = ttm->func->bind(ttm, bo_mem);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ttm->state = tt_bound;
+
+ return 0;
+}
+
+int ttm_tt_swapin(struct ttm_tt *ttm)
+{
+ vm_object_t obj;
+ vm_page_t from_page, to_page;
+ int i, ret, rv;
+
+ obj = ttm->swap_storage;
+
+ VM_OBJECT_LOCK(obj);
+ vm_object_pip_add(obj, 1);
+ for (i = 0; i < ttm->num_pages; ++i) {
+ from_page = vm_page_grab(obj, i, VM_ALLOC_RETRY);
+ if (from_page->valid != VM_PAGE_BITS_ALL) {
+ if (vm_pager_has_page(obj, i, NULL, NULL)) {
+ rv = vm_pager_get_pages(obj, &from_page, 1, 0);
+ if (rv != VM_PAGER_OK) {
+ vm_page_lock(from_page);
+ vm_page_free(from_page);
+ vm_page_unlock(from_page);
+ ret = -EIO;
+ goto err_ret;
+ }
+ } else
+ vm_page_zero_invalid(from_page, TRUE);
+ }
+ to_page = ttm->pages[i];
+ if (unlikely(to_page == NULL)) {
+ vm_page_wakeup(from_page);
+ ret = -ENOMEM;
+ goto err_ret;
+ }
+ pmap_copy_page(from_page, to_page);
+ vm_page_wakeup(from_page);
+ }
+ vm_object_pip_wakeup(obj);
+ VM_OBJECT_UNLOCK(obj);
+
+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
+ vm_object_deallocate(obj);
+ ttm->swap_storage = NULL;
+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+ return (0);
+
+err_ret:
+ vm_object_pip_wakeup(obj);
+ VM_OBJECT_UNLOCK(obj);
+ return (ret);
+}
+
+int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
+{
+ vm_object_t obj;
+ vm_page_t from_page, to_page;
+ int i;
+
+ MPASS(ttm->state == tt_unbound || ttm->state == tt_unpopulated);
+ MPASS(ttm->caching_state == tt_cached);
+
+ if (persistent_swap_storage == NULL) {
+ obj = vm_pager_allocate(OBJT_SWAP, NULL,
+ IDX_TO_OFF(ttm->num_pages), VM_PROT_DEFAULT, 0,
+ curthread->td_ucred);
+ if (obj == NULL) {
+ printf("[TTM] Failed allocating swap storage\n");
+ return (-ENOMEM);
+ }
+ } else
+ obj = persistent_swap_storage;
+
+ VM_OBJECT_LOCK(obj);
+ vm_object_pip_add(obj, 1);
+ for (i = 0; i < ttm->num_pages; ++i) {
+ from_page = ttm->pages[i];
+ if (unlikely(from_page == NULL))
+ continue;
+ to_page = vm_page_grab(obj, i, VM_ALLOC_RETRY);
+ pmap_copy_page(from_page, to_page);
+ vm_page_dirty(to_page);
+ to_page->valid = VM_PAGE_BITS_ALL;
+ vm_page_wakeup(to_page);
+ }
+ vm_object_pip_wakeup(obj);
+ VM_OBJECT_UNLOCK(obj);
+
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ ttm->swap_storage = obj;
+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+ if (persistent_swap_storage != NULL)
+ ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
+ return (0);
+}
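
For reference (illustration only, not from this change): the helpers above drive
the ttm_tt state machine tt_unpopulated -> tt_unbound -> tt_bound, with swapout
only legal while the ttm is unbound and cached. A hedged sketch of the sequence,
with a hypothetical caller:

/* Hypothetical flow through the ttm_tt states; error paths trimmed. */
static int
mydrv_cycle_tt(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret;

	/* Populates via the driver callback, then binds: -> tt_bound. */
	ret = ttm_tt_bind(ttm, bo_mem);
	if (ret != 0)
		return (ret);

	/* ... GPU works on the pages ... */

	ttm_tt_unbind(ttm);			/* -> tt_unbound */

	/*
	 * Copy the pages into an anonymous swap object and unpopulate;
	 * only legal for a cached ttm (see the MPASS in ttm_tt_swapout).
	 */
	return (ttm_tt_swapout(ttm, NULL));
}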