Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--   drivers/gpu/drm/i915/i915_gem.c | 628
1 file changed, 579 insertions(+), 49 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 24fe8c1..cc2ca55 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -30,6 +30,7 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include <linux/swap.h>
+#include <linux/pci.h>
 
 #define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
@@ -40,8 +41,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
-					     int write);
 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
 					     int write);
 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
@@ -51,34 +50,43 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o
 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+				       unsigned alignment);
+static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
+static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
+static int i915_gem_evict_something(struct drm_device *dev);
+
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+		     unsigned long end)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-static void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+	if (start >= end ||
+	    (start & (PAGE_SIZE - 1)) != 0 ||
+	    (end & (PAGE_SIZE - 1)) != 0) {
+		return -EINVAL;
+	}
+
+	drm_mm_init(&dev_priv->mm.gtt_space, start,
+		    end - start);
+
+	dev->gtt_total = (uint32_t) (end - start);
+
+	return 0;
+}
 
 int
 i915_gem_init_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_init *args = data;
+	int ret;
 
 	mutex_lock(&dev->struct_mutex);
-
-	if (args->gtt_start >= args->gtt_end ||
-	    (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
-	    (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-
-	drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
-	    args->gtt_end - args->gtt_start);
-
-	dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
-
+	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
 	mutex_unlock(&dev->struct_mutex);
 
-	return 0;
+	return ret;
 }
 
 int
@@ -529,6 +537,252 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+/**
+ * i915_gem_fault - fault a page into the GTT
+ * vma: VMA in question
+ * vmf: fault info
+ *
+ * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
+ * from userspace.  The fault handler takes care of binding the object to
+ * the GTT (if needed), allocating and programming a fence register (again,
+ * only if needed based on whether the old reg is still valid or the object
+ * is tiled) and inserting a new PTE into the faulting process.
+ *
+ * Note that the faulting process may involve evicting existing objects
+ * from the GTT and/or fence registers to make room.  So performance may
+ * suffer if the GTT working set is large or there are few fence registers
+ * left.
+ */
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	pgoff_t page_offset;
+	unsigned long pfn;
+	int ret = 0;
+
+	/* We don't use vmf->pgoff since that has the fake offset */
+	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+		PAGE_SHIFT;
+
+	/* Now bind it into the GTT if needed */
+	mutex_lock(&dev->struct_mutex);
+	if (!obj_priv->gtt_space) {
+		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return VM_FAULT_SIGBUS;
+		}
+		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+	}
+
+	/* Need a new fence register? */
+	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+	    obj_priv->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_get_fence_reg(obj);
+
+	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+		page_offset;
+
+	/* Finally, remap it using the new GTT offset */
+	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	switch (ret) {
+	case -ENOMEM:
+	case -EAGAIN:
+		return VM_FAULT_OOM;
+	case -EFAULT:
+	case -EBUSY:
+		DRM_ERROR("can't insert pfn?? fault or busy...\n");
+		return VM_FAULT_SIGBUS;
+	default:
+		return VM_FAULT_NOPAGE;
+	}
+}
+
+/**
+ * i915_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call.  The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+static int
+i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_map_list *list;
+	struct drm_map *map;
+	int ret = 0;
+
+	/* Set the object up for mmap'ing */
+	list = &obj->map_list;
+	list->map = drm_calloc(1, sizeof(struct drm_map_list),
+			       DRM_MEM_DRIVER);
+	if (!list->map)
+		return -ENOMEM;
+
+	map = list->map;
+	map->type = _DRM_GEM;
+	map->size = obj->size;
+	map->handle = obj;
+
+	/* Get a DRM GEM mmap offset allocated... */
+	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+						    obj->size / PAGE_SIZE, 0, 0);
+	if (!list->file_offset_node) {
+		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+		ret = -ENOMEM;
+		goto out_free_list;
+	}
+
+	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+						  obj->size / PAGE_SIZE, 0);
+	if (!list->file_offset_node) {
+		ret = -ENOMEM;
+		goto out_free_list;
+	}
+
+	list->hash.key = list->file_offset_node->start;
+	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+		DRM_ERROR("failed to add to map hash\n");
+		goto out_free_mm;
+	}
+
+	/* By now we should be all set, any drm_mmap request on the offset
+	 * below will get to our mmap & fault handler */
+	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
+
+	return 0;
+
+out_free_mm:
+	drm_mm_put_block(list->file_offset_node);
+out_free_list:
+	drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
+
+	return ret;
+}
+
+/**
+ * i915_gem_get_gtt_alignment - return required GTT alignment for an object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, taking into account
+ * potential fence register mapping if needed.
+ */
+static uint32_t
+i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int start, i;
+
+	/*
+	 * Minimum alignment is 4k (GTT page size), but might be greater
+	 * if a fence register is needed for the object.
+	 */
+	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+		return 4096;
+
+	/*
+	 * Previous chips need to be aligned to the size of the smallest
+	 * fence register that can contain the object.
+	 */
+	if (IS_I9XX(dev))
+		start = 1024*1024;
+	else
+		start = 512*1024;
+
+	for (i = start; i < obj->size; i <<= 1)
+		;
+
+	return i;
+}
+
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file_priv: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_i915_gem_mmap_gtt *args = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+
+	mutex_lock(&dev->struct_mutex);
+
+	obj_priv = obj->driver_private;
+
+	if (!obj_priv->mmap_offset) {
+		ret = i915_gem_create_mmap_offset(obj);
+		if (ret)
+			return ret;
+	}
+
+	args->offset = obj_priv->mmap_offset;
+
+	obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
+
+	/* Make sure the alignment is correct for fence regs etc */
+	if (obj_priv->agp_mem &&
+	    (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	/*
+	 * Pull it into the GTT so that we have a page list (makes the
+	 * initial fault faster and any subsequent flushing possible).
+	 */
+	if (!obj_priv->agp_mem) {
+		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+	}
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static void
 i915_gem_object_free_page_list(struct drm_gem_object *obj)
 {
@@ -726,6 +980,7 @@ i915_gem_retire_request(struct drm_device *dev,
 	 */
 	if (obj_priv->last_rendering_seqno != request->seqno)
 		return;
+
 #if WATCH_LRU
 	DRM_INFO("%s: retire %d moves to inactive list %p\n",
 		 __func__, request->seqno, obj);
@@ -956,6 +1211,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	loff_t offset;
 	int ret = 0;
 
 #if WATCH_BUF
@@ -991,6 +1247,14 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
 	BUG_ON(obj_priv->active);
 
+	/* blow away mappings if mapped through GTT */
+	offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
+	if (dev->dev_mapping)
+		unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
+
+	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+		i915_gem_clear_fence_reg(obj);
+
 	i915_gem_object_free_page_list(obj);
 
 	if (obj_priv->gtt_space) {
@@ -1149,6 +1413,204 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
 	return 0;
 }
 
+static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+	struct drm_gem_object *obj = reg->obj;
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int regnum = obj_priv->fence_reg;
+	uint64_t val;
+
+	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+		    0xfffff000) << 32;
+	val |= obj_priv->gtt_offset & 0xfffff000;
+	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+	if (obj_priv->tiling_mode == I915_TILING_Y)
+		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+	val |= I965_FENCE_REG_VALID;
+
+	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
+}
+
+static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+	struct drm_gem_object *obj = reg->obj;
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int regnum = obj_priv->fence_reg;
+	uint32_t val;
+	uint32_t pitch_val;
+
+	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+	    (obj_priv->gtt_offset & (obj->size - 1))) {
+		WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__);
+		return;
+	}
+
+	if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) ||
+						       IS_I945GM(dev) ||
+						       IS_G33(dev)))
+		pitch_val = (obj_priv->stride / 128) - 1;
+	else
+		pitch_val = (obj_priv->stride / 512) - 1;
+
+	val = obj_priv->gtt_offset;
+	if (obj_priv->tiling_mode == I915_TILING_Y)
+		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+	val |= I915_FENCE_SIZE_BITS(obj->size);
+	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+	val |= I830_FENCE_REG_VALID;
+
+	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+}
+
+static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+	struct drm_gem_object *obj = reg->obj;
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int regnum = obj_priv->fence_reg;
+	uint32_t val;
+	uint32_t pitch_val;
+
+	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+	    (obj_priv->gtt_offset & (obj->size - 1))) {
+		WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__);
+		return;
+	}
+
+	pitch_val = (obj_priv->stride / 128) - 1;
+
+	val = obj_priv->gtt_offset;
+	if (obj_priv->tiling_mode == I915_TILING_Y)
+		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+	val |= I830_FENCE_SIZE_BITS(obj->size);
+	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+	val |= I830_FENCE_REG_VALID;
+
+	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+
+}
+
+/**
+ * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ *
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ */
+static void
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_fence_reg *reg = NULL;
+	int i, ret;
+
+	switch (obj_priv->tiling_mode) {
+	case I915_TILING_NONE:
+		WARN(1, "allocating a fence for non-tiled object?\n");
+		break;
+	case I915_TILING_X:
+		WARN(obj_priv->stride & (512 - 1),
+		     "object is X tiled but has non-512B pitch\n");
+		break;
+	case I915_TILING_Y:
+		WARN(obj_priv->stride & (128 - 1),
+		     "object is Y tiled but has non-128B pitch\n");
+		break;
+	}
+
+	/* First try to find a free reg */
+	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+		reg = &dev_priv->fence_regs[i];
+		if (!reg->obj)
+			break;
+	}
+
+	/* None available, try to steal one or wait for a user to finish */
+	if (i == dev_priv->num_fence_regs) {
+		struct drm_i915_gem_object *old_obj_priv = NULL;
+		loff_t offset;
+
+try_again:
+		/* Could try to use LRU here instead... */
+		for (i = dev_priv->fence_reg_start;
+		     i < dev_priv->num_fence_regs; i++) {
+			reg = &dev_priv->fence_regs[i];
+			old_obj_priv = reg->obj->driver_private;
+			if (!old_obj_priv->pin_count)
+				break;
+		}
+
+		/*
+		 * Now things get ugly... we have to wait for one of the
+		 * objects to finish before trying again.
+		 */
+		if (i == dev_priv->num_fence_regs) {
+			ret = i915_gem_object_wait_rendering(reg->obj);
+			if (ret) {
+				WARN(ret, "wait_rendering failed: %d\n", ret);
+				return;
+			}
+			goto try_again;
+		}
+
+		/*
+		 * Zap this virtual mapping so we can set up a fence again
+		 * for this object next time we need it.
+		 */
+		offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
+		if (dev->dev_mapping)
+			unmap_mapping_range(dev->dev_mapping, offset,
+					    reg->obj->size, 1);
+		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
+	}
+
+	obj_priv->fence_reg = i;
+	reg->obj = obj;
+
+	if (IS_I965G(dev))
+		i965_write_fence_reg(reg);
+	else if (IS_I9XX(dev))
+		i915_write_fence_reg(reg);
+	else
+		i830_write_fence_reg(reg);
+}
+
+/**
+ * i915_gem_clear_fence_reg - clear out fence register info
+ * @obj: object to clear
+ *
+ * Zeroes out the fence register itself and clears out the associated
+ * data structures in dev_priv and obj_priv.
+ */
+static void
+i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	if (IS_I965G(dev))
+		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
+	else
+		I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);
+
+	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+	obj_priv->fence_reg = I915_FENCE_REG_NONE;
+}
+
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
@@ -1307,7 +1769,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
  * This function returns when the move is complete, including waiting on
  * flushes to occur.
  */
-static int
+int
 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -2029,13 +2491,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 		/* error other than GTT full, or we've already tried again */
 		if (ret != -ENOMEM || pin_tries >= 1) {
-			DRM_ERROR("Failed to pin buffers %d\n", ret);
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to pin buffers %d\n", ret);
 			goto err;
 		}
 
 		/* unpin all of our buffers */
 		for (i = 0; i < pinned; i++)
 			i915_gem_object_unpin(object_list[i]);
+		pinned = 0;
 
 		/* evict everyone we can from the aperture */
 		ret = i915_gem_evict_everything(dev);
@@ -2149,13 +2613,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			   "back to user (%d)\n",
 			   args->buffer_count, ret);
 err:
-	if (object_list != NULL) {
-		for (i = 0; i < pinned; i++)
-			i915_gem_object_unpin(object_list[i]);
+	for (i = 0; i < pinned; i++)
+		i915_gem_object_unpin(object_list[i]);
+
+	for (i = 0; i < args->buffer_count; i++)
+		drm_gem_object_unreference(object_list[i]);
 
-		for (i = 0; i < args->buffer_count; i++)
-			drm_gem_object_unreference(object_list[i]);
-	}
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
@@ -2178,7 +2641,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 	if (obj_priv->gtt_space == NULL) {
 		ret = i915_gem_object_bind_to_gtt(obj, alignment);
 		if (ret != 0) {
-			DRM_ERROR("Failure to bind: %d", ret);
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failure to bind: %d", ret);
 			return ret;
 		}
 	}
@@ -2249,11 +2713,22 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	}
 	obj_priv = obj->driver_private;
 
-	ret = i915_gem_object_pin(obj, args->alignment);
-	if (ret != 0) {
-		drm_gem_object_unreference(obj);
+	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
+		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+			  args->handle);
 		mutex_unlock(&dev->struct_mutex);
-		return ret;
+		return -EINVAL;
+	}
+
+	obj_priv->user_pin_count++;
+	obj_priv->pin_filp = file_priv;
+	if (obj_priv->user_pin_count == 1) {
+		ret = i915_gem_object_pin(obj, args->alignment);
+		if (ret != 0) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
 	}
 
 	/* XXX - flush the CPU caches for pinned objects
@@ -2273,6 +2748,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_pin *args = data;
 	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
 
 	mutex_lock(&dev->struct_mutex);
@@ -2284,7 +2760,19 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 		return -EBADF;
 	}
 
-	i915_gem_object_unpin(obj);
+	obj_priv = obj->driver_private;
+	if (obj_priv->pin_filp != file_priv) {
+		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
+			  args->handle);
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+	obj_priv->user_pin_count--;
+	if (obj_priv->user_pin_count == 0) {
+		obj_priv->pin_filp = NULL;
+		i915_gem_object_unpin(obj);
+	}
 
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
@@ -2351,12 +2839,18 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 
 	obj->driver_private = obj_priv;
 	obj_priv->obj = obj;
+	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj_priv->list);
+
 	return 0;
 }
 
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list;
+	struct drm_map *map;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
 	while (obj_priv->pin_count > 0)
@@ -2364,6 +2858,20 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
 	i915_gem_object_unbind(obj);
 
+	list = &obj->map_list;
+	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+	if (list->file_offset_node) {
+		drm_mm_put_block(list->file_offset_node);
+		list->file_offset_node = NULL;
+	}
+
+	map = list->map;
+	if (map) {
+		drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
+		list->map = NULL;
+	}
+
 	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
@@ -2432,8 +2940,7 @@ i915_gem_idle(struct drm_device *dev)
 	 */
 	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
 		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
-	seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
-					I915_GEM_DOMAIN_GTT));
+	seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
 
 	if (seqno == 0) {
 		mutex_unlock(&dev->struct_mutex);
@@ -2560,12 +3067,13 @@ i915_gem_init_hws(struct drm_device *dev)
 	return 0;
 }
 
-static int
+int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
+	drm_i915_ring_buffer_t *ring = &dev_priv->ring;
 	int ret;
 	u32 head;
 
@@ -2587,24 +3095,24 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	}
 
 	/* Set up the kernel mapping for the ring. */
-	dev_priv->ring.Size = obj->size;
-	dev_priv->ring.tail_mask = obj->size - 1;
+	ring->Size = obj->size;
+	ring->tail_mask = obj->size - 1;
 
-	dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
-	dev_priv->ring.map.size = obj->size;
-	dev_priv->ring.map.type = 0;
-	dev_priv->ring.map.flags = 0;
-	dev_priv->ring.map.mtrr = 0;
+	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+	ring->map.size = obj->size;
+	ring->map.type = 0;
+	ring->map.flags = 0;
+	ring->map.mtrr = 0;
 
-	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
-	if (dev_priv->ring.map.handle == NULL) {
+	drm_core_ioremap_wc(&ring->map, dev);
+	if (ring->map.handle == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
 		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
 		drm_gem_object_unreference(obj);
 		return -EINVAL;
 	}
 
-	dev_priv->ring.ring_obj = obj;
-	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+	ring->ring_obj = obj;
+	ring->virtual_start = ring->map.handle;
 
 	/* Stop the ring if it's running. */
 	I915_WRITE(PRB0_CTL, 0);
@@ -2652,12 +3160,20 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	}
 
 	/* Update our cache of the ring state */
-	i915_kernel_lost_context(dev);
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		i915_kernel_lost_context(dev);
+	else {
+		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+		ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->Size;
+	}
 
 	return 0;
 }
 
-static void
+void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2695,6 +3211,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return 0;
+
 	if (dev_priv->mm.wedged) {
 		DRM_ERROR("Reenabling wedged hardware, good luck\n");
 		dev_priv->mm.wedged = 0;
@@ -2728,6 +3247,9 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return 0;
+
 	ret = i915_gem_idle(dev);
 	drm_irq_uninstall(dev);
 
@@ -2758,5 +3280,13 @@ i915_gem_load(struct drm_device *dev)
 			  i915_gem_retire_work_handler);
 	dev_priv->mm.next_gem_seqno = 1;
 
+	/* Old X drivers will take 0-2 for front, back, depth buffers */
+	dev_priv->fence_reg_start = 3;
+
+	if (IS_I965G(dev))
+		dev_priv->num_fence_regs = 16;
+	else
+		dev_priv->num_fence_regs = 8;
+
 	i915_gem_detect_bit_6_swizzle(dev);
 }
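For context, the GTT mapping path this patch introduces is driven from userspace in two steps: fetch the fake offset with the new ioctl, then mmap(2) the DRM fd at that offset so that page faults land in i915_gem_fault(). A minimal sketch of that flow, assuming the i915 GEM UAPI header (path may vary between kernel and libdrm installs) and eliding error details; map_object_gtt is a hypothetical helper name:

#include <stdint.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>	/* struct drm_i915_gem_mmap_gtt, DRM_IOCTL_I915_GEM_MMAP_GTT */

/* Hypothetical helper: map a GEM object through the GTT aperture. */
static void *map_object_gtt(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
	void *ptr;

	/* Step 1: ask the kernel for the fake mmap offset. */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) != 0)
		return NULL;

	/* Step 2: mmap the DRM fd at that offset; faults are serviced by
	 * i915_gem_fault(), which binds the object into the GTT and
	 * inserts the aperture PTEs. */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   drm_fd, (off_t)arg.offset);
	return ptr == MAP_FAILED ? NULL : ptr;
}

The offset is fake in the sense that nothing is read through the file at that position; drm_gem_mmap() only uses it to look up the object and install the fault handler.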
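The empty for loop in i915_gem_get_gtt_alignment() is just a power-of-two round-up: starting from the smallest fence size the chip supports, it doubles until a fence of that size covers the object. Restated standalone (fence_alignment is a hypothetical name; the driver's version adds the chipset and tiling checks that pick the starting value):

#include <stdint.h>

static uint32_t fence_alignment(uint32_t start, uint32_t size)
{
	uint32_t i;

	/* Double until a fence of size i covers the whole object, e.g.
	 * a 3 MB tiled object with start = 1 MB yields 1M -> 2M -> 4M. */
	for (i = start; i < size; i <<= 1)
		;

	return i;
}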
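The 965 fence encoding in i965_write_fence_reg() packs the last page covered by the fence into the upper dword, and the start page, pitch (in 128-byte units, minus one), tiling bit and valid bit into the lower dword. A self-contained restatement of that packing; pack_965_fence is a hypothetical name, and the bit positions below are assumptions standing in for the driver's I965_FENCE_* register definitions:

#include <stdint.h>

#define FENCE_PITCH_SHIFT	2		/* assumed I965_FENCE_PITCH_SHIFT */
#define FENCE_TILING_Y_SHIFT	1		/* assumed I965_FENCE_TILING_Y_SHIFT */
#define FENCE_REG_VALID		(1ULL << 0)	/* assumed I965_FENCE_REG_VALID */

static uint64_t pack_965_fence(uint32_t gtt_offset, uint32_t size,
			       uint32_t stride, int tiling_y)
{
	uint64_t val;

	/* Upper dword: address of the last page covered by the fence. */
	val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
	/* Lower dword: start page, pitch in 128-byte tiles (minus one),
	 * tiling mode and the valid bit. */
	val |= gtt_offset & 0xfffff000;
	val |= (uint64_t)((stride / 128) - 1) << FENCE_PITCH_SHIFT;
	if (tiling_y)
		val |= 1ULL << FENCE_TILING_Y_SHIFT;
	val |= FENCE_REG_VALID;

	return val;
}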