author		Linus Torvalds <torvalds@linux-foundation.org>	2013-07-09 16:04:31 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-09 16:04:31 -0700
commit		2e17c5a97e231f3cb426f4b7895eab5be5c5442e (patch)
tree		80871817427250200d6931a45ccb4833c4add74a /drivers/gpu/drm/ttm/ttm_bo.c
parent		5f097cd249f00683442c3e265d6f27d80fc83563 (diff)
parent		774d8e34e46506222bb5e2888e3ef42b2775715f (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"Okay this is the big one, I was stalled on the fbdev pull req as I
stupidly let fbdev guys merge a patch I required to fix a warning with
some patches I had, they ended up merging the patch from the wrong
place, but the warning should be fixed. In future I'll just take the
patch myself!
Outside drm:
There are some snd changes for the HDMI audio interactions on Haswell;
they've been acked for inclusion via my tree. This relies on the
wound/wait mutex tree from Ingo, which is already merged.
Major changes:
AMD finally released the dynamic power management code for all their
GPUs from r600 to the present day. This is great; it is off by default
for now, but it is also a huge amount of code, and in fact it makes up
most of this pull request. Since it landed there has been a lot of
community testing, and Alex has sent a lot of fixes for the bugs found
so far. I suspect radeon might now be the biggest kernel driver ever
:-P P.S. pass radeon.dpm=1 to enable dynamic power management.
New drivers:
Renesas r-car display unit.
Other highlights:
- core: GEM CMA prime support, use new w/w mutexes for TTM
reservations, cursor hotspot, doc updates
- dvo chips: chrontel 7010B support
- i915: Haswell (fbc, ips, vecs, watermarks, audio powerwell),
Valleyview (enabled by default, rc6), lots of pll reworking, 30bpp
support (this time for sure)
- nouveau: async buffer object deletion, context/register init
updates, kernel vp2 engine support, GF117 support, GK110 accel
support (with external nvidia ucode), context cleanups.
- exynos: memory leak fixes, S3C64XX SoC series support, device
tree updates, common clock framework support
- qxl: cursor hotspot support, multi-monitor support, suspend/resume
support
- mgag200: hw cursor support, g200 mode limiting
- shmobile: prime support
- tegra: fixes mostly
I've been banging on this quite a lot due to the size of it, and it
seems to be okay on everything I've tested it on."
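The wound/wait ("w/w") mutexes mentioned above, and used below for the
TTM reservation rework, let a task lock any set of mutexes of one class
in any order without deadlocking: each acquire context carries a ticket,
and when two contexts contend, the younger one is backed off with
-EDEADLK and must drop its locks before retrying. Here is a minimal
sketch of that acquire/backoff dance for two locks; demo_ww_class,
lock_a/lock_b and demo_lock_pair are illustrative names, while the ww_*
calls are the kernel API this merge relies on:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

/* Illustrative locks; real users embed the ww_mutex in an object. */
static struct ww_mutex lock_a;
static struct ww_mutex lock_b;

static void demo_init(void)
{
	ww_mutex_init(&lock_a, &demo_ww_class);
	ww_mutex_init(&lock_b, &demo_ww_class);
}

/* Take both locks, in either order, without deadlocking. */
static void demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &demo_ww_class);

	/* The first lock holds nothing it could back off from, so
	 * -EDEADLK is not expected here; handled anyway for symmetry. */
	if (ww_mutex_lock(a, &ctx) == -EDEADLK)
		ww_mutex_lock_slow(a, &ctx);

	while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
		/* An older context owns b and we lost the tie-break:
		 * release what we hold, sleep until b is free, then
		 * retry with b taken first. */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		swap(a, b);
	}

	ww_acquire_done(&ctx);	/* the whole set is now locked */

	/* ... critical section touching both objects ... */

	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
	ww_acquire_fini(&ctx);
}

TTM buffer reservations are exactly this kind of lock: the diff below
embeds one such mutex in each buffer's reservation object, replacing
the driver's homegrown sequence-number scheme.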
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (811 commits)
drm/radeon/dpm: implement vblank_too_short callback for si
drm/radeon/dpm: implement vblank_too_short callback for cayman
drm/radeon/dpm: implement vblank_too_short callback for btc
drm/radeon/dpm: implement vblank_too_short callback for evergreen
drm/radeon/dpm: implement vblank_too_short callback for 7xx
drm/radeon/dpm: add checks against vblank time
drm/radeon/dpm: add helper to calculate vblank time
drm/radeon: remove stray line in old pm code
drm/radeon/dpm: fix display_gap programming on rv7xx
drm/nvc0/gr: fix gpc firmware regression
drm/nouveau: fix minor thinko causing bo moves to not be async on kepler
drm/radeon/dpm: implement force performance level for TN
drm/radeon/dpm: implement force performance level for ON/LN
drm/radeon/dpm: implement force performance level for SI
drm/radeon/dpm: implement force performance level for cayman
drm/radeon/dpm: implement force performance levels for 7xx/eg/btc
drm/radeon/dpm: add infrastructure to force performance levels
drm/radeon: fix surface setup on r1xx
drm/radeon: add support for 3d perf states on older asics
drm/radeon: set default clocks for SI when DPM is disabled
...
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	239
1 file changed, 35 insertions(+), 204 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9b07b7d..cb9dd67 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -150,6 +150,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	if (bo->ttm)
 		ttm_tt_destroy(bo->ttm);
 	atomic_dec(&bo->glob->bo_count);
+	if (bo->resv == &bo->ttm_resv)
+		reservation_object_fini(&bo->ttm_resv);
+
 	if (bo->destroy)
 		bo->destroy(bo);
 	else {
@@ -158,24 +161,12 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 }
 
-static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
-				  bool interruptible)
-{
-	if (interruptible) {
-		return wait_event_interruptible(bo->event_queue,
-					       !ttm_bo_is_reserved(bo));
-	} else {
-		wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
-		return 0;
-	}
-}
-
 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
 
-	BUG_ON(!ttm_bo_is_reserved(bo));
+	lockdep_assert_held(&bo->resv->lock.base);
 
 	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
@@ -191,6 +182,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 		}
 	}
 }
+EXPORT_SYMBOL(ttm_bo_add_to_lru);
 
 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
@@ -213,71 +205,6 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 	return put_count;
 }
 
-int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
-			 bool interruptible,
-			 bool no_wait, bool use_sequence, uint32_t sequence)
-{
-	int ret;
-
-	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
-		/**
-		 * Deadlock avoidance for multi-bo reserving.
-		 */
-		if (use_sequence && bo->seq_valid) {
-			/**
-			 * We've already reserved this one.
-			 */
-			if (unlikely(sequence == bo->val_seq))
-				return -EDEADLK;
-			/**
-			 * Already reserved by a thread that will not back
-			 * off for us. We need to back off.
-			 */
-			if (unlikely(sequence - bo->val_seq < (1 << 31)))
-				return -EAGAIN;
-		}
-
-		if (no_wait)
-			return -EBUSY;
-
-		ret = ttm_bo_wait_unreserved(bo, interruptible);
-
-		if (unlikely(ret))
-			return ret;
-	}
-
-	if (use_sequence) {
-		bool wake_up = false;
-		/**
-		 * Wake up waiters that may need to recheck for deadlock,
-		 * if we decreased the sequence number.
-		 */
-		if (unlikely((bo->val_seq - sequence < (1 << 31))
-			     || !bo->seq_valid))
-			wake_up = true;
-
-		/*
-		 * In the worst case with memory ordering these values can be
-		 * seen in the wrong order. However since we call wake_up_all
-		 * in that case, this will hopefully not pose a problem,
-		 * and the worst case would only cause someone to accidentally
-		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
-		 * val_seq. However this would only happen if seq_valid was
-		 * written before val_seq was, and just means some slightly
-		 * increased cpu usage
-		 */
-		bo->val_seq = sequence;
-		bo->seq_valid = true;
-		if (wake_up)
-			wake_up_all(&bo->event_queue);
-	} else {
-		bo->seq_valid = false;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(ttm_bo_reserve);
-
 static void ttm_bo_ref_bug(struct kref *list_kref)
 {
 	BUG();
@@ -290,89 +217,16 @@ void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
 		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
 }
 
-int ttm_bo_reserve(struct ttm_buffer_object *bo,
-		   bool interruptible,
-		   bool no_wait, bool use_sequence, uint32_t sequence)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	int put_count = 0;
-	int ret;
-
-	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
-				   sequence);
-	if (likely(ret == 0)) {
-		spin_lock(&glob->lru_lock);
-		put_count = ttm_bo_del_from_lru(bo);
-		spin_unlock(&glob->lru_lock);
-		ttm_bo_list_ref_sub(bo, put_count, true);
-	}
-
-	return ret;
-}
-
-int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
-				  bool interruptible, uint32_t sequence)
-{
-	bool wake_up = false;
-	int ret;
-
-	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
-		WARN_ON(bo->seq_valid && sequence == bo->val_seq);
-
-		ret = ttm_bo_wait_unreserved(bo, interruptible);
-
-		if (unlikely(ret))
-			return ret;
-	}
-
-	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
-		wake_up = true;
-
-	/**
-	 * Wake up waiters that may need to recheck for deadlock,
-	 * if we decreased the sequence number.
-	 */
-	bo->val_seq = sequence;
-	bo->seq_valid = true;
-	if (wake_up)
-		wake_up_all(&bo->event_queue);
-
-	return 0;
-}
-
-int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
-			    bool interruptible, uint32_t sequence)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	int put_count, ret;
-
-	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
-	if (likely(!ret)) {
-		spin_lock(&glob->lru_lock);
-		put_count = ttm_bo_del_from_lru(bo);
-		spin_unlock(&glob->lru_lock);
-		ttm_bo_list_ref_sub(bo, put_count, true);
-	}
-	return ret;
-}
-EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
-
-void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
-{
-	ttm_bo_add_to_lru(bo);
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
-}
-
-void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
 {
-	struct ttm_bo_global *glob = bo->glob;
+	int put_count;
 
-	spin_lock(&glob->lru_lock);
-	ttm_bo_unreserve_locked(bo);
-	spin_unlock(&glob->lru_lock);
+	spin_lock(&bo->glob->lru_lock);
+	put_count = ttm_bo_del_from_lru(bo);
+	spin_unlock(&bo->glob->lru_lock);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 }
-EXPORT_SYMBOL(ttm_bo_unreserve);
+EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 
 /*
  * Call bo->mutex locked.
@@ -544,17 +398,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 	}
 	ttm_bo_mem_put(bo, &bo->mem);
 
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
-
-	/*
-	 * Since the final reference to this bo may not be dropped by
-	 * the current task we have to put a memory barrier here to make
-	 * sure the changes done in this function are always visible.
-	 *
-	 * This function only needs protection against the final kref_put.
-	 */
-	smp_mb__before_atomic_dec();
+	ww_mutex_unlock (&bo->resv->lock);
 }
 
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
@@ -586,10 +430,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 	spin_unlock(&bdev->fence_lock);
 
-	if (!ret) {
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
-	}
+	if (!ret)
+		ww_mutex_unlock(&bo->resv->lock);
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -639,8 +481,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		spin_unlock(&bdev->fence_lock);
 
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
+		ww_mutex_unlock(&bo->resv->lock);
 		spin_unlock(&glob->lru_lock);
 
 		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
@@ -678,8 +519,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 	spin_unlock(&bdev->fence_lock);
 
 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
+		ww_mutex_unlock(&bo->resv->lock);
 		spin_unlock(&glob->lru_lock);
 		return ret;
 	}
@@ -831,7 +671,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 		goto out;
 	}
 
-	BUG_ON(!ttm_bo_is_reserved(bo));
+	lockdep_assert_held(&bo->resv->lock.base);
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
@@ -1121,7 +961,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	struct ttm_mem_reg mem;
 	struct ttm_bo_device *bdev = bo->bdev;
 
-	BUG_ON(!ttm_bo_is_reserved(bo));
+	lockdep_assert_held(&bo->resv->lock.base);
 
 	/*
 	 * FIXME: It's possible to pipeline buffer moves.
@@ -1180,7 +1020,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 {
 	int ret;
 
-	BUG_ON(!ttm_bo_is_reserved(bo));
+	lockdep_assert_held(&bo->resv->lock.base);
 	/* Check that range is valid */
 	if (placement->lpfn || placement->fpfn)
 		if (placement->fpfn > placement->lpfn ||
@@ -1239,6 +1079,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	int ret = 0;
 	unsigned long num_pages;
 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+	bool locked;
 
 	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
 	if (ret) {
@@ -1265,8 +1106,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	kref_init(&bo->kref);
 	kref_init(&bo->list_kref);
 	atomic_set(&bo->cpu_writers, 0);
-	atomic_set(&bo->reserved, 1);
-	init_waitqueue_head(&bo->event_queue);
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
@@ -1284,37 +1123,34 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->mem.bus.io_reserved_count = 0;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
-	bo->seq_valid = false;
 	bo->persistent_swap_storage = persistent_swap_storage;
 	bo->acc_size = acc_size;
 	bo->sg = sg;
+	bo->resv = &bo->ttm_resv;
+	reservation_object_init(bo->resv);
 	atomic_inc(&bo->glob->bo_count);
 
 	ret = ttm_bo_check_placement(bo, placement);
-	if (unlikely(ret != 0))
-		goto out_err;
 
 	/*
 	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
 	 */
-	if (bo->type == ttm_bo_type_device ||
-	    bo->type == ttm_bo_type_sg) {
+	if (likely(!ret) &&
+	    (bo->type == ttm_bo_type_device ||
+	     bo->type == ttm_bo_type_sg))
 		ret = ttm_bo_setup_vm(bo);
-		if (ret)
-			goto out_err;
-	}
 
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
-	if (ret)
-		goto out_err;
+	locked = ww_mutex_trylock(&bo->resv->lock);
+	WARN_ON(!locked);
 
-	ttm_bo_unreserve(bo);
-	return 0;
+	if (likely(!ret))
+		ret = ttm_bo_validate(bo, placement, interruptible, false);
 
-out_err:
 	ttm_bo_unreserve(bo);
-	ttm_bo_unref(&bo);
+
+	if (unlikely(ret))
+		ttm_bo_unref(&bo);
 
 	return ret;
 }
@@ -1619,9 +1455,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 		goto out_no_sys;
 
 	bdev->addr_space_rb = RB_ROOT;
-	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
-	if (unlikely(ret != 0))
-		goto out_no_addr_mm;
+	drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
 
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
 	INIT_LIST_HEAD(&bdev->ddestroy);
@@ -1635,8 +1469,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	mutex_unlock(&glob->device_list_mutex);
 
 	return 0;
-out_no_addr_mm:
-	ttm_bo_clean_mm(bdev, 0);
 out_no_sys:
 	return ret;
 }
@@ -1927,8 +1759,7 @@ out:
 	 * already swapped buffer.
 	 */
 
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
+	ww_mutex_unlock(&bo->resv->lock);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
 }
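Taken together, the hunks above replace TTM's homegrown reservation
machinery (the bo->reserved atomic, the bo->event_queue wait queue, and
the val_seq/seq_valid deadlock-avoidance sequence numbers) with a
wound/wait mutex embedded in a reservation object at bo->resv. Here is
a minimal sketch of what the new scheme amounts to; struct demo_bo and
the demo_* helpers are illustrative stand-ins, while the
reservation_object, ww_mutex and lockdep calls mirror the diff:

#include <linux/bug.h>
#include <linux/lockdep.h>
#include <linux/reservation.h>
#include <linux/ww_mutex.h>

/* Stand-in for the TTM buffer object fields the diff touches. */
struct demo_bo {
	struct reservation_object *resv;	/* bo->resv */
	struct reservation_object ttm_resv;	/* embedded default */
};

/* New BOs leave ttm_bo_init reserved: the diff initialises the embedded
 * reservation object, then takes its lock with a trylock that cannot
 * fail because nobody else can see the BO yet. */
static void demo_bo_init_resv(struct demo_bo *bo)
{
	bool locked;

	bo->resv = &bo->ttm_resv;
	reservation_object_init(bo->resv);

	locked = ww_mutex_trylock(&bo->resv->lock);
	WARN_ON(!locked);
}

/* "Reserving" a BO is now a plain ww_mutex acquire; the old
 * -EDEADLK/-EAGAIN sequence-number logic lives in the ww core. */
static int demo_bo_reserve(struct demo_bo *bo, struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock(&bo->resv->lock, ctx);	/* 0 or -EDEADLK */
}

static void demo_bo_unreserve(struct demo_bo *bo)
{
	ww_mutex_unlock(&bo->resv->lock);
}

/* Paths that used to BUG_ON(!ttm_bo_is_reserved(bo)) now assert that
 * the underlying mutex is held, exactly as the diff does. */
static void demo_bo_assert_reserved(struct demo_bo *bo)
{
	lockdep_assert_held(&bo->resv->lock.base);
}

One consequence visible throughout the diff is that unreserving no
longer needs wake_up_all(): waiters sleep on the mutex itself.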