author     Linus Torvalds <torvalds@linux-foundation.org>   2014-08-07 17:36:12 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-08-07 17:36:12 -0700
commit     a7d7a143d0b4cb1914705884ca5c25e322dba693 (patch)
tree       0ee5e9e43f0863b38a29e8abc293e80eab177d74 /drivers/gpu/drm/ttm
parent     43c40df2c7fedce640a6c39fcdf58764f6bbac5c (diff)
parent     7963e9db1b1f842fdc53309baa8714d38e9f5681 (diff)
download   op-kernel-dev-a7d7a143d0b4cb1914705884ca5c25e322dba693.zip
           op-kernel-dev-a7d7a143d0b4cb1914705884ca5c25e322dba693.tar.gz
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull DRM updates from Dave Airlie:
"Like all good pull reqs this ends with a revert, so it must mean we
tested it,
[ Ed. That's _one_ way of looking at it ]
This pull is missing nouveau; Ben has been stuck trying to track down
a very longstanding bug that revealed itself due to some other
changes. I've asked him to send you a direct pull request for nouveau
once he cleans things up. I'm away until Monday so I don't want to
delay things; you can make a decision on that when he sends it. I have
my phone, so I can ack things, just not really merge much.
It has one trivial conflict with your tree in armada_drv.c. The pull
request also contains some component changes that are already in your
tree; the base tree from Russell went via Greg's tree already, but
some stuff still shows up here that doesn't when I merge my tree into
yours.
Otherwise it's all pretty standard graphics fare: one new driver and
changes all over the place.
New drivers:
- sti kms driver for STMicroelectronics chipsets stih416 and stih407.
core:
- lots of cleanups to the drm core
- DP MST helper code merged
- universal cursor planes.
- render nodes enabled by default
panel:
- better panel interfaces
- new panel support
- non-continuous clock advertising ability
ttm:
- shrinker fixes
i915:
- hopefully ditched UMS support
- runtime pm fixes
- psr tracking and locking - now enabled by default
- userptr fixes
- backlight brightness fixes
- MST support merged
- runtime PM for dpms
- primary planes locking fixes
- gen8 hw semaphore support
- fbc fixes
- runtime PM on S0ix sleep state hw.
- mmio-based page flipping
- lots of vlv/chv fixes.
- universal cursor planes
radeon:
- Hawaii fixes
- display scaler support for non-fixed mode displays
- new firmware format support
- dpm on more asics by default
- GPUVM improvements
- uncached and wc GTT buffers
- BOs > visible VRAM
exynos:
- i80 interface support
- module auto-loading
- ipp driver consolidated.
armada:
- irq handling in crtc layer only
- crtc renumbering
- add component support
- DT interaction changes.
tegra:
- load as module fixes
- eDP bpp and sync polarity fixed
- DSI non-continuous clock mode support
- better support for importing buffers from nouveau
msm:
- mdp5/apq8084 v1.3 hw enablement
- devicetree clk changes
- ifc6410 board working
tda998x:
- component support
- DT documentation update
vmwgfx:
- fix compat shader namespace"
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (551 commits)
Revert "drm: drop redundant drm_file->is_master"
drm/panel: simple: Use devm_gpiod_get_optional()
drm/dsi: Replace upcasting macro by function
drm/panel: ld9040: Replace upcasting macro by function
drm/exynos: dp: Modify driver to support drm_panel
drm/exynos: Move DP setup into commit()
drm/panel: simple: Add AUO B133HTN01 panel support
drm/panel: simple: Support delays in panel functions
drm/panel: simple: Add proper definition for prepare and unprepare
drm/panel: s6e8aa0: Add proper definition for prepare and unprepare
drm/panel: ld9040: Add proper definition for prepare and unprepare
drm/tegra: Add support for panel prepare and unprepare routines
drm/exynos: dsi: Add support for panel prepare and unprepare routines
drm/exynos: dpi: Add support for panel prepare and unprepare routines
drm/panel: simple: Add dummy prepare and unprepare routines
drm/panel: s6e8aa0: Add dummy prepare and unprepare routines
drm/panel: ld9040: Add dummy prepare and unprepare routines
drm/panel: Provide convenience wrapper for .get_modes()
drm/panel: add .prepare() and .unprepare() functions
drm/panel: simple: Remove simple-panel compatible
...
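Several of the panel commits above split panel power handling into .prepare()/.unprepare() hooks alongside the existing .enable()/.disable() pair. As a rough sketch of how a panel driver plugs into those hooks (the driver name, regulator and GPIO fields are illustrative, not taken from the series):

/*
 * Hypothetical panel driver fragment showing the .prepare()/.unprepare()
 * hooks added by the series above.  "my_panel" and its supply/GPIO are
 * made up; only the drm_panel plumbing is the point here.
 */
#include <drm/drm_panel.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

struct my_panel {
        struct drm_panel base;
        struct regulator *supply;
        struct gpio_desc *enable_gpio;
};

static inline struct my_panel *to_my_panel(struct drm_panel *panel)
{
        return container_of(panel, struct my_panel, base);
}

/* .prepare(): bring up power rails and enable lines before video starts. */
static int my_panel_prepare(struct drm_panel *panel)
{
        struct my_panel *p = to_my_panel(panel);
        int ret;

        ret = regulator_enable(p->supply);
        if (ret < 0)
                return ret;

        gpiod_set_value_cansleep(p->enable_gpio, 1);
        return 0;
}

/* .unprepare(): the mirror image, called after .disable(). */
static int my_panel_unprepare(struct drm_panel *panel)
{
        struct my_panel *p = to_my_panel(panel);

        gpiod_set_value_cansleep(p->enable_gpio, 0);
        regulator_disable(p->supply);
        return 0;
}

static const struct drm_panel_funcs my_panel_funcs = {
        .prepare = my_panel_prepare,
        .unprepare = my_panel_unprepare,
        /* .enable/.disable/.get_modes omitted for brevity */
};

/* At probe time, roughly: */
static void my_panel_register(struct my_panel *p, struct device *dev)
{
        drm_panel_init(&p->base);
        p->base.dev = dev;
        p->base.funcs = &my_panel_funcs;
        drm_panel_add(&p->base);
}

The display driver side then calls drm_panel_prepare() before it starts scanning out and drm_panel_unprepare() after drm_panel_disable(), which is roughly what the tegra and exynos patches above wire up.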
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c              | 20
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c      |  3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c         |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c          |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c      | 31
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c  | 34
6 files changed, 51 insertions(+), 41 deletions(-)
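One functional change worth calling out before the diff: the ttm_bo_manager.c hunk below adds a flags argument to the memory-type manager's get_node() hook, so the range manager sees the flags of the placement actually being tried instead of reading bo->mem.placement (i.e. wherever the buffer currently lives). A driver can then request top-down allocation per placement, along these lines (illustrative only, not code from this series; the flag combinations mirror what drivers such as radeon use):

/*
 * Made-up placement table showing TTM_PL_FLAG_TOPDOWN requested for the
 * VRAM placement only; the GTT fallback is left alone.
 */
#include <drm/ttm/ttm_placement.h>
#include <linux/kernel.h>
#include <linux/types.h>

static const uint32_t my_vram_placements[] = {
        /* Prefer write-combined VRAM, allocated from the top of the range down. */
        TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_TOPDOWN,
        /* Fall back to cached GTT without the top-down hint. */
        TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT,
};

static const struct ttm_placement my_placement = {
        .placement           = my_vram_placements,
        .num_placement       = ARRAY_SIZE(my_vram_placements),
        .busy_placement      = my_vram_placements,
        .num_busy_placement  = ARRAY_SIZE(my_vram_placements),
};

With the old interface, ttm_bo_man_get_node() checked bo->mem.placement for TTM_PL_FLAG_TOPDOWN, so the hint from the placement being evaluated could be missed; ttm_bo_mem_space() now passes cur_flags down instead.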
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4ab9f71..3da89d5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -412,7 +412,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
         int ret;
 
         spin_lock(&glob->lru_lock);
-        ret = __ttm_bo_reserve(bo, false, true, false, 0);
+        ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 
         spin_lock(&bdev->fence_lock);
         (void) ttm_bo_wait(bo, false, false, true);
@@ -514,7 +514,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                 return ret;
 
         spin_lock(&glob->lru_lock);
-        ret = __ttm_bo_reserve(bo, false, true, false, 0);
+        ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 
         /*
          * We raced, and lost, someone else holds the reservation now,
@@ -577,11 +577,11 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
                         kref_get(&nentry->list_kref);
                 }
 
-                ret = __ttm_bo_reserve(entry, false, true, false, 0);
+                ret = __ttm_bo_reserve(entry, false, true, false, NULL);
                 if (remove_all && ret) {
                         spin_unlock(&glob->lru_lock);
                         ret = __ttm_bo_reserve(entry, false, false,
-                                               false, 0);
+                                               false, NULL);
                         spin_lock(&glob->lru_lock);
                 }
 
@@ -726,7 +726,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
         spin_lock(&glob->lru_lock);
         list_for_each_entry(bo, &man->lru, lru) {
-                ret = __ttm_bo_reserve(bo, false, true, false, 0);
+                ret = __ttm_bo_reserve(bo, false, true, false, NULL);
                 if (!ret)
                         break;
         }
@@ -784,7 +784,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
         int ret;
 
         do {
-                ret = (*man->func->get_node)(man, bo, placement, mem);
+                ret = (*man->func->get_node)(man, bo, placement, 0, mem);
                 if (unlikely(ret != 0))
                         return ret;
                 if (mem->mm_node)
@@ -897,7 +897,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
                 if (man->has_type && man->use_type) {
                         type_found = true;
-                        ret = (*man->func->get_node)(man, bo, placement, mem);
+                        ret = (*man->func->get_node)(man, bo, placement,
+                                                     cur_flags, mem);
                         if (unlikely(ret))
                                 return ret;
                 }
@@ -937,7 +938,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
                                 ~TTM_PL_MASK_MEMTYPE);
 
-
                 if (mem_type == TTM_PL_SYSTEM) {
                         mem->mem_type = mem_type;
                         mem->placement = cur_flags;
@@ -1595,7 +1595,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
          * Using ttm_bo_reserve makes sure the lru lists are updated.
          */
 
-        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+        ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
         if (unlikely(ret != 0))
                 return ret;
         spin_lock(&bdev->fence_lock);
@@ -1630,7 +1630,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
         spin_lock(&glob->lru_lock);
         list_for_each_entry(bo, &glob->swap_lru, swap) {
-                ret = __ttm_bo_reserve(bo, false, true, false, 0);
+                ret = __ttm_bo_reserve(bo, false, true, false, NULL);
                 if (!ret)
                         break;
         }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index bd850c9..9e103a48 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -50,6 +50,7 @@ struct ttm_range_manager {
 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
                                struct ttm_buffer_object *bo,
                                struct ttm_placement *placement,
+                               uint32_t flags,
                                struct ttm_mem_reg *mem)
 {
         struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
@@ -67,7 +68,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
         if (!node)
                 return -ENOMEM;
 
-        if (bo->mem.placement & TTM_PL_FLAG_TOPDOWN)
+        if (flags & TTM_PL_FLAG_TOPDOWN)
                 aflags = DRM_MM_CREATE_TOP;
 
         spin_lock(&rman->lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1df856f..30e5d90 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -500,7 +500,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
                 pgprot_val(tmp) |= _PAGE_GUARDED;
         }
 #endif
-#if defined(__ia64__)
+#if defined(__ia64__) || defined(__arm__)
         if (caching_flags & TTM_PL_FLAG_WC)
                 tmp = pgprot_writecombine(tmp);
         else
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index d7f92fe..66fc639 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -35,7 +35,7 @@
 #include <drm/drm_sysfs.h>
 
 static DECLARE_WAIT_QUEUE_HEAD(exit_q);
-atomic_t device_released;
+static atomic_t device_released;
 
 static struct device_type ttm_drm_class_type = {
         .name = "ttm",
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 863bef9..09874d6 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
  *
  * @pool: to free the pages from
  * @free_all: If set to true will free all pages in pool
+ * @gfp: GFP flags.
  **/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+                              gfp_t gfp)
 {
         unsigned long irq_flags;
         struct page *p;
@@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
         if (NUM_PAGES_TO_ALLOC < nr_free)
                 npages_to_free = NUM_PAGES_TO_ALLOC;
 
-        pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
-                        GFP_KERNEL);
+        pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
         if (!pages_to_free) {
                 pr_err("Failed to allocate memory for pool free operation\n");
                 return 0;
@@ -382,32 +383,35 @@ out:
  *
  * XXX: (dchinner) Deadlock warning!
  *
- * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means
- * this can deadlock when called a sc->gfp_mask that is not equal to
- * GFP_KERNEL.
+ * We need to pass sc->gfp_mask to ttm_page_pool_free().
  *
  * This code is crying out for a shrinker per pool....
  */
 static unsigned long
 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-        static atomic_t start_pool = ATOMIC_INIT(0);
+        static DEFINE_MUTEX(lock);
+        static unsigned start_pool;
         unsigned i;
-        unsigned pool_offset = atomic_add_return(1, &start_pool);
+        unsigned pool_offset;
         struct ttm_page_pool *pool;
         int shrink_pages = sc->nr_to_scan;
         unsigned long freed = 0;
 
-        pool_offset = pool_offset % NUM_POOLS;
+        if (!mutex_trylock(&lock))
+                return SHRINK_STOP;
+        pool_offset = ++start_pool % NUM_POOLS;
         /* select start pool in round robin fashion */
         for (i = 0; i < NUM_POOLS; ++i) {
                 unsigned nr_free = shrink_pages;
                 if (shrink_pages == 0)
                         break;
                 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
-                shrink_pages = ttm_page_pool_free(pool, nr_free);
+                shrink_pages = ttm_page_pool_free(pool, nr_free,
+                                                  sc->gfp_mask);
                 freed += nr_free - shrink_pages;
         }
+        mutex_unlock(&lock);
         return freed;
 }
@@ -706,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
         }
         spin_unlock_irqrestore(&pool->lock, irq_flags);
         if (npages)
-                ttm_page_pool_free(pool, npages);
+                ttm_page_pool_free(pool, npages, GFP_KERNEL);
 }
 
 /*
@@ -790,7 +794,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
         return 0;
 }
 
-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
                 char *name)
 {
         spin_lock_init(&pool->lock);
@@ -846,7 +850,8 @@ void ttm_page_alloc_fini(void)
         ttm_pool_mm_shrink_fini(_manager);
 
         for (i = 0; i < NUM_POOLS; ++i)
-                ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+                ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
+                                   GFP_KERNEL);
 
         kobject_put(&_manager->kobj);
         _manager = NULL;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index fb8259f..ca65df1 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -411,8 +411,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
  *
  * @pool: to free the pages from
  * @nr_free: If set to true will free all pages in pool
+ * @gfp: GFP flags.
  **/
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+                                       gfp_t gfp)
 {
         unsigned long irq_flags;
         struct dma_page *dma_p, *tmp;
@@ -430,8 +432,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
                          npages_to_free, nr_free);
         }
 #endif
-        pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
-                        GFP_KERNEL);
+        pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
         if (!pages_to_free) {
                 pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -530,7 +531,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
                 if (pool->type != type)
                         continue;
                 /* Takes a spinlock.. */
-                ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+                ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
                 WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
                 /* This code path is called after _all_ references to the
                  * struct device has been dropped - so nobody should be
@@ -983,7 +984,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 
         /* shrink pool if necessary (only on !is_cached pools)*/
         if (npages)
-                ttm_dma_page_pool_free(pool, npages);
+                ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
         ttm->state = tt_unpopulated;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -993,10 +994,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
  *
  * XXX: (dchinner) Deadlock warning!
  *
- * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
- * needs to be paid to sc->gfp_mask to determine if this can be done or not.
- * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
- * bad.
+ * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
  *
  * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
  * shrinkers
@@ -1004,9 +1002,9 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
 static unsigned long
 ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-        static atomic_t start_pool = ATOMIC_INIT(0);
+        static unsigned start_pool;
         unsigned idx = 0;
-        unsigned pool_offset = atomic_add_return(1, &start_pool);
+        unsigned pool_offset;
         unsigned shrink_pages = sc->nr_to_scan;
         struct device_pools *p;
         unsigned long freed = 0;
@@ -1014,8 +1012,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
         if (list_empty(&_manager->pools))
                 return SHRINK_STOP;
 
-        mutex_lock(&_manager->lock);
-        pool_offset = pool_offset % _manager->npools;
+        if (!mutex_trylock(&_manager->lock))
+                return SHRINK_STOP;
+        if (!_manager->npools)
+                goto out;
+        pool_offset = ++start_pool % _manager->npools;
         list_for_each_entry(p, &_manager->pools, pools) {
                 unsigned nr_free;
 
@@ -1027,13 +1028,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                 if (++idx < pool_offset)
                         continue;
                 nr_free = shrink_pages;
-                shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+                shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
+                                                      sc->gfp_mask);
                 freed += nr_free - shrink_pages;
 
                 pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
                          p->pool->dev_name, p->pool->name, current->pid,
                          nr_free, shrink_pages);
         }
+out:
         mutex_unlock(&_manager->lock);
         return freed;
 }
@@ -1044,7 +1047,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
         struct device_pools *p;
         unsigned long count = 0;
 
-        mutex_lock(&_manager->lock);
+        if (!mutex_trylock(&_manager->lock))
+                return 0;
         list_for_each_entry(p, &_manager->pools, pools)
                 count += p->pool->npages_free;
         mutex_unlock(&_manager->lock);
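The ttm_page_alloc.c and ttm_page_alloc_dma.c hunks above rework the pool shrinkers in two ways: the pool free routines gain a gfp_t parameter so the reclaim path passes sc->gfp_mask instead of hard-coding GFP_KERNEL, and the scan/count callbacks take their lock with mutex_trylock(), backing off with SHRINK_STOP (or a zero count) rather than blocking or recursing from within reclaim. Stripped of the TTM specifics, the shape of that pattern is roughly this (a sketch; the my_pool_* names are hypothetical):

/*
 * Minimal sketch (not the TTM code) of the shrinker pattern used above:
 * trylock so reclaim never blocks or re-enters the pool code, and pass
 * sc->gfp_mask down to any allocation made while freeing.
 * my_pool_lock/my_pool_objects/my_pool_free() are made up.
 */
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>

static DEFINE_MUTEX(my_pool_lock);
static unsigned long my_pool_objects;   /* stand-in for the real pool state */

/* Free up to nr objects; gfp bounds any temporary allocation we make. */
static unsigned long my_pool_free(unsigned long nr, gfp_t gfp)
{
        unsigned long n = min(nr, my_pool_objects);

        /* A real pool would kmalloc(..., gfp) a scratch array here, which is
         * exactly why the gfp mask has to come from shrink_control. */
        my_pool_objects -= n;
        return n;
}

static unsigned long my_pool_shrink_scan(struct shrinker *shrink,
                                         struct shrink_control *sc)
{
        unsigned long freed;

        if (!mutex_trylock(&my_pool_lock))
                return SHRINK_STOP;     /* contended: let reclaim move on */
        freed = my_pool_free(sc->nr_to_scan, sc->gfp_mask);
        mutex_unlock(&my_pool_lock);
        return freed;
}

static unsigned long my_pool_shrink_count(struct shrinker *shrink,
                                          struct shrink_control *sc)
{
        unsigned long count;

        if (!mutex_trylock(&my_pool_lock))
                return 0;               /* report nothing rather than block */
        count = my_pool_objects;
        mutex_unlock(&my_pool_lock);
        return count;
}

static struct shrinker my_pool_shrinker = {
        .count_objects  = my_pool_shrink_count,
        .scan_objects   = my_pool_shrink_scan,
        .seeks          = DEFAULT_SEEKS,
};

static int __init my_pool_init(void)
{
        return register_shrinker(&my_pool_shrinker);
}

static void __exit my_pool_exit(void)
{
        unregister_shrinker(&my_pool_shrinker);
        /* a real pool would drain itself here */
}

module_init(my_pool_init);
module_exit(my_pool_exit);
MODULE_LICENSE("GPL");

The round-robin pool selection the real code does with start_pool is dropped here to keep the sketch short; the trylock-and-bail behaviour and the gfp propagation are the parts the hunks above are about.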