author    Rob Clark <robdclark@gmail.com>  2017-06-13 11:50:05 -0400
committer Rob Clark <robdclark@gmail.com>  2017-06-16 11:16:05 -0400
commit    f4839bd5126310635314610a85468e87b40ce4c8 (patch)
tree      d1557b893dcf931540557060c395724e03ae0383 /drivers/gpu/drm/msm
parent    8bdcd949bbe7e7f9e60a3564baa600884f8f4ba7 (diff)
download  op-kernel-dev-f4839bd5126310635314610a85468e87b40ce4c8.zip
          op-kernel-dev-f4839bd5126310635314610a85468e87b40ce4c8.tar.gz
drm/msm: refactor how we handle vram carveout buffers
Pull some of the logic out into msm_gem_new() (since we don't need to
care about the imported-bo case), and don't defer allocating pages.

The latter is generally a good idea, since if we are using VRAM
carveout to allocate contiguous buffers (ie. no IOMMU), the allocation
is more likely to fail.  So failing at allocation time is a more sane
option.  Plus this simplifies things in the next patch.

Signed-off-by: Rob Clark <robdclark@gmail.com>
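[Editor's note: a minimal sketch of the policy change described above, in
plain kernel-style C.  struct bo and bo_new() are hypothetical stand-ins
for the driver's GEM object and msm_gem_new(), and kmalloc() stands in
for a contiguous carveout allocation; this is not the driver's code.]

	/* Illustrative sketch only: eager vs. deferred backing allocation. */
	#include <linux/err.h>
	#include <linux/slab.h>

	struct bo {
		void *backing;		/* contiguous backing store, if any */
		size_t size;
	};

	static struct bo *bo_new(size_t size, bool carveout)
	{
		struct bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

		if (!bo)
			return ERR_PTR(-ENOMEM);
		bo->size = size;

		if (carveout) {
			/* Contiguous memory is the scarce resource: allocate
			 * it up front so ENOMEM surfaces here, at creation
			 * time, rather than at first use. */
			bo->backing = kmalloc(size, GFP_KERNEL);
			if (!bo->backing) {
				kfree(bo);
				return ERR_PTR(-ENOMEM);
			}
		}

		/* With an IOMMU, discontiguous pages can still be allocated
		 * lazily; that path is much less likely to fail. */
		return bo;
	}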
Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 48
1 file changed, 27 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 0a38c5b..2e5c987 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -324,12 +324,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 		if (IS_ERR(pages))
 			return PTR_ERR(pages);
 
-		if (iommu_present(&platform_bus_type)) {
-			ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
-					msm_obj->sgt, obj->size >> PAGE_SHIFT);
-		} else {
-			msm_obj->domain[id].iova = physaddr(obj);
-		}
+		ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
+				msm_obj->sgt, obj->size >> PAGE_SHIFT);
 	}
 
 	if (!ret)
@@ -765,7 +761,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
-	bool use_vram = false;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
@@ -780,21 +775,10 @@ static int msm_gem_new_impl(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	if (!iommu_present(&platform_bus_type))
-		use_vram = true;
-	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
-		use_vram = true;
-
-	if (WARN_ON(use_vram && !priv->vram.size))
-		return -EINVAL;
-
 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
 	if (!msm_obj)
 		return -ENOMEM;
 
-	if (use_vram)
-		msm_obj->vram_node = &msm_obj->domain[0].node;
-
 	msm_obj->flags = flags;
 	msm_obj->madv = MSM_MADV_WILLNEED;
 
@@ -816,13 +800,23 @@ static int msm_gem_new_impl(struct drm_device *dev,
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags)
 {
+	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_gem_object *obj = NULL;
+	bool use_vram = false;
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	size = PAGE_ALIGN(size);
 
+	if (!iommu_present(&platform_bus_type))
+		use_vram = true;
+	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+		use_vram = true;
+
+	if (WARN_ON(use_vram && !priv->vram.size))
+		return ERR_PTR(-EINVAL);
+
 	/* Disallow zero sized objects as they make the underlying
 	 * infrastructure grumpy
 	 */
@@ -833,12 +827,24 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 	if (ret)
 		goto fail;
 
-	if (use_pages(obj)) {
+	if (use_vram) {
+		struct msm_gem_object *msm_obj = to_msm_bo(obj);
+		struct page **pages;
+
+		msm_obj->vram_node = &msm_obj->domain[0].node;
+		drm_gem_private_object_init(dev, obj, size);
+
+		msm_obj->pages = get_pages(obj);
+		pages = get_pages(obj);
+		if (IS_ERR(pages)) {
+			ret = PTR_ERR(pages);
+			goto fail;
+		}
+		msm_obj->domain[0].iova = physaddr(obj);
+	} else {
 		ret = drm_gem_object_init(dev, obj, size);
 		if (ret)
 			goto fail;
-	} else {
-		drm_gem_private_object_init(dev, obj, size);
 	}
 
 	return obj;
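[Editor's note: for reference, the allocation that now happens eagerly is
the carveout branch of get_pages().  Below is a hedged, simplified sketch
of how such a carveout-backed allocator can work, assuming a drm_mm range
manager over the reserved region whose units are pages; names and details
approximate the driver's get_pages_vram() path and are not verbatim.]

	#include <drm/drm_mm.h>
	#include <linux/err.h>
	#include <linux/mm.h>
	#include <linux/pfn.h>
	#include <linux/slab.h>

	/* Hypothetical helper: allocate npages from a carveout managed
	 * by drm_mm and build a struct page array for them.  "base" is
	 * the carveout's physical base address. */
	static struct page **carveout_get_pages(struct drm_mm *mm,
			struct drm_mm_node *node, phys_addr_t base, int npages)
	{
		struct page **p;
		phys_addr_t paddr;
		int ret, i;

		p = kvmalloc_array(npages, sizeof(*p), GFP_KERNEL);
		if (!p)
			return ERR_PTR(-ENOMEM);

		/* A contiguous range is carved out of the reserved region;
		 * this is the allocation that can fail when the carveout is
		 * exhausted or fragmented -- hence doing it at object
		 * creation time rather than deferring it. */
		ret = drm_mm_insert_node(mm, node, npages);
		if (ret) {
			kvfree(p);
			return ERR_PTR(ret);
		}

		/* This mm tracks sizes in pages, so node->start is a page
		 * offset into the carveout. */
		paddr = base + (node->start << PAGE_SHIFT);
		for (i = 0; i < npages; i++)
			p[i] = pfn_to_page(PHYS_PFN(paddr + i * PAGE_SIZE));

		return p;
	}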