author    Ben Skeggs <bskeggs@redhat.com>  2011-02-10 12:59:51 +1000
committer Ben Skeggs <bskeggs@redhat.com>  2011-02-25 06:46:01 +1000
commit    26c0c9e33a2eb44b345d22d5928d5c8b7b261226
tree      d15305e77bfc4547a36cfa9755aeeffb15dd59ce
parent    d5f423947a11103c43ad26ebb680d049c2d8edd6
drm/nv50-nvc0: delay GART binding until move_notify time
The immediate benefit of doing this is that on NV50 and up, the GPU virtual address of a buffer is now constant, regardless of which memtype it's placed in.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
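For context: the mapping that used to be done at bind time now happens from TTM's move_notify callback, which is implemented in nouveau_bo.c and is not part of this diff. The following is only a rough sketch of that idea, reusing the nouveau_vm_map_sg() helper visible in the removed hunks below; the hook name sketch_move_notify, the per-buffer nvbo->vma field, and the nouveau_vm_unmap() call are illustrative assumptions, not the driver's actual code:

/* Sketch only: on a move into GART, map the buffer's scattered pages
 * at its fixed virtual address; on any other move, drop the mapping.
 * nouveau_vm_unmap() is assumed as the counterpart of the
 * nouveau_vm_unmap_at() helper seen in the removed code below. */
static void
sketch_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);	/* container_of helper */
	struct nouveau_vma *vma = &nvbo->vma;		/* constant GPU VA */
	struct nouveau_mem *node = new_mem ? new_mem->mm_node : NULL;

	if (node && new_mem->mem_type == TTM_PL_TT)
		nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
				  node->pages);
	else
		nouveau_vm_unmap(vma);
}

Because the same vma is reused for every placement, the buffer's GPU address no longer changes when it migrates between VRAM and GART.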
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 30 +++++++++---------------------
1 file changed, 9 insertions(+), 21 deletions(-)
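With the mapping handled at move_notify time, the bind/unbind hooks in the hunks below reduce to bookkeeping: bind hands the backend's dma_addr_t page list to the memory node and parks the node pointer in nvbe->pages via a cast, and unbind reverses the swap. A minimal self-contained illustration of that stash/restore pattern, using hypothetical stand-in types rather than the real driver structures:

#include <stdbool.h>
#include <stddef.h>

typedef unsigned long long dma_addr_t;	/* stand-in for the kernel type */

struct mem_node { dma_addr_t *pages; };			/* like struct nouveau_mem */
struct backend { dma_addr_t *pages; bool bound; };	/* like nouveau_sgdma_be */

/* bind: hand the page list to the node, park the node in the backend */
static void stash(struct backend *be, struct mem_node *node)
{
	node->pages = be->pages;
	be->pages = (dma_addr_t *)node;	/* repurpose the slot as a node pointer */
	be->bound = true;
}

/* unbind: undo the swap so the backend owns its page list again */
static void restore(struct backend *be)
{
	struct mem_node *node = (struct mem_node *)be->pages;

	be->pages = node->pages;
	node->pages = NULL;
	be->bound = false;
}

The cast is safe here because nvbe->pages is otherwise unused while the buffer is bound; it simply lets unbind recover the node without the backend carrying any extra state.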
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index a2b89bf..1205f0f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -375,12 +375,10 @@ static int
 nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
-
-	nvbe->offset = mem->start << PAGE_SHIFT;
-
-	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
-			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+	struct nouveau_mem *node = mem->mm_node;
+	/* noop: bound in move_notify() */
+	node->pages = nvbe->pages;
+	nvbe->pages = (dma_addr_t *)node;
 	nvbe->bound = true;
 	return 0;
 }
@@ -389,13 +387,10 @@ static int
 nv50_sgdma_unbind(struct ttm_backend *be)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
-
-	if (!nvbe->bound)
-		return 0;
-
-	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
-			    nvbe->nr_pages << PAGE_SHIFT);
+	struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
+	/* noop: unbound in move_notify() */
+	nvbe->pages = node->pages;
+	node->pages = NULL;
 	nvbe->bound = false;
 	return 0;
 }
@@ -457,13 +452,7 @@ nouveau_sgdma_init(struct drm_device *dev)
 	}
 
 	if (dev_priv->card_type >= NV_50) {
-		ret = nouveau_vm_get(dev_priv->chan_vm, aper_size,
-				     12, NV_MEM_ACCESS_RW,
-				     &dev_priv->gart_info.vma);
-		if (ret)
-			return ret;
-
-		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+		dev_priv->gart_info.aper_base = 0;
 		dev_priv->gart_info.aper_size = aper_size;
 		dev_priv->gart_info.type = NOUVEAU_GART_HW;
 		dev_priv->gart_info.func = &nv50_sgdma_backend;
@@ -522,7 +511,6 @@ nouveau_sgdma_takedown(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
 	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
-	nouveau_vm_put(&dev_priv->gart_info.vma);
 
 	if (dev_priv->gart_info.dummy.page) {
 		pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,