author    Ben Skeggs <bskeggs@redhat.com>  2015-08-20 14:54:09 +1000
committer Ben Skeggs <bskeggs@redhat.com>  2015-08-28 12:40:16 +1000
commit    83f56106ead017a07868176279746d73bc7a7060 (patch)
tree      714e684b2d2d9dbada3f577f51427056f69a83fd /drivers/gpu/drm/nouveau/nvkm/subdev/mmu
parent    25e3a463fc1bd39c01cc6d19d2c8b4c4725699b9 (diff)
drm/nouveau/mmu: switch to device pri macros
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/mmu')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c  | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c  | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c  |  5
4 files changed, 32 insertions, 26 deletions
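
As a rough illustration of the conversion this commit applies, the sketch below shows the shape of a register access after the switch: the old nv_rd32()/nv_wr32() helpers took the mmu object, while the new nvkm_rd32()/nvkm_wr32() pri macros take the nvkm_device pointer fetched once from mmu->subdev.device. The struct layouts and the backing register array here are simplified stand-ins invented for the demonstration, not the real nvkm definitions.

/* Sketch only: simplified stand-ins for the nvkm types and pri accessors. */
#include <stdint.h>
#include <stdio.h>

struct nvkm_device { uint32_t regs[0x200000 / 4]; };     /* fake pri register space */
struct nvkm_subdev { struct nvkm_device *device; };
struct nvkm_mmu    { struct nvkm_subdev subdev; };

static uint32_t nvkm_rd32(struct nvkm_device *device, uint32_t addr)
{
	return device->regs[addr / 4];                    /* read from the fake space */
}

static void nvkm_wr32(struct nvkm_device *device, uint32_t addr, uint32_t data)
{
	device->regs[addr / 4] = data;                    /* write to the fake space */
}

static void example_vm_flush(struct nvkm_mmu *mmu)
{
	/* The pattern used throughout this patch: look the device up once from
	 * mmu->subdev.device, then pass it to every register access instead of
	 * routing the access through the mmu object as nv_rd32()/nv_wr32() did. */
	struct nvkm_device *device = mmu->subdev.device;

	nvkm_wr32(device, 0x100c80, 0x00000001);
	printf("0x100c80 = 0x%08x\n", nvkm_rd32(device, 0x100c80));
}

int main(void)
{
	static struct nvkm_device device;                 /* zeroed fake device */
	struct nvkm_mmu mmu = { .subdev = { .device = &device } };

	example_vm_flush(&mmu);
	return 0;
}

Compiled standalone, the program only exercises the calling pattern; in the driver the same shape appears in gf100_vm_flush(), nv41_vm_flush(), nv44_mmu_init() and nv50_vm_flush() in the hunks below.
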
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 982f7c7..3551b55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -154,7 +154,8 @@ static void
gf100_vm_flush(struct nvkm_vm *vm)
{
struct nvkm_mmu *mmu = (void *)vm->mmu;
- struct nvkm_bar *bar = nvkm_bar(mmu);
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_bar *bar = device->bar;
struct nvkm_vm_pgd *vpgd;
u32 type;
@@ -171,16 +172,16 @@ gf100_vm_flush(struct nvkm_vm *vm)
*/
if (!nv_wait_ne(mmu, 0x100c80, 0x00ff0000, 0x00000000)) {
nv_error(mmu, "vm timeout 0: 0x%08x %d\n",
- nv_rd32(mmu, 0x100c80), type);
+ nvkm_rd32(device, 0x100c80), type);
}
- nv_wr32(mmu, 0x100cb8, vpgd->obj->addr >> 8);
- nv_wr32(mmu, 0x100cbc, 0x80000000 | type);
+ nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
+ nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
/* wait for flush to be queued? */
if (!nv_wait(mmu, 0x100c80, 0x00008000, 0x00008000)) {
nv_error(mmu, "vm timeout 1: 0x%08x %d\n",
- nv_rd32(mmu, 0x100c80), type);
+ nvkm_rd32(device, 0x100c80), type);
}
}
mutex_unlock(&nv_subdev(mmu)->mutex);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
index 17b2b39..609c6a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -65,14 +65,15 @@ static void
nv41_vm_flush(struct nvkm_vm *vm)
{
struct nv04_mmu *mmu = (void *)vm->mmu;
+ struct nvkm_device *device = mmu->base.subdev.device;
mutex_lock(&nv_subdev(mmu)->mutex);
- nv_wr32(mmu, 0x100810, 0x00000022);
+ nvkm_wr32(device, 0x100810, 0x00000022);
if (!nv_wait(mmu, 0x100810, 0x00000020, 0x00000020)) {
nv_warn(mmu, "flush timeout, 0x%08x\n",
- nv_rd32(mmu, 0x100810));
+ nvkm_rd32(device, 0x100810));
}
- nv_wr32(mmu, 0x100810, 0x00000000);
+ nvkm_wr32(device, 0x100810, 0x00000000);
mutex_unlock(&nv_subdev(mmu)->mutex);
}
@@ -131,6 +132,7 @@ static int
nv41_mmu_init(struct nvkm_object *object)
{
struct nv04_mmu *mmu = (void *)object;
+ struct nvkm_device *device = mmu->base.subdev.device;
struct nvkm_gpuobj *dma = mmu->vm->pgt[0].obj[0];
int ret;
@@ -138,9 +140,9 @@ nv41_mmu_init(struct nvkm_object *object)
if (ret)
return ret;
- nv_wr32(mmu, 0x100800, dma->addr | 0x00000002);
- nv_mask(mmu, 0x10008c, 0x00000100, 0x00000100);
- nv_wr32(mmu, 0x100820, 0x00000000);
+ nvkm_wr32(device, 0x100800, dma->addr | 0x00000002);
+ nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
+ nvkm_wr32(device, 0x100820, 0x00000000);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index 860654f..371f627 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -140,11 +140,12 @@ static void
nv44_vm_flush(struct nvkm_vm *vm)
{
struct nv04_mmu *mmu = (void *)vm->mmu;
- nv_wr32(mmu, 0x100814, mmu->base.limit - NV44_GART_PAGE);
- nv_wr32(mmu, 0x100808, 0x00000020);
+ struct nvkm_device *device = mmu->base.subdev.device;
+ nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
+ nvkm_wr32(device, 0x100808, 0x00000020);
if (!nv_wait(mmu, 0x100808, 0x00000001, 0x00000001))
- nv_error(mmu, "timeout: 0x%08x\n", nv_rd32(mmu, 0x100808));
- nv_wr32(mmu, 0x100808, 0x00000000);
+ nv_error(mmu, "timeout: 0x%08x\n", nvkm_rd32(device, 0x100808));
+ nvkm_wr32(device, 0x100808, 0x00000000);
}
/*******************************************************************************
@@ -208,6 +209,7 @@ static int
nv44_mmu_init(struct nvkm_object *object)
{
struct nv04_mmu *mmu = (void *)object;
+ struct nvkm_device *device = mmu->base.subdev.device;
struct nvkm_gpuobj *gart = mmu->vm->pgt[0].obj[0];
u32 addr;
int ret;
@@ -220,17 +222,17 @@ nv44_mmu_init(struct nvkm_object *object)
* allocated on 512KiB alignment, and not exceed a total size
* of 512KiB for this to work correctly
*/
- addr = nv_rd32(mmu, 0x10020c);
+ addr = nvkm_rd32(device, 0x10020c);
addr -= ((gart->addr >> 19) + 1) << 19;
- nv_wr32(mmu, 0x100850, 0x80000000);
- nv_wr32(mmu, 0x100818, mmu->null);
- nv_wr32(mmu, 0x100804, NV44_GART_SIZE);
- nv_wr32(mmu, 0x100850, 0x00008000);
- nv_mask(mmu, 0x10008c, 0x00000200, 0x00000200);
- nv_wr32(mmu, 0x100820, 0x00000000);
- nv_wr32(mmu, 0x10082c, 0x00000001);
- nv_wr32(mmu, 0x100800, addr | 0x00000010);
+ nvkm_wr32(device, 0x100850, 0x80000000);
+ nvkm_wr32(device, 0x100818, mmu->null);
+ nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
+ nvkm_wr32(device, 0x100850, 0x00008000);
+ nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
+ nvkm_wr32(device, 0x100820, 0x00000000);
+ nvkm_wr32(device, 0x10082c, 0x00000001);
+ nvkm_wr32(device, 0x100800, addr | 0x00000010);
return 0;
}
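
The 512KiB placement arithmetic in the nv44_mmu_init() hunk above can be checked with a small standalone computation. This is a sketch only: the values standing in for the 0x10020c read and for gart->addr are invented for the demonstration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented example inputs, not values from real hardware. */
	uint32_t reg_10020c = 0x10000000;  /* stand-in for nvkm_rd32(device, 0x10020c) */
	uint32_t gart_addr  = 0x00025000;  /* stand-in for gart->addr                  */

	/* ((gart->addr >> 19) + 1) << 19 rounds the GART object's offset up to the
	 * next 512KiB (1 << 19) boundary, matching the alignment requirement stated
	 * in the code comment; the result is then subtracted from the 0x10020c value
	 * exactly as the hunk does before writing addr | 0x00000010 to 0x100800. */
	uint32_t rounded = ((gart_addr >> 19) + 1) << 19;   /* 0x00080000 here */
	uint32_t addr    = reg_10020c - rounded;            /* 0x0ff80000 here */

	printf("rounded offset: 0x%08x\n", rounded);
	printf("resulting addr: 0x%08x\n", addr);
	return 0;
}
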
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index 75c6a07..1d7e1aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -146,7 +146,8 @@ static void
nv50_vm_flush(struct nvkm_vm *vm)
{
struct nvkm_mmu *mmu = (void *)vm->mmu;
- struct nvkm_bar *bar = nvkm_bar(mmu);
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_bar *bar = device->bar;
struct nvkm_engine *engine;
int i, vme;
@@ -180,7 +181,7 @@ nv50_vm_flush(struct nvkm_vm *vm)
continue;
}
- nv_wr32(mmu, 0x100c80, (vme << 16) | 1);
+ nvkm_wr32(device, 0x100c80, (vme << 16) | 1);
if (!nv_wait(mmu, 0x100c80, 0x00000001, 0x00000000))
nv_error(mmu, "vm flush timeout: engine %d\n", vme);
}