From 03f62abd112d5150b6ce8957fa85d4f6e85e357f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?=
Date: Wed, 30 Jul 2014 21:05:17 +0200
Subject: drm/radeon: split PT setup into more functions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Move the decision about which method to use into the common VM code.

Signed-off-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/radeon/ni_dma.c | 197 +++++++++++++++++++++++++---------------
 1 file changed, 123 insertions(+), 74 deletions(-)

(limited to 'drivers/gpu/drm/radeon/ni_dma.c')

diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 66325ef..8a3e622 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -307,7 +307,43 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * cayman_dma_vm_set_page - update the page tables using the DMA
+ * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr where to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+			      struct radeon_ib *ib,
+			      uint64_t pe, uint64_t src,
+			      unsigned count)
+{
+	unsigned ndw;
+
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+						      0, 0, ndw);
+		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+		ib->ptr[ib->length_dw++] = lower_32_bits(src);
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+		pe += ndw * 4;
+		src += ndw * 4;
+		count -= ndw / 2;
+	}
+}
+
+/**
+ * cayman_dma_vm_write_pages - update PTEs by writing them manually
  *
  * @rdev: radeon_device pointer
  * @ib: indirect buffer to fill with commands
@@ -315,90 +351,103 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: hw access flags 
+ * @flags: hw access flags
  *
- * Update the page tables using the DMA (cayman/TN).
+ * Update PTEs by writing them manually using the DMA (cayman/TN).
  */
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
-			    struct radeon_ib *ib,
-			    uint64_t pe,
-			    uint64_t addr, unsigned count,
-			    uint32_t incr, uint32_t flags)
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+			       struct radeon_ib *ib,
+			       uint64_t pe,
+			       uint64_t addr, unsigned count,
+			       uint32_t incr, uint32_t flags)
 {
 	uint64_t value;
 	unsigned ndw;
 
-	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
-		while (count) {
-			ndw = count * 2;
-			if (ndw > 0xFFFFE)
-				ndw = 0xFFFFE;
-
-			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
-							      0, 0, ndw);
-			ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-			ib->ptr[ib->length_dw++] = lower_32_bits(src);
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
-
-			pe += ndw * 4;
-			src += ndw * 4;
-			count -= ndw / 2;
-		}
-
-	} else if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
-		while (count) {
-			ndw = count * 2;
-			if (ndw > 0xFFFFE)
-				ndw = 0xFFFFE;
-
-			/* for non-physically contiguous pages (system) */
-			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
-			ib->ptr[ib->length_dw++] = pe;
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-				if (flags & R600_PTE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & R600_PTE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
-				addr += incr;
-				value |= flags;
-				ib->ptr[ib->length_dw++] = value;
-				ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			}
-		}
-	} else {
-		while (count) {
-			ndw = count * 2;
-			if (ndw > 0xFFFFE)
-				ndw = 0xFFFFE;
-
-			if (flags & R600_PTE_VALID)
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		/* for non-physically contiguous pages (system) */
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
+						      0, 0, ndw);
+		ib->ptr[ib->length_dw++] = pe;
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+			if (flags & R600_PTE_SYSTEM) {
+				value = radeon_vm_map_gart(rdev, addr);
+				value &= 0xFFFFFFFFFFFFF000ULL;
+			} else if (flags & R600_PTE_VALID) {
 				value = addr;
-			else
+			} else {
 				value = 0;
-			/* for physically contiguous pages (vram) */
-			ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
-			ib->ptr[ib->length_dw++] = pe; /* dst addr */
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			ib->ptr[ib->length_dw++] = flags; /* mask */
-			ib->ptr[ib->length_dw++] = 0;
-			ib->ptr[ib->length_dw++] = value; /* value */
+			}
+			addr += incr;
+			value |= flags;
+			ib->ptr[ib->length_dw++] = value;
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			ib->ptr[ib->length_dw++] = incr; /* increment size */
-			ib->ptr[ib->length_dw++] = 0;
-			pe += ndw * 4;
-			addr += (ndw / 2) * incr;
-			count -= ndw / 2;
 		}
 	}
+}
+
+/**
+ * cayman_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */ +void cayman_dma_vm_set_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + uint64_t value; + unsigned ndw; + + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + if (flags & R600_PTE_VALID) + value = addr; + else + value = 0; + + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); + ib->ptr[ib->length_dw++] = pe; /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = value; /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(value); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + + pe += ndw * 4; + addr += (ndw / 2) * incr; + count -= ndw / 2; + } +} + +/** + * cayman_dma_vm_pad_ib - pad the IB to the required number of dw + * + * @ib: indirect buffer to fill with padding + * + */ +void cayman_dma_vm_pad_ib(struct radeon_ib *ib) +{ while (ib->length_dw & 0x7) ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0); } -- cgit v1.1
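
The decision between these three code paths previously lived inside
cayman_dma_vm_set_page() (the removed hunks above); after this split it
belongs to the common VM code. Below is a minimal sketch of that dispatch,
reconstructed from the conditionals removed in this patch. The helper name
radeon_vm_set_pages() and its placement in radeon_vm.c are assumptions
here, since that file is outside this (ni_dma.c-limited) diff.

/*
 * Sketch only: the conditions and the src computation are taken verbatim
 * from the removed cayman_dma_vm_set_page(); the surrounding helper and
 * its name are assumed, not shown in this diff.
 */
static void radeon_vm_set_pages(struct radeon_device *rdev,
				struct radeon_ib *ib,
				uint64_t pe,
				uint64_t addr, unsigned count,
				uint32_t incr, uint32_t flags)
{
	trace_radeon_vm_set_page(pe, addr, count, incr, flags);

	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
		/* PTEs already live in the GART table: DMA-copy them */
		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;

		cayman_dma_vm_copy_pages(rdev, ib, pe, src, count);
	} else if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
		/* scattered system pages: every PTE differs, write each one */
		cayman_dma_vm_write_pages(rdev, ib, pe, addr, count,
					  incr, flags);
	} else {
		/* contiguous VRAM: one PTE/PDE packet generates the run */
		cayman_dma_vm_set_pages(rdev, ib, pe, addr, count,
					incr, flags);
	}
}

In the full series these calls would presumably go through the per-ASIC
callback table rather than naming the cayman functions directly, so each
chip generation can plug in its own implementations; the IB is then padded
to the ring's 8-dword alignment via cayman_dma_vm_pad_ib().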