drm/radeon: split PT setup in more functions
Move the decision what to use into the common VM code.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
committed by Alex Deucher
parent 5a341be27f
commit 03f62abd11
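The patch replaces the single per-ASIC vm.set_page() callback with three narrower callbacks (copy_pages, write_pages, set_pages) plus pad_ib, and moves the choice between them into the common VM code. For orientation before reading the diff, this is the new radeon_vm_set_pages() helper added next to radeon_vm_bo_add() further down, reproduced here with explanatory comments; the comments are a reading aid, not part of the patch:

static void radeon_vm_set_pages(struct radeon_device *rdev,
                                struct radeon_ib *ib,
                                uint64_t pe,
                                uint64_t addr, unsigned count,
                                uint32_t incr, uint32_t flags)
{
    trace_radeon_vm_set_page(pe, addr, count, incr, flags);

    if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
        /* GART-backed entries: copy the PTEs straight out of the GART table */
        uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
        radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);

    } else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
        /* system pages, or runs too short to be worth a PTE/PDE packet */
        radeon_asic_vm_write_pages(rdev, ib, pe, addr,
                                   count, incr, flags);

    } else {
        /* physically contiguous VRAM: let the DMA engine generate the entries */
        radeon_asic_vm_set_pages(rdev, ib, pe, addr,
                                 count, incr, flags);
    }
}

Each ASIC backend below now only implements these individual paths; the per-ASIC code no longer duplicates this decision logic.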
@@ -749,7 +749,93 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * cik_sdma_vm_set_page - update the page tables using sDMA
+ * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA (CIK).
+ */
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+                            struct radeon_ib *ib,
+                            uint64_t pe, uint64_t src,
+                            unsigned count)
+{
+    while (count) {
+        unsigned bytes = count * 8;
+        if (bytes > 0x1FFFF8)
+            bytes = 0x1FFFF8;
+
+        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+            SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+        ib->ptr[ib->length_dw++] = bytes;
+        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+        ib->ptr[ib->length_dw++] = lower_32_bits(src);
+        ib->ptr[ib->length_dw++] = upper_32_bits(src);
+        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+        pe += bytes;
+        src += bytes;
+        count -= bytes / 8;
+    }
+}
+
+/**
+ * cik_sdma_vm_write_pages - update PTEs by writing them manually
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update PTEs by writing them manually using sDMA (CIK).
+ */
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+                             struct radeon_ib *ib,
+                             uint64_t pe,
+                             uint64_t addr, unsigned count,
+                             uint32_t incr, uint32_t flags)
+{
+    uint64_t value;
+    unsigned ndw;
+
+    while (count) {
+        ndw = count * 2;
+        if (ndw > 0xFFFFE)
+            ndw = 0xFFFFE;
+
+        /* for non-physically contiguous pages (system) */
+        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+            SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+        ib->ptr[ib->length_dw++] = pe;
+        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+        ib->ptr[ib->length_dw++] = ndw;
+        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+            if (flags & R600_PTE_SYSTEM) {
+                value = radeon_vm_map_gart(rdev, addr);
+                value &= 0xFFFFFFFFFFFFF000ULL;
+            } else if (flags & R600_PTE_VALID) {
+                value = addr;
+            } else {
+                value = 0;
+            }
+            addr += incr;
+            value |= flags;
+            ib->ptr[ib->length_dw++] = value;
+            ib->ptr[ib->length_dw++] = upper_32_bits(value);
+        }
+    }
+}
+
+/**
+ * cik_sdma_vm_set_pages - update the page tables using sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
@@ -761,82 +847,51 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 *
 * Update the page tables using sDMA (CIK).
 */
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
                           struct radeon_ib *ib,
                           uint64_t pe,
                           uint64_t addr, unsigned count,
                           uint32_t incr, uint32_t flags)
 {
     uint64_t value;
     unsigned ndw;
 
-    trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-    if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-        uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
-        while (count) {
-            unsigned bytes = count * 8;
-            if (bytes > 0x1FFFF8)
-                bytes = 0x1FFFF8;
-
-            ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-            ib->ptr[ib->length_dw++] = bytes;
-            ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
-            ib->ptr[ib->length_dw++] = lower_32_bits(src);
-            ib->ptr[ib->length_dw++] = upper_32_bits(src);
-            ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-            ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
-            pe += bytes;
-            src += bytes;
-            count -= bytes / 8;
-        }
-    } else if (flags & R600_PTE_SYSTEM) {
-        while (count) {
-            ndw = count * 2;
-            if (ndw > 0xFFFFE)
-                ndw = 0xFFFFE;
-
-            /* for non-physically contiguous pages (system) */
-            ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-            ib->ptr[ib->length_dw++] = pe;
-            ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-            ib->ptr[ib->length_dw++] = ndw;
-            for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                value = radeon_vm_map_gart(rdev, addr);
-                value &= 0xFFFFFFFFFFFFF000ULL;
-                addr += incr;
-                value |= flags;
-                ib->ptr[ib->length_dw++] = value;
-                ib->ptr[ib->length_dw++] = upper_32_bits(value);
-            }
-        }
-    } else {
-        while (count) {
-            ndw = count;
-            if (ndw > 0x7FFFF)
-                ndw = 0x7FFFF;
-
-            if (flags & R600_PTE_VALID)
-                value = addr;
-            else
-                value = 0;
-            /* for physically contiguous pages (vram) */
-            ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
-            ib->ptr[ib->length_dw++] = pe; /* dst addr */
-            ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-            ib->ptr[ib->length_dw++] = flags; /* mask */
-            ib->ptr[ib->length_dw++] = 0;
-            ib->ptr[ib->length_dw++] = value; /* value */
-            ib->ptr[ib->length_dw++] = upper_32_bits(value);
-            ib->ptr[ib->length_dw++] = incr; /* increment size */
-            ib->ptr[ib->length_dw++] = 0;
-            ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-            pe += ndw * 8;
-            addr += ndw * incr;
-            count -= ndw;
-        }
-    }
+    while (count) {
+        ndw = count;
+        if (ndw > 0x7FFFF)
+            ndw = 0x7FFFF;
+
+        if (flags & R600_PTE_VALID)
+            value = addr;
+        else
+            value = 0;
+
+        /* for physically contiguous pages (vram) */
+        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+        ib->ptr[ib->length_dw++] = pe; /* dst addr */
+        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+        ib->ptr[ib->length_dw++] = flags; /* mask */
+        ib->ptr[ib->length_dw++] = 0;
+        ib->ptr[ib->length_dw++] = value; /* value */
+        ib->ptr[ib->length_dw++] = upper_32_bits(value);
+        ib->ptr[ib->length_dw++] = incr; /* increment size */
+        ib->ptr[ib->length_dw++] = 0;
+        ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+
+        pe += ndw * 8;
+        addr += ndw * incr;
+        count -= ndw;
+    }
+}
+
+/**
+ * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
+{
     while (ib->length_dw & 0x7)
         ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
 }
@@ -307,7 +307,43 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * cayman_dma_vm_set_page - update the page tables using the DMA
+ * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr where to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+                              struct radeon_ib *ib,
+                              uint64_t pe, uint64_t src,
+                              unsigned count)
+{
+    unsigned ndw;
+
+    while (count) {
+        ndw = count * 2;
+        if (ndw > 0xFFFFE)
+            ndw = 0xFFFFE;
+
+        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+                                              0, 0, ndw);
+        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+        ib->ptr[ib->length_dw++] = lower_32_bits(src);
+        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+        ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+        pe += ndw * 4;
+        src += ndw * 4;
+        count -= ndw / 2;
+    }
+}
+
+/**
+ * cayman_dma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
@@ -315,90 +351,103 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
- * Update the page tables using the DMA (cayman/TN).
+ * Update PTEs by writing them manually using the DMA (cayman/TN).
 */
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
                             struct radeon_ib *ib,
                             uint64_t pe,
                             uint64_t addr, unsigned count,
                             uint32_t incr, uint32_t flags)
 {
     uint64_t value;
     unsigned ndw;
 
-    trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-    if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-        uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
-        while (count) {
-            ndw = count * 2;
-            if (ndw > 0xFFFFE)
-                ndw = 0xFFFFE;
-
-            ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
-                                                  0, 0, ndw);
-            ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-            ib->ptr[ib->length_dw++] = lower_32_bits(src);
-            ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-            ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
-
-            pe += ndw * 4;
-            src += ndw * 4;
-            count -= ndw / 2;
-        }
-    } else if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
-        while (count) {
-            ndw = count * 2;
-            if (ndw > 0xFFFFE)
-                ndw = 0xFFFFE;
-
-            /* for non-physically contiguous pages (system) */
-            ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
-            ib->ptr[ib->length_dw++] = pe;
-            ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-            for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                if (flags & R600_PTE_SYSTEM) {
-                    value = radeon_vm_map_gart(rdev, addr);
-                    value &= 0xFFFFFFFFFFFFF000ULL;
-                } else if (flags & R600_PTE_VALID) {
-                    value = addr;
-                } else {
-                    value = 0;
-                }
-                addr += incr;
-                value |= flags;
-                ib->ptr[ib->length_dw++] = value;
-                ib->ptr[ib->length_dw++] = upper_32_bits(value);
-            }
-        }
-    } else {
-        while (count) {
-            ndw = count * 2;
-            if (ndw > 0xFFFFE)
-                ndw = 0xFFFFE;
-
-            if (flags & R600_PTE_VALID)
-                value = addr;
-            else
-                value = 0;
-            /* for physically contiguous pages (vram) */
-            ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
-            ib->ptr[ib->length_dw++] = pe; /* dst addr */
-            ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-            ib->ptr[ib->length_dw++] = flags; /* mask */
-            ib->ptr[ib->length_dw++] = 0;
-            ib->ptr[ib->length_dw++] = value; /* value */
-            ib->ptr[ib->length_dw++] = upper_32_bits(value);
-            ib->ptr[ib->length_dw++] = incr; /* increment size */
-            ib->ptr[ib->length_dw++] = 0;
-            pe += ndw * 4;
-            addr += (ndw / 2) * incr;
-            count -= ndw / 2;
-        }
-    }
+    while (count) {
+        ndw = count * 2;
+        if (ndw > 0xFFFFE)
+            ndw = 0xFFFFE;
+
+        /* for non-physically contiguous pages (system) */
+        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
+                                              0, 0, ndw);
+        ib->ptr[ib->length_dw++] = pe;
+        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+            if (flags & R600_PTE_SYSTEM) {
+                value = radeon_vm_map_gart(rdev, addr);
+                value &= 0xFFFFFFFFFFFFF000ULL;
+            } else if (flags & R600_PTE_VALID) {
+                value = addr;
+            } else {
+                value = 0;
+            }
+            addr += incr;
+            value |= flags;
+            ib->ptr[ib->length_dw++] = value;
+            ib->ptr[ib->length_dw++] = upper_32_bits(value);
+        }
+    }
+}
+
+/**
+ * cayman_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+                             struct radeon_ib *ib,
+                             uint64_t pe,
+                             uint64_t addr, unsigned count,
+                             uint32_t incr, uint32_t flags)
+{
+    uint64_t value;
+    unsigned ndw;
+
+    while (count) {
+        ndw = count * 2;
+        if (ndw > 0xFFFFE)
+            ndw = 0xFFFFE;
+
+        if (flags & R600_PTE_VALID)
+            value = addr;
+        else
+            value = 0;
+
+        /* for physically contiguous pages (vram) */
+        ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+        ib->ptr[ib->length_dw++] = pe; /* dst addr */
+        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+        ib->ptr[ib->length_dw++] = flags; /* mask */
+        ib->ptr[ib->length_dw++] = 0;
+        ib->ptr[ib->length_dw++] = value; /* value */
+        ib->ptr[ib->length_dw++] = upper_32_bits(value);
+        ib->ptr[ib->length_dw++] = incr; /* increment size */
+        ib->ptr[ib->length_dw++] = 0;
+
+        pe += ndw * 4;
+        addr += (ndw / 2) * incr;
+        count -= ndw / 2;
+    }
+}
+
+/**
+ * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
+{
     while (ib->length_dw & 0x7)
         ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
 }
@@ -1797,11 +1797,21 @@ struct radeon_asic {
     struct {
         int (*init)(struct radeon_device *rdev);
         void (*fini)(struct radeon_device *rdev);
-        void (*set_page)(struct radeon_device *rdev,
-                         struct radeon_ib *ib,
-                         uint64_t pe,
-                         uint64_t addr, unsigned count,
-                         uint32_t incr, uint32_t flags);
+        void (*copy_pages)(struct radeon_device *rdev,
+                           struct radeon_ib *ib,
+                           uint64_t pe, uint64_t src,
+                           unsigned count);
+        void (*write_pages)(struct radeon_device *rdev,
+                            struct radeon_ib *ib,
+                            uint64_t pe,
+                            uint64_t addr, unsigned count,
+                            uint32_t incr, uint32_t flags);
+        void (*set_pages)(struct radeon_device *rdev,
+                          struct radeon_ib *ib,
+                          uint64_t pe,
+                          uint64_t addr, unsigned count,
+                          uint32_t incr, uint32_t flags);
+        void (*pad_ib)(struct radeon_ib *ib);
     } vm;
     /* ring specific callbacks */
     struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
@@ -2761,7 +2771,10 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
+#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
 #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
 #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
 #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
@@ -1613,7 +1613,10 @@ static struct radeon_asic cayman_asic = {
     .vm = {
         .init = &cayman_vm_init,
         .fini = &cayman_vm_fini,
-        .set_page = &cayman_dma_vm_set_page,
+        .copy_pages = &cayman_dma_vm_copy_pages,
+        .write_pages = &cayman_dma_vm_write_pages,
+        .set_pages = &cayman_dma_vm_set_pages,
+        .pad_ib = &cayman_dma_vm_pad_ib,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1713,7 +1716,10 @@ static struct radeon_asic trinity_asic = {
     .vm = {
         .init = &cayman_vm_init,
         .fini = &cayman_vm_fini,
-        .set_page = &cayman_dma_vm_set_page,
+        .copy_pages = &cayman_dma_vm_copy_pages,
+        .write_pages = &cayman_dma_vm_write_pages,
+        .set_pages = &cayman_dma_vm_set_pages,
+        .pad_ib = &cayman_dma_vm_pad_ib,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1843,7 +1849,10 @@ static struct radeon_asic si_asic = {
     .vm = {
         .init = &si_vm_init,
         .fini = &si_vm_fini,
-        .set_page = &si_dma_vm_set_page,
+        .copy_pages = &si_dma_vm_copy_pages,
+        .write_pages = &si_dma_vm_write_pages,
+        .set_pages = &si_dma_vm_set_pages,
+        .pad_ib = &cayman_dma_vm_pad_ib,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -2001,7 +2010,10 @@ static struct radeon_asic ci_asic = {
     .vm = {
         .init = &cik_vm_init,
         .fini = &cik_vm_fini,
-        .set_page = &cik_sdma_vm_set_page,
+        .copy_pages = &cik_sdma_vm_copy_pages,
+        .write_pages = &cik_sdma_vm_write_pages,
+        .set_pages = &cik_sdma_vm_set_pages,
+        .pad_ib = &cik_sdma_vm_pad_ib,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2105,7 +2117,10 @@ static struct radeon_asic kv_asic = {
     .vm = {
         .init = &cik_vm_init,
         .fini = &cik_vm_fini,
-        .set_page = &cik_sdma_vm_set_page,
+        .copy_pages = &cik_sdma_vm_copy_pages,
+        .write_pages = &cik_sdma_vm_write_pages,
+        .set_pages = &cik_sdma_vm_set_pages,
+        .pad_ib = &cik_sdma_vm_pad_ib,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -607,11 +607,22 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
                                struct radeon_ib *ib);
 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
-                            struct radeon_ib *ib,
-                            uint64_t pe,
-                            uint64_t addr, unsigned count,
-                            uint32_t incr, uint32_t flags);
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+                              struct radeon_ib *ib,
+                              uint64_t pe, uint64_t src,
+                              unsigned count);
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+                               struct radeon_ib *ib,
+                               uint64_t pe,
+                               uint64_t addr, unsigned count,
+                               uint32_t incr, uint32_t flags);
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+                             struct radeon_ib *ib,
+                             uint64_t pe,
+                             uint64_t addr, unsigned count,
+                             uint32_t incr, uint32_t flags);
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
+
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
@@ -694,11 +705,22 @@ int si_copy_dma(struct radeon_device *rdev,
                 uint64_t src_offset, uint64_t dst_offset,
                 unsigned num_gpu_pages,
                 struct radeon_fence **fence);
-void si_dma_vm_set_page(struct radeon_device *rdev,
-                        struct radeon_ib *ib,
-                        uint64_t pe,
-                        uint64_t addr, unsigned count,
-                        uint32_t incr, uint32_t flags);
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+                          struct radeon_ib *ib,
+                          uint64_t pe, uint64_t src,
+                          unsigned count);
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+                           struct radeon_ib *ib,
+                           uint64_t pe,
+                           uint64_t addr, unsigned count,
+                           uint32_t incr, uint32_t flags);
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+                         struct radeon_ib *ib,
+                         uint64_t pe,
+                         uint64_t addr, unsigned count,
+                         uint32_t incr, uint32_t flags);
+
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 u32 si_get_xclk(struct radeon_device *rdev);
 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -772,11 +794,23 @@ int cik_irq_process(struct radeon_device *rdev);
 int cik_vm_init(struct radeon_device *rdev);
 void cik_vm_fini(struct radeon_device *rdev);
 void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
-                          struct radeon_ib *ib,
-                          uint64_t pe,
-                          uint64_t addr, unsigned count,
-                          uint32_t incr, uint32_t flags);
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+                            struct radeon_ib *ib,
+                            uint64_t pe, uint64_t src,
+                            unsigned count);
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+                             struct radeon_ib *ib,
+                             uint64_t pe,
+                             uint64_t addr, unsigned count,
+                             uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+                           struct radeon_ib *ib,
+                           uint64_t pe,
+                           uint64_t addr, unsigned count,
+                           uint32_t incr, uint32_t flags);
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
+
 void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 u32 cik_gfx_get_rptr(struct radeon_device *rdev,
@@ -340,6 +340,42 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
     return bo_va;
 }
 
+/**
+ * radeon_vm_set_pages - helper to call the right asic function
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to setup the page table using the DMA.
+ */
+static void radeon_vm_set_pages(struct radeon_device *rdev,
+                                struct radeon_ib *ib,
+                                uint64_t pe,
+                                uint64_t addr, unsigned count,
+                                uint32_t incr, uint32_t flags)
+{
+    trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+    if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+        uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+        radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
+
+    } else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
+        radeon_asic_vm_write_pages(rdev, ib, pe, addr,
+                                   count, incr, flags);
+
+    } else {
+        radeon_asic_vm_set_pages(rdev, ib, pe, addr,
+                                 count, incr, flags);
+    }
+}
+
 /**
  * radeon_vm_clear_bo - initially clear the page dir/table
  *
@@ -381,7 +417,8 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
     ib.length_dw = 0;
 
-    radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
+    radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
+    radeon_asic_vm_pad_ib(rdev, &ib);
 
     r = radeon_ib_schedule(rdev, &ib, NULL);
     if (r)
@@ -634,9 +671,9 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
             ((last_pt + incr * count) != pt)) {
 
             if (count) {
-                radeon_asic_vm_set_page(rdev, &ib, last_pde,
+                radeon_vm_set_pages(rdev, &ib, last_pde,
                                         last_pt, count, incr,
                                         R600_PTE_VALID);
             }
 
             count = 1;
@@ -648,10 +685,11 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
     }
 
     if (count)
-        radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
+        radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
                                 incr, R600_PTE_VALID);
 
     if (ib.length_dw != 0) {
+        radeon_asic_vm_pad_ib(rdev, &ib);
         radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
         radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
         r = radeon_ib_schedule(rdev, &ib, NULL);
@@ -719,30 +757,30 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
         (frag_start >= frag_end)) {
 
         count = (pe_end - pe_start) / 8;
-        radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
+        radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
                                 RADEON_GPU_PAGE_SIZE, flags);
         return;
     }
 
     /* handle the 4K area at the beginning */
     if (pe_start != frag_start) {
         count = (frag_start - pe_start) / 8;
-        radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
+        radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
                                 RADEON_GPU_PAGE_SIZE, flags);
         addr += RADEON_GPU_PAGE_SIZE * count;
     }
 
     /* handle the area in the middle */
     count = (frag_end - frag_start) / 8;
-    radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
+    radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
                             RADEON_GPU_PAGE_SIZE, flags | frag_flags);
 
     /* handle the 4K area at the end */
     if (frag_end != pe_end) {
         addr += RADEON_GPU_PAGE_SIZE * count;
         count = (pe_end - frag_end) / 8;
-        radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
+        radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
                                 RADEON_GPU_PAGE_SIZE, flags);
     }
 }
@@ -900,6 +938,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
                 bo_va->it.last + 1, addr,
                 radeon_vm_page_flags(bo_va->flags));
 
+    radeon_asic_vm_pad_ib(rdev, &ib);
     radeon_semaphore_sync_to(ib.semaphore, vm->fence);
     r = radeon_ib_schedule(rdev, &ib, NULL);
     if (r) {
@@ -56,7 +56,89 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * si_dma_vm_set_page - update the page tables using the DMA
+ * si_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr where to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (SI).
+ */
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+                          struct radeon_ib *ib,
+                          uint64_t pe, uint64_t src,
+                          unsigned count)
+{
+    while (count) {
+        unsigned bytes = count * 8;
+        if (bytes > 0xFFFF8)
+            bytes = 0xFFFF8;
+
+        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+                                              1, 0, 0, bytes);
+        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+        ib->ptr[ib->length_dw++] = lower_32_bits(src);
+        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+        ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+        pe += bytes;
+        src += bytes;
+        count -= bytes / 8;
+    }
+}
+
+/**
+ * si_dma_vm_write_pages - update PTEs by writing them manually
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update PTEs by writing them manually using the DMA (SI).
+ */
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+                           struct radeon_ib *ib,
+                           uint64_t pe,
+                           uint64_t addr, unsigned count,
+                           uint32_t incr, uint32_t flags)
+{
+    uint64_t value;
+    unsigned ndw;
+
+    while (count) {
+        ndw = count * 2;
+        if (ndw > 0xFFFFE)
+            ndw = 0xFFFFE;
+
+        /* for non-physically contiguous pages (system) */
+        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+        ib->ptr[ib->length_dw++] = pe;
+        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+            if (flags & R600_PTE_SYSTEM) {
+                value = radeon_vm_map_gart(rdev, addr);
+                value &= 0xFFFFFFFFFFFFF000ULL;
+            } else if (flags & R600_PTE_VALID) {
+                value = addr;
+            } else {
+                value = 0;
+            }
+            addr += incr;
+            value |= flags;
+            ib->ptr[ib->length_dw++] = value;
+            ib->ptr[ib->length_dw++] = upper_32_bits(value);
+        }
+    }
+}
+
+/**
+ * si_dma_vm_set_pages - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
@@ -68,81 +150,39 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 *
 * Update the page tables using the DMA (SI).
 */
-void si_dma_vm_set_page(struct radeon_device *rdev,
+void si_dma_vm_set_pages(struct radeon_device *rdev,
                         struct radeon_ib *ib,
                         uint64_t pe,
                         uint64_t addr, unsigned count,
                         uint32_t incr, uint32_t flags)
 {
     uint64_t value;
     unsigned ndw;
 
-    trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-    if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-        uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
-        while (count) {
-            unsigned bytes = count * 8;
-            if (bytes > 0xFFFF8)
-                bytes = 0xFFFF8;
-
-            ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
-                                                  1, 0, 0, bytes);
-            ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-            ib->ptr[ib->length_dw++] = lower_32_bits(src);
-            ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-            ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
-
-            pe += bytes;
-            src += bytes;
-            count -= bytes / 8;
-        }
-    } else if (flags & R600_PTE_SYSTEM) {
-        while (count) {
-            ndw = count * 2;
-            if (ndw > 0xFFFFE)
-                ndw = 0xFFFFE;
-
-            /* for non-physically contiguous pages (system) */
-            ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
-            ib->ptr[ib->length_dw++] = pe;
-            ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-            for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                value = radeon_vm_map_gart(rdev, addr);
-                value &= 0xFFFFFFFFFFFFF000ULL;
-                addr += incr;
-                value |= flags;
-                ib->ptr[ib->length_dw++] = value;
-                ib->ptr[ib->length_dw++] = upper_32_bits(value);
-            }
-        }
-    } else {
-        while (count) {
-            ndw = count * 2;
-            if (ndw > 0xFFFFE)
-                ndw = 0xFFFFE;
-
-            if (flags & R600_PTE_VALID)
-                value = addr;
-            else
-                value = 0;
-            /* for physically contiguous pages (vram) */
-            ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
-            ib->ptr[ib->length_dw++] = pe; /* dst addr */
-            ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-            ib->ptr[ib->length_dw++] = flags; /* mask */
-            ib->ptr[ib->length_dw++] = 0;
-            ib->ptr[ib->length_dw++] = value; /* value */
-            ib->ptr[ib->length_dw++] = upper_32_bits(value);
-            ib->ptr[ib->length_dw++] = incr; /* increment size */
-            ib->ptr[ib->length_dw++] = 0;
-            pe += ndw * 4;
-            addr += (ndw / 2) * incr;
-            count -= ndw / 2;
-        }
-    }
-    while (ib->length_dw & 0x7)
-        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+    while (count) {
+        ndw = count * 2;
+        if (ndw > 0xFFFFE)
+            ndw = 0xFFFFE;
+
+        if (flags & R600_PTE_VALID)
+            value = addr;
+        else
+            value = 0;
+
+        /* for physically contiguous pages (vram) */
+        ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+        ib->ptr[ib->length_dw++] = pe; /* dst addr */
+        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+        ib->ptr[ib->length_dw++] = flags; /* mask */
+        ib->ptr[ib->length_dw++] = 0;
+        ib->ptr[ib->length_dw++] = value; /* value */
+        ib->ptr[ib->length_dw++] = upper_32_bits(value);
+        ib->ptr[ib->length_dw++] = incr; /* increment size */
+        ib->ptr[ib->length_dw++] = 0;
+
+        pe += ndw * 4;
+        addr += (ndw / 2) * incr;
+        count -= ndw / 2;
+    }
 }
 
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)