drm/radeon: drop CP page table updates & cleanup v2

The DMA ring seems to be stable now.

v2: remove pt_ring_index as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Christian König 2013-10-30 11:51:09 -04:00 committed by Alex Deucher
parent 74d360f66b
commit 24c164393d
10 changed files with 73 additions and 267 deletions
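In short, the patch removes the CP (GFX ring) page-table write paths (cik_vm_set_page, cayman_vm_set_page, si_vm_set_page), points every ASIC's .set_page hook directly at its DMA implementation, and performs the RADEON_VM_PAGE_* to R600_PTE_* flag translation once in radeon_gart.c instead of inside each callback. A standalone sketch of that translation, condensed from the radeon.h and radeon_gart.c hunks below; the RADEON_VM_PAGE_* values are quoted from radeon_drm.h and are not part of this diff:

#include <stdint.h>
#include <stdio.h>

/* Userspace ABI flags, as defined in include/uapi/drm/radeon_drm.h. */
#define RADEON_VM_PAGE_VALID     (1 << 0)
#define RADEON_VM_PAGE_READABLE  (1 << 1)
#define RADEON_VM_PAGE_WRITEABLE (1 << 2)
#define RADEON_VM_PAGE_SYSTEM    (1 << 3)
#define RADEON_VM_PAGE_SNOOPED   (1 << 4)

/* Hardware PTE bits, as added to radeon.h by this patch. */
#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

/* Same logic as the radeon_vm_page_flags() helper added in radeon_gart.c. */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
    uint32_t hw_flags = 0;
    hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
    hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
    hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
    if (flags & RADEON_VM_PAGE_SYSTEM) {
        hw_flags |= R600_PTE_SYSTEM;
        hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
    }
    return hw_flags;
}

int main(void)
{
    /* A valid, readable, writeable, snooped system page. */
    uint32_t vm_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE |
                        RADEON_VM_PAGE_WRITEABLE | RADEON_VM_PAGE_SYSTEM |
                        RADEON_VM_PAGE_SNOOPED;

    printf("hw flags: 0x%02x\n", (unsigned)radeon_vm_page_flags(vm_flags)); /* 0x67 */
    return 0;
}

Because the callers now hand over pre-translated bits, the set_page implementations and the trace_radeon_vm_set_page tracepoint all see the same hardware flags.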

drivers/gpu/drm/radeon/cik.c

@@ -67,11 +67,6 @@ extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
 extern int cik_sdma_resume(struct radeon_device *rdev);
 extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
 extern void cik_sdma_fini(struct radeon_device *rdev);
-extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
-                                 struct radeon_ib *ib,
-                                 uint64_t pe,
-                                 uint64_t addr, unsigned count,
-                                 uint32_t incr, uint32_t flags);
 static void cik_rlc_stop(struct radeon_device *rdev);
 static void cik_pcie_gen3_enable(struct radeon_device *rdev);
 static void cik_program_aspm(struct radeon_device *rdev);
@@ -4903,62 +4898,6 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
     }
 }
 
-/**
- * cik_vm_set_page - update the page tables using sDMA
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using CP or sDMA (CIK).
- */
-void cik_vm_set_page(struct radeon_device *rdev,
-                     struct radeon_ib *ib,
-                     uint64_t pe,
-                     uint64_t addr, unsigned count,
-                     uint32_t incr, uint32_t flags)
-{
-    uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-    uint64_t value;
-    unsigned ndw;
-
-    if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
-        /* CP */
-        while (count) {
-            ndw = 2 + count * 2;
-            if (ndw > 0x3FFE)
-                ndw = 0x3FFE;
-
-            ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
-            ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
-                                        WRITE_DATA_DST_SEL(1));
-            ib->ptr[ib->length_dw++] = pe;
-            ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-            for (; ndw > 2; ndw -= 2, --count, pe += 8) {
-                if (flags & RADEON_VM_PAGE_SYSTEM) {
-                    value = radeon_vm_map_gart(rdev, addr);
-                    value &= 0xFFFFFFFFFFFFF000ULL;
-                } else if (flags & RADEON_VM_PAGE_VALID) {
-                    value = addr;
-                } else {
-                    value = 0;
-                }
-                addr += incr;
-                value |= r600_flags;
-                ib->ptr[ib->length_dw++] = value;
-                ib->ptr[ib->length_dw++] = upper_32_bits(value);
-            }
-        }
-    } else {
-        /* DMA */
-        cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
-    }
-}
-
 /*
  * RLC
  * The RLC is a multi-purpose microengine that handles a
drivers/gpu/drm/radeon/cik_sdma.c

@@ -654,13 +654,12 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
                           uint64_t addr, unsigned count,
                           uint32_t incr, uint32_t flags)
 {
-    uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
     uint64_t value;
     unsigned ndw;
 
-    trace_radeon_vm_set_page(pe, addr, count, incr, r600_flags);
+    trace_radeon_vm_set_page(pe, addr, count, incr, flags);
 
-    if (flags & RADEON_VM_PAGE_SYSTEM) {
+    if (flags & R600_PTE_SYSTEM) {
         while (count) {
             ndw = count * 2;
             if (ndw > 0xFFFFE)
@@ -672,16 +671,10 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
             ib->ptr[ib->length_dw++] = upper_32_bits(pe);
             ib->ptr[ib->length_dw++] = ndw;
             for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                if (flags & RADEON_VM_PAGE_SYSTEM) {
-                    value = radeon_vm_map_gart(rdev, addr);
-                    value &= 0xFFFFFFFFFFFFF000ULL;
-                } else if (flags & RADEON_VM_PAGE_VALID) {
-                    value = addr;
-                } else {
-                    value = 0;
-                }
+                value = radeon_vm_map_gart(rdev, addr);
+                value &= 0xFFFFFFFFFFFFF000ULL;
                 addr += incr;
-                value |= r600_flags;
+                value |= flags;
                 ib->ptr[ib->length_dw++] = value;
                 ib->ptr[ib->length_dw++] = upper_32_bits(value);
             }
@@ -692,7 +685,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
         if (ndw > 0x7FFFF)
             ndw = 0x7FFFF;
 
-        if (flags & RADEON_VM_PAGE_VALID)
+        if (flags & R600_PTE_VALID)
             value = addr;
         else
             value = 0;
@@ -700,7 +693,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
         ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
         ib->ptr[ib->length_dw++] = pe; /* dst addr */
         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-        ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+        ib->ptr[ib->length_dw++] = flags; /* mask */
         ib->ptr[ib->length_dw++] = 0;
         ib->ptr[ib->length_dw++] = value; /* value */
         ib->ptr[ib->length_dw++] = upper_32_bits(value);

drivers/gpu/drm/radeon/ni.c

@@ -174,11 +174,6 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
 extern void evergreen_program_aspm(struct radeon_device *rdev);
 extern void sumo_rlc_fini(struct radeon_device *rdev);
 extern int sumo_rlc_init(struct radeon_device *rdev);
-extern void cayman_dma_vm_set_page(struct radeon_device *rdev,
-                                   struct radeon_ib *ib,
-                                   uint64_t pe,
-                                   uint64_t addr, unsigned count,
-                                   uint32_t incr, uint32_t flags);
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -2399,77 +2394,6 @@ void cayman_vm_decode_fault(struct radeon_device *rdev,
            block, mc_id);
 }
 
-#define R600_ENTRY_VALID   (1 << 0)
-#define R600_PTE_SYSTEM    (1 << 1)
-#define R600_PTE_SNOOPED   (1 << 2)
-#define R600_PTE_READABLE  (1 << 5)
-#define R600_PTE_WRITEABLE (1 << 6)
-
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
-{
-    uint32_t r600_flags = 0;
-
-    r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
-    r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
-    r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
-    if (flags & RADEON_VM_PAGE_SYSTEM) {
-        r600_flags |= R600_PTE_SYSTEM;
-        r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
-    }
-    return r600_flags;
-}
-
-/**
- * cayman_vm_set_page - update the page tables using the CP
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using the CP (cayman/TN).
- */
-void cayman_vm_set_page(struct radeon_device *rdev,
-                        struct radeon_ib *ib,
-                        uint64_t pe,
-                        uint64_t addr, unsigned count,
-                        uint32_t incr, uint32_t flags)
-{
-    uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-    uint64_t value;
-    unsigned ndw;
-
-    if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
-        while (count) {
-            ndw = 1 + count * 2;
-            if (ndw > 0x3FFF)
-                ndw = 0x3FFF;
-
-            ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
-            ib->ptr[ib->length_dw++] = pe;
-            ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-            for (; ndw > 1; ndw -= 2, --count, pe += 8) {
-                if (flags & RADEON_VM_PAGE_SYSTEM) {
-                    value = radeon_vm_map_gart(rdev, addr);
-                    value &= 0xFFFFFFFFFFFFF000ULL;
-                } else if (flags & RADEON_VM_PAGE_VALID) {
-                    value = addr;
-                } else {
-                    value = 0;
-                }
-                addr += incr;
-                value |= r600_flags;
-                ib->ptr[ib->length_dw++] = value;
-                ib->ptr[ib->length_dw++] = upper_32_bits(value);
-            }
-        }
-    } else {
-        cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
-    }
-}
-
 /**
  * cayman_vm_flush - vm flush using the CP
  *

drivers/gpu/drm/radeon/ni_dma.c

@@ -246,8 +246,7 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
- * @r600_flags: hw access flags
+ * @flags: hw access flags
  *
  * Update the page tables using the DMA (cayman/TN).
  */
@@ -257,13 +256,12 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
                             uint64_t addr, unsigned count,
                             uint32_t incr, uint32_t flags)
 {
-    uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
     uint64_t value;
     unsigned ndw;
 
-    trace_radeon_vm_set_page(pe, addr, count, incr, r600_flags);
+    trace_radeon_vm_set_page(pe, addr, count, incr, flags);
 
-    if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) {
+    if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
         while (count) {
             ndw = count * 2;
             if (ndw > 0xFFFFE)
@@ -274,16 +272,16 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
             ib->ptr[ib->length_dw++] = pe;
             ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
             for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                if (flags & RADEON_VM_PAGE_SYSTEM) {
+                if (flags & R600_PTE_SYSTEM) {
                     value = radeon_vm_map_gart(rdev, addr);
                     value &= 0xFFFFFFFFFFFFF000ULL;
-                } else if (flags & RADEON_VM_PAGE_VALID) {
+                } else if (flags & R600_PTE_VALID) {
                     value = addr;
                 } else {
                     value = 0;
                 }
                 addr += incr;
-                value |= r600_flags;
+                value |= flags;
                 ib->ptr[ib->length_dw++] = value;
                 ib->ptr[ib->length_dw++] = upper_32_bits(value);
             }
@@ -294,7 +292,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
         if (ndw > 0xFFFFE)
             ndw = 0xFFFFE;
 
-        if (flags & RADEON_VM_PAGE_VALID)
+        if (flags & R600_PTE_VALID)
             value = addr;
         else
             value = 0;
@@ -302,7 +300,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
         ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
         ib->ptr[ib->length_dw++] = pe; /* dst addr */
         ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-        ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+        ib->ptr[ib->length_dw++] = flags; /* mask */
         ib->ptr[ib->length_dw++] = 0;
         ib->ptr[ib->length_dw++] = value; /* value */
         ib->ptr[ib->length_dw++] = upper_32_bits(value);

drivers/gpu/drm/radeon/radeon.h

@@ -832,6 +832,12 @@ struct radeon_mec {
 #define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
 #define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
 
+#define R600_PTE_VALID     (1 << 0)
+#define R600_PTE_SYSTEM    (1 << 1)
+#define R600_PTE_SNOOPED   (1 << 2)
+#define R600_PTE_READABLE  (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
 struct radeon_vm {
     struct list_head list;
     struct list_head va;
@@ -1675,8 +1681,6 @@ struct radeon_asic {
     struct {
         int (*init)(struct radeon_device *rdev);
         void (*fini)(struct radeon_device *rdev);
-        u32 pt_ring_index;
         void (*set_page)(struct radeon_device *rdev,
                          struct radeon_ib *ib,
                          uint64_t pe,
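These R600_PTE_* bits move from ni.c into radeon.h so that all ASICs share a single definition, and the vm callback table loses pt_ring_index alongside. Every DMA set_page implementation in this patch builds an entry the same way: mask the low 12 bits off the address, then OR the flags in (value &= 0xFFFFFFFFFFFFF000ULL; value |= flags;). A minimal standalone sketch, using a made-up page address:

#include <stdint.h>
#include <stdio.h>

#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

int main(void)
{
    uint64_t addr  = 0x12345f000ULL; /* hypothetical, for illustration only */
    uint32_t flags = R600_PTE_VALID | R600_PTE_SYSTEM |
                     R600_PTE_READABLE | R600_PTE_WRITEABLE;

    /* Same construction as the set_page loops in this patch. */
    uint64_t pte = (addr & 0xFFFFFFFFFFFFF000ULL) | flags;

    printf("pte: 0x%016llx\n", (unsigned long long)pte); /* 0x000000012345f063 */
    return 0;
}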

drivers/gpu/drm/radeon/radeon_asic.c

@@ -1622,8 +1622,7 @@ static struct radeon_asic cayman_asic = {
     .vm = {
         .init = &cayman_vm_init,
         .fini = &cayman_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-        .set_page = &cayman_vm_set_page,
+        .set_page = &cayman_dma_vm_set_page,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1723,8 +1722,7 @@ static struct radeon_asic trinity_asic = {
     .vm = {
         .init = &cayman_vm_init,
         .fini = &cayman_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-        .set_page = &cayman_vm_set_page,
+        .set_page = &cayman_dma_vm_set_page,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1854,8 +1852,7 @@ static struct radeon_asic si_asic = {
     .vm = {
         .init = &si_vm_init,
         .fini = &si_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-        .set_page = &si_vm_set_page,
+        .set_page = &si_dma_vm_set_page,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -2000,8 +1997,7 @@ static struct radeon_asic ci_asic = {
     .vm = {
         .init = &cik_vm_init,
         .fini = &cik_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-        .set_page = &cik_vm_set_page,
+        .set_page = &cik_sdma_vm_set_page,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2100,8 +2096,7 @@ static struct radeon_asic kv_asic = {
     .vm = {
         .init = &cik_vm_init,
         .fini = &cik_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-        .set_page = &cik_vm_set_page,
+        .set_page = &cik_sdma_vm_set_page,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,

drivers/gpu/drm/radeon/radeon_asic.h

@@ -581,17 +581,18 @@ int cayman_vm_init(struct radeon_device *rdev);
 void cayman_vm_fini(struct radeon_device *rdev);
 void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev,
-                        struct radeon_ib *ib,
-                        uint64_t pe,
-                        uint64_t addr, unsigned count,
-                        uint32_t incr, uint32_t flags);
 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
                                 struct radeon_ib *ib);
 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_set_page(struct radeon_device *rdev,
+                            struct radeon_ib *ib,
+                            uint64_t pe,
+                            uint64_t addr, unsigned count,
+                            uint32_t incr, uint32_t flags);
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
 int ni_dpm_init(struct radeon_device *rdev);
@@ -653,17 +654,17 @@ int si_irq_set(struct radeon_device *rdev);
 int si_irq_process(struct radeon_device *rdev);
 int si_vm_init(struct radeon_device *rdev);
 void si_vm_fini(struct radeon_device *rdev);
-void si_vm_set_page(struct radeon_device *rdev,
-                    struct radeon_ib *ib,
-                    uint64_t pe,
-                    uint64_t addr, unsigned count,
-                    uint32_t incr, uint32_t flags);
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 int si_copy_dma(struct radeon_device *rdev,
                 uint64_t src_offset, uint64_t dst_offset,
                 unsigned num_gpu_pages,
                 struct radeon_fence **fence);
+void si_dma_vm_set_page(struct radeon_device *rdev,
+                        struct radeon_ib *ib,
+                        uint64_t pe,
+                        uint64_t addr, unsigned count,
+                        uint32_t incr, uint32_t flags);
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 u32 si_get_xclk(struct radeon_device *rdev);
 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -735,11 +736,11 @@ int cik_irq_process(struct radeon_device *rdev);
 int cik_vm_init(struct radeon_device *rdev);
 void cik_vm_fini(struct radeon_device *rdev);
 void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_vm_set_page(struct radeon_device *rdev,
-                     struct radeon_ib *ib,
-                     uint64_t pe,
-                     uint64_t addr, unsigned count,
-                     uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_page(struct radeon_device *rdev,
+                          struct radeon_ib *ib,
+                          uint64_t pe,
+                          uint64_t addr, unsigned count,
+                          uint32_t incr, uint32_t flags);
 void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,

drivers/gpu/drm/radeon/radeon_gart.c

@@ -913,6 +913,26 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
     return result;
 }
 
+/**
+ * radeon_vm_page_flags - translate page flags to what the hw uses
+ *
+ * @flags: flags comming from userspace
+ *
+ * Translate the flags the userspace ABI uses to hw flags.
+ */
+static uint32_t radeon_vm_page_flags(uint32_t flags)
+{
+    uint32_t hw_flags = 0;
+    hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+    hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+    hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+    if (flags & RADEON_VM_PAGE_SYSTEM) {
+        hw_flags |= R600_PTE_SYSTEM;
+        hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+    }
+    return hw_flags;
+}
+
 /**
  * radeon_vm_update_pdes - make sure that page directory is valid
  *
@@ -974,7 +994,7 @@ static int radeon_vm_update_pdes(struct radeon_device *rdev,
         if (count) {
             radeon_asic_vm_set_page(rdev, ib, last_pde,
                                     last_pt, count, incr,
-                                    RADEON_VM_PAGE_VALID);
+                                    R600_PTE_VALID);
         }
 
         count = 1;
@@ -987,7 +1007,7 @@ static int radeon_vm_update_pdes(struct radeon_device *rdev,
     if (count) {
         radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
-                                incr, RADEON_VM_PAGE_VALID);
+                                incr, R600_PTE_VALID);
     }
 
@@ -1082,7 +1102,6 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
                             struct radeon_bo *bo,
                             struct ttm_mem_reg *mem)
 {
-    unsigned ridx = rdev->asic->vm.pt_ring_index;
     struct radeon_ib ib;
     struct radeon_bo_va *bo_va;
     unsigned nptes, npdes, ndw;
@@ -1155,7 +1174,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
     if (ndw > 0xfffff)
         return -ENOMEM;
 
-    r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
+    r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
     ib.length_dw = 0;
 
     r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
@@ -1165,7 +1184,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
     }
 
     radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
-                          addr, bo_va->flags);
+                          addr, radeon_vm_page_flags(bo_va->flags));
 
     radeon_ib_sync_to(&ib, vm->fence);
     r = radeon_ib_schedule(rdev, &ib, NULL);

drivers/gpu/drm/radeon/si.c

@@ -78,11 +78,6 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
 extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
 extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
 extern bool evergreen_is_display_hung(struct radeon_device *rdev);
-extern void si_dma_vm_set_page(struct radeon_device *rdev,
-                               struct radeon_ib *ib,
-                               uint64_t pe,
-                               uint64_t addr, unsigned count,
-                               uint32_t incr, uint32_t flags);
 
 static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
                                          bool enable);
@@ -4662,61 +4657,6 @@ static void si_vm_decode_fault(struct radeon_device *rdev,
            block, mc_id);
 }
 
-/**
- * si_vm_set_page - update the page tables using the CP
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using the CP (SI).
- */
-void si_vm_set_page(struct radeon_device *rdev,
-                    struct radeon_ib *ib,
-                    uint64_t pe,
-                    uint64_t addr, unsigned count,
-                    uint32_t incr, uint32_t flags)
-{
-    uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-    uint64_t value;
-    unsigned ndw;
-
-    if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
-        while (count) {
-            ndw = 2 + count * 2;
-            if (ndw > 0x3FFE)
-                ndw = 0x3FFE;
-
-            ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
-            ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
-                                        WRITE_DATA_DST_SEL(1));
-            ib->ptr[ib->length_dw++] = pe;
-            ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-            for (; ndw > 2; ndw -= 2, --count, pe += 8) {
-                if (flags & RADEON_VM_PAGE_SYSTEM) {
-                    value = radeon_vm_map_gart(rdev, addr);
-                    value &= 0xFFFFFFFFFFFFF000ULL;
-                } else if (flags & RADEON_VM_PAGE_VALID) {
-                    value = addr;
-                } else {
-                    value = 0;
-                }
-                addr += incr;
-                value |= r600_flags;
-                ib->ptr[ib->length_dw++] = value;
-                ib->ptr[ib->length_dw++] = upper_32_bits(value);
-            }
-        }
-    } else {
-        /* DMA */
-        si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
-    }
-}
-
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 {
     struct radeon_ring *ring = &rdev->ring[ridx];

drivers/gpu/drm/radeon/si_dma.c

@@ -76,13 +76,12 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
                         uint64_t addr, unsigned count,
                         uint32_t incr, uint32_t flags)
 {
-    uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
     uint64_t value;
     unsigned ndw;
 
-    trace_radeon_vm_set_page(pe, addr, count, incr, r600_flags);
+    trace_radeon_vm_set_page(pe, addr, count, incr, flags);
 
-    if (flags & RADEON_VM_PAGE_SYSTEM) {
+    if (flags & R600_PTE_SYSTEM) {
         while (count) {
             ndw = count * 2;
             if (ndw > 0xFFFFE)
@@ -93,16 +92,10 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
             ib->ptr[ib->length_dw++] = pe;
             ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
             for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                if (flags & RADEON_VM_PAGE_SYSTEM) {
-                    value = radeon_vm_map_gart(rdev, addr);
-                    value &= 0xFFFFFFFFFFFFF000ULL;
-                } else if (flags & RADEON_VM_PAGE_VALID) {
-                    value = addr;
-                } else {
-                    value = 0;
-                }
+                value = radeon_vm_map_gart(rdev, addr);
+                value &= 0xFFFFFFFFFFFFF000ULL;
                 addr += incr;
-                value |= r600_flags;
+                value |= flags;
                 ib->ptr[ib->length_dw++] = value;
                 ib->ptr[ib->length_dw++] = upper_32_bits(value);
             }
@@ -113,7 +106,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
         if (ndw > 0xFFFFE)
             ndw = 0xFFFFE;
 
-        if (flags & RADEON_VM_PAGE_VALID)
+        if (flags & R600_PTE_VALID)
             value = addr;
         else
             value = 0;
@@ -121,7 +114,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
         ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
         ib->ptr[ib->length_dw++] = pe; /* dst addr */
         ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-        ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+        ib->ptr[ib->length_dw++] = flags; /* mask */
         ib->ptr[ib->length_dw++] = 0;
         ib->ptr[ib->length_dw++] = value; /* value */
         ib->ptr[ib->length_dw++] = upper_32_bits(value);