drm/amdgpu: move more logic into amdgpu_vm_map_gart v3

No need to duplicate that code over and over again. Also stop using the
flags to determine if we need to map the addresses.

v2: constify the pages_addr
v3: rebased, fix typo in commit message

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Author:    Christian König, 2015-11-30 13:26:07 +01:00
Committer: Alex Deucher
Commit:    b07c9d2a73 (parent 599f434817)
5 changed files with 33 additions and 42 deletions
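
As a quick orientation before the per-file diffs: the point of the change is that callers now tell amdgpu_vm_map_gart() whether a GART lookup is needed by passing a pages_addr table (or NULL), instead of every sDMA backend re-deriving that from the AMDGPU_PTE_SYSTEM/AMDGPU_PTE_VALID flags. The following is a minimal standalone userspace sketch of that behaviour, for illustration only; the demo_* names and the fake table are invented here and are not driver code.

/* Standalone sketch, not driver code: mirrors the lookup this patch
 * centralizes in amdgpu_vm_map_gart(). */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_MASK  (~(((uint64_t)1 << DEMO_PAGE_SHIFT) - 1))

/* pages_addr == NULL now means "no GART lookup needed" (VRAM or invalid);
 * a non-NULL table means "system page, resolve through the DMA addresses". */
static uint64_t demo_map_gart(const uint64_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	if (pages_addr) {
		result = pages_addr[addr >> DEMO_PAGE_SHIFT]; /* page table offset */
		result |= addr & ~DEMO_PAGE_MASK;             /* cpu vs gpu page size */
	} else {
		result = addr;                                /* no mapping required */
	}

	/* the helper also truncates to a 4KiB-aligned GPU address, so the
	 * per-ASIC write_pte loops no longer need the mask themselves */
	return result & 0xFFFFFFFFFFFFF000ULL;
}

int main(void)
{
	const uint64_t fake_pages_addr[] = { 0xabc000, 0xdef000 };

	/* system page: resolved through index 1 of the fake table */
	printf("0x%llx\n", (unsigned long long)demo_map_gart(fake_pages_addr, 0x1234));
	/* no table: the address passes through, low bits masked off */
	printf("0x%llx\n", (unsigned long long)demo_map_gart(NULL, 0x40001234));
	return 0;
}

Built with a plain C compiler this prints 0xdef000 and 0x40001000, matching what the new helper would hand back to the PTE write loops.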

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -283,7 +283,7 @@ struct amdgpu_vm_pte_funcs {
                          unsigned count);
        /* write pte one entry at a time with addr mapping */
        void (*write_pte)(struct amdgpu_ib *ib,
-                         uint64_t pe,
+                         const dma_addr_t *pages_addr, uint64_t pe,
                          uint64_t addr, unsigned count,
                          uint32_t incr, uint32_t flags);
        /* for linear pte/pde updates without addr mapping */
@@ -962,7 +962,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
                      struct amdgpu_vm *vm,
                      struct fence *updates);
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                                     struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -2198,7 +2198,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib)))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -299,8 +299,13 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
                uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
                amdgpu_vm_copy_pte(adev, ib, pe, src, count);

-       } else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
-               amdgpu_vm_write_pte(adev, ib, pe, addr,
+       } else if (flags & AMDGPU_PTE_SYSTEM) {
+               dma_addr_t *pages_addr = adev->gart.pages_addr;
+               amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
+                                   count, incr, flags);
+
+       } else if (count < 3) {
+               amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
                                    count, incr, flags);

        } else {
@@ -378,25 +383,32 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 }

 /**
- * amdgpu_vm_map_gart - get the physical address of a gart page
+ * amdgpu_vm_map_gart - Resolve gart mapping of addr
  *
- * @adev: amdgpu_device pointer
+ * @pages_addr: optional DMA address to use for lookup
  * @addr: the unmapped addr
  *
  * Look up the physical address of the page that the pte resolves
- * to (cayman+).
- * Returns the physical address of the page.
+ * to and return the pointer for the page table entry.
  */
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 {
        uint64_t result;

+       if (pages_addr) {
                /* page table offset */
-               result = adev->gart.pages_addr[addr >> PAGE_SHIFT];
+               result = pages_addr[addr >> PAGE_SHIFT];

                /* in case cpu page size != gpu page size*/
                result |= addr & (~PAGE_MASK);
+       } else {
+               /* No mapping required */
+               result = addr;
+       }
+
+       result &= 0xFFFFFFFFFFFFF000ULL;

        return result;
 }
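
Worth noting from the hunk above: the helper now owns both decisions the backends used to make. amdgpu_vm_update_pages() chooses between adev->gart.pages_addr and NULL up front, and amdgpu_vm_map_gart() applies the final 0xFFFFFFFFFFFFF000ULL truncation, so the per-ASIC write_pte loops in the sDMA files below shrink to a single call with no flag checks.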

--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c

@@ -714,7 +714,7 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
  * Update PTEs by writing them manually using sDMA (CIK).
  */
 static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
-                                 uint64_t pe,
+                                 const dma_addr_t *pages_addr, uint64_t pe,
                                  uint64_t addr, unsigned count,
                                  uint32_t incr, uint32_t flags)
 {
@@ -733,14 +733,7 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = ndw;
        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-               if (flags & AMDGPU_PTE_SYSTEM) {
-                       value = amdgpu_vm_map_gart(ib->ring->adev, addr);
-                       value &= 0xFFFFFFFFFFFFF000ULL;
-               } else if (flags & AMDGPU_PTE_VALID) {
-                       value = addr;
-               } else {
-                       value = 0;
-               }
+               value = amdgpu_vm_map_gart(pages_addr, addr);
                addr += incr;
                value |= flags;
                ib->ptr[ib->length_dw++] = value;

--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c

@@ -772,7 +772,7 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
  * Update PTEs by writing them manually using sDMA (CIK).
  */
 static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
-                                  uint64_t pe,
+                                  const dma_addr_t *pages_addr, uint64_t pe,
                                   uint64_t addr, unsigned count,
                                   uint32_t incr, uint32_t flags)
 {
@@ -791,14 +791,7 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = ndw;
        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-               if (flags & AMDGPU_PTE_SYSTEM) {
-                       value = amdgpu_vm_map_gart(ib->ring->adev, addr);
-                       value &= 0xFFFFFFFFFFFFF000ULL;
-               } else if (flags & AMDGPU_PTE_VALID) {
-                       value = addr;
-               } else {
-                       value = 0;
-               }
+               value = amdgpu_vm_map_gart(pages_addr, addr);
                addr += incr;
                value |= flags;
                ib->ptr[ib->length_dw++] = value;

--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c

@@ -922,7 +922,7 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
  * Update PTEs by writing them manually using sDMA (CIK).
  */
 static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
-                                  uint64_t pe,
+                                  const dma_addr_t *pages_addr, uint64_t pe,
                                   uint64_t addr, unsigned count,
                                   uint32_t incr, uint32_t flags)
 {
@@ -941,14 +941,7 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = ndw;
        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-               if (flags & AMDGPU_PTE_SYSTEM) {
-                       value = amdgpu_vm_map_gart(ib->ring->adev, addr);
-                       value &= 0xFFFFFFFFFFFFF000ULL;
-               } else if (flags & AMDGPU_PTE_VALID) {
-                       value = addr;
-               } else {
-                       value = 0;
-               }
+               value = amdgpu_vm_map_gart(pages_addr, addr);
                addr += incr;
                value |= flags;
                ib->ptr[ib->length_dw++] = value;