drm/amdgpu: consistently name all GART related parts

Rename symbols from gtt_ to gart_ as appropriate.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Authored by Christian König on 2017-07-07 11:56:59 +02:00, committed by Alex Deucher
parent ed21c047e9
commit 6f02a69648
11 changed files with 72 additions and 72 deletions

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -556,9 +556,9 @@ struct amdgpu_mc {
 	 * about vram size near mc fb location */
 	u64			mc_vram_size;
 	u64			visible_vram_size;
-	u64			gtt_size;
-	u64			gtt_start;
-	u64			gtt_end;
+	u64			gart_size;
+	u64			gart_start;
+	u64			gart_end;
 	u64			vram_start;
 	u64			vram_end;
 	unsigned		vram_width;
@@ -1860,7 +1860,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem);
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
+void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
 int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -681,7 +681,7 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
 }
 
 /**
- * amdgpu_gtt_location - try to find GTT location
+ * amdgpu_gart_location - try to find GTT location
  * @adev: amdgpu device structure holding all necessary informations
  * @mc: memory controller structure holding memory informations
  *
@@ -692,28 +692,28 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
  *
  * FIXME: when reducing GTT size align new size on power of 2.
  */
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
+void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
 {
 	u64 size_af, size_bf;
 
 	size_af = adev->mc.mc_mask - mc->vram_end;
 	size_bf = mc->vram_start;
 	if (size_bf > size_af) {
-		if (mc->gtt_size > size_bf) {
+		if (mc->gart_size > size_bf) {
 			dev_warn(adev->dev, "limiting GTT\n");
-			mc->gtt_size = size_bf;
+			mc->gart_size = size_bf;
 		}
-		mc->gtt_start = 0;
+		mc->gart_start = 0;
 	} else {
-		if (mc->gtt_size > size_af) {
+		if (mc->gart_size > size_af) {
 			dev_warn(adev->dev, "limiting GTT\n");
-			mc->gtt_size = size_af;
+			mc->gart_size = size_af;
 		}
-		mc->gtt_start = mc->vram_end + 1;
+		mc->gart_start = mc->vram_end + 1;
 	}
-	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+	mc->gart_end = mc->gart_start + mc->gart_size - 1;
 	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
-			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
+			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
 }
 
 /*
@@ -2031,7 +2031,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->flags = flags;
 	adev->asic_type = flags & AMD_ASIC_MASK;
 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
-	adev->mc.gtt_size = 512 * 1024 * 1024;
+	adev->mc.gart_size = 512 * 1024 * 1024;
 	adev->accel_working = false;
 	adev->num_rings = 0;
 	adev->mman.buffer_funcs = NULL;
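
Note on the hunk above: amdgpu_gart_location() places the GART aperture in the larger of the two gaps that the MC address space leaves around VRAM, clamping the requested size if it does not fit. A minimal standalone sketch of that decision; the sizes used here are hypothetical and not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the amdgpu_gart_location() decision: place the GART aperture in
 * the larger gap around VRAM inside the MC address space, clamping its size
 * to the gap.  All values below are hypothetical examples. */
int main(void)
{
	uint64_t mc_mask    = (1ULL << 40) - 1;	/* 40-bit MC address space */
	uint64_t vram_start = 0;
	uint64_t vram_end   = (8ULL << 30) - 1;	/* 8 GiB of VRAM at offset 0 */
	uint64_t gart_size  = 512ULL << 20;	/* requested 512 MiB of GART */
	uint64_t gart_start, gart_end;

	uint64_t size_af = mc_mask - vram_end;	/* space after VRAM */
	uint64_t size_bf = vram_start;		/* space before VRAM */

	if (size_bf > size_af) {
		if (gart_size > size_bf)
			gart_size = size_bf;	/* "limiting GTT" */
		gart_start = 0;
	} else {
		if (gart_size > size_af)
			gart_size = size_af;
		gart_start = vram_end + 1;
	}
	gart_end = gart_start + gart_size - 1;

	printf("GTT: %lluM 0x%016llX - 0x%016llX\n",
	       (unsigned long long)(gart_size >> 20),
	       (unsigned long long)gart_start,
	       (unsigned long long)gart_end);
	return 0;
}

With these example numbers the aperture lands directly above VRAM, at 0x0000000200000000.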

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c

@@ -57,11 +57,11 @@
  */
 
 /**
- * amdgpu_gart_set_defaults - set the default gtt_size
+ * amdgpu_gart_set_defaults - set the default gart_size
  *
  * @adev: amdgpu_device pointer
  *
- * Set the default gtt_size based on parameters and available VRAM.
+ * Set the default gart_size based on parameters and available VRAM.
  */
 void amdgpu_gart_set_defaults(struct amdgpu_device *adev)
 {
@@ -69,10 +69,10 @@ void amdgpu_gart_set_defaults(struct amdgpu_device *adev)
	 * size equal to the 1024 or vram, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
-		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+		adev->mc.gart_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
					adev->mc.mc_vram_size);
	else
-		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
+		adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20;
 }
 
 /**
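
For context on the hunk above: amdgpu_gart_set_defaults() only picks the aperture size. If the amdgpu.gart_size module parameter was left at -1, the default is the larger of the driver default (AMDGPU_DEFAULT_GTT_SIZE_MB) and the VRAM size; otherwise the parameter, given in MiB, is used as-is. A small sketch of that selection; the helper name and the example numbers are assumptions for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the selection done in amdgpu_gart_set_defaults().
 * default_mb plays the role of AMDGPU_DEFAULT_GTT_SIZE_MB; the numbers in
 * main() are examples, not values taken from the patch. */
static uint64_t pick_gart_size(int64_t gart_size_mb, uint64_t default_mb,
			       uint64_t mc_vram_size)
{
	if (gart_size_mb == -1) {	/* module parameter left at "auto" */
		uint64_t def = default_mb << 20;

		return def > mc_vram_size ? def : mc_vram_size;
	}
	return (uint64_t)gart_size_mb << 20;	/* parameter is given in MiB */
}

int main(void)
{
	/* no override, 1024 MiB assumed default, 2 GiB of VRAM: VRAM size wins */
	printf("gart_size = %llu MiB\n",
	       (unsigned long long)(pick_gart_size(-1, 1024, 2ULL << 30) >> 20));
	return 0;
}
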
@@ -387,8 +387,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
	if (r)
		return r;
 
	/* Compute table size */
-	adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
-	adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
+	adev->gart.num_cpu_pages = adev->mc.gart_size / PAGE_SIZE;
+	adev->gart.num_gpu_pages = adev->mc.gart_size / AMDGPU_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
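
The table size computed in amdgpu_gart_init() above is simply the aperture size divided by the CPU and GPU page sizes. A quick worked example, assuming the common case of 4 KiB pages on both sides:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumption for illustration: 4 KiB pages on both sides */
	const uint64_t page_size = 4096;	/* PAGE_SIZE */
	const uint64_t gpu_page_size = 4096;	/* AMDGPU_GPU_PAGE_SIZE */
	uint64_t gart_size = 512ULL << 20;	/* 512 MiB GART aperture */

	printf("GART: num cpu pages %llu, num gpu pages %llu\n",
	       (unsigned long long)(gart_size / page_size),
	       (unsigned long long)(gart_size / gpu_page_size));
	return 0;
}

A 512 MiB aperture therefore needs 131072 entries on both sides; with larger CPU pages only num_cpu_pages shrinks.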

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c

@@ -33,7 +33,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_bo *vram_obj = NULL;
	struct amdgpu_bo **gtt_obj = NULL;
-	uint64_t gtt_addr, vram_addr;
+	uint64_t gart_addr, vram_addr;
	unsigned n, size;
	int i, r;
 
@@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
-	n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
+	n = adev->mc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		if (adev->rings[i])
			n -= adev->rings[i]->ring_size;
@@ -76,7 +76,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
	}
 
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
-		void **gtt_start, **gtt_end;
+		void **gart_start, **gart_end;
		void **vram_start, **vram_end;
		struct dma_fence *fence = NULL;
@@ -91,7 +91,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
		r = amdgpu_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
-		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
+		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
@@ -103,14 +103,14 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
			goto out_lclean_unpin;
		}
 
-		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
-		     gtt_start < gtt_end;
-		     gtt_start++)
-			*gtt_start = gtt_start;
+		for (gart_start = gtt_map, gart_end = gtt_map + size;
+		     gart_start < gart_end;
+		     gart_start++)
+			*gart_start = gart_start;
 
		amdgpu_bo_kunmap(gtt_obj[i]);
 
-		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
+		r = amdgpu_copy_buffer(ring, gart_addr, vram_addr,
				       size, NULL, &fence, false, false);
 
		if (r) {
@@ -132,21 +132,21 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
			goto out_lclean_unpin;
		}
 
-		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+		for (gart_start = gtt_map, gart_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
-		     gtt_start++, vram_start++) {
-			if (*vram_start != gtt_start) {
+		     gart_start++, vram_start++) {
+			if (*vram_start != gart_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
-					  i, *vram_start, gtt_start,
+					  i, *vram_start, gart_start,
					  (unsigned long long)
-					  (gtt_addr - adev->mc.gtt_start +
-					   (void*)gtt_start - gtt_map),
+					  (gart_addr - adev->mc.gart_start +
+					   (void*)gart_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - adev->mc.vram_start +
-					   (void*)gtt_start - gtt_map));
+					   (void*)gart_start - gtt_map));
				amdgpu_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
@@ -155,7 +155,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 
		amdgpu_bo_kunmap(vram_obj);
 
-		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
+		r = amdgpu_copy_buffer(ring, vram_addr, gart_addr,
				       size, NULL, &fence, false, false);
 
		if (r) {
@@ -177,20 +177,20 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
			goto out_lclean_unpin;
		}
 
-		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+		for (gart_start = gtt_map, gart_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
-		     gtt_start < gtt_end;
-		     gtt_start++, vram_start++) {
-			if (*gtt_start != vram_start) {
+		     gart_start < gart_end;
+		     gart_start++, vram_start++) {
+			if (*gart_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
-					  i, *gtt_start, vram_start,
+					  i, *gart_start, vram_start,
					  (unsigned long long)
					  (vram_addr - adev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
-					  (gtt_addr - adev->mc.gtt_start +
+					  (gart_addr - adev->mc.gart_start +
					   (void*)vram_start - vram_map));
				amdgpu_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
@@ -200,7 +200,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
		amdgpu_bo_kunmap(gtt_obj[i]);
 
		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
-			 gtt_addr - adev->mc.gtt_start);
+			 gart_addr - adev->mc.gart_start);
		continue;
 
 out_lclean_unpin:

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -158,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		break;
	case TTM_PL_TT:
		man->func = &amdgpu_gtt_mgr_func;
-		man->gpu_offset = adev->mc.gtt_start;
+		man->gpu_offset = adev->mc.gart_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -1144,13 +1144,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
-			   adev->mc.gtt_size >> PAGE_SHIFT);
+			   adev->mc.gart_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
-		 (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
+		 (unsigned)(adev->mc.gart_size / (1024 * 1024)));
 
	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
@@ -1279,7 +1279,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
 
-	*addr = adev->mc.gtt_start;
+	*addr = adev->mc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;
 
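
In the amdgpu_map_buffer() hunk above, each copy window gets its own slot of AMDGPU_GTT_MAX_TRANSFER_SIZE GPU pages starting at the very beginning of the GART aperture. A small sketch of the address arithmetic; the constant values below are hypothetical, only the formula mirrors the hunk:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical example values; the real constants are driver-defined */
	const uint64_t gart_start = 0x0000000200000000ULL;
	const uint64_t max_transfer_pages = 512;	/* AMDGPU_GTT_MAX_TRANSFER_SIZE */
	const uint64_t gpu_page_size = 4096;		/* AMDGPU_GPU_PAGE_SIZE */
	unsigned window = 2;

	uint64_t addr = gart_start;
	addr += (uint64_t)window * max_transfer_pages * gpu_page_size;

	printf("window %u maps at GART address 0x%016llX\n",
	       window, (unsigned long long)addr);
	return 0;
}
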
@@ -1645,7 +1645,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
				  adev, &amdgpu_ttm_gtt_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
-	i_size_write(ent->d_inode, adev->mc.gtt_size);
+	i_size_write(ent->d_inode, adev->mc.gart_size);
	adev->mman.gtt = ent;
 
 #endif

--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c

@@ -58,14 +58,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
	gfxhub_v1_0_init_gart_pt_regs(adev);
 
	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
-		     (u32)(adev->mc.gtt_start >> 12));
+		     (u32)(adev->mc.gart_start >> 12));
	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
-		     (u32)(adev->mc.gtt_start >> 44));
+		     (u32)(adev->mc.gart_start >> 44));
 
	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
-		     (u32)(adev->mc.gtt_end >> 12));
+		     (u32)(adev->mc.gart_end >> 12));
	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
-		     (u32)(adev->mc.gtt_end >> 44));
+		     (u32)(adev->mc.gart_end >> 44));
 }
 
 static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
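
gfxhub above and mmhub in the last file below program the context0 page-table window identically: the GART address is converted to a 4 KiB page frame number (>> 12) and split across a LO32/HI32 register pair, which is why the HI32 half is written as address bits 44 and up. A short sketch of that split, using a hypothetical aperture start:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical GART aperture start in the MC address space */
	uint64_t gart_start = 0x0000000200000000ULL;

	/* the registers hold a 4 KiB page frame number split into two halves */
	uint32_t lo32 = (uint32_t)(gart_start >> 12);	/* address bits 12..43 */
	uint32_t hi32 = (uint32_t)(gart_start >> 44);	/* address bits 44 and up */

	printf("START_ADDR_LO32 = 0x%08X, START_ADDR_HI32 = 0x%08X\n", lo32, hi32);
	return 0;
}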

--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c

@@ -228,7 +228,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, base);
-	amdgpu_gtt_location(adev, mc);
+	amdgpu_gart_location(adev, mc);
 }
 
 static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
@@ -481,8 +481,8 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
	       (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
	       (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
@@ -529,7 +529,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
+		 (unsigned)(adev->mc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
 
	return 0;

--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@@ -244,7 +244,7 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, base);
-	amdgpu_gtt_location(adev, mc);
+	amdgpu_gart_location(adev, mc);
 }
 
 /**
@@ -584,8 +584,8 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
@@ -639,7 +639,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
+		 (unsigned)(adev->mc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
 
	return 0;

--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

@@ -406,7 +406,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, base);
-	amdgpu_gtt_location(adev, mc);
+	amdgpu_gart_location(adev, mc);
 }
 
 /**
@@ -786,8 +786,8 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
@@ -842,7 +842,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
+		 (unsigned)(adev->mc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
 
	return 0;

--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

@@ -420,7 +420,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_vram_location(adev, &adev->mc, base);
-	amdgpu_gtt_location(adev, mc);
+	amdgpu_gart_location(adev, mc);
	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU)
		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
@@ -736,7 +736,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
+		 (unsigned)(adev->mc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
 
	return 0;

--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c

@@ -69,14 +69,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
	mmhub_v1_0_init_gart_pt_regs(adev);
 
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
-		     (u32)(adev->mc.gtt_start >> 12));
+		     (u32)(adev->mc.gart_start >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
-		     (u32)(adev->mc.gtt_start >> 44));
+		     (u32)(adev->mc.gart_start >> 44));
 
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
-		     (u32)(adev->mc.gtt_end >> 12));
+		     (u32)(adev->mc.gart_end >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
-		     (u32)(adev->mc.gtt_end >> 44));
+		     (u32)(adev->mc.gart_end >> 44));
 }
 
 static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)