drm/nouveau: explicitly specify caching to use
Instead of letting TTM mask the caching bits, specify directly what the driver needs.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/390206
parent 0fe438cec9
commit 5839172f09
2 changed files with 25 additions and 22 deletions
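For context, here is a stand-alone sketch of the caching decision that the hunks below move into set_placement_list(): VRAM placements drop TTM_PL_FLAG_CACHED unconditionally and additionally drop TTM_PL_FLAG_WC on BARs that cannot be ioremapped write-combined. The flag values and the fake_vram_info struct are simplified stand-ins for the kernel definitions, not the real TTM or nouveau API.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the TTM caching flags (values are made up). */
#define TTM_PL_FLAG_CACHED   (1u << 0)
#define TTM_PL_FLAG_WC       (1u << 1)
#define TTM_PL_FLAG_UNCACHED (1u << 2)
#define TTM_PL_MASK_CACHING  (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED)

/* Hypothetical stand-in for the NVIF_MEM_UNCACHED check on the VRAM BAR. */
struct fake_vram_info {
        int bar_supports_wc;
};

/*
 * Models the decision the patch moves into set_placement_list():
 * VRAM is never mapped cached, and on BARs that cannot be ioremapped
 * write-combined the WC bit is dropped as well.
 */
static uint32_t vram_caching_flags(const struct fake_vram_info *info, uint32_t flags)
{
        flags &= ~TTM_PL_FLAG_CACHED;
        if (!info->bar_supports_wc)
                flags &= ~TTM_PL_FLAG_WC;
        return flags;
}

int main(void)
{
        struct fake_vram_info wc_ok = { .bar_supports_wc = 1 };
        struct fake_vram_info wc_bad = { .bar_supports_wc = 0 };

        printf("WC-capable BAR:   0x%x\n", (unsigned)vram_caching_flags(&wc_ok, TTM_PL_MASK_CACHING));
        printf("WC-incapable BAR: 0x%x\n", (unsigned)vram_caching_flags(&wc_bad, TTM_PL_MASK_CACHING));
        return 0;
}

With the decision made in the driver, the resource managers no longer need to restrict available_caching, which is why the GTT manager in the nouveau_ttm.c hunks below simply advertises TTM_PL_MASK_CACHING.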
drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -340,18 +340,33 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 }
 
 static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain,
-                   uint32_t flags)
+set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n,
+                   uint32_t domain, uint32_t flags)
 {
         *n = 0;
 
         if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
+                struct nvif_mmu *mmu = &drm->client.mmu;
+                const u8 type = mmu->type[drm->ttm.type_vram].type;
+
                 pl[*n].mem_type = TTM_PL_VRAM;
-                pl[(*n)++].flags = flags;
+                pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED;
+
+                /* Some BARs do not support being ioremapped WC */
+                if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+                    type & NVIF_MEM_UNCACHED)
+                        pl[*n].flags &= ~TTM_PL_FLAG_WC;
+
+                (*n)++;
         }
         if (domain & NOUVEAU_GEM_DOMAIN_GART) {
                 pl[*n].mem_type = TTM_PL_TT;
-                pl[(*n)++].flags = flags;
+                pl[*n].flags = flags;
+
+                if (drm->agp.bridge)
+                        pl[*n].flags &= ~TTM_PL_FLAG_CACHED;
+
+                (*n)++;
         }
         if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
                 pl[*n].mem_type = TTM_PL_SYSTEM;
@@ -397,17 +412,18 @@ void
 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
                          uint32_t busy)
 {
+        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
         struct ttm_placement *pl = &nvbo->placement;
         uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
                           TTM_PL_MASK_CACHING) |
                          (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
 
         pl->placement = nvbo->placements;
-        set_placement_list(nvbo->placements, &pl->num_placement,
+        set_placement_list(drm, nvbo->placements, &pl->num_placement,
                            domain, flags);
 
         pl->busy_placement = nvbo->busy_placements;
-        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
+        set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement,
                            domain | busy, flags);
 
         set_placement_range(nvbo, domain);
drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -194,19 +194,13 @@ nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
 static int
 nouveau_ttm_init_vram(struct nouveau_drm *drm)
 {
-        struct nvif_mmu *mmu = &drm->client.mmu;
         if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-                /* Some BARs do not support being ioremapped WC */
-                const u8 type = mmu->type[drm->ttm.type_vram].type;
                 struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);
+
                 if (!man)
                         return -ENOMEM;
 
                 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
-
-                if (type & NVIF_MEM_UNCACHED)
-                        man->available_caching = TTM_PL_FLAG_UNCACHED;
-
                 man->func = &nouveau_vram_manager;
 
                 ttm_resource_manager_init(man,
@@ -243,13 +237,6 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm)
         struct ttm_resource_manager *man;
         unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT;
         const struct ttm_resource_manager_func *func = NULL;
-        unsigned available_caching;
-
-        if (drm->agp.bridge)
-                available_caching = TTM_PL_FLAG_UNCACHED |
-                                    TTM_PL_FLAG_WC;
-        else
-                available_caching = TTM_PL_MASK_CACHING;
 
         if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                 func = &nouveau_gart_manager;
@@ -257,7 +244,7 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm)
                 func = &nv04_gart_manager;
         else
                 return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT,
-                                          available_caching, true,
+                                          TTM_PL_MASK_CACHING, true,
                                           size_pages);
 
         man = kzalloc(sizeof(*man), GFP_KERNEL);
@@ -265,7 +252,7 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm)
                 return -ENOMEM;
 
         man->func = func;
-        man->available_caching = available_caching;
+        man->available_caching = TTM_PL_MASK_CACHING;
         man->use_tt = true;
         ttm_resource_manager_init(man, size_pages);
         ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);