drm/nouveau: make use of the GPUVM's shared dma-resv

DRM GEM objects private to a single GPUVM can use a shared dma-resv.
Make use of the shared dma-resv of GPUVM rather than a driver specific
one.

The shared dma-resv originates from a "root" GEM object serving as
container for the dma-resv to make it compatible with drm_exec.

In order to make sure the object providing the shared dma-resv can't be
freed up before the objects making use of it, let every such GEM object
take a reference on it.

Reviewed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-7-dakr@redhat.com
This commit is contained in:
Danilo Krummrich 2023-11-08 01:12:36 +01:00
parent bbe8458037
commit 6118411428
5 changed files with 24 additions and 10 deletions

View File

@ -148,10 +148,17 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
* If nouveau_bo_new() allocated this buffer, the GEM object was never
* initialized, so don't attempt to release it.
*/
-	if (bo->base.dev)
+	if (bo->base.dev) {
+		/* Gem objects not being shared with other VMs get their
+		 * dma_resv from a root GEM object.
+		 */
+		if (nvbo->no_share)
+			drm_gem_object_put(nvbo->r_obj);
 		drm_gem_object_release(&bo->base);
-	else
+	} else {
 		dma_resv_fini(&bo->base._resv);
+	}
kfree(nvbo);
}

View File

@ -26,6 +26,11 @@ struct nouveau_bo {
struct list_head entry;
int pbbo_index;
bool validate_mapped;
+	/* Root GEM object we derive the dma_resv of in case this BO is not
+	 * shared between VMs.
+	 */
+	struct drm_gem_object *r_obj;
bool no_share;
/* GPU address space is independent of CPU word size */

View File

@ -111,7 +111,8 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return 0;
-	if (nvbo->no_share && uvmm && &uvmm->resv != nvbo->bo.base.resv)
+	if (nvbo->no_share && uvmm &&
+	    drm_gpuvm_resv(&uvmm->base) != nvbo->bo.base.resv)
return -EPERM;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
@ -245,7 +246,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
if (unlikely(!uvmm))
return -EINVAL;
-		resv = &uvmm->resv;
+		resv = drm_gpuvm_resv(&uvmm->base);
}
if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
@ -288,6 +289,11 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
nvbo->valid_domains &= domain;
+	if (nvbo->no_share) {
+		nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base);
+		drm_gem_object_get(nvbo->r_obj);
+	}
*pnvbo = nvbo;
return 0;
}

View File

@ -1802,7 +1802,6 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
int ret;
mutex_init(&uvmm->mutex);
-	dma_resv_init(&uvmm->resv);
mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
@ -1842,14 +1841,14 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
kernel_managed_addr, kernel_managed_size,
NULL, 0, &cli->uvmm.vmm.vmm);
if (ret)
-		goto out_free_gpuva_mgr;
+		goto out_gpuvm_fini;
cli->uvmm.vmm.cli = cli;
mutex_unlock(&cli->mutex);
return 0;
-out_free_gpuva_mgr:
+out_gpuvm_fini:
drm_gpuvm_destroy(&uvmm->base);
out_unlock:
mutex_unlock(&cli->mutex);
@ -1907,6 +1906,4 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
nouveau_vmm_fini(&uvmm->vmm);
drm_gpuvm_destroy(&uvmm->base);
mutex_unlock(&cli->mutex);
-	dma_resv_fini(&uvmm->resv);
}

View File

@ -12,7 +12,6 @@ struct nouveau_uvmm {
struct nouveau_vmm vmm;
struct maple_tree region_mt;
struct mutex mutex;
-	struct dma_resv resv;
bool disabled;
};