mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-10-30 08:02:30 +00:00)
drm/amdgpu: use common fences for VMID management v2
v2: add missing NULL check.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent 8120b61fdf
commit d52832986a

4 changed files with 21 additions and 18 deletions
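At its core, the change swaps the driver-private struct amdgpu_fence bookkeeping (amdgpu_fence_ref(), amdgpu_fence_unref(), amdgpu_fence_is_earlier()) for the kernel's common struct fence interface (fence_get(), fence_put(), fence_is_later()), so the VMID manager can track fences through the generic API. A minimal sketch of the resulting reference-counting pattern follows; the helper name is invented for illustration and is not part of the patch:

/* Illustration only: how a VMID slot holds a fence after this change.
 * amdgpu_fence embeds the common fence as its "base" member, so the
 * generic fence_get()/fence_put() helpers do the reference counting.
 * fence_put(NULL) is a no-op, so an empty slot needs no special casing.
 */
static void vmid_slot_assign(struct fence **slot, struct amdgpu_fence *fence)
{
	fence_put(*slot);                 /* drop the previous owner, if any */
	*slot = fence_get(&fence->base);  /* keep a reference to the new one */
}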
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -970,7 +970,7 @@ struct amdgpu_vm_id {
 	/* last flushed PD/PT update */
 	struct fence		*flushed_updates;
 	/* last use of vmid */
-	struct amdgpu_fence	*last_id_use;
+	struct fence		*last_id_use;
 };
 
 struct amdgpu_vm {
@@ -1003,7 +1003,7 @@ struct amdgpu_vm {
 };
 
 struct amdgpu_vm_manager {
-	struct amdgpu_fence	*active[AMDGPU_NUM_VM];
+	struct fence		*active[AMDGPU_NUM_VM];
 	uint32_t		max_pfn;
 	/* number of VMIDs */
 	unsigned		nvm;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -135,7 +135,7 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync)
 {
-	struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
+	struct fence *best[AMDGPU_MAX_RINGS] = {};
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
 	struct amdgpu_device *adev = ring->adev;
 
@@ -154,7 +154,8 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
 	/* skip over VMID 0, since it is the system VM */
 	for (i = 1; i < adev->vm_manager.nvm; ++i) {
-		struct amdgpu_fence *fence = adev->vm_manager.active[i];
+		struct fence *fence = adev->vm_manager.active[i];
+		struct amdgpu_ring *fring;
 
 		if (fence == NULL) {
 			/* found a free one */
@@ -163,21 +164,23 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 			return 0;
 		}
 
-		if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
-			best[fence->ring->idx] = fence;
-			choices[fence->ring == ring ? 0 : 1] = i;
+		fring = amdgpu_ring_from_fence(fence);
+		if (best[fring->idx] == NULL ||
+		    fence_is_later(best[fring->idx], fence)) {
+			best[fring->idx] = fence;
+			choices[fring == ring ? 0 : 1] = i;
 		}
 	}
 
 	for (i = 0; i < 2; ++i) {
 		if (choices[i]) {
-			struct amdgpu_fence *fence;
+			struct fence *fence;
 
 			fence = adev->vm_manager.active[choices[i]];
 			vm_id->id = choices[i];
 
 			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
-			return amdgpu_sync_fence(ring->adev, sync, &fence->base);
+			return amdgpu_sync_fence(ring->adev, sync, fence);
 		}
 	}
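The hunk above also carries the fix mentioned in the v2 note: the common fence_is_later() expects two valid fences from the same context, so the new code must treat an empty best[fring->idx] slot explicitly instead of passing NULL into the comparison. A sketch of that NULL-safe selection rule, with a hypothetical helper name that is not in the patch:

/* Sketch only: the "keep the earliest fence per ring" rule used while
 * scanning active VMIDs.  fence_is_later(a, b) compares two valid fences
 * from the same context, so the empty-slot case is handled first.
 */
static bool vmid_candidate_is_better(struct fence *best, struct fence *candidate)
{
	if (best == NULL)
		return true;                     /* no best yet: take this one */
	return fence_is_later(best, candidate); /* candidate is older than best */
}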
@@ -246,11 +249,11 @@ void amdgpu_vm_fence(struct amdgpu_device *adev,
 	unsigned ridx = fence->ring->idx;
 	unsigned vm_id = vm->ids[ridx].id;
 
-	amdgpu_fence_unref(&adev->vm_manager.active[vm_id]);
-	adev->vm_manager.active[vm_id] = amdgpu_fence_ref(fence);
+	fence_put(adev->vm_manager.active[vm_id]);
+	adev->vm_manager.active[vm_id] = fence_get(&fence->base);
 
-	amdgpu_fence_unref(&vm->ids[ridx].last_id_use);
-	vm->ids[ridx].last_id_use = amdgpu_fence_ref(fence);
+	fence_put(vm->ids[ridx].last_id_use);
+	vm->ids[ridx].last_id_use = fence_get(&fence->base);
 }
 
 /**
@@ -1311,7 +1314,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		fence_put(vm->ids[i].flushed_updates);
-		amdgpu_fence_unref(&vm->ids[i].last_id_use);
+		fence_put(vm->ids[i].last_id_use);
 	}
 
 	mutex_destroy(&vm->mutex);
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@@ -965,7 +965,7 @@ static int gmc_v7_0_sw_fini(void *handle)
 
 	if (adev->vm_manager.enabled) {
 		for (i = 0; i < AMDGPU_NUM_VM; ++i)
-			amdgpu_fence_unref(&adev->vm_manager.active[i]);
+			fence_put(adev->vm_manager.active[i]);
 		gmc_v7_0_vm_fini(adev);
 		adev->vm_manager.enabled = false;
 	}
@@ -1015,7 +1015,7 @@ static int gmc_v7_0_suspend(void *handle)
 
 	if (adev->vm_manager.enabled) {
 		for (i = 0; i < AMDGPU_NUM_VM; ++i)
-			amdgpu_fence_unref(&adev->vm_manager.active[i]);
+			fence_put(adev->vm_manager.active[i]);
 		gmc_v7_0_vm_fini(adev);
 		adev->vm_manager.enabled = false;
 	}
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

@@ -984,7 +984,7 @@ static int gmc_v8_0_sw_fini(void *handle)
 
 	if (adev->vm_manager.enabled) {
 		for (i = 0; i < AMDGPU_NUM_VM; ++i)
-			amdgpu_fence_unref(&adev->vm_manager.active[i]);
+			fence_put(adev->vm_manager.active[i]);
 		gmc_v8_0_vm_fini(adev);
 		adev->vm_manager.enabled = false;
 	}
@@ -1036,7 +1036,7 @@ static int gmc_v8_0_suspend(void *handle)
 
 	if (adev->vm_manager.enabled) {
 		for (i = 0; i < AMDGPU_NUM_VM; ++i)
-			amdgpu_fence_unref(&adev->vm_manager.active[i]);
+			fence_put(adev->vm_manager.active[i]);
 		gmc_v8_0_vm_fini(adev);
 		adev->vm_manager.enabled = false;
 	}
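The four gmc_v7_0/gmc_v8_0 hunks are mechanical: each sw_fini/suspend path now drops its per-VMID references through fence_put() instead of amdgpu_fence_unref(). Because fence_put() ignores NULL, every slot can be released unconditionally; a compressed sketch of that teardown pattern (the helper name is invented for illustration):

/* Sketch only: releasing all VMID fences at teardown, as the gmc hunks do.
 * fence_put() is a no-op for NULL, so unused slots need no check.
 */
static void vm_manager_release_all(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_VM; ++i)
		fence_put(adev->vm_manager.active[i]);
}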