habanalabs: add helper to get compute context
There are multiple places where the code needs to get the context's pointer and increment its refcount. This helper is the proper way to do that, instead of using the compute context pointer in the device structure directly.

Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
parent 6798676f7e
commit 4337b50b5f
4 changed files with 36 additions and 15 deletions
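
The helper pairs with hl_ctx_put(): take a reference while the context is looked up, use the pointer, then drop the reference when done. A minimal sketch of that call pattern from a hypothetical caller (the caller and its name are illustrative, not part of this commit):

	/* Hypothetical caller illustrating the get/use/put pattern */
	static int example_compute_ctx_user(struct hl_device *hdev)
	{
		struct hl_ctx *ctx;

		ctx = hl_get_compute_ctx(hdev);	/* refcount incremented */
		if (!ctx)
			return -EINVAL;	/* no user has the compute device open */

		/* ... ctx can be dereferenced safely here ... */

		hl_ctx_put(ctx);	/* drop the reference taken above */
		return 0;
	}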
drivers/misc/habanalabs/common/context.c

@@ -272,6 +272,29 @@ int hl_ctx_put(struct hl_ctx *ctx)
 	return kref_put(&ctx->refcount, hl_ctx_do_release);
 }
 
+struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
+{
+	struct hl_ctx *ctx = NULL;
+	struct hl_fpriv *hpriv;
+
+	mutex_lock(&hdev->fpriv_list_lock);
+
+	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
+		/* There can only be a single user which has opened the compute device, so exit
+		 * immediately once we find him
+		 */
+		if (!hpriv->is_control) {
+			ctx = hpriv->ctx;
+			hl_ctx_get(hdev, ctx);
+			break;
+		}
+	}
+
+	mutex_unlock(&hdev->fpriv_list_lock);
+
+	return ctx;
+}
+
 /*
  * hl_ctx_get_fence_locked - get CS fence under CS lock
  *
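
hl_ctx_get() and hl_ctx_put() wrap the kernel's kref primitives; as the hunk above shows, hl_ctx_put() is kref_put() with hl_ctx_do_release() as the destructor. For readers less familiar with that API, a generic sketch of the same refcounting pattern on a hypothetical object:

	#include <linux/kernel.h>	/* container_of() */
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct foo {
		struct kref refcount;
	};

	static void foo_release(struct kref *ref)
	{
		struct foo *f = container_of(ref, struct foo, refcount);

		kfree(f);	/* runs only when the last reference is dropped */
	}

	static void foo_get(struct foo *f)
	{
		kref_get(&f->refcount);	/* pin the object */
	}

	static int foo_put(struct foo *f)
	{
		/* returns 1 if this call released the object */
		return kref_put(&f->refcount, foo_release);
	}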
drivers/misc/habanalabs/common/debugfs.c

@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
 /*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
  * All Rights Reserved.
  */
 
@@ -327,11 +327,7 @@ static int vm_show(struct seq_file *s, void *data)
 
 	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
 
-	mutex_lock(&dev_entry->hdev->fpriv_list_lock);
-	ctx = dev_entry->hdev->compute_ctx;
-	if (ctx)
-		hl_ctx_get(dev_entry->hdev, ctx);
-	mutex_unlock(&dev_entry->hdev->fpriv_list_lock);
+	ctx = hl_get_compute_ctx(dev_entry->hdev);
 	if (ctx) {
 		seq_puts(s, "\nVA ranges:\n\n");
 		for (i = HL_VA_RANGE_TYPE_HOST ; i < HL_VA_RANGE_TYPE_MAX ; ++i) {
@@ -443,7 +439,7 @@ static int mmu_show(struct seq_file *s, void *data)
 	if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
 		ctx = hdev->kernel_ctx;
 	else
-		ctx = hdev->compute_ctx;
+		ctx = hl_get_compute_ctx(hdev);
 
 	if (!ctx) {
 		dev_err(hdev->dev, "no ctx available\n");
@@ -596,7 +592,7 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
 				u64 *phys_addr)
 {
 	struct hl_vm_phys_pg_pack *phys_pg_pack;
-	struct hl_ctx *ctx = hdev->compute_ctx;
+	struct hl_ctx *ctx;
 	struct hl_vm_hash_node *hnode;
 	u64 end_address, range_size;
 	struct hl_userptr *userptr;
@@ -604,6 +600,8 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
 	bool valid = false;
 	int i, rc = 0;
 
+	ctx = hl_get_compute_ctx(hdev);
+
 	if (!ctx) {
 		dev_err(hdev->dev, "no ctx available\n");
 		return -EINVAL;
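
The device_va_to_pa() change is the clearest illustration of the race being fixed: the old code read hdev->compute_ctx in an initializer without taking a reference, so the context could be released while the function was still using the pointer. Condensed from the two hunks above, for illustration only:

	/* before: raw pointer read, no reference taken */
	struct hl_ctx *ctx = hdev->compute_ctx;

	/* after: the helper takes fpriv_list_lock and increments the
	 * refcount, so ctx stays valid until a matching hl_ctx_put()
	 */
	struct hl_ctx *ctx = hl_get_compute_ctx(hdev);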
drivers/misc/habanalabs/common/device.c

@@ -961,6 +961,7 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
 	bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false,
 			reset_upon_device_release = false;
 	u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
+	struct hl_ctx *ctx;
 	int i, rc;
 
 	if (!hdev->init_done) {
@@ -1101,16 +1102,14 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
 	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
 		hl_cq_reset(hdev, &hdev->completion_queue[i]);
 
-	mutex_lock(&hdev->fpriv_list_lock);
-
 	/* Make sure the context switch phase will run again */
-	if (hdev->compute_ctx) {
-		atomic_set(&hdev->compute_ctx->thread_ctx_switch_token, 1);
-		hdev->compute_ctx->thread_ctx_switch_wait_token = 0;
+	ctx = hl_get_compute_ctx(hdev);
+	if (ctx) {
+		atomic_set(&ctx->thread_ctx_switch_token, 1);
+		ctx->thread_ctx_switch_wait_token = 0;
+		hl_ctx_put(ctx);
 	}
 
-	mutex_unlock(&hdev->fpriv_list_lock);
-
 	/* Finished tear-down, starting to re-initialize */
 
 	if (hard_reset) {
drivers/misc/habanalabs/common/habanalabs.h

@@ -2906,6 +2906,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
 void hl_ctx_do_release(struct kref *ref);
 void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
 int hl_ctx_put(struct hl_ctx *ctx);
+struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev);
 struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
 int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
 		struct hl_fence **fence, u32 arr_len);