accel/habanalabs: use a mutex rather than a spinlock

There are two reasons why a mutex is better here:
1. The critical section is relatively long, so in certain
scenarios (e.g., multiple VM allocations) spinning on the lock
might cause noticeable performance degradation.
2. It removes the incorrect usage of a mutex under a
spin_lock (where preemption is disabled); a minimal sketch of
that problem follows the tags below.

Reported-by: Dan Carpenter <error27@gmail.com>
Signed-off-by: Koby Elbaz <kelbaz@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
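
For illustration, a minimal kernel-C sketch (not taken from the driver; demo_spinlock,
demo_mutex, inner_mutex and the demo_* functions are hypothetical names) of why the old
scheme was broken and why switching the outer lock to a mutex is safe: a mutex may sleep,
so it must not be acquired while a spinlock is held and preemption is disabled, whereas
nesting a mutex under another mutex is fine.

	/* Hypothetical example, not driver code. */
	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	static LIST_HEAD(demo_list);
	static DEFINE_SPINLOCK(demo_spinlock);	/* old outer lock: atomic context */
	static DEFINE_MUTEX(demo_mutex);	/* new outer lock: sleepable context */
	static DEFINE_MUTEX(inner_mutex);	/* some mutex taken inside the section */

	struct demo_node {
		struct list_head link;
	};

	/* BROKEN: mutex_lock() may sleep while demo_spinlock disables preemption. */
	static void demo_add_broken(struct demo_node *node)
	{
		spin_lock(&demo_spinlock);
		mutex_lock(&inner_mutex);	/* sleeping while atomic */
		list_add(&node->link, &demo_list);
		mutex_unlock(&inner_mutex);
		spin_unlock(&demo_spinlock);
	}

	/* OK: the outer lock is now a mutex, so sleeping inside it is allowed. */
	static void demo_add_fixed(struct demo_node *node)
	{
		mutex_lock(&demo_mutex);
		mutex_lock(&inner_mutex);
		list_add(&node->link, &demo_list);
		mutex_unlock(&inner_mutex);
		mutex_unlock(&demo_mutex);
	}

With CONFIG_DEBUG_ATOMIC_SLEEP enabled, the broken variant triggers a
"BUG: sleeping function called from invalid context" warning at runtime.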

@@ -258,7 +258,7 @@ static int vm_show(struct seq_file *s, void *data)
 	if (!dev_entry->hdev->mmu_enable)
 		return 0;
-	spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+	mutex_lock(&dev_entry->ctx_mem_hash_mutex);
 	list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
 		once = false;
@@ -329,7 +329,7 @@ static int vm_show(struct seq_file *s, void *data)
 	}
-	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+	mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
 	ctx = hl_get_compute_ctx(dev_entry->hdev);
 	if (ctx) {
@@ -1785,7 +1785,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
 	spin_lock_init(&dev_entry->cs_spinlock);
 	spin_lock_init(&dev_entry->cs_job_spinlock);
 	spin_lock_init(&dev_entry->userptr_spinlock);
-	spin_lock_init(&dev_entry->ctx_mem_hash_spinlock);
+	mutex_init(&dev_entry->ctx_mem_hash_mutex);
 	dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
 						hl_debug_root);
@@ -1802,6 +1802,7 @@ void hl_debugfs_remove_device(struct hl_device *hdev)
 	debugfs_remove_recursive(entry->root);
+	mutex_destroy(&entry->ctx_mem_hash_mutex);
 	mutex_destroy(&entry->file_mutex);
 	vfree(entry->data_dma_blob_desc.data);
@@ -1908,18 +1909,18 @@ void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
 {
 	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
-	spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+	mutex_lock(&dev_entry->ctx_mem_hash_mutex);
 	list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
-	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+	mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
 }
 void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
 {
 	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
-	spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+	mutex_lock(&dev_entry->ctx_mem_hash_mutex);
 	list_del(&ctx->debugfs_list);
-	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+	mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
 }
 /**

@@ -2320,7 +2320,7 @@ struct hl_debugfs_entry {
  * @userptr_list: list of available userptrs (virtual memory chunk descriptor).
  * @userptr_spinlock: protects userptr_list.
  * @ctx_mem_hash_list: list of available contexts with MMU mappings.
- * @ctx_mem_hash_spinlock: protects cb_list.
+ * @ctx_mem_hash_mutex: protects list of available contexts with MMU mappings.
  * @data_dma_blob_desc: data DMA descriptor of blob.
  * @mon_dump_blob_desc: monitor dump descriptor of blob.
  * @state_dump: data of the system states in case of a bad cs.
@@ -2351,7 +2351,7 @@ struct hl_dbg_device_entry {
 	struct list_head userptr_list;
 	spinlock_t userptr_spinlock;
 	struct list_head ctx_mem_hash_list;
-	spinlock_t ctx_mem_hash_spinlock;
+	struct mutex ctx_mem_hash_mutex;
 	struct debugfs_blob_wrapper data_dma_blob_desc;
 	struct debugfs_blob_wrapper mon_dump_blob_desc;
 	char *state_dump[HL_STATE_DUMP_HIST_LEN];