drm/msm: Remove struct_mutex usage

The remaining struct_mutex usage is just to serialize various gpu
related things (submit/retire/recover/fault/etc), so replace
struct_mutex with gpu->lock.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Link: https://lore.kernel.org/r/20211109181117.591148-4-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
commit c28e2f2b41 (parent 1d054c9b84)
Author: Rob Clark <robdclark@chromium.org>
Date:   2021-11-09 10:11:03 -08:00

 8 files changed, 49 insertions(+), 34 deletions(-)
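
The mechanical shape of the change is the same in every file below:
code that used to serialize on the device-wide dev->struct_mutex now
takes the per-GPU mutex instead. A minimal sketch of the resulting
pattern (illustrative only, not a hunk from this patch):

	static void example_gpu_section(struct msm_gpu *gpu)
	{
		/* was: mutex_lock(&gpu->dev->struct_mutex); */
		mutex_lock(&gpu->lock);

		/* ... submit/retire/recover/fault work for this gpu ... */

		mutex_unlock(&gpu->lock);
	}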

diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -107,7 +107,7 @@ reset_set(void *data, u64 val)
 	 * try to reset an active GPU.
 	 */
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&gpu->lock);
 
 	release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]);
 	adreno_gpu->fw[ADRENO_FW_PM4] = NULL;
@@ -133,7 +133,7 @@ reset_set(void *data, u64 val)
 	gpu->funcs->recover(gpu);
 
 	pm_runtime_put_sync(&gpu->pdev->dev);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&gpu->lock);
 
 	return 0;
 }

diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -408,9 +408,9 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
 		return NULL;
 	}
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&gpu->lock);
 	ret = msm_gpu_hw_init(gpu);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&gpu->lock);
 	pm_runtime_put_autosuspend(&pdev->dev);
 	if (ret) {
 		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);

diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -29,14 +29,14 @@ static int msm_gpu_show(struct seq_file *m, void *arg)
 	struct msm_gpu *gpu = priv->gpu;
 	int ret;
 
-	ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+	ret = mutex_lock_interruptible(&gpu->lock);
 	if (ret)
 		return ret;
 
 	drm_printf(&p, "%s Status:\n", gpu->name);
 	gpu->funcs->show(gpu, show_priv->state, &p);
 
-	mutex_unlock(&show_priv->dev->struct_mutex);
+	mutex_unlock(&gpu->lock);
 
 	return 0;
 }
@@ -48,9 +48,9 @@ static int msm_gpu_release(struct inode *inode, struct file *file)
 	struct msm_drm_private *priv = show_priv->dev->dev_private;
 	struct msm_gpu *gpu = priv->gpu;
 
-	mutex_lock(&show_priv->dev->struct_mutex);
+	mutex_lock(&gpu->lock);
 	gpu->funcs->gpu_state_put(show_priv->state);
-	mutex_unlock(&show_priv->dev->struct_mutex);
+	mutex_unlock(&gpu->lock);
 
 	kfree(show_priv);
 
@@ -72,7 +72,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
 	if (!show_priv)
 		return -ENOMEM;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&gpu->lock);
 	if (ret)
 		goto free_priv;
 
@@ -81,7 +81,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
 	show_priv->state = gpu->funcs->gpu_state_get(gpu);
 	pm_runtime_put_sync(&gpu->pdev->dev);
 
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&gpu->lock);
 
 	if (IS_ERR(show_priv->state)) {
 		ret = PTR_ERR(show_priv->state);

diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -150,7 +150,7 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
 {
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&gpu->lock));
 
 	if (!gpu->needs_hw_init)
 		return 0;
@@ -361,7 +361,7 @@ static void recover_worker(struct kthread_work *work)
 	char *comm = NULL, *cmd = NULL;
 	int i;
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&gpu->lock);
 
 	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
 
@@ -442,7 +442,7 @@ static void recover_worker(struct kthread_work *work)
 		}
 	}
 
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&gpu->lock);
 
 	msm_gpu_retire(gpu);
 }
@@ -450,12 +450,11 @@ static void recover_worker(struct kthread_work *work)
 static void fault_worker(struct kthread_work *work)
 {
 	struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
-	struct drm_device *dev = gpu->dev;
 	struct msm_gem_submit *submit;
 	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
 	char *comm = NULL, *cmd = NULL;
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&gpu->lock);
 
 	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
 	if (submit && submit->fault_dumped)
@@ -490,7 +489,7 @@ static void fault_worker(struct kthread_work *work)
 	memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
 	gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
 
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&gpu->lock);
 }
 
 static void hangcheck_timer_reset(struct msm_gpu *gpu)
@@ -733,7 +732,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned long flags;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&gpu->lock));
 
 	pm_runtime_get_sync(&gpu->pdev->dev);
 
@@ -848,6 +847,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
 	INIT_LIST_HEAD(&gpu->active_list);
 	mutex_init(&gpu->active_lock);
+	mutex_init(&gpu->lock);
 	kthread_init_work(&gpu->retire_work, retire_worker);
 	kthread_init_work(&gpu->recover_work, recover_worker);
 	kthread_init_work(&gpu->fault_work, fault_worker);
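
Note the split of responsibilities in msm_gpu.c: msm_gpu_hw_init() and
msm_gpu_submit() never take the lock themselves, they only assert it
with WARN_ON(!mutex_is_locked(&gpu->lock)), while callers such as
adreno_load_gpu() and msm_job_run() own the locking. A hypothetical
pair showing that contract (function names invented for illustration):

	/* callee: relies on the caller holding gpu->lock */
	static void must_hold_gpu_lock(struct msm_gpu *gpu)
	{
		WARN_ON(!mutex_is_locked(&gpu->lock));	/* catch unlocked callers */

		/* ... touch state protected by gpu->lock ... */
	}

	/* caller: owns the lock around the whole critical section */
	static void gpu_lock_owner(struct msm_gpu *gpu)
	{
		mutex_lock(&gpu->lock);
		must_hold_gpu_lock(gpu);
		mutex_unlock(&gpu->lock);
	}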

diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -161,13 +161,23 @@ struct msm_gpu {
 	 */
 	struct list_head active_list;
 
+	/**
+	 * lock:
+	 *
+	 * General lock for serializing all the gpu things.
+	 *
+	 * TODO move to per-ring locking where feasible (ie. submit/retire
+	 * path, etc)
+	 */
+	struct mutex lock;
+
 	/**
 	 * active_submits:
 	 *
 	 * The number of submitted but not yet retired submits, used to
 	 * determine transitions between active and idle.
 	 *
-	 * Protected by lock
+	 * Protected by active_lock
 	 */
 	int active_submits;
 
@@ -548,28 +558,28 @@ static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
 {
 	struct msm_gpu_state *state = NULL;
 
-	mutex_lock(&gpu->dev->struct_mutex);
+	mutex_lock(&gpu->lock);
 
 	if (gpu->crashstate) {
 		kref_get(&gpu->crashstate->ref);
 		state = gpu->crashstate;
 	}
 
-	mutex_unlock(&gpu->dev->struct_mutex);
+	mutex_unlock(&gpu->lock);
 
 	return state;
 }
 
 static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
 {
-	mutex_lock(&gpu->dev->struct_mutex);
+	mutex_lock(&gpu->lock);
 
 	if (gpu->crashstate) {
 		if (gpu->funcs->gpu_state_put(gpu->crashstate))
 			gpu->crashstate = NULL;
 	}
 
-	mutex_unlock(&gpu->dev->struct_mutex);
+	mutex_unlock(&gpu->lock);
 }
 
 /*
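
The crashstate helpers above hold gpu->lock only long enough to adjust
the refcount: the kref taken under the lock is what keeps the state
alive afterwards, so a caller can drop the lock while inspecting it. A
hypothetical caller (not from this patch) would look like:

	static void inspect_crashstate(struct msm_gpu *gpu)
	{
		struct msm_gpu_state *state = msm_gpu_crashstate_get(gpu);

		if (!state)
			return;

		/* ... read state fields without holding gpu->lock ... */

		msm_gpu_crashstate_put(gpu);	/* drops the ref, may free it */
	}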

diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -155,9 +155,12 @@ static int perf_open(struct inode *inode, struct file *file)
 	struct msm_gpu *gpu = priv->gpu;
 	int ret = 0;
 
-	mutex_lock(&dev->struct_mutex);
+	if (!gpu)
+		return -ENODEV;
 
-	if (perf->open || !gpu) {
+	mutex_lock(&gpu->lock);
+
+	if (perf->open) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -171,7 +174,7 @@ static int perf_open(struct inode *inode, struct file *file)
 	perf->next_jiffies = jiffies + SAMPLE_TIME;
 
 out:
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&gpu->lock);
 	return ret;
 }

diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -86,7 +86,7 @@ struct msm_rd_state {
 	struct msm_gem_submit *submit;
 
 	/* fifo access is synchronized on the producer side by
-	 * struct_mutex held by submit code (otherwise we could
+	 * gpu->lock held by submit code (otherwise we could
 	 * end up w/ cmds logged in different order than they
 	 * were executed). And read_lock synchronizes the reads
 	 */
@@ -181,9 +181,12 @@ static int rd_open(struct inode *inode, struct file *file)
 	uint32_t gpu_id;
 	int ret = 0;
 
-	mutex_lock(&dev->struct_mutex);
+	if (!gpu)
+		return -ENODEV;
 
-	if (rd->open || !gpu) {
+	mutex_lock(&gpu->lock);
+
+	if (rd->open) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -200,7 +203,7 @@ static int rd_open(struct inode *inode, struct file *file)
 	rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));
 
 out:
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&gpu->lock);
 	return ret;
 }
@@ -340,11 +343,10 @@ static void snapshot_buf(struct msm_rd_state *rd,
 	msm_gem_unlock(&obj->base);
 }
 
-/* called under struct_mutex */
+/* called under gpu->lock */
 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 		const char *fmt, ...)
 {
-	struct drm_device *dev = submit->dev;
 	struct task_struct *task;
 	char msg[256];
 	int i, n;
@@ -355,7 +357,7 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 	/* writing into fifo is serialized by caller, and
 	 * rd->read_lock is used to serialize the reads
 	 */
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&submit->gpu->lock));
 
 	if (fmt) {
 		va_list args;
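
The fifo comment in msm_rd_state describes a single-producer /
single-consumer scheme: writes happen in the submit path with gpu->lock
already held, reads in the debugfs read() path under rd->read_lock.
Roughly (a sketch under those assumptions, not code from the tree):

	/* producer side: called from the submit path, gpu->lock held */
	static void rd_fifo_produce(struct msm_rd_state *rd,
			struct msm_gem_submit *submit)
	{
		WARN_ON(!mutex_is_locked(&submit->gpu->lock));

		/* ... fifo writes happen here, in submission order ... */
	}

	/* consumer side: debugfs read(), serialized by rd->read_lock */
	static void rd_fifo_consume(struct msm_rd_state *rd)
	{
		mutex_lock(&rd->read_lock);

		/* ... drain the fifo to userspace ... */

		mutex_unlock(&rd->read_lock);
	}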

diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -21,11 +21,11 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
 	pm_runtime_get_sync(&gpu->pdev->dev);
 
 	/* TODO move submit path over to using a per-ring lock.. */
-	mutex_lock(&gpu->dev->struct_mutex);
+	mutex_lock(&gpu->lock);
 
 	msm_gpu_submit(gpu, submit);
 
-	mutex_unlock(&gpu->dev->struct_mutex);
+	mutex_unlock(&gpu->lock);
 
 	pm_runtime_put(&gpu->pdev->dev);