mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-02 15:18:19 +00:00
media: v4l2-mem2mem: Fix missing v4l2_m2m_try_run call
Commit 34dbb848d5
("media: mem2mem: Remove excessive try_run call") removed a redundant call to v4l2_m2m_try_run but instead introduced a bug. Consider the following case: 1) Context A schedules, queues and runs job A. 2) While the m2m device is running, context B schedules and queues job B. Job B cannot run, because it has to wait for job A. 3) Job A completes, calls v4l2_m2m_job_finish, and tries to queue a job for context A, but since the context is empty it won't do anything. In this scenario, queued job B will never run. Fix this by calling v4l2_m2m_try_run from v4l2_m2m_try_schedule. While here, add more documentation to these functions. Fixes: 34dbb848d5
("media: mem2mem: Remove excessive try_run call") Signed-off-by: Ezequiel Garcia <ezequiel@collabora.com> Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com> [hans.verkuil@cisco.com: split >80 cols line] Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
This commit is contained in:
parent
39dd23dc9d
commit
9db3bbf58b
1 changed file with 30 additions and 3 deletions
|
@ -249,15 +249,24 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
|
||||||
m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
|
m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
|
||||||
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
|
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
|
||||||
|
|
||||||
|
dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
|
||||||
m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
|
m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
|
||||||
}
|
}
|
||||||
|
|
||||||
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
|
/*
|
||||||
|
* __v4l2_m2m_try_queue() - queue a job
|
||||||
|
* @m2m_dev: m2m device
|
||||||
|
* @m2m_ctx: m2m context
|
||||||
|
*
|
||||||
|
* Check if this context is ready to queue a job.
|
||||||
|
*
|
||||||
|
* This function can run in interrupt context.
|
||||||
|
*/
|
||||||
|
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
|
||||||
|
struct v4l2_m2m_ctx *m2m_ctx)
|
||||||
{
|
{
|
||||||
struct v4l2_m2m_dev *m2m_dev;
|
|
||||||
unsigned long flags_job, flags_out, flags_cap;
|
unsigned long flags_job, flags_out, flags_cap;
|
||||||
|
|
||||||
m2m_dev = m2m_ctx->m2m_dev;
|
|
||||||
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
|
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
|
||||||
|
|
||||||
if (!m2m_ctx->out_q_ctx.q.streaming
|
if (!m2m_ctx->out_q_ctx.q.streaming
|
||||||
|
@ -315,7 +324,25 @@ void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
|
||||||
m2m_ctx->job_flags |= TRANS_QUEUED;
|
m2m_ctx->job_flags |= TRANS_QUEUED;
|
||||||
|
|
||||||
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
|
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
|
||||||
|
* @m2m_ctx: m2m context
|
||||||
|
*
|
||||||
|
* Check if this context is ready to queue a job. If suitable,
|
||||||
|
* run the next queued job on the mem2mem device.
|
||||||
|
*
|
||||||
|
* This function shouldn't run in interrupt context.
|
||||||
|
*
|
||||||
|
* Note that v4l2_m2m_try_schedule() can schedule one job for this context,
|
||||||
|
* and then run another job for another context.
|
||||||
|
*/
|
||||||
|
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
|
||||||
|
{
|
||||||
|
struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;
|
||||||
|
|
||||||
|
__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
|
||||||
v4l2_m2m_try_run(m2m_dev);
|
v4l2_m2m_try_run(m2m_dev);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
|
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
|
||||||
|
|
Loading…
Reference in a new issue