mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-27 21:03:32 +00:00
scsi: block: Fix a race in the runtime power management code
commit fa4d0f1992
upstream. With the current implementation the following race can happen: * blk_pre_runtime_suspend() calls blk_freeze_queue_start() and blk_mq_unfreeze_queue(). * blk_queue_enter() calls blk_queue_pm_only() and that function returns true. * blk_queue_enter() calls blk_pm_request_resume() and that function does not call pm_request_resume() because the queue runtime status is RPM_ACTIVE. * blk_pre_runtime_suspend() changes the queue status into RPM_SUSPENDING. Fix this race by changing the queue runtime status into RPM_SUSPENDING before switching q_usage_counter to atomic mode. Link: https://lore.kernel.org/r/20201209052951.16136-2-bvanassche@acm.org Fixes: 986d413b7c
("blk-mq: Enable support for runtime power management") Cc: Ming Lei <ming.lei@redhat.com> Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com> Cc: stable <stable@vger.kernel.org> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Hannes Reinecke <hare@suse.de> Reviewed-by: Jens Axboe <axboe@kernel.dk> Acked-by: Alan Stern <stern@rowland.harvard.edu> Acked-by: Stanley Chu <stanley.chu@mediatek.com> Co-developed-by: Can Guo <cang@codeaurora.org> Signed-off-by: Can Guo <cang@codeaurora.org> Signed-off-by: Bart Van Assche <bvanassche@acm.org> Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
9ce7ac5ed5
commit
af07e4dd07
1 changed file with 9 additions and 6 deletions
block/blk-pm.c

@@ -67,6 +67,10 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 
 	WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
 
+	spin_lock_irq(&q->queue_lock);
+	q->rpm_status = RPM_SUSPENDING;
+	spin_unlock_irq(&q->queue_lock);
+
 	/*
 	 * Increase the pm_only counter before checking whether any
 	 * non-PM blk_queue_enter() calls are in progress to avoid that any
@@ -89,15 +93,14 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 	/* Switch q_usage_counter back to per-cpu mode. */
 	blk_mq_unfreeze_queue(q);
 
-	spin_lock_irq(&q->queue_lock);
-	if (ret < 0)
+	if (ret < 0) {
+		spin_lock_irq(&q->queue_lock);
+		q->rpm_status = RPM_ACTIVE;
 		pm_runtime_mark_last_busy(q->dev);
-	else
-		q->rpm_status = RPM_SUSPENDING;
-	spin_unlock_irq(&q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 
-	if (ret)
 		blk_clear_pm_only(q);
+	}
 
 	return ret;
 }
Loading…
Reference in a new issue