// SPDX-License-Identifier: GPL-2.0

#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/pm_runtime.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

/**
 * blk_pm_runtime_init - Block layer runtime PM initialization routine
 * @q: the queue of the device
 * @dev: the device the queue belongs to
 *
 * Description:
 *    Initialize runtime-PM-related fields for @q and start autosuspend for
 *    @dev. Drivers that want to take advantage of request-based runtime PM
 *    should call this function after @dev has been initialized and its
 *    request queue @q has been allocated, while runtime PM cannot happen
 *    yet (either because it is disabled/forbidden or because its
 *    usage_count is > 0). In most cases drivers should call this function
 *    before any I/O has taken place.
 *
 *    This function sets up autosuspend for the device; the autosuspend
 *    delay is set to -1 to make runtime suspend impossible until an
 *    updated value is set, either by the user or by the driver. Drivers
 *    do not need to touch any other autosuspend settings.
 *
 *    Block layer runtime PM is request based, so it only works for drivers
 *    that use requests as their I/O unit, not for those that use bios
 *    directly.
 */
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
        q->dev = dev;
        q->rpm_status = RPM_ACTIVE;
        pm_runtime_set_autosuspend_delay(q->dev, -1);
        pm_runtime_use_autosuspend(q->dev);
}
EXPORT_SYMBOL(blk_pm_runtime_init);
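
/*
 * Illustrative sketch (not part of this file; the function names, queue
 * lookup and 5 s delay below are hypothetical): a driver that wants
 * request-based runtime PM would typically make these calls from its
 * probe routine, after the device and its request queue exist. Because
 * blk_pm_runtime_init() sets the autosuspend delay to -1, the device
 * stays active until a real delay is configured, e.g. via sysfs or:
 *
 *      static int my_drv_probe(struct device *dev)
 *      {
 *              struct request_queue *q = my_drv_to_queue(dev);
 *
 *              blk_pm_runtime_init(q, dev);
 *              pm_runtime_set_autosuspend_delay(dev, 5000);
 *              pm_runtime_allow(dev);
 *              return 0;
 *      }
 */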

/**
 * blk_pre_runtime_suspend - Pre runtime suspend check
 * @q: the queue of the device
 *
 * Description:
 *    This function will check if runtime suspend is allowed for the device
 *    by examining if there are any requests pending in the queue. If there
 *    are requests pending, the device cannot be runtime suspended;
 *    otherwise, the queue's status will be updated to SUSPENDING and the
 *    driver can proceed to suspend the device.
 *
 *    If suspend is not allowed, the device's last-busy timestamp is updated
 *    so that the runtime PM core will try to autosuspend it again later.
 *
 *    This function should be called near the start of the device's
 *    runtime_suspend callback.
 *
 * Return:
 *    0      - OK to runtime suspend the device
 *    -EBUSY - Device should not be runtime suspended
 */
int blk_pre_runtime_suspend(struct request_queue *q)
{
        int ret = 0;

        if (!q->dev)
                return ret;

        WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);

        spin_lock_irq(&q->queue_lock);
        q->rpm_status = RPM_SUSPENDING;
        spin_unlock_irq(&q->queue_lock);

        /*
         * Increase the pm_only counter before checking whether any
         * non-PM blk_queue_enter() calls are in progress, to prevent any
         * new non-PM blk_queue_enter() calls from succeeding before the
         * pm_only counter is decreased again.
         */
        blk_set_pm_only(q);
        ret = -EBUSY;
        /* Switch q_usage_counter from per-cpu to atomic mode. */
        blk_freeze_queue_start(q);
        /*
         * Wait until atomic mode has been reached. Since that
         * involves calling call_rcu(), it is guaranteed that later
         * blk_queue_enter() calls see the pm-only state. See also
         * http://lwn.net/Articles/573497/.
         */
        percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
        if (percpu_ref_is_zero(&q->q_usage_counter))
                ret = 0;
        /* Switch q_usage_counter back to per-cpu mode. */
        blk_mq_unfreeze_queue(q);

        if (ret < 0) {
                spin_lock_irq(&q->queue_lock);
                q->rpm_status = RPM_ACTIVE;
                pm_runtime_mark_last_busy(q->dev);
                spin_unlock_irq(&q->queue_lock);

                blk_clear_pm_only(q);
        }

        return ret;
}
EXPORT_SYMBOL(blk_pre_runtime_suspend);

/**
 * blk_post_runtime_suspend - Post runtime suspend processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_suspend function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of
 *    the device's runtime_suspend function. If suspend failed, the
 *    device's last-busy timestamp is updated so that the runtime PM core
 *    will try to autosuspend the device again later.
 *
 *    This function should be called near the end of the device's
 *    runtime_suspend callback.
 */
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
        if (!q->dev)
                return;

        spin_lock_irq(&q->queue_lock);
        if (!err) {
                q->rpm_status = RPM_SUSPENDED;
        } else {
                q->rpm_status = RPM_ACTIVE;
                pm_runtime_mark_last_busy(q->dev);
        }
        spin_unlock_irq(&q->queue_lock);

        if (err)
                blk_clear_pm_only(q);
}
EXPORT_SYMBOL(blk_post_runtime_suspend);
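
/*
 * Illustrative sketch (hypothetical driver code, not from this file): a
 * driver's runtime_suspend callback is expected to bracket its
 * device-specific suspend work with blk_pre_runtime_suspend() and
 * blk_post_runtime_suspend(), bailing out early when the block layer
 * reports the queue is still busy:
 *
 *      static int my_drv_runtime_suspend(struct device *dev)
 *      {
 *              struct request_queue *q = my_drv_to_queue(dev);
 *              int err;
 *
 *              err = blk_pre_runtime_suspend(q);
 *              if (err)
 *                      return err;
 *              err = my_drv_do_suspend(dev);
 *              blk_post_runtime_suspend(q, err);
 *              return err;
 *      }
 */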

/**
 * blk_pre_runtime_resume - Pre runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    Update the queue's runtime status to RESUMING in preparation for the
 *    runtime resume of the device.
 *
 *    This function should be called near the start of the device's
 *    runtime_resume callback.
 */
void blk_pre_runtime_resume(struct request_queue *q)
{
        if (!q->dev)
                return;

        spin_lock_irq(&q->queue_lock);
        q->rpm_status = RPM_RESUMING;
        spin_unlock_irq(&q->queue_lock);
}
EXPORT_SYMBOL(blk_pre_runtime_resume);

/**
 * blk_post_runtime_resume - Post runtime resume processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_resume function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of
 *    the device's runtime_resume function. If the resume was successful,
 *    call blk_set_runtime_active() to do the real work of restarting the
 *    queue.
 *
 *    This function should be called near the end of the device's
 *    runtime_resume callback.
 */
void blk_post_runtime_resume(struct request_queue *q, int err)
{
        if (!q->dev)
                return;
        if (!err) {
                blk_set_runtime_active(q);
        } else {
                spin_lock_irq(&q->queue_lock);
                q->rpm_status = RPM_SUSPENDED;
                spin_unlock_irq(&q->queue_lock);
        }
}
EXPORT_SYMBOL(blk_post_runtime_resume);
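
/*
 * Illustrative sketch (hypothetical driver code, not from this file): the
 * matching runtime_resume callback brackets the device-specific resume
 * work with blk_pre_runtime_resume() and blk_post_runtime_resume():
 *
 *      static int my_drv_runtime_resume(struct device *dev)
 *      {
 *              struct request_queue *q = my_drv_to_queue(dev);
 *              int err;
 *
 *              blk_pre_runtime_resume(q);
 *              err = my_drv_do_resume(dev);
 *              blk_post_runtime_resume(q, err);
 *              return err;
 *      }
 */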

/**
 * blk_set_runtime_active - Force runtime status of the queue to be active
 * @q: the queue of the device
 *
 * If the device is left runtime suspended during system suspend, the resume
 * hook typically resumes the device and corrects its runtime status
 * accordingly. However, that does not affect the queue's runtime PM status,
 * which is still "suspended". This prevents requests from being processed
 * from the queue.
 *
 * This function can be used in a driver's resume hook to correct the
 * queue's runtime PM status and re-enable fetching requests from the
 * queue. It should be called before the first request is added to the
 * queue.
 *
 * This function is also called by blk_post_runtime_resume() for successful
 * runtime resumes. It does everything necessary to restart the queue.
 */
void blk_set_runtime_active(struct request_queue *q)
{
        int old_status;

        if (!q->dev)
                return;

        spin_lock_irq(&q->queue_lock);
        old_status = q->rpm_status;
        q->rpm_status = RPM_ACTIVE;
        pm_runtime_mark_last_busy(q->dev);
        pm_request_autosuspend(q->dev);
        spin_unlock_irq(&q->queue_lock);

        if (old_status != RPM_ACTIVE)
                blk_clear_pm_only(q);
}
EXPORT_SYMBOL(blk_set_runtime_active);
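
/*
 * Illustrative sketch (hypothetical driver code, not from this file): a
 * system resume hook for a device that may have been left runtime
 * suspended across system suspend could resync both the runtime PM core
 * and the queue status before I/O restarts:
 *
 *      static int my_drv_resume(struct device *dev)
 *      {
 *              struct request_queue *q = my_drv_to_queue(dev);
 *              int err;
 *
 *              err = my_drv_do_resume(dev);
 *              if (err)
 *                      return err;
 *              pm_runtime_disable(dev);
 *              pm_runtime_set_active(dev);
 *              pm_runtime_enable(dev);
 *              blk_set_runtime_active(q);
 *              return 0;
 *      }
 */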