scsi: ufs: Clean up and refactor clk-scaling feature

Manipulate clock scaling related logic only if the host capability
supports the clock scaling feature, to avoid redundant code execution.

To that end, factor the repeated allow/suspend sequences into two new
helpers, ufshcd_clk_scaling_allow() and ufshcd_clk_scaling_suspend(),
and move the ufshcd_is_clkscaling_supported() check out of
ufshcd_suspend_clkscaling() and ufshcd_resume_clkscaling() into their
callers.
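
With this change, call sites that open-coded the allow/resume sequence,
for example:

	down_write(&hba->clk_scaling_lock);
	hba->clk_scaling.is_allowed = true;
	up_write(&hba->clk_scaling_lock);
	if (hba->clk_scaling.is_enabled)
		ufshcd_resume_clkscaling(hba);

now reduce to a single gated helper call:

	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);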

Link: https://lore.kernel.org/r/20210120150142.5049-4-stanley.chu@mediatek.com
Reviewed-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Stanley Chu <stanley.chu@mediatek.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 348e1bc5f4 (parent b058fa8682)
Date: 2021-01-20 23:01:42 +08:00

@@ -1500,9 +1500,6 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 	unsigned long flags;
 	bool suspend = false;
 
-	if (!ufshcd_is_clkscaling_supported(hba))
-		return;
-
 	cancel_work_sync(&hba->clk_scaling.suspend_work);
 	cancel_work_sync(&hba->clk_scaling.resume_work);
 
@@ -1522,9 +1519,6 @@ static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
 	unsigned long flags;
 	bool resume = false;
 
-	if (!ufshcd_is_clkscaling_supported(hba))
-		return;
-
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->clk_scaling.is_suspended) {
 		resume = true;
@@ -5758,6 +5752,26 @@ static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
 	}
 }
 
+static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
+{
+	down_write(&hba->clk_scaling_lock);
+	hba->clk_scaling.is_allowed = allow;
+	up_write(&hba->clk_scaling_lock);
+}
+
+static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
+{
+	if (suspend) {
+		if (hba->clk_scaling.is_enabled)
+			ufshcd_suspend_clkscaling(hba);
+		ufshcd_clk_scaling_allow(hba, false);
+	} else {
+		ufshcd_clk_scaling_allow(hba, true);
+		if (hba->clk_scaling.is_enabled)
+			ufshcd_resume_clkscaling(hba);
+	}
+}
+
 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 {
 	pm_runtime_get_sync(hba->dev);
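
A note on ufshcd_clk_scaling_allow() above: clk_scaling_lock is taken
for write so that is_allowed only changes while no reader holds the
lock. Readers on the request submission path back off rather than
block; roughly (a simplified sketch of the reader side, not the exact
driver code):

	if (!down_read_trylock(&hba->clk_scaling_lock))
		return SCSI_MLQUEUE_HOST_BUSY;	/* requeued by the midlayer */
	/* ... build and issue the command ... */
	up_read(&hba->clk_scaling_lock);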
@@ -5782,22 +5796,18 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 		ufshcd_vops_resume(hba, pm_op);
 	} else {
 		ufshcd_hold(hba, false);
-		if (hba->clk_scaling.is_enabled)
+		if (ufshcd_is_clkscaling_supported(hba) &&
+		    hba->clk_scaling.is_enabled)
 			ufshcd_suspend_clkscaling(hba);
-		down_write(&hba->clk_scaling_lock);
-		hba->clk_scaling.is_allowed = false;
-		up_write(&hba->clk_scaling_lock);
+		ufshcd_clk_scaling_allow(hba, false);
 	}
 }
 
 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
 {
 	ufshcd_release(hba);
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = true;
-	up_write(&hba->clk_scaling_lock);
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
 	pm_runtime_put(hba->dev);
 }
 
@@ -8694,12 +8704,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ufshcd_hold(hba, false);
 	hba->clk_gating.is_suspended = true;
 
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_suspend_clkscaling(hba);
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = false;
-	up_write(&hba->clk_scaling_lock);
-
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, true);
 
 	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
 	    req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -8819,11 +8825,9 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
 			ufshcd_disable_auto_bkops(hba);
 enable_gating:
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = true;
-	up_write(&hba->clk_scaling_lock);
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
+
 	hba->clk_gating.is_suspended = false;
 	hba->dev_info.b_rpm_dev_flush_capable = false;
 	ufshcd_release(hba);
@@ -8925,11 +8929,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
 	hba->clk_gating.is_suspended = false;
 
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = true;
-	up_write(&hba->clk_scaling_lock);
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
 
 	/* Enable Auto-Hibernate if configured */
 	ufshcd_auto_hibern8_enable(hba);