Diffstat (limited to 'drivers/scsi/ufs/ufshcd.c')
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 79
1 file changed, 57 insertions(+), 22 deletions(-)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 3262ac0e39d4..5db18f444ea9 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -94,6 +94,9 @@
/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000
+/* Default delay of RPM device flush delayed work */
+#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
+
/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
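The new RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS constant is consumed later in this same patch, converted to jiffies at the scheduling site. A minimal sketch of that call (same identifiers as in this diff):

	schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
			      msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));

msecs_to_jiffies() rounds up to whole timer ticks, so the recheck fires no earlier than roughly 5 seconds after the runtime suspend completes.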
@@ -5314,7 +5317,7 @@ static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
return false;
}
-static bool ufshcd_wb_keep_vcc_on(struct ufs_hba *hba)
+static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
{
int ret;
u32 avail_buf;
@@ -5352,6 +5355,21 @@ static bool ufshcd_wb_keep_vcc_on(struct ufs_hba *hba)
return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
}
+static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(to_delayed_work(work),
+ struct ufs_hba,
+ rpm_dev_flush_recheck_work);
+ /*
+ * To prevent unnecessary VCC power drain after device finishes
+ * WriteBooster buffer flush or Auto BKOPs, force runtime resume
+ * after a certain delay to recheck the threshold by next runtime
+ * suspend.
+ */
+ pm_runtime_get_sync(hba->dev);
+ pm_runtime_put_sync(hba->dev);
+}
+
/**
* ufshcd_exception_event_handler - handle exceptions raised by device
* @work: pointer to work data
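The new worker above uses the standard delayed-work idiom: to_delayed_work()/container_of() recover the owning ufs_hba from the embedded work item, and the pm_runtime_get_sync()/pm_runtime_put_sync() pair forces a resume-then-suspend cycle so the suspend path re-evaluates the flush state. A self-contained sketch of the same idiom (the my_dev type and names are hypothetical, not from this patch):

	#include <linux/device.h>
	#include <linux/pm_runtime.h>
	#include <linux/workqueue.h>

	struct my_dev {
		struct device *dev;
		struct delayed_work recheck_work;
	};

	static void my_dev_recheck_work(struct work_struct *work)
	{
		struct my_dev *md = container_of(to_delayed_work(work),
						 struct my_dev, recheck_work);

		/* Wake the device, then immediately let it idle again. */
		pm_runtime_get_sync(md->dev);
		pm_runtime_put_sync(md->dev);
	}

	/* At init time: INIT_DELAYED_WORK(&md->recheck_work, my_dev_recheck_work); */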
@@ -8099,8 +8117,7 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
!hba->dev_info.is_lu_power_on_wp) {
ufshcd_setup_vreg(hba, false);
} else if (!ufshcd_is_ufs_dev_active(hba)) {
- if (!hba->dev_info.keep_vcc_on)
- ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+ ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
if (!ufshcd_is_link_active(hba)) {
ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
@@ -8225,27 +8242,30 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_disable_auto_bkops(hba);
}
/*
- * With wb enabled, if the bkops is enabled or if the
- * configured WB type is 70% full, keep vcc ON
- * for the device to flush the wb buffer
+ * If device needs to do BKOP or WB buffer flush during
+ * Hibern8, keep device power mode as "active power mode"
+ * and VCC supply.
*/
- if ((hba->auto_bkops_enabled && ufshcd_is_wb_allowed(hba)) ||
- ufshcd_wb_keep_vcc_on(hba))
- hba->dev_info.keep_vcc_on = true;
- else
- hba->dev_info.keep_vcc_on = false;
- } else {
- hba->dev_info.keep_vcc_on = false;
- }
+ hba->dev_info.b_rpm_dev_flush_capable =
+ hba->auto_bkops_enabled ||
+ (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
+ ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
+ ufshcd_is_auto_hibern8_enabled(hba))) &&
+ ufshcd_wb_need_flush(hba));
+ }
+
+ if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
+ if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
+ !ufshcd_is_runtime_pm(pm_op)) {
+ /* ensure that bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ }
- if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
- ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
- !ufshcd_is_runtime_pm(pm_op))) {
- /* ensure that bkops is disabled */
- ufshcd_disable_auto_bkops(hba);
- ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
- if (ret)
- goto enable_gating;
+ if (!hba->dev_info.b_rpm_dev_flush_capable) {
+ ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+ if (ret)
+ goto enable_gating;
+ }
}
flush_work(&hba->eeh_work);
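The new b_rpm_dev_flush_capable expression is dense. It evaluates true when Auto-BKOPs is already enabled, or when the link is going to sit in Hibern8 (either requested directly, or via Auto-Hibern8 on an active link) and ufshcd_wb_need_flush() reports the WriteBooster buffer is past its threshold. A hypothetical helper restating the same logic (rpm_dev_flush_needed() is illustrative only, not part of this patch):

	static bool rpm_dev_flush_needed(struct ufs_hba *hba,
					 enum uic_link_state req_link_state)
	{
		bool hibern8;

		if (hba->auto_bkops_enabled)
			return true;

		hibern8 = (req_link_state == UIC_LINK_HIBERN8_STATE) ||
			  ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
			   ufshcd_is_auto_hibern8_enabled(hba));

		return hibern8 && ufshcd_wb_need_flush(hba);
	}

Note also the split of the old combined condition: BKOPs is still force-disabled when the power mode changes outside BKOPs-aware runtime PM, but ufshcd_set_dev_pwr_mode() is now skipped entirely when the device must stay active to flush.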
@@ -8298,9 +8318,16 @@ enable_gating:
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
+ hba->dev_info.b_rpm_dev_flush_capable = false;
ufshcd_release(hba);
out:
+ if (hba->dev_info.b_rpm_dev_flush_capable) {
+ schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
+ msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
+ }
+
hba->pm_op_in_progress = 0;
+
if (ret)
ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
return ret;
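Ordering matters on the error path above: enable_gating clears b_rpm_dev_flush_capable before execution falls through to out:, so the recheck work is only queued when the suspend actually succeeded with the device left in active power mode. Restated as annotated control flow (a condensation of the code above, not new logic):

	enable_gating:	/* reached only on failure */
		...
		hba->dev_info.b_rpm_dev_flush_capable = false;
	out:		/* reached on success and failure */
		if (hba->dev_info.b_rpm_dev_flush_capable)	/* success only */
			schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
				msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));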
@@ -8389,6 +8416,11 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ if (hba->dev_info.b_rpm_dev_flush_capable) {
+ hba->dev_info.b_rpm_dev_flush_capable = false;
+ cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
+ }
+
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
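If the device runtime-resumes for real I/O before the 5-second timer fires, the pending recheck is no longer needed, so the resume path clears the flag and cancels the work. Using cancel_delayed_work() rather than cancel_delayed_work_sync() means a concurrently executing handler is not waited for; that appears harmless here, since the handler only performs a pm_runtime_get_sync()/put_sync() pair, which at worst triggers one extra (and still correct) re-evaluation on the following runtime suspend:

	/*
	 * Non-blocking cancel: a racing handler only does get_sync()/put_sync(),
	 * which is cheap on an already-resumed device.
	 */
	cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);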
@@ -8862,6 +8894,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
+ INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
+ ufshcd_rpm_dev_flush_recheck_work);
+
/* Set the default auto-hibernate idle timer value to 150 ms */
if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
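This view is limited to ufshcd.c (see the diffstat), so the companion header changes are not shown here. For the hunks above to compile, the full patch presumably also updates the UFS headers along these lines (inferred from the fields' uses in this diff, not copied from the actual header diff):

	/* drivers/scsi/ufs/ufs.h: in struct ufs_dev_info */
	-	bool keep_vcc_on;
	+	bool b_rpm_dev_flush_capable;

	/* drivers/scsi/ufs/ufshcd.h: in struct ufs_hba */
	+	struct delayed_work rpm_dev_flush_recheck_work;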