@@ -1172,3 +1172,12 @@ Description: This node is used to set or display whether UFS WriteBooster is
(if the platform supports UFSHCD_CAP_CLK_SCALING). For a
platform that doesn't support UFSHCD_CAP_CLK_SCALING, we can
disable/enable WriteBooster through this sysfs node.
+
+What: /sys/bus/platform/drivers/ufshcd/*/wb_batched_flush
+Date: April 2021
+Contact: Daejun Park <daejun7.park@samsung.com>
+Description:	This entry shows whether batched flushing of the UFS
+		WriteBooster buffer is enabled. Writing 1 to this entry
+		restricts the device to flushing the WriteBooster buffer
+		only while it is runtime suspended. Writing 0 to this entry
+		allows the device to flush the WriteBooster buffer whenever
+		the link enters hibernation (hibern8).
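+
+		Example (the exact device path matched by the glob above
+		varies by platform):
+
+		  # flush only while runtime suspended (batched mode)
+		  echo 1 > /sys/bus/platform/drivers/ufshcd/*/wb_batched_flush
+		  # flush during any link hibernation (default)
+		  echo 0 > /sys/bus/platform/drivers/ufshcd/*/wb_batched_flush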
@@ -253,6 +253,51 @@ static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
return res < 0 ? res : count;
}
+
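+/*
+ * wb_batched_flush == 1: the WriteBooster buffer is flushed only while the
+ * device is suspended; == 0: it may be flushed during any link hibernation.
+ */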
+static ssize_t wb_batched_flush_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->vps->wb_batched_flush);
+}
+
+static ssize_t wb_batched_flush_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned int wb_batched_flush;
+ ssize_t res = 0;
+
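+	/* WriteBooster control requires UFSHCD_CAP_WB_EN on this host. */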
+ if (!ufshcd_is_wb_allowed(hba)) {
+		dev_warn(dev, "Controlling WB through wb_batched_flush is not allowed!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (kstrtouint(buf, 0, &wb_batched_flush))
+ return -EINVAL;
+
+ if (wb_batched_flush != 0 && wb_batched_flush != 1)
+ return -EINVAL;
+
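+	/* host_sem serializes this update with host suspend/shutdown. */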
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ res = -EBUSY;
+ goto out;
+ }
+
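+	/* Resume the device: toggling the flag sends a query to the UFS device. */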
+ pm_runtime_get_sync(hba->dev);
+ res = ufshcd_wb_toggle_flush_during_h8(hba, !wb_batched_flush);
+ pm_runtime_put_sync(hba->dev);
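+	/* Cache the new setting only if the device accepted the change. */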
+ if (!res)
+ hba->vps->wb_batched_flush = wb_batched_flush;
+
+out:
+ up(&hba->host_sem);
+ return res < 0 ? res : count;
+}
+
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -261,6 +306,7 @@ static DEVICE_ATTR_RO(spm_target_dev_state);
static DEVICE_ATTR_RO(spm_target_link_state);
static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
+static DEVICE_ATTR_RW(wb_batched_flush);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -271,6 +317,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_spm_target_link_state.attr,
&dev_attr_auto_hibern8.attr,
&dev_attr_wb_on.attr,
+ &dev_attr_wb_batched_flush.attr,
NULL
};
@@ -244,7 +244,6 @@ static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg);
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
@@ -277,7 +276,8 @@ static inline void ufshcd_wb_config(struct ufs_hba *hba)
ufshcd_wb_toggle(hba, true);
- ufshcd_wb_toggle_flush_during_h8(hba, true);
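+	/* In batched mode, keep flush-during-hibern8 off until the device suspends. */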
+ ufshcd_wb_toggle_flush_during_h8(hba, !hba->vps->wb_batched_flush);
+
if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
ufshcd_wb_toggle_flush(hba, true);
}
@@ -5472,7 +5472,7 @@ int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
return ret;
}
-static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
{
int ret;
@@ -5481,10 +5481,12 @@ static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
if (ret) {
dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n",
__func__, set ? "enable" : "disable", ret);
- return;
+ return ret;
}
dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n",
__func__, set ? "enabled" : "disabled");
+
+ return ret;
}
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
@@ -8745,6 +8747,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
if (ret)
goto enable_gating;
+ } else if (hba->vps->wb_batched_flush) {
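+		/* Batched mode: permit WB flushing only for the duration of the suspend. */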
+ ufshcd_wb_toggle_flush_during_h8(hba, true);
}
}
@@ -8925,6 +8929,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_auto_hibern8_enable(hba);
if (hba->dev_info.b_rpm_dev_flush_capable) {
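+		/* Batched mode: stop WB flushing now that the device is active again. */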
+ if (hba->vps->wb_batched_flush)
+ ufshcd_wb_toggle_flush_during_h8(hba, false);
hba->dev_info.b_rpm_dev_flush_capable = false;
cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
}
@@ -643,6 +643,7 @@ struct ufs_hba_variant_params {
struct devfreq_simple_ondemand_data ondemand_data;
u16 hba_enable_delay_us;
u32 wb_flush_threshold;
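+	/* Flush the WB buffer only while suspended (see wb_batched_flush sysfs). */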
+ bool wb_batched_flush;
};
/**
@@ -1105,6 +1106,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op);
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)