path: root/drivers/scsi/ufs
author      Linus Torvalds <torvalds@linux-foundation.org>  2021-02-22 21:24:58 +0300
committer   Linus Torvalds <torvalds@linux-foundation.org>  2021-02-22 21:24:58 +0300
commit      bdb39c9509e6d31943cb29dbb6ccd1b64013fb98 (patch)
tree        36bf88ee1db29c69f0e488b7f537b2907ebff095 /drivers/scsi/ufs
parent      325b764089c9bef2be45354db4f15e5b12ae406d (diff)
parent      d2aacd36a8e00bc1813841b482e3933acb1ea0b5 (diff)
download    linux-bdb39c9509e6d31943cb29dbb6ccd1b64013fb98.tar.xz
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This series consists of the usual driver updates (ufs, ibmvfc,
  qla2xxx, hisi_sas, pm80xx) plus the removal of the gdth driver (which
  is bound to cause conflicts with a trivial change somewhere).

  The only big major rework of note is the one from Hannes trying to
  clean up our result handling code in the drivers to make it
  consistent"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (194 commits)
  scsi: MAINTAINERS: Adjust to reflect gdth scsi driver removal
  scsi: ufs: Give clk scaling min gear a value
  scsi: lpfc: Fix 'physical' typos
  scsi: megaraid_mbox: Fix spelling of 'allocated'
  scsi: qla2xxx: Simplify the calculation of variables
  scsi: message: fusion: Fix 'physical' typos
  scsi: target: core: Change ASCQ for residual write
  scsi: target: core: Signal WRITE residuals
  scsi: target: core: Set residuals for 4Kn devices
  scsi: hisi_sas: Add trace FIFO debugfs support
  scsi: hisi_sas: Flush workqueue in hisi_sas_v3_remove()
  scsi: hisi_sas: Enable debugfs support by default
  scsi: hisi_sas: Don't check .nr_hw_queues in hisi_sas_task_prep()
  scsi: hisi_sas: Remove deferred probe check in hisi_sas_v2_probe()
  scsi: lpfc: Add auto select on IRQ_POLL
  scsi: ncr53c8xx: Fix typos
  scsi: lpfc: Fix ancient double free
  scsi: qla2xxx: Fix some memory corruption
  scsi: qla2xxx: Remove redundant NULL check
  scsi: megaraid: Fix ifnullfree.cocci warnings
  ...
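The result-handling rework mentioned above surfaces later in this diff in
ufshcd_scsi_cmd_status(), which stops OR-ing the obsolete COMMAND_COMPLETE
message byte into the SCSI result word. A minimal sketch of the before/after
packing (illustrative only, not patch content):

	/* Sketch: the SCSI result packs the host byte in bits 16-23 and the
	 * SCSI status in bits 0-7; the message byte (bits 8-15) is obsolete,
	 * so the cleanup simply stops setting it.
	 */
	u32 old_result = DID_OK << 16 | COMMAND_COMPLETE << 8 | scsi_status;
	u32 new_result = DID_OK << 16 | scsi_status;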
Diffstat (limited to 'drivers/scsi/ufs')
-rw-r--r--  drivers/scsi/ufs/Kconfig            14
-rw-r--r--  drivers/scsi/ufs/Makefile           13
-rw-r--r--  drivers/scsi/ufs/ufs-debugfs.c      56
-rw-r--r--  drivers/scsi/ufs/ufs-debugfs.h      22
-rw-r--r--  drivers/scsi/ufs/ufs-exynos.c        9
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.c      1
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c         18
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.c       175
-rw-r--r--  drivers/scsi/ufs/ufs.h              52
-rw-r--r--  drivers/scsi/ufs/ufshcd-crypto.c     4
-rw-r--r--  drivers/scsi/ufs/ufshcd.c          568
-rw-r--r--  drivers/scsi/ufs/ufshcd.h           41
12 files changed, 648 insertions, 325 deletions
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index b915b38c2b27..07cf415367b4 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -39,7 +39,7 @@ config SCSI_UFSHCD
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select NLS
help
- This selects the support for UFS devices in Linux, say Y and make
+ This selects the support for UFS devices in Linux, say Y and make
sure that you know the name of your UFS host adapter (the card
inside your computer that "speaks" the UFS protocol, also
called UFS Host Controller), because you will be asked for it.
@@ -54,8 +54,8 @@ config SCSI_UFSHCD_PCI
tristate "PCI bus based UFS Controller support"
depends on SCSI_UFSHCD && PCI
help
- This selects the PCI UFS Host Controller Interface. Select this if
- you have UFS Host Controller with PCI Interface.
+ This selects the PCI UFS Host Controller Interface. Select this if
+ you have UFS Host Controller with PCI Interface.
If you have a controller with this interface, say Y or M here.
@@ -74,10 +74,10 @@ config SCSI_UFSHCD_PLATFORM
depends on SCSI_UFSHCD
depends on HAS_IOMEM
help
- This selects the UFS host controller support. Select this if
- you have an UFS controller on Platform bus.
+ This selects the UFS host controller support. Select this if
+ you have an UFS controller on Platform bus.
- If you have a controller with this interface, say Y or M here.
+ If you have a controller with this interface, say Y or M here.
If unsure, say N.
@@ -85,7 +85,7 @@ config SCSI_UFS_CDNS_PLATFORM
tristate "Cadence UFS Controller platform driver"
depends on SCSI_UFSHCD_PLATFORM
help
- This selects the Cadence-specific additions to UFSHCD platform driver.
+ This selects the Cadence-specific additions to UFSHCD platform driver.
If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 4679af1b564e..06f3a3fe4a44 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,5 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
# UFSHCD makefile
+
+# The link order is important here. ufshcd-core must initialize
+# before vendor drivers.
+obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o
+ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
+ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
+ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
+
obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_CDNS_PLATFORM) += cdns-pltfrm.o
@@ -7,10 +16,6 @@ obj-$(CONFIG_SCSI_UFS_QCOM) += ufs_qcom.o
ufs_qcom-y += ufs-qcom.o
ufs_qcom-$(CONFIG_SCSI_UFS_CRYPTO) += ufs-qcom-ice.o
obj-$(CONFIG_SCSI_UFS_EXYNOS) += ufs-exynos.o
-obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-y += ufshcd.o ufs-sysfs.o
-ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
-ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
new file mode 100644
index 000000000000..dee98dc72d29
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#include <linux/debugfs.h>
+
+#include "ufs-debugfs.h"
+#include "ufshcd.h"
+
+static struct dentry *ufs_debugfs_root;
+
+void __init ufs_debugfs_init(void)
+{
+ ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL);
+}
+
+void __exit ufs_debugfs_exit(void)
+{
+ debugfs_remove_recursive(ufs_debugfs_root);
+}
+
+static int ufs_debugfs_stats_show(struct seq_file *s, void *data)
+{
+ struct ufs_hba *hba = s->private;
+ struct ufs_event_hist *e = hba->ufs_stats.event;
+
+#define PRT(fmt, typ) \
+ seq_printf(s, fmt, e[UFS_EVT_ ## typ].cnt)
+
+ PRT("PHY Adapter Layer errors (except LINERESET): %llu\n", PA_ERR);
+ PRT("Data Link Layer errors: %llu\n", DL_ERR);
+ PRT("Network Layer errors: %llu\n", NL_ERR);
+ PRT("Transport Layer errors: %llu\n", TL_ERR);
+ PRT("Generic DME errors: %llu\n", DME_ERR);
+ PRT("Auto-hibernate errors: %llu\n", AUTO_HIBERN8_ERR);
+ PRT("IS Fatal errors (CEFES, SBFES, HCFES, DFES): %llu\n", FATAL_ERR);
+ PRT("DME Link Startup errors: %llu\n", LINK_STARTUP_FAIL);
+ PRT("PM Resume errors: %llu\n", RESUME_ERR);
+ PRT("PM Suspend errors : %llu\n", SUSPEND_ERR);
+ PRT("Logical Unit Resets: %llu\n", DEV_RESET);
+ PRT("Host Resets: %llu\n", HOST_RESET);
+ PRT("SCSI command aborts: %llu\n", ABORT);
+#undef PRT
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ufs_debugfs_stats);
+
+void ufs_debugfs_hba_init(struct ufs_hba *hba)
+{
+ hba->debugfs_root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root);
+ debugfs_create_file("stats", 0400, hba->debugfs_root, hba, &ufs_debugfs_stats_fops);
+}
+
+void ufs_debugfs_hba_exit(struct ufs_hba *hba)
+{
+ debugfs_remove_recursive(hba->debugfs_root);
+}
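For orientation, the __init/__exit hooks above are meant to run once for the
whole ufshcd-core module (which is why the Makefile change above insists on
link order). A minimal sketch of that wiring, with the entry-point names
assumed for illustration rather than quoted from hunks not shown here:

	/* Sketch only: typical module-level wiring for the debugfs hooks.
	 * The function names below are assumptions.
	 */
	static int __init ufshcd_core_init(void)
	{
		ufs_debugfs_init();	/* create the shared "ufshcd" dir */
		return 0;
	}

	static void __exit ufshcd_core_exit(void)
	{
		ufs_debugfs_exit();	/* remove it and all per-HBA dirs */
	}

	module_init(ufshcd_core_init);
	module_exit(ufshcd_core_exit);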
diff --git a/drivers/scsi/ufs/ufs-debugfs.h b/drivers/scsi/ufs/ufs-debugfs.h
new file mode 100644
index 000000000000..f35b39c4b4f5
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-debugfs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Intel Corporation
+ */
+
+#ifndef __UFS_DEBUGFS_H__
+#define __UFS_DEBUGFS_H__
+
+struct ufs_hba;
+
+#ifdef CONFIG_DEBUG_FS
+void __init ufs_debugfs_init(void);
+void __exit ufs_debugfs_exit(void);
+void ufs_debugfs_hba_init(struct ufs_hba *hba);
+void ufs_debugfs_hba_exit(struct ufs_hba *hba);
+#else
+static inline void ufs_debugfs_init(void) {}
+static inline void ufs_debugfs_exit(void) {}
+static inline void ufs_debugfs_hba_init(struct ufs_hba *hba) {}
+static inline void ufs_debugfs_hba_exit(struct ufs_hba *hba) {}
+#endif
+
+#endif
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index a8770ff14588..267943a13a94 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -640,6 +640,11 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
}
}
+ /* setting for three timeout values for traffic class #0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 8064);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 28224);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 20160);
+
return 0;
out:
return ret;
@@ -1236,7 +1241,9 @@ struct exynos_ufs_drv_data exynos_ufs_drvs = {
UFSHCI_QUIRK_BROKEN_HCE |
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
- UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL,
+ UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
+ UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE,
.opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 80618af7c872..c55202b92a43 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -661,6 +661,7 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable WriteBooster */
hba->caps |= UFSHCD_CAP_WB_EN;
+ hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 2206b1e4b774..f97d7b0ae3b6 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -568,6 +568,17 @@ out:
return err;
}
+static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ /* reset gpio is optional */
+ if (!host->device_reset)
+ return;
+
+ gpiod_set_value_cansleep(host->device_reset, asserted);
+}
+
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -582,6 +593,9 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufs_qcom_disable_lane_clks(host);
phy_power_off(phy);
+ /* reset the connected UFS device during power down */
+ ufs_qcom_device_reset_ctrl(hba, true);
+
} else if (!ufs_qcom_is_link_active(hba)) {
ufs_qcom_disable_lane_clks(host);
}
@@ -1421,10 +1435,10 @@ static int ufs_qcom_device_reset(struct ufs_hba *hba)
* The UFS device shall detect reset pulses of 1us, sleep for 10us to
* be on the safe side.
*/
- gpiod_set_value_cansleep(host->device_reset, 1);
+ ufs_qcom_device_reset_ctrl(hba, true);
usleep_range(10, 15);
- gpiod_set_value_cansleep(host->device_reset, 0);
+ ufs_qcom_device_reset_ctrl(hba, false);
usleep_range(10, 15);
return 0;
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 08e72b7eef6a..acc54f530f2d 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -67,7 +67,7 @@ static ssize_t rpm_lvl_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hba->rpm_lvl);
+ return sysfs_emit(buf, "%d\n", hba->rpm_lvl);
}
static ssize_t rpm_lvl_store(struct device *dev,
@@ -81,7 +81,7 @@ static ssize_t rpm_target_dev_state_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", ufschd_ufs_dev_pwr_mode_to_string(
+ return sysfs_emit(buf, "%s\n", ufschd_ufs_dev_pwr_mode_to_string(
ufs_pm_lvl_states[hba->rpm_lvl].dev_state));
}
@@ -90,7 +90,7 @@ static ssize_t rpm_target_link_state_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", ufschd_uic_link_state_to_string(
+ return sysfs_emit(buf, "%s\n", ufschd_uic_link_state_to_string(
ufs_pm_lvl_states[hba->rpm_lvl].link_state));
}
@@ -99,7 +99,7 @@ static ssize_t spm_lvl_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hba->spm_lvl);
+ return sysfs_emit(buf, "%d\n", hba->spm_lvl);
}
static ssize_t spm_lvl_store(struct device *dev,
@@ -113,7 +113,7 @@ static ssize_t spm_target_dev_state_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", ufschd_ufs_dev_pwr_mode_to_string(
+ return sysfs_emit(buf, "%s\n", ufschd_ufs_dev_pwr_mode_to_string(
ufs_pm_lvl_states[hba->spm_lvl].dev_state));
}
@@ -122,7 +122,7 @@ static ssize_t spm_target_link_state_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", ufschd_uic_link_state_to_string(
+ return sysfs_emit(buf, "%s\n", ufschd_uic_link_state_to_string(
ufs_pm_lvl_states[hba->spm_lvl].link_state));
}
@@ -154,18 +154,29 @@ static ssize_t auto_hibern8_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 ahit;
+ int ret;
struct ufs_hba *hba = dev_get_drvdata(dev);
if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
pm_runtime_get_sync(hba->dev);
ufshcd_hold(hba, false);
ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", ufshcd_ahit_to_us(ahit));
+ ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
+
+out:
+ up(&hba->host_sem);
+ return ret;
}
static ssize_t auto_hibern8_store(struct device *dev,
@@ -174,6 +185,7 @@ static ssize_t auto_hibern8_store(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned int timer;
+ int ret = 0;
if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
@@ -184,9 +196,61 @@ static ssize_t auto_hibern8_store(struct device *dev,
if (timer > UFSHCI_AHIBERN8_MAX)
return -EINVAL;
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer));
- return count;
+out:
+ up(&hba->host_sem);
+ return ret ? ret : count;
+}
+
+static ssize_t wb_on_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->dev_info.wb_enabled);
+}
+
+static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned int wb_enable;
+ ssize_t res;
+
+ if (!ufshcd_is_wb_allowed(hba) || ufshcd_is_clkscaling_supported(hba)) {
+ /*
+ * If the platform supports UFSHCD_CAP_CLK_SCALING, turn WB
+ * on/off will be done while clock scaling up/down.
+ */
+ dev_warn(dev, "To control WB through wb_on is not allowed!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (kstrtouint(buf, 0, &wb_enable))
+ return -EINVAL;
+
+ if (wb_enable != 0 && wb_enable != 1)
+ return -EINVAL;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ res = -EBUSY;
+ goto out;
+ }
+
+ pm_runtime_get_sync(hba->dev);
+ res = ufshcd_wb_ctrl(hba, wb_enable);
+ pm_runtime_put_sync(hba->dev);
+out:
+ up(&hba->host_sem);
+ return res < 0 ? res : count;
}
static DEVICE_ATTR_RW(rpm_lvl);
@@ -196,6 +260,7 @@ static DEVICE_ATTR_RW(spm_lvl);
static DEVICE_ATTR_RO(spm_target_dev_state);
static DEVICE_ATTR_RO(spm_target_link_state);
static DEVICE_ATTR_RW(auto_hibern8);
+static DEVICE_ATTR_RW(wb_on);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -205,6 +270,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_spm_target_dev_state.attr,
&dev_attr_spm_target_link_state.attr,
&dev_attr_auto_hibern8.attr,
+ &dev_attr_wb_on.attr,
NULL
};
@@ -225,30 +291,41 @@ static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
if (param_size > 8)
return -EINVAL;
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
pm_runtime_get_sync(hba->dev);
ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
param_offset, desc_buf, param_size);
pm_runtime_put_sync(hba->dev);
- if (ret)
- return -EINVAL;
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
switch (param_size) {
case 1:
- ret = sprintf(sysfs_buf, "0x%02X\n", *desc_buf);
+ ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf);
break;
case 2:
- ret = sprintf(sysfs_buf, "0x%04X\n",
+ ret = sysfs_emit(sysfs_buf, "0x%04X\n",
get_unaligned_be16(desc_buf));
break;
case 4:
- ret = sprintf(sysfs_buf, "0x%08X\n",
+ ret = sysfs_emit(sysfs_buf, "0x%08X\n",
get_unaligned_be32(desc_buf));
break;
case 8:
- ret = sprintf(sysfs_buf, "0x%016llX\n",
+ ret = sysfs_emit(sysfs_buf, "0x%016llX\n",
get_unaligned_be64(desc_buf));
break;
}
+out:
+ up(&hba->host_sem);
return ret;
}
@@ -591,9 +668,16 @@ static ssize_t _name##_show(struct device *dev, \
int desc_len = QUERY_DESC_MAX_SIZE; \
u8 *desc_buf; \
\
+ down(&hba->host_sem); \
+ if (!ufshcd_is_user_access_allowed(hba)) { \
+ up(&hba->host_sem); \
+ return -EBUSY; \
+ } \
desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \
- if (!desc_buf) \
- return -ENOMEM; \
+ if (!desc_buf) { \
+ up(&hba->host_sem); \
+ return -ENOMEM; \
+ } \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_descriptor_retry(hba, \
UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \
@@ -609,10 +693,11 @@ static ssize_t _name##_show(struct device *dev, \
SD_ASCII_STD); \
if (ret < 0) \
goto out; \
- ret = snprintf(buf, PAGE_SIZE, "%s\n", desc_buf); \
+ ret = sysfs_emit(buf, "%s\n", desc_buf); \
out: \
pm_runtime_put_sync(hba->dev); \
kfree(desc_buf); \
+ up(&hba->host_sem); \
return ret; \
} \
static DEVICE_ATTR_RO(_name)
@@ -651,15 +736,26 @@ static ssize_t _name##_show(struct device *dev, \
u8 index = 0; \
int ret; \
struct ufs_hba *hba = dev_get_drvdata(dev); \
+ \
+ down(&hba->host_sem); \
+ if (!ufshcd_is_user_access_allowed(hba)) { \
+ up(&hba->host_sem); \
+ return -EBUSY; \
+ } \
if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \
index = ufshcd_wb_get_query_index(hba); \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
QUERY_FLAG_IDN##_uname, index, &flag); \
pm_runtime_put_sync(hba->dev); \
- if (ret) \
- return -EINVAL; \
- return sprintf(buf, "%s\n", flag ? "true" : "false"); \
+ if (ret) { \
+ ret = -EINVAL; \
+ goto out; \
+ } \
+ ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \
+out: \
+ up(&hba->host_sem); \
+ return ret; \
} \
static DEVICE_ATTR_RO(_name)
@@ -709,15 +805,26 @@ static ssize_t _name##_show(struct device *dev, \
u32 value; \
int ret; \
u8 index = 0; \
+ \
+ down(&hba->host_sem); \
+ if (!ufshcd_is_user_access_allowed(hba)) { \
+ up(&hba->host_sem); \
+ return -EBUSY; \
+ } \
if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \
index = ufshcd_wb_get_query_index(hba); \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
QUERY_ATTR_IDN##_uname, index, 0, &value); \
pm_runtime_put_sync(hba->dev); \
- if (ret) \
- return -EINVAL; \
- return sprintf(buf, "0x%08X\n", value); \
+ if (ret) { \
+ ret = -EINVAL; \
+ goto out; \
+ } \
+ ret = sysfs_emit(buf, "0x%08X\n", value); \
+out: \
+ up(&hba->host_sem); \
+ return ret; \
} \
static DEVICE_ATTR_RO(_name)
@@ -792,7 +899,8 @@ static ssize_t _pname##_show(struct device *dev, \
struct scsi_device *sdev = to_scsi_device(dev); \
struct ufs_hba *hba = shost_priv(sdev->host); \
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \
- if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \
+ if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, \
+ _duname##_DESC_PARAM##_puname)) \
return -EINVAL; \
return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
lun, _duname##_DESC_PARAM##_puname, buf, _size); \
@@ -850,13 +958,26 @@ static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
int ret;
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
pm_runtime_get_sync(hba->dev);
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
pm_runtime_put_sync(hba->dev);
- if (ret)
- return -EINVAL;
- return sprintf(buf, "0x%08X\n", value);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = sysfs_emit(buf, "0x%08X\n", value);
+
+out:
+ up(&hba->host_sem);
+ return ret;
}
static DEVICE_ATTR_RO(dyn_cap_needed_attribute);
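Every show()/store() handler touched above gains the same guard: take
hba->host_sem, fail with -EBUSY while user access is blocked (error handling,
suspend or shutdown in progress), and only then issue the runtime-PM-wrapped
query. A condensed sketch of the pattern, using a generic attribute name
rather than a specific handler from the patch:

	/* Sketch of the host_sem guard pattern used throughout ufs-sysfs.c. */
	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		struct ufs_hba *hba = dev_get_drvdata(dev);
		ssize_t ret;

		down(&hba->host_sem);
		if (!ufshcd_is_user_access_allowed(hba)) {
			ret = -EBUSY;	/* host busy with EH/suspend/shutdown */
			goto out;
		}

		pm_runtime_get_sync(hba->dev);	/* wake the device to query it */
		ret = sysfs_emit(buf, "%d\n", 0 /* value read from the device */);
		pm_runtime_put_sync(hba->dev);
	out:
		up(&hba->host_sem);
		return ret;
	}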
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 14dfda735adf..bf1897a72532 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -527,22 +527,42 @@ struct ufs_vreg_info {
};
struct ufs_dev_info {
- bool f_power_on_wp_en;
+ bool f_power_on_wp_en;
/* Keeps information if any of the LU is power on write protected */
- bool is_lu_power_on_wp;
+ bool is_lu_power_on_wp;
/* Maximum number of general LU supported by the UFS device */
- u8 max_lu_supported;
- u8 wb_dedicated_lu;
- u16 wmanufacturerid;
+ u8 max_lu_supported;
+ u16 wmanufacturerid;
/*UFS device Product Name */
- u8 *model;
- u16 wspecversion;
- u32 clk_gating_wait_us;
- u32 d_ext_ufs_feature_sup;
- u8 b_wb_buffer_type;
- u32 d_wb_alloc_units;
- bool b_rpm_dev_flush_capable;
- u8 b_presrv_uspc_en;
+ u8 *model;
+ u16 wspecversion;
+ u32 clk_gating_wait_us;
+
+ /* UFS WB related flags */
+ bool wb_enabled;
+ bool wb_buf_flush_enabled;
+ u8 wb_dedicated_lu;
+ u8 wb_buffer_type;
+
+ bool b_rpm_dev_flush_capable;
+ u8 b_presrv_uspc_en;
+};
+
+/*
+ * This enum is used in string mapping in include/trace/events/ufs.h.
+ */
+enum ufs_trace_str_t {
+ UFS_CMD_SEND, UFS_CMD_COMP, UFS_DEV_COMP,
+ UFS_QUERY_SEND, UFS_QUERY_COMP, UFS_QUERY_ERR,
+ UFS_TM_SEND, UFS_TM_COMP, UFS_TM_ERR
+};
+
+/*
+ * Transaction Specific Fields (TSF) type in the UPIU package, this enum is
+ * used in include/trace/events/ufs.h for UFS command trace.
+ */
+enum ufs_trace_tsf_t {
+ UFS_TSF_CDB, UFS_TSF_OSF, UFS_TSF_TM_INPUT, UFS_TSF_TM_OUTPUT
};
/**
@@ -552,13 +572,15 @@ struct ufs_dev_info {
* @return: true if the lun has a matching unit descriptor, false otherwise
*/
static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
- u8 lun)
+ u8 lun, u8 param_offset)
{
if (!dev_info || !dev_info->max_lu_supported) {
pr_err("Max General LU supported by UFS isn't initialized\n");
return false;
}
-
+ /* WB is available only for the logical unit from 0 to 7 */
+ if (param_offset == UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS)
+ return lun < UFS_UPIU_MAX_WB_LUN_ID;
return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
}
diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c
index 153dd5765d9c..d70cdcd35e43 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.c
+++ b/drivers/scsi/ufs/ufshcd-crypto.c
@@ -182,7 +182,7 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
err = devm_blk_ksm_init(hba->dev, &hba->ksm,
hba->crypto_capabilities.config_count + 1);
if (err)
- goto out_free_caps;
+ goto out;
hba->ksm.ksm_ll_ops = ufshcd_ksm_ops;
/* UFS only supports 8 bytes for any DUN */
@@ -208,8 +208,6 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
return 0;
-out_free_caps:
- devm_kfree(hba->dev, hba->crypto_cap_array);
out:
/* Indicate that init failed by clearing UFSHCD_CAP_CRYPTO */
hba->caps &= ~UFSHCD_CAP_CRYPTO;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 4c24eb782835..721f55db181f 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -20,6 +20,7 @@
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
+#include "ufs-debugfs.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
#include <asm/unaligned.h>
@@ -94,6 +95,8 @@
16, 4, buf, __len, false); \
} while (0)
+static bool early_suspend;
+
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
const char *prefix)
{
@@ -244,11 +247,8 @@ static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg);
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
-static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
-static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+static inline int ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
@@ -306,53 +306,67 @@ static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
}
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
- const char *str)
+ enum ufs_trace_str_t str_t)
{
struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
- trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
+ if (!trace_ufshcd_upiu_enabled())
+ return;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq->header, &rq->sc.cdb,
+ UFS_TSF_CDB);
}
-static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
- const char *str)
+static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
+ enum ufs_trace_str_t str_t,
+ struct utp_upiu_req *rq_rsp)
{
- struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+ if (!trace_ufshcd_upiu_enabled())
+ return;
- trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
+ &rq_rsp->qr, UFS_TSF_OSF);
}
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
- const char *str)
+ enum ufs_trace_str_t str_t)
{
int off = (int)tag - hba->nutrs;
struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
- trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
- &descp->input_param1);
+ if (!trace_ufshcd_upiu_enabled())
+ return;
+
+ if (str_t == UFS_TM_SEND)
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, &descp->req_header,
+ &descp->input_param1, UFS_TSF_TM_INPUT);
+ else
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, &descp->rsp_header,
+ &descp->output_param1, UFS_TSF_TM_OUTPUT);
}
static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
struct uic_command *ucmd,
- const char *str)
+ enum ufs_trace_str_t str_t)
{
u32 cmd;
if (!trace_ufshcd_uic_command_enabled())
return;
- if (!strcmp(str, "send"))
+ if (str_t == UFS_CMD_SEND)
cmd = ucmd->command;
else
cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
- trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd,
+ trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}
-static void ufshcd_add_command_trace(struct ufs_hba *hba,
- unsigned int tag, const char *str)
+static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
+ enum ufs_trace_str_t str_t)
{
sector_t lba = -1;
u8 opcode = 0, group_id = 0;
@@ -364,13 +378,13 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
if (!trace_ufshcd_command_enabled()) {
/* trace UPIU W/O tracing command */
if (cmd)
- ufshcd_add_cmd_upiu_trace(hba, tag, str);
+ ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
return;
}
if (cmd) { /* data phase exists */
/* trace UPIU also */
- ufshcd_add_cmd_upiu_trace(hba, tag, str);
+ ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
opcode = cmd->cmnd[0];
if ((opcode == READ_10) || (opcode == WRITE_10)) {
/*
@@ -393,7 +407,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- trace_ufshcd_command(dev_name(hba->dev), str, tag,
+ trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
doorbell, transfer_len, intr, lba, opcode, group_id);
}
@@ -591,8 +605,8 @@ static void ufshcd_device_reset(struct ufs_hba *hba)
if (!err) {
ufshcd_set_ufs_dev_active(hba);
if (ufshcd_is_wb_allowed(hba)) {
- hba->wb_enabled = false;
- hba->wb_buf_flush_enabled = false;
+ hba->dev_info.wb_enabled = false;
+ hba->dev_info.wb_buf_flush_enabled = false;
}
}
if (err != -EOPNOTSUPP)
@@ -1182,19 +1196,30 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
*/
ufshcd_scsi_block_requests(hba);
down_write(&hba->clk_scaling_lock);
- if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+
+ if (!hba->clk_scaling.is_allowed ||
+ ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
ufshcd_scsi_unblock_requests(hba);
+ goto out;
}
+ /* let's not get into low power until clock scaling is completed */
+ ufshcd_hold(hba, false);
+
+out:
return ret;
}
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
{
- up_write(&hba->clk_scaling_lock);
+ if (writelock)
+ up_write(&hba->clk_scaling_lock);
+ else
+ up_read(&hba->clk_scaling_lock);
ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release(hba);
}
/**
@@ -1209,13 +1234,11 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
-
- /* let's not get into low power until clock scaling is completed */
- ufshcd_hold(hba, false);
+ bool is_writelock = true;
ret = ufshcd_clock_scaling_prepare(hba);
if (ret)
- goto out;
+ return ret;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
@@ -1241,14 +1264,12 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
}
/* Enable Write Booster if we have scaled up else disable it */
- up_write(&hba->clk_scaling_lock);
+ downgrade_write(&hba->clk_scaling_lock);
+ is_writelock = false;
ufshcd_wb_ctrl(hba, scale_up);
- down_write(&hba->clk_scaling_lock);
out_unprepare:
- ufshcd_clock_scaling_unprepare(hba);
-out:
- ufshcd_release(hba);
+ ufshcd_clock_scaling_unprepare(hba, is_writelock);
return ret;
}
@@ -1329,15 +1350,8 @@ static int ufshcd_devfreq_target(struct device *dev,
}
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- pm_runtime_get_noresume(hba->dev);
- if (!pm_runtime_active(hba->dev)) {
- pm_runtime_put_noidle(hba->dev);
- ret = -EAGAIN;
- goto out;
- }
start = ktime_get();
ret = ufshcd_devfreq_scale(hba, scale_up);
- pm_runtime_put(hba->dev);
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
@@ -1484,8 +1498,8 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
unsigned long flags;
bool suspend = false;
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
spin_lock_irqsave(hba->host->host_lock, flags);
if (!hba->clk_scaling.is_suspended) {
@@ -1503,9 +1517,6 @@ static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
unsigned long flags;
bool resume = false;
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_scaling.is_suspended) {
resume = true;
@@ -1522,7 +1533,7 @@ static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled);
}
static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
@@ -1530,22 +1541,25 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
u32 value;
- int err;
+ int err = 0;
if (kstrtou32(buf, 0, &value))
return -EINVAL;
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ err = -EBUSY;
+ goto out;
+ }
+
value = !!value;
- if (value == hba->clk_scaling.is_allowed)
+ if (value == hba->clk_scaling.is_enabled)
goto out;
pm_runtime_get_sync(hba->dev);
ufshcd_hold(hba, false);
- cancel_work_sync(&hba->clk_scaling.suspend_work);
- cancel_work_sync(&hba->clk_scaling.resume_work);
-
- hba->clk_scaling.is_allowed = value;
+ hba->clk_scaling.is_enabled = value;
if (value) {
ufshcd_resume_clkscaling(hba);
@@ -1560,10 +1574,11 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
out:
- return count;
+ up(&hba->host_sem);
+ return err ? err : count;
}
-static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
+static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
{
hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
@@ -1574,6 +1589,45 @@ static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
+static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
+{
+ if (hba->clk_scaling.enable_attr.attr.name)
+ device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+}
+
+static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
+{
+ char wq_name[sizeof("ufs_clkscaling_00")];
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ if (!hba->clk_scaling.min_gear)
+ hba->clk_scaling.min_gear = UFS_HS_G1;
+
+ INIT_WORK(&hba->clk_scaling.suspend_work,
+ ufshcd_clk_scaling_suspend_work);
+ INIT_WORK(&hba->clk_scaling.resume_work,
+ ufshcd_clk_scaling_resume_work);
+
+ snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
+ hba->host->host_no);
+ hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+ hba->clk_scaling.is_initialized = true;
+}
+
+static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+{
+ if (!hba->clk_scaling.is_initialized)
+ return;
+
+ ufshcd_remove_clk_scaling_sysfs(hba);
+ destroy_workqueue(hba->clk_scaling.workq);
+ ufshcd_devfreq_remove(hba);
+ hba->clk_scaling.is_initialized = false;
+}
+
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
@@ -1865,35 +1919,31 @@ out:
return count;
}
-static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
+static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
{
- char wq_name[sizeof("ufs_clkscaling_00")];
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- if (!hba->clk_scaling.min_gear)
- hba->clk_scaling.min_gear = UFS_HS_G1;
-
- INIT_WORK(&hba->clk_scaling.suspend_work,
- ufshcd_clk_scaling_suspend_work);
- INIT_WORK(&hba->clk_scaling.resume_work,
- ufshcd_clk_scaling_resume_work);
-
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
- hba->host->host_no);
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+ hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
+ hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
+ sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
+ hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
+ hba->clk_gating.delay_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
- ufshcd_clkscaling_init_sysfs(hba);
+ hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
+ hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
+ sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
+ hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
+ hba->clk_gating.enable_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
-static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
{
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- destroy_workqueue(hba->clk_scaling.workq);
- ufshcd_devfreq_remove(hba);
+ if (hba->clk_gating.delay_attr.attr.name)
+ device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+ if (hba->clk_gating.enable_attr.attr.name)
+ device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
}
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
@@ -1914,34 +1964,21 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
WQ_MEM_RECLAIM | WQ_HIGHPRI);
- hba->clk_gating.is_enabled = true;
-
- hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
- hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
- sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
- hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
- hba->clk_gating.delay_attr.attr.mode = 0644;
- if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
- dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+ ufshcd_init_clk_gating_sysfs(hba);
- hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
- hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
- sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
- hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
- hba->clk_gating.enable_attr.attr.mode = 0644;
- if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
- dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
+ hba->clk_gating.is_enabled = true;
+ hba->clk_gating.is_initialized = true;
}
static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
- if (!ufshcd_is_clkgating_allowed(hba))
+ if (!hba->clk_gating.is_initialized)
return;
- device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
- device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
+ ufshcd_remove_clk_gating_sysfs(hba);
cancel_work_sync(&hba->clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
destroy_workqueue(hba->clk_gating.clk_gating_workq);
+ hba->clk_gating.is_initialized = false;
}
/* Must be called with host lock acquired */
@@ -1956,7 +1993,7 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
- if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
return;
if (queue_resume_work)
@@ -2002,7 +2039,7 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
lrbp->issue_time_stamp = ktime_get();
lrbp->compl_time_stamp = ktime_set(0, 0);
ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
- ufshcd_add_command_trace(hba, task_tag, "send");
+ ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -2138,7 +2175,7 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
- ufshcd_add_uic_command_trace(hba, uic_cmd, "send");
+ ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
/* Write UIC Cmd */
ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
@@ -2857,7 +2894,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
- ufshcd_add_query_upiu_trace(hba, tag, "query_send");
+ ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2867,8 +2904,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
out:
- ufshcd_add_query_upiu_trace(hba, tag,
- err ? "query_complete_err" : "query_complete");
+ ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
+ (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out_put_tag:
blk_put_request(req);
@@ -3425,7 +3462,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
* Unit descriptors are only available for general purpose LUs (LUN id
* from 0 to 7) and RPMB Well known LU.
*/
- if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
+ if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
return -EOPNOTSUPP;
return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@ -4218,25 +4255,27 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
pwr_mode->hs_rate);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
- DL_FC0ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
- DL_TC0ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
- DL_AFC0ReqTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
- DL_FC1ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
- DL_TC1ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
- DL_AFC1ReqTimeOutVal_Default);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
- DL_FC0ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
- DL_TC0ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
- DL_AFC0ReqTimeOutVal_Default);
+ if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+ DL_AFC0ReqTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+ DL_FC1ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+ DL_TC1ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+ DL_AFC1ReqTimeOutVal_Default);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+ DL_AFC0ReqTimeOutVal_Default);
+ }
ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
| pwr_mode->pwr_tx);
@@ -4543,6 +4582,7 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
e = &hba->ufs_stats.event[id];
e->val[e->pos] = val;
e->tstamp[e->pos] = ktime_get();
+ e->cnt += 1;
e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
ufshcd_vops_event_notify(hba, id, &val);
@@ -4827,6 +4867,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
struct request_queue *q = sdev->request_queue;
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+ if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
+ blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
if (ufshcd_is_rpm_autosuspend_allowed(hba))
sdev->rpm_autosuspend = 1;
@@ -4872,9 +4914,7 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
ufshcd_copy_sense_data(lrbp);
fallthrough;
case SAM_STAT_GOOD:
- result |= DID_OK << 16 |
- COMMAND_COMPLETE << 8 |
- scsi_status;
+ result |= DID_OK << 16 | scsi_status;
break;
case SAM_STAT_TASK_SET_FULL:
case SAM_STAT_BUSY:
@@ -5032,7 +5072,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
if (retval == IRQ_HANDLED)
ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
- "complete");
+ UFS_CMD_COMP);
return retval;
}
@@ -5056,7 +5096,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp->compl_time_stamp = ktime_get();
cmd = lrbp->cmd;
if (cmd) {
- ufshcd_add_command_trace(hba, index, "complete");
+ ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
result = ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
cmd->result = result;
@@ -5070,7 +5110,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete) {
ufshcd_add_command_trace(hba, index,
- "dev_complete");
+ UFS_DEV_COMP);
complete(hba->dev_cmd.complete);
update_scaling = true;
}
@@ -5390,7 +5430,7 @@ out:
__func__, err);
}
-static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
+int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
{
int ret;
u8 index;
@@ -5399,7 +5439,7 @@ static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
if (!ufshcd_is_wb_allowed(hba))
return 0;
- if (!(enable ^ hba->wb_enabled))
+ if (!(enable ^ hba->dev_info.wb_enabled))
return 0;
if (enable)
opcode = UPIU_QUERY_OPCODE_SET_FLAG;
@@ -5415,7 +5455,7 @@ static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
return ret;
}
- hba->wb_enabled = enable;
+ hba->dev_info.wb_enabled = enable;
dev_dbg(hba->dev, "%s write booster %s %d\n",
__func__, enable ? "enable" : "disable", ret);
@@ -5438,58 +5478,37 @@ static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
index, NULL);
}
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
-{
- if (enable)
- ufshcd_wb_buf_flush_enable(hba);
- else
- ufshcd_wb_buf_flush_disable(hba);
-
-}
-
-static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
+static inline int ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
{
int ret;
u8 index;
+ enum query_opcode opcode;
- if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
+ if (!ufshcd_is_wb_allowed(hba) ||
+ hba->dev_info.wb_buf_flush_enabled == enable)
return 0;
- index = ufshcd_wb_get_query_index(hba);
- ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
- index, NULL);
- if (ret)
- dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
- __func__, ret);
+ if (enable)
+ opcode = UPIU_QUERY_OPCODE_SET_FLAG;
else
- hba->wb_buf_flush_enabled = true;
-
- dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
- return ret;
-}
-
-static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
-{
- int ret;
- u8 index;
-
- if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
- return 0;
+ opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
index = ufshcd_wb_get_query_index(hba);
- ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
- QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
- index, NULL);
+ ret = ufshcd_query_flag_retry(hba, opcode,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN, index,
+ NULL);
if (ret) {
- dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
- __func__, ret);
- } else {
- hba->wb_buf_flush_enabled = false;
- dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
+ dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
+ enable ? "enable" : "disable", ret);
+ goto out;
}
+ hba->dev_info.wb_buf_flush_enabled = enable;
+
+ dev_dbg(hba->dev, "WB-Buf Flush %s\n", enable ? "enabled" : "disabled");
+out:
return ret;
+
}
static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
@@ -5714,6 +5733,26 @@ static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
}
}
+static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
+{
+ down_write(&hba->clk_scaling_lock);
+ hba->clk_scaling.is_allowed = allow;
+ up_write(&hba->clk_scaling_lock);
+}
+
+static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
+{
+ if (suspend) {
+ if (hba->clk_scaling.is_enabled)
+ ufshcd_suspend_clkscaling(hba);
+ ufshcd_clk_scaling_allow(hba, false);
+ } else {
+ ufshcd_clk_scaling_allow(hba, true);
+ if (hba->clk_scaling.is_enabled)
+ ufshcd_resume_clkscaling(hba);
+ }
+}
+
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
pm_runtime_get_sync(hba->dev);
@@ -5738,27 +5777,27 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
ufshcd_vops_resume(hba, pm_op);
} else {
ufshcd_hold(hba, false);
- if (hba->clk_scaling.is_allowed) {
- cancel_work_sync(&hba->clk_scaling.suspend_work);
- cancel_work_sync(&hba->clk_scaling.resume_work);
+ if (ufshcd_is_clkscaling_supported(hba) &&
+ hba->clk_scaling.is_enabled)
ufshcd_suspend_clkscaling(hba);
- }
+ ufshcd_clk_scaling_allow(hba, false);
}
}
static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
{
ufshcd_release(hba);
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_clk_scaling_suspend(hba, false);
pm_runtime_put(hba->dev);
}
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
- return (!hba->is_powered || hba->ufshcd_state == UFSHCD_STATE_ERROR ||
+ return (!hba->is_powered || hba->shutting_down ||
+ hba->ufshcd_state == UFSHCD_STATE_ERROR ||
(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
- ufshcd_is_link_broken(hba))));
+ ufshcd_is_link_broken(hba))));
}
#ifdef CONFIG_PM
@@ -5828,13 +5867,13 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
- down(&hba->eh_sem);
+ down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_err_handling_should_stop(hba)) {
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
- up(&hba->eh_sem);
+ up(&hba->host_sem);
return;
}
ufshcd_set_eh_in_progress(hba);
@@ -6003,7 +6042,7 @@ skip_err_handling:
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_scsi_unblock_requests(hba);
ufshcd_err_handling_unprepare(hba);
- up(&hba->eh_sem);
+ up(&hba->host_sem);
if (!err && needs_reset)
ufshcd_clear_ua_wluns(hba);
@@ -6293,8 +6332,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
while (intr_status && retries--) {
enabled_intr_status =
intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (intr_status)
- ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
if (enabled_intr_status)
retval |= ufshcd_sl_intr(hba, enabled_intr_status);
@@ -6380,7 +6418,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
spin_unlock_irqrestore(host->host_lock, flags);
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
+ ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
/* wait until the task management command is completed */
err = wait_for_completion_io_timeout(&wait,
@@ -6391,7 +6429,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
* use-after-free.
*/
req->end_io_data = NULL;
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
+ ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
__func__, tm_function);
if (ufshcd_clear_tm_cmd(hba, free_slot))
@@ -6402,7 +6440,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
err = 0;
memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
+ ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
}
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -7248,6 +7286,7 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
struct ufs_dev_info *dev_info = &hba->dev_info;
u8 lun;
u32 d_lu_wb_buf_alloc;
+ u32 ext_ufs_feature;
if (!ufshcd_is_wb_allowed(hba))
return;
@@ -7265,30 +7304,25 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
goto wb_disabled;
- dev_info->d_ext_ufs_feature_sup =
- get_unaligned_be32(desc_buf +
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+ ext_ufs_feature = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
- if (!(dev_info->d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
+ if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
goto wb_disabled;
/*
- * WB may be supported but not configured while provisioning.
- * The spec says, in dedicated wb buffer mode,
- * a max of 1 lun would have wb buffer configured.
- * Now only shared buffer mode is supported.
+ * WB may be supported but not configured while provisioning. The spec
+ * says, in dedicated wb buffer mode, a max of 1 lun would have wb
+ * buffer configured.
*/
- dev_info->b_wb_buffer_type =
- desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+ dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
dev_info->b_presrv_uspc_en =
desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
- if (dev_info->b_wb_buffer_type == WB_BUF_MODE_SHARED) {
- dev_info->d_wb_alloc_units =
- get_unaligned_be32(desc_buf +
- DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
- if (!dev_info->d_wb_alloc_units)
+ if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
+ if (!get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
goto wb_disabled;
} else {
for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
@@ -7734,13 +7768,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
&hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
hba->clk_scaling.saved_pwr_info.is_valid = true;
- if (!hba->devfreq) {
- ret = ufshcd_devfreq_init(hba);
- if (ret)
- goto out;
- }
-
hba->clk_scaling.is_allowed = true;
+
+ ret = ufshcd_devfreq_init(hba);
+ if (ret)
+ goto out;
+
+ hba->clk_scaling.is_enabled = true;
+ ufshcd_init_clk_scaling_sysfs(hba);
}
ufs_bsg_probe(hba);
@@ -7911,10 +7946,10 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
struct ufs_hba *hba = (struct ufs_hba *)data;
int ret;
- down(&hba->eh_sem);
+ down(&hba->host_sem);
/* Initialize hba, detect and initialize UFS device */
ret = ufshcd_probe_hba(hba, true);
- up(&hba->eh_sem);
+ up(&hba->host_sem);
if (ret)
goto out;
@@ -7927,7 +7962,6 @@ out:
*/
if (ret) {
pm_runtime_put_sync(hba->dev);
- ufshcd_exit_clk_scaling(hba);
ufshcd_hba_exit(hba);
}
}
@@ -8339,6 +8373,8 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
if (err)
goto out_disable_vreg;
+ ufs_debugfs_hba_init(hba);
+
hba->is_powered = true;
goto out;
@@ -8355,12 +8391,13 @@ out:
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
if (hba->is_powered) {
+ ufshcd_exit_clk_scaling(hba);
+ ufshcd_exit_clk_gating(hba);
+ if (hba->eh_wq)
+ destroy_workqueue(hba->eh_wq);
+ ufs_debugfs_hba_exit(hba);
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
- ufshcd_suspend_clkscaling(hba);
- if (ufshcd_is_clkscaling_supported(hba))
- if (hba->devfreq)
- ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
@@ -8655,11 +8692,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_hold(hba, false);
hba->clk_gating.is_suspended = true;
- if (hba->clk_scaling.is_allowed) {
- cancel_work_sync(&hba->clk_scaling.suspend_work);
- cancel_work_sync(&hba->clk_scaling.resume_work);
- ufshcd_suspend_clkscaling(hba);
- }
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_clk_scaling_suspend(hba, true);
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -8726,8 +8760,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ret)
goto set_dev_active;
- ufshcd_vreg_set_lpm(hba);
-
disable_clks:
/*
* Call vendor specific suspend callback. As these callbacks may access
@@ -8751,13 +8783,13 @@ disable_clks:
hba->clk_gating.state);
}
+ ufshcd_vreg_set_lpm(hba);
+
/* Put the host controller in low power mode if possible */
ufshcd_hba_vreg_set_lpm(hba);
goto out;
set_link_active:
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
ufshcd_vreg_set_hpm(hba);
/*
* Device hardware reset is required to exit DeepSleep. Also, for
@@ -8781,8 +8813,9 @@ set_dev_active:
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_clk_scaling_suspend(hba, false);
+
hba->clk_gating.is_suspended = false;
hba->dev_info.b_rpm_dev_flush_capable = false;
ufshcd_clear_ua_wluns(hba);
@@ -8819,18 +8852,18 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
old_link_state = hba->uic_link_state;
ufshcd_hba_vreg_set_hpm(hba);
+ ret = ufshcd_vreg_set_hpm(hba);
+ if (ret)
+ goto out;
+
/* Make sure clocks are enabled before accessing controller */
ret = ufshcd_setup_clocks(hba, true);
if (ret)
- goto out;
+ goto disable_vreg;
/* enable the host irq as host controller would be active soon */
ufshcd_enable_irq(hba);
- ret = ufshcd_vreg_set_hpm(hba);
- if (ret)
- goto disable_irq_and_vops_clks;
-
/*
* Call vendor specific resume callback. As these callbacks may access
* vendor specific host controller register space call them when the
@@ -8838,7 +8871,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
*/
ret = ufshcd_vops_resume(hba, pm_op);
if (ret)
- goto disable_vreg;
+ goto disable_irq_and_vops_clks;
/* For DeepSleep, the only supported option is to have the link off */
WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
@@ -8885,8 +8918,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
hba->clk_gating.is_suspended = false;
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_clk_scaling_suspend(hba, false);
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
@@ -8907,18 +8940,16 @@ set_old_link_state:
ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
ufshcd_vops_suspend(hba, pm_op);
-disable_vreg:
- ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
ufshcd_disable_irq(hba);
- if (hba->clk_scaling.is_allowed)
- ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
}
+disable_vreg:
+ ufshcd_vreg_set_lpm(hba);
out:
hba->pm_op_in_progress = 0;
if (ret)
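
The resume reordering above brings regulators up before clocks and routes a
vendor callback failure through the clock path, so the error labels now
release resources in reverse order of acquisition. The resulting goto ladder,
reduced to a skeleton (illustrative, not verbatim):

	ret = ufshcd_vreg_set_hpm(hba);          /* 1: regulators */
	if (ret)
		goto out;
	ret = ufshcd_setup_clocks(hba, true);    /* 2: clocks */
	if (ret)
		goto disable_vreg;
	ufshcd_enable_irq(hba);                  /* 3: irq */
	ret = ufshcd_vops_resume(hba, pm_op);    /* 4: vendor hook */
	if (ret)
		goto disable_irq_and_vops_clks;
	goto out;

disable_irq_and_vops_clks:
	ufshcd_disable_irq(hba);
	ufshcd_setup_clocks(hba, false);
disable_vreg:
	ufshcd_vreg_set_lpm(hba);                /* undone last: enabled first */
out:
	return ret;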
@@ -8939,8 +8970,14 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
- down(&hba->eh_sem);
- if (!hba || !hba->is_powered)
+ if (!hba) {
+ early_suspend = true;
+ return 0;
+ }
+
+ down(&hba->host_sem);
+
+ if (!hba->is_powered)
return 0;
if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
@@ -8972,7 +9009,7 @@ out:
if (!ret)
hba->is_sys_suspended = true;
else
- up(&hba->eh_sem);
+ up(&hba->host_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
@@ -8989,9 +9026,12 @@ int ufshcd_system_resume(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
- if (!hba) {
- up(&hba->eh_sem);
+ if (!hba)
return -EINVAL;
+
+ if (unlikely(early_suspend)) {
+ early_suspend = false;
+ down(&hba->host_sem);
}
if (!hba->is_powered || pm_runtime_suspended(hba->dev))
@@ -9008,7 +9048,7 @@ out:
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
hba->is_sys_suspended = false;
- up(&hba->eh_sem);
+ up(&hba->host_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
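
The early_suspend flag closes a startup race: if ufshcd_system_suspend() ran
while hba was still NULL, it returned success without taking host_sem, so the
matching resume must take the semaphore first to keep the unconditional up()
at its end balanced. The flag is presumably a file-scope static in ufshcd.c:

	/* assumed declaration; not shown in the hunks above */
	static bool early_suspend;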
@@ -9100,7 +9140,10 @@ int ufshcd_shutdown(struct ufs_hba *hba)
{
int ret = 0;
- down(&hba->eh_sem);
+ down(&hba->host_sem);
+ hba->shutting_down = true;
+ up(&hba->host_sem);
+
if (!hba->is_powered)
goto out;
@@ -9114,7 +9157,6 @@ out:
if (ret)
dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
hba->is_powered = false;
- up(&hba->eh_sem);
/* allow force shutdown even in case of errors */
return 0;
}
@@ -9133,15 +9175,9 @@ void ufshcd_remove(struct ufs_hba *hba)
blk_mq_free_tag_set(&hba->tmf_tag_set);
blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host);
- destroy_workqueue(hba->eh_wq);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
-
- ufshcd_exit_clk_scaling(hba);
- ufshcd_exit_clk_gating(hba);
- if (ufshcd_is_clkscaling_supported(hba))
- device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -9309,7 +9345,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
- sema_init(&hba->eh_sem, 1);
+ sema_init(&hba->host_sem, 1);
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
@@ -9341,7 +9377,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
dev_err(hba->dev, "request irq failed\n");
- goto exit_gating;
+ goto out_disable;
} else {
hba->is_irq_enabled = true;
}
@@ -9349,7 +9385,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = scsi_add_host(host, hba->dev);
if (err) {
dev_err(hba->dev, "scsi_add_host failed\n");
- goto exit_gating;
+ goto out_disable;
}
hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
@@ -9432,10 +9468,6 @@ free_cmd_queue:
blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
scsi_remove_host(hba->host);
-exit_gating:
- ufshcd_exit_clk_scaling(hba);
- ufshcd_exit_clk_gating(hba);
- destroy_workqueue(hba->eh_wq);
out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);
@@ -9444,6 +9476,20 @@ out_error:
}
EXPORT_SYMBOL_GPL(ufshcd_init);
+static int __init ufshcd_core_init(void)
+{
+ ufs_debugfs_init();
+ return 0;
+}
+
+static void __exit ufshcd_core_exit(void)
+{
+ ufs_debugfs_exit();
+}
+
+module_init(ufshcd_core_init);
+module_exit(ufshcd_core_exit);
+
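
The new module-level hooks suggest ufs-debugfs.c keeps one shared debugfs
root for all hosts, created at module load and removed at unload. A minimal
sketch of what ufs_debugfs_init()/ufs_debugfs_exit() could look like (the
directory name and the variable are assumptions):

	static struct dentry *ufs_debugfs_root;

	void __init ufs_debugfs_init(void)
	{
		ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL);
	}

	void __exit ufs_debugfs_exit(void)
	{
		debugfs_remove_recursive(ufs_debugfs_root);
	}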
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index aa9ea3552323..ee61f821f75d 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -383,6 +383,7 @@ enum clk_gating_state {
* @delay_attr: sysfs attribute to control delay_ms
* @enable_attr: sysfs attribute to enable/disable clock gating
* @is_enabled: Indicates the current status of clock gating
+ * @is_initialized: Indicates whether clock gating is initialized or not
* @active_reqs: number of pending requests that must complete before
* clocks can be gated.
*/
@@ -395,6 +396,7 @@ struct ufs_clk_gating {
struct device_attribute delay_attr;
struct device_attribute enable_attr;
bool is_enabled;
+ bool is_initialized;
int active_reqs;
struct workqueue_struct *clk_gating_workq;
};
@@ -419,7 +421,11 @@ struct ufs_saved_pwr_info {
* @suspend_work: worker to suspend devfreq
* @resume_work: worker to resume devfreq
* @min_gear: lowest HS gear to scale down to
- * @is_allowed: tracks if scaling is currently allowed or not
+ * @is_enabled: tracks if scaling is currently enabled or not, controlled by
+ *	clkscale_enable sysfs node
+ * @is_allowed: tracks if scaling is currently allowed or not, used to block
+ *	clock scaling which is not invoked from devfreq governor
+ * @is_initialized: Indicates whether clock scaling is initialized or not
* @is_busy_started: tracks if busy period has started or not
* @is_suspended: tracks if devfreq is suspended or not
*/
@@ -434,7 +440,9 @@ struct ufs_clk_scaling {
struct work_struct suspend_work;
struct work_struct resume_work;
u32 min_gear;
+ bool is_enabled;
bool is_allowed;
+ bool is_initialized;
bool is_busy_started;
bool is_suspended;
};
@@ -445,11 +453,13 @@ struct ufs_clk_scaling {
* @pos: index to indicate cyclic buffer position
* @reg: cyclic buffer for registers value
* @tstamp: cyclic buffer for time stamp
+ * @cnt: error counter
*/
struct ufs_event_hist {
int pos;
u32 val[UFS_EVENT_HIST_LENGTH];
ktime_t tstamp[UFS_EVENT_HIST_LENGTH];
+ unsigned long long cnt;
};
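
With @cnt added, each event keeps a running total that survives the wrap of
the UFS_EVENT_HIST_LENGTH ring buffer, so the history shows both the last few
occurrences and how many happened overall. The update presumably looks like
this (illustrative helper, not the verbatim patch):

	static void ufs_event_record(struct ufs_event_hist *e, u32 val)
	{
		e->val[e->pos] = val;
		e->tstamp[e->pos] = ktime_get();
		e->cnt += 1;	/* running total survives ring wrap */
		e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
	}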
/**
@@ -551,6 +561,16 @@ enum ufshcd_quirks {
*/
UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12,
+ /*
+ * This quirk makes the driver skip programming the default UniPro
+ * timeout values before a power mode change
+ */
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
+
+ /*
+ * This quirk allows only sg entries aligned with page size.
+ */
+ UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 14,
};
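
Both quirks are honored by the core driver but opted into by platform glue,
typically from a vendor init hook. Illustrative only (the function name is
made up):

	static int example_hba_init(struct ufs_hba *hba)
	{
		/* host DMA cannot handle sg entries not aligned to PAGE_SIZE */
		hba->quirks |= UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE;
		return 0;
	}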
enum ufshcd_caps {
@@ -657,6 +677,8 @@ struct ufs_hba_variant_params {
* @intr_mask: Interrupt Mask Bits
* @ee_ctrl_mask: Exception event control mask
* @is_powered: flag to check if HBA is powered
+ * @shutting_down: flag to check if shutdown has been invoked
+ * @host_sem: semaphore used to serialize concurrent contexts
* @eh_wq: Workqueue that eh_work works on
* @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
@@ -753,7 +775,8 @@ struct ufs_hba {
u32 intr_mask;
u16 ee_ctrl_mask;
bool is_powered;
- struct semaphore eh_sem;
+ bool shutting_down;
+ struct semaphore host_sem;
/* Work Queues */
struct workqueue_struct *eh_wq;
@@ -807,8 +830,6 @@ struct ufs_hba {
struct device bsg_dev;
struct request_queue *bsg_queue;
- bool wb_buf_flush_enabled;
- bool wb_enabled;
struct delayed_work rpm_dev_flush_recheck_work;
#ifdef CONFIG_SCSI_UFS_CRYPTO
@@ -817,6 +838,9 @@ struct ufs_hba {
u32 crypto_cfg_register;
struct blk_keyslot_manager ksm;
#endif
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+#endif
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -877,6 +901,11 @@ static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
return hba->caps & UFSHCD_CAP_WB_EN;
}
+static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
+{
+ return !hba->shutting_down;
+}
+
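
Together with shutting_down being set under host_sem in ufshcd_shutdown()
above, this helper gives user-facing handlers (sysfs, debugfs) a simple
pattern for staying clear of a host that is going away. A sketch of the
expected usage (the handler itself is hypothetical):

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		struct ufs_hba *hba = dev_get_drvdata(dev);
		ssize_t ret = count;

		down(&hba->host_sem);
		if (!ufshcd_is_user_access_allowed(hba)) {
			ret = -EBUSY;
			goto out;
		}
		/* touch hba safely here */
	out:
		up(&hba->host_sem);
		return ret;
	}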
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
@@ -948,7 +977,7 @@ static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
{
- if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
+ if (hba->dev_info.wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
return hba->dev_info.wb_dedicated_lu;
return 0;
}
@@ -1070,6 +1099,8 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
u8 *desc_buff, int *buff_len,
enum query_opcode desc_op);
+int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
+
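
Exporting ufshcd_wb_ctrl() lets vendor drivers toggle WriteBooster outside
the core suspend/resume flow. A usage sketch (the error handling is
illustrative):

	int err = ufshcd_wb_ctrl(hba, true);	/* true enables WB */
	if (err)
		dev_warn(hba->dev, "WriteBooster enable failed: %d\n", err);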
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{