Diffstat (limited to 'drivers/ufs')
-rw-r--r--  drivers/ufs/core/ufs-fault-injection.c |   2
-rw-r--r--  drivers/ufs/core/ufs-hwmon.c           |   2
-rw-r--r--  drivers/ufs/core/ufs-mcq.c             | 259
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c           |   2
-rw-r--r--  drivers/ufs/core/ufshcd-crypto.c       |   2
-rw-r--r--  drivers/ufs/core/ufshcd-priv.h         |  21
-rw-r--r--  drivers/ufs/core/ufshcd.c              | 385
-rw-r--r--  drivers/ufs/core/ufshpb.c              |   6
-rw-r--r--  drivers/ufs/core/ufshpb.h              |   2
-rw-r--r--  drivers/ufs/host/ufs-exynos.c          |   2
-rw-r--r--  drivers/ufs/host/ufs-hisi.c            |  24
-rw-r--r--  drivers/ufs/host/ufs-mediatek.c        |   3
-rw-r--r--  drivers/ufs/host/ufs-qcom.c            |   2
13 files changed, 541 insertions, 171 deletions
diff --git a/drivers/ufs/core/ufs-fault-injection.c b/drivers/ufs/core/ufs-fault-injection.c
index 7ac7c4e7ff83..5b1184aac585 100644
--- a/drivers/ufs/core/ufs-fault-injection.c
+++ b/drivers/ufs/core/ufs-fault-injection.c
@@ -54,7 +54,7 @@ static int ufs_fault_set(const char *val, const struct kernel_param *kp)
if (!setup_fault_attr(attr, (char *)val))
return -EINVAL;
- strlcpy(kp->arg, val, FAULT_INJ_STR_SIZE);
+ strscpy(kp->arg, val, FAULT_INJ_STR_SIZE);
return 0;
}
diff --git a/drivers/ufs/core/ufs-hwmon.c b/drivers/ufs/core/ufs-hwmon.c
index 4c6a872b7a7c..101d7082446f 100644
--- a/drivers/ufs/core/ufs-hwmon.c
+++ b/drivers/ufs/core/ufs-hwmon.c
@@ -146,7 +146,7 @@ static umode_t ufs_hwmon_is_visible(const void *_data, enum hwmon_sensor_types t
return 0;
}
-static const struct hwmon_channel_info *ufs_hwmon_info[] = {
+static const struct hwmon_channel_info *const ufs_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_ENABLE | HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_LCRIT),
NULL
};
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 202ff71e1b58..66ac02e0a859 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -12,6 +12,10 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include "ufshcd-priv.h"
+#include <linux/delay.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
#define MAX_QUEUE_SUP GENMASK(7, 0)
#define UFS_MCQ_MIN_RW_QUEUES 2
@@ -27,6 +31,9 @@
#define MCQ_ENTRY_SIZE_IN_DWORD 8
#define CQE_UCD_BA GENMASK_ULL(63, 7)
+/* Max MCQ register polling time in microseconds */
+#define MCQ_POLL_US 500000
+
static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{
return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
@@ -269,16 +276,38 @@ static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
}
static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
- struct ufs_hw_queue *hwq)
+ struct ufs_hw_queue *hwq)
{
struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);
- ufshcd_compl_one_cqe(hba, tag, cqe);
+ if (cqe->command_desc_base_addr) {
+ ufshcd_compl_one_cqe(hba, tag, cqe);
+		/* After processing the CQE, mark the entry as empty (invalid) */
+ cqe->command_desc_base_addr = 0;
+ }
}
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
- struct ufs_hw_queue *hwq)
+void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ unsigned long flags;
+ u32 entries = hwq->max_entries;
+
+ spin_lock_irqsave(&hwq->cq_lock, flags);
+ while (entries > 0) {
+ ufshcd_mcq_process_cqe(hba, hwq);
+ ufshcd_mcq_inc_cq_head_slot(hwq);
+ entries--;
+ }
+
+ ufshcd_mcq_update_cq_tail_slot(hwq);
+ hwq->cq_head_slot = hwq->cq_tail_slot;
+ spin_unlock_irqrestore(&hwq->cq_lock, flags);
+}
+
+static unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
{
unsigned long completed_reqs = 0;
@@ -294,7 +323,6 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
return completed_reqs;
}
-EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq)
@@ -307,6 +335,7 @@ unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
return completed_reqs;
}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
@@ -419,6 +448,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
hwq->max_entries = hba->nutrs;
spin_lock_init(&hwq->sq_lock);
spin_lock_init(&hwq->cq_lock);
+ mutex_init(&hwq->sq_mutex);
}
/* The very first HW queue serves device commands */
@@ -429,3 +459,222 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
host->host_tagset = 1;
return 0;
}
+
+static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
+{
+ void __iomem *reg;
+ u32 id = hwq->id, val;
+ int err;
+
+ writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
+ reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
+ err = read_poll_timeout(readl, val, val & SQ_STS, 20,
+ MCQ_POLL_US, false, reg);
+ if (err)
+ dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
+ __func__, id, err);
+ return err;
+}
+
+static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
+{
+ void __iomem *reg;
+ u32 id = hwq->id, val;
+ int err;
+
+ writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
+ reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
+ err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
+ MCQ_POLL_US, false, reg);
+ if (err)
+ dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
+ __func__, id, err);
+ return err;
+}
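
Both run-stop helpers above lean on read_poll_timeout() from <linux/iopoll.h>. As a rough sketch of what the SQ_STS poll in ufshcd_mcq_sq_stop() expands to (an illustration of the macro's semantics only, not its real implementation):

    static int sq_stop_poll_sketch(void __iomem *reg)
    {
        ktime_t timeout = ktime_add_us(ktime_get(), MCQ_POLL_US);
        u32 val;

        for (;;) {
            val = readl(reg);
            if (val & SQ_STS)
                return 0;
            if (ktime_after(ktime_get(), timeout))
                /* like the macro: one final read after the timeout */
                return (readl(reg) & SQ_STS) ? 0 : -ETIMEDOUT;
            usleep_range(10, 20);   /* sleep_us = 20 in the call above */
        }
    }
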
+
+/**
+ * ufshcd_mcq_sq_cleanup - Clean up submission queue resources
+ * associated with the pending command.
+ * @hba: per adapter instance.
+ * @task_tag: The command's task tag.
+ *
+ * Returns 0 for success; error code otherwise.
+ */
+int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
+{
+ struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+ struct scsi_cmnd *cmd = lrbp->cmd;
+ struct ufs_hw_queue *hwq;
+ void __iomem *reg, *opr_sqd_base;
+ u32 nexus, id, val;
+ int err;
+
+ if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
+ if (!cmd)
+ return -EINVAL;
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ } else {
+ hwq = hba->dev_cmd_queue;
+ }
+
+ id = hwq->id;
+
+ mutex_lock(&hwq->sq_mutex);
+
+ /* stop the SQ fetching before working on it */
+ err = ufshcd_mcq_sq_stop(hba, hwq);
+ if (err)
+ goto unlock;
+
+ /* SQCTI = EXT_IID, IID, LUN, Task Tag */
+ nexus = lrbp->lun << 8 | task_tag;
+ opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
+ writel(nexus, opr_sqd_base + REG_SQCTI);
+
+ /* SQRTCy.ICU = 1 */
+ writel(SQ_ICU, opr_sqd_base + REG_SQRTC);
+
+ /* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
+ reg = opr_sqd_base + REG_SQRTS;
+ err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
+ MCQ_POLL_US, false, reg);
+ if (err)
+ dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
+ __func__, id, task_tag,
+ FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
+
+ if (ufshcd_mcq_sq_start(hba, hwq))
+ err = -ETIMEDOUT;
+
+unlock:
+ mutex_unlock(&hwq->sq_mutex);
+ return err;
+}
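
For reference, the nexus written to SQCTI above packs the LUN into bits 15:8 and the task tag into bits 7:0 (the EXT_IID/IID fields are left zero here). The same packing expressed with FIELD_PREP(), using hypothetical *_MASK names; only the bit layout comes from the code above:

    #define SQCTI_TAG_MASK	GENMASK(7, 0)	/* hypothetical name */
    #define SQCTI_LUN_MASK	GENMASK(15, 8)	/* hypothetical name */

    static u32 sqcti_nexus_sketch(u8 lun, int task_tag)
    {
        /* e.g. lun = 2, task_tag = 0x1f  ->  nexus = 0x021f */
        return FIELD_PREP(SQCTI_LUN_MASK, lun) |
               FIELD_PREP(SQCTI_TAG_MASK, task_tag);
    }
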
+
+/**
+ * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
+ * Write the SQE's Command Type to 0xF. The host controller will not
+ * fetch any SQE with Command Type = 0xF.
+ *
+ * @utrd: UTP Transfer Request Descriptor to be nullified.
+ */
+static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
+{
+ u32 dword_0;
+
+ dword_0 = le32_to_cpu(utrd->header.dword_0);
+ dword_0 &= ~UPIU_COMMAND_TYPE_MASK;
+ dword_0 |= FIELD_PREP(UPIU_COMMAND_TYPE_MASK, 0xF);
+ utrd->header.dword_0 = cpu_to_le32(dword_0);
+}
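
A quick worked example of the nullification, assuming UPIU_COMMAND_TYPE_MASK covers UTRD dword 0 bits 31:28 (the Command Type field in the UFSHCI layout):

    u32 dword_0 = 0x10000001;	/* CT = 0x1, low bits unchanged */

    dword_0 &= ~GENMASK(31, 28);			/* 0x00000001 */
    dword_0 |= FIELD_PREP(GENMASK(31, 28), 0xF);	/* 0xF0000001 */
    /* the controller will now skip this SQE */
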
+
+/**
+ * ufshcd_mcq_sqe_search - Search for the command in the submission queue.
+ * If the command is in the submission queue and not issued to the device yet,
+ * nullify the SQE so the host controller skips fetching it.
+ *
+ * @hba: per adapter instance.
+ * @hwq: Hardware Queue to be searched.
+ * @task_tag: The command's task tag.
+ *
+ * Returns true if the SQE containing the command is present in the SQ
+ * (not fetched by the controller); returns false if the SQE is not in the SQ.
+ */
+static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq, int task_tag)
+{
+ struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+ struct utp_transfer_req_desc *utrd;
+ u32 mask = hwq->max_entries - 1;
+ __le64 cmd_desc_base_addr;
+ bool ret = false;
+ u64 addr, match;
+ u32 sq_head_slot;
+
+ mutex_lock(&hwq->sq_mutex);
+
+ ufshcd_mcq_sq_stop(hba, hwq);
+ sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
+ if (sq_head_slot == hwq->sq_tail_slot)
+ goto out;
+
+ cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr;
+ addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;
+
+ while (sq_head_slot != hwq->sq_tail_slot) {
+ utrd = hwq->sqe_base_addr +
+ sq_head_slot * sizeof(struct utp_transfer_req_desc);
+ match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
+ if (addr == match) {
+ ufshcd_mcq_nullify_sqe(utrd);
+ ret = true;
+ goto out;
+ }
+ sq_head_slot = (sq_head_slot + 1) & mask;
+ }
+
+out:
+ ufshcd_mcq_sq_start(hba, hwq);
+ mutex_unlock(&hwq->sq_mutex);
+ return ret;
+}
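
Note the wrap arithmetic in the loop above: mask = hwq->max_entries - 1 is only a valid modulus because the queue depth is a power of two. A minimal sketch of the head-to-tail walk:

    static void sq_slot_walk_sketch(u32 head, u32 tail, u32 max_entries)
    {
        u32 mask = max_entries - 1;	/* power-of-two depth assumed */

        while (head != tail) {
            /* inspect the SQE at slot 'head' here */
            head = (head + 1) & mask;	/* e.g. 31 -> 0 at depth 32 */
        }
    }
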
+
+/**
+ * ufshcd_mcq_abort - Abort the command in MCQ.
+ * @cmd: The command to be aborted.
+ *
+ * Returns SUCCESS or FAILED.
+ */
+int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct ufs_hba *hba = shost_priv(host);
+ int tag = scsi_cmd_to_rq(cmd)->tag;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct ufs_hw_queue *hwq;
+ int err = FAILED;
+
+ if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+ dev_err(hba->dev,
+ "%s: skip abort. cmd at tag %d already completed.\n",
+ __func__, tag);
+ goto out;
+ }
+
+ /* Skip task abort in case previous aborts failed and report failure */
+ if (lrbp->req_abort_skip) {
+ dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
+ __func__, tag);
+ goto out;
+ }
+
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+
+ if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
+ /*
+		 * Failure: a command should not sit in the SQ for so
+		 * long that it ends up being aborted.
+ */
+ dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
+ __func__, hwq->id, tag);
+ goto out;
+ }
+
+ /*
+ * The command is not in the submission queue, and it is not
+ * in the completion queue either. Query the device to see if
+ * the command is being processed in the device.
+ */
+ if (ufshcd_try_to_abort_task(hba, tag)) {
+		dev_err(hba->dev, "%s: device abort failed. tag=%d\n", __func__, tag);
+ lrbp->req_abort_skip = true;
+ goto out;
+ }
+
+ err = SUCCESS;
+ if (ufshcd_cmd_inflight(lrbp->cmd))
+ ufshcd_release_scsi_cmd(hba, lrbp);
+
+out:
+ return err;
+}
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 883f0e44b54e..cdf3d5f2b77b 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -168,7 +168,7 @@ static ssize_t auto_hibern8_show(struct device *dev,
}
pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
index 198360fe5e8e..f2c4422cab86 100644
--- a/drivers/ufs/core/ufshcd-crypto.c
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -24,7 +24,7 @@ static int ufshcd_program_key(struct ufs_hba *hba,
u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg);
int err = 0;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
if (hba->vops && hba->vops->program_key) {
err = hba->vops->program_key(hba, cfg, slot);
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index d53b93c21a0c..f32c1a874dff 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -71,12 +71,18 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba);
u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
- struct ufs_hw_queue *hwq);
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
struct request *req);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq);
+void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
+bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
+int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag);
+int ufshcd_mcq_abort(struct scsi_cmnd *cmd);
+int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
+void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp);
#define UFSHCD_MCQ_IO_QUEUE_OFFSET 1
#define SD_ASCII_STD true
@@ -84,9 +90,6 @@ unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
u8 **buf, bool ascii);
-int ufshcd_hold(struct ufs_hba *hba, bool async);
-void ufshcd_release(struct ufs_hba *hba);
-
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
@@ -404,4 +407,12 @@ static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
return cqe + q->cq_head_slot;
}
+
+static inline u32 ufshcd_mcq_get_sq_head_slot(struct ufs_hw_queue *q)
+{
+ u32 val = readl(q->mcq_sq_head);
+
+ return val / sizeof(struct utp_transfer_req_desc);
+}
+
#endif /* _UFSHCD_PRIV_H_ */
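
The new ufshcd_mcq_get_sq_head_slot() helper converts the byte offset held in the SQ head register into a slot index. Assuming the usual 32-byte UTRD, for example:

    u32 head_bytes = readl(q->mcq_sq_head);	/* e.g. 0x60 */
    u32 slot = head_bytes / sizeof(struct utp_transfer_req_desc);
    /* 0x60 / 32 == slot 3 */
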
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 00f730671f4b..ab1255bd8b7f 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -98,7 +98,7 @@
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
-/* UFSHC 4.0 compliant HC support this mode, refer param_set_mcq_mode() */
+/* UFSHC 4.0 compliant HC support this mode. */
static bool use_mcq_mode = true;
static bool is_mcq_supported(struct ufs_hba *hba)
@@ -106,23 +106,7 @@ static bool is_mcq_supported(struct ufs_hba *hba)
return hba->mcq_sup && use_mcq_mode;
}
-static int param_set_mcq_mode(const char *val, const struct kernel_param *kp)
-{
- int ret;
-
- ret = param_set_bool(val, kp);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static const struct kernel_param_ops mcq_mode_ops = {
- .set = param_set_mcq_mode,
- .get = param_get_bool,
-};
-
-module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644);
+module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
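
With a plain module_param() and mode 0644, the knob can be set at boot or flipped at runtime; assuming the core module keeps its usual ufshcd-core name, that would look like:

    ufshcd_core.use_mcq_mode=0	(kernel command line)
    echo 0 > /sys/module/ufshcd_core/parameters/use_mcq_mode	(runtime)
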
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
@@ -173,7 +157,6 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
- UFSHCD_NUM_RESERVED = 1,
UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
};
@@ -301,7 +284,6 @@ static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg);
-static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
@@ -1205,7 +1187,7 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
bool timeout = false, do_last_check = false;
ktime_t start;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
/*
* Wait for all the outstanding tasks/transfer requests.
@@ -1326,7 +1308,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
}
/* let's not get into low power until clock scaling is completed */
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
out:
return ret;
@@ -1656,7 +1638,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
goto out;
ufshcd_rpm_get_sync(hba);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
hba->clk_scaling.is_enabled = value;
@@ -1739,7 +1721,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_gating.state == CLKS_ON) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
- goto unblock_reqs;
+ return;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1762,25 +1744,21 @@ static void ufshcd_ungate_work(struct work_struct *work)
}
hba->clk_gating.is_suspended = false;
}
-unblock_reqs:
- ufshcd_scsi_unblock_requests(hba);
}
/**
* ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
* Also, exit from hibern8 mode and set the link as active.
* @hba: per adapter instance
- * @async: This indicates whether caller should ungate clocks asynchronously.
*/
-int ufshcd_hold(struct ufs_hba *hba, bool async)
+void ufshcd_hold(struct ufs_hba *hba)
{
- int rc = 0;
bool flush_result;
unsigned long flags;
if (!ufshcd_is_clkgating_allowed(hba) ||
!hba->clk_gating.is_initialized)
- goto out;
+ return;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
@@ -1797,15 +1775,10 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
- if (async) {
- rc = -EAGAIN;
- hba->clk_gating.active_reqs--;
- break;
- }
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_result = flush_work(&hba->clk_gating.ungate_work);
if (hba->clk_gating.is_suspended && !flush_result)
- goto out;
+ return;
spin_lock_irqsave(hba->host->host_lock, flags);
goto start;
}
@@ -1827,21 +1800,14 @@ start:
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- if (queue_work(hba->clk_gating.clk_gating_workq,
- &hba->clk_gating.ungate_work))
- ufshcd_scsi_block_requests(hba);
+ queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
* work to be done or not.
*/
fallthrough;
case REQ_CLKS_ON:
- if (async) {
- rc = -EAGAIN;
- hba->clk_gating.active_reqs--;
- break;
- }
-
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_work(&hba->clk_gating.ungate_work);
/* Make sure state is CLKS_ON before returning */
@@ -1853,8 +1819,6 @@ start:
break;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
- return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
@@ -2086,7 +2050,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
ufshcd_remove_clk_gating_sysfs(hba);
/* Ungate the clock if necessary. */
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
hba->clk_gating.is_initialized = false;
ufshcd_release(hba);
@@ -2336,18 +2300,20 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
/* Read crypto capabilities */
err = ufshcd_hba_init_crypto_capabilities(hba);
- if (err)
+ if (err) {
dev_err(hba->dev, "crypto setup failed\n");
+ return err;
+ }
hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
if (!hba->mcq_sup)
- return err;
+ return 0;
hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
hba->mcq_capabilities);
- return err;
+ return 0;
}
/**
@@ -2482,7 +2448,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
return 0;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
@@ -2533,7 +2499,7 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int
* 11b to indicate Dword granularity. A value of '3'
* indicates 4 bytes, '7' indicates 8 bytes, etc."
*/
- WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
+ WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
prd->size = cpu_to_le32(len - 1);
prd->addr = cpu_to_le64(sg->dma_address);
prd->reserved = 0;
@@ -2885,12 +2851,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
- /*
- * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
- * calls.
- */
- rcu_read_lock();
-
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
@@ -2936,13 +2896,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
hba->req_abort_count = 0;
- err = ufshcd_hold(hba, true);
- if (err) {
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
- WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
- (hba->clk_gating.state != CLKS_ON));
+ ufshcd_hold(hba);
lrbp = &hba->lrb[tag];
lrbp->cmd = cmd;
@@ -2970,8 +2924,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_send_command(hba, tag, hwq);
out:
- rcu_read_unlock();
-
if (ufs_trigger_eh()) {
unsigned long flags;
@@ -2997,13 +2949,50 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
}
/*
- * Clear all the requests from the controller for which a bit has been set in
- * @mask and wait until the controller confirms that these requests have been
- * cleared.
+ * Check with the block layer whether the command is inflight.
+ * @cmd: command to check.
+ *
+ * Returns true if command is inflight; false if not.
*/
-static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask)
+bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
{
+ struct request *rq;
+
+ if (!cmd)
+ return false;
+
+ rq = scsi_cmd_to_rq(cmd);
+ if (!blk_mq_request_started(rq))
+ return false;
+
+ return true;
+}
+
+/*
+ * Clear the pending command in the controller and wait until
+ * the controller confirms that the command has been cleared.
+ * @hba: per adapter instance
+ * @task_tag: The tag number of the command to be cleared.
+ */
+static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
+{
+ u32 mask = 1U << task_tag;
unsigned long flags;
+ int err;
+
+ if (is_mcq_enabled(hba)) {
+ /*
+ * MCQ mode. Clean up the MCQ resources similar to
+ * what the ufshcd_utrl_clear() does for SDB mode.
+	 * what ufshcd_utrl_clear() does for SDB mode.
+ err = ufshcd_mcq_sq_cleanup(hba, task_tag);
+ if (err) {
+ dev_err(hba->dev, "%s: failed tag=%d. err=%d\n",
+ __func__, task_tag, err);
+ return err;
+ }
+ return 0;
+ }
/* clear outstanding transaction before retry */
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -3104,7 +3093,16 @@ retry:
err = -ETIMEDOUT;
dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
__func__, lrbp->task_tag);
- if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) {
+
+ /* MCQ mode */
+ if (is_mcq_enabled(hba)) {
+ err = ufshcd_clear_cmd(hba, lrbp->task_tag);
+ hba->dev_cmd.complete = NULL;
+ return err;
+ }
+
+ /* SDB mode */
+ if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
/* successfully cleared the command, retry if needed */
err = -EAGAIN;
/*
@@ -3265,7 +3263,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
BUG_ON(!hba);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -3339,7 +3337,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
return -EINVAL;
}
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
@@ -3435,7 +3433,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
return -EINVAL;
}
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
@@ -3777,7 +3775,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/*
* Allocate memory for UTP Transfer descriptors
- * UFSHCI requires 1024 byte alignment of UTRD
+ * UFSHCI requires 1KB alignment of UTRD
*/
utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
@@ -3785,7 +3783,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
&hba->utrdl_dma_addr,
GFP_KERNEL);
if (!hba->utrdl_base_addr ||
- WARN_ON(hba->utrdl_dma_addr & (1024 - 1))) {
+ WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
dev_err(hba->dev,
"Transfer Descriptor Memory allocation failed\n");
goto out;
@@ -3801,7 +3799,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
goto skip_utmrdl;
/*
* Allocate memory for UTP Task Management descriptors
- * UFSHCI requires 1024 byte alignment of UTMRD
+ * UFSHCI requires 1KB alignment of UTMRD
*/
utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
@@ -3809,7 +3807,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
&hba->utmrdl_dma_addr,
GFP_KERNEL);
if (!hba->utmrdl_base_addr ||
- WARN_ON(hba->utmrdl_dma_addr & (1024 - 1))) {
+ WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
dev_err(hba->dev,
"Task Management Descriptor Memory allocation failed\n");
goto out;
@@ -3866,10 +3864,8 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
/* Configure UTRD with command descriptor base address */
cmd_desc_element_addr =
(cmd_desc_dma_addr + (cmd_desc_size * i));
- utrdlp[i].command_desc_base_addr_lo =
- cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
- utrdlp[i].command_desc_base_addr_hi =
- cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
+ utrdlp[i].command_desc_base_addr =
+ cpu_to_le64(cmd_desc_element_addr);
/* Response upiu and prdt offset should be in double words */
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
@@ -4253,7 +4249,7 @@ int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
uic_cmd.command = UIC_CMD_DME_SET;
uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
uic_cmd.argument3 = mode;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
ufshcd_release(hba);
@@ -4360,7 +4356,7 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
if (update &&
!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
ufshcd_rpm_get_sync(hba);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
ufshcd_auto_hibern8_enable(hba);
ufshcd_release(hba);
ufshcd_rpm_put_sync(hba);
@@ -4953,7 +4949,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
int err = 0;
int retries;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -5146,7 +5142,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
- blk_queue_update_dma_alignment(q, 4096 - 1);
+ blk_queue_update_dma_alignment(q, SZ_4K - 1);
/*
* Block runtime-pm until all consumers are added.
* Refer ufshcd_setup_links().
@@ -5414,8 +5410,8 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
}
/* Release the resources allocated for processing a SCSI command. */
-static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp)
{
struct scsi_cmnd *cmd = lrbp->cmd;
@@ -5529,6 +5525,57 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
}
/**
+ * ufshcd_mcq_compl_pending_transfer - MCQ mode: complete pending transfers.
+ * Invoked from the error handler context or from
+ * ufshcd_host_reset_and_restore() to complete pending transfers and free the
+ * resources associated with the SCSI commands.
+ *
+ * @hba: per adapter instance
+ * @force_compl: True when invoked from ufshcd_host_reset_and_restore(), which
+ * requires special handling because the host controller has been reset by
+ * ufshcd_hba_stop().
+ */
+static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
+ bool force_compl)
+{
+ struct ufs_hw_queue *hwq;
+ struct ufshcd_lrb *lrbp;
+ struct scsi_cmnd *cmd;
+ unsigned long flags;
+ u32 hwq_num, utag;
+ int tag;
+
+ for (tag = 0; tag < hba->nutrs; tag++) {
+ lrbp = &hba->lrb[tag];
+ cmd = lrbp->cmd;
+ if (!ufshcd_cmd_inflight(cmd) ||
+ test_bit(SCMD_STATE_COMPLETE, &cmd->state))
+ continue;
+
+ utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
+ hwq_num = blk_mq_unique_tag_to_hwq(utag);
+ hwq = &hba->uhq[hwq_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+
+ if (force_compl) {
+ ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
+ /*
+			 * Commands whose CQEs are not present in the CQ
+			 * are completed here explicitly.
+ */
+ if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
+ spin_lock_irqsave(&hwq->cq_lock, flags);
+ set_host_byte(cmd, DID_REQUEUE);
+ ufshcd_release_scsi_cmd(hba, lrbp);
+ scsi_done(cmd);
+ spin_unlock_irqrestore(&hwq->cq_lock, flags);
+ }
+ } else {
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ }
+ }
+}
+
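
blk_mq_unique_tag() encodes the hardware queue index in the upper 16 bits and the per-queue tag in the lower 16 (see <linux/blk-mq.h>), which is how the loop above recovers the right CQ; UFSHCD_MCQ_IO_QUEUE_OFFSET then skips past the dedicated device-command queue. For illustration:

    u32 utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
    u32 hwq_num = blk_mq_unique_tag_to_hwq(utag);	/* utag >> 16 */
    u32 tag = blk_mq_unique_tag_to_tag(utag);		/* utag & 0xffff */
    struct ufs_hw_queue *hwq =
            &hba->uhq[hwq_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
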
+/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
*
@@ -6092,9 +6139,13 @@ out:
}
/* Complete requests that have door-bell cleared */
-static void ufshcd_complete_requests(struct ufs_hba *hba)
+static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
{
- ufshcd_transfer_req_compl(hba);
+ if (is_mcq_enabled(hba))
+ ufshcd_mcq_compl_pending_transfer(hba, force_compl);
+ else
+ ufshcd_transfer_req_compl(hba);
+
ufshcd_tmc_handler(hba);
}
@@ -6238,22 +6289,22 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
ufshcd_setup_vreg(hba, true);
ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
if (!ufshcd_is_clkgating_allowed(hba))
ufshcd_setup_clocks(hba, true);
ufshcd_release(hba);
pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
ufshcd_vops_resume(hba, pm_op);
} else {
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
if (ufshcd_is_clkscaling_supported(hba) &&
hba->clk_scaling.is_enabled)
ufshcd_suspend_clkscaling(hba);
ufshcd_clk_scaling_allow(hba, false);
}
ufshcd_scsi_block_requests(hba);
- /* Drain ufshcd_queuecommand() */
- synchronize_rcu();
+ /* Wait for ongoing ufshcd_queuecommand() calls to finish. */
+ blk_mq_wait_quiesce_done(&hba->host->tag_set);
cancel_work_sync(&hba->eeh_work);
}
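
The RCU-based drain is replaced with the block layer's own machinery here: ufshcd_queuecommand() runs inside ->queue_rq() under the tag set's (S)RCU read section, so waiting for a grace period waits out every call that has already started. In sketch form, the sequence above amounts to:

    ufshcd_scsi_block_requests(hba);		/* no new queuecommand calls */
    blk_mq_wait_quiesce_done(&hba->host->tag_set);	/* drain started ones */
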
@@ -6335,18 +6386,36 @@ static bool ufshcd_abort_all(struct ufs_hba *hba)
bool needs_reset = false;
int tag, ret;
- /* Clear pending transfer requests */
- for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
- ret = ufshcd_try_to_abort_task(hba, tag);
- dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
- hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
- ret ? "failed" : "succeeded");
- if (ret) {
- needs_reset = true;
- goto out;
+ if (is_mcq_enabled(hba)) {
+ struct ufshcd_lrb *lrbp;
+
+ for (tag = 0; tag < hba->nutrs; tag++) {
+ lrbp = &hba->lrb[tag];
+ if (!ufshcd_cmd_inflight(lrbp->cmd))
+ continue;
+ ret = ufshcd_try_to_abort_task(hba, tag);
+ dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+ hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+ ret ? "failed" : "succeeded");
+ if (ret) {
+ needs_reset = true;
+ goto out;
+ }
+ }
+ } else {
+ /* Clear pending transfer requests */
+ for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+ ret = ufshcd_try_to_abort_task(hba, tag);
+ dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+ hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+ ret ? "failed" : "succeeded");
+ if (ret) {
+ needs_reset = true;
+ goto out;
+ }
}
}
-
/* Clear pending task management requests */
for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
if (ufshcd_clear_tm_cmd(hba, tag)) {
@@ -6357,7 +6426,7 @@ static bool ufshcd_abort_all(struct ufs_hba *hba)
out:
/* Complete the requests that are cleared by s/w */
- ufshcd_complete_requests(hba);
+ ufshcd_complete_requests(hba, false);
return needs_reset;
}
@@ -6397,7 +6466,7 @@ static void ufshcd_err_handler(struct work_struct *work)
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_err_handling_prepare(hba);
/* Complete requests that have door-bell cleared by h/w */
- ufshcd_complete_requests(hba);
+ ufshcd_complete_requests(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags);
again:
needs_restore = false;
@@ -6768,7 +6837,7 @@ static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
ufshcd_mcq_write_cqis(hba, events, i);
if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
- ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
}
return IRQ_HANDLED;
@@ -6898,7 +6967,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
return PTR_ERR(req);
req->end_io_data = &wait;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
spin_lock_irqsave(host->host_lock, flags);
@@ -7134,7 +7203,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
cmd_type = DEV_CMD_TYPE_NOP;
fallthrough;
case UPIU_TRANSACTION_QUERY_REQ:
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
desc_buff, buff_len,
@@ -7200,7 +7269,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
u16 ehs_len;
/* Protects use of hba->reserved_slot. */
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
down_read(&hba->clk_scaling_lock);
@@ -7276,7 +7345,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
unsigned long flags, pending_reqs = 0, not_cleared = 0;
struct Scsi_Host *host;
struct ufs_hba *hba;
- u32 pos;
+ struct ufs_hw_queue *hwq;
+ struct ufshcd_lrb *lrbp;
+ u32 pos, not_cleared_mask = 0;
int err;
u8 resp = 0xF, lun;
@@ -7291,6 +7362,20 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
goto out;
}
+ if (is_mcq_enabled(hba)) {
+ for (pos = 0; pos < hba->nutrs; pos++) {
+ lrbp = &hba->lrb[pos];
+ if (ufshcd_cmd_inflight(lrbp->cmd) &&
+ lrbp->lun == lun) {
+ ufshcd_clear_cmd(hba, pos);
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ }
+ }
+ err = 0;
+ goto out;
+ }
+
/* clear the commands that were pending for corresponding LUN */
spin_lock_irqsave(&hba->outstanding_lock, flags);
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
@@ -7299,17 +7384,20 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
hba->outstanding_reqs &= ~pending_reqs;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
- if (ufshcd_clear_cmds(hba, pending_reqs) < 0) {
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- not_cleared = pending_reqs &
- ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- hba->outstanding_reqs |= not_cleared;
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
+ if (ufshcd_clear_cmd(hba, pos) < 0) {
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ not_cleared = 1U << pos &
+ ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ hba->outstanding_reqs |= not_cleared;
+ not_cleared_mask |= not_cleared;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
- dev_err(hba->dev, "%s: failed to clear requests %#lx\n",
- __func__, not_cleared);
+ dev_err(hba->dev, "%s: failed to clear request %d\n",
+ __func__, pos);
+ }
}
- __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared);
+ __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);
out:
hba->req_abort_count = 0;
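
The per-tag rework accumulates failed clears in not_cleared_mask so that only commands that were actually cleared get completed. A worked example of the bookkeeping:

    /* pending_reqs = 0x0b (tags 0, 1, 3); clearing tag 1 fails */
    unsigned long pending_reqs = 0x0b, not_cleared_mask = 0;

    not_cleared_mask |= 1U << 1;	/* doorbell bit still set for tag 1 */
    /* __ufshcd_transfer_req_compl() then gets
     * pending_reqs & ~not_cleared_mask == 0x09, i.e. tags 0 and 3 only */
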
@@ -7347,7 +7435,7 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
*
* Returns zero on success, non-zero on failure
*/
-static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
+int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int err = 0;
@@ -7370,6 +7458,20 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
*/
dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
__func__, tag);
+ if (is_mcq_enabled(hba)) {
+ /* MCQ mode */
+ if (ufshcd_cmd_inflight(lrbp->cmd)) {
+			/* sleep for max. 200us, the same delay as in SDB mode */
+ usleep_range(100, 200);
+ continue;
+ }
+ /* command completed already */
+ dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n",
+ __func__, tag);
+ goto out;
+ }
+
+ /* Single Doorbell Mode */
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (reg & (1 << tag)) {
/* sleep for max. 200us to stabilize */
@@ -7406,7 +7508,7 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
goto out;
}
- err = ufshcd_clear_cmds(hba, 1U << tag);
+ err = ufshcd_clear_cmd(hba, tag);
if (err)
dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
__func__, tag, err);
@@ -7434,14 +7536,17 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
- ufshcd_hold(hba, false);
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- /* If command is already aborted/completed, return FAILED. */
- if (!(test_bit(tag, &hba->outstanding_reqs))) {
- dev_err(hba->dev,
- "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
- __func__, tag, hba->outstanding_reqs, reg);
- goto release;
+ ufshcd_hold(hba);
+
+ if (!is_mcq_enabled(hba)) {
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (!test_bit(tag, &hba->outstanding_reqs)) {
+ /* If command is already aborted/completed, return FAILED. */
+ dev_err(hba->dev,
+ "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
+ __func__, tag, hba->outstanding_reqs, reg);
+ goto release;
+ }
}
/* Print Transfer Request of aborted task */
@@ -7466,7 +7571,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
}
hba->req_abort_count++;
- if (!(reg & (1 << tag))) {
+ if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
+ /* only execute this code in single doorbell mode */
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
__func__, tag);
@@ -7492,6 +7598,12 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto release;
}
+ if (is_mcq_enabled(hba)) {
+		/* MCQ mode. Branch off to the MCQ abort handler. */
+ err = ufshcd_mcq_abort(cmd);
+ goto release;
+ }
+
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skipping abort\n", __func__);
@@ -7547,7 +7659,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET);
ufshcd_hba_stop(hba);
hba->silence_err_logs = true;
- ufshcd_complete_requests(hba);
+ ufshcd_complete_requests(hba, true);
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
@@ -8769,7 +8881,7 @@ static const struct scsi_host_template ufshcd_driver_template = {
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE,
.max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
- .max_sectors = (1 << 20) / SECTOR_SIZE, /* 1 MiB */
+ .max_sectors = SZ_1M / SECTOR_SIZE,
.max_host_blocked = 1,
.track_queue_depth = 1,
.skip_settle_delay = 1,
@@ -9426,7 +9538,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* If we can't transition into any of the low power modes
* just gate the clocks.
*/
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
hba->clk_gating.is_suspended = true;
if (ufshcd_is_clkscaling_supported(hba))
@@ -10201,6 +10313,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
host->max_channel = UFSHCD_MAX_CHANNEL;
host->unique_id = host->host_no;
host->max_cmd_len = UFS_CDB_SIZE;
+ host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);
hba->max_pwr_info.is_valid = false;
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
index a46a7666c891..255f8b38d0c2 100644
--- a/drivers/ufs/core/ufshpb.c
+++ b/drivers/ufs/core/ufshpb.c
@@ -30,7 +30,7 @@ static struct kmem_cache *ufshpb_mctx_cache;
static mempool_t *ufshpb_mctx_pool;
static mempool_t *ufshpb_page_pool;
/* A cache size of 2MB can cache ppn in the 1GB range. */
-static unsigned int ufshpb_host_map_kbytes = 2048;
+static unsigned int ufshpb_host_map_kbytes = SZ_2K;
static int tot_active_srgn_pages;
static struct workqueue_struct *ufshpb_wq;
@@ -2461,7 +2461,7 @@ static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
init_success = !ufshpb_check_hpb_reset_query(hba);
- pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
+ pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * SZ_1K) / PAGE_SIZE;
if (pool_size > tot_active_srgn_pages) {
mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
@@ -2527,7 +2527,7 @@ static int ufshpb_init_mem_wq(struct ufs_hba *hba)
return -ENOMEM;
}
- pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
+ pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * SZ_1K) / PAGE_SIZE;
dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
__func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
diff --git a/drivers/ufs/core/ufshpb.h b/drivers/ufs/core/ufshpb.h
index 0d6e6004d783..b428bbdd2799 100644
--- a/drivers/ufs/core/ufshpb.h
+++ b/drivers/ufs/core/ufshpb.h
@@ -25,7 +25,7 @@
/* hpb map & entries macro */
#define HPB_RGN_SIZE_UNIT 512
-#define HPB_ENTRY_BLOCK_SIZE 4096
+#define HPB_ENTRY_BLOCK_SIZE SZ_4K
#define HPB_ENTRY_SIZE 0x8
#define PINNED_NOT_SET U32_MAX
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index f41056f57fd7..3396e0388512 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -1306,7 +1306,7 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
* (ufshcd_async_scan()). Note: this callback may also be called
* from other functions than ufshcd_init().
*/
- hba->host->max_segment_size = 4096;
+ hba->host->max_segment_size = SZ_4K;
if (ufs->drv_data->pre_hce_enable) {
ret = ufs->drv_data->pre_hce_enable(ufs);
diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
index 18b72e2e68c1..5b3060cd0ab8 100644
--- a/drivers/ufs/host/ufs-hisi.c
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -335,29 +335,29 @@ static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
/* PA_TxSkip */
ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
/*PA_PWRModeUserData0 = 8191, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), SZ_8K - 1);
/*PA_PWRModeUserData1 = 65535, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), SZ_64K - 1);
/*PA_PWRModeUserData2 = 32767, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), SZ_32K - 1);
/*DME_FC0ProtectionTimeOutVal = 8191, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), SZ_8K - 1);
/*DME_TC0ReplayTimeOutVal = 65535, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), SZ_64K - 1);
/*DME_AFC0ReqTimeOutVal = 32767, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), SZ_32K - 1);
/*PA_PWRModeUserData3 = 8191, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), SZ_8K - 1);
/*PA_PWRModeUserData4 = 65535, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), SZ_64K - 1);
/*PA_PWRModeUserData5 = 32767, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), SZ_32K - 1);
/*DME_FC1ProtectionTimeOutVal = 8191, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), SZ_8K - 1);
/*DME_TC1ReplayTimeOutVal = 65535, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), SZ_64K - 1);
/*DME_AFC1ReqTimeOutVal = 32767, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), SZ_32K - 1);
}
static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index e89b625d3c5a..33b301649757 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -410,9 +410,6 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
usleep_range(100, 200);
} while (ktime_before(time_checked, timeout));
- if (val == state)
- return 0;
-
return -ETIMEDOUT;
}
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 059de74dfea3..235bb29bf3c0 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1556,7 +1556,7 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *__hba)
struct ufs_hw_queue *hwq = &hba->uhq[id];
ufshcd_mcq_write_cqis(hba, 0x1, id);
- ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
return IRQ_HANDLED;
}
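
With per-queue ESI interrupts, a CQ can now be polled concurrently from the ESI handler, ufshcd_poll() and the error handler, hence the switch to the locked variant here and in ufshcd_handle_mcq_cq_events(). Consistent with the ufs-mcq.c hunk above, ufshcd_mcq_poll_cqe_lock() is presumably just the nolock walk under cq_lock:

    unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
                                           struct ufs_hw_queue *hwq)
    {
        unsigned long completed_reqs, flags;

        spin_lock_irqsave(&hwq->cq_lock, flags);
        completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
        spin_unlock_irqrestore(&hwq->cq_lock, flags);

        return completed_reqs;
    }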