author     Linus Torvalds <torvalds@linux-foundation.org>   2024-11-25 18:50:55 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2024-11-25 18:50:55 -0800
commit     0637a68b9c6c1dfffcc1fca003cb7cd3257c3c03 (patch)
tree       e7d9a7accb6335c46ac3a6ce77860bd55eca839c /drivers/ufs/core
parent     222974c6ec9d901f7ad13bbe6e505ec1f1d822d4 (diff)
parent     8e3b6345d113cc917e64b0349dc486b5d8f55e70 (diff)
download   lwn-0637a68b9c6c1dfffcc1fca003cb7cd3257c3c03.tar.gz
           lwn-0637a68b9c6c1dfffcc1fca003cb7cd3257c3c03.zip
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"Updates to the usual drivers (ufs, lpfc, hisi_sas, st).
Amazingly enough, no core changes with the biggest set of driver
changes being ufs (which conflicted with its own fixes a bit, hence
the merges) and the rest being minor fixes and updates"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (97 commits)
scsi: st: New session only when Unit Attention for new tape
scsi: st: Add MTIOCGET and MTLOAD to ioctls allowed after device reset
scsi: st: Don't modify unknown block number in MTIOCGET
scsi: ufs: core: Restore SM8650 support
scsi: sun3: Mark driver struct with __refdata to prevent section mismatch
scsi: sg: Enable runtime power management
scsi: qedi: Fix a possible memory leak in qedi_alloc_and_init_sb()
scsi: qedf: Fix a possible memory leak in qedf_alloc_and_init_sb()
scsi: fusion: Remove unused variable 'rc'
scsi: bfa: Fix use-after-free in bfad_im_module_exit()
scsi: esas2r: Remove unused esas2r_build_cli_req()
scsi: target: Fix incorrect function name in pscsi_create_type_disk()
scsi: ufs: Replace deprecated PCI functions
scsi: Switch back to struct platform_driver::remove()
scsi: pm8001: Increase request sg length to support 4MiB requests
scsi: pm8001: Initialize devices in pm8001_alloc_dev()
scsi: pm8001: Use module param to set pcs event log severity
scsi: ufs: ufs-mediatek: Configure individual LU queue flags
scsi: MAINTAINERS: Update UFS Exynos entry
scsi: lpfc: Copyright updates for 14.4.0.6 patches
...
Diffstat (limited to 'drivers/ufs/core')
-rw-r--r--  drivers/ufs/core/ufs-mcq.c    30
-rw-r--r--  drivers/ufs/core/ufshcd.c    571
2 files changed, 319 insertions, 282 deletions
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index dba935c712d6..240ce135bbfb 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -417,13 +417,6 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 }
 EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);
 
-void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
-{
-	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
-		      REG_UFS_MEM_CFG);
-}
-EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
-
 void ufshcd_mcq_enable(struct ufs_hba *hba)
 {
 	ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG);
@@ -437,6 +430,13 @@ void ufshcd_mcq_disable(struct ufs_hba *hba)
 	hba->mcq_enabled = false;
 }
 
+void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
+{
+	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
+		      REG_UFS_MEM_CFG);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
+
 void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
 {
 	ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
@@ -539,7 +539,7 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
 	struct scsi_cmnd *cmd = lrbp->cmd;
 	struct ufs_hw_queue *hwq;
 	void __iomem *reg, *opr_sqd_base;
-	u32 nexus, id, val, rtc;
+	u32 nexus, id, val;
 	int err;
 
 	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
@@ -573,14 +573,18 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
 		writel(readl(opr_sqd_base + REG_SQRTC) | SQ_ICU,
 		       opr_sqd_base + REG_SQRTC);
 
-		/* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
+		/* Wait until SQRTSy.CUS = 1. Report SQRTSy.RTC. */
 		reg = opr_sqd_base + REG_SQRTS;
 		err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
 					MCQ_POLL_US, false, reg);
-		rtc = FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg));
-		if (err || rtc)
-			dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d RTC=%d\n",
-				__func__, id, task_tag, err, rtc);
+		if (err)
+			dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d\n",
+				__func__, id, task_tag, err);
+		else
+			dev_info(hba->dev,
+				 "%s, hwq %d: cleanup return code (RTC) %ld\n",
+				 __func__, id,
+				 FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
 
 		if (ufshcd_mcq_sq_start(hba, hwq))
 			err = -ETIMEDOUT;
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index abbe7135a977..6a2685333076 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -298,6 +298,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
 static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params);
 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
@@ -349,18 +350,6 @@ static void ufshcd_configure_wb(struct ufs_hba *hba)
 		ufshcd_wb_toggle_buf_flush(hba, true);
 }
 
-static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
-{
-	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
-		scsi_unblock_requests(hba->host);
-}
-
-static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
-{
-	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
-		scsi_block_requests(hba->host);
-}
-
 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
 				      enum ufs_trace_str_t str_t)
 {
@@ -739,25 +728,15 @@ EXPORT_SYMBOL_GPL(ufshcd_delay_us);
  * Return: -ETIMEDOUT on error, zero on success.
*/ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, - u32 val, unsigned long interval_us, - unsigned long timeout_ms) + u32 val, unsigned long interval_us, + unsigned long timeout_ms) { - int err = 0; - unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); - - /* ignore bits that we don't intend to wait on */ - val = val & mask; + u32 v; - while ((ufshcd_readl(hba, reg) & mask) != val) { - usleep_range(interval_us, interval_us + 50); - if (time_after(jiffies, timeout)) { - if ((ufshcd_readl(hba, reg) & mask) != val) - err = -ETIMEDOUT; - break; - } - } + val &= mask; /* ignore bits that we don't intend to wait on */ - return err; + return read_poll_timeout(ufshcd_readl, v, (v & mask) == val, + interval_us, timeout_ms * 1000, false, hba, reg); } /** @@ -1255,11 +1234,13 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, static u32 ufshcd_pending_cmds(struct ufs_hba *hba) { const struct scsi_device *sdev; + unsigned long flags; u32 pending = 0; - lockdep_assert_held(hba->host->host_lock); + spin_lock_irqsave(hba->host->host_lock, flags); __shost_for_each_device(sdev, hba->host) pending += sbitmap_weight(&sdev->budget_map); + spin_unlock_irqrestore(hba->host->host_lock, flags); return pending; } @@ -1273,7 +1254,6 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba) static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us) { - unsigned long flags; int ret = 0; u32 tm_doorbell; u32 tr_pending; @@ -1281,7 +1261,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, ktime_t start; ufshcd_hold(hba); - spin_lock_irqsave(hba->host->host_lock, flags); /* * Wait for all the outstanding tasks/transfer requests. * Verify by checking the doorbell registers are clear. @@ -1302,7 +1281,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, break; } - spin_unlock_irqrestore(hba->host->host_lock, flags); io_schedule_timeout(msecs_to_jiffies(20)); if (ktime_to_us(ktime_sub(ktime_get(), start)) > wait_timeout_us) { @@ -1314,7 +1292,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, */ do_last_check = true; } - spin_lock_irqsave(hba->host->host_lock, flags); } while (tm_doorbell || tr_pending); if (timeout) { @@ -1324,7 +1301,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, ret = -EBUSY; } out: - spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_release(hba); return ret; } @@ -2411,8 +2387,6 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) int err; hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); - if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) - hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; /* nutrs and nutmrs are 0 based values */ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1; @@ -2551,13 +2525,11 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result * @hba: per adapter instance * @uic_cmd: UIC command - * @completion: initialize the completion only if this is set to true * * Return: 0 only if success. 
*/ static int -__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, - bool completion) +__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { lockdep_assert_held(&hba->uic_cmd_mutex); @@ -2567,8 +2539,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, return -EIO; } - if (completion) - init_completion(&uic_cmd->done); + init_completion(&uic_cmd->done); uic_cmd->cmd_active = 1; ufshcd_dispatch_uic_cmd(hba, uic_cmd); @@ -2594,7 +2565,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) mutex_lock(&hba->uic_cmd_mutex); ufshcd_add_delay_before_dme_cmd(hba); - ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); + ret = __ufshcd_send_uic_cmd(hba, uic_cmd); if (!ret) ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); @@ -2775,7 +2746,6 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags) ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length); cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE); - memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len); memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); @@ -2878,6 +2848,26 @@ static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); } +static void __ufshcd_setup_cmd(struct ufshcd_lrb *lrbp, struct scsi_cmnd *cmd, u8 lun, int tag) +{ + memset(lrbp->ucd_req_ptr, 0, sizeof(*lrbp->ucd_req_ptr)); + + lrbp->cmd = cmd; + lrbp->task_tag = tag; + lrbp->lun = lun; + ufshcd_prepare_lrbp_crypto(cmd ? scsi_cmd_to_rq(cmd) : NULL, lrbp); +} + +static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, + struct scsi_cmnd *cmd, u8 lun, int tag) +{ + __ufshcd_setup_cmd(lrbp, cmd, lun, tag); + lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); + lrbp->req_abort_skip = false; + + ufshcd_comp_scsi_upiu(hba, lrbp); +} + /** * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID * @upiu_wlun_id: UPIU W-LUN id @@ -3010,16 +3000,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) ufshcd_hold(hba); lrbp = &hba->lrb[tag]; - lrbp->cmd = cmd; - lrbp->task_tag = tag; - lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); - lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); - ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp); - - lrbp->req_abort_skip = false; - - ufshcd_comp_scsi_upiu(hba, lrbp); + ufshcd_setup_scsi_cmd(hba, lrbp, cmd, ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag); err = ufshcd_map_sg(hba, lrbp); if (err) { @@ -3047,11 +3029,8 @@ out: static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, u8 lun, int tag) { - lrbp->cmd = NULL; - lrbp->task_tag = tag; - lrbp->lun = lun; + __ufshcd_setup_cmd(lrbp, NULL, lun, tag); lrbp->intr_cmd = true; /* No interrupt aggregation */ - ufshcd_prepare_lrbp_crypto(NULL, lrbp); hba->dev_cmd.type = cmd_type; } @@ -3083,7 +3062,6 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd) static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) { u32 mask; - unsigned long flags; int err; if (hba->mcq_enabled) { @@ -3103,9 +3081,7 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) mask = 1U << task_tag; /* clear outstanding transaction before retry */ - spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_utrl_clear(hba, mask); - spin_unlock_irqrestore(hba->host->host_lock, flags); /* * wait for h/w to clear corresponding bit in door-bell. 
@@ -4288,7 +4264,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) reenable_intr = true; } spin_unlock_irqrestore(hba->host->host_lock, flags); - ret = __ufshcd_send_uic_cmd(hba, cmd, false); + ret = __ufshcd_send_uic_cmd(hba, cmd); if (ret) { dev_err(hba->dev, "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", @@ -4539,6 +4515,14 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) return -EINVAL; } + if (pwr_info->lane_rx != pwr_info->lane_tx) { + dev_err(hba->dev, "%s: asymmetric connected lanes. rx=%d, tx=%d\n", + __func__, + pwr_info->lane_rx, + pwr_info->lane_tx); + return -EINVAL; + } + /* * First, get the maximum gears of HS speed. * If a zero value, it means there is no HSGEAR capability. @@ -4822,51 +4806,44 @@ EXPORT_SYMBOL_GPL(ufshcd_hba_stop); */ static int ufshcd_hba_execute_hce(struct ufs_hba *hba) { - int retry_outer = 3; - int retry_inner; + int retry; -start: - if (ufshcd_is_hba_active(hba)) - /* change controller state to "reset state" */ - ufshcd_hba_stop(hba); + for (retry = 3; retry > 0; retry--) { + if (ufshcd_is_hba_active(hba)) + /* change controller state to "reset state" */ + ufshcd_hba_stop(hba); - /* UniPro link is disabled at this point */ - ufshcd_set_link_off(hba); + /* UniPro link is disabled at this point */ + ufshcd_set_link_off(hba); - ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); + ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); - /* start controller initialization sequence */ - ufshcd_hba_start(hba); + /* start controller initialization sequence */ + ufshcd_hba_start(hba); - /* - * To initialize a UFS host controller HCE bit must be set to 1. - * During initialization the HCE bit value changes from 1->0->1. - * When the host controller completes initialization sequence - * it sets the value of HCE bit to 1. The same HCE bit is read back - * to check if the controller has completed initialization sequence. - * So without this delay the value HCE = 1, set in the previous - * instruction might be read back. - * This delay can be changed based on the controller. - */ - ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); + /* + * To initialize a UFS host controller HCE bit must be set to 1. + * During initialization the HCE bit value changes from 1->0->1. + * When the host controller completes initialization sequence + * it sets the value of HCE bit to 1. The same HCE bit is read back + * to check if the controller has completed initialization sequence. + * So without this delay the value HCE = 1, set in the previous + * instruction might be read back. + * This delay can be changed based on the controller. 
+ */ + ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); - /* wait for the host controller to complete initialization */ - retry_inner = 50; - while (!ufshcd_is_hba_active(hba)) { - if (retry_inner) { - retry_inner--; - } else { - dev_err(hba->dev, - "Controller enable failed\n"); - if (retry_outer) { - retry_outer--; - goto start; - } - return -EIO; - } - usleep_range(1000, 1100); + /* wait for the host controller to complete initialization */ + if (!ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, CONTROLLER_ENABLE, + CONTROLLER_ENABLE, 1000, 50)) + break; + + dev_err(hba->dev, "Enabling the controller failed\n"); } + if (!retry) + return -EIO; + /* enable UIC related interrupts */ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); @@ -5258,6 +5235,9 @@ static int ufshcd_device_configure(struct scsi_device *sdev, */ sdev->silence_suspend = 1; + if (hba->vops && hba->vops->config_scsi_dev) + hba->vops->config_scsi_dev(sdev); + ufshcd_crypto_register(hba, q); return 0; @@ -5478,32 +5458,37 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) { irqreturn_t retval = IRQ_NONE; + struct uic_command *cmd; spin_lock(hba->host->host_lock); + cmd = hba->active_uic_cmd; + if (WARN_ON_ONCE(!cmd)) + goto unlock; + if (ufshcd_is_auto_hibern8_error(hba, intr_status)) hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); - if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { - hba->active_uic_cmd->argument2 |= - ufshcd_get_uic_cmd_result(hba); - hba->active_uic_cmd->argument3 = - ufshcd_get_dme_attr_val(hba); + if (intr_status & UIC_COMMAND_COMPL) { + cmd->argument2 |= ufshcd_get_uic_cmd_result(hba); + cmd->argument3 = ufshcd_get_dme_attr_val(hba); if (!hba->uic_async_done) - hba->active_uic_cmd->cmd_active = 0; - complete(&hba->active_uic_cmd->done); + cmd->cmd_active = 0; + complete(&cmd->done); retval = IRQ_HANDLED; } - if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { - hba->active_uic_cmd->cmd_active = 0; + if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) { + cmd->cmd_active = 0; complete(hba->uic_async_done); retval = IRQ_HANDLED; } if (retval == IRQ_HANDLED) - ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, - UFS_CMD_COMP); + ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP); + +unlock: spin_unlock(hba->host->host_lock); + return retval; } @@ -6196,12 +6181,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work) u32 status = 0; hba = container_of(work, struct ufs_hba, eeh_work); - ufshcd_scsi_block_requests(hba); err = ufshcd_get_ee_status(hba, &status); if (err) { dev_err(hba->dev, "%s: failed to get exception status %d\n", __func__, err); - goto out; + return; } trace_ufshcd_exception_event(dev_name(hba->dev), status); @@ -6213,8 +6197,6 @@ static void ufshcd_exception_event_handler(struct work_struct *work) ufshcd_temp_exception_event_handler(hba, status); ufs_debugfs_exception_event(hba, status); -out: - ufshcd_scsi_unblock_requests(hba); } /* Complete requests that have door-bell cleared */ @@ -6380,15 +6362,14 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba) ufshcd_suspend_clkscaling(hba); ufshcd_clk_scaling_allow(hba, false); } - ufshcd_scsi_block_requests(hba); /* Wait for ongoing ufshcd_queuecommand() calls to finish. 
*/ - blk_mq_wait_quiesce_done(&hba->host->tag_set); + blk_mq_quiesce_tagset(&hba->host->tag_set); cancel_work_sync(&hba->eeh_work); } static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) { - ufshcd_scsi_unblock_requests(hba); + blk_mq_unquiesce_tagset(&hba->host->tag_set); ufshcd_release(hba); if (ufshcd_is_clkscaling_supported(hba)) ufshcd_clk_scaling_suspend(hba, false); @@ -7002,14 +6983,11 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) { int err = 0; u32 mask = 1 << tag; - unsigned long flags; if (!test_bit(tag, &hba->outstanding_tasks)) goto out; - spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_utmrl_clear(hba, tag); - spin_unlock_irqrestore(hba->host->host_lock, flags); /* poll for max. 1 sec to clear door bell register by h/w */ err = ufshcd_wait_for_register(hba, @@ -7052,12 +7030,13 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); - /* send command to the controller */ __set_bit(task_tag, &hba->outstanding_tasks); - ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); spin_unlock_irqrestore(host->host_lock, flags); + /* send command to the controller */ + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); + ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND); /* wait until the task management command is completed */ @@ -7473,10 +7452,9 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) { struct ufshcd_lrb *lrbp = &hba->lrb[tag]; - int err = 0; + int err; int poll_cnt; u8 resp = 0xF; - u32 reg; for (poll_cnt = 100; poll_cnt; poll_cnt--) { err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, @@ -7491,46 +7469,27 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) * cmd not pending in the device, check if it is * in transition. */ - dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", + dev_info( + hba->dev, + "%s: cmd with tag %d not pending in the device.\n", __func__, tag); - if (hba->mcq_enabled) { - /* MCQ mode */ - if (ufshcd_cmd_inflight(lrbp->cmd)) { - /* sleep for max. 200us same delay as in SDB mode */ - usleep_range(100, 200); - continue; - } - /* command completed already */ - dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n", - __func__, tag); - goto out; - } - - /* Single Doorbell Mode */ - reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); - if (reg & (1 << tag)) { - /* sleep for max. 200us to stabilize */ - usleep_range(100, 200); - continue; + if (!ufshcd_cmd_inflight(lrbp->cmd)) { + dev_info(hba->dev, + "%s: cmd with tag=%d completed.\n", + __func__, tag); + return 0; } - /* command completed already */ - dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", - __func__, tag); - goto out; + usleep_range(100, 200); } else { dev_err(hba->dev, "%s: no response from device. tag = %d, err %d\n", __func__, tag, err); - if (!err) - err = resp; /* service response error */ - goto out; + return err ? : resp; } } - if (!poll_cnt) { - err = -EBUSY; - goto out; - } + if (!poll_cnt) + return -EBUSY; err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, UFS_ABORT_TASK, &resp); @@ -7540,7 +7499,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) dev_err(hba->dev, "%s: issued. 
tag = %d, err %d\n", __func__, tag, err); } - goto out; + return err; } err = ufshcd_clear_cmd(hba, tag); @@ -7548,7 +7507,6 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", __func__, tag, err); -out: return err; } @@ -7672,6 +7630,29 @@ release: } /** + * ufshcd_process_probe_result - Process the ufshcd_probe_hba() result. + * @hba: UFS host controller instance. + * @probe_start: time when the ufshcd_probe_hba() call started. + * @ret: ufshcd_probe_hba() return value. + */ +static void ufshcd_process_probe_result(struct ufs_hba *hba, + ktime_t probe_start, int ret) +{ + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (ret) + hba->ufshcd_state = UFSHCD_STATE_ERROR; + else if (hba->ufshcd_state == UFSHCD_STATE_RESET) + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + trace_ufshcd_init(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), probe_start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); +} + +/** * ufshcd_host_reset_and_restore - reset and restore host controller * @hba: per-adapter instance * @@ -7700,8 +7681,14 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) err = ufshcd_hba_enable(hba); /* Establish the link again and restore the device */ - if (!err) - err = ufshcd_probe_hba(hba, false); + if (!err) { + ktime_t probe_start = ktime_get(); + + err = ufshcd_device_init(hba, /*init_dev_params=*/false); + if (!err) + err = ufshcd_probe_hba(hba, false); + ufshcd_process_probe_result(hba, probe_start, err); + } if (err) dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); @@ -8727,10 +8714,43 @@ static void ufshcd_config_mcq(struct ufs_hba *hba) hba->nutrs); } +static int ufshcd_post_device_init(struct ufs_hba *hba) +{ + int ret; + + ufshcd_tune_unipro_params(hba); + + /* UFS device is also active now */ + ufshcd_set_ufs_dev_active(hba); + ufshcd_force_reset_auto_bkops(hba); + + ufshcd_set_timestamp_attr(hba); + + if (!hba->max_pwr_info.is_valid) + return 0; + + /* + * Set the right value to bRefClkFreq before attempting to + * switch to HS gears. + */ + if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) + ufshcd_set_dev_ref_clk(hba); + /* Gear up to HS gear. 
*/ + ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); + if (ret) { + dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", + __func__, ret); + return ret; + } + + return 0; +} + static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) { int ret; - struct Scsi_Host *host = hba->host; + + WARN_ON_ONCE(!hba->scsi_host_added); hba->ufshcd_state = UFSHCD_STATE_RESET; @@ -8771,56 +8791,14 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) ret = ufshcd_device_params_init(hba); if (ret) return ret; - if (is_mcq_supported(hba) && !hba->scsi_host_added) { - ufshcd_mcq_enable(hba); - ret = ufshcd_alloc_mcq(hba); - if (!ret) { - ufshcd_config_mcq(hba); - } else { - /* Continue with SDB mode */ - ufshcd_mcq_disable(hba); - use_mcq_mode = false; - dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", - ret); - } - ret = scsi_add_host(host, hba->dev); - if (ret) { - dev_err(hba->dev, "scsi_add_host failed\n"); - return ret; - } - hba->scsi_host_added = true; - } else if (is_mcq_supported(hba)) { - /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */ + if (is_mcq_supported(hba) && + hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) { ufshcd_config_mcq(hba); ufshcd_mcq_enable(hba); } } - ufshcd_tune_unipro_params(hba); - - /* UFS device is also active now */ - ufshcd_set_ufs_dev_active(hba); - ufshcd_force_reset_auto_bkops(hba); - - ufshcd_set_timestamp_attr(hba); - - /* Gear up to HS gear if supported */ - if (hba->max_pwr_info.is_valid) { - /* - * Set the right value to bRefClkFreq before attempting to - * switch to HS gears. - */ - if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) - ufshcd_set_dev_ref_clk(hba); - ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); - if (ret) { - dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", - __func__, ret); - return ret; - } - } - - return 0; + return ufshcd_post_device_init(hba); } /** @@ -8834,14 +8812,8 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) */ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) { - ktime_t start = ktime_get(); - unsigned long flags; int ret; - ret = ufshcd_device_init(hba, init_dev_params); - if (ret) - goto out; - if (!hba->pm_op_in_progress && (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) { /* Reset the device and controller before doing reinit */ @@ -8854,13 +8826,13 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) dev_err(hba->dev, "Host controller enable failed\n"); ufshcd_print_evt_hist(hba); ufshcd_print_host_state(hba); - goto out; + return ret; } /* Reinit the device */ ret = ufshcd_device_init(hba, init_dev_params); if (ret) - goto out; + return ret; } ufshcd_print_pwr_info(hba); @@ -8880,18 +8852,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) ufshcd_write_ee_control(hba); ufshcd_configure_auto_hibern8(hba); -out: - spin_lock_irqsave(hba->host->host_lock, flags); - if (ret) - hba->ufshcd_state = UFSHCD_STATE_ERROR; - else if (hba->ufshcd_state == UFSHCD_STATE_RESET) - hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; - spin_unlock_irqrestore(hba->host->host_lock, flags); - - trace_ufshcd_init(dev_name(hba->dev), ret, - ktime_to_us(ktime_sub(ktime_get(), start)), - hba->curr_dev_pwr_mode, hba->uic_link_state); - return ret; + return 0; } /** @@ -8902,11 +8863,14 @@ out: static void ufshcd_async_scan(void *data, async_cookie_t cookie) { struct ufs_hba *hba = (struct ufs_hba *)data; + ktime_t probe_start; int ret; 
down(&hba->host_sem); /* Initialize hba, detect and initialize UFS device */ + probe_start = ktime_get(); ret = ufshcd_probe_hba(hba, true); + ufshcd_process_probe_result(hba, probe_start, ret); up(&hba->host_sem); if (ret) goto out; @@ -10309,6 +10273,8 @@ EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); */ static int ufshcd_set_dma_mask(struct ufs_hba *hba) { + if (hba->vops && hba->vops->set_dma_mask) + return hba->vops->set_dma_mask(hba); if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) return 0; @@ -10372,6 +10338,74 @@ static const struct blk_mq_ops ufshcd_tmf_ops = { .queue_rq = ufshcd_queue_tmf, }; +static int ufshcd_add_scsi_host(struct ufs_hba *hba) +{ + int err; + + if (is_mcq_supported(hba)) { + ufshcd_mcq_enable(hba); + err = ufshcd_alloc_mcq(hba); + if (!err) { + ufshcd_config_mcq(hba); + } else { + /* Continue with SDB mode */ + ufshcd_mcq_disable(hba); + use_mcq_mode = false; + dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", + err); + } + } + if (!is_mcq_supported(hba) && !hba->lsdb_sup) { + dev_err(hba->dev, + "%s: failed to initialize (legacy doorbell mode not supported)\n", + __func__); + return -EINVAL; + } + + err = scsi_add_host(hba->host, hba->dev); + if (err) { + dev_err(hba->dev, "scsi_add_host failed\n"); + return err; + } + hba->scsi_host_added = true; + + hba->tmf_tag_set = (struct blk_mq_tag_set) { + .nr_hw_queues = 1, + .queue_depth = hba->nutmrs, + .ops = &ufshcd_tmf_ops, + .flags = BLK_MQ_F_NO_SCHED, + }; + err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); + if (err < 0) + goto remove_scsi_host; + hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL); + if (IS_ERR(hba->tmf_queue)) { + err = PTR_ERR(hba->tmf_queue); + goto free_tmf_tag_set; + } + hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, + sizeof(*hba->tmf_rqs), GFP_KERNEL); + if (!hba->tmf_rqs) { + err = -ENOMEM; + goto free_tmf_queue; + } + + return 0; + +free_tmf_queue: + blk_mq_destroy_queue(hba->tmf_queue); + blk_put_queue(hba->tmf_queue); + +free_tmf_tag_set: + blk_mq_free_tag_set(&hba->tmf_tag_set); + +remove_scsi_host: + if (hba->scsi_host_added) + scsi_remove_host(hba->host); + + return err; +} + /** * ufshcd_init - Driver initialization routine * @hba: per-adapter instance @@ -10503,42 +10537,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->is_irq_enabled = true; } - if (!is_mcq_supported(hba)) { - if (!hba->lsdb_sup) { - dev_err(hba->dev, "%s: failed to initialize (legacy doorbell mode not supported)\n", - __func__); - err = -EINVAL; - goto out_disable; - } - err = scsi_add_host(host, hba->dev); - if (err) { - dev_err(hba->dev, "scsi_add_host failed\n"); - goto out_disable; - } - hba->scsi_host_added = true; - } - - hba->tmf_tag_set = (struct blk_mq_tag_set) { - .nr_hw_queues = 1, - .queue_depth = hba->nutmrs, - .ops = &ufshcd_tmf_ops, - .flags = BLK_MQ_F_NO_SCHED, - }; - err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); - if (err < 0) - goto out_remove_scsi_host; - hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL); - if (IS_ERR(hba->tmf_queue)) { - err = PTR_ERR(hba->tmf_queue); - goto free_tmf_tag_set; - } - hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, - sizeof(*hba->tmf_rqs), GFP_KERNEL); - if (!hba->tmf_rqs) { - err = -ENOMEM; - goto free_tmf_queue; - } - /* Reset the attached device */ ufshcd_device_reset(hba); @@ -10550,7 +10548,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) dev_err(hba->dev, "Host controller enable 
failed\n"); ufshcd_print_evt_hist(hba); ufshcd_print_host_state(hba); - goto free_tmf_queue; + goto out_disable; } /* @@ -10576,7 +10574,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) /* Hold auto suspend until async scan completes */ pm_runtime_get_sync(dev); - atomic_set(&hba->scsi_block_reqs_cnt, 0); + /* * We are assuming that device wasn't put in sleep/power-down * state exclusively during the boot stage before kernel. @@ -10585,6 +10583,49 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) */ ufshcd_set_ufs_dev_active(hba); + /* Initialize hba, detect and initialize UFS device */ + ktime_t probe_start = ktime_get(); + + hba->ufshcd_state = UFSHCD_STATE_RESET; + + err = ufshcd_link_startup(hba); + if (err) + goto out_disable; + + if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) + goto initialized; + + /* Debug counters initialization */ + ufshcd_clear_dbg_ufs_stats(hba); + + /* UniPro link is active now */ + ufshcd_set_link_active(hba); + + /* Verify device initialization by sending NOP OUT UPIU */ + err = ufshcd_verify_dev_init(hba); + if (err) + goto out_disable; + + /* Initiate UFS initialization, and waiting until completion */ + err = ufshcd_complete_dev_init(hba); + if (err) + goto out_disable; + + err = ufshcd_device_params_init(hba); + if (err) + goto out_disable; + + err = ufshcd_post_device_init(hba); + +initialized: + ufshcd_process_probe_result(hba, probe_start, err); + if (err) + goto out_disable; + + err = ufshcd_add_scsi_host(hba); + if (err) + goto out_disable; + async_schedule(ufshcd_async_scan, hba); ufs_sysfs_add_nodes(hba->dev); @@ -10592,14 +10633,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ufshcd_pm_qos_init(hba); return 0; -free_tmf_queue: - blk_mq_destroy_queue(hba->tmf_queue); - blk_put_queue(hba->tmf_queue); -free_tmf_tag_set: - blk_mq_free_tag_set(&hba->tmf_tag_set); -out_remove_scsi_host: - if (hba->scsi_host_added) - scsi_remove_host(hba->host); out_disable: hba->is_irq_enabled = false; ufshcd_hba_exit(hba); |