Diffstat (limited to 'include/ufs/ufshcd.h')
 include/ufs/ufshcd.h | 63 +++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 41 insertions(+), 22 deletions(-)
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index d650ae6b58d3..e3909cc691b2 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -326,10 +326,8 @@ struct ufs_pwr_mode_info {
* @phy_initialization: used to initialize phys
* @device_reset: called to issue a reset pulse on the UFS device
* @config_scaling_param: called to configure clock scaling parameters
- * @program_key: program or evict an inline encryption key
* @fill_crypto_prdt: initialize crypto-related fields in the PRDT
* @event_notify: called to notify important events
- * @reinit_notify: called to notify reinit of UFSHCD during max gear switch
* @mcq_config_resource: called to configure MCQ platform resources
* @get_hba_mac: reports maximum number of outstanding commands supported by
* the controller. Should be implemented for UFSHCI 4.0 or later
@@ -338,6 +336,7 @@ struct ufs_pwr_mode_info {
* @get_outstanding_cqs: called to get outstanding completion queues
* @config_esi: called to config Event Specific Interrupt
* @config_scsi_dev: called to configure SCSI device parameters
+ * @freq_to_gear_speed: called to map clock frequency to the max supported gear speed
*/
struct ufs_hba_variant_ops {
const char *name;
@@ -346,8 +345,8 @@ struct ufs_hba_variant_ops {
void (*exit)(struct ufs_hba *);
u32 (*get_ufs_hci_version)(struct ufs_hba *);
int (*set_dma_mask)(struct ufs_hba *);
- int (*clk_scale_notify)(struct ufs_hba *, bool,
- enum ufs_notify_change_status);
+ int (*clk_scale_notify)(struct ufs_hba *, bool, unsigned long,
+ enum ufs_notify_change_status);
int (*setup_clocks)(struct ufs_hba *, bool,
enum ufs_notify_change_status);
int (*hce_enable_notify)(struct ufs_hba *,
@@ -355,9 +354,9 @@ struct ufs_hba_variant_ops {
int (*link_startup_notify)(struct ufs_hba *,
enum ufs_notify_change_status);
int (*pwr_change_notify)(struct ufs_hba *,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *desired_pwr_mode,
- struct ufs_pa_layer_attr *final_params);
+ enum ufs_notify_change_status status,
+ const struct ufs_pa_layer_attr *desired_pwr_mode,
+ struct ufs_pa_layer_attr *final_params);
void (*setup_xfer_req)(struct ufs_hba *hba, int tag,
bool is_scsi_cmd);
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
@@ -374,14 +373,11 @@ struct ufs_hba_variant_ops {
void (*config_scaling_param)(struct ufs_hba *hba,
struct devfreq_dev_profile *profile,
struct devfreq_simple_ondemand_data *data);
- int (*program_key)(struct ufs_hba *hba,
- const union ufs_crypto_cfg_entry *cfg, int slot);
int (*fill_crypto_prdt)(struct ufs_hba *hba,
const struct bio_crypt_ctx *crypt_ctx,
void *prdt, unsigned int num_segments);
void (*event_notify)(struct ufs_hba *hba,
enum ufs_event_type evt, void *data);
- void (*reinit_notify)(struct ufs_hba *);
int (*mcq_config_resource)(struct ufs_hba *hba);
int (*get_hba_mac)(struct ufs_hba *hba);
int (*op_runtime_config)(struct ufs_hba *hba);
@@ -389,6 +385,7 @@ struct ufs_hba_variant_ops {
unsigned long *ocqs);
int (*config_esi)(struct ufs_hba *hba);
void (*config_scsi_dev)(struct scsi_device *sdev);
+ u32 (*freq_to_gear_speed)(struct ufs_hba *hba, unsigned long freq);
};
/* clock gating state */
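The variant-ops hunks above rework the scaling-related callbacks: clk_scale_notify() gains a target frequency argument, pwr_change_notify() now takes the desired power mode as const, program_key and reinit_notify are dropped, and the new freq_to_gear_speed() hook maps a clock frequency to the fastest gear that frequency can sustain. Below is a minimal sketch of how a variant driver might wire up the two frequency-aware callbacks; the driver name, frequency thresholds, and gear choices are illustrative only, not taken from any real controller:

    #include <ufs/ufshcd.h>
    #include <ufs/unipro.h>

    /* Illustrative mapping: pick the highest gear the clock rate sustains. */
    static u32 example_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
    {
            if (freq >= 300000000)          /* made-up threshold */
                    return UFS_HS_G4;
            return UFS_HS_G3;
    }

    static int example_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
                                        unsigned long target_freq,
                                        enum ufs_notify_change_status status)
    {
            /*
             * The new target_freq argument lets a PRE_CHANGE handler program
             * clocks for the exact frequency devfreq selected, rather than
             * inferring it from the scale_up flag alone.
             */
            if (status == PRE_CHANGE)
                    dev_dbg(hba->dev, "scaling %s to %lu Hz\n",
                            scale_up ? "up" : "down", target_freq);
            return 0;
    }

    static const struct ufs_hba_variant_ops example_vops = {
            .name                   = "example",
            .clk_scale_notify       = example_clk_scale_notify,
            .freq_to_gear_speed     = example_freq_to_gear_speed,
    };
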
@@ -405,6 +402,9 @@ enum clk_gating_state {
* delay_ms
* @ungate_work: worker to turn on clocks that will be used in case of
* interrupt context
+ * @clk_gating_workq: workqueue for clock gating work.
+ * @lock: serialize access to some struct ufs_clk_gating members. An outer lock
+ * relative to the host lock
* @state: the current clocks state
* @delay_ms: gating delay in ms
* @is_suspended: clk gating is suspended when set to 1 which can be used
@@ -415,11 +415,14 @@ enum clk_gating_state {
* @is_initialized: Indicates whether clock gating is initialized or not
* @active_reqs: number of requests that are pending and should be waited for
* completion before gating clocks.
- * @clk_gating_workq: workqueue for clock gating work.
*/
struct ufs_clk_gating {
struct delayed_work gate_work;
struct work_struct ungate_work;
+ struct workqueue_struct *clk_gating_workq;
+
+ spinlock_t lock;
+
enum clk_gating_state state;
unsigned long delay_ms;
bool is_suspended;
@@ -428,11 +431,14 @@ struct ufs_clk_gating {
bool is_enabled;
bool is_initialized;
int active_reqs;
- struct workqueue_struct *clk_gating_workq;
};
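struct ufs_clk_gating now embeds its own spinlock (and moves the workqueue pointer up alongside the work items); per the kerneldoc above, the lock serializes the gating members and nests outside the host lock. A hedged sketch of the access pattern this implies; the helper itself is hypothetical:

    #include <linux/spinlock.h>
    #include <ufs/ufshcd.h>

    /*
     * Hypothetical helper: clk_gating.lock (not the SCSI host lock) now
     * guards state and active_reqs.
     */
    static bool example_clk_gating_busy(struct ufs_hba *hba)
    {
            unsigned long flags;
            bool busy;

            spin_lock_irqsave(&hba->clk_gating.lock, flags);
            busy = hba->clk_gating.active_reqs > 0 ||
                   hba->clk_gating.state != CLKS_ON;
            spin_unlock_irqrestore(&hba->clk_gating.lock, flags);

            return busy;
    }
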
/**
* struct ufs_clk_scaling - UFS clock scaling related data
+ * @workq: workqueue to schedule devfreq suspend/resume work
+ * @suspend_work: worker to suspend devfreq
+ * @resume_work: worker to resume devfreq
+ * @lock: serialize access to some struct ufs_clk_scaling members
* @active_reqs: number of requests that are pending. If this is zero when
* devfreq ->target() function is called then schedule "suspend_work" to
* suspend devfreq.
@@ -442,11 +448,10 @@ struct ufs_clk_gating {
* @enable_attr: sysfs attribute to enable/disable clock scaling
* @saved_pwr_info: UFS power mode may also be changed during scaling and this
* one keeps track of previous power mode.
- * @workq: workqueue to schedule devfreq suspend/resume work
- * @suspend_work: worker to suspend devfreq
- * @resume_work: worker to resume devfreq
* @target_freq: frequency requested by devfreq framework
* @min_gear: lowest HS gear to scale down to
+ * @wb_gear: enable Write Booster when the HS gear scales to this value or
+ * above, otherwise disable Write Booster
* @is_enabled: tracks if scaling is currently enabled or not, controlled by
* clkscale_enable sysfs node
* @is_allowed: tracks if scaling is currently allowed or not, used to block
@@ -456,17 +461,21 @@ struct ufs_clk_gating {
* @is_suspended: tracks if devfreq is suspended or not
*/
struct ufs_clk_scaling {
+ struct workqueue_struct *workq;
+ struct work_struct suspend_work;
+ struct work_struct resume_work;
+
+ spinlock_t lock;
+
int active_reqs;
unsigned long tot_busy_t;
ktime_t window_start_t;
ktime_t busy_start_t;
struct device_attribute enable_attr;
struct ufs_pa_layer_attr saved_pwr_info;
- struct workqueue_struct *workq;
- struct work_struct suspend_work;
- struct work_struct resume_work;
unsigned long target_freq;
u32 min_gear;
+ u32 wb_gear;
bool is_enabled;
bool is_allowed;
bool is_initialized;
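Besides gaining its own lock, struct ufs_clk_scaling grows a wb_gear threshold: Write Booster is meant to be on only while the scaled gear is at or above it. The decision the field encodes, as a hypothetical helper (ufshcd_wb_toggle() is the existing Write Booster control API declared in this header):

    #include <ufs/ufshcd.h>

    /* Hypothetical: toggle Write Booster based on the gear being scaled to. */
    static int example_scale_wb(struct ufs_hba *hba, u32 target_gear)
    {
            return ufshcd_wb_toggle(hba, target_gear >= hba->clk_scaling.wb_gear);
    }
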
@@ -948,7 +957,6 @@ enum ufshcd_mcq_opr {
* @nr_queues: number of Queues of different queue types
* @complete_put: whether or not to call ufshcd_rpm_put() from inside
* ufshcd_resume_complete()
- * @ext_iid_sup: is EXT_IID is supported by UFSHC
* @mcq_sup: is mcq supported by UFSHC
* @mcq_enabled: is mcq ready to accept requests
* @res: array of resource info of MCQ registers
@@ -959,6 +967,7 @@ enum ufshcd_mcq_opr {
* @ufs_rtc_update_work: A work for UFS RTC periodic update
* @pm_qos_req: PM QoS request handle
* @pm_qos_enabled: flag to check if pm qos is enabled
+ * @critical_health_count: count of critical health exceptions
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -1114,7 +1123,6 @@ struct ufs_hba {
unsigned int nr_hw_queues;
unsigned int nr_queues[HCTX_MAX_TYPES];
bool complete_put;
- bool ext_iid_sup;
bool scsi_host_added;
bool mcq_sup;
bool lsdb_sup;
@@ -1128,6 +1136,8 @@ struct ufs_hba {
struct delayed_work ufs_rtc_update_work;
struct pm_qos_request pm_qos_req;
bool pm_qos_enabled;
+
+ int critical_health_count;
};
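The new critical_health_count presumably increments each time the device raises a critical-health exception, giving user space a change counter to watch. A loose sketch of that bookkeeping; the handler and the sysfs attribute name are assumptions, not taken from this diff:

    #include <linux/sysfs.h>
    #include <ufs/ufshcd.h>

    /* Hypothetical exception-event path; the "critical_health" node is assumed. */
    static void example_handle_critical_health(struct ufs_hba *hba)
    {
            hba->critical_health_count++;
            sysfs_notify(&hba->dev->kobj, NULL, "critical_health");
    }
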
/**
@@ -1204,6 +1214,14 @@ static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); })
#endif
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+static inline struct ufs_hba *
+ufs_hba_from_crypto_profile(struct blk_crypto_profile *profile)
+{
+ return container_of(profile, struct ufs_hba, crypto_profile);
+}
+#endif
+
static inline size_t ufshcd_get_ucd_size(const struct ufs_hba *hba)
{
return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba);
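ufs_hba_from_crypto_profile() is a plain container_of() wrapper: given the blk_crypto_profile embedded in struct ufs_hba, it recovers the owning hba. That is exactly the shape a blk_crypto_ll_ops callback needs, since those callbacks receive only the profile. A sketch of a hypothetical keyslot-program callback built on it:

    #include <linux/blk-crypto-profile.h>
    #include <ufs/ufshcd.h>

    /* Hypothetical blk_crypto_ll_ops callback using the new helper. */
    static int example_keyslot_program(struct blk_crypto_profile *profile,
                                       const struct blk_crypto_key *key,
                                       unsigned int slot)
    {
            struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);

            dev_dbg(hba->dev, "programming keyslot %u\n", slot);
            /* ... write the key material into the controller here ... */
            return 0;
    }
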
@@ -1299,7 +1317,6 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
void ufshcd_enable_irq(struct ufs_hba *hba);
void ufshcd_disable_irq(struct ufs_hba *hba);
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
-void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
@@ -1359,6 +1376,8 @@ extern int ufshcd_system_thaw(struct device *dev);
extern int ufshcd_system_restore(struct device *dev);
#endif
+extern int ufshcd_dme_reset(struct ufs_hba *hba);
+extern int ufshcd_dme_enable(struct ufs_hba *hba);
extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
int agreed_gear,
int adapt_val);
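ufshcd_dme_reset() and ufshcd_dme_enable() issue the DME_RESET and DME_ENABLE UIC commands on the local UniPro stack; exporting them lets a variant driver re-run that part of controller initialization itself. A minimal, hypothetical use:

    #include <ufs/ufshcd.h>

    /* Hypothetical variant hook: reset and re-enable the local UniPro layer. */
    static int example_reinit_unipro(struct ufs_hba *hba)
    {
            int ret;

            ret = ufshcd_dme_reset(hba);
            if (ret)
                    return ret;

            return ufshcd_dme_enable(hba);
    }
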
@@ -1416,7 +1435,7 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
-static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
+static inline bool ufshcd_is_hs_mode(const struct ufs_pa_layer_attr *pwr_info)
{
return (pwr_info->pwr_rx == FAST_MODE ||
pwr_info->pwr_rx == FASTAUTO_MODE) &&