/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Universal Flash Storage Host controller driver
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#ifndef _UFSHCD_H
#define _UFSHCD_H

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
#include <linux/bitfield.h>
#include <linux/devfreq.h>
#include <linux/keyslot-manager.h>
#include "unipro.h"

#include <asm/irq.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>

#include "ufs.h"
#include "ufs_quirks.h"
#include "ufshci.h"

#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"

struct ufs_hba;

enum dev_cmd_type {
	DEV_CMD_TYPE_NOP	= 0x0,
	DEV_CMD_TYPE_QUERY	= 0x1,
};

/**
 * struct uic_command - UIC command structure
 * @command: UIC command
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @done: UIC command completion
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	struct completion done;
};

/* Used to differentiate the power management options */
enum ufs_pm_op {
	UFS_RUNTIME_PM,
	UFS_SYSTEM_PM,
	UFS_SHUTDOWN_PM,
};

#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)

/* Host <-> Device UniPro Link state */
enum uic_link_state {
	UIC_LINK_OFF_STATE	= 0, /* Link powered down or disabled */
	UIC_LINK_ACTIVE_STATE	= 1, /* Link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE	= 2, /* Link is in Hibernate state */
};

#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
				    UIC_LINK_HIBERN8_STATE)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

/*
 * UFS Power management levels.
 * Each level is in increasing order of power savings.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */
	UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */
	UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */
	UFS_PM_LVL_MAX
};

struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};

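/*
 * Illustrative sketch (not part of this header): the ufs_pm_level comments
 * above imply a lookup table built on struct ufs_pm_lvl_states. The core
 * driver keeps such a table internally; a minimal version, assuming the
 * ufs_dev_pwr_mode values from ufs.h, would look like:
 *
 *	static const struct ufs_pm_lvl_states ufs_pm_lvl_states_table[] = {
 *		{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},     // LVL_0
 *		{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},    // LVL_1
 *		{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},      // LVL_2
 *		{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},     // LVL_3
 *		{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE}, // LVL_4
 *		{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},     // LVL_5
 *	};
 */
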
/**
 * struct ufshcd_lrb - local reference block
 * @utr_descriptor_ptr: UTRD address of the command
 * @ucd_req_ptr: UCD address of the command
 * @ucd_rsp_ptr: Response UPIU address for this command
 * @ucd_prdt_ptr: PRDT address of the command
 * @utrd_dma_addr: UTRD dma address for debug
 * @ucd_prdt_dma_addr: PRDT dma address for debug
 * @ucd_rsp_dma_addr: UPIU response dma address for debug
 * @ucd_req_dma_addr: UPIU request dma address for debug
 * @cmd: pointer to SCSI command
 * @sense_buffer: pointer to sense buffer address of the SCSI command
 * @sense_bufflen: Length of the sense buffer
 * @scsi_status: SCSI status of the command
 * @command_type: SCSI, UFS, Query.
 * @task_tag: Task tag of the command
 * @lun: LUN of the command
 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
 * @issue_time_stamp: time stamp for debug purposes
 * @compl_time_stamp: time stamp for statistics
 * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
 * @data_unit_num: the data unit number for the first block for inline crypto
 * @req_abort_skip: skip request abort task flag
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	dma_addr_t utrd_dma_addr;
	dma_addr_t ucd_req_dma_addr;
	dma_addr_t ucd_rsp_dma_addr;
	dma_addr_t ucd_prdt_dma_addr;

	struct scsi_cmnd *cmd;
	u8 *sense_buffer;
	unsigned int sense_bufflen;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun; /* UPIU LUN id field is only 8-bit wide */
	bool intr_cmd;
	ktime_t issue_time_stamp;
	ktime_t compl_time_stamp;
#ifdef CONFIG_SCSI_UFS_CRYPTO
	int crypto_key_slot;
	u64 data_unit_num;
#endif

	bool req_abort_skip;
};

/**
 * struct ufs_query - holds relevant data structures for query request
 * @request: request upiu and function
 * @descriptor: buffer for sending/receiving descriptor
 * @response: response upiu and response
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};

/**
 * struct ufs_dev_cmd - all fields associated with device management commands
 * @type: device management command type - Query, NOP OUT
 * @lock: lock to allow one command at a time
 * @complete: internal commands completion
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	struct ufs_query query;
};

/**
 * struct ufs_clk_info - UFS clock related info
 * @list: list headed by hba->clk_list_head
 * @clk: clock node
 * @name: clock name
 * @max_freq: maximum frequency supported by the clock
 * @min_freq: min frequency that can be used for clock scaling
 * @curr_freq: the frequency this clock is currently set to
 * @enabled: variable to check against multiple enable/disable
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool enabled;
};

enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};

struct ufs_pa_layer_attr {
	u32 gear_rx;
	u32 gear_tx;
	u32 lane_rx;
	u32 lane_tx;
	u32 pwr_rx;
	u32 pwr_tx;
	u32 hs_rate;
};

struct ufs_pwr_mode_info {
	bool is_valid;
	struct ufs_pa_layer_attr info;
};

/**
 * struct ufs_hba_variant_ops - variant specific callbacks
 * @name: variant name
 * @init: called when the driver is initialized
 * @exit: called to cleanup everything done in init
 * @get_ufs_hci_version: called to get UFS HCI version
 * @clk_scale_notify: notifies that clks are scaled up/down
 * @setup_clocks: called before touching any of the controller registers
 * @setup_regulators: called before accessing the host controller
 * @hce_enable_notify: called before and after HCE enable bit is set to allow
 *                     variant specific Uni-Pro initialization.
 * @link_startup_notify: called before and after Link startup is carried out
 *                       to allow variant specific Uni-Pro initialization.
 * @pwr_change_notify: called before and after a power mode change
 *                     is carried out to allow vendor specific capabilities
 *                     to be set.
 * @setup_xfer_req: called before any transfer request is issued
 *                  to allow variant specific configuration
 * @setup_task_mgmt: called before any task management request is issued
 *                   to allow variant specific configuration
 * @hibern8_notify: called around hibern8 enter/exit
 * @apply_dev_quirks: called to apply device specific quirks
 * @fixup_dev_quirks: called to modify device specific quirks
 * @suspend: called during host controller PM callback
 * @resume: called during host controller PM callback
 * @dbg_register_dump: used to dump controller debug information
 * @phy_initialization: used to initialize phys
 * @device_reset: called to issue a reset pulse on the UFS device
 * @config_scaling_param: called to configure clock scaling parameters
 * @program_key: program or evict an inline encryption key
 * @fill_prdt: called after initializing the standard PRDT fields so that any
 *	       variant-specific PRDT fields can be initialized too
 */
struct ufs_hba_variant_ops {
	const char *name;
	int (*init)(struct ufs_hba *);
	void (*exit)(struct ufs_hba *);
	u32 (*get_ufs_hci_version)(struct ufs_hba *);
	int (*clk_scale_notify)(struct ufs_hba *, bool,
				enum ufs_notify_change_status);
	int (*setup_clocks)(struct ufs_hba *, bool,
			    enum ufs_notify_change_status);
	int (*setup_regulators)(struct ufs_hba *, bool);
	int (*hce_enable_notify)(struct ufs_hba *,
				 enum ufs_notify_change_status);
	int (*link_startup_notify)(struct ufs_hba *,
				   enum ufs_notify_change_status);
	int (*pwr_change_notify)(struct ufs_hba *,
				 enum ufs_notify_change_status status,
				 struct ufs_pa_layer_attr *,
				 struct ufs_pa_layer_attr *);
	void (*setup_xfer_req)(struct ufs_hba *, int, bool);
	void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
	void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
			       enum ufs_notify_change_status);
	int (*apply_dev_quirks)(struct ufs_hba *hba);
	void (*fixup_dev_quirks)(struct ufs_hba *hba);
	int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
	int (*resume)(struct ufs_hba *, enum ufs_pm_op);
	void (*dbg_register_dump)(struct ufs_hba *hba);
	int (*phy_initialization)(struct ufs_hba *);
	void (*device_reset)(struct ufs_hba *hba);
	void (*config_scaling_param)(struct ufs_hba *hba,
				     struct devfreq_dev_profile *profile,
				     void *data);
	int (*program_key)(struct ufs_hba *hba,
			   const union ufs_crypto_cfg_entry *cfg, int slot);
	int (*fill_prdt)(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
			 unsigned int segments);
};

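/*
 * Illustrative sketch (not part of this header): a platform glue driver
 * typically provides a static instance of these ops and hands it to the core
 * at probe time. The "foo" names below are hypothetical:
 *
 *	static int foo_hba_init(struct ufs_hba *hba)
 *	{
 *		// e.g. acquire vendor resources, set hba->quirks and hba->caps
 *		return 0;
 *	}
 *
 *	static const struct ufs_hba_variant_ops foo_hba_vops = {
 *		.name			= "foo",
 *		.init			= foo_hba_init,
 *		.setup_clocks		= foo_setup_clocks,
 *		.link_startup_notify	= foo_link_startup_notify,
 *	};
 *
 * Only the callbacks a variant actually needs have to be populated; the core
 * driver checks each pointer before invoking it.
 */
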
/* clock gating state */
enum clk_gating_state {
	CLKS_OFF,
	CLKS_ON,
	REQ_CLKS_OFF,
	REQ_CLKS_ON,
};

/**
 * struct ufs_clk_gating - UFS clock gating related info
 * @gate_work: worker to turn off clocks after some delay as specified in
 * delay_ms
 * @ungate_work: worker to turn on clocks that will be used in case of
 * interrupt context
 * @state: the current clocks state
 * @delay_ms: gating delay in ms
 * @is_suspended: clk gating is suspended when set to 1 which can be used
 * during suspend/resume
 * @delay_attr: sysfs attribute to control delay_ms
 * @enable_attr: sysfs attribute to enable/disable clock gating
 * @is_enabled: Indicates the current status of clock gating
 * @active_reqs: number of requests that are pending and should be waited for
 * completion before gating clocks.
 * @clk_gating_workq: workqueue to run the clock gating work items
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	struct device_attribute enable_attr;
	bool is_enabled;
	int active_reqs;
	struct workqueue_struct *clk_gating_workq;
};

struct ufs_saved_pwr_info {
	struct ufs_pa_layer_attr info;
	bool is_valid;
};

/**
 * struct ufs_clk_scaling - UFS clock scaling related data
 * @active_reqs: number of requests that are pending. If this is zero when
 * devfreq ->target() function is called then schedule "suspend_work" to
 * suspend devfreq.
 * @tot_busy_t: Total busy time in current polling window
 * @window_start_t: Start time of the current polling window
 * @busy_start_t: Start time of current busy period
 * @enable_attr: sysfs attribute to enable/disable clock scaling
 * @saved_pwr_info: UFS power mode may also be changed during scaling and this
 * one keeps track of previous power mode.
 * @workq: workqueue to schedule devfreq suspend/resume work
 * @suspend_work: worker to suspend devfreq
 * @resume_work: worker to resume devfreq
 * @is_allowed: tracks if scaling is currently allowed or not
 * @is_busy_started: tracks if busy period has started or not
 * @is_suspended: tracks if devfreq is suspended or not
 */
struct ufs_clk_scaling {
	int active_reqs;
	unsigned long tot_busy_t;
	ktime_t window_start_t;
	ktime_t busy_start_t;
	struct device_attribute enable_attr;
	struct ufs_saved_pwr_info saved_pwr_info;
	struct workqueue_struct *workq;
	struct work_struct suspend_work;
	struct work_struct resume_work;
	bool is_allowed;
	bool is_busy_started;
	bool is_suspended;
};

#define UFS_ERR_REG_HIST_LENGTH 8
/**
 * struct ufs_err_reg_hist - keeps history of errors
 * @pos: index to indicate cyclic buffer position
 * @reg: cyclic buffer for registers value
 * @tstamp: cyclic buffer for time stamp
 */
struct ufs_err_reg_hist {
	int pos;
	u32 reg[UFS_ERR_REG_HIST_LENGTH];
	ktime_t tstamp[UFS_ERR_REG_HIST_LENGTH];
};

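/*
 * Illustrative sketch (not part of this header): ufshcd_update_reg_hist()
 * (declared further down) records one entry per event, roughly:
 *
 *	reg_hist->reg[reg_hist->pos] = reg;
 *	reg_hist->tstamp[reg_hist->pos] = ktime_get();
 *	reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
 *
 * so each history keeps the last UFS_ERR_REG_HIST_LENGTH register values in a
 * cyclic buffer together with their timestamps.
 */
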
/**
 * struct ufs_stats - keeps usage/err statistics
 * @last_intr_status: record the last interrupt status.
 * @last_intr_ts: record the last interrupt timestamp.
 * @hibern8_exit_cnt: Counter to keep track of number of exits,
 *		reset this after link-startup.
 * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
 *		Clear after the first successful command completion.
 * @pa_err: tracks pa-uic errors
 * @dl_err: tracks dl-uic errors
 * @nl_err: tracks nl-uic errors
 * @tl_err: tracks tl-uic errors
 * @dme_err: tracks dme errors
 * @auto_hibern8_err: tracks auto-hibernate errors
 * @fatal_err: tracks fatal errors
 * @link_startup_err: tracks link-startup errors
 * @resume_err: tracks resume errors
 * @suspend_err: tracks suspend errors
 * @dev_reset: tracks device reset events
 * @host_reset: tracks host reset events
 * @task_abort: tracks task abort events
 */
struct ufs_stats {
	u32 last_intr_status;
	ktime_t last_intr_ts;

	u32 hibern8_exit_cnt;
	ktime_t last_hibern8_exit_tstamp;

	/* uic specific errors */
	struct ufs_err_reg_hist pa_err;
	struct ufs_err_reg_hist dl_err;
	struct ufs_err_reg_hist nl_err;
	struct ufs_err_reg_hist tl_err;
	struct ufs_err_reg_hist dme_err;

	/* fatal errors */
	struct ufs_err_reg_hist auto_hibern8_err;
	struct ufs_err_reg_hist fatal_err;
	struct ufs_err_reg_hist link_startup_err;
	struct ufs_err_reg_hist resume_err;
	struct ufs_err_reg_hist suspend_err;

	/* abnormal events */
	struct ufs_err_reg_hist dev_reset;
	struct ufs_err_reg_hist host_reset;
	struct ufs_err_reg_hist task_abort;
};

enum ufshcd_quirks {
	/* Interrupt aggregation support is broken */
	UFSHCD_QUIRK_BROKEN_INTR_AGGR			= 1 << 0,

	/*
	 * delay before each dme command is required as the unipro
	 * layer has shown instabilities
	 */
	UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS		= 1 << 1,

	/*
	 * If UFS host controller is having issue in processing LCC (Line
	 * Control Command) coming from device then enable this quirk.
	 * When this quirk is enabled, host controller driver should disable
	 * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
	 * attribute of device to 0).
	 */
	UFSHCD_QUIRK_BROKEN_LCC				= 1 << 2,

	/*
	 * The attribute PA_RXHSUNTERMCAP specifies whether or not the
	 * inbound Link supports unterminated line in HS mode. Setting this
	 * attribute to 1 fixes moving to HS gear.
	 */
	UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP		= 1 << 3,

	/*
	 * This quirk needs to be enabled if the host controller only allows
	 * accessing the peer dme attributes in AUTO mode (FAST AUTO or
	 * SLOW AUTO).
	 */
	UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE		= 1 << 4,

	/*
	 * This quirk needs to be enabled if the host controller doesn't
	 * advertise the correct version in UFS_VER register. If this quirk
	 * is enabled, standard UFS host driver will call the vendor specific
	 * ops (get_ufs_hci_version) to get the correct version.
	 */
	UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		= 1 << 5,

	/*
	 * Clear handling for the transfer/task request list registers is
	 * inverted (opposite of the UFSHCI specification).
	 */
	UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR		= 1 << 6,

	/*
	 * This quirk needs to be enabled if the host controller doesn't allow
	 * the interrupt aggregation timer and counter to be reset by s/w.
	 */
	UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR		= 1 << 7,

	/*
	 * This quirk needs to be enabled if the host controller cannot be
	 * enabled via the HCE register.
	 */
	UFSHCI_QUIRK_BROKEN_HCE				= 1 << 8,

	/*
	 * This quirk needs to be enabled if the host controller interprets
	 * the values of PRDTO and PRDTL in the UTRD with byte granularity.
	 */
	UFSHCD_QUIRK_PRDT_BYTE_GRAN			= 1 << 9,

	/*
	 * This quirk needs to be enabled if the host controller reports
	 * OCS FATAL ERROR with device error through sense data
	 */
	UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR		= 1 << 10,

	/*
	 * This quirk needs to be enabled if the host controller has
	 * auto-hibernate capability but it doesn't work.
	 */
	UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8		= 1 << 11,

	/*
	 * This quirk needs to be enabled if the host controller supports inline
	 * encryption, but it uses a nonstandard mechanism where the standard
	 * crypto registers aren't used and there is no concept of keyslots.
	 * ufs_hba_variant_ops::init() is expected to initialize ufs_hba::ksm as
	 * a passthrough keyslot manager.
	 */
	UFSHCD_QUIRK_NO_KEYSLOTS			= 1 << 12,

	/*
	 * This quirk needs to be enabled if the host controller requires that
	 * the PRDT be cleared after each encrypted request because encryption
	 * keys were stored in it.
	 */
	UFSHCD_QUIRK_KEYS_IN_PRDT			= 1 << 13,
};

enum ufshcd_caps {
	/* Allow dynamic clk gating */
	UFSHCD_CAP_CLK_GATING				= 1 << 0,

	/* Allow hibern8 with clk gating */
	UFSHCD_CAP_HIBERN8_WITH_CLK_GATING		= 1 << 1,

	/* Allow dynamic clk scaling */
	UFSHCD_CAP_CLK_SCALING				= 1 << 2,

	/* Allow auto bkops to be enabled during runtime suspend */
	UFSHCD_CAP_AUTO_BKOPS_SUSPEND			= 1 << 3,

	/*
	 * This capability allows host controller driver to use the UFS HCI's
	 * interrupt aggregation capability.
	 * CAUTION: Enabling this might reduce overall UFS throughput.
	 */
	UFSHCD_CAP_INTR_AGGR				= 1 << 4,

	/*
	 * This capability allows the device auto-bkops to be always enabled
	 * except during suspend (both runtime and system suspend).
	 * Enabling this capability means that device will always be allowed
	 * to do background operation when it's active but it might degrade
	 * the performance of ongoing read/write operations.
	 */
	UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND	= 1 << 5,

	/*
	 * This capability allows host controller driver to automatically
	 * enable runtime power management by itself instead of waiting
	 * for userspace to control the power management.
	 */
	UFSHCD_CAP_RPM_AUTOSUSPEND			= 1 << 6,

	/*
	 * This capability allows the host controller driver to turn on
	 * WriteBooster, if the underlying device supports it and is
	 * provisioned to be used. This would increase the write performance.
	 */
	UFSHCD_CAP_WB_EN				= 1 << 7,

	/*
	 * This capability allows the host controller driver to use the
	 * inline crypto engine, if it is present
	 */
	UFSHCD_CAP_CRYPTO				= 1 << 8,
};

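/*
 * Illustrative sketch (not part of this header): both ufshcd_quirks and
 * ufshcd_caps are plain bit masks, so a variant driver normally ORs them into
 * hba->quirks and hba->caps from its ->init() callback, e.g. (hypothetical
 * "foo" variant):
 *
 *	static int foo_hba_init(struct ufs_hba *hba)
 *	{
 *		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
 *		hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_WB_EN;
 *		return 0;
 *	}
 */
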
struct ufs_hba_variant_params {
	struct devfreq_dev_profile devfreq_profile;
	struct devfreq_simple_ondemand_data ondemand_data;
	u16 hba_enable_delay_us;
	u32 wb_flush_threshold;
};

/**
 * struct ufs_hba - per adapter private structure
 * @mmio_base: UFSHCI base register address
 * @ucdl_base_addr: UFS Command Descriptor base address
 * @utrdl_base_addr: UTP Transfer Request Descriptor base address
 * @utmrdl_base_addr: UTP Task Management Descriptor base address
 * @ucdl_dma_addr: UFS Command Descriptor DMA address
 * @utrdl_dma_addr: UTRDL DMA address
 * @utmrdl_dma_addr: UTMRDL DMA address
 * @host: Scsi_Host instance of the driver
 * @dev: device handle
 * @lrb: local reference block
 * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
 * @outstanding_tasks: Bits representing outstanding task requests
 * @outstanding_reqs: Bits representing outstanding transfer requests
 * @capabilities: UFS Controller Capabilities
 * @nutrs: Transfer Request Queue depth supported by controller
 * @nutmrs: Task Management Queue depth supported by controller
 * @ufs_version: UFS Version to which controller complies
 * @vops: pointer to variant specific operations
 * @priv: pointer to variant specific private data
 * @sg_entry_size: size of struct ufshcd_sg_entry (may include variant fields)
 * @irq: Irq number of the controller
 * @active_uic_cmd: handle of active UIC command
 * @uic_cmd_mutex: mutex for uic command
 * @tmf_tag_set: TMF tag set.
 * @tmf_queue: Used to allocate TMF tags.
 * @pwr_done: completion for power mode change
 * @ufshcd_state: UFSHCD states
 * @eh_flags: Error handling flags
 * @intr_mask: Interrupt Mask Bits
 * @ee_ctrl_mask: Exception event control mask
 * @is_powered: flag to check if HBA is powered
 * @eh_work: Worker to handle UFS errors that require s/w attention
 * @eeh_work: Worker to handle exception events
 * @errors: HBA errors
 * @uic_error: UFS interconnect layer error status
 * @saved_err: sticky error mask
 * @saved_uic_err: sticky UIC error mask
 * @silence_err_logs: flag to silence error logs
 * @dev_cmd: ufs device management command information
 * @last_dme_cmd_tstamp: time stamp of the last completed DME command
 * @auto_bkops_enabled: to track whether bkops is enabled in device
 * @vreg_info: UFS device voltage regulator information
 * @clk_list_head: UFS host controller clocks list node head
 * @pwr_info: holds current power mode
 * @max_pwr_info: keeps the device max valid power mode
 * @desc_size: descriptor sizes reported by device
 * @urgent_bkops_lvl: keeps track of urgent bkops level for device
 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
 *  device is known or not.
 * @scsi_block_reqs_cnt: reference counting for scsi block requests
 * @crypto_capabilities: Content of crypto capabilities register (0x100)
 * @crypto_cap_array: Array of crypto capabilities
 * @crypto_cfg_register: Start of the crypto cfg array
 * @ksm: the keyslot manager tied to this hba
 */
struct ufs_hba {
	void __iomem *mmio_base;

	/* Virtual memory reference */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA memory reference */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;
	struct device *dev;
	struct request_queue *cmd_queue;
	/*
	 * This field is to keep a reference to "scsi_device" corresponding to
	 * "UFS device" W-LU.
	 */
	struct scsi_device *sdev_ufs_device;

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;
	/* Desired UFS power management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* Desired UFS power management level during system PM */
	enum ufs_pm_level spm_lvl;
	struct device_attribute rpm_lvl_attr;
	struct device_attribute spm_lvl_attr;
	int pm_op_in_progress;

	/* Auto-Hibernate Idle Timer register value */
	u32 ahit;

	struct ufshcd_lrb *lrb;

	unsigned long outstanding_tasks;
	unsigned long outstanding_reqs;

	u32 capabilities;
	int nutrs;
	int nutmrs;
	u32 ufs_version;
	const struct ufs_hba_variant_ops *vops;
	struct ufs_hba_variant_params *vps;
	void *priv;
	size_t sg_entry_size;
	unsigned int irq;
	bool is_irq_enabled;
	enum ufs_ref_clk_freq dev_ref_clk_freq;

	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */

	/* Device deviations from standard UFS device spec. */
	unsigned int dev_quirks;

	struct blk_mq_tag_set tmf_tag_set;
	struct request_queue *tmf_queue;

	struct uic_command *active_uic_cmd;
	struct mutex uic_cmd_mutex;
	struct completion *uic_async_done;

	u32 ufshcd_state;
	u32 eh_flags;
	u32 intr_mask;
	u16 ee_ctrl_mask;
	bool is_powered;

	/* Work Queues */
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* HBA Errors */
	u32 errors;
	u32 uic_error;
	u32 saved_err;
	u32 saved_uic_err;
	struct ufs_stats ufs_stats;
	bool silence_err_logs;

	/* Device management request data */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;

	/* Keeps information of the UFS device connected to this host */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;
	struct ufs_vreg_info vreg_info;
	struct list_head clk_list_head;

	bool wlun_dev_clr_ua;

	/* Number of requests aborts */
	int req_abort_count;

	/* Number of lanes available (1 or 2) for Rx/Tx */
	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;
	struct ufs_pwr_mode_info max_pwr_info;

	struct ufs_clk_gating clk_gating;
	/* Control to enable/disable host capabilities */
	u32 caps;

	struct devfreq *devfreq;
	struct ufs_clk_scaling clk_scaling;
	bool is_sys_suspended;

	enum bkops_status urgent_bkops_lvl;
	bool is_urgent_bkops_lvl_checked;

	struct rw_semaphore clk_scaling_lock;
	unsigned char desc_size[QUERY_DESC_IDN_MAX];
	atomic_t scsi_block_reqs_cnt;

	struct device bsg_dev;
	struct request_queue *bsg_queue;
	bool wb_buf_flush_enabled;
	bool wb_enabled;
	struct delayed_work rpm_dev_flush_recheck_work;

#ifdef CONFIG_SCSI_UFS_CRYPTO
	union ufs_crypto_capabilities crypto_capabilities;
	union ufs_crypto_cap_entry *crypto_cap_array;
	u32 crypto_cfg_register;
	struct blk_keyslot_manager ksm;
#endif
};

/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_GATING;
}
static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
}
static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_SCALING;
}
static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
}

static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
/* DWC UFS Core has the Interrupt aggregation feature but it is not detectable */
#ifndef CONFIG_SCSI_UFS_DWC
	if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
	    !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
		return true;
	else
		return false;
#else
	return true;
#endif
}

static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
{
	return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
		!(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
}

static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
{
	return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
}

static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_WB_EN;
}

#define ufshcd_writel(hba, val, reg)	\
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
	readl((hba)->mmio_base + (reg))

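/*
 * Illustrative sketch (not part of this header): all MMIO accesses go through
 * these accessors so that the register offset is always taken relative to
 * hba->mmio_base. Using register offsets from ufshci.h, for example:
 *
 *	u32 ver = ufshcd_readl(hba, REG_UFS_VERSION);
 *	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);	// mask all interrupts
 */
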
/**
 * ufshcd_rmwl - read modify write into a register
 * @hba - per adapter instance
 * @mask - mask to apply on read value
 * @val - actual value to write
 * @reg - register address
 */
static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
{
	u32 tmp;

	tmp = ufshcd_readl(hba, reg);
	tmp &= ~mask;
	tmp |= (val & mask);
	ufshcd_writel(hba, tmp, reg);
}

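/*
 * Illustrative sketch (not part of this header): ufshcd_rmwl() is the usual
 * way to update a sub-field of a register without disturbing the other bits.
 * For instance, updating only the Auto-Hibernate idle timer field (mask and
 * register offset taken from ufshci.h) could look like:
 *
 *	ufshcd_rmwl(hba, UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit,
 *		    REG_AUTO_HIBERNATE_IDLE_TIMER);
 */
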
Sujit Reddy Thumma | 5c0c28a | 2014-09-25 15:32:21 +0300 | [diff] [blame] | 875 | int ufshcd_alloc_host(struct device *, struct ufs_hba **); |
Yaniv Gardi | 47555a5 | 2015-10-28 13:15:49 +0200 | [diff] [blame] | 876 | void ufshcd_dealloc_host(struct ufs_hba *); |
Stanley Chu | 9d19bf7a | 2020-01-17 11:51:07 +0800 | [diff] [blame] | 877 | int ufshcd_hba_enable(struct ufs_hba *hba); |
Sujit Reddy Thumma | 5c0c28a | 2014-09-25 15:32:21 +0300 | [diff] [blame] | 878 | int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int); |
Stanley Chu | 087c5ef | 2020-03-27 17:53:28 +0800 | [diff] [blame] | 879 | int ufshcd_link_recovery(struct ufs_hba *hba); |
Stanley Chu | 9d19bf7a | 2020-01-17 11:51:07 +0800 | [diff] [blame] | 880 | int ufshcd_make_hba_operational(struct ufs_hba *hba); |
Vinayak Holikatti | e0eca63 | 2013-02-25 21:44:33 +0530 | [diff] [blame] | 881 | void ufshcd_remove(struct ufs_hba *); |
Stanley Chu | 9d19bf7a | 2020-01-17 11:51:07 +0800 | [diff] [blame] | 882 | int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); |
Stanley Chu | 5c955c1 | 2020-03-18 18:40:12 +0800 | [diff] [blame] | 883 | void ufshcd_delay_us(unsigned long us, unsigned long tolerance); |
Yaniv Gardi | 596585a | 2016-03-10 17:37:08 +0200 | [diff] [blame] | 884 | int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, |
| 885 | u32 val, unsigned long interval_us, |
Bart Van Assche | 5cac109 | 2020-05-07 15:27:50 -0700 | [diff] [blame] | 886 | unsigned long timeout_ms); |
Subhash Jadavani | 9e1e8a7 | 2018-10-16 14:29:41 +0530 | [diff] [blame] | 887 | void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk); |
Stanley Chu | a5fe372d | 2020-01-04 22:26:07 +0800 | [diff] [blame] | 888 | void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist, |
| 889 | u32 reg); |
Vinayak Holikatti | e0eca63 | 2013-02-25 21:44:33 +0530 | [diff] [blame] | 890 | |
static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}

/**
 * ufshcd_set_variant - set variant specific data to the hba
 * @hba: per adapter instance
 * @variant: pointer to variant specific data
 */
static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
{
	BUG_ON(!hba);
	hba->priv = variant;
}

/**
 * ufshcd_get_variant - get variant specific data from the hba
 * @hba: per adapter instance
 */
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}
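
/*
 * Illustrative sketch only (struct and helper names are hypothetical): a
 * vendor/platform glue driver typically allocates its private state in its
 * ->init() hook, stashes it with ufshcd_set_variant(hba, host), and fetches
 * it back from any other variant hook via ufshcd_get_variant().
 */
struct ufs_example_host {
	void __iomem *vendor_base;	/* vendor-specific MMIO window */
};

static inline struct ufs_example_host *to_example_host(struct ufs_hba *hba)
{
	return ufshcd_get_variant(hba);
}
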
static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
							struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
}

static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
{
	if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
		return hba->dev_info.wb_dedicated_lu;
	return 0;
}

extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
extern int ufshcd_system_suspend(struct ufs_hba *hba);
extern int ufshcd_system_resume(struct ufs_hba *hba);
extern int ufshcd_shutdown(struct ufs_hba *hba);
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);
extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
			struct ufs_pa_layer_attr *desired_pwr_mode);

/* UIC command interfaces for DME primitives */
#define DME_LOCAL	0
#define DME_PEER	1
#define ATTR_SET_NOR	0	/* NORMAL */
#define ATTR_SET_ST	1	/* STATIC */

static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
				 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
				    u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
				      u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_PEER);
}

static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
					 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_PEER);
}

static inline int ufshcd_dme_get(struct ufs_hba *hba,
				 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
				      u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}

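/*
 * Illustrative sketch only (hypothetical helper): the DME wrappers above
 * read/write UniPro MIB attributes either locally or on the device end of
 * the link. PA_CONNECTEDTXDATALANES comes from unipro.h and UIC_ARG_MIB()
 * is defined earlier in this header.
 */
static inline int ufshcd_example_get_tx_lanes(struct ufs_hba *hba,
					      u32 *local, u32 *peer)
{
	int ret;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), local);
	if (ret)
		return ret;

	return ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				   peer);
}
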
static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
{
	return (pwr_info->pwr_rx == FAST_MODE ||
		pwr_info->pwr_rx == FASTAUTO_MODE) &&
		(pwr_info->pwr_tx == FAST_MODE ||
		pwr_info->pwr_tx == FASTAUTO_MODE);
}

static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}

/* Expose Query-Request API */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len);
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size);
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		      enum flag_idn idn, u8 index, bool *flag_res);

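/*
 * Illustrative sketch only (hypothetical helper): a typical use of the
 * query API above is reading a device flag and an attribute, here the
 * fDeviceInit flag and the bBkOpsStatus attribute (IDNs from ufs.h).
 */
static inline int ufshcd_example_read_dev_state(struct ufs_hba *hba,
						bool *fdev_init,
						u32 *bkops_status)
{
	int ret;

	ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				QUERY_FLAG_IDN_FDEVICEINIT, 0, fdev_init);
	if (ret)
		return ret;

	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0,
				 bkops_status);
}
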
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups);
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii);

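/*
 * Illustrative sketch only (hypothetical helper): ufshcd_read_string_desc()
 * allocates the output buffer itself and returns the number of bytes read
 * (or a negative errno), so the caller is expected to kfree() the buffer.
 * The descriptor index would normally be taken from the device descriptor.
 */
static inline void ufshcd_example_log_string_desc(struct ufs_hba *hba,
						  u8 desc_index)
{
	u8 *str = NULL;
	int ret;

	ret = ufshcd_read_string_desc(hba, desc_index, &str, SD_ASCII_STD);
	if (ret > 0)
		dev_info(hba->dev, "string descriptor %u: %s\n",
			 desc_index, str);
	kfree(str);
}
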
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);

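/*
 * Illustrative sketch only (hypothetical helper): ufshcd_hold() and
 * ufshcd_release() bracket accesses that need the controller clocks running
 * when clock gating is enabled; with async == false the call blocks until
 * the clocks are back on.
 */
static inline u32 ufshcd_example_read_version(struct ufs_hba *hba)
{
	u32 ver;

	ufshcd_hold(hba, false);
	ver = ufshcd_readl(hba, REG_UFS_VERSION);
	ufshcd_release(hba);

	return ver;
}
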
void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
				  int *desc_length);

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);

int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);

int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     int msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op);

/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{
	if (hba->vops)
		return hba->vops->name;
	return "";
}

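/*
 * Illustrative sketch only: a platform glue driver normally fills in just
 * the hooks it implements; every wrapper below checks hba->vops and the
 * individual callback for NULL, so unimplemented hooks are simply skipped
 * (or return a sane default). The "acme" names are hypothetical:
 *
 *	static const struct ufs_hba_variant_ops ufs_acme_vops = {
 *		.name		= "acme",
 *		.init		= ufs_acme_init,
 *		.setup_clocks	= ufs_acme_setup_clocks,
 *	};
 */
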
static inline int ufshcd_vops_init(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->init)
		return hba->vops->init(hba);

	return 0;
}

static inline void ufshcd_vops_exit(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->exit)
		return hba->vops->exit(hba);
}

static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->get_ufs_hci_version)
		return hba->vops->get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
			bool up, enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->clk_scale_notify)
		return hba->vops->clk_scale_notify(hba, up, status);
	return 0;
}

static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
					enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->setup_clocks)
		return hba->vops->setup_clocks(hba, on, status);
	return 0;
}

static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
{
	if (hba->vops && hba->vops->setup_regulators)
		return hba->vops->setup_regulators(hba, status);

	return 0;
}

static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
						bool status)
{
	if (hba->vops && hba->vops->hce_enable_notify)
		return hba->vops->hce_enable_notify(hba, status);

	return 0;
}

static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
						  bool status)
{
	if (hba->vops && hba->vops->link_startup_notify)
		return hba->vops->link_startup_notify(hba, status);

	return 0;
}

static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
				  bool status,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	if (hba->vops && hba->vops->pwr_change_notify)
		return hba->vops->pwr_change_notify(hba, status,
					dev_max_params, dev_req_params);

	return -ENOTSUPP;
}

static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
					      bool is_scsi_cmd)
{
	if (hba->vops && hba->vops->setup_xfer_req)
		return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
}

static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
					       int tag, u8 tm_function)
{
	if (hba->vops && hba->vops->setup_task_mgmt)
		return hba->vops->setup_task_mgmt(hba, tag, tm_function);
}

static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
					      enum uic_cmd_dme cmd,
					      enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->hibern8_notify)
		return hba->vops->hibern8_notify(hba, cmd, status);
}

static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->apply_dev_quirks)
		return hba->vops->apply_dev_quirks(hba);
	return 0;
}

static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->fixup_dev_quirks)
		hba->vops->fixup_dev_quirks(hba);
}

static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->suspend)
		return hba->vops->suspend(hba, op);

	return 0;
}

static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->resume)
		return hba->vops->resume(hba, op);

	return 0;
}

static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->dbg_register_dump)
		hba->vops->dbg_register_dump(hba);
}

static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->device_reset) {
		hba->vops->device_reset(hba);
		ufshcd_set_ufs_dev_active(hba);
		ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0);
	}
}

static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
						    struct devfreq_dev_profile
						    *profile, void *data)
{
	if (hba->vops && hba->vops->config_scaling_param)
		hba->vops->config_scaling_param(hba, profile, data);
}

static inline int ufshcd_vops_fill_prdt(struct ufs_hba *hba,
					struct ufshcd_lrb *lrbp,
					unsigned int segments)
{
	if (hba->vops && hba->vops->fill_prdt)
		return hba->vops->fill_prdt(hba, lrbp, segments);

	return 0;
}

extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];

/**
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Return: UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}
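
/*
 * For example (per the UFS_UPIU_* values in ufs.h and scsi_is_wlun()): the
 * SCSI well-known REPORT LUNS LUN 0xC101 maps to UPIU W-LUN 0x81
 * (UFS_UPIU_WLUN_ID | 0x1), while an ordinary LUN such as 2 passes through
 * unchanged.
 */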

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix);
int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
#endif /* End of Header */