// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller driver Core
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <scsi/scsi_driver.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
#include "ufs-fault-injection.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
#include "ufshpb.h"
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 50 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    50 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Maximum number of error handler retries before giving up */
#define MAX_ERR_HANDLER_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

/* Default delay of RPM device flush delayed work */
#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000

/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */

/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */

#define wlun_dev_to_hba(dv) shost_priv(to_scsi_device(dv)->host)

#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({								\
		int _ret;						\
		if (_on)						\
			_ret = ufshcd_enable_vreg(_dev, _vreg);		\
		else							\
			_ret = ufshcd_disable_vreg(_dev, _vreg);	\
		_ret;							\
	})

#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
	size_t __len = (len);                                            \
	print_hex_dump(KERN_ERR, prefix_str,                             \
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);                        \
} while (0)

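/**
 * ufshcd_dump_regs - dump a range of UFSHCI registers to the kernel log
 * @hba: per-adapter instance
 * @offset: starting register offset, must be a multiple of 4
 * @len: number of bytes to dump, must be a multiple of 4
 * @prefix: prefix string prepended to every dumped line
 *
 * Snapshots the register range into a temporary buffer and prints it with
 * ufshcd_hex_dump(). Returns 0 on success, -EINVAL for an unaligned range
 * and -ENOMEM if the buffer allocation fails.
 */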
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4)
		regs[pos / 4] = ufshcd_readl(hba, offset + pos);

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_NUM_RESERVED	= 1,
	UFSHCD_CMD_PER_LUN	= 32 - UFSHCD_NUM_RESERVED,
	UFSHCD_CAN_QUEUE	= 32 - UFSHCD_NUM_RESERVED,
};

static const char *const ufshcd_state_name[] = {
	[UFSHCD_STATE_RESET]			= "reset",
	[UFSHCD_STATE_OPERATIONAL]		= "operational",
	[UFSHCD_STATE_ERROR]			= "error",
	[UFSHCD_STATE_EH_SCHEDULED_FATAL]	= "eh_fatal",
	[UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]	= "eh_non_fatal",
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
	UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};

#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
	/*
	 * For DeepSleep, the link is first put in hibern8 and then off.
	 * Leaving the link in hibern8 is not supported.
	 */
	[UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
			(ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}

static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
		UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
	UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
		UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	END_FIX
};

static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg);
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);

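/*
 * Idempotent helpers to enable/disable the host IRQ line; the state is
 * tracked in hba->is_irq_enabled so repeated calls stay balanced.
 */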
static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}

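/*
 * Enable WriteBooster on devices that support it: turn the feature on,
 * enable buffer flush during hibern8 and, unless the controller quirk says
 * to skip it, enable manual buffer flush as well.
 */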
static inline void ufshcd_wb_config(struct ufs_hba *hba)
{
	if (!ufshcd_is_wb_allowed(hba))
		return;

	ufshcd_wb_toggle(hba, true);

	ufshcd_wb_toggle_flush_during_h8(hba, true);
	if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
		ufshcd_wb_toggle_flush(hba, true);
}

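/*
 * Reference-counted wrappers around scsi_block_requests() and
 * scsi_unblock_requests() so nested callers don't unblock the host
 * prematurely.
 */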
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}

static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				      enum ufs_trace_str_t str_t)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
	struct utp_upiu_header *header;

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		header = &rq->header;
	else
		header = &hba->lrb[tag].ucd_rsp_ptr->header;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
			  UFS_TSF_CDB);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
					enum ufs_trace_str_t str_t,
					struct utp_upiu_req *rq_rsp)
{
	if (!trace_ufshcd_upiu_enabled())
		return;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
			  &rq_rsp->qr, UFS_TSF_OSF);
}

static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_TM_SEND)
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_req.req_header,
				  &descp->upiu_req.input_param1,
				  UFS_TSF_TM_INPUT);
	else
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_rsp.rsp_header,
				  &descp->upiu_rsp.output_param1,
				  UFS_TSF_TM_OUTPUT);
}

static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
					 struct uic_command *ucmd,
					 enum ufs_trace_str_t str_t)
{
	u32 cmd;

	if (!trace_ufshcd_uic_command_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		cmd = ucmd->command;
	else
		cmd = ufshcd_readl(hba, REG_UIC_COMMAND);

	trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}

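/*
 * Emit the ufshcd_command tracepoint (and the matching UPIU tracepoint) for
 * a SCSI command: opcode, LBA, group id, doorbell and interrupt status.
 * The transfer length is only filled in for READ(10), WRITE(10) and UNMAP.
 */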
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	u64 lba;
	u8 opcode = 0, group_id = 0;
	u32 intr, doorbell;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct request *rq = scsi_cmd_to_rq(cmd);
	int transfer_len = -1;

	if (!cmd)
		return;

	/* trace UPIU also */
	ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
	if (!trace_ufshcd_command_enabled())
		return;

	opcode = cmd->cmnd[0];
	lba = scsi_get_lba(cmd);

	if (opcode == READ_10 || opcode == WRITE_10) {
		/*
		 * Currently we only fully trace read(10) and write(10) commands
		 */
		transfer_len =
			be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		if (opcode == WRITE_10)
			group_id = lrbp->cmd->cmnd[6];
	} else if (opcode == UNMAP) {
		/*
		 * The number of Bytes to be unmapped beginning with the lba.
		 */
		transfer_len = blk_rq_bytes(rq);
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
			doorbell, transfer_len, intr, lba, opcode, group_id);
}

static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}

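/* Dump one UFS_EVT_* event-history ring to the kernel log */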
static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
			     char *err_name)
{
	int i;
	bool found = false;
	struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];

	for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
		int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;

		if (e->tstamp[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			e->val[p], ktime_to_us(e->tstamp[p]));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s\n", err_name);
	else
		dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
}

static void ufshcd_print_evt_hist(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");

	ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
	ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
	ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
	ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
	ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
	ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
			 "auto_hibern8_err");
	ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
	ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
			 "link_startup_fail");
	ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
			 "suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
	ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
	ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");

	ufshcd_vops_dbg_register_dump(hba);
}

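/*
 * Dump the transfer request descriptor, request/response UPIUs and,
 * optionally, the PRDT for every tag set in @bitmap.
 */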
static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
				tag, ktime_to_us(lrbp->issue_time_stamp));
		dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
				tag, ktime_to_us(lrbp->compl_time_stamp));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
			tag, (u64)lrbp->utrd_dma_addr);

		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));

		prdt_length = le16_to_cpu(
			lrbp->utr_descriptor_ptr->prd_table_length);
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
			prdt_length /= sizeof(struct ufshcd_sg_entry);

		dev_err(hba->dev,
			"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
			tag, prdt_length,
			(u64)lrbp->ucd_prdt_dma_addr);

		if (pr_prdt)
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				sizeof(struct ufshcd_sg_entry) * prdt_length);
	}
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}

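/* Dump the driver's software state: power modes, saved errors, quirks, etc. */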
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	struct scsi_device *sdev_ufs = hba->sdev_ufs_device;

	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
		hba->ufs_stats.hibern8_exit_cnt);
	dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
		ktime_to_us(hba->ufs_stats.last_intr_ts),
		hba->ufs_stats.last_intr_status);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
		hba->ufs_version, hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
	if (sdev_ufs)
		dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
			sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);

	ufshcd_print_clk_freqs(hba);
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}

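/*
 * Reset the device through the variant ops; on success the device is back in
 * active power mode and any cached WriteBooster state is cleared. The result
 * is recorded in the event history unless the vop is not implemented.
 */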
static void ufshcd_device_reset(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_vops_device_reset(hba);

	if (!err) {
		ufshcd_set_ufs_dev_active(hba);
		if (ufshcd_is_wb_allowed(hba)) {
			hba->dev_info.wb_enabled = false;
			hba->dev_info.wb_buf_flush_enabled = false;
		}
	}
	if (err != -EOPNOTSUPP)
		ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
}

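/**
 * ufshcd_delay_us - delay for the requested number of microseconds
 * @us: delay in microseconds, 0 is a no-op
 * @tolerance: extra slack in microseconds allowed when sleeping
 *
 * Busy-waits for very short delays (< 10 us) and sleeps otherwise.
 */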
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
	if (!us)
		return;

	if (us < 10)
		udelay(us);
	else
		usleep_range(us, us + tolerance);
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);

/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba: per-adapter interface
 * @reg: mmio register offset
 * @mask: mask to apply to the read register value
 * @val: value to wait for
 * @interval_us: polling interval in microseconds
 * @timeout_ms: timeout in milliseconds
 *
 * Return:
 * -ETIMEDOUT on error, zero on success.
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		usleep_range(interval_us, interval_us + 50);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == ufshci_version(1, 0))
		return INTERRUPT_MASK_ALL_VER_10;
	if (hba->ufs_version <= ufshci_version(2, 0))
		return INTERRUPT_MASK_ALL_VER_11;

	return INTERRUPT_MASK_ALL_VER_21;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	u32 ufshci_ver;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
	else
		ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);

	/*
	 * UFSHCI v1.x uses a different version scheme, in order
	 * to allow the use of comparisons with the ufshci_version
	 * function, we convert it to the same scheme as ufs 2.0+.
	 */
	if (ufshci_ver & 0x00010000)
		return ufshci_version(1, ufshci_ver & 0x00000100);

	return ufshci_ver;
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? true : false;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos),
			      REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function reads UIC command argument 3.
 * Returns the attribute value carried in UIC command argument 3.
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates to
 *			the host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	u32 val = CONTROLLER_ENABLE;

	if (ufshcd_crypto_enable(hba))
		val |= CRYPTO_GENERAL_ENABLE;

	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns false if controller is active, true otherwise
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
		? false : true;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if (hba->ufs_version <= ufshci_version(1, 1))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}

/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If True, set max possible frequency otherwise set low frequency
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

out:
	return ret;
}

/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	ktime_t start = ktime_get();

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_set_clk_freq(hba, scale_up);
	if (ret)
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
	if (ret)
		ufshcd_set_clk_freq(hba, !scale_up);

out:
	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}

/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}

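/*
 * Wait up to @wait_timeout_us for both the task management and transfer
 * request doorbell registers to drain. Returns -EBUSY if they don't clear in
 * time or if the host leaves the OPERATIONAL state while waiting.
 */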
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_doorbell;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		if (!tm_doorbell && !tr_doorbell) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		schedule();
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_doorbell);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_doorbell);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}

/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
		    hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info.info,
				&hba->pwr_info,
				sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
			new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
		}
	}

	/* check if the power mode needs to be changed or not? */
	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}

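/*
 * Quiesce the host before scaling: block new SCSI requests, take the clock
 * scaling lock, wait for the doorbells to drain and keep the host out of low
 * power. Undone by ufshcd_clock_scaling_unprepare().
 */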
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
{
	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	ufshcd_scsi_block_requests(hba);
	down_write(&hba->clk_scaling_lock);

	if (!hba->clk_scaling.is_allowed ||
	    ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		ufshcd_scsi_unblock_requests(hba);
		goto out;
	}

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba, false);

out:
	return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
{
	if (writelock)
		up_write(&hba->clk_scaling_lock);
	else
		up_read(&hba->clk_scaling_lock);
	ufshcd_scsi_unblock_requests(hba);
	ufshcd_release(hba);
}

/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	bool is_writelock = true;

	ret = ufshcd_clock_scaling_prepare(hba);
	if (ret)
		return ret;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out_unprepare;
	}

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out_unprepare;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, false);
			goto out_unprepare;
		}
	}

	/* Enable Write Booster if we have scaled up else disable it */
	downgrade_write(&hba->clk_scaling_lock);
	is_writelock = false;
	ufshcd_wb_toggle(hba, scale_up);

out_unprepare:
	ufshcd_clock_scaling_unprepare(hba, is_writelock);
	return ret;
}

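/*
 * Deferred work used by the clock scaling code: suspend devfreq when the
 * host has gone idle and resume it once requests start flowing again.
 */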
static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}

subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001298static int ufshcd_devfreq_target(struct device *dev,
1299 unsigned long *freq, u32 flags)
1300{
1301 int ret = 0;
1302 struct ufs_hba *hba = dev_get_drvdata(dev);
1303 ktime_t start;
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001304 bool scale_up, sched_clk_scaling_suspend_work = false;
Bjorn Andersson092b4552018-05-17 23:26:37 -07001305 struct list_head *clk_list = &hba->clk_list_head;
1306 struct ufs_clk_info *clki;
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001307 unsigned long irq_flags;
1308
1309 if (!ufshcd_is_clkscaling_supported(hba))
1310 return -EINVAL;
1311
Asutosh Das91831d32020-03-25 11:29:00 -07001312 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1313 /* Override with the closest supported frequency */
1314 *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001315 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1316 if (ufshcd_eh_in_progress(hba)) {
1317 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1318 return 0;
1319 }
1320
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001321 if (!hba->clk_scaling.active_reqs)
1322 sched_clk_scaling_suspend_work = true;
1323
Bjorn Andersson092b4552018-05-17 23:26:37 -07001324 if (list_empty(clk_list)) {
1325 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1326 goto out;
1327 }
1328
Asutosh Das91831d32020-03-25 11:29:00 -07001329 /* Decide based on the rounded-off frequency and update */
Bjorn Andersson092b4552018-05-17 23:26:37 -07001330	scale_up = *freq == clki->max_freq;
Asutosh Das91831d32020-03-25 11:29:00 -07001331 if (!scale_up)
1332 *freq = clki->min_freq;
1333 /* Update the frequency */
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001334 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1335 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1336 ret = 0;
1337 goto out; /* no state change required */
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001338 }
1339 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1340
1341 start = ktime_get();
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001342 ret = ufshcd_devfreq_scale(hba, scale_up);
1343
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001344 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1345 (scale_up ? "up" : "down"),
1346 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1347
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001348out:
1349 if (sched_clk_scaling_suspend_work)
1350 queue_work(hba->clk_scaling.workq,
1351 &hba->clk_scaling.suspend_work);
1352
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001353 return ret;
1354}
1355
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001356static int ufshcd_devfreq_get_dev_status(struct device *dev,
1357 struct devfreq_dev_status *stat)
1358{
1359 struct ufs_hba *hba = dev_get_drvdata(dev);
1360 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1361 unsigned long flags;
Asutosh Das91831d32020-03-25 11:29:00 -07001362 struct list_head *clk_list = &hba->clk_list_head;
1363 struct ufs_clk_info *clki;
Stanley Chub1bf66d2020-06-11 18:10:43 +08001364 ktime_t curr_t;
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001365
1366 if (!ufshcd_is_clkscaling_supported(hba))
1367 return -EINVAL;
1368
1369 memset(stat, 0, sizeof(*stat));
1370
1371 spin_lock_irqsave(hba->host->host_lock, flags);
Stanley Chub1bf66d2020-06-11 18:10:43 +08001372 curr_t = ktime_get();
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001373 if (!scaling->window_start_t)
1374 goto start_window;
1375
Asutosh Das91831d32020-03-25 11:29:00 -07001376 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1377 /*
 1378	 * If the current frequency is 0, the ondemand governor assumes
 1379	 * that no initial frequency has been set and always requests
 1380	 * the maximum frequency.
1381 */
1382 stat->current_frequency = clki->curr_freq;
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001383 if (scaling->is_busy_started)
Stanley Chub1bf66d2020-06-11 18:10:43 +08001384 scaling->tot_busy_t += ktime_us_delta(curr_t,
1385 scaling->busy_start_t);
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001386
Stanley Chub1bf66d2020-06-11 18:10:43 +08001387 stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001388 stat->busy_time = scaling->tot_busy_t;
1389start_window:
Stanley Chub1bf66d2020-06-11 18:10:43 +08001390 scaling->window_start_t = curr_t;
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001391 scaling->tot_busy_t = 0;
1392
1393 if (hba->outstanding_reqs) {
Stanley Chub1bf66d2020-06-11 18:10:43 +08001394 scaling->busy_start_t = curr_t;
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001395 scaling->is_busy_started = true;
1396 } else {
1397 scaling->busy_start_t = 0;
1398 scaling->is_busy_started = false;
1399 }
1400 spin_unlock_irqrestore(hba->host->host_lock, flags);
1401 return 0;
1402}
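
/*
 * Worked example (illustrative only; the actual decision is made by the
 * devfreq "simple_ondemand" governor wired up in ufshcd_devfreq_init()
 * below): if 45 ms of a 50 ms polling window were accumulated in
 * tot_busy_t, the stats above report busy_time/total_time = 90%. A busy
 * fraction above the governor's up-threshold makes it request the maximum
 * frequency, which ufshcd_devfreq_target() then rounds to clki->max_freq
 * and applies through ufshcd_devfreq_scale().
 */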
1403
Bjorn Anderssondeac4442018-05-17 23:26:36 -07001404static int ufshcd_devfreq_init(struct ufs_hba *hba)
1405{
Bjorn Andersson092b4552018-05-17 23:26:37 -07001406 struct list_head *clk_list = &hba->clk_list_head;
1407 struct ufs_clk_info *clki;
Bjorn Anderssondeac4442018-05-17 23:26:36 -07001408 struct devfreq *devfreq;
1409 int ret;
1410
Bjorn Andersson092b4552018-05-17 23:26:37 -07001411 /* Skip devfreq if we don't have any clocks in the list */
1412 if (list_empty(clk_list))
1413 return 0;
1414
1415 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1416 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1417 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1418
Stanley Chu90b84912020-05-09 17:37:13 +08001419 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1420 &hba->vps->ondemand_data);
Bjorn Andersson092b4552018-05-17 23:26:37 -07001421 devfreq = devfreq_add_device(hba->dev,
Stanley Chu90b84912020-05-09 17:37:13 +08001422 &hba->vps->devfreq_profile,
Bjorn Anderssondeac4442018-05-17 23:26:36 -07001423 DEVFREQ_GOV_SIMPLE_ONDEMAND,
Stanley Chu90b84912020-05-09 17:37:13 +08001424 &hba->vps->ondemand_data);
Bjorn Anderssondeac4442018-05-17 23:26:36 -07001425 if (IS_ERR(devfreq)) {
1426 ret = PTR_ERR(devfreq);
1427 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
Bjorn Andersson092b4552018-05-17 23:26:37 -07001428
1429 dev_pm_opp_remove(hba->dev, clki->min_freq);
1430 dev_pm_opp_remove(hba->dev, clki->max_freq);
Bjorn Anderssondeac4442018-05-17 23:26:36 -07001431 return ret;
1432 }
1433
1434 hba->devfreq = devfreq;
1435
1436 return 0;
1437}
1438
Bjorn Andersson092b4552018-05-17 23:26:37 -07001439static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1440{
1441 struct list_head *clk_list = &hba->clk_list_head;
1442 struct ufs_clk_info *clki;
1443
1444 if (!hba->devfreq)
1445 return;
1446
1447 devfreq_remove_device(hba->devfreq);
1448 hba->devfreq = NULL;
1449
1450 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1451 dev_pm_opp_remove(hba->dev, clki->min_freq);
1452 dev_pm_opp_remove(hba->dev, clki->max_freq);
1453}
1454
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001455static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1456{
1457 unsigned long flags;
1458
1459 devfreq_suspend_device(hba->devfreq);
1460 spin_lock_irqsave(hba->host->host_lock, flags);
1461 hba->clk_scaling.window_start_t = 0;
1462 spin_unlock_irqrestore(hba->host->host_lock, flags);
1463}
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001464
Gilad Bronera5082532016-10-17 17:10:00 -07001465static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1466{
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001467 unsigned long flags;
1468 bool suspend = false;
1469
Stanley Chuf9a7fa32021-01-20 23:01:40 +08001470 cancel_work_sync(&hba->clk_scaling.suspend_work);
1471 cancel_work_sync(&hba->clk_scaling.resume_work);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001472
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001473 spin_lock_irqsave(hba->host->host_lock, flags);
1474 if (!hba->clk_scaling.is_suspended) {
1475 suspend = true;
1476 hba->clk_scaling.is_suspended = true;
1477 }
1478 spin_unlock_irqrestore(hba->host->host_lock, flags);
1479
1480 if (suspend)
1481 __ufshcd_suspend_clkscaling(hba);
Gilad Bronera5082532016-10-17 17:10:00 -07001482}
1483
1484static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1485{
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001486 unsigned long flags;
1487 bool resume = false;
1488
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001489 spin_lock_irqsave(hba->host->host_lock, flags);
1490 if (hba->clk_scaling.is_suspended) {
1491 resume = true;
1492 hba->clk_scaling.is_suspended = false;
1493 }
1494 spin_unlock_irqrestore(hba->host->host_lock, flags);
1495
1496 if (resume)
1497 devfreq_resume_device(hba->devfreq);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001498}
1499
1500static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1501 struct device_attribute *attr, char *buf)
1502{
1503 struct ufs_hba *hba = dev_get_drvdata(dev);
1504
Jiapeng Chong1481b7f2021-03-02 14:08:18 +08001505 return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001506}
1507
1508static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1509 struct device_attribute *attr, const char *buf, size_t count)
1510{
1511 struct ufs_hba *hba = dev_get_drvdata(dev);
1512 u32 value;
Can Guo9cd20d32021-01-13 19:13:28 -08001513 int err = 0;
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001514
1515 if (kstrtou32(buf, 0, &value))
1516 return -EINVAL;
1517
Can Guo9cd20d32021-01-13 19:13:28 -08001518 down(&hba->host_sem);
1519 if (!ufshcd_is_user_access_allowed(hba)) {
1520 err = -EBUSY;
1521 goto out;
1522 }
1523
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001524 value = !!value;
Can Guo0e9d4ca2021-01-20 02:04:21 -08001525 if (value == hba->clk_scaling.is_enabled)
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001526 goto out;
1527
Asutosh Dasb294ff32021-04-23 17:20:16 -07001528 ufshcd_rpm_get_sync(hba);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001529 ufshcd_hold(hba, false);
1530
Can Guo0e9d4ca2021-01-20 02:04:21 -08001531 hba->clk_scaling.is_enabled = value;
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001532
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001533 if (value) {
1534 ufshcd_resume_clkscaling(hba);
1535 } else {
1536 ufshcd_suspend_clkscaling(hba);
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001537 err = ufshcd_devfreq_scale(hba, true);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001538 if (err)
1539 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1540 __func__, err);
1541 }
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001542
1543 ufshcd_release(hba);
Asutosh Dasb294ff32021-04-23 17:20:16 -07001544 ufshcd_rpm_put_sync(hba);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001545out:
Can Guo9cd20d32021-01-13 19:13:28 -08001546 up(&hba->host_sem);
1547 return err ? err : count;
Gilad Bronera5082532016-10-17 17:10:00 -07001548}
1549
Can Guo4543d9d2021-01-20 02:04:22 -08001550static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001551{
1552 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1553 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1554 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1555 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1556 hba->clk_scaling.enable_attr.attr.mode = 0644;
1557 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1558 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1559}
1560
Can Guo4543d9d2021-01-20 02:04:22 -08001561static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1562{
1563 if (hba->clk_scaling.enable_attr.attr.name)
1564 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1565}
1566
1567static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1568{
1569 char wq_name[sizeof("ufs_clkscaling_00")];
1570
1571 if (!ufshcd_is_clkscaling_supported(hba))
1572 return;
1573
Can Guo80d892f2021-01-27 18:49:27 -08001574 if (!hba->clk_scaling.min_gear)
1575 hba->clk_scaling.min_gear = UFS_HS_G1;
1576
Can Guo4543d9d2021-01-20 02:04:22 -08001577 INIT_WORK(&hba->clk_scaling.suspend_work,
1578 ufshcd_clk_scaling_suspend_work);
1579 INIT_WORK(&hba->clk_scaling.resume_work,
1580 ufshcd_clk_scaling_resume_work);
1581
1582 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1583 hba->host->host_no);
1584 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1585
1586 hba->clk_scaling.is_initialized = true;
1587}
1588
1589static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1590{
1591 if (!hba->clk_scaling.is_initialized)
1592 return;
1593
1594 ufshcd_remove_clk_scaling_sysfs(hba);
1595 destroy_workqueue(hba->clk_scaling.workq);
1596 ufshcd_devfreq_remove(hba);
1597 hba->clk_scaling.is_initialized = false;
1598}
1599
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001600static void ufshcd_ungate_work(struct work_struct *work)
1601{
1602 int ret;
1603 unsigned long flags;
1604 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1605 clk_gating.ungate_work);
1606
1607 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1608
1609 spin_lock_irqsave(hba->host->host_lock, flags);
1610 if (hba->clk_gating.state == CLKS_ON) {
1611 spin_unlock_irqrestore(hba->host->host_lock, flags);
1612 goto unblock_reqs;
1613 }
1614
1615 spin_unlock_irqrestore(hba->host->host_lock, flags);
Can Guodd7143e2020-10-27 12:10:36 -07001616 ufshcd_hba_vreg_set_hpm(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001617 ufshcd_setup_clocks(hba, true);
1618
Stanley Chu8b0bbf02019-12-07 20:22:01 +08001619 ufshcd_enable_irq(hba);
1620
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001621 /* Exit from hibern8 */
1622 if (ufshcd_can_hibern8_during_gating(hba)) {
1623 /* Prevent gating in this path */
1624 hba->clk_gating.is_suspended = true;
1625 if (ufshcd_is_link_hibern8(hba)) {
1626 ret = ufshcd_uic_hibern8_exit(hba);
1627 if (ret)
1628 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1629 __func__, ret);
1630 else
1631 ufshcd_set_link_active(hba);
1632 }
1633 hba->clk_gating.is_suspended = false;
1634 }
1635unblock_reqs:
Subhash Jadavani38135532018-05-03 16:37:18 +05301636 ufshcd_scsi_unblock_requests(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001637}
1638
1639/**
1640 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1641 * Also, exit from hibern8 mode and set the link as active.
1642 * @hba: per adapter instance
1643 * @async: This indicates whether caller should ungate clocks asynchronously.
1644 */
1645int ufshcd_hold(struct ufs_hba *hba, bool async)
1646{
1647 int rc = 0;
Stanley Chu93b6c5d2020-08-09 13:07:34 +08001648 bool flush_result;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001649 unsigned long flags;
1650
Bart Van Assche3489c342021-12-03 15:19:47 -08001651 if (!ufshcd_is_clkgating_allowed(hba) ||
1652 !hba->clk_gating.is_initialized)
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001653 goto out;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001654 spin_lock_irqsave(hba->host->host_lock, flags);
1655 hba->clk_gating.active_reqs++;
1656
Sahitya Tummala856b3482014-09-25 15:32:34 +03001657start:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001658 switch (hba->clk_gating.state) {
1659 case CLKS_ON:
Venkat Gopalakrishnanf2a785a2016-10-17 17:10:53 -07001660 /*
1661 * Wait for the ungate work to complete if in progress.
1662 * Though the clocks may be in ON state, the link could
 1663		 * still be in hibern8 state if hibern8 is allowed
 1664		 * during clock gating.
 1665		 * Make sure we also exit hibern8 state, in addition to
 1666		 * the clocks being ON.
1667 */
1668 if (ufshcd_can_hibern8_during_gating(hba) &&
1669 ufshcd_is_link_hibern8(hba)) {
Can Guoc63d6092020-02-10 19:40:48 -08001670 if (async) {
1671 rc = -EAGAIN;
1672 hba->clk_gating.active_reqs--;
1673 break;
1674 }
Venkat Gopalakrishnanf2a785a2016-10-17 17:10:53 -07001675 spin_unlock_irqrestore(hba->host->host_lock, flags);
Stanley Chu93b6c5d2020-08-09 13:07:34 +08001676 flush_result = flush_work(&hba->clk_gating.ungate_work);
1677 if (hba->clk_gating.is_suspended && !flush_result)
1678 goto out;
Venkat Gopalakrishnanf2a785a2016-10-17 17:10:53 -07001679 spin_lock_irqsave(hba->host->host_lock, flags);
1680 goto start;
1681 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001682 break;
1683 case REQ_CLKS_OFF:
1684 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1685 hba->clk_gating.state = CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001686 trace_ufshcd_clk_gating(dev_name(hba->dev),
1687 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001688 break;
1689 }
1690 /*
Tomohiro Kusumi9c490d22017-03-28 16:49:26 +03001691 * If we are here, it means gating work is either done or
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001692 * currently running. Hence, fall through to cancel gating
1693 * work and to enable clocks.
1694 */
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05001695 fallthrough;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001696 case CLKS_OFF:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001697 hba->clk_gating.state = REQ_CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001698 trace_ufshcd_clk_gating(dev_name(hba->dev),
1699 hba->clk_gating.state);
Can Guoda3fecb2020-11-02 22:24:39 -08001700 if (queue_work(hba->clk_gating.clk_gating_workq,
1701 &hba->clk_gating.ungate_work))
1702 ufshcd_scsi_block_requests(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001703 /*
1704 * fall through to check if we should wait for this
1705 * work to be done or not.
1706 */
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05001707 fallthrough;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001708 case REQ_CLKS_ON:
1709 if (async) {
1710 rc = -EAGAIN;
1711 hba->clk_gating.active_reqs--;
1712 break;
1713 }
1714
1715 spin_unlock_irqrestore(hba->host->host_lock, flags);
1716 flush_work(&hba->clk_gating.ungate_work);
1717 /* Make sure state is CLKS_ON before returning */
Sahitya Tummala856b3482014-09-25 15:32:34 +03001718 spin_lock_irqsave(hba->host->host_lock, flags);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001719 goto start;
1720 default:
1721 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1722 __func__, hba->clk_gating.state);
1723 break;
1724 }
1725 spin_unlock_irqrestore(hba->host->host_lock, flags);
1726out:
1727 return rc;
1728}
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02001729EXPORT_SYMBOL_GPL(ufshcd_hold);
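
/*
 * Illustrative sketch (not part of the driver): callers are expected to
 * bracket host register or UIC accesses with ufshcd_hold()/ufshcd_release()
 * so that the clocks stay ungated for the duration of the access, as the
 * sysfs and UIC paths in this file already do. The helper name below is
 * hypothetical:
 *
 *	static u32 example_read_hcs(struct ufs_hba *hba)
 *	{
 *		u32 val;
 *
 *		ufshcd_hold(hba, false);	// may sleep until clocks are on
 *		val = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
 *		ufshcd_release(hba);		// re-arms the delayed gate work
 *		return val;
 *	}
 */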
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001730
1731static void ufshcd_gate_work(struct work_struct *work)
1732{
1733 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1734 clk_gating.gate_work.work);
1735 unsigned long flags;
Can Guo4db7a232020-08-09 05:15:51 -07001736 int ret;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001737
1738 spin_lock_irqsave(hba->host->host_lock, flags);
Venkat Gopalakrishnan3f0c06d2016-10-17 17:11:07 -07001739 /*
 1740	 * In case you are here to cancel this work, the gating state
 1741	 * would be marked as REQ_CLKS_ON. In this case save time by
 1742	 * skipping the gating work and exiting after changing the clock
 1743	 * state to CLKS_ON.
1744 */
1745 if (hba->clk_gating.is_suspended ||
Asutosh Das18f013742019-11-14 22:09:29 -08001746 (hba->clk_gating.state != REQ_CLKS_OFF)) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001747 hba->clk_gating.state = CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001748 trace_ufshcd_clk_gating(dev_name(hba->dev),
1749 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001750 goto rel_lock;
1751 }
1752
1753 if (hba->clk_gating.active_reqs
1754 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
Bart Van Asschebd0b3532021-12-03 15:19:40 -08001755 || hba->outstanding_reqs || hba->outstanding_tasks
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001756 || hba->active_uic_cmd || hba->uic_async_done)
1757 goto rel_lock;
1758
1759 spin_unlock_irqrestore(hba->host->host_lock, flags);
1760
1761 /* put the link into hibern8 mode before turning off clocks */
1762 if (ufshcd_can_hibern8_during_gating(hba)) {
Can Guo4db7a232020-08-09 05:15:51 -07001763 ret = ufshcd_uic_hibern8_enter(hba);
1764 if (ret) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001765 hba->clk_gating.state = CLKS_ON;
Can Guo4db7a232020-08-09 05:15:51 -07001766 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1767 __func__, ret);
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001768 trace_ufshcd_clk_gating(dev_name(hba->dev),
1769 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001770 goto out;
1771 }
1772 ufshcd_set_link_hibern8(hba);
1773 }
1774
Stanley Chu8b0bbf02019-12-07 20:22:01 +08001775 ufshcd_disable_irq(hba);
1776
Can Guo81309c22020-11-25 18:01:00 -08001777 ufshcd_setup_clocks(hba, false);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001778
Can Guodd7143e2020-10-27 12:10:36 -07001779 /* Put the host controller in low power mode if possible */
1780 ufshcd_hba_vreg_set_lpm(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001781 /*
 1782	 * In case you are here to cancel this work, the gating state
 1783	 * would be marked as REQ_CLKS_ON. In this case keep the state
 1784	 * as REQ_CLKS_ON, which anyway implies that the clocks are off
 1785	 * and a request to turn them on is pending. This way we keep
 1786	 * the state machine intact, which ultimately prevents the cancel
 1787	 * work from being run multiple times when new requests arrive
 1788	 * before the current cancel work is done.
1789 */
1790 spin_lock_irqsave(hba->host->host_lock, flags);
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001791 if (hba->clk_gating.state == REQ_CLKS_OFF) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001792 hba->clk_gating.state = CLKS_OFF;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001793 trace_ufshcd_clk_gating(dev_name(hba->dev),
1794 hba->clk_gating.state);
1795 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001796rel_lock:
1797 spin_unlock_irqrestore(hba->host->host_lock, flags);
1798out:
1799 return;
1800}
1801
1802/* host lock must be held before calling this variant */
1803static void __ufshcd_release(struct ufs_hba *hba)
1804{
1805 if (!ufshcd_is_clkgating_allowed(hba))
1806 return;
1807
1808 hba->clk_gating.active_reqs--;
1809
Can Guo4db7a232020-08-09 05:15:51 -07001810 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1811 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
Bart Van Assche3489c342021-12-03 15:19:47 -08001812 hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
Jaegeuk Kimfd62de12020-11-17 08:58:33 -08001813 hba->active_uic_cmd || hba->uic_async_done ||
1814 hba->clk_gating.state == CLKS_OFF)
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001815 return;
1816
1817 hba->clk_gating.state = REQ_CLKS_OFF;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001818 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
Evan Greenf4bb7702018-10-05 10:27:32 -07001819 queue_delayed_work(hba->clk_gating.clk_gating_workq,
1820 &hba->clk_gating.gate_work,
1821 msecs_to_jiffies(hba->clk_gating.delay_ms));
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001822}
1823
1824void ufshcd_release(struct ufs_hba *hba)
1825{
1826 unsigned long flags;
1827
1828 spin_lock_irqsave(hba->host->host_lock, flags);
1829 __ufshcd_release(hba);
1830 spin_unlock_irqrestore(hba->host->host_lock, flags);
1831}
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02001832EXPORT_SYMBOL_GPL(ufshcd_release);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001833
1834static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1835 struct device_attribute *attr, char *buf)
1836{
1837 struct ufs_hba *hba = dev_get_drvdata(dev);
1838
DooHyun Hwangbafd09f2021-02-03 19:14:43 +09001839 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001840}
1841
1842static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1843 struct device_attribute *attr, const char *buf, size_t count)
1844{
1845 struct ufs_hba *hba = dev_get_drvdata(dev);
1846 unsigned long flags, value;
1847
1848 if (kstrtoul(buf, 0, &value))
1849 return -EINVAL;
1850
1851 spin_lock_irqsave(hba->host->host_lock, flags);
1852 hba->clk_gating.delay_ms = value;
1853 spin_unlock_irqrestore(hba->host->host_lock, flags);
1854 return count;
1855}
1856
Sahitya Tummalab4274112016-12-22 18:40:39 -08001857static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1858 struct device_attribute *attr, char *buf)
1859{
1860 struct ufs_hba *hba = dev_get_drvdata(dev);
1861
DooHyun Hwangbafd09f2021-02-03 19:14:43 +09001862 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
Sahitya Tummalab4274112016-12-22 18:40:39 -08001863}
1864
1865static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1866 struct device_attribute *attr, const char *buf, size_t count)
1867{
1868 struct ufs_hba *hba = dev_get_drvdata(dev);
1869 unsigned long flags;
1870 u32 value;
1871
1872 if (kstrtou32(buf, 0, &value))
1873 return -EINVAL;
1874
1875 value = !!value;
Jaegeuk Kimb6645112020-11-17 08:58:34 -08001876
1877 spin_lock_irqsave(hba->host->host_lock, flags);
Sahitya Tummalab4274112016-12-22 18:40:39 -08001878 if (value == hba->clk_gating.is_enabled)
1879 goto out;
1880
Jaegeuk Kimb6645112020-11-17 08:58:34 -08001881 if (value)
1882 __ufshcd_release(hba);
1883 else
Sahitya Tummalab4274112016-12-22 18:40:39 -08001884 hba->clk_gating.active_reqs++;
Sahitya Tummalab4274112016-12-22 18:40:39 -08001885
1886 hba->clk_gating.is_enabled = value;
1887out:
Jaegeuk Kimb6645112020-11-17 08:58:34 -08001888 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sahitya Tummalab4274112016-12-22 18:40:39 -08001889 return count;
1890}
1891
Can Guo4543d9d2021-01-20 02:04:22 -08001892static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
Vivek Gautameebcc192018-08-07 23:17:39 +05301893{
Can Guo4543d9d2021-01-20 02:04:22 -08001894 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1895 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1896 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1897 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1898 hba->clk_gating.delay_attr.attr.mode = 0644;
1899 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1900 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
Vivek Gautameebcc192018-08-07 23:17:39 +05301901
Can Guo4543d9d2021-01-20 02:04:22 -08001902 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1903 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1904 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1905 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1906 hba->clk_gating.enable_attr.attr.mode = 0644;
1907 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1908 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
Vivek Gautameebcc192018-08-07 23:17:39 +05301909}
1910
Can Guo4543d9d2021-01-20 02:04:22 -08001911static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
Vivek Gautameebcc192018-08-07 23:17:39 +05301912{
Can Guo4543d9d2021-01-20 02:04:22 -08001913 if (hba->clk_gating.delay_attr.attr.name)
1914 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1915 if (hba->clk_gating.enable_attr.attr.name)
1916 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
Vivek Gautameebcc192018-08-07 23:17:39 +05301917}
1918
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001919static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1920{
Vijay Viswanath10e5e372018-05-03 16:37:22 +05301921 char wq_name[sizeof("ufs_clk_gating_00")];
1922
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001923 if (!ufshcd_is_clkgating_allowed(hba))
1924 return;
1925
Can Guo2dec9472020-08-09 05:15:47 -07001926 hba->clk_gating.state = CLKS_ON;
1927
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001928 hba->clk_gating.delay_ms = 150;
1929 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1930 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1931
Vijay Viswanath10e5e372018-05-03 16:37:22 +05301932 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1933 hba->host->host_no);
1934 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
Jaegeuk Kime93e6e42020-11-17 08:58:36 -08001935 WQ_MEM_RECLAIM | WQ_HIGHPRI);
Vijay Viswanath10e5e372018-05-03 16:37:22 +05301936
Can Guo4543d9d2021-01-20 02:04:22 -08001937 ufshcd_init_clk_gating_sysfs(hba);
1938
Sahitya Tummalab4274112016-12-22 18:40:39 -08001939 hba->clk_gating.is_enabled = true;
Can Guo4543d9d2021-01-20 02:04:22 -08001940 hba->clk_gating.is_initialized = true;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001941}
1942
1943static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1944{
Can Guo4543d9d2021-01-20 02:04:22 -08001945 if (!hba->clk_gating.is_initialized)
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001946 return;
Bart Van Assche3489c342021-12-03 15:19:47 -08001947
Can Guo4543d9d2021-01-20 02:04:22 -08001948 ufshcd_remove_clk_gating_sysfs(hba);
Bart Van Assche3489c342021-12-03 15:19:47 -08001949
1950 /* Ungate the clock if necessary. */
1951 ufshcd_hold(hba, false);
Can Guo4543d9d2021-01-20 02:04:22 -08001952 hba->clk_gating.is_initialized = false;
Bart Van Assche3489c342021-12-03 15:19:47 -08001953 ufshcd_release(hba);
1954
1955 destroy_workqueue(hba->clk_gating.clk_gating_workq);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001956}
1957
Sahitya Tummala856b3482014-09-25 15:32:34 +03001958/* Must be called with host lock acquired */
1959static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1960{
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001961 bool queue_resume_work = false;
Stanley Chub1bf66d2020-06-11 18:10:43 +08001962 ktime_t curr_t = ktime_get();
Can Guoa45f9372021-05-24 01:36:57 -07001963 unsigned long flags;
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001964
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001965 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03001966 return;
1967
Can Guoa45f9372021-05-24 01:36:57 -07001968 spin_lock_irqsave(hba->host->host_lock, flags);
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001969 if (!hba->clk_scaling.active_reqs++)
1970 queue_resume_work = true;
1971
Can Guoa45f9372021-05-24 01:36:57 -07001972 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
1973 spin_unlock_irqrestore(hba->host->host_lock, flags);
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001974 return;
Can Guoa45f9372021-05-24 01:36:57 -07001975 }
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001976
1977 if (queue_resume_work)
1978 queue_work(hba->clk_scaling.workq,
1979 &hba->clk_scaling.resume_work);
1980
1981 if (!hba->clk_scaling.window_start_t) {
Stanley Chub1bf66d2020-06-11 18:10:43 +08001982 hba->clk_scaling.window_start_t = curr_t;
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001983 hba->clk_scaling.tot_busy_t = 0;
1984 hba->clk_scaling.is_busy_started = false;
1985 }
1986
Sahitya Tummala856b3482014-09-25 15:32:34 +03001987 if (!hba->clk_scaling.is_busy_started) {
Stanley Chub1bf66d2020-06-11 18:10:43 +08001988 hba->clk_scaling.busy_start_t = curr_t;
Sahitya Tummala856b3482014-09-25 15:32:34 +03001989 hba->clk_scaling.is_busy_started = true;
1990 }
Can Guoa45f9372021-05-24 01:36:57 -07001991 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sahitya Tummala856b3482014-09-25 15:32:34 +03001992}
1993
1994static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1995{
1996 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
Can Guoa45f9372021-05-24 01:36:57 -07001997 unsigned long flags;
Sahitya Tummala856b3482014-09-25 15:32:34 +03001998
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001999 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03002000 return;
2001
Can Guoa45f9372021-05-24 01:36:57 -07002002 spin_lock_irqsave(hba->host->host_lock, flags);
2003 hba->clk_scaling.active_reqs--;
Sahitya Tummala856b3482014-09-25 15:32:34 +03002004 if (!hba->outstanding_reqs && scaling->is_busy_started) {
2005 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2006 scaling->busy_start_t));
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01002007 scaling->busy_start_t = 0;
Sahitya Tummala856b3482014-09-25 15:32:34 +03002008 scaling->is_busy_started = false;
2009 }
Can Guoa45f9372021-05-24 01:36:57 -07002010 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sahitya Tummala856b3482014-09-25 15:32:34 +03002011}
Can Guo1d8613a2021-04-21 19:28:39 -07002012
2013static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2014{
2015 if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2016 return READ;
2017 else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2018 return WRITE;
2019 else
2020 return -EINVAL;
2021}
2022
2023static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2024 struct ufshcd_lrb *lrbp)
2025{
2026 struct ufs_hba_monitor *m = &hba->monitor;
2027
2028 return (m->enabled && lrbp && lrbp->cmd &&
2029 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2030 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2031}
2032
2033static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2034{
2035 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
Can Guoa45f9372021-05-24 01:36:57 -07002036 unsigned long flags;
Can Guo1d8613a2021-04-21 19:28:39 -07002037
Can Guoa45f9372021-05-24 01:36:57 -07002038 spin_lock_irqsave(hba->host->host_lock, flags);
Can Guo1d8613a2021-04-21 19:28:39 -07002039 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2040 hba->monitor.busy_start_ts[dir] = ktime_get();
Can Guoa45f9372021-05-24 01:36:57 -07002041 spin_unlock_irqrestore(hba->host->host_lock, flags);
Can Guo1d8613a2021-04-21 19:28:39 -07002042}
2043
2044static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2045{
2046 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
Can Guoa45f9372021-05-24 01:36:57 -07002047 unsigned long flags;
Can Guo1d8613a2021-04-21 19:28:39 -07002048
Can Guoa45f9372021-05-24 01:36:57 -07002049 spin_lock_irqsave(hba->host->host_lock, flags);
Can Guo1d8613a2021-04-21 19:28:39 -07002050 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
Bart Van Assche3f2c1002021-08-09 16:03:50 -07002051 struct request *req = scsi_cmd_to_rq(lrbp->cmd);
Can Guo1d8613a2021-04-21 19:28:39 -07002052 struct ufs_hba_monitor *m = &hba->monitor;
2053 ktime_t now, inc, lat;
2054
2055 now = lrbp->compl_time_stamp;
2056 inc = ktime_sub(now, m->busy_start_ts[dir]);
2057 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2058 m->nr_sec_rw[dir] += blk_rq_sectors(req);
2059
2060 /* Update latencies */
2061 m->nr_req[dir]++;
2062 lat = ktime_sub(now, lrbp->issue_time_stamp);
2063 m->lat_sum[dir] += lat;
2064 if (m->lat_max[dir] < lat || !m->lat_max[dir])
2065 m->lat_max[dir] = lat;
2066 if (m->lat_min[dir] > lat || !m->lat_min[dir])
2067 m->lat_min[dir] = lat;
2068
2069 m->nr_queued[dir]--;
2070 /* Push forward the busy start of monitor */
2071 m->busy_start_ts[dir] = now;
2072 }
Can Guoa45f9372021-05-24 01:36:57 -07002073 spin_unlock_irqrestore(hba->host->host_lock, flags);
Can Guo1d8613a2021-04-21 19:28:39 -07002074}
2075
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302076/**
2077 * ufshcd_send_command - Send SCSI or device management commands
2078 * @hba: per adapter instance
2079 * @task_tag: Task tag of the command
2080 */
2081static inline
2082void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
2083{
Stanley Chu6edfdcf2020-07-06 14:07:07 +08002084 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
Bart Van Assche1f522c52021-07-21 20:34:32 -07002085 unsigned long flags;
Stanley Chu6edfdcf2020-07-06 14:07:07 +08002086
2087 lrbp->issue_time_stamp = ktime_get();
2088 lrbp->compl_time_stamp = ktime_set(0, 0);
Bean Huo28fa68f2021-01-05 12:34:42 +01002089 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
Sahitya Tummala856b3482014-09-25 15:32:34 +03002090 ufshcd_clk_scaling_start_busy(hba);
Can Guo1d8613a2021-04-21 19:28:39 -07002091 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2092 ufshcd_start_monitor(hba, lrbp);
Bart Van Assche169f5eb2021-07-21 20:34:34 -07002093
2094 spin_lock_irqsave(&hba->outstanding_lock, flags);
Bart Van Asschea024ad02021-07-21 20:34:33 -07002095 if (hba->vops && hba->vops->setup_xfer_req)
2096 hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
Bart Van Assche169f5eb2021-07-21 20:34:34 -07002097 __set_bit(task_tag, &hba->outstanding_reqs);
Bart Van Assche1f522c52021-07-21 20:34:32 -07002098 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Bart Van Assche169f5eb2021-07-21 20:34:34 -07002099 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
2100
Gilad Bronerad1a1b92016-10-17 17:09:36 -07002101 /* Make sure that doorbell is committed immediately */
2102 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302103}
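
/*
 * Worked example for the doorbell write above: issuing the command in
 * slot 5 sets bit 5 of hba->outstanding_reqs and writes 1 << 5 = 0x20 to
 * REG_UTP_TRANSFER_REQ_DOOR_BELL, i.e. each doorbell bit corresponds to
 * one UTP transfer request slot.
 */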
2104
2105/**
2106 * ufshcd_copy_sense_data - Copy sense data in case of check condition
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002107 * @lrbp: pointer to local reference block
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302108 */
2109static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2110{
2111 int len;
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05302112 if (lrbp->sense_buffer &&
2113 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07002114 int len_to_copy;
2115
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302116 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
Avri Altman09a5a242018-11-22 20:04:56 +02002117 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07002118
Avri Altman09a5a242018-11-22 20:04:56 +02002119 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2120 len_to_copy);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302121 }
2122}
2123
2124/**
Dolev Raviv68078d52013-07-30 00:35:58 +05302125 * ufshcd_copy_query_response() - Copy the Query Response and the data
2126 * descriptor
2127 * @hba: per adapter instance
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002128 * @lrbp: pointer to local reference block
Dolev Raviv68078d52013-07-30 00:35:58 +05302129 */
2130static
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002131int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Dolev Raviv68078d52013-07-30 00:35:58 +05302132{
2133 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2134
Dolev Raviv68078d52013-07-30 00:35:58 +05302135 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
Dolev Raviv68078d52013-07-30 00:35:58 +05302136
Dolev Raviv68078d52013-07-30 00:35:58 +05302137 /* Get the descriptor */
Avri Altman1c908362019-05-21 11:24:22 +03002138 if (hba->dev_cmd.query.descriptor &&
2139 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002140 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
Dolev Raviv68078d52013-07-30 00:35:58 +05302141 GENERAL_UPIU_REQUEST_SIZE;
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002142 u16 resp_len;
2143 u16 buf_len;
Dolev Raviv68078d52013-07-30 00:35:58 +05302144
2145 /* data segment length */
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002146 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
Dolev Raviv68078d52013-07-30 00:35:58 +05302147 MASK_QUERY_DATA_SEG_LEN;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03002148 buf_len = be16_to_cpu(
2149 hba->dev_cmd.query.request.upiu_req.length);
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002150 if (likely(buf_len >= resp_len)) {
2151 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2152 } else {
2153 dev_warn(hba->dev,
Bean Huo3d4881d2019-11-12 23:34:35 +01002154 "%s: rsp size %d is bigger than buffer size %d",
2155 __func__, resp_len, buf_len);
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002156 return -EINVAL;
2157 }
Dolev Raviv68078d52013-07-30 00:35:58 +05302158 }
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002159
2160 return 0;
Dolev Raviv68078d52013-07-30 00:35:58 +05302161}
2162
2163/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302164 * ufshcd_hba_capabilities - Read controller capabilities
2165 * @hba: per adapter instance
Satya Tangiraladf043c742020-07-06 20:04:14 +00002166 *
2167 * Return: 0 on success, negative on error.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302168 */
Satya Tangiraladf043c742020-07-06 20:04:14 +00002169static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302170{
Satya Tangiraladf043c742020-07-06 20:04:14 +00002171 int err;
2172
Seungwon Jeonb873a2752013-06-26 22:39:26 +05302173 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302174
2175 /* nutrs and nutmrs are 0 based values */
2176 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2177 hba->nutmrs =
2178 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
Bart Van Assche945c3cc2021-12-03 15:19:42 -08002179 hba->reserved_slot = hba->nutrs - 1;
Satya Tangiraladf043c742020-07-06 20:04:14 +00002180
2181 /* Read crypto capabilities */
2182 err = ufshcd_hba_init_crypto_capabilities(hba);
2183 if (err)
2184 dev_err(hba->dev, "crypto setup failed\n");
2185
2186 return err;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302187}
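
/*
 * Worked example (assuming the standard UFSHCI capability layout implied
 * by the masks above, i.e. bits 4:0 hold NUTRS - 1 and bits 18:16 hold
 * NUTMRS - 1): a capabilities value of 0x0007001f gives
 * nutrs = 0x1f + 1 = 32 transfer request slots and
 * nutmrs = 0x7 + 1 = 8 task management slots, with reserved_slot then
 * pointing at slot 31.
 */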
2188
2189/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302190 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2191 * to accept UIC commands
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302192 * @hba: per adapter instance
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302193 * Return true on success, else false
2194 */
2195static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2196{
2197 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2198 return true;
2199 else
2200 return false;
2201}
2202
2203/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302204 * ufshcd_get_upmcrs - Get the power mode change request status
2205 * @hba: Pointer to adapter instance
2206 *
2207 * This function gets the UPMCRS field of HCS register
2208 * Returns value of UPMCRS field
2209 */
2210static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2211{
2212 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2213}
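
/*
 * Worked example (assuming the UPMCRS encoding from the UFSHCI spec,
 * where 1 means PWR_LOCAL, i.e. the local power mode change request was
 * successfully applied): an HCS value of 0x100 makes this helper return
 * (0x100 >> 8) & 0x7 = 1.
 */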
2214
2215/**
Bart Van Assche35c7d872021-07-21 20:34:28 -07002216 * ufshcd_dispatch_uic_cmd - Dispatch a UIC command to the Unipro layer
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302217 * @hba: per adapter instance
2218 * @uic_cmd: UIC command
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302219 */
2220static inline void
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302221ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302222{
Bart Van Assche35c7d872021-07-21 20:34:28 -07002223 lockdep_assert_held(&hba->uic_cmd_mutex);
2224
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302225 WARN_ON(hba->active_uic_cmd);
2226
2227 hba->active_uic_cmd = uic_cmd;
2228
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302229 /* Write Args */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302230 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2231 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2232 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302233
Bean Huo28fa68f2021-01-05 12:34:42 +01002234 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
Stanley Chuaa5c6972020-06-15 15:22:35 +08002235
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302236 /* Write UIC Cmd */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302237 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
Seungwon Jeonb873a2752013-06-26 22:39:26 +05302238 REG_UIC_COMMAND);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302239}
2240
2241/**
Bart Van Assche35c7d872021-07-21 20:34:28 -07002242 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302243 * @hba: per adapter instance
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002244 * @uic_cmd: UIC command
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302245 *
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302246 * Returns 0 only if success.
2247 */
2248static int
2249ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2250{
2251 int ret;
2252 unsigned long flags;
2253
Bart Van Assche35c7d872021-07-21 20:34:28 -07002254 lockdep_assert_held(&hba->uic_cmd_mutex);
2255
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302256 if (wait_for_completion_timeout(&uic_cmd->done,
Can Guo0f52fcb92020-11-02 22:24:40 -08002257 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302258 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
Can Guo0f52fcb92020-11-02 22:24:40 -08002259 } else {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302260 ret = -ETIMEDOUT;
Can Guo0f52fcb92020-11-02 22:24:40 -08002261 dev_err(hba->dev,
2262 "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2263 uic_cmd->command, uic_cmd->argument3);
2264
2265 if (!uic_cmd->cmd_active) {
2266 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2267 __func__);
2268 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2269 }
2270 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302271
2272 spin_lock_irqsave(hba->host->host_lock, flags);
2273 hba->active_uic_cmd = NULL;
2274 spin_unlock_irqrestore(hba->host->host_lock, flags);
2275
2276 return ret;
2277}
2278
2279/**
2280 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2281 * @hba: per adapter instance
2282 * @uic_cmd: UIC command
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002283 * @completion: initialize the completion only if this is set to true
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302284 *
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302285 * Returns 0 only if success.
2286 */
2287static int
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002288__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2289 bool completion)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302290{
Bart Van Assche35c7d872021-07-21 20:34:28 -07002291 lockdep_assert_held(&hba->uic_cmd_mutex);
2292 lockdep_assert_held(hba->host->host_lock);
2293
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302294 if (!ufshcd_ready_for_uic_cmd(hba)) {
2295 dev_err(hba->dev,
2296 "Controller not ready to accept UIC commands\n");
2297 return -EIO;
2298 }
2299
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002300 if (completion)
2301 init_completion(&uic_cmd->done);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302302
Can Guo0f52fcb92020-11-02 22:24:40 -08002303 uic_cmd->cmd_active = 1;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302304 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302305
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002306 return 0;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302307}
2308
2309/**
2310 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2311 * @hba: per adapter instance
2312 * @uic_cmd: UIC command
2313 *
2314 * Returns 0 only if success.
2315 */
Avri Altmane77044c52018-10-07 17:30:39 +03002316int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302317{
2318 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002319 unsigned long flags;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302320
jongmin jeonga22bcfd2021-10-18 21:42:02 +09002321 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2322 return 0;
2323
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002324 ufshcd_hold(hba, false);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302325 mutex_lock(&hba->uic_cmd_mutex);
Yaniv Gardicad2e032015-03-31 17:37:14 +03002326 ufshcd_add_delay_before_dme_cmd(hba);
2327
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002328 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002329 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002330 spin_unlock_irqrestore(hba->host->host_lock, flags);
2331 if (!ret)
2332 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2333
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302334 mutex_unlock(&hba->uic_cmd_mutex);
2335
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002336 ufshcd_release(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302337 return ret;
2338}
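
/*
 * Illustrative sketch (not part of the driver): the DME wrappers elsewhere
 * in this file build a struct uic_command and hand it to
 * ufshcd_send_uic_cmd(). A link-startup request, for instance, only needs
 * the command opcode:
 *
 *	struct uic_command uic_cmd = {0};
 *	int ret;
 *
 *	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
 *	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 */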
2339
2340/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302341 * ufshcd_map_sg - Map scatter-gather list to prdt
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002342 * @hba: per adapter instance
2343 * @lrbp: pointer to local reference block
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302344 *
2345 * Returns 0 in case of success, non-zero value in case of failure
2346 */
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002347static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302348{
2349 struct ufshcd_sg_entry *prd_table;
2350 struct scatterlist *sg;
2351 struct scsi_cmnd *cmd;
2352 int sg_segments;
2353 int i;
2354
2355 cmd = lrbp->cmd;
2356 sg_segments = scsi_dma_map(cmd);
2357 if (sg_segments < 0)
2358 return sg_segments;
2359
2360 if (sg_segments) {
Alim Akhtar26f968d2020-05-28 06:46:52 +05302361
2362 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2363 lrbp->utr_descriptor_ptr->prd_table_length =
2364 cpu_to_le16((sg_segments *
2365 sizeof(struct ufshcd_sg_entry)));
2366 else
2367 lrbp->utr_descriptor_ptr->prd_table_length =
Bart Van Assche3ad317a2021-10-20 14:40:22 -07002368 cpu_to_le16(sg_segments);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302369
Bart Van Assche3ad317a2021-10-20 14:40:22 -07002370 prd_table = lrbp->ucd_prdt_ptr;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302371
2372 scsi_for_each_sg(cmd, sg, sg_segments, i) {
Bart Van Assche1ea7d802021-10-20 14:40:24 -07002373 const unsigned int len = sg_dma_len(sg);
2374
2375 /*
2376 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
2377 * based value that indicates the length, in bytes, of
2378 * the data block. A maximum of length of 256KB may
2379 * exist for any entry. Bits 1:0 of this field shall be
2380 * 11b to indicate Dword granularity. A value of '3'
2381 * indicates 4 bytes, '7' indicates 8 bytes, etc."
2382 */
2383 WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
2384 prd_table[i].size = cpu_to_le32(len - 1);
2385 prd_table[i].addr = cpu_to_le64(sg->dma_address);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002386 prd_table[i].reserved = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302387 }
2388 } else {
2389 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2390 }
2391
2392 return 0;
2393}
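
/*
 * Worked example for the PRDT entry encoding above: a 4 KiB scatterlist
 * segment (sg_dma_len() == 0x1000) is programmed as size = 0x0fff, whose
 * low two bits are 11b as required by the Data Byte Count rule quoted
 * above; the hardware then transfers size + 1 bytes.
 */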
2394
2395/**
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302396 * ufshcd_enable_intr - enable interrupts
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302397 * @hba: per adapter instance
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302398 * @intrs: interrupt bits
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302399 */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302400static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302401{
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302402 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2403
Caleb Connolly51428812021-03-10 15:33:42 +00002404 if (hba->ufs_version == ufshci_version(1, 0)) {
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302405 u32 rw;
2406 rw = set & INTERRUPT_MASK_RW_VER_10;
2407 set = rw | ((set ^ intrs) & intrs);
2408 } else {
2409 set |= intrs;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302410 }
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302411
2412 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2413}
2414
2415/**
2416 * ufshcd_disable_intr - disable interrupts
2417 * @hba: per adapter instance
2418 * @intrs: interrupt bits
2419 */
2420static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2421{
2422 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2423
Caleb Connolly51428812021-03-10 15:33:42 +00002424 if (hba->ufs_version == ufshci_version(1, 0)) {
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302425 u32 rw;
2426 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2427 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2428 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2429
2430 } else {
2431 set &= ~intrs;
2432 }
2433
2434 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302435}
2436
2437/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302438 * ufshcd_prepare_req_desc_hdr() - Fills the request descriptor header
 2439 * according to the request
2440 * @lrbp: pointer to local reference block
2441 * @upiu_flags: flags required in the header
 2442 * @cmd_dir: data transfer direction of the request
2443 */
2444static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
Bean Huoa23064c2020-07-06 14:39:36 +02002445 u8 *upiu_flags, enum dma_data_direction cmd_dir)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302446{
2447 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2448 u32 data_direction;
2449 u32 dword_0;
Satya Tangiraladf043c742020-07-06 20:04:14 +00002450 u32 dword_1 = 0;
2451 u32 dword_3 = 0;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302452
2453 if (cmd_dir == DMA_FROM_DEVICE) {
2454 data_direction = UTP_DEVICE_TO_HOST;
2455 *upiu_flags = UPIU_CMD_FLAGS_READ;
2456 } else if (cmd_dir == DMA_TO_DEVICE) {
2457 data_direction = UTP_HOST_TO_DEVICE;
2458 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2459 } else {
2460 data_direction = UTP_NO_DATA_TRANSFER;
2461 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2462 }
2463
2464 dword_0 = data_direction | (lrbp->command_type
2465 << UPIU_COMMAND_TYPE_OFFSET);
2466 if (lrbp->intr_cmd)
2467 dword_0 |= UTP_REQ_DESC_INT_CMD;
2468
Satya Tangiraladf043c742020-07-06 20:04:14 +00002469 /* Prepare crypto related dwords */
2470 ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2471
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302472 /* Transfer request descriptor header fields */
2473 req_desc->header.dword_0 = cpu_to_le32(dword_0);
Satya Tangiraladf043c742020-07-06 20:04:14 +00002474 req_desc->header.dword_1 = cpu_to_le32(dword_1);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302475 /*
2476 * assigning invalid value for command status. Controller
2477 * updates OCS on command completion, with the command
2478 * status
2479 */
2480 req_desc->header.dword_2 =
2481 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
Satya Tangiraladf043c742020-07-06 20:04:14 +00002482 req_desc->header.dword_3 = cpu_to_le32(dword_3);
Yaniv Gardi51047262016-02-01 15:02:38 +02002483
2484 req_desc->prd_table_length = 0;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302485}
2486
2487/**
 2488 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the command UPIU
 2489 * for SCSI commands
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002490 * @lrbp: local reference block pointer
2491 * @upiu_flags: flags
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302492 */
2493static
Bean Huoa23064c2020-07-06 14:39:36 +02002494void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302495{
Bart Van Assche1b21b8f2019-12-24 14:02:45 -08002496 struct scsi_cmnd *cmd = lrbp->cmd;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302497 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002498 unsigned short cdb_len;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302499
2500 /* command descriptor fields */
2501 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2502 UPIU_TRANSACTION_COMMAND, upiu_flags,
2503 lrbp->lun, lrbp->task_tag);
2504 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2505 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2506
2507 /* Total EHS length and Data segment length will be zero */
2508 ucd_req_ptr->header.dword_2 = 0;
2509
Bart Van Assche1b21b8f2019-12-24 14:02:45 -08002510 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302511
Bart Van Assche1b21b8f2019-12-24 14:02:45 -08002512 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
Avri Altmana851b2b2018-10-07 17:30:34 +03002513 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
Bart Van Assche1b21b8f2019-12-24 14:02:45 -08002514 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002515
2516 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302517}
2518
Dolev Raviv68078d52013-07-30 00:35:58 +05302519/**
2520 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2521 * for query requsts
2522 * @hba: UFS hba
2523 * @lrbp: local reference block pointer
2524 * @upiu_flags: flags
2525 */
2526static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
Bean Huoa23064c2020-07-06 14:39:36 +02002527 struct ufshcd_lrb *lrbp, u8 upiu_flags)
Dolev Raviv68078d52013-07-30 00:35:58 +05302528{
2529 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2530 struct ufs_query *query = &hba->dev_cmd.query;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302531 u16 len = be16_to_cpu(query->request.upiu_req.length);
Dolev Raviv68078d52013-07-30 00:35:58 +05302532
2533 /* Query request header */
2534 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2535 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2536 lrbp->lun, lrbp->task_tag);
2537 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2538 0, query->request.query_func, 0, 0);
2539
Zang Leigang68612852016-08-25 17:39:19 +08002540 /* Data segment length is only needed for WRITE_DESC */
2541 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2542 ucd_req_ptr->header.dword_2 =
2543 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2544 else
2545 ucd_req_ptr->header.dword_2 = 0;
Dolev Raviv68078d52013-07-30 00:35:58 +05302546
2547 /* Copy the Query Request buffer as is */
2548 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2549 QUERY_OSF_SIZE);
Dolev Raviv68078d52013-07-30 00:35:58 +05302550
2551 /* Copy the Descriptor */
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002552 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
Avri Altman220d17a62018-10-07 17:30:36 +03002553 memcpy(ucd_req_ptr + 1, query->descriptor, len);
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002554
Yaniv Gardi51047262016-02-01 15:02:38 +02002555 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Dolev Raviv68078d52013-07-30 00:35:58 +05302556}
2557
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302558static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2559{
2560 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2561
2562 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2563
2564 /* command descriptor fields */
2565 ucd_req_ptr->header.dword_0 =
2566 UPIU_HEADER_DWORD(
2567 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
Yaniv Gardi51047262016-02-01 15:02:38 +02002568 /* clear rest of the fields of basic header */
2569 ucd_req_ptr->header.dword_1 = 0;
2570 ucd_req_ptr->header.dword_2 = 0;
2571
2572 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302573}
2574
2575/**
Bean Huof273c542020-08-14 11:50:33 +02002576 * ufshcd_compose_devman_upiu - compose a UFS Protocol Information Unit (UPIU)
Joao Pinto300bb132016-05-11 12:21:27 +01002577 * for device management purposes
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002578 * @hba: per adapter instance
2579 * @lrbp: pointer to local reference block
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302580 */
Bean Huof273c542020-08-14 11:50:33 +02002581static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2582 struct ufshcd_lrb *lrbp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302583{
Bean Huoa23064c2020-07-06 14:39:36 +02002584 u8 upiu_flags;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302585 int ret = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302586
Caleb Connolly51428812021-03-10 15:33:42 +00002587 if (hba->ufs_version <= ufshci_version(1, 1))
Joao Pinto300bb132016-05-11 12:21:27 +01002588 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
kehuanlin83dc7e32017-09-06 17:58:39 +08002589 else
2590 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
Joao Pinto300bb132016-05-11 12:21:27 +01002591
2592 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2593 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2594 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2595 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2596 ufshcd_prepare_utp_nop_upiu(lrbp);
2597 else
2598 ret = -EINVAL;
2599
2600 return ret;
2601}
2602
2603/**
 2604 * ufshcd_comp_scsi_upiu - compose a UFS Protocol Information Unit (UPIU)
 2605 * for SCSI commands
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002606 * @hba: per adapter instance
2607 * @lrbp: pointer to local reference block
Joao Pinto300bb132016-05-11 12:21:27 +01002608 */
2609static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2610{
Bean Huoa23064c2020-07-06 14:39:36 +02002611 u8 upiu_flags;
Joao Pinto300bb132016-05-11 12:21:27 +01002612 int ret = 0;
2613
Caleb Connolly51428812021-03-10 15:33:42 +00002614 if (hba->ufs_version <= ufshci_version(1, 1))
Joao Pinto300bb132016-05-11 12:21:27 +01002615 lrbp->command_type = UTP_CMD_TYPE_SCSI;
kehuanlin83dc7e32017-09-06 17:58:39 +08002616 else
2617 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
Joao Pinto300bb132016-05-11 12:21:27 +01002618
2619 if (likely(lrbp->cmd)) {
2620 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2621 lrbp->cmd->sc_data_direction);
2622 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2623 } else {
2624 ret = -EINVAL;
2625 }
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302626
2627 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302628}
2629
2630/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03002631 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002632 * @upiu_wlun_id: UPIU W-LUN id
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03002633 *
2634 * Returns SCSI W-LUN id
2635 */
2636static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2637{
2638 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2639}
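
/*
 * Illustrative example (a sketch based on the definitions in ufs.h/scsi.h):
 * with UFS_UPIU_WLUN_ID being the W-LUN indicator bit (bit 7) and
 * SCSI_W_LUN_BASE being 0xc100, the UFS Device W-LUN 0xd0 maps to
 * (0xd0 & ~0x80) | 0xc100 = 0xc150, which is the LUN under which the device
 * well known unit is exposed to the SCSI midlayer.
 */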
2640
Asutosh Dasb294ff32021-04-23 17:20:16 -07002641static inline bool is_device_wlun(struct scsi_device *sdev)
2642{
2643 return sdev->lun ==
2644 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
2645}
2646
Bart Van Assche4d2b8d42020-01-22 19:56:35 -08002647static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2648{
2649 struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2650 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2651 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2652 i * sizeof(struct utp_transfer_cmd_desc);
2653 u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2654 response_upiu);
2655 u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2656
2657 lrb->utr_descriptor_ptr = utrdlp + i;
2658 lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2659 i * sizeof(struct utp_transfer_req_desc);
2660 lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2661 lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2662 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2663 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
Bart Van Assche3ad317a2021-10-20 14:40:22 -07002664 lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
Bart Van Assche4d2b8d42020-01-22 19:56:35 -08002665 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2666}
2667
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03002668/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302669 * ufshcd_queuecommand - main entry point for SCSI requests
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002670 * @host: SCSI host pointer
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302671 * @cmd: command from SCSI Midlayer
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302672 *
2673 * Returns 0 for success, non-zero in case of failure
2674 */
2675static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2676{
Bart Van Assche4728ab42021-07-21 20:34:27 -07002677 struct ufs_hba *hba = shost_priv(host);
Bart Van Assche3f2c1002021-08-09 16:03:50 -07002678 int tag = scsi_cmd_to_rq(cmd)->tag;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302679 struct ufshcd_lrb *lrbp;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302680 int err = 0;
2681
Bart Van Assche4728ab42021-07-21 20:34:27 -07002682 WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302683
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08002684 if (!down_read_trylock(&hba->clk_scaling_lock))
2685 return SCSI_MLQUEUE_HOST_BUSY;
2686
Bart Van Assche5675c382021-12-03 15:19:48 -08002687 /*
2688 * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
2689 * calls.
2690 */
2691 rcu_read_lock();
2692
Can Guoa45f9372021-05-24 01:36:57 -07002693 switch (hba->ufshcd_state) {
2694 case UFSHCD_STATE_OPERATIONAL:
Adrian Hunterd489f18a2021-10-08 11:40:48 +03002695 break;
Can Guoa45f9372021-05-24 01:36:57 -07002696 case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
Adrian Hunterd489f18a2021-10-08 11:40:48 +03002697 /*
2698 * SCSI error handler can call ->queuecommand() while UFS error
2699 * handler is in progress. Error interrupts could change the
2700 * state from UFSHCD_STATE_RESET to
2701 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
2702 * being issued in that case.
2703 */
2704 if (ufshcd_eh_in_progress(hba)) {
2705 err = SCSI_MLQUEUE_HOST_BUSY;
2706 goto out;
2707 }
Can Guoa45f9372021-05-24 01:36:57 -07002708 break;
2709 case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2710 /*
 2711 * pm_runtime_get_sync() is used at the error handling preparation
 2712 * stage. If a SCSI cmd, e.g. the SSU cmd, is sent from hba's
 2713 * PM ops, it can never finish if we let the SCSI layer keep
 2714 * retrying it, which keeps the error handler stuck forever. Neither
 2715 * can we let the SCSI cmd pass through, because UFS is in a bad
 2716 * state: the SCSI cmd may eventually time out, which would block
 2717 * the error handler for too long. So just fail the SCSI cmd sent
 2718 * from PM ops; the error handler can recover the PM error anyway.
2719 */
2720 if (hba->pm_op_in_progress) {
2721 hba->force_reset = true;
2722 set_host_byte(cmd, DID_BAD_TARGET);
Bart Van Assche35c37302021-10-07 13:46:01 -07002723 scsi_done(cmd);
Can Guoa45f9372021-05-24 01:36:57 -07002724 goto out;
2725 }
2726 fallthrough;
2727 case UFSHCD_STATE_RESET:
2728 err = SCSI_MLQUEUE_HOST_BUSY;
2729 goto out;
2730 case UFSHCD_STATE_ERROR:
2731 set_host_byte(cmd, DID_ERROR);
Bart Van Assche35c37302021-10-07 13:46:01 -07002732 scsi_done(cmd);
Can Guoa45f9372021-05-24 01:36:57 -07002733 goto out;
Can Guoa45f9372021-05-24 01:36:57 -07002734 }
2735
Gilad Broner7fabb772017-02-03 16:56:50 -08002736 hba->req_abort_count = 0;
2737
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002738 err = ufshcd_hold(hba, true);
2739 if (err) {
2740 err = SCSI_MLQUEUE_HOST_BUSY;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002741 goto out;
2742 }
Can Guo2dec9472020-08-09 05:15:47 -07002743 WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2744 (hba->clk_gating.state != CLKS_ON));
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002745
Can Guoa45f9372021-05-24 01:36:57 -07002746 lrbp = &hba->lrb[tag];
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302747 WARN_ON(lrbp->cmd);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302748 lrbp->cmd = cmd;
Avri Altman09a5a242018-11-22 20:04:56 +02002749 lrbp->sense_bufflen = UFS_SENSE_SIZE;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302750 lrbp->sense_buffer = cmd->sense_buffer;
2751 lrbp->task_tag = tag;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03002752 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
Yaniv Gardib8521902015-05-17 18:54:57 +03002753 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
Satya Tangiraladf043c742020-07-06 20:04:14 +00002754
Bart Van Assche3f2c1002021-08-09 16:03:50 -07002755 ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
Satya Tangiraladf043c742020-07-06 20:04:14 +00002756
Gilad Bronere0b299e2017-02-03 16:56:40 -08002757 lrbp->req_abort_skip = false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302758
Avri Altman09d9e4d2021-10-30 09:23:01 +03002759 ufshpb_prep(hba, lrbp);
Daejun Park2fff76f2021-07-12 17:59:36 +09002760
Joao Pinto300bb132016-05-11 12:21:27 +01002761 ufshcd_comp_scsi_upiu(hba, lrbp);
2762
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002763 err = ufshcd_map_sg(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302764 if (err) {
2765 lrbp->cmd = NULL;
Can Guo17c7d352019-12-05 02:14:33 +00002766 ufshcd_release(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302767 goto out;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302768 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302769
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302770 ufshcd_send_command(hba, tag);
Bart Van Assche5675c382021-12-03 15:19:48 -08002771
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302772out:
Bart Van Assche5675c382021-12-03 15:19:48 -08002773 rcu_read_unlock();
2774
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08002775 up_read(&hba->clk_scaling_lock);
Bart Van Asschec11a1ae2021-07-21 20:34:39 -07002776
Adrian Hunter88b099002021-09-17 17:43:49 +03002777 if (ufs_trigger_eh()) {
2778 unsigned long flags;
2779
2780 spin_lock_irqsave(hba->host->host_lock, flags);
2781 ufshcd_schedule_eh_work(hba);
2782 spin_unlock_irqrestore(hba->host->host_lock, flags);
2783 }
Bart Van Asschec11a1ae2021-07-21 20:34:39 -07002784
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302785 return err;
2786}
2787
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302788static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2789 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2790{
2791 lrbp->cmd = NULL;
2792 lrbp->sense_bufflen = 0;
2793 lrbp->sense_buffer = NULL;
2794 lrbp->task_tag = tag;
2795 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302796 lrbp->intr_cmd = true; /* No interrupt aggregation */
Satya Tangiraladf043c742020-07-06 20:04:14 +00002797 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302798 hba->dev_cmd.type = cmd_type;
2799
Bean Huof273c542020-08-14 11:50:33 +02002800 return ufshcd_compose_devman_upiu(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302801}
2802
2803static int
2804ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2805{
2806 int err = 0;
2807 unsigned long flags;
2808 u32 mask = 1 << tag;
2809
2810 /* clear outstanding transaction before retry */
2811 spin_lock_irqsave(hba->host->host_lock, flags);
2812 ufshcd_utrl_clear(hba, tag);
2813 spin_unlock_irqrestore(hba->host->host_lock, flags);
2814
2815 /*
Keoseong Park32424902021-06-04 11:40:38 +09002816 * wait for h/w to clear corresponding bit in door-bell.
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302817 * max. wait is 1 sec.
2818 */
2819 err = ufshcd_wait_for_register(hba,
2820 REG_UTP_TRANSFER_REQ_DOOR_BELL,
Bart Van Assche5cac1092020-05-07 15:27:50 -07002821 mask, ~mask, 1000, 1000);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302822
2823 return err;
2824}
2825
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002826static int
2827ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2828{
2829 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2830
2831 /* Get the UPIU response */
2832 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2833 UPIU_RSP_CODE_OFFSET;
2834 return query_res->response;
2835}
2836
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302837/**
2838 * ufshcd_dev_cmd_completion() - handles device management command responses
2839 * @hba: per adapter instance
2840 * @lrbp: pointer to local reference block
2841 */
2842static int
2843ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2844{
2845 int resp;
2846 int err = 0;
2847
Dolev Ravivff8e20c2016-12-22 18:42:18 -08002848 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302849 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2850
2851 switch (resp) {
2852 case UPIU_TRANSACTION_NOP_IN:
2853 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2854 err = -EINVAL;
2855 dev_err(hba->dev, "%s: unexpected response %x\n",
2856 __func__, resp);
2857 }
2858 break;
Dolev Raviv68078d52013-07-30 00:35:58 +05302859 case UPIU_TRANSACTION_QUERY_RSP:
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002860 err = ufshcd_check_query_response(hba, lrbp);
2861 if (!err)
2862 err = ufshcd_copy_query_response(hba, lrbp);
Dolev Raviv68078d52013-07-30 00:35:58 +05302863 break;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302864 case UPIU_TRANSACTION_REJECT_UPIU:
2865 /* TODO: handle Reject UPIU Response */
2866 err = -EPERM;
2867 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2868 __func__);
2869 break;
2870 default:
2871 err = -EINVAL;
2872 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2873 __func__, resp);
2874 break;
2875 }
2876
2877 return err;
2878}
2879
2880static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2881 struct ufshcd_lrb *lrbp, int max_timeout)
2882{
2883 int err = 0;
2884 unsigned long time_left;
2885 unsigned long flags;
2886
2887 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2888 msecs_to_jiffies(max_timeout));
2889
2890 spin_lock_irqsave(hba->host->host_lock, flags);
2891 hba->dev_cmd.complete = NULL;
2892 if (likely(time_left)) {
2893 err = ufshcd_get_tr_ocs(lrbp);
2894 if (!err)
2895 err = ufshcd_dev_cmd_completion(hba, lrbp);
2896 }
2897 spin_unlock_irqrestore(hba->host->host_lock, flags);
2898
2899 if (!time_left) {
2900 err = -ETIMEDOUT;
Yaniv Gardia48353f2016-02-01 15:02:40 +02002901 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
2902 __func__, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302903 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
Yaniv Gardia48353f2016-02-01 15:02:40 +02002904 /* successfully cleared the command, retry if needed */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302905 err = -EAGAIN;
Yaniv Gardia48353f2016-02-01 15:02:40 +02002906 /*
2907 * in case of an error, after clearing the doorbell,
2908 * we also need to clear the outstanding_request
2909 * field in hba
2910 */
Bart Van Assche169f5eb2021-07-21 20:34:34 -07002911 spin_lock_irqsave(&hba->outstanding_lock, flags);
2912 __clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
2913 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302914 }
2915
2916 return err;
2917}
2918
2919/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302920 * ufshcd_exec_dev_cmd - API for sending device management requests
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002921 * @hba: UFS hba
2922 * @cmd_type: specifies the type (NOP, Query...)
Bart Van Assched0b2b702021-05-13 09:49:12 -07002923 * @timeout: timeout in milliseconds
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302924 *
Dolev Raviv68078d52013-07-30 00:35:58 +05302925 * NOTE: Since there is only one available tag for device management commands,
2926 * it is expected you hold the hba->dev_cmd.lock mutex.
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302927 */
2928static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2929 enum dev_cmd_type cmd_type, int timeout)
2930{
Bart Van Assche8a686f22021-07-21 20:34:26 -07002931 DECLARE_COMPLETION_ONSTACK(wait);
Bart Van Assche945c3cc2021-12-03 15:19:42 -08002932 const u32 tag = hba->reserved_slot;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302933 struct ufshcd_lrb *lrbp;
2934 int err;
Bart Van Assche945c3cc2021-12-03 15:19:42 -08002935
2936 /* Protects use of hba->reserved_slot. */
2937 lockdep_assert_held(&hba->dev_cmd.lock);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302938
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08002939 down_read(&hba->clk_scaling_lock);
2940
Can Guoa45f9372021-05-24 01:36:57 -07002941 lrbp = &hba->lrb[tag];
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302942 WARN_ON(lrbp->cmd);
2943 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2944 if (unlikely(err))
Can Guoeb783bb2021-06-09 01:24:00 -07002945 goto out;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302946
2947 hba->dev_cmd.complete = &wait;
2948
Avri Altmanfb475b72021-01-10 10:46:18 +02002949 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
Can Guoa45f9372021-05-24 01:36:57 -07002950
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302951 ufshcd_send_command(hba, tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302952 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
Avri Altmanfb475b72021-01-10 10:46:18 +02002953 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
2954 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
Ohad Sharabi6667e6d2018-03-28 12:42:18 +03002955
Can Guoeb783bb2021-06-09 01:24:00 -07002956out:
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08002957 up_read(&hba->clk_scaling_lock);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302958 return err;
2959}
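
/*
 * Usage sketch (illustrative, error handling and retries omitted): this is
 * roughly how the NOP OUT handshake during device initialization issues a
 * device management command through this helper:
 *
 *	ufshcd_hold(hba, false);
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 *	ufshcd_release(hba);
 */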
2960
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302961/**
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002962 * ufshcd_init_query() - init the query response and request parameters
2963 * @hba: per-adapter instance
2964 * @request: address of the request pointer to be initialized
2965 * @response: address of the response pointer to be initialized
2966 * @opcode: operation to perform
2967 * @idn: flag idn to access
2968 * @index: LU number to access
2969 * @selector: query/flag/descriptor further identification
2970 */
2971static inline void ufshcd_init_query(struct ufs_hba *hba,
2972 struct ufs_query_req **request, struct ufs_query_res **response,
2973 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2974{
2975 *request = &hba->dev_cmd.query.request;
2976 *response = &hba->dev_cmd.query.response;
2977 memset(*request, 0, sizeof(struct ufs_query_req));
2978 memset(*response, 0, sizeof(struct ufs_query_res));
2979 (*request)->upiu_req.opcode = opcode;
2980 (*request)->upiu_req.idn = idn;
2981 (*request)->upiu_req.index = index;
2982 (*request)->upiu_req.selector = selector;
2983}
2984
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02002985static int ufshcd_query_flag_retry(struct ufs_hba *hba,
Stanley Chu1f34eed2020-05-08 16:01:12 +08002986 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02002987{
2988 int ret;
2989 int retries;
2990
2991 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
Stanley Chu1f34eed2020-05-08 16:01:12 +08002992 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02002993 if (ret)
2994 dev_dbg(hba->dev,
2995 "%s: failed with error %d, retries %d\n",
2996 __func__, ret, retries);
2997 else
2998 break;
2999 }
3000
3001 if (ret)
3002 dev_err(hba->dev,
3003 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
3004 __func__, opcode, idn, ret, retries);
3005 return ret;
3006}
3007
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003008/**
Dolev Raviv68078d52013-07-30 00:35:58 +05303009 * ufshcd_query_flag() - API function for sending flag query requests
Bart Van Assche8aa29f12018-03-01 15:07:20 -08003010 * @hba: per-adapter instance
3011 * @opcode: flag query to perform
3012 * @idn: flag idn to access
Stanley Chu1f34eed2020-05-08 16:01:12 +08003013 * @index: flag index to access
Bart Van Assche8aa29f12018-03-01 15:07:20 -08003014 * @flag_res: the flag value after the query request completes
Dolev Raviv68078d52013-07-30 00:35:58 +05303015 *
3016 * Returns 0 for success, non-zero in case of failure
3017 */
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02003018int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
Stanley Chu1f34eed2020-05-08 16:01:12 +08003019 enum flag_idn idn, u8 index, bool *flag_res)
Dolev Raviv68078d52013-07-30 00:35:58 +05303020{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003021 struct ufs_query_req *request = NULL;
3022 struct ufs_query_res *response = NULL;
Stanley Chu1f34eed2020-05-08 16:01:12 +08003023 int err, selector = 0;
Yaniv Gardie5ad4062016-02-01 15:02:41 +02003024 int timeout = QUERY_REQ_TIMEOUT;
Dolev Raviv68078d52013-07-30 00:35:58 +05303025
3026 BUG_ON(!hba);
3027
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003028 ufshcd_hold(hba, false);
Dolev Raviv68078d52013-07-30 00:35:58 +05303029 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003030 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3031 selector);
Dolev Raviv68078d52013-07-30 00:35:58 +05303032
3033 switch (opcode) {
3034 case UPIU_QUERY_OPCODE_SET_FLAG:
3035 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3036 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3037 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3038 break;
3039 case UPIU_QUERY_OPCODE_READ_FLAG:
3040 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3041 if (!flag_res) {
3042 /* No dummy reads */
3043 dev_err(hba->dev, "%s: Invalid argument for read request\n",
3044 __func__);
3045 err = -EINVAL;
3046 goto out_unlock;
3047 }
3048 break;
3049 default:
3050 dev_err(hba->dev,
3051 "%s: Expected query flag opcode but got = %d\n",
3052 __func__, opcode);
3053 err = -EINVAL;
3054 goto out_unlock;
3055 }
Dolev Raviv68078d52013-07-30 00:35:58 +05303056
Yaniv Gardie5ad4062016-02-01 15:02:41 +02003057 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
Dolev Raviv68078d52013-07-30 00:35:58 +05303058
3059 if (err) {
3060 dev_err(hba->dev,
3061 "%s: Sending flag query for idn %d failed, err = %d\n",
3062 __func__, idn, err);
3063 goto out_unlock;
3064 }
3065
3066 if (flag_res)
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05303067 *flag_res = (be32_to_cpu(response->upiu_res.value) &
Dolev Raviv68078d52013-07-30 00:35:58 +05303068 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3069
3070out_unlock:
3071 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003072 ufshcd_release(hba);
Dolev Raviv68078d52013-07-30 00:35:58 +05303073 return err;
3074}
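
/*
 * Usage sketch (illustrative): reading back the fDeviceInit flag, assuming
 * QUERY_FLAG_IDN_FDEVICEINIT from ufs.h:
 *
 *	bool flag_res = true;
 *	int err;
 *
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				      QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
 *	if (!err && !flag_res)
 *		dev_dbg(hba->dev, "fDeviceInit cleared, init complete\n");
 */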
3075
3076/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303077 * ufshcd_query_attr - API function for sending attribute requests
Bart Van Assche8aa29f12018-03-01 15:07:20 -08003078 * @hba: per-adapter instance
3079 * @opcode: attribute opcode
3080 * @idn: attribute idn to access
3081 * @index: index field
3082 * @selector: selector field
3083 * @attr_val: the attribute value after the query request completes
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303084 *
3085 * Returns 0 for success, non-zero in case of failure
 3086 */
Stanislav Nijnikovec92b592018-02-15 14:14:11 +02003087int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3088 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303089{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003090 struct ufs_query_req *request = NULL;
3091 struct ufs_query_res *response = NULL;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303092 int err;
3093
3094 BUG_ON(!hba);
3095
3096 if (!attr_val) {
3097 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3098 __func__, opcode);
jintae jang8ca1a402020-12-03 14:25:32 +09003099 return -EINVAL;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303100 }
3101
jintae jang8ca1a402020-12-03 14:25:32 +09003102 ufshcd_hold(hba, false);
3103
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303104 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003105 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3106 selector);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303107
3108 switch (opcode) {
3109 case UPIU_QUERY_OPCODE_WRITE_ATTR:
3110 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05303111 request->upiu_req.value = cpu_to_be32(*attr_val);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303112 break;
3113 case UPIU_QUERY_OPCODE_READ_ATTR:
3114 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3115 break;
3116 default:
3117 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3118 __func__, opcode);
3119 err = -EINVAL;
3120 goto out_unlock;
3121 }
3122
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003123 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303124
3125 if (err) {
Yaniv Gardi4b761b52016-11-23 16:31:18 -08003126 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3127 __func__, opcode, idn, index, err);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303128 goto out_unlock;
3129 }
3130
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05303131 *attr_val = be32_to_cpu(response->upiu_res.value);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303132
3133out_unlock:
3134 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003135 ufshcd_release(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303136 return err;
3137}
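
/*
 * Usage sketch (illustrative): reading the bActiveICCLevel attribute,
 * assuming QUERY_ATTR_IDN_ACTIVE_ICC_LVL from ufs.h:
 *
 *	u32 icc_level;
 *	int err;
 *
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
 *				&icc_level);
 */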
3138
3139/**
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02003140 * ufshcd_query_attr_retry() - API function for sending query
3141 * attribute with retries
3142 * @hba: per-adapter instance
3143 * @opcode: attribute opcode
3144 * @idn: attribute idn to access
3145 * @index: index field
3146 * @selector: selector field
3147 * @attr_val: the attribute value after the query request
3148 * completes
3149 *
3150 * Returns 0 for success, non-zero in case of failure
 3151 */
Daejun Park41d8a932021-07-12 18:00:25 +09003152int ufshcd_query_attr_retry(struct ufs_hba *hba,
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02003153 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3154 u32 *attr_val)
3155{
3156 int ret = 0;
3157 u32 retries;
3158
Bart Van Assche68c9fcf2019-12-24 14:02:43 -08003159 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02003160 ret = ufshcd_query_attr(hba, opcode, idn, index,
3161 selector, attr_val);
3162 if (ret)
3163 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3164 __func__, ret, retries);
3165 else
3166 break;
3167 }
3168
3169 if (ret)
3170 dev_err(hba->dev,
3171 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
3172 __func__, idn, ret, QUERY_REQ_RETRIES);
3173 return ret;
3174}
3175
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003176static int __ufshcd_query_descriptor(struct ufs_hba *hba,
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003177 enum query_opcode opcode, enum desc_idn idn, u8 index,
3178 u8 selector, u8 *desc_buf, int *buf_len)
3179{
3180 struct ufs_query_req *request = NULL;
3181 struct ufs_query_res *response = NULL;
3182 int err;
3183
3184 BUG_ON(!hba);
3185
3186 if (!desc_buf) {
3187 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3188 __func__, opcode);
jintae jang8ca1a402020-12-03 14:25:32 +09003189 return -EINVAL;
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003190 }
3191
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003192 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003193 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3194 __func__, *buf_len);
jintae jang8ca1a402020-12-03 14:25:32 +09003195 return -EINVAL;
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003196 }
3197
jintae jang8ca1a402020-12-03 14:25:32 +09003198 ufshcd_hold(hba, false);
3199
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003200 mutex_lock(&hba->dev_cmd.lock);
3201 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3202 selector);
3203 hba->dev_cmd.query.descriptor = desc_buf;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03003204 request->upiu_req.length = cpu_to_be16(*buf_len);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003205
3206 switch (opcode) {
3207 case UPIU_QUERY_OPCODE_WRITE_DESC:
3208 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3209 break;
3210 case UPIU_QUERY_OPCODE_READ_DESC:
3211 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3212 break;
3213 default:
3214 dev_err(hba->dev,
3215 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3216 __func__, opcode);
3217 err = -EINVAL;
3218 goto out_unlock;
3219 }
3220
3221 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3222
3223 if (err) {
Yaniv Gardi4b761b52016-11-23 16:31:18 -08003224 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3225 __func__, opcode, idn, index, err);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003226 goto out_unlock;
3227 }
3228
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03003229 *buf_len = be16_to_cpu(response->upiu_res.length);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003230
3231out_unlock:
Bean Huocfcbae32019-11-12 23:34:36 +01003232 hba->dev_cmd.query.descriptor = NULL;
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003233 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003234 ufshcd_release(hba);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003235 return err;
3236}
3237
3238/**
Bart Van Assche8aa29f12018-03-01 15:07:20 -08003239 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3240 * @hba: per-adapter instance
 3241 * @opcode: descriptor query opcode
 3242 * @idn: descriptor idn to access
3243 * @index: index field
3244 * @selector: selector field
3245 * @desc_buf: the buffer that contains the descriptor
3246 * @buf_len: length parameter passed to the device
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003247 *
3248 * Returns 0 for success, non-zero in case of failure.
3249 * The buf_len parameter will contain, on return, the length parameter
3250 * received on the response.
3251 */
Stanislav Nijnikov2238d312018-02-15 14:14:07 +02003252int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3253 enum query_opcode opcode,
3254 enum desc_idn idn, u8 index,
3255 u8 selector,
3256 u8 *desc_buf, int *buf_len)
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003257{
3258 int err;
3259 int retries;
3260
3261 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3262 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3263 selector, desc_buf, buf_len);
3264 if (!err || err == -EINVAL)
3265 break;
3266 }
3267
3268 return err;
3269}
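
/*
 * Usage sketch (illustrative): reading the whole device descriptor, assuming
 * QUERY_DESC_IDN_DEVICE and QUERY_DESC_MAX_SIZE from the UFS headers:
 *
 *	u8 *desc_buf;
 *	int buf_len = QUERY_DESC_MAX_SIZE;
 *	int err;
 *
 *	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
 *	if (!desc_buf)
 *		return -ENOMEM;
 *	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *					    QUERY_DESC_IDN_DEVICE, 0, 0,
 *					    desc_buf, &buf_len);
 *	kfree(desc_buf);
 */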
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003270
3271/**
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003272 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3273 * @hba: Pointer to adapter instance
3274 * @desc_id: descriptor idn value
3275 * @desc_len: mapped desc length (out)
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003276 */
Bean Huo7a0bf852020-06-03 11:19:58 +02003277void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
3278 int *desc_len)
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003279{
Bean Huo7a0bf852020-06-03 11:19:58 +02003280 if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
3281 desc_id == QUERY_DESC_IDN_RFU_1)
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003282 *desc_len = 0;
Bean Huo7a0bf852020-06-03 11:19:58 +02003283 else
3284 *desc_len = hba->desc_size[desc_id];
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003285}
3286EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3287
Bean Huo7a0bf852020-06-03 11:19:58 +02003288static void ufshcd_update_desc_length(struct ufs_hba *hba,
Bean Huo72fb6902020-06-03 11:19:59 +02003289 enum desc_idn desc_id, int desc_index,
Bean Huo7a0bf852020-06-03 11:19:58 +02003290 unsigned char desc_len)
3291{
3292 if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
Bean Huo72fb6902020-06-03 11:19:59 +02003293 desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
 3294 /* For UFS 3.1, the normal unit descriptor is 10 bytes larger
 3295 * than the RPMB unit descriptor; however, both share the same
 3296 * desc_idn. To cover both unit descriptors with one length, the
 3297 * normal unit descriptor length is chosen based on desc_index.
 3298 */
Bean Huo7a0bf852020-06-03 11:19:58 +02003299 hba->desc_size[desc_id] = desc_len;
3300}
3301
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003302/**
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003303 * ufshcd_read_desc_param - read the specified descriptor parameter
3304 * @hba: Pointer to adapter instance
3305 * @desc_id: descriptor idn value
3306 * @desc_index: descriptor index
3307 * @param_offset: offset of the parameter to read
3308 * @param_read_buf: pointer to buffer where parameter would be read
3309 * @param_size: sizeof(param_read_buf)
3310 *
3311 * Return 0 in case of success, non-zero otherwise
3312 */
Stanislav Nijnikov45bced82018-02-15 14:14:02 +02003313int ufshcd_read_desc_param(struct ufs_hba *hba,
3314 enum desc_idn desc_id,
3315 int desc_index,
3316 u8 param_offset,
3317 u8 *param_read_buf,
3318 u8 param_size)
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003319{
3320 int ret;
3321 u8 *desc_buf;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003322 int buff_len;
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003323 bool is_kmalloc = true;
3324
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003325 /* Safety check */
3326 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003327 return -EINVAL;
3328
Bean Huo7a0bf852020-06-03 11:19:58 +02003329 /* Get the length of descriptor */
3330 ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3331 if (!buff_len) {
Can Guo1699f982020-10-21 22:59:00 -07003332 dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
3333 return -EINVAL;
3334 }
3335
3336 if (param_offset >= buff_len) {
3337 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3338 __func__, param_offset, desc_id, buff_len);
Bean Huo7a0bf852020-06-03 11:19:58 +02003339 return -EINVAL;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003340 }
3341
3342 /* Check whether we need temp memory */
3343 if (param_offset != 0 || param_size < buff_len) {
Can Guo1699f982020-10-21 22:59:00 -07003344 desc_buf = kzalloc(buff_len, GFP_KERNEL);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003345 if (!desc_buf)
3346 return -ENOMEM;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003347 } else {
3348 desc_buf = param_read_buf;
3349 is_kmalloc = false;
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003350 }
3351
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003352 /* Request for full descriptor */
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003353 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003354 desc_id, desc_index, 0,
3355 desc_buf, &buff_len);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003356
subhashj@codeaurora.orgbde44bb2016-11-23 16:31:41 -08003357 if (ret) {
Can Guo1699f982020-10-21 22:59:00 -07003358 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
subhashj@codeaurora.orgbde44bb2016-11-23 16:31:41 -08003359 __func__, desc_id, desc_index, param_offset, ret);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003360 goto out;
3361 }
3362
subhashj@codeaurora.orgbde44bb2016-11-23 16:31:41 -08003363 /* Sanity check */
3364 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
Can Guo1699f982020-10-21 22:59:00 -07003365 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
subhashj@codeaurora.orgbde44bb2016-11-23 16:31:41 -08003366 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3367 ret = -EINVAL;
3368 goto out;
3369 }
3370
Bean Huo7a0bf852020-06-03 11:19:58 +02003371 /* Update descriptor length */
3372 buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
Bean Huo72fb6902020-06-03 11:19:59 +02003373 ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
Bean Huo7a0bf852020-06-03 11:19:58 +02003374
Can Guo1699f982020-10-21 22:59:00 -07003375 if (is_kmalloc) {
3376 /* Make sure we don't copy more data than available */
Bart Van Assched3d9c452021-07-21 20:34:22 -07003377 if (param_offset >= buff_len)
3378 ret = -EINVAL;
3379 else
3380 memcpy(param_read_buf, &desc_buf[param_offset],
3381 min_t(u32, param_size, buff_len - param_offset));
Can Guo1699f982020-10-21 22:59:00 -07003382 }
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003383out:
3384 if (is_kmalloc)
3385 kfree(desc_buf);
3386 return ret;
3387}
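
/*
 * Usage sketch (illustrative): fetching only the wSpecVersion field of the
 * device descriptor, assuming DEVICE_DESC_PARAM_SPEC_VER from ufs.h:
 *
 *	u8 spec_ver[2];
 *	int ret;
 *
 *	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
 *				     DEVICE_DESC_PARAM_SPEC_VER,
 *				     spec_ver, sizeof(spec_ver));
 */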
3388
Yaniv Gardib573d482016-03-10 17:37:09 +02003389/**
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003390 * struct uc_string_id - unicode string
3391 *
3392 * @len: size of this descriptor inclusive
3393 * @type: descriptor type
3394 * @uc: unicode string character
3395 */
3396struct uc_string_id {
3397 u8 len;
3398 u8 type;
Gustavo A. R. Silvaec38c0a2020-05-07 14:25:50 -05003399 wchar_t uc[];
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003400} __packed;
3401
3402/* replace non-printable or non-ASCII characters with spaces */
3403static inline char ufshcd_remove_non_printable(u8 ch)
3404{
3405 return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3406}
3407
3408/**
Yaniv Gardib573d482016-03-10 17:37:09 +02003409 * ufshcd_read_string_desc - read string descriptor
3410 * @hba: pointer to adapter instance
3411 * @desc_index: descriptor index
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003412 * @buf: pointer to buffer where descriptor would be read,
3413 * the caller should free the memory.
Yaniv Gardib573d482016-03-10 17:37:09 +02003414 * @ascii: if true convert from unicode to ascii characters
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003415 * null terminated string.
Yaniv Gardib573d482016-03-10 17:37:09 +02003416 *
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003417 * Return:
3418 * * string size on success.
3419 * * -ENOMEM: on allocation failure
3420 * * -EINVAL: on a wrong parameter
Yaniv Gardib573d482016-03-10 17:37:09 +02003421 */
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003422int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3423 u8 **buf, bool ascii)
Yaniv Gardib573d482016-03-10 17:37:09 +02003424{
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003425 struct uc_string_id *uc_str;
3426 u8 *str;
3427 int ret;
Yaniv Gardib573d482016-03-10 17:37:09 +02003428
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003429 if (!buf)
3430 return -EINVAL;
Yaniv Gardib573d482016-03-10 17:37:09 +02003431
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003432 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3433 if (!uc_str)
3434 return -ENOMEM;
3435
Bean Huoc4607a02020-06-03 11:19:56 +02003436 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3437 (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003438 if (ret < 0) {
3439 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3440 QUERY_REQ_RETRIES, ret);
3441 str = NULL;
3442 goto out;
3443 }
3444
3445 if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3446 dev_dbg(hba->dev, "String Desc is of zero length\n");
3447 str = NULL;
3448 ret = 0;
Yaniv Gardib573d482016-03-10 17:37:09 +02003449 goto out;
3450 }
3451
3452 if (ascii) {
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003453 ssize_t ascii_len;
Yaniv Gardib573d482016-03-10 17:37:09 +02003454 int i;
Yaniv Gardib573d482016-03-10 17:37:09 +02003455 /* remove header and divide by 2 to move from UTF16 to UTF8 */
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003456 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3457 str = kzalloc(ascii_len, GFP_KERNEL);
3458 if (!str) {
3459 ret = -ENOMEM;
Tiezhu Yangfcbefc32016-06-25 12:35:22 +08003460 goto out;
Yaniv Gardib573d482016-03-10 17:37:09 +02003461 }
3462
3463 /*
 3464 * the descriptor contains a string in UTF-16 format;
 3465 * convert it to UTF-8 so it can be displayed
3466 */
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003467 ret = utf16s_to_utf8s(uc_str->uc,
3468 uc_str->len - QUERY_DESC_HDR_SIZE,
3469 UTF16_BIG_ENDIAN, str, ascii_len);
Yaniv Gardib573d482016-03-10 17:37:09 +02003470
3471 /* replace non-printable or non-ASCII characters with spaces */
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003472 for (i = 0; i < ret; i++)
3473 str[i] = ufshcd_remove_non_printable(str[i]);
Yaniv Gardib573d482016-03-10 17:37:09 +02003474
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003475 str[ret++] = '\0';
3476
3477 } else {
YueHaibing5f577042019-08-31 12:44:24 +00003478 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003479 if (!str) {
3480 ret = -ENOMEM;
3481 goto out;
3482 }
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003483 ret = uc_str->len;
Yaniv Gardib573d482016-03-10 17:37:09 +02003484 }
3485out:
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003486 *buf = str;
3487 kfree(uc_str);
3488 return ret;
Yaniv Gardib573d482016-03-10 17:37:09 +02003489}
Yaniv Gardib573d482016-03-10 17:37:09 +02003490
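/*
 * Usage sketch (illustrative): reading the product name string in ASCII.
 * model_index here stands for the DEVICE_DESC_PARAM_PRDCT_NAME byte read
 * from the device descriptor; the returned buffer is owned by the caller:
 *
 *	u8 *model;
 *	int ret;
 *
 *	ret = ufshcd_read_string_desc(hba, model_index, &model, true);
 *	if (ret > 0)
 *		dev_info(hba->dev, "product name: %s\n", model);
 *	kfree(model);
 */
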
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003491/**
3492 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3493 * @hba: Pointer to adapter instance
3494 * @lun: lun id
3495 * @param_offset: offset of the parameter to read
3496 * @param_read_buf: pointer to buffer where parameter would be read
3497 * @param_size: sizeof(param_read_buf)
3498 *
3499 * Return 0 in case of success, non-zero otherwise
3500 */
3501static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3502 int lun,
3503 enum unit_desc_param param_offset,
3504 u8 *param_read_buf,
3505 u32 param_size)
3506{
3507 /*
3508 * Unit descriptors are only available for general purpose LUs (LUN id
3509 * from 0 to 7) and RPMB Well known LU.
3510 */
Jaegeuk Kima2fca522021-01-11 01:59:27 -08003511 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003512 return -EOPNOTSUPP;
3513
3514 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3515 param_offset, param_read_buf, param_size);
3516}
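
/*
 * Usage sketch (illustrative): reading the bLUWriteProtect field of a unit
 * descriptor, assuming UNIT_DESC_PARAM_LU_WR_PROTECT from ufs.h:
 *
 *	u8 lun_wp;
 *	int ret;
 *
 *	ret = ufshcd_read_unit_desc_param(hba, lun,
 *					  UNIT_DESC_PARAM_LU_WR_PROTECT,
 *					  &lun_wp, sizeof(lun_wp));
 */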
3517
Can Guo09f17792020-02-10 19:40:49 -08003518static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3519{
3520 int err = 0;
3521 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3522
3523 if (hba->dev_info.wspecversion >= 0x300) {
3524 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3525 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3526 &gating_wait);
3527 if (err)
3528 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3529 err, gating_wait);
3530
3531 if (gating_wait == 0) {
3532 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3533 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3534 gating_wait);
3535 }
3536
3537 hba->dev_info.clk_gating_wait_us = gating_wait;
3538 }
3539
3540 return err;
3541}
3542
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003543/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303544 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3545 * @hba: per adapter instance
3546 *
3547 * 1. Allocate DMA memory for Command Descriptor array
3548 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3549 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3550 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3551 * (UTMRDL)
3552 * 4. Allocate memory for local reference block(lrb).
3553 *
3554 * Returns 0 for success, non-zero in case of failure
3555 */
3556static int ufshcd_memory_alloc(struct ufs_hba *hba)
3557{
3558 size_t utmrdl_size, utrdl_size, ucdl_size;
3559
3560 /* Allocate memory for UTP command descriptors */
3561 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09003562 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3563 ucdl_size,
3564 &hba->ucdl_dma_addr,
3565 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303566
3567 /*
 3568 * UFSHCI requires the UTP command descriptor to be 128-byte aligned.
 3569 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
 3570 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
 3571 * be aligned to 128 bytes as well
3572 */
3573 if (!hba->ucdl_base_addr ||
3574 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303575 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303576 "Command Descriptor Memory allocation failed\n");
3577 goto out;
3578 }
3579
3580 /*
3581 * Allocate memory for UTP Transfer descriptors
3582 * UFSHCI requires 1024 byte alignment of UTRD
3583 */
3584 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09003585 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3586 utrdl_size,
3587 &hba->utrdl_dma_addr,
3588 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303589 if (!hba->utrdl_base_addr ||
3590 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303591 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303592 "Transfer Descriptor Memory allocation failed\n");
3593 goto out;
3594 }
3595
3596 /*
3597 * Allocate memory for UTP Task Management descriptors
3598 * UFSHCI requires 1024 byte alignment of UTMRD
3599 */
3600 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
Seungwon Jeon2953f852013-06-27 13:31:54 +09003601 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3602 utmrdl_size,
3603 &hba->utmrdl_dma_addr,
3604 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303605 if (!hba->utmrdl_base_addr ||
3606 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303607 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303608 "Task Management Descriptor Memory allocation failed\n");
3609 goto out;
3610 }
3611
3612 /* Allocate memory for local reference block */
Kees Cooka86854d2018-06-12 14:07:58 -07003613 hba->lrb = devm_kcalloc(hba->dev,
3614 hba->nutrs, sizeof(struct ufshcd_lrb),
Seungwon Jeon2953f852013-06-27 13:31:54 +09003615 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303616 if (!hba->lrb) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303617 dev_err(hba->dev, "LRB Memory allocation failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303618 goto out;
3619 }
3620 return 0;
3621out:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303622 return -ENOMEM;
3623}
3624
3625/**
3626 * ufshcd_host_memory_configure - configure local reference block with
3627 * memory offsets
3628 * @hba: per adapter instance
3629 *
3630 * Configure Host memory space
3631 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3632 * address.
3633 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3634 * and PRDT offset.
3635 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3636 * into local reference block.
3637 */
3638static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3639{
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303640 struct utp_transfer_req_desc *utrdlp;
3641 dma_addr_t cmd_desc_dma_addr;
3642 dma_addr_t cmd_desc_element_addr;
3643 u16 response_offset;
3644 u16 prdt_offset;
3645 int cmd_desc_size;
3646 int i;
3647
3648 utrdlp = hba->utrdl_base_addr;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303649
3650 response_offset =
3651 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3652 prdt_offset =
3653 offsetof(struct utp_transfer_cmd_desc, prd_table);
3654
3655 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3656 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3657
3658 for (i = 0; i < hba->nutrs; i++) {
3659 /* Configure UTRD with command descriptor base address */
3660 cmd_desc_element_addr =
3661 (cmd_desc_dma_addr + (cmd_desc_size * i));
3662 utrdlp[i].command_desc_base_addr_lo =
3663 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3664 utrdlp[i].command_desc_base_addr_hi =
3665 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3666
3667 /* Response upiu and prdt offset should be in double words */
Alim Akhtar26f968d2020-05-28 06:46:52 +05303668 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3669 utrdlp[i].response_upiu_offset =
3670 cpu_to_le16(response_offset);
3671 utrdlp[i].prd_table_offset =
3672 cpu_to_le16(prdt_offset);
3673 utrdlp[i].response_upiu_length =
3674 cpu_to_le16(ALIGNED_UPIU_SIZE);
3675 } else {
3676 utrdlp[i].response_upiu_offset =
3677 cpu_to_le16(response_offset >> 2);
3678 utrdlp[i].prd_table_offset =
3679 cpu_to_le16(prdt_offset >> 2);
3680 utrdlp[i].response_upiu_length =
3681 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3682 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303683
Bart Van Assche4d2b8d42020-01-22 19:56:35 -08003684 ufshcd_init_lrb(hba, &hba->lrb[i], i);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303685 }
3686}
3687
3688/**
3689 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3690 * @hba: per adapter instance
3691 *
3692 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3693 * in order to initialize the Unipro link startup procedure.
3694 * Once the Unipro links are up, the device connected to the controller
3695 * is detected.
3696 *
3697 * Returns 0 on success, non-zero value on failure
3698 */
3699static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3700{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303701 struct uic_command uic_cmd = {0};
3702 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303703
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303704 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3705
3706 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3707 if (ret)
Dolev Ravivff8e20c2016-12-22 18:42:18 -08003708 dev_dbg(hba->dev,
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303709 "dme-link-startup: error code %d\n", ret);
3710 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303711}

Alim Akhtar39bf2d82020-05-28 06:46:51 +05303712/**
3713 * ufshcd_dme_reset - UIC command for DME_RESET
3714 * @hba: per adapter instance
3715 *
3716 * DME_RESET command is issued in order to reset UniPro stack.
3717 * This function now deals with cold reset.
3718 *
3719 * Returns 0 on success, non-zero value on failure
3720 */
3721static int ufshcd_dme_reset(struct ufs_hba *hba)
3722{
3723 struct uic_command uic_cmd = {0};
3724 int ret;
3725
3726 uic_cmd.command = UIC_CMD_DME_RESET;
3727
3728 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3729 if (ret)
3730 dev_err(hba->dev,
3731 "dme-reset: error code %d\n", ret);
3732
3733 return ret;
3734}
3735
Stanley Chufc85a742020-11-16 14:50:52 +08003736int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
3737 int agreed_gear,
3738 int adapt_val)
3739{
3740 int ret;
3741
3742 if (agreed_gear != UFS_HS_G4)
Bjorn Andersson66df79c2020-11-20 20:48:10 -08003743 adapt_val = PA_NO_ADAPT;
Stanley Chufc85a742020-11-16 14:50:52 +08003744
3745 ret = ufshcd_dme_set(hba,
3746 UIC_ARG_MIB(PA_TXHSADAPTTYPE),
3747 adapt_val);
3748 return ret;
3749}
3750EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
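
/*
 * Usage sketch (illustrative): a host driver would typically call this from
 * its power mode change notifier once the gear has been negotiated. The
 * dev_req_params name below stands for the negotiated parameters passed to
 * that notifier and is only an assumption for this example;
 * PA_INITIAL_ADAPT comes from unipro.h:
 *
 *	ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
 *				   PA_INITIAL_ADAPT);
 */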
3751
Alim Akhtar39bf2d82020-05-28 06:46:51 +05303752/**
3753 * ufshcd_dme_enable - UIC command for DME_ENABLE
3754 * @hba: per adapter instance
3755 *
3756 * DME_ENABLE command is issued in order to enable UniPro stack.
3757 *
3758 * Returns 0 on success, non-zero value on failure
3759 */
3760static int ufshcd_dme_enable(struct ufs_hba *hba)
3761{
3762 struct uic_command uic_cmd = {0};
3763 int ret;
3764
3765 uic_cmd.command = UIC_CMD_DME_ENABLE;
3766
3767 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3768 if (ret)
3769 dev_err(hba->dev,
Bean Huo1fa05702020-12-07 20:01:37 +01003770 "dme-enable: error code %d\n", ret);
Alim Akhtar39bf2d82020-05-28 06:46:51 +05303771
3772 return ret;
3773}
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303774
Yaniv Gardicad2e032015-03-31 17:37:14 +03003775static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3776{
3777 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3778 unsigned long min_sleep_time_us;
3779
3780 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3781 return;
3782
3783 /*
3784 * last_dme_cmd_tstamp will be 0 only for 1st call to
3785 * this function
3786 */
3787 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3788 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3789 } else {
3790 unsigned long delta =
3791 (unsigned long) ktime_to_us(
3792 ktime_sub(ktime_get(),
3793 hba->last_dme_cmd_tstamp));
3794
3795 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3796 min_sleep_time_us =
3797 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3798 else
3799 return; /* no more delay required */
3800 }
3801
3802 /* allow sleep for extra 50us if needed */
3803 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3804}
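/*
 * In short: the helper above is a no-op unless
 * UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS is set; when it is, at least
 * MIN_DELAY_BEFORE_DME_CMDS_US (1 ms) is guaranteed between consecutive DME
 * commands, measured against hba->last_dme_cmd_tstamp.
 */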
3805
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303806/**
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303807 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3808 * @hba: per adapter instance
3809 * @attr_sel: uic command argument1
3810 * @attr_set: attribute set type as uic command argument2
3811 * @mib_val: setting value as uic command argument3
3812 * @peer: indicate whether peer or local
3813 *
3814 * Returns 0 on success, non-zero value on failure
3815 */
3816int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3817 u8 attr_set, u32 mib_val, u8 peer)
3818{
3819 struct uic_command uic_cmd = {0};
3820 static const char *const action[] = {
3821 "dme-set",
3822 "dme-peer-set"
3823 };
3824 const char *set = action[!!peer];
3825 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003826 int retries = UFS_UIC_COMMAND_RETRIES;
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303827
3828 uic_cmd.command = peer ?
3829 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3830 uic_cmd.argument1 = attr_sel;
3831 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3832 uic_cmd.argument3 = mib_val;
3833
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003834 do {
3835 /* for peer attributes we retry upon failure */
3836 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3837 if (ret)
3838 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3839 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3840 } while (ret && peer && --retries);
3841
Yaniv Gardif37e9f82016-11-23 16:32:49 -08003842 if (ret)
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003843 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
Yaniv Gardif37e9f82016-11-23 16:32:49 -08003844 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3845 UFS_UIC_COMMAND_RETRIES - retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303846
3847 return ret;
3848}
3849EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
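/*
 * Callers usually reach this through the ufshcd_dme_set()/ufshcd_dme_peer_set()
 * convenience wrappers, e.g. (pattern used later in this file):
 *
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
 *
 * Note that only peer (DME_PEER_SET) accesses are retried in the loop above;
 * a local DME_SET failure is reported after a single attempt.
 */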
3850
3851/**
3852 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3853 * @hba: per adapter instance
3854 * @attr_sel: uic command argument1
3855 * @mib_val: the value of the attribute as returned by the UIC command
3856 * @peer: indicate whether peer or local
3857 *
3858 * Returns 0 on success, non-zero value on failure
3859 */
3860int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3861 u32 *mib_val, u8 peer)
3862{
3863 struct uic_command uic_cmd = {0};
3864 static const char *const action[] = {
3865 "dme-get",
3866 "dme-peer-get"
3867 };
3868 const char *get = action[!!peer];
3869 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003870 int retries = UFS_UIC_COMMAND_RETRIES;
Yaniv Gardi874237f2015-05-17 18:55:03 +03003871 struct ufs_pa_layer_attr orig_pwr_info;
3872 struct ufs_pa_layer_attr temp_pwr_info;
3873 bool pwr_mode_change = false;
3874
3875 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3876 orig_pwr_info = hba->pwr_info;
3877 temp_pwr_info = orig_pwr_info;
3878
3879 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3880 orig_pwr_info.pwr_rx == FAST_MODE) {
3881 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3882 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3883 pwr_mode_change = true;
3884 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3885 orig_pwr_info.pwr_rx == SLOW_MODE) {
3886 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3887 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3888 pwr_mode_change = true;
3889 }
3890 if (pwr_mode_change) {
3891 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3892 if (ret)
3893 goto out;
3894 }
3895 }
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303896
3897 uic_cmd.command = peer ?
3898 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3899 uic_cmd.argument1 = attr_sel;
3900
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003901 do {
3902 /* for peer attributes we retry upon failure */
3903 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3904 if (ret)
3905 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3906 get, UIC_GET_ATTR_ID(attr_sel), ret);
3907 } while (ret && peer && --retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303908
Yaniv Gardif37e9f82016-11-23 16:32:49 -08003909 if (ret)
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003910 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
Yaniv Gardif37e9f82016-11-23 16:32:49 -08003911 get, UIC_GET_ATTR_ID(attr_sel),
3912 UFS_UIC_COMMAND_RETRIES - retries);
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003913
3914 if (mib_val && !ret)
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303915 *mib_val = uic_cmd.argument3;
Yaniv Gardi874237f2015-05-17 18:55:03 +03003916
3917 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3918 && pwr_mode_change)
3919 ufshcd_change_power_mode(hba, &orig_pwr_info);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303920out:
3921 return ret;
3922}
3923EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
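/*
 * The UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE handling above covers
 * controllers that can only access peer DME attributes reliably while the
 * link is in an AUTO power mode: the link is temporarily switched to
 * FASTAUTO/SLOWAUTO for the DME_PEER_GET and the original power mode is
 * restored afterwards.
 */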
3924
3925/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003926 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
3927 * state) and waits for them to take effect.
3928 *
3929 * @hba: per adapter instance
3930 * @cmd: UIC command to execute
3931 *
3932 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3933 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
3934 * and device UniPro links, hence their final completion is indicated by
3935 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3936 * addition to normal UIC command completion Status (UCCS). This function only
3937 * returns after the relevant status bits indicate the completion.
3938 *
3939 * Returns 0 on success, non-zero value on failure
3940 */
3941static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3942{
Bart Van Assche8a686f22021-07-21 20:34:26 -07003943 DECLARE_COMPLETION_ONSTACK(uic_async_done);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003944 unsigned long flags;
3945 u8 status;
3946 int ret;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003947 bool reenable_intr = false;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003948
3949 mutex_lock(&hba->uic_cmd_mutex);
Yaniv Gardicad2e032015-03-31 17:37:14 +03003950 ufshcd_add_delay_before_dme_cmd(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003951
3952 spin_lock_irqsave(hba->host->host_lock, flags);
Can Guo4db7a232020-08-09 05:15:51 -07003953 if (ufshcd_is_link_broken(hba)) {
3954 ret = -ENOLINK;
3955 goto out_unlock;
3956 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003957 hba->uic_async_done = &uic_async_done;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003958 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3959 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3960 /*
3961 * Make sure UIC command completion interrupt is disabled before
3962 * issuing UIC command.
3963 */
3964 wmb();
3965 reenable_intr = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003966 }
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003967 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3968 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003969 if (ret) {
3970 dev_err(hba->dev,
3971 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3972 cmd->command, cmd->argument3, ret);
3973 goto out;
3974 }
3975
3976 if (!wait_for_completion_timeout(hba->uic_async_done,
3977 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3978 dev_err(hba->dev,
3979 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3980 cmd->command, cmd->argument3);
Can Guo0f52fcb92020-11-02 22:24:40 -08003981
3982 if (!cmd->cmd_active) {
3983 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
3984 __func__);
3985 goto check_upmcrs;
3986 }
3987
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003988 ret = -ETIMEDOUT;
3989 goto out;
3990 }
3991
Can Guo0f52fcb92020-11-02 22:24:40 -08003992check_upmcrs:
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003993 status = ufshcd_get_upmcrs(hba);
3994 if (status != PWR_LOCAL) {
3995 dev_err(hba->dev,
Zang Leigang479da362017-09-19 16:50:30 +08003996 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003997 cmd->command, status);
3998 ret = (status != PWR_OK) ? status : -1;
3999 }
4000out:
Venkat Gopalakrishnan7942f7b2017-02-03 16:58:24 -08004001 if (ret) {
4002 ufshcd_print_host_state(hba);
4003 ufshcd_print_pwr_info(hba);
Stanley Chue965e5e2020-12-05 19:58:59 +08004004 ufshcd_print_evt_hist(hba);
Venkat Gopalakrishnan7942f7b2017-02-03 16:58:24 -08004005 }
4006
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004007 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004008 hba->active_uic_cmd = NULL;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004009 hba->uic_async_done = NULL;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004010 if (reenable_intr)
4011 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
Can Guo4db7a232020-08-09 05:15:51 -07004012 if (ret) {
4013 ufshcd_set_link_broken(hba);
Adrian Hunter88b099002021-09-17 17:43:49 +03004014 ufshcd_schedule_eh_work(hba);
Can Guo4db7a232020-08-09 05:15:51 -07004015 }
4016out_unlock:
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004017 spin_unlock_irqrestore(hba->host->host_lock, flags);
4018 mutex_unlock(&hba->uic_cmd_mutex);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004019
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004020 return ret;
4021}
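/*
 * Flow summary for the function above: the UIC_COMMAND_COMPL interrupt is
 * masked before the command is issued; completion is instead signalled
 * through hba->uic_async_done from the interrupt handler once the relevant
 * power-status bit (UPMS/UHES/UHXS) is raised. A timeout, or a UPMCRS value
 * other than PWR_LOCAL, leaves ret non-zero, which marks the link broken and
 * schedules the error handler before the mutex is dropped.
 */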
4022
4023/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304024 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4025 * using DME_SET primitives.
4026 * @hba: per adapter instance
4027 * @mode: power mode value
4028 *
4029 * Returns 0 on success, non-zero value on failure
4030 */
Sujit Reddy Thummabdbe5d22014-05-26 10:59:11 +05304031static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304032{
4033 struct uic_command uic_cmd = {0};
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004034 int ret;
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304035
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03004036 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4037 ret = ufshcd_dme_set(hba,
4038 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4039 if (ret) {
4040 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4041 __func__, ret);
4042 goto out;
4043 }
4044 }
4045
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304046 uic_cmd.command = UIC_CMD_DME_SET;
4047 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4048 uic_cmd.argument3 = mode;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004049 ufshcd_hold(hba, false);
4050 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4051 ufshcd_release(hba);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304052
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03004053out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004054 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004055}
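/*
 * The @mode byte written to PA_PWRMODE above packs the RX power mode into the
 * upper nibble and the TX power mode into the lower nibble; see the
 * (pwr_rx << 4 | pwr_tx) construction in ufshcd_change_power_mode() below.
 */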
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304056
Stanley Chu087c5ef2020-03-27 17:53:28 +08004057int ufshcd_link_recovery(struct ufs_hba *hba)
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004058{
4059 int ret;
4060 unsigned long flags;
4061
4062 spin_lock_irqsave(hba->host->host_lock, flags);
4063 hba->ufshcd_state = UFSHCD_STATE_RESET;
4064 ufshcd_set_eh_in_progress(hba);
4065 spin_unlock_irqrestore(hba->host->host_lock, flags);
4066
Can Guoebdd1df2019-11-14 22:09:24 -08004067 /* Reset the attached device */
Stanley Chu31a5d9c2020-12-08 21:56:35 +08004068 ufshcd_device_reset(hba);
Can Guoebdd1df2019-11-14 22:09:24 -08004069
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004070 ret = ufshcd_host_reset_and_restore(hba);
4071
4072 spin_lock_irqsave(hba->host->host_lock, flags);
4073 if (ret)
4074 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4075 ufshcd_clear_eh_in_progress(hba);
4076 spin_unlock_irqrestore(hba->host->host_lock, flags);
4077
4078 if (ret)
4079 dev_err(hba->dev, "%s: link recovery failed, err %d",
4080 __func__, ret);
4081
4082 return ret;
4083}
Stanley Chu087c5ef2020-03-27 17:53:28 +08004084EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004085
Asutosh Das525943a2021-09-28 02:06:12 -07004086int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004087{
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004088 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004089 struct uic_command uic_cmd = {0};
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08004090 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004091
Kiwoong Kimee32c902016-11-10 21:17:43 +09004092 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4093
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004094 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004095 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08004096 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4097 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004098
Can Guo4db7a232020-08-09 05:15:51 -07004099 if (ret)
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004100 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4101 __func__, ret);
Can Guo4db7a232020-08-09 05:15:51 -07004102 else
Kiwoong Kimee32c902016-11-10 21:17:43 +09004103 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4104 POST_CHANGE);
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004105
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004106 return ret;
4107}
Asutosh Das525943a2021-09-28 02:06:12 -07004108EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004109
Stanley Chu9d19bf7a2020-01-17 11:51:07 +08004110int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004111{
4112 struct uic_command uic_cmd = {0};
4113 int ret;
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08004114 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004115
Kiwoong Kimee32c902016-11-10 21:17:43 +09004116 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4117
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004118 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4119 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08004120 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4121 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4122
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304123 if (ret) {
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004124 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4125 __func__, ret);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08004126 } else {
Kiwoong Kimee32c902016-11-10 21:17:43 +09004127 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4128 POST_CHANGE);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08004129 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
4130 hba->ufs_stats.hibern8_exit_cnt++;
4131 }
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304132
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304133 return ret;
4134}
Stanley Chu9d19bf7a2020-01-17 11:51:07 +08004135EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304136
Stanley Chuba7af5e2019-12-30 13:32:28 +08004137void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4138{
4139 unsigned long flags;
Can Guobe7594a2020-03-05 00:53:07 -08004140 bool update = false;
Stanley Chuba7af5e2019-12-30 13:32:28 +08004141
Can Guobe7594a2020-03-05 00:53:07 -08004142 if (!ufshcd_is_auto_hibern8_supported(hba))
Stanley Chuba7af5e2019-12-30 13:32:28 +08004143 return;
4144
4145 spin_lock_irqsave(hba->host->host_lock, flags);
Can Guobe7594a2020-03-05 00:53:07 -08004146 if (hba->ahit != ahit) {
4147 hba->ahit = ahit;
4148 update = true;
4149 }
Stanley Chuba7af5e2019-12-30 13:32:28 +08004150 spin_unlock_irqrestore(hba->host->host_lock, flags);
Can Guobe7594a2020-03-05 00:53:07 -08004151
Asutosh Dasb294ff32021-04-23 17:20:16 -07004152 if (update &&
4153 !pm_runtime_suspended(&hba->sdev_ufs_device->sdev_gendev)) {
4154 ufshcd_rpm_get_sync(hba);
Can Guobe7594a2020-03-05 00:53:07 -08004155 ufshcd_hold(hba, false);
4156 ufshcd_auto_hibern8_enable(hba);
4157 ufshcd_release(hba);
Asutosh Dasb294ff32021-04-23 17:20:16 -07004158 ufshcd_rpm_put_sync(hba);
Can Guobe7594a2020-03-05 00:53:07 -08004159 }
Stanley Chuba7af5e2019-12-30 13:32:28 +08004160}
4161EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
4162
Can Guo71d848b2019-11-14 22:09:26 -08004163void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
Adrian Hunterad448372018-03-20 15:07:38 +02004164{
4165 unsigned long flags;
4166
Bao D. Nguyen499f7a92020-08-28 18:05:13 -07004167 if (!ufshcd_is_auto_hibern8_supported(hba))
Adrian Hunterad448372018-03-20 15:07:38 +02004168 return;
4169
4170 spin_lock_irqsave(hba->host->host_lock, flags);
4171 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4172 spin_unlock_irqrestore(hba->host->host_lock, flags);
4173}
4174
Yaniv Gardi50646362014-10-23 13:25:13 +03004175 /**
4176 * ufshcd_init_pwr_info - setting the POR (power on reset)
4177 * values in hba power info
4178 * @hba: per-adapter instance
4179 */
4180static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4181{
4182 hba->pwr_info.gear_rx = UFS_PWM_G1;
4183 hba->pwr_info.gear_tx = UFS_PWM_G1;
4184 hba->pwr_info.lane_rx = 1;
4185 hba->pwr_info.lane_tx = 1;
4186 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4187 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4188 hba->pwr_info.hs_rate = 0;
4189}
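/*
 * These POR defaults mirror the link state expected right after link startup
 * (PWM gear 1, one lane per direction, SLOW_AUTO), before any power mode
 * change has been negotiated; see the call site in ufshcd_link_startup().
 */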
4190
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304191/**
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004192 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4193 * @hba: per-adapter instance
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304194 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004195static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304196{
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004197 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4198
4199 if (hba->max_pwr_info.is_valid)
4200 return 0;
4201
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08004202 pwr_info->pwr_tx = FAST_MODE;
4203 pwr_info->pwr_rx = FAST_MODE;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004204 pwr_info->hs_rate = PA_HS_MODE_B;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304205
4206 /* Get the connected lane count */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004207 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4208 &pwr_info->lane_rx);
4209 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4210 &pwr_info->lane_tx);
4211
4212 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4213 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4214 __func__,
4215 pwr_info->lane_rx,
4216 pwr_info->lane_tx);
4217 return -EINVAL;
4218 }
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304219
4220 /*
4221 * First, get the maximum gears of HS speed.
4222 * If a zero value, it means there is no HSGEAR capability.
4223 * Then, get the maximum gears of PWM speed.
4224 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004225 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4226 if (!pwr_info->gear_rx) {
4227 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4228 &pwr_info->gear_rx);
4229 if (!pwr_info->gear_rx) {
4230 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4231 __func__, pwr_info->gear_rx);
4232 return -EINVAL;
4233 }
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08004234 pwr_info->pwr_rx = SLOW_MODE;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304235 }
4236
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004237 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4238 &pwr_info->gear_tx);
4239 if (!pwr_info->gear_tx) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304240 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004241 &pwr_info->gear_tx);
4242 if (!pwr_info->gear_tx) {
4243 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4244 __func__, pwr_info->gear_tx);
4245 return -EINVAL;
4246 }
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08004247 pwr_info->pwr_tx = SLOW_MODE;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004248 }
4249
4250 hba->max_pwr_info.is_valid = true;
4251 return 0;
4252}
4253
4254static int ufshcd_change_power_mode(struct ufs_hba *hba,
4255 struct ufs_pa_layer_attr *pwr_mode)
4256{
4257 int ret;
4258
4259 /* if already configured to the requested pwr_mode */
Can Guo2355b662020-08-24 19:07:06 -07004260 if (!hba->force_pmc &&
4261 pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004262 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4263 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4264 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4265 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4266 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4267 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4268 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4269 return 0;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304270 }
4271
4272 /*
4273 * Configure attributes for power mode change with below.
4274 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4275 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4276 * - PA_HSSERIES
4277 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004278 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4279 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4280 pwr_mode->lane_rx);
4281 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4282 pwr_mode->pwr_rx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304283 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004284 else
4285 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304286
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004287 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4288 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4289 pwr_mode->lane_tx);
4290 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4291 pwr_mode->pwr_tx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304292 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004293 else
4294 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304295
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004296 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4297 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4298 pwr_mode->pwr_rx == FAST_MODE ||
4299 pwr_mode->pwr_tx == FAST_MODE)
4300 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4301 pwr_mode->hs_rate);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304302
Kiwoong Kimb1d0d2e2020-12-21 10:24:40 +09004303 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4304 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4305 DL_FC0ProtectionTimeOutVal_Default);
4306 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4307 DL_TC0ReplayTimeOutVal_Default);
4308 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4309 DL_AFC0ReqTimeOutVal_Default);
4310 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4311 DL_FC1ProtectionTimeOutVal_Default);
4312 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4313 DL_TC1ReplayTimeOutVal_Default);
4314 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4315 DL_AFC1ReqTimeOutVal_Default);
Can Guo08342532019-12-05 02:14:42 +00004316
Kiwoong Kimb1d0d2e2020-12-21 10:24:40 +09004317 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4318 DL_FC0ProtectionTimeOutVal_Default);
4319 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4320 DL_TC0ReplayTimeOutVal_Default);
4321 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4322 DL_AFC0ReqTimeOutVal_Default);
4323 }
Can Guo08342532019-12-05 02:14:42 +00004324
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004325 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4326 | pwr_mode->pwr_tx);
4327
4328 if (ret) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304329 dev_err(hba->dev,
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004330 "%s: power mode change failed %d\n", __func__, ret);
4331 } else {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004332 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4333 pwr_mode);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004334
4335 memcpy(&hba->pwr_info, pwr_mode,
4336 sizeof(struct ufs_pa_layer_attr));
4337 }
4338
4339 return ret;
4340}
4341
4342/**
4343 * ufshcd_config_pwr_mode - configure a new power mode
4344 * @hba: per-adapter instance
4345 * @desired_pwr_mode: desired power configuration
4346 */
Alim Akhtar0d846e72018-05-06 15:44:18 +05304347int ufshcd_config_pwr_mode(struct ufs_hba *hba,
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004348 struct ufs_pa_layer_attr *desired_pwr_mode)
4349{
4350 struct ufs_pa_layer_attr final_params = { 0 };
4351 int ret;
4352
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004353 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4354 desired_pwr_mode, &final_params);
4355
4356 if (ret)
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004357 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4358
4359 ret = ufshcd_change_power_mode(hba, &final_params);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304360
4361 return ret;
4362}
Alim Akhtar0d846e72018-05-06 15:44:18 +05304363EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304364
4365/**
Dolev Raviv68078d52013-07-30 00:35:58 +05304366 * ufshcd_complete_dev_init() - checks device readiness
Bart Van Assche8aa29f12018-03-01 15:07:20 -08004367 * @hba: per-adapter instance
Dolev Raviv68078d52013-07-30 00:35:58 +05304368 *
4369 * Set fDeviceInit flag and poll until device toggles it.
4370 */
4371static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4372{
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004373 int err;
Jason Yan7dfdcc32020-04-26 17:43:05 +08004374 bool flag_res = true;
Kiwoong Kim29707fa2020-08-10 19:02:27 +09004375 ktime_t timeout;
Dolev Raviv68078d52013-07-30 00:35:58 +05304376
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004377 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
Stanley Chu1f34eed2020-05-08 16:01:12 +08004378 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
Dolev Raviv68078d52013-07-30 00:35:58 +05304379 if (err) {
4380 dev_err(hba->dev,
4381 "%s setting fDeviceInit flag failed with error %d\n",
4382 __func__, err);
4383 goto out;
4384 }
4385
Kiwoong Kim29707fa2020-08-10 19:02:27 +09004386 /* Poll fDeviceInit flag to be cleared */
4387 timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4388 do {
4389 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4390 QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4391 if (!flag_res)
4392 break;
4393 usleep_range(5000, 10000);
4394 } while (ktime_before(ktime_get(), timeout));
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004395
Kiwoong Kim29707fa2020-08-10 19:02:27 +09004396 if (err) {
Dolev Raviv68078d52013-07-30 00:35:58 +05304397 dev_err(hba->dev,
Kiwoong Kim29707fa2020-08-10 19:02:27 +09004398 "%s reading fDeviceInit flag failed with error %d\n",
4399 __func__, err);
4400 } else if (flag_res) {
Dolev Raviv68078d52013-07-30 00:35:58 +05304401 dev_err(hba->dev,
Kiwoong Kim29707fa2020-08-10 19:02:27 +09004402 "%s fDeviceInit was not cleared by the device\n",
4403 __func__);
4404 err = -EBUSY;
4405 }
Dolev Raviv68078d52013-07-30 00:35:58 +05304406out:
4407 return err;
4408}
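/*
 * The device clears fDeviceInit on its own once device initialization
 * completes, which is why the flag is polled above until it reads back as
 * false; a flag still set after FDEVICEINIT_COMPL_TIMEOUT is reported as
 * -EBUSY.
 */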
4409
4410/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304411 * ufshcd_make_hba_operational - Make UFS controller operational
4412 * @hba: per adapter instance
4413 *
4414 * To bring the UFS host controller to an operational state,
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004415 * 1. Enable required interrupts
4416 * 2. Configure interrupt aggregation
Yaniv Gardi897efe62016-02-01 15:02:48 +02004417 * 3. Program UTRL and UTMRL base address
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004418 * 4. Configure run-stop-registers
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304419 *
4420 * Returns 0 on success, non-zero value on failure
4421 */
Stanley Chu9d19bf7a2020-01-17 11:51:07 +08004422int ufshcd_make_hba_operational(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304423{
4424 int err = 0;
4425 u32 reg;
4426
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304427 /* Enable required interrupts */
4428 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4429
4430 /* Configure interrupt aggregation */
Yaniv Gardib8521902015-05-17 18:54:57 +03004431 if (ufshcd_is_intr_aggr_allowed(hba))
4432 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4433 else
4434 ufshcd_disable_intr_aggr(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304435
4436 /* Configure UTRL and UTMRL base address registers */
4437 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4438 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4439 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4440 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4441 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4442 REG_UTP_TASK_REQ_LIST_BASE_L);
4443 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4444 REG_UTP_TASK_REQ_LIST_BASE_H);
4445
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304446 /*
Yaniv Gardi897efe62016-02-01 15:02:48 +02004447 * Make sure base address and interrupt setup are updated before
4448 * enabling the run/stop registers below.
4449 */
4450 wmb();
4451
4452 /*
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304453 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304454 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004455 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304456 if (!(ufshcd_get_lists_status(reg))) {
4457 ufshcd_enable_run_stop_reg(hba);
4458 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304459 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304460	"Host controller not ready to process requests\n");
4461 err = -EIO;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304462 }
4463
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304464 return err;
4465}
Stanley Chu9d19bf7a2020-01-17 11:51:07 +08004466EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304467
4468/**
Yaniv Gardi596585a2016-03-10 17:37:08 +02004469 * ufshcd_hba_stop - Send controller to reset state
4470 * @hba: per adapter instance
Yaniv Gardi596585a2016-03-10 17:37:08 +02004471 */
Alice.Chao3a95f5b2021-05-28 11:36:21 +08004472void ufshcd_hba_stop(struct ufs_hba *hba)
Yaniv Gardi596585a2016-03-10 17:37:08 +02004473{
Bart Van Assche5cac1092020-05-07 15:27:50 -07004474 unsigned long flags;
Yaniv Gardi596585a2016-03-10 17:37:08 +02004475 int err;
4476
Bart Van Assche5cac1092020-05-07 15:27:50 -07004477 /*
4478 * Obtain the host lock to prevent that the controller is disabled
4479 * while the UFS interrupt handler is active on another CPU.
4480 */
4481 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardi596585a2016-03-10 17:37:08 +02004482 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
Bart Van Assche5cac1092020-05-07 15:27:50 -07004483 spin_unlock_irqrestore(hba->host->host_lock, flags);
4484
Yaniv Gardi596585a2016-03-10 17:37:08 +02004485 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4486 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
Bart Van Assche5cac1092020-05-07 15:27:50 -07004487 10, 1);
Yaniv Gardi596585a2016-03-10 17:37:08 +02004488 if (err)
4489 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4490}
Alice.Chao3a95f5b2021-05-28 11:36:21 +08004491EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
Yaniv Gardi596585a2016-03-10 17:37:08 +02004492
4493/**
Alim Akhtar39bf2d82020-05-28 06:46:51 +05304494 * ufshcd_hba_execute_hce - initialize the controller
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304495 * @hba: per adapter instance
4496 *
4497 * The controller resets itself and the controller firmware initialization
4498 * sequence kicks off. When the controller is ready it will set
4499 * the Host Controller Enable bit to 1.
4500 *
4501 * Returns 0 on success, non-zero value on failure
4502 */
Alim Akhtar39bf2d82020-05-28 06:46:51 +05304503static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304504{
Stanley Chu6081b122020-11-12 13:45:37 +08004505 int retry_outer = 3;
4506 int retry_inner;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304507
Stanley Chu6081b122020-11-12 13:45:37 +08004508start:
Yaniv Gardi596585a2016-03-10 17:37:08 +02004509 if (!ufshcd_is_hba_active(hba))
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304510 /* change controller state to "reset state" */
Bart Van Assche5cac1092020-05-07 15:27:50 -07004511 ufshcd_hba_stop(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304512
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004513 /* UniPro link is disabled at this point */
4514 ufshcd_set_link_off(hba);
4515
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004516 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004517
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304518 /* start controller initialization sequence */
4519 ufshcd_hba_start(hba);
4520
4521 /*
4522 * To initialize a UFS host controller, the HCE bit must be set to 1.
4523 * During initialization the HCE bit value changes from 1->0->1.
4524 * When the host controller completes initialization sequence
4525 * it sets the value of HCE bit to 1. The same HCE bit is read back
4526 * to check if the controller has completed the initialization sequence.
4527 * So without this delay, the HCE = 1 value set by the previous
4528 * instruction might be read back.
4529 * This delay can be changed based on the controller.
4530 */
Stanley Chu90b84912020-05-09 17:37:13 +08004531 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304532
4533 /* wait for the host controller to complete initialization */
Stanley Chu6081b122020-11-12 13:45:37 +08004534 retry_inner = 50;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304535 while (ufshcd_is_hba_active(hba)) {
Stanley Chu6081b122020-11-12 13:45:37 +08004536 if (retry_inner) {
4537 retry_inner--;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304538 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304539 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304540 "Controller enable failed\n");
Stanley Chu6081b122020-11-12 13:45:37 +08004541 if (retry_outer) {
4542 retry_outer--;
4543 goto start;
4544 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304545 return -EIO;
4546 }
Stanley Chu9fc305e2020-03-18 18:40:15 +08004547 usleep_range(1000, 1100);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304548 }
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004549
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004550 /* enable UIC related interrupts */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004551 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004552
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004553 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004554
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304555 return 0;
4556}
Alim Akhtar39bf2d82020-05-28 06:46:51 +05304557
4558int ufshcd_hba_enable(struct ufs_hba *hba)
4559{
4560 int ret;
4561
4562 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4563 ufshcd_set_link_off(hba);
4564 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4565
4566 /* enable UIC related interrupts */
4567 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4568 ret = ufshcd_dme_reset(hba);
4569 if (!ret) {
4570 ret = ufshcd_dme_enable(hba);
4571 if (!ret)
4572 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4573 if (ret)
4574 dev_err(hba->dev,
4575 "Host controller enable failed with non-hce\n");
4576 }
4577 } else {
4578 ret = ufshcd_hba_execute_hce(hba);
4579 }
4580
4581 return ret;
4582}
Stanley Chu9d19bf7a2020-01-17 11:51:07 +08004583EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
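/*
 * With UFSHCI_QUIRK_BROKEN_HCE the usual HCE 0->1 handshake is skipped and
 * the host is brought up through the DME_RESET + DME_ENABLE UIC command pair
 * instead; all other controllers use the standard HCE sequence implemented
 * in ufshcd_hba_execute_hce().
 */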
4584
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03004585static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4586{
Stanley Chuba0320f2020-03-18 18:40:10 +08004587 int tx_lanes = 0, i, err = 0;
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03004588
4589 if (!peer)
4590 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4591 &tx_lanes);
4592 else
4593 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4594 &tx_lanes);
4595 for (i = 0; i < tx_lanes; i++) {
4596 if (!peer)
4597 err = ufshcd_dme_set(hba,
4598 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4599 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4600 0);
4601 else
4602 err = ufshcd_dme_peer_set(hba,
4603 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4604 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4605 0);
4606 if (err) {
4607 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4608 __func__, peer, i, err);
4609 break;
4610 }
4611 }
4612
4613 return err;
4614}
4615
4616static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4617{
4618 return ufshcd_disable_tx_lcc(hba, true);
4619}
4620
Stanley Chue965e5e2020-12-05 19:58:59 +08004621void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
Stanley Chu8808b4e2019-07-10 21:38:21 +08004622{
Stanley Chue965e5e2020-12-05 19:58:59 +08004623 struct ufs_event_hist *e;
4624
4625 if (id >= UFS_EVT_CNT)
4626 return;
4627
4628 e = &hba->ufs_stats.event[id];
4629 e->val[e->pos] = val;
4630 e->tstamp[e->pos] = ktime_get();
Adrian Hunterb6cacaf2021-01-07 09:25:38 +02004631 e->cnt += 1;
Stanley Chue965e5e2020-12-05 19:58:59 +08004632 e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
Stanley Chu172614a2020-12-05 19:59:00 +08004633
4634 ufshcd_vops_event_notify(hba, id, &val);
Stanley Chu8808b4e2019-07-10 21:38:21 +08004635}
Stanley Chue965e5e2020-12-05 19:58:59 +08004636EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
Stanley Chu8808b4e2019-07-10 21:38:21 +08004637
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304638/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304639 * ufshcd_link_startup - Initialize unipro link startup
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304640 * @hba: per adapter instance
4641 *
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304642 * Returns 0 for success, non-zero in case of failure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304643 */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304644static int ufshcd_link_startup(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304645{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304646 int ret;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004647 int retries = DME_LINKSTARTUP_RETRIES;
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08004648 bool link_startup_again = false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304649
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08004650 /*
4651 * If the UFS device isn't active then we will have to issue link startup
4652 * twice to make sure the device state moves to active.
4653 */
4654 if (!ufshcd_is_ufs_dev_active(hba))
4655 link_startup_again = true;
4656
4657link_startup:
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004658 do {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004659 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304660
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004661 ret = ufshcd_dme_link_startup(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004662
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004663 /* check if device is detected by inter-connect layer */
4664 if (!ret && !ufshcd_is_device_present(hba)) {
Stanley Chue965e5e2020-12-05 19:58:59 +08004665 ufshcd_update_evt_hist(hba,
4666 UFS_EVT_LINK_STARTUP_FAIL,
Stanley Chu8808b4e2019-07-10 21:38:21 +08004667 0);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004668 dev_err(hba->dev, "%s: Device not present\n", __func__);
4669 ret = -ENXIO;
4670 goto out;
4671 }
4672
4673 /*
4674 * DME link lost indication is only received when link is up,
4675 * but we can't be sure if the link is up until link startup
4676 * succeeds. So reset the local UniPro stack and try again.
4677 */
Stanley Chu8808b4e2019-07-10 21:38:21 +08004678 if (ret && ufshcd_hba_enable(hba)) {
Stanley Chue965e5e2020-12-05 19:58:59 +08004679 ufshcd_update_evt_hist(hba,
4680 UFS_EVT_LINK_STARTUP_FAIL,
Stanley Chu8808b4e2019-07-10 21:38:21 +08004681 (u32)ret);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004682 goto out;
Stanley Chu8808b4e2019-07-10 21:38:21 +08004683 }
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004684 } while (ret && retries--);
4685
Stanley Chu8808b4e2019-07-10 21:38:21 +08004686 if (ret) {
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004687		/* failed to get the link up... give up */
Stanley Chue965e5e2020-12-05 19:58:59 +08004688 ufshcd_update_evt_hist(hba,
4689 UFS_EVT_LINK_STARTUP_FAIL,
Stanley Chu8808b4e2019-07-10 21:38:21 +08004690 (u32)ret);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304691 goto out;
Stanley Chu8808b4e2019-07-10 21:38:21 +08004692 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304693
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08004694 if (link_startup_again) {
4695 link_startup_again = false;
4696 retries = DME_LINKSTARTUP_RETRIES;
4697 goto link_startup;
4698 }
4699
subhashj@codeaurora.orgd2aebb92016-12-22 18:41:33 -08004700 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4701 ufshcd_init_pwr_info(hba);
4702 ufshcd_print_pwr_info(hba);
4703
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03004704 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4705 ret = ufshcd_disable_device_tx_lcc(hba);
4706 if (ret)
4707 goto out;
4708 }
4709
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004710 /* Include any host controller configuration via UIC commands */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004711 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4712 if (ret)
4713 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004714
Can Guo2355b662020-08-24 19:07:06 -07004715 /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
4716 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004717 ret = ufshcd_make_hba_operational(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304718out:
Venkat Gopalakrishnan7942f7b2017-02-03 16:58:24 -08004719 if (ret) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304720 dev_err(hba->dev, "link startup failed %d\n", ret);
Venkat Gopalakrishnan7942f7b2017-02-03 16:58:24 -08004721 ufshcd_print_host_state(hba);
4722 ufshcd_print_pwr_info(hba);
Stanley Chue965e5e2020-12-05 19:58:59 +08004723 ufshcd_print_evt_hist(hba);
Venkat Gopalakrishnan7942f7b2017-02-03 16:58:24 -08004724 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304725 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304726}
4727
4728/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304729 * ufshcd_verify_dev_init() - Verify device initialization
4730 * @hba: per-adapter instance
4731 *
4732 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4733 * device Transport Protocol (UTP) layer is ready after a reset.
4734 * If the UTP layer at the device side is not initialized, it may
4735 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4736 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4737 */
4738static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4739{
4740 int err = 0;
4741 int retries;
4742
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004743 ufshcd_hold(hba, false);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304744 mutex_lock(&hba->dev_cmd.lock);
4745 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4746 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
Adrian Hunter1cbc9ad2021-08-31 17:53:17 +03004747 hba->nop_out_timeout);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304748
4749 if (!err || err == -ETIMEDOUT)
4750 break;
4751
4752 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4753 }
4754 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004755 ufshcd_release(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304756
4757 if (err)
4758 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4759 return err;
4760}
4761
4762/**
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004763 * ufshcd_set_queue_depth - set lun queue depth
4764 * @sdev: pointer to SCSI device
4765 *
4766 * Read the bLUQueueDepth value and activate SCSI tagged command
4767 * queueing. For a WLUN, the queue depth is set to 1. For best-effort
4768 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
4769 * value that the host can queue.
4770 */
4771static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4772{
4773 int ret = 0;
4774 u8 lun_qdepth;
4775 struct ufs_hba *hba;
4776
4777 hba = shost_priv(sdev->host);
4778
4779 lun_qdepth = hba->nutrs;
Szymon Mielczarekdbd34a62017-03-29 08:19:21 +02004780 ret = ufshcd_read_unit_desc_param(hba,
4781 ufshcd_scsi_to_upiu_lun(sdev->lun),
4782 UNIT_DESC_PARAM_LU_Q_DEPTH,
4783 &lun_qdepth,
4784 sizeof(lun_qdepth));
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004785
4786 /* Some WLUN doesn't support unit descriptor */
4787 if (ret == -EOPNOTSUPP)
4788 lun_qdepth = 1;
4789 else if (!lun_qdepth)
4790 /* eventually, we can figure out the real queue depth */
4791 lun_qdepth = hba->nutrs;
4792 else
4793 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4794
4795 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4796 __func__, lun_qdepth);
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004797 scsi_change_queue_depth(sdev, lun_qdepth);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004798}
4799
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004800/*
4801 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4802 * @hba: per-adapter instance
4803 * @lun: UFS device lun id
4804 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4805 *
4806 * Returns 0 in case of success and the b_lu_write_protect status is returned
4807 * in the @b_lu_write_protect parameter.
4808 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4809 * Returns -EINVAL in case of invalid parameters passed to this function.
4810 */
4811static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4812 u8 lun,
4813 u8 *b_lu_write_protect)
4814{
4815 int ret;
4816
4817 if (!b_lu_write_protect)
4818 ret = -EINVAL;
4819 /*
4820 * According to UFS device spec, RPMB LU can't be write
4821 * protected so skip reading bLUWriteProtect parameter for
4822 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4823 */
Bean Huo1baa8012020-01-20 14:08:20 +01004824 else if (lun >= hba->dev_info.max_lu_supported)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004825 ret = -ENOTSUPP;
4826 else
4827 ret = ufshcd_read_unit_desc_param(hba,
4828 lun,
4829 UNIT_DESC_PARAM_LU_WR_PROTECT,
4830 b_lu_write_protect,
4831 sizeof(*b_lu_write_protect));
4832 return ret;
4833}
4834
4835/**
4836 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4837 * status
4838 * @hba: per-adapter instance
4839 * @sdev: pointer to SCSI device
4840 *
4841 */
4842static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4843 struct scsi_device *sdev)
4844{
4845 if (hba->dev_info.f_power_on_wp_en &&
4846 !hba->dev_info.is_lu_power_on_wp) {
4847 u8 b_lu_write_protect;
4848
4849 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4850 &b_lu_write_protect) &&
4851 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4852 hba->dev_info.is_lu_power_on_wp = true;
4853 }
4854}
4855
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004856/**
Asutosh Dasb294ff32021-04-23 17:20:16 -07004857 * ufshcd_setup_links - associate link b/w device wlun and other luns
4858 * @sdev: pointer to SCSI device
4859 * @hba: pointer to ufs hba
4860 */
4861static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
4862{
4863 struct device_link *link;
4864
4865 /*
4866 * Device wlun is the supplier & the rest of the luns are consumers.
4867 * This ensures that device wlun suspends after all other luns.
4868 */
4869 if (hba->sdev_ufs_device) {
4870 link = device_link_add(&sdev->sdev_gendev,
4871 &hba->sdev_ufs_device->sdev_gendev,
4872 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
4873 if (!link) {
4874 dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
4875 dev_name(&hba->sdev_ufs_device->sdev_gendev));
4876 return;
4877 }
4878 hba->luns_avail--;
4879 /* Ignore REPORT_LUN wlun probing */
4880 if (hba->luns_avail == 1) {
4881 ufshcd_rpm_put(hba);
4882 return;
4883 }
4884 } else {
4885 /*
4886 * Device wlun is probed. The assumption is that WLUNs are
4887 * scanned before other LUNs.
4888 */
4889 hba->luns_avail--;
4890 }
4891}
4892
4893/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304894 * ufshcd_slave_alloc - handle initial SCSI device configurations
4895 * @sdev: pointer to SCSI device
4896 *
4897 * Returns success
4898 */
4899static int ufshcd_slave_alloc(struct scsi_device *sdev)
4900{
4901 struct ufs_hba *hba;
4902
4903 hba = shost_priv(sdev->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304904
4905 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4906 sdev->use_10_for_ms = 1;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304907
Can Guoa3a76392019-12-05 02:14:30 +00004908 /* DBD field should be set to 1 in mode sense(10) */
4909 sdev->set_dbd_for_ms = 1;
4910
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304911 /* allow SCSI layer to restart the device in case of errors */
4912 sdev->allow_restart = 1;
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004913
Sujit Reddy Thummab2a6c522014-07-01 12:22:38 +03004914 /* REPORT SUPPORTED OPERATION CODES is not supported */
4915 sdev->no_report_opcodes = 1;
4916
Sujit Reddy Thumma84af7e82018-01-24 09:52:35 +05304917 /* WRITE_SAME command is not supported */
4918 sdev->no_write_same = 1;
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004919
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004920 ufshcd_set_queue_depth(sdev);
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004921
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004922 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4923
Asutosh Dasb294ff32021-04-23 17:20:16 -07004924 ufshcd_setup_links(hba, sdev);
4925
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004926 return 0;
4927}
4928
4929/**
4930 * ufshcd_change_queue_depth - change queue depth
4931 * @sdev: pointer to SCSI device
4932 * @depth: required depth to set
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004933 *
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004934 * Change queue depth and make sure the max. limits are not crossed.
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004935 */
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004936static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004937{
Bart Van Asschefc21da82021-12-03 15:19:41 -08004938 return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304939}
4940
Daejun Parkf02bc972021-07-12 17:58:30 +09004941static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
4942{
4943 /* skip well-known LU */
Daejun Park41d8a932021-07-12 18:00:25 +09004944 if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
4945 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
Daejun Parkf02bc972021-07-12 17:58:30 +09004946 return;
4947
4948 ufshpb_destroy_lu(hba, sdev);
4949}
4950
4951static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
4952{
4953 /* skip well-known LU */
4954 if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
4955 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
4956 return;
4957
4958 ufshpb_init_hpb_lu(hba, sdev);
4959}
4960
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304961/**
Akinobu Mitaeeda4742014-07-01 23:00:32 +09004962 * ufshcd_slave_configure - adjust SCSI device configurations
4963 * @sdev: pointer to SCSI device
4964 */
4965static int ufshcd_slave_configure(struct scsi_device *sdev)
4966{
Stanley Chu49615ba2019-09-16 23:56:50 +08004967 struct ufs_hba *hba = shost_priv(sdev->host);
Akinobu Mitaeeda4742014-07-01 23:00:32 +09004968 struct request_queue *q = sdev->request_queue;
4969
Daejun Parkf02bc972021-07-12 17:58:30 +09004970 ufshcd_hpb_configure(hba, sdev);
4971
Akinobu Mitaeeda4742014-07-01 23:00:32 +09004972 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
Kiwoong Kim2b2bfc8a2021-01-19 12:33:41 +09004973 if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
4974 blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
Asutosh Dasb294ff32021-04-23 17:20:16 -07004975 /*
4976 * Block runtime-pm until all consumers are added.
4977 * Refer to ufshcd_setup_links().
4978 */
4979 if (is_device_wlun(sdev))
4980 pm_runtime_get_noresume(&sdev->sdev_gendev);
4981 else if (ufshcd_is_rpm_autosuspend_allowed(hba))
Stanley Chu49615ba2019-09-16 23:56:50 +08004982 sdev->rpm_autosuspend = 1;
4983
Eric Biggerscb77cb52021-10-18 11:04:52 -07004984 ufshcd_crypto_register(hba, q);
Satya Tangiraladf043c742020-07-06 20:04:14 +00004985
Akinobu Mitaeeda4742014-07-01 23:00:32 +09004986 return 0;
4987}
4988
4989/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304990 * ufshcd_slave_destroy - remove SCSI device configurations
4991 * @sdev: pointer to SCSI device
4992 */
4993static void ufshcd_slave_destroy(struct scsi_device *sdev)
4994{
4995 struct ufs_hba *hba;
Adrian Hunterbf259672021-08-06 16:04:41 +03004996 unsigned long flags;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304997
4998 hba = shost_priv(sdev->host);
Daejun Parkf02bc972021-07-12 17:58:30 +09004999
5000 ufshcd_hpb_destroy(hba, sdev);
5001
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03005002 /* Drop the reference as it won't be needed anymore */
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005003 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005004 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03005005 hba->sdev_ufs_device = NULL;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005006 spin_unlock_irqrestore(hba->host->host_lock, flags);
Adrian Hunterbf259672021-08-06 16:04:41 +03005007 } else if (hba->sdev_ufs_device) {
5008 struct device *supplier = NULL;
5009
5010 /* Ensure UFS Device WLUN exists and does not disappear */
5011 spin_lock_irqsave(hba->host->host_lock, flags);
5012 if (hba->sdev_ufs_device) {
5013 supplier = &hba->sdev_ufs_device->sdev_gendev;
5014 get_device(supplier);
5015 }
5016 spin_unlock_irqrestore(hba->host->host_lock, flags);
5017
5018 if (supplier) {
5019 /*
5020 * If a LUN fails to probe (e.g. absent BOOT WLUN), the
5021 * device will not have been registered but can still
5022 * have a device link holding a reference to the device.
5023 */
5024 device_link_remove(&sdev->sdev_gendev, supplier);
5025 put_device(supplier);
5026 }
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005027 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305028}
5029
5030/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305031 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
Bart Van Assche8aa29f12018-03-01 15:07:20 -08005032 * @lrbp: pointer to local reference block of completed command
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305033 * @scsi_status: SCSI command status
5034 *
5035 * Returns value base on SCSI command status
5036 */
5037static inline int
5038ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5039{
5040 int result = 0;
5041
5042 switch (scsi_status) {
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05305043 case SAM_STAT_CHECK_CONDITION:
5044 ufshcd_copy_sense_data(lrbp);
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05005045 fallthrough;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305046 case SAM_STAT_GOOD:
Hannes Reineckedb83d8a2021-01-13 10:04:48 +01005047 result |= DID_OK << 16 | scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305048 break;
5049 case SAM_STAT_TASK_SET_FULL:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05305050 case SAM_STAT_BUSY:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305051 case SAM_STAT_TASK_ABORTED:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05305052 ufshcd_copy_sense_data(lrbp);
5053 result |= scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305054 break;
5055 default:
5056 result |= DID_ERROR << 16;
5057 break;
5058 } /* end of switch */
5059
5060 return result;
5061}
5062
5063/**
5064 * ufshcd_transfer_rsp_status - Get overall status of the response
5065 * @hba: per adapter instance
Bart Van Assche8aa29f12018-03-01 15:07:20 -08005066 * @lrbp: pointer to local reference block of completed command
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305067 *
5068 * Returns result of the command to notify SCSI midlayer
5069 */
5070static inline int
5071ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
5072{
5073 int result = 0;
5074 int scsi_status;
Bart Van Assche957d63e2021-10-20 14:40:17 -07005075 enum utp_ocs ocs;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305076
5077 /* overall command status of utrd */
5078 ocs = ufshcd_get_tr_ocs(lrbp);
5079
Kiwoong Kimd779a6e2020-05-28 06:46:53 +05305080 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5081 if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
5082 MASK_RSP_UPIU_RESULT)
5083 ocs = OCS_SUCCESS;
5084 }
5085
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305086 switch (ocs) {
5087 case OCS_SUCCESS:
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305088 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005089 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305090 switch (result) {
5091 case UPIU_TRANSACTION_RESPONSE:
5092 /*
5093 * get the response UPIU result to extract
5094 * the SCSI command status
5095 */
5096 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
5097
5098 /*
5099 * get the result based on SCSI status response
5100 * to notify the SCSI midlayer of the command status
5101 */
5102 scsi_status = result & MASK_SCSI_STATUS;
5103 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305104
Yaniv Gardif05ac2e2016-02-01 15:02:42 +02005105 /*
5106 * Currently we are only supporting BKOPs exception
5107 * events, hence we can ignore the BKOPs exception event
5108 * during power management callbacks. BKOPs exception
5109 * event is not expected to be raised in runtime suspend
5110 * callback as it allows the urgent bkops.
5111 * During system suspend, we are anyway forcefully
5112 * disabling the bkops and if urgent bkops is needed
5113 * it will be enabled on system resume. Long term
5114 * solution could be to abort the system suspend if
5115 * UFS device needs urgent BKOPs.
5116 */
5117 if (!hba->pm_op_in_progress &&
Can Guoaa53f582021-02-23 21:36:47 -08005118 !ufshcd_eh_in_progress(hba) &&
Asutosh Dasb294ff32021-04-23 17:20:16 -07005119 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
5120 /* Flushed in suspend */
5121 schedule_work(&hba->eeh_work);
Daejun Park4b5f4902021-07-12 17:58:59 +09005122
5123 if (scsi_status == SAM_STAT_GOOD)
5124 ufshpb_rsp_upiu(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305125 break;
5126 case UPIU_TRANSACTION_REJECT_UPIU:
5127 /* TODO: handle Reject UPIU Response */
5128 result = DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05305129 dev_err(hba->dev,
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305130 "Reject UPIU not fully implemented\n");
5131 break;
5132 default:
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305133 dev_err(hba->dev,
5134 "Unexpected request response code = %x\n",
5135 result);
Stanley Chue0347d82019-04-15 20:23:38 +08005136 result = DID_ERROR << 16;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305137 break;
5138 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305139 break;
5140 case OCS_ABORTED:
5141 result |= DID_ABORT << 16;
5142 break;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305143 case OCS_INVALID_COMMAND_STATUS:
5144 result |= DID_REQUEUE << 16;
5145 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305146 case OCS_INVALID_CMD_TABLE_ATTR:
5147 case OCS_INVALID_PRDT_ATTR:
5148 case OCS_MISMATCH_DATA_BUF_SIZE:
5149 case OCS_MISMATCH_RESP_UPIU_SIZE:
5150 case OCS_PEER_COMM_FAILURE:
5151 case OCS_FATAL_ERROR:
Satya Tangirala5e7341e2020-07-06 20:04:12 +00005152 case OCS_DEVICE_FATAL_ERROR:
5153 case OCS_INVALID_CRYPTO_CONFIG:
5154 case OCS_GENERAL_CRYPTO_ERROR:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305155 default:
5156 result |= DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05305157 dev_err(hba->dev,
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005158 "OCS error from controller = %x for tag %d\n",
5159 ocs, lrbp->task_tag);
Stanley Chue965e5e2020-12-05 19:58:59 +08005160 ufshcd_print_evt_hist(hba);
Gilad Broner6ba65582017-02-03 16:57:28 -08005161 ufshcd_print_host_state(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305162 break;
5163 } /* end of switch */
5164
Jaegeuk Kimeeb1b552021-01-07 10:53:16 -08005165 if ((host_byte(result) != DID_OK) &&
5166 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
Dolev Raviv66cc8202016-12-22 18:39:42 -08005167 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305168 return result;
5169}
5170
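/*
 * Check whether an interrupt qualifies as an auto-hibern8 error: auto-hibern8
 * must be supported and enabled, a hibern8 enter/exit error bit must be set,
 * and no manual DME hibern8 enter/exit UIC command may be in flight.
 */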
Can Guoa45f9372021-05-24 01:36:57 -07005171static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5172 u32 intr_mask)
5173{
5174 if (!ufshcd_is_auto_hibern8_supported(hba) ||
5175 !ufshcd_is_auto_hibern8_enabled(hba))
5176 return false;
5177
5178 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5179 return false;
5180
5181 if (hba->active_uic_cmd &&
5182 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5183 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5184 return false;
5185
5186 return true;
5187}
5188
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305189/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305190 * ufshcd_uic_cmd_compl - handle completion of uic command
5191 * @hba: per adapter instance
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05305192 * @intr_status: interrupt status generated by the controller
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005193 *
5194 * Returns
5195 * IRQ_HANDLED - If interrupt is valid
5196 * IRQ_NONE - If invalid interrupt
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305197 */
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005198static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305199{
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005200 irqreturn_t retval = IRQ_NONE;
5201
Can Guoa45f9372021-05-24 01:36:57 -07005202 spin_lock(hba->host->host_lock);
5203 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5204 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5205
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05305206 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305207 hba->active_uic_cmd->argument2 |=
5208 ufshcd_get_uic_cmd_result(hba);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05305209 hba->active_uic_cmd->argument3 =
5210 ufshcd_get_dme_attr_val(hba);
Can Guo0f52fcb92020-11-02 22:24:40 -08005211 if (!hba->uic_async_done)
5212 hba->active_uic_cmd->cmd_active = 0;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305213 complete(&hba->active_uic_cmd->done);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005214 retval = IRQ_HANDLED;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305215 }
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05305216
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005217 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
Can Guo0f52fcb92020-11-02 22:24:40 -08005218 hba->active_uic_cmd->cmd_active = 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005219 complete(hba->uic_async_done);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005220 retval = IRQ_HANDLED;
5221 }
Stanley Chuaa5c6972020-06-15 15:22:35 +08005222
5223 if (retval == IRQ_HANDLED)
5224 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
Bean Huo28fa68f2021-01-05 12:34:42 +01005225 UFS_CMD_COMP);
Can Guoa45f9372021-05-24 01:36:57 -07005226 spin_unlock(hba->host->host_lock);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005227 return retval;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305228}
5229
Bart Van Assche6f8dafd2021-12-03 15:19:45 -08005230/* Release the resources allocated for processing a SCSI command. */
5231static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
5232 struct ufshcd_lrb *lrbp)
5233{
5234 struct scsi_cmnd *cmd = lrbp->cmd;
5235
5236 scsi_dma_unmap(cmd);
5237 lrbp->cmd = NULL; /* Mark the command as completed. */
5238 ufshcd_release(hba);
5239 ufshcd_clk_scaling_update_busy(hba);
5240}
5241
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305242/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005243 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305244 * @hba: per adapter instance
Bart Van Assche73dc3c42021-07-21 20:34:38 -07005245 * @completed_reqs: bitmask that indicates which requests to complete
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305246 */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005247static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
Bart Van Assche11682522021-10-20 14:40:15 -07005248 unsigned long completed_reqs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305249{
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305250 struct ufshcd_lrb *lrbp;
5251 struct scsi_cmnd *cmd;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305252 int index;
Dolev Ravive9d501b2014-07-01 12:22:37 +03005253
Dolev Ravive9d501b2014-07-01 12:22:37 +03005254 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
5255 lrbp = &hba->lrb[index];
Stanley Chua3170372020-07-06 14:07:06 +08005256 lrbp->compl_time_stamp = ktime_get();
Dolev Ravive9d501b2014-07-01 12:22:37 +03005257 cmd = lrbp->cmd;
5258 if (cmd) {
Can Guo1d8613a2021-04-21 19:28:39 -07005259 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5260 ufshcd_update_monitor(hba, lrbp);
Bean Huo28fa68f2021-01-05 12:34:42 +01005261 ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
Bart Van Assche6f8dafd2021-12-03 15:19:45 -08005262 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
5263 ufshcd_release_scsi_cmd(hba, lrbp);
Dolev Ravive9d501b2014-07-01 12:22:37 +03005264 /* Do not touch lrbp after scsi done */
Bart Van Assche35c37302021-10-07 13:46:01 -07005265 scsi_done(cmd);
Joao Pinto300bb132016-05-11 12:21:27 +01005266 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5267 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
Lee Susman1a07f2d2016-12-22 18:42:03 -08005268 if (hba->dev_cmd.complete) {
5269 ufshcd_add_command_trace(hba, index,
Bean Huo28fa68f2021-01-05 12:34:42 +01005270 UFS_DEV_COMP);
Dolev Ravive9d501b2014-07-01 12:22:37 +03005271 complete(hba->dev_cmd.complete);
Bart Van Assche3eb9dcc2021-12-03 15:19:44 -08005272 ufshcd_clk_scaling_update_busy(hba);
Lee Susman1a07f2d2016-12-22 18:42:03 -08005273 }
Dolev Ravive9d501b2014-07-01 12:22:37 +03005274 }
5275 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305276}
5277
5278/**
Bart Van Assche1f522c52021-07-21 20:34:32 -07005279 * ufshcd_transfer_req_compl - handle SCSI and query command completion
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005280 * @hba: per adapter instance
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005281 *
5282 * Returns
5283 * IRQ_HANDLED - If interrupt is valid
5284 * IRQ_NONE - If invalid interrupt
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005285 */
Bart Van Assche11682522021-10-20 14:40:15 -07005286static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005287{
Bart Van Assche1f522c52021-07-21 20:34:32 -07005288 unsigned long completed_reqs, flags;
5289 u32 tr_doorbell;
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005290
5291 /* Resetting interrupt aggregation counters first and reading the
5292 * DOOR_BELL afterward allows us to handle all the completed requests.
5293 * In order to prevent starvation of other interrupts, the DB is read once
5294 * after reset. The downside of this solution is the possibility of a
5295 * false interrupt if the device completes another request after resetting
5296 * aggregation and before reading the DB.
5297 */
Alim Akhtarb638b5e2020-05-28 06:46:50 +05305298 if (ufshcd_is_intr_aggr_allowed(hba) &&
5299 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005300 ufshcd_reset_intr_aggr(hba);
5301
Bart Van Asschec11a1ae2021-07-21 20:34:39 -07005302 if (ufs_fail_completion())
5303 return IRQ_HANDLED;
5304
Bart Van Assche169f5eb2021-07-21 20:34:34 -07005305 spin_lock_irqsave(&hba->outstanding_lock, flags);
Bart Van Assche1f522c52021-07-21 20:34:32 -07005306 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Bart Van Assche169f5eb2021-07-21 20:34:34 -07005307 completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
5308 WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
5309 "completed: %#lx; outstanding: %#lx\n", completed_reqs,
5310 hba->outstanding_reqs);
5311 hba->outstanding_reqs &= ~completed_reqs;
5312 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005313
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005314 if (completed_reqs) {
Bart Van Assche11682522021-10-20 14:40:15 -07005315 __ufshcd_transfer_req_compl(hba, completed_reqs);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005316 return IRQ_HANDLED;
5317 } else {
5318 return IRQ_NONE;
5319 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005320}
5321
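/* Write the exception event control attribute; callers handle ee_ctrl_mutex. */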
Adrian Hunter7deedfd2021-02-09 08:24:37 +02005322int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
Adrian Huntercd469472021-02-09 08:24:36 +02005323{
5324 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5325 QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
5326 &ee_ctrl_mask);
5327}
5328
Adrian Hunter7deedfd2021-02-09 08:24:37 +02005329int ufshcd_write_ee_control(struct ufs_hba *hba)
Adrian Huntercd469472021-02-09 08:24:36 +02005330{
5331 int err;
5332
5333 mutex_lock(&hba->ee_ctrl_mutex);
5334 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
5335 mutex_unlock(&hba->ee_ctrl_mutex);
5336 if (err)
5337 dev_err(hba->dev, "%s: failed to write ee control %d\n",
5338 __func__, err);
5339 return err;
5340}
5341
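/*
 * Update one of the exception event sub-masks under ee_ctrl_mutex: clear @clr
 * and set @set in @mask, combine it with @other_mask, and write the result to
 * the device only when the combined value actually changes.
 */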
5342int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
5343 u16 set, u16 clr)
5344{
5345 u16 new_mask, ee_ctrl_mask;
5346 int err = 0;
5347
5348 mutex_lock(&hba->ee_ctrl_mutex);
5349 new_mask = (*mask & ~clr) | set;
5350 ee_ctrl_mask = new_mask | *other_mask;
5351 if (ee_ctrl_mask != hba->ee_ctrl_mask)
5352 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
5353 /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
5354 if (!err) {
5355 hba->ee_ctrl_mask = ee_ctrl_mask;
5356 *mask = new_mask;
5357 }
5358 mutex_unlock(&hba->ee_ctrl_mutex);
5359 return err;
5360}
5361
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005362/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305363 * ufshcd_disable_ee - disable exception event
5364 * @hba: per-adapter instance
5365 * @mask: exception event to disable
5366 *
5367 * Disables exception event in the device so that the EVENT_ALERT
5368 * bit is not set.
5369 *
5370 * Returns zero on success, non-zero error value on failure.
5371 */
Adrian Huntercd469472021-02-09 08:24:36 +02005372static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305373{
Adrian Huntercd469472021-02-09 08:24:36 +02005374 return ufshcd_update_ee_drv_mask(hba, 0, mask);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305375}
5376
5377/**
5378 * ufshcd_enable_ee - enable exception event
5379 * @hba: per-adapter instance
5380 * @mask: exception event to enable
5381 *
5382 * Enable corresponding exception event in the device to allow
5383 * device to alert host in critical scenarios.
5384 *
5385 * Returns zero on success, non-zero error value on failure.
5386 */
Adrian Huntercd469472021-02-09 08:24:36 +02005387static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305388{
Adrian Huntercd469472021-02-09 08:24:36 +02005389 return ufshcd_update_ee_drv_mask(hba, mask, 0);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305390}
5391
5392/**
5393 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5394 * @hba: per-adapter instance
5395 *
5396 * Allow device to manage background operations on its own. Enabling
5397 * this might lead to inconsistent latencies during normal data transfers
5398 * as the device is allowed to manage its own way of handling background
5399 * operations.
5400 *
5401 * Returns zero on success, non-zero on failure.
5402 */
5403static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5404{
5405 int err = 0;
5406
5407 if (hba->auto_bkops_enabled)
5408 goto out;
5409
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02005410 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
Stanley Chu1f34eed2020-05-08 16:01:12 +08005411 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305412 if (err) {
5413 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5414 __func__, err);
5415 goto out;
5416 }
5417
5418 hba->auto_bkops_enabled = true;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08005419 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305420
5421 /* No need of URGENT_BKOPS exception from the device */
5422 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5423 if (err)
5424 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5425 __func__, err);
5426out:
5427 return err;
5428}
5429
5430/**
5431 * ufshcd_disable_auto_bkops - block device in doing background operations
5432 * @hba: per-adapter instance
5433 *
5434 * Disabling background operations improves command response latency but
5435 * has the drawback of the device moving into a critical state where it is
5436 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5437 * host is idle so that BKOPS are managed effectively without any negative
5438 * impacts.
5439 *
5440 * Returns zero on success, non-zero on failure.
5441 */
5442static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5443{
5444 int err = 0;
5445
5446 if (!hba->auto_bkops_enabled)
5447 goto out;
5448
5449 /*
5450 * If host assisted BKOPs is to be enabled, make sure
5451 * urgent bkops exception is allowed.
5452 */
5453 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5454 if (err) {
5455 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5456 __func__, err);
5457 goto out;
5458 }
5459
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02005460 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
Stanley Chu1f34eed2020-05-08 16:01:12 +08005461 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305462 if (err) {
5463 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5464 __func__, err);
5465 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5466 goto out;
5467 }
5468
5469 hba->auto_bkops_enabled = false;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08005470 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
Asutosh Das24366c2a2019-11-25 22:53:30 -08005471 hba->is_urgent_bkops_lvl_checked = false;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305472out:
5473 return err;
5474}
5475
5476/**
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08005477 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305478 * @hba: per adapter instance
5479 *
5480 * After a device reset the device may toggle the BKOPS_EN flag
5481 * to default value. The s/w tracking variables should be updated
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08005482 * as well. This function would change the auto-bkops state based on
5483 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305484 */
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08005485static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305486{
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08005487 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5488 hba->auto_bkops_enabled = false;
5489 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5490 ufshcd_enable_auto_bkops(hba);
5491 } else {
5492 hba->auto_bkops_enabled = true;
5493 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5494 ufshcd_disable_auto_bkops(hba);
5495 }
Stanley Chu7b6668d2020-05-30 22:12:00 +08005496 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
Asutosh Das24366c2a2019-11-25 22:53:30 -08005497 hba->is_urgent_bkops_lvl_checked = false;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305498}
5499
5500static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5501{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02005502 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305503 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5504}
5505
5506/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005507 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5508 * @hba: per-adapter instance
5509 * @status: bkops_status value
5510 *
5511 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5512 * flag in the device to permit background operations if the device
5513 * bkops_status is greater than or equal to "status" argument passed to
5514 * this function, disable otherwise.
5515 *
5516 * Returns 0 for success, non-zero in case of failure.
5517 *
5518 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5519 * to know whether auto bkops is enabled or disabled after this function
5520 * returns control to it.
5521 */
5522static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5523 enum bkops_status status)
5524{
5525 int err;
5526 u32 curr_status = 0;
5527
5528 err = ufshcd_get_bkops_status(hba, &curr_status);
5529 if (err) {
5530 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5531 __func__, err);
5532 goto out;
5533 } else if (curr_status > BKOPS_STATUS_MAX) {
5534 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5535 __func__, curr_status);
5536 err = -EINVAL;
5537 goto out;
5538 }
5539
5540 if (curr_status >= status)
5541 err = ufshcd_enable_auto_bkops(hba);
5542 else
5543 err = ufshcd_disable_auto_bkops(hba);
5544out:
5545 return err;
5546}
5547
5548/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305549 * ufshcd_urgent_bkops - handle urgent bkops exception event
5550 * @hba: per-adapter instance
5551 *
5552 * Enable fBackgroundOpsEn flag in the device to permit background
5553 * operations.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005554 *
5555 * If BKOPs is enabled, this function returns 0; 1 if bkops is not enabled;
5556 * and a negative error value for any other failure.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305557 */
5558static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5559{
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005560 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305561}
5562
5563static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5564{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02005565 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305566 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5567}
5568
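/*
 * Handle an urgent BKOPS exception event. Devices that raise the exception
 * while reporting a status below BKOPS_STATUS_PERF_IMPACT get their reported
 * status adopted as the urgent BKOPS level (checked only once); auto-BKOPS is
 * then enabled.
 */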
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005569static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5570{
5571 int err;
5572 u32 curr_status = 0;
5573
5574 if (hba->is_urgent_bkops_lvl_checked)
5575 goto enable_auto_bkops;
5576
5577 err = ufshcd_get_bkops_status(hba, &curr_status);
5578 if (err) {
5579 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5580 __func__, err);
5581 goto out;
5582 }
5583
5584 /*
5585 * We are seeing that some devices are raising the urgent bkops
5586 * exception events even when the BKOPS status doesn't indicate performance
5587 * impacted or critical. Handle these devices by determining their urgent
5588 * bkops status at runtime.
5589 */
5590 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5591 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5592 __func__, curr_status);
5593 /* update the current status as the urgent bkops level */
5594 hba->urgent_bkops_lvl = curr_status;
5595 hba->is_urgent_bkops_lvl_checked = true;
5596 }
5597
5598enable_auto_bkops:
5599 err = ufshcd_enable_auto_bkops(hba);
5600out:
5601 if (err < 0)
5602 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5603 __func__, err);
5604}
5605
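/*
 * Handle a temperature exception event: read the rough case temperature
 * attribute, log it (the raw value carries an offset of 80) and forward the
 * urgent temperature bits to the hwmon code.
 */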
Avri Altman322c4b22021-09-15 09:04:07 +03005606static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
5607{
5608 u32 value;
5609
5610 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5611 QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
5612 return;
5613
5614 dev_info(hba->dev, "exception Tcase %d\n", value - 80);
5615
5616 ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
5617
5618 /*
5619 * A placeholder for the platform vendors to add whatever additional
5620 * steps required
5621 */
5622}
5623
Yue Hu3b5f3c02021-03-18 17:55:36 +08005624static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
5625{
5626 u8 index;
5627 enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
5628 UPIU_QUERY_OPCODE_CLEAR_FLAG;
5629
5630 index = ufshcd_wb_get_query_index(hba);
5631 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
5632}
5633
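/*
 * Enable or disable WriteBooster. Does nothing when WriteBooster is not
 * allowed or is already in the requested state.
 *
 * Returns zero on success, non-zero error value on failure.
 */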
5634int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005635{
5636 int ret;
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005637
Stanley Chu79e35202020-05-08 16:01:15 +08005638 if (!ufshcd_is_wb_allowed(hba))
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005639 return 0;
5640
Bean Huo4cd48992021-01-19 17:38:46 +01005641 if (!(enable ^ hba->dev_info.wb_enabled))
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005642 return 0;
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005643
Yue Hu3b5f3c02021-03-18 17:55:36 +08005644 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005645 if (ret) {
Yue Hu3b5f3c02021-03-18 17:55:36 +08005646 dev_err(hba->dev, "%s Write Booster %s failed %d\n",
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005647 __func__, enable ? "enable" : "disable", ret);
5648 return ret;
5649 }
5650
Bean Huo4cd48992021-01-19 17:38:46 +01005651 hba->dev_info.wb_enabled = enable;
Yue Hu3b5f3c02021-03-18 17:55:36 +08005652 dev_info(hba->dev, "%s Write Booster %s\n",
5653 __func__, enable ? "enabled" : "disabled");
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005654
5655 return ret;
5656}
5657
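/* Toggle WriteBooster buffer flushing during hibern8; failures are only logged. */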
Yue Hu3b5f3c02021-03-18 17:55:36 +08005658static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005659{
Bean Huod3ba6222021-01-21 19:57:36 +01005660 int ret;
Yue Hu3b5f3c02021-03-18 17:55:36 +08005661
5662 ret = __ufshcd_wb_toggle(hba, set,
5663 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
5664 if (ret) {
5665 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n",
5666 __func__, set ? "enable" : "disable", ret);
5667 return;
5668 }
5669 dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n",
5670 __func__, set ? "enabled" : "disabled");
5671}
5672
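/*
 * Enable or disable explicit WriteBooster buffer flushing. Does nothing when
 * WriteBooster is not allowed or flushing is already in the requested state;
 * failures are only logged.
 */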
5673static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5674{
5675 int ret;
Bean Huod3ba6222021-01-21 19:57:36 +01005676
5677 if (!ufshcd_is_wb_allowed(hba) ||
5678 hba->dev_info.wb_buf_flush_enabled == enable)
Yue Hu3b5f3c02021-03-18 17:55:36 +08005679 return;
Bean Huod3ba6222021-01-21 19:57:36 +01005680
Yue Hu3b5f3c02021-03-18 17:55:36 +08005681 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005682 if (ret) {
Bean Huod3ba6222021-01-21 19:57:36 +01005683 dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
5684 enable ? "enable" : "disable", ret);
Yue Hu3b5f3c02021-03-18 17:55:36 +08005685 return;
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005686 }
5687
Bean Huod3ba6222021-01-21 19:57:36 +01005688 hba->dev_info.wb_buf_flush_enabled = enable;
5689
Yue Hu3b5f3c02021-03-18 17:55:36 +08005690 dev_dbg(hba->dev, "%s WB-Buf Flush %s\n",
5691 __func__, enable ? "enabled" : "disabled");
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005692}
5693
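/*
 * Decide whether VCC must stay on for flushing when preserve-user-space mode
 * is enabled: keep flushing only if the current buffer is non-empty and the
 * available buffer has dropped below the flush threshold.
 */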
5694static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5695 u32 avail_buf)
5696{
5697 u32 cur_buf;
5698 int ret;
Stanley Chue31011a2020-05-22 16:32:11 +08005699 u8 index;
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005700
Stanley Chue31011a2020-05-22 16:32:11 +08005701 index = ufshcd_wb_get_query_index(hba);
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005702 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5703 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
Stanley Chue31011a2020-05-22 16:32:11 +08005704 index, 0, &cur_buf);
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005705 if (ret) {
5706 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5707 __func__, ret);
5708 return false;
5709 }
5710
5711 if (!cur_buf) {
5712 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5713 cur_buf);
5714 return false;
5715 }
Stanley Chud14734ae2020-05-09 17:37:15 +08005716 /* Let it continue to flush when available buffer exceeds threshold */
5717 if (avail_buf < hba->vps->wb_flush_threshold)
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005718 return true;
5719
5720 return false;
5721}
5722
Stanley Chu51dd9052020-05-22 16:32:12 +08005723static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005724{
5725 int ret;
5726 u32 avail_buf;
Stanley Chue31011a2020-05-22 16:32:11 +08005727 u8 index;
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005728
Stanley Chu79e35202020-05-08 16:01:15 +08005729 if (!ufshcd_is_wb_allowed(hba))
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005730 return false;
5731 /*
5732 * The ufs device needs the vcc to be ON to flush.
5733 * With user-space reduction enabled, it's enough to enable flush
5734 * by checking only the available buffer. The threshold
5735 * defined here is > 90% full.
5736 * With user-space preserved enabled, the current-buffer
5737 * should be checked too because the wb buffer size can reduce
5738 * when disk tends to be full. This info is provided by current
5739 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
5740 * keeping vcc on when current buffer is empty.
5741 */
Stanley Chue31011a2020-05-22 16:32:11 +08005742 index = ufshcd_wb_get_query_index(hba);
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005743 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5744 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
Stanley Chue31011a2020-05-22 16:32:11 +08005745 index, 0, &avail_buf);
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005746 if (ret) {
5747 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5748 __func__, ret);
5749 return false;
5750 }
5751
5752 if (!hba->dev_info.b_presrv_uspc_en) {
Stanley Chud14734ae2020-05-09 17:37:15 +08005753 if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005754 return true;
5755 return false;
5756 }
5757
5758 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
5759}
5760
Stanley Chu51dd9052020-05-22 16:32:12 +08005761static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
5762{
5763 struct ufs_hba *hba = container_of(to_delayed_work(work),
5764 struct ufs_hba,
5765 rpm_dev_flush_recheck_work);
5766 /*
5767 * To prevent unnecessary VCC power drain after device finishes
5768 * WriteBooster buffer flush or Auto BKOPs, force runtime resume
5769 * after a certain delay to recheck the threshold by next runtime
5770 * suspend.
5771 */
Asutosh Dasb294ff32021-04-23 17:20:16 -07005772 ufshcd_rpm_get_sync(hba);
5773 ufshcd_rpm_put_sync(hba);
Stanley Chu51dd9052020-05-22 16:32:12 +08005774}
5775
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305776/**
5777 * ufshcd_exception_event_handler - handle exceptions raised by device
5778 * @work: pointer to work data
5779 *
5780 * Read bExceptionEventStatus attribute from the device and handle the
5781 * exception event accordingly.
5782 */
5783static void ufshcd_exception_event_handler(struct work_struct *work)
5784{
5785 struct ufs_hba *hba;
5786 int err;
5787 u32 status = 0;
5788 hba = container_of(work, struct ufs_hba, eeh_work);
5789
Stanley Chu03e1d282019-12-24 21:01:05 +08005790 ufshcd_scsi_block_requests(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305791 err = ufshcd_get_ee_status(hba, &status);
5792 if (err) {
5793 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5794 __func__, err);
5795 goto out;
5796 }
5797
Adrian Hunterf7733622021-02-09 08:24:34 +02005798 trace_ufshcd_exception_event(dev_name(hba->dev), status);
5799
Adrian Huntercd469472021-02-09 08:24:36 +02005800 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005801 ufshcd_bkops_exception_event_handler(hba);
5802
Avri Altman322c4b22021-09-15 09:04:07 +03005803 if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
5804 ufshcd_temp_exception_event_handler(hba, status);
5805
Adrian Hunter7deedfd2021-02-09 08:24:37 +02005806 ufs_debugfs_exception_event(hba, status);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305807out:
Stanley Chu03e1d282019-12-24 21:01:05 +08005808 ufshcd_scsi_unblock_requests(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305809}
5810
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005811/* Complete requests that have door-bell cleared */
5812static void ufshcd_complete_requests(struct ufs_hba *hba)
5813{
Bart Van Assche11682522021-10-20 14:40:15 -07005814 ufshcd_transfer_req_compl(hba);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005815 ufshcd_tmc_handler(hba);
5816}
5817
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305818/**
Yaniv Gardi583fa622016-03-10 17:37:13 +02005819 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
5820 * to recover from the DL NAC errors or not.
5821 * @hba: per-adapter instance
5822 *
5823 * Returns true if error handling is required, false otherwise
5824 */
5825static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5826{
5827 unsigned long flags;
5828 bool err_handling = true;
5829
5830 spin_lock_irqsave(hba->host->host_lock, flags);
5831 /*
5832 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
5833 * device fatal error and/or DL NAC & REPLAY timeout errors.
5834 */
5835 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5836 goto out;
5837
5838 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5839 ((hba->saved_err & UIC_ERROR) &&
5840 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5841 goto out;
5842
5843 if ((hba->saved_err & UIC_ERROR) &&
5844 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5845 int err;
5846 /*
5847 * wait for 50ms to see if we can get any other errors or not.
5848 */
5849 spin_unlock_irqrestore(hba->host->host_lock, flags);
5850 msleep(50);
5851 spin_lock_irqsave(hba->host->host_lock, flags);
5852
5853 /*
5854 * now check if we have got any other severe errors other than the
5855 * DL NAC error.
5856 */
5857 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5858 ((hba->saved_err & UIC_ERROR) &&
5859 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5860 goto out;
5861
5862 /*
5863 * As DL NAC is the only error received so far, send out NOP
5864 * command to confirm if link is still active or not.
5865 * - If we don't get any response then do error recovery.
5866 * - If we get response then clear the DL NAC error bit.
5867 */
5868
5869 spin_unlock_irqrestore(hba->host->host_lock, flags);
5870 err = ufshcd_verify_dev_init(hba);
5871 spin_lock_irqsave(hba->host->host_lock, flags);
5872
5873 if (err)
5874 goto out;
5875
5876 /* Link seems to be alive hence ignore the DL NAC errors */
5877 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5878 hba->saved_err &= ~UIC_ERROR;
5879 /* clear NAC error */
5880 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
Bean Huob0008622020-08-14 11:50:34 +02005881 if (!hba->saved_uic_err)
Yaniv Gardi583fa622016-03-10 17:37:13 +02005882 err_handling = false;
Yaniv Gardi583fa622016-03-10 17:37:13 +02005883 }
5884out:
5885 spin_unlock_irqrestore(hba->host->host_lock, flags);
5886 return err_handling;
5887}
5888
Adrian Hunter88b099002021-09-17 17:43:49 +03005889/* host lock must be held before calling this func */
5890static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
5891{
5892 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
5893 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
5894}
5895
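/*
 * Schedule the error handler. Must be called with the host lock held; the
 * state is marked EH_SCHEDULED_FATAL or EH_SCHEDULED_NON_FATAL before queueing
 * the work, unless the HBA is already in the error state.
 */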
Bart Van Assche267a59f2021-10-20 14:40:19 -07005896void ufshcd_schedule_eh_work(struct ufs_hba *hba)
Adrian Hunter88b099002021-09-17 17:43:49 +03005897{
Bart Van Assche267a59f2021-10-20 14:40:19 -07005898 lockdep_assert_held(hba->host->host_lock);
5899
Adrian Hunter88b099002021-09-17 17:43:49 +03005900 /* handle fatal errors only when link is not in error state */
5901 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
5902 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5903 ufshcd_is_saved_err_fatal(hba))
5904 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
5905 else
5906 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
5907 queue_work(hba->eh_wq, &hba->eh_work);
5908 }
5909}
5910
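/*
 * Helpers to pause/resume devfreq clock scaling: is_allowed is flipped under
 * the clk_scaling_lock write lock, and the scaling work is suspended or
 * resumed only when scaling is enabled.
 */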
Stanley Chu348e1bc2021-01-20 23:01:42 +08005911static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
5912{
5913 down_write(&hba->clk_scaling_lock);
5914 hba->clk_scaling.is_allowed = allow;
5915 up_write(&hba->clk_scaling_lock);
5916}
5917
5918static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
5919{
5920 if (suspend) {
5921 if (hba->clk_scaling.is_enabled)
5922 ufshcd_suspend_clkscaling(hba);
5923 ufshcd_clk_scaling_allow(hba, false);
5924 } else {
5925 ufshcd_clk_scaling_allow(hba, true);
5926 if (hba->clk_scaling.is_enabled)
5927 ufshcd_resume_clkscaling(hba);
5928 }
5929}
5930
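/*
 * Get the controller into a state in which the error handler can run safely:
 * resume and power it up if it was suspended, hold the clocks, pause clock
 * scaling, block SCSI requests, drain ufshcd_queuecommand() and cancel the
 * exception event worker.
 */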
Can Guoc72e79c2020-08-09 05:15:52 -07005931static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
5932{
Asutosh Dasb294ff32021-04-23 17:20:16 -07005933 ufshcd_rpm_get_sync(hba);
5934 if (pm_runtime_status_suspended(&hba->sdev_ufs_device->sdev_gendev) ||
5935 hba->is_sys_suspended) {
Can Guo88a92d62020-12-02 04:04:01 -08005936 enum ufs_pm_op pm_op;
5937
Can Guoc72e79c2020-08-09 05:15:52 -07005938 /*
Asutosh Dasb294ff32021-04-23 17:20:16 -07005939 * Don't assume anything of resume, if
Can Guoc72e79c2020-08-09 05:15:52 -07005940 * resume fails, irq and clocks can be OFF, and powers
5941 * can be OFF or in LPM.
5942 */
5943 ufshcd_setup_hba_vreg(hba, true);
5944 ufshcd_enable_irq(hba);
5945 ufshcd_setup_vreg(hba, true);
5946 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
5947 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
5948 ufshcd_hold(hba, false);
5949 if (!ufshcd_is_clkgating_allowed(hba))
5950 ufshcd_setup_clocks(hba, true);
5951 ufshcd_release(hba);
Can Guo88a92d62020-12-02 04:04:01 -08005952 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
5953 ufshcd_vops_resume(hba, pm_op);
Can Guoc72e79c2020-08-09 05:15:52 -07005954 } else {
5955 ufshcd_hold(hba, false);
Stanley Chu348e1bc2021-01-20 23:01:42 +08005956 if (ufshcd_is_clkscaling_supported(hba) &&
5957 hba->clk_scaling.is_enabled)
Can Guoc72e79c2020-08-09 05:15:52 -07005958 ufshcd_suspend_clkscaling(hba);
Stanley Chu348e1bc2021-01-20 23:01:42 +08005959 ufshcd_clk_scaling_allow(hba, false);
Can Guoc72e79c2020-08-09 05:15:52 -07005960 }
Can Guoaa53f582021-02-23 21:36:47 -08005961 ufshcd_scsi_block_requests(hba);
5962 /* Drain ufshcd_queuecommand() */
Bart Van Assche5675c382021-12-03 15:19:48 -08005963 synchronize_rcu();
Can Guoaa53f582021-02-23 21:36:47 -08005964 cancel_work_sync(&hba->eeh_work);
Can Guoc72e79c2020-08-09 05:15:52 -07005965}
5966
5967static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
5968{
Can Guoaa53f582021-02-23 21:36:47 -08005969 ufshcd_scsi_unblock_requests(hba);
Can Guoc72e79c2020-08-09 05:15:52 -07005970 ufshcd_release(hba);
Stanley Chu348e1bc2021-01-20 23:01:42 +08005971 if (ufshcd_is_clkscaling_supported(hba))
5972 ufshcd_clk_scaling_suspend(hba, false);
Asutosh Dasb294ff32021-04-23 17:20:16 -07005973 ufshcd_rpm_put(hba);
Can Guoc72e79c2020-08-09 05:15:52 -07005974}
5975
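/*
 * Error handling can stop early when the host is not powered, is shutting
 * down, has no UFS device WLUN, is already in the error state, or has nothing
 * left to handle (no saved errors, no forced reset, link not broken).
 */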
5976static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
5977{
Can Guo9cd20d32021-01-13 19:13:28 -08005978 return (!hba->is_powered || hba->shutting_down ||
Asutosh Dasb294ff32021-04-23 17:20:16 -07005979 !hba->sdev_ufs_device ||
Can Guo9cd20d32021-01-13 19:13:28 -08005980 hba->ufshcd_state == UFSHCD_STATE_ERROR ||
Can Guoc72e79c2020-08-09 05:15:52 -07005981 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
Can Guo9cd20d32021-01-13 19:13:28 -08005982 ufshcd_is_link_broken(hba))));
Can Guoc72e79c2020-08-09 05:15:52 -07005983}
5984
5985#ifdef CONFIG_PM
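/*
 * Clear runtime PM errors left behind by a failed suspend/resume: force the
 * UFS device WLUN (and the hba device if needed) back to RPM_ACTIVE and
 * request resume of consumer SCSI devices so blk_queue_enter() stops blocking.
 */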
5986static void ufshcd_recover_pm_error(struct ufs_hba *hba)
5987{
5988 struct Scsi_Host *shost = hba->host;
5989 struct scsi_device *sdev;
5990 struct request_queue *q;
5991 int ret;
5992
Can Guo88a92d62020-12-02 04:04:01 -08005993 hba->is_sys_suspended = false;
Can Guoc72e79c2020-08-09 05:15:52 -07005994 /*
Asutosh Dasb294ff32021-04-23 17:20:16 -07005995 * Set RPM status of wlun device to RPM_ACTIVE,
Can Guoc72e79c2020-08-09 05:15:52 -07005996 * this also clears its runtime error.
5997 */
Asutosh Dasb294ff32021-04-23 17:20:16 -07005998 ret = pm_runtime_set_active(&hba->sdev_ufs_device->sdev_gendev);
5999
6000 /* hba device might have a runtime error otherwise */
6001 if (ret)
6002 ret = pm_runtime_set_active(hba->dev);
Can Guoc72e79c2020-08-09 05:15:52 -07006003 /*
Asutosh Dasb294ff32021-04-23 17:20:16 -07006004 * If wlun device had runtime error, we also need to resume those
6005 * consumer scsi devices in case any of them has failed to be
6006 * resumed due to supplier runtime resume failure. This is to unblock
Can Guoc72e79c2020-08-09 05:15:52 -07006007 * blk_queue_enter in case there are bios waiting inside it.
6008 */
6009 if (!ret) {
6010 shost_for_each_device(sdev, shost) {
6011 q = sdev->request_queue;
6012 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
6013 q->rpm_status == RPM_SUSPENDING))
6014 pm_request_resume(q->dev);
6015 }
6016 }
6017}
6018#else
6019static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
6020{
6021}
6022#endif
6023
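/*
 * Compare the current PA_PWRMODE attribute against the cached TX/RX power
 * mode; a mismatch means the agreed power mode must be restored.
 */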
Can Guo2355b662020-08-24 19:07:06 -07006024static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
6025{
6026 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
6027 u32 mode;
6028
6029 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6030
6031 if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
6032 return true;
6033
6034 if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
6035 return true;
6036
6037 return false;
6038}
6039
Yaniv Gardi583fa622016-03-10 17:37:13 +02006040/**
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306041 * ufshcd_err_handler - handle UFS errors that require s/w attention
Adrian Hunter88b099002021-09-17 17:43:49 +03006042 * @work: pointer to work structure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306043 */
Adrian Hunter88b099002021-09-17 17:43:49 +03006044static void ufshcd_err_handler(struct work_struct *work)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306045{
Adrian Hunter87bf6a62021-10-02 18:45:50 +03006046 int retries = MAX_ERR_HANDLER_RETRIES;
Adrian Hunter88b099002021-09-17 17:43:49 +03006047 struct ufs_hba *hba;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306048 unsigned long flags;
Adrian Hunter87bf6a62021-10-02 18:45:50 +03006049 bool needs_restore;
6050 bool needs_reset;
6051 bool err_xfer;
6052 bool err_tm;
6053 int pmc_err;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306054 int tag;
6055
Adrian Hunter88b099002021-09-17 17:43:49 +03006056 hba = container_of(work, struct ufs_hba, eh_work);
6057
Bart Van Assche4693fad2021-10-20 14:40:18 -07006058 dev_info(hba->dev,
6059 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
6060 __func__, ufshcd_state_name[hba->ufshcd_state],
6061 hba->is_powered, hba->shutting_down, hba->saved_err,
6062 hba->saved_uic_err, hba->force_reset,
6063 ufshcd_is_link_broken(hba) ? "; link is broken" : "");
6064
Can Guo9cd20d32021-01-13 19:13:28 -08006065 down(&hba->host_sem);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306066 spin_lock_irqsave(hba->host->host_lock, flags);
Can Guoc72e79c2020-08-09 05:15:52 -07006067 if (ufshcd_err_handling_should_stop(hba)) {
Can Guo4db7a232020-08-09 05:15:51 -07006068 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6069 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6070 spin_unlock_irqrestore(hba->host->host_lock, flags);
Can Guo9cd20d32021-01-13 19:13:28 -08006071 up(&hba->host_sem);
Can Guo4db7a232020-08-09 05:15:51 -07006072 return;
6073 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306074 ufshcd_set_eh_in_progress(hba);
Can Guo4db7a232020-08-09 05:15:51 -07006075 spin_unlock_irqrestore(hba->host->host_lock, flags);
Can Guoc72e79c2020-08-09 05:15:52 -07006076 ufshcd_err_handling_prepare(hba);
Can Guoa45f9372021-05-24 01:36:57 -07006077 /* Complete requests that have door-bell cleared by h/w */
6078 ufshcd_complete_requests(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306079 spin_lock_irqsave(hba->host->host_lock, flags);
Adrian Hunter87bf6a62021-10-02 18:45:50 +03006080again:
6081 needs_restore = false;
6082 needs_reset = false;
6083 err_xfer = false;
6084 err_tm = false;
6085
Can Guoaa53f582021-02-23 21:36:47 -08006086 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6087 hba->ufshcd_state = UFSHCD_STATE_RESET;
Can Guo88a92d62020-12-02 04:04:01 -08006088 /*
6089 * A full reset and restore might have happened after preparation
6090 * is finished; double check whether we should stop.
6091 */
6092 if (ufshcd_err_handling_should_stop(hba))
6093 goto skip_err_handling;
6094
Yaniv Gardi583fa622016-03-10 17:37:13 +02006095 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6096 bool ret;
6097
6098 spin_unlock_irqrestore(hba->host->host_lock, flags);
6099 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6100 ret = ufshcd_quirk_dl_nac_errors(hba);
6101 spin_lock_irqsave(hba->host->host_lock, flags);
Can Guo88a92d62020-12-02 04:04:01 -08006102 if (!ret && ufshcd_err_handling_should_stop(hba))
Yaniv Gardi583fa622016-03-10 17:37:13 +02006103 goto skip_err_handling;
6104 }
Can Guo4db7a232020-08-09 05:15:51 -07006105
Can Guo2355b662020-08-24 19:07:06 -07006106 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6107 (hba->saved_uic_err &&
6108 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
Can Guoc3be8d1e2020-08-09 05:15:53 -07006109 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6110
6111 spin_unlock_irqrestore(hba->host->host_lock, flags);
6112 ufshcd_print_host_state(hba);
6113 ufshcd_print_pwr_info(hba);
Stanley Chue965e5e2020-12-05 19:58:59 +08006114 ufshcd_print_evt_hist(hba);
Can Guoc3be8d1e2020-08-09 05:15:53 -07006115 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6116 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
6117 spin_lock_irqsave(hba->host->host_lock, flags);
6118 }
6119
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006120 /*
6121 * if host reset is required then skip clearing the pending
Can Guo2df74b62019-11-25 22:53:33 -08006122 * transfers forcefully because they will get cleared during
6123 * host reset and restore
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006124 */
Can Guo88a92d62020-12-02 04:04:01 -08006125 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6126 ufshcd_is_saved_err_fatal(hba) ||
6127 ((hba->saved_err & UIC_ERROR) &&
6128 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6129 UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6130 needs_reset = true;
Can Guo2355b662020-08-24 19:07:06 -07006131 goto do_reset;
Can Guo88a92d62020-12-02 04:04:01 -08006132 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006133
Can Guo2355b662020-08-24 19:07:06 -07006134 /*
6135 * If LINERESET was caught, UFS might have been put to PWM mode,
6136 * check if power mode restore is needed.
6137 */
6138 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6139 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6140 if (!hba->saved_uic_err)
6141 hba->saved_err &= ~UIC_ERROR;
6142 spin_unlock_irqrestore(hba->host->host_lock, flags);
6143 if (ufshcd_is_pwr_mode_restore_needed(hba))
6144 needs_restore = true;
6145 spin_lock_irqsave(hba->host->host_lock, flags);
6146 if (!hba->saved_err && !needs_restore)
6147 goto skip_err_handling;
6148 }
6149
6150 hba->silence_err_logs = true;
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006151 /* release lock as clear command might sleep */
6152 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306153 /* Clear pending transfer requests */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006154 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
Can Guo307348f2020-08-24 19:07:05 -07006155 if (ufshcd_try_to_abort_task(hba, tag)) {
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006156 err_xfer = true;
6157 goto lock_skip_pending_xfer_clear;
6158 }
Bart Van Assche4693fad2021-10-20 14:40:18 -07006159 dev_err(hba->dev, "Aborted tag %d / CDB %#02x\n", tag,
6160 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006161 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306162
6163 /* Clear pending task management requests */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006164 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6165 if (ufshcd_clear_tm_cmd(hba, tag)) {
6166 err_tm = true;
6167 goto lock_skip_pending_xfer_clear;
6168 }
6169 }
6170
6171lock_skip_pending_xfer_clear:
Bart Van Assche11682522021-10-20 14:40:15 -07006172 /* Complete the requests that are cleared by s/w */
6173 ufshcd_complete_requests(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306174
Can Guoa45f9372021-05-24 01:36:57 -07006175 spin_lock_irqsave(hba->host->host_lock, flags);
6176 hba->silence_err_logs = false;
Can Guo2355b662020-08-24 19:07:06 -07006177 if (err_xfer || err_tm) {
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006178 needs_reset = true;
Can Guo2355b662020-08-24 19:07:06 -07006179 goto do_reset;
6180 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006181
Can Guo2355b662020-08-24 19:07:06 -07006182 /*
6183 * After all reqs and tasks are cleared from doorbell,
6184 * now it is safe to restore the power mode.
6185 */
6186 if (needs_restore) {
6187 spin_unlock_irqrestore(hba->host->host_lock, flags);
6188 /*
6189 * Hold the scaling lock just in case dev cmds
6190 * are sent via bsg and/or sysfs.
6191 */
6192 down_write(&hba->clk_scaling_lock);
6193 hba->force_pmc = true;
6194 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6195 if (pmc_err) {
6196 needs_reset = true;
6197 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6198 __func__, pmc_err);
6199 }
6200 hba->force_pmc = false;
6201 ufshcd_print_pwr_info(hba);
6202 up_write(&hba->clk_scaling_lock);
6203 spin_lock_irqsave(hba->host->host_lock, flags);
6204 }
6205
6206do_reset:
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306207 /* Fatal errors need reset */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006208 if (needs_reset) {
Adrian Hunter87bf6a62021-10-02 18:45:50 +03006209 int err;
6210
Can Guo4db7a232020-08-09 05:15:51 -07006211 hba->force_reset = false;
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006212 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306213 err = ufshcd_reset_and_restore(hba);
Can Guo4db7a232020-08-09 05:15:51 -07006214 if (err)
6215 dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6216 __func__, err);
Can Guoc72e79c2020-08-09 05:15:52 -07006217 else
6218 ufshcd_recover_pm_error(hba);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006219 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306220 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006221
Yaniv Gardi583fa622016-03-10 17:37:13 +02006222skip_err_handling:
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006223 if (!needs_reset) {
Can Guo4db7a232020-08-09 05:15:51 -07006224 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6225 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006226 if (hba->saved_err || hba->saved_uic_err)
6227 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6228 __func__, hba->saved_err, hba->saved_uic_err);
6229 }
Adrian Hunter87bf6a62021-10-02 18:45:50 +03006230 /* Exit in an operational state or dead */
6231 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
6232 hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6233 if (--retries)
6234 goto again;
6235 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6236 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306237 ufshcd_clear_eh_in_progress(hba);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006238 spin_unlock_irqrestore(hba->host->host_lock, flags);
Can Guoc72e79c2020-08-09 05:15:52 -07006239 ufshcd_err_handling_unprepare(hba);
Can Guo9cd20d32021-01-13 19:13:28 -08006240 up(&hba->host_sem);
Bart Van Assche4693fad2021-10-20 14:40:18 -07006241
6242 dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
6243 ufshcd_state_name[hba->ufshcd_state]);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306244}
6245
6246/**
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306247 * ufshcd_update_uic_error - check and set fatal UIC error flags.
6248 * @hba: per-adapter instance
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006249 *
6250 * Returns
6251 * IRQ_HANDLED - If interrupt is valid
6252 * IRQ_NONE - If invalid interrupt
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306253 */
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006254static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306255{
6256 u32 reg;
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006257 irqreturn_t retval = IRQ_NONE;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306258
Can Guo2355b662020-08-24 19:07:06 -07006259 /* PHY layer error */
Dolev Ravivfb7b45f2016-11-23 16:32:32 -08006260 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
Dolev Ravivfb7b45f2016-11-23 16:32:32 -08006261 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
Can Guo2355b662020-08-24 19:07:06 -07006262 (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
Stanley Chue965e5e2020-12-05 19:58:59 +08006263 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
Dolev Ravivfb7b45f2016-11-23 16:32:32 -08006264 /*
6265 * To know whether this error is fatal or not, DB timeout
6266 * must be checked but this error is handled separately.
6267 */
Can Guo2355b662020-08-24 19:07:06 -07006268 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6269 dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6270 __func__);
6271
6272 /* Got a LINERESET indication. */
6273 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6274 struct uic_command *cmd = NULL;
6275
6276 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6277 if (hba->uic_async_done && hba->active_uic_cmd)
6278 cmd = hba->active_uic_cmd;
6279 /*
6280 * Ignore the LINERESET during power mode change
6281 * operation via DME_SET command.
6282 */
6283 if (cmd && (cmd->command == UIC_CMD_DME_SET))
6284 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6285 }
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006286 retval |= IRQ_HANDLED;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006287 }
Dolev Ravivfb7b45f2016-11-23 16:32:32 -08006288
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306289 /* PA_INIT_ERROR is fatal and needs UIC reset */
6290 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006291 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6292 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
Stanley Chue965e5e2020-12-05 19:58:59 +08006293 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006294
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006295 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6296 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6297 else if (hba->dev_quirks &
6298 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6299 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6300 hba->uic_error |=
6301 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6302 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6303 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6304 }
6305 retval |= IRQ_HANDLED;
Yaniv Gardi583fa622016-03-10 17:37:13 +02006306 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306307
6308	/* UIC NL/TL/DME errors need software retry */
6309 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006310 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6311 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
Stanley Chue965e5e2020-12-05 19:58:59 +08006312 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306313 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006314 retval |= IRQ_HANDLED;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006315 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306316
6317 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006318 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6319 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
Stanley Chue965e5e2020-12-05 19:58:59 +08006320 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306321 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006322 retval |= IRQ_HANDLED;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006323 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306324
6325 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006326 if ((reg & UIC_DME_ERROR) &&
6327 (reg & UIC_DME_ERROR_CODE_MASK)) {
Stanley Chue965e5e2020-12-05 19:58:59 +08006328 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306329 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006330 retval |= IRQ_HANDLED;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006331 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306332
6333 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6334 __func__, hba->uic_error);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006335 return retval;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306336}
6337
6338/**
6339 * ufshcd_check_errors - Check for errors that need s/w attention
6340 * @hba: per-adapter instance
Can Guoa45f9372021-05-24 01:36:57 -07006341 * @intr_status: interrupt status generated by the controller
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006342 *
6343 * Returns
6344 * IRQ_HANDLED - If interrupt is valid
6345 * IRQ_NONE - If invalid interrupt
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306346 */
Can Guoa45f9372021-05-24 01:36:57 -07006347static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306348{
6349 bool queue_eh_work = false;
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006350 irqreturn_t retval = IRQ_NONE;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306351
Can Guoa45f9372021-05-24 01:36:57 -07006352 spin_lock(hba->host->host_lock);
6353 hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6354
Stanley Chud3c615b2019-07-10 21:38:19 +08006355 if (hba->errors & INT_FATAL_ERRORS) {
Stanley Chue965e5e2020-12-05 19:58:59 +08006356 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6357 hba->errors);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306358 queue_eh_work = true;
Stanley Chud3c615b2019-07-10 21:38:19 +08006359 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306360
6361 if (hba->errors & UIC_ERROR) {
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306362 hba->uic_error = 0;
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006363 retval = ufshcd_update_uic_error(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306364 if (hba->uic_error)
6365 queue_eh_work = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306366 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306367
Stanley Chu82174442019-05-21 14:44:54 +08006368 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6369 dev_err(hba->dev,
6370 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6371 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6372 "Enter" : "Exit",
6373 hba->errors, ufshcd_get_upmcrs(hba));
Stanley Chue965e5e2020-12-05 19:58:59 +08006374 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
Stanley Chud3c615b2019-07-10 21:38:19 +08006375 hba->errors);
Can Guo4db7a232020-08-09 05:15:51 -07006376 ufshcd_set_link_broken(hba);
Stanley Chu82174442019-05-21 14:44:54 +08006377 queue_eh_work = true;
6378 }
6379
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306380 if (queue_eh_work) {
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006381 /*
6382 * update the transfer error masks to sticky bits, let's do this
6383 * irrespective of current ufshcd_state.
6384 */
6385 hba->saved_err |= hba->errors;
6386 hba->saved_uic_err |= hba->uic_error;
6387
Can Guo4db7a232020-08-09 05:15:51 -07006388 /* dump controller state before resetting */
Can Guoace38042020-12-02 04:04:03 -08006389 if ((hba->saved_err &
6390 (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
Can Guo2355b662020-08-24 19:07:06 -07006391 (hba->saved_uic_err &&
6392 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
Can Guo4db7a232020-08-09 05:15:51 -07006393 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
Dolev Raviv66cc8202016-12-22 18:39:42 -08006394 __func__, hba->saved_err,
6395 hba->saved_uic_err);
Can Guoc3be8d1e2020-08-09 05:15:53 -07006396 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6397 "host_regs: ");
Can Guo4db7a232020-08-09 05:15:51 -07006398 ufshcd_print_pwr_info(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306399 }
Adrian Hunter88b099002021-09-17 17:43:49 +03006400 ufshcd_schedule_eh_work(hba);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006401 retval |= IRQ_HANDLED;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306402 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306403 /*
6404	 * If queue_eh_work is false, the remaining errors are either
6405	 * non-fatal ones that the host recovers from on its own without
6406	 * s/w intervention, or errors that will be handled by the SCSI
6407	 * core layer.
6408 */
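	/*
	 * The error bits were folded into saved_err/saved_uic_err above when
	 * the error handler was scheduled; clear the per-interrupt copies
	 * before dropping the host lock.
	 */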
Can Guoa45f9372021-05-24 01:36:57 -07006409 hba->errors = 0;
6410 hba->uic_error = 0;
6411 spin_unlock(hba->host->host_lock);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006412 return retval;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306413}
6414
6415/**
6416 * ufshcd_tmc_handler - handle task management function completion
6417 * @hba: per adapter instance
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006418 *
6419 * Returns
6420 * IRQ_HANDLED - If interrupt is valid
6421 * IRQ_NONE - If invalid interrupt
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306422 */
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006423static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306424{
Adrian Hunterf5ef3362021-09-22 12:10:59 +03006425 unsigned long flags, pending, issued;
6426 irqreturn_t ret = IRQ_NONE;
6427 int tag;
6428
6429 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306430
Can Guoa45f9372021-05-24 01:36:57 -07006431 spin_lock_irqsave(hba->host->host_lock, flags);
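	/*
	 * A task has completed once it is still marked outstanding but its
	 * doorbell bit has been cleared by the controller.
	 */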
Adrian Hunterf5ef3362021-09-22 12:10:59 +03006432 issued = hba->outstanding_tasks & ~pending;
6433 for_each_set_bit(tag, &issued, hba->nutmrs) {
6434 struct request *req = hba->tmf_rqs[tag];
6435 struct completion *c = req->end_io_data;
6436
6437 complete(c);
6438 ret = IRQ_HANDLED;
6439 }
Can Guoa45f9372021-05-24 01:36:57 -07006440 spin_unlock_irqrestore(hba->host->host_lock, flags);
6441
Adrian Hunterf5ef3362021-09-22 12:10:59 +03006442 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306443}
6444
6445/**
6446 * ufshcd_sl_intr - Interrupt service routine
6447 * @hba: per adapter instance
6448 * @intr_status: contains interrupts generated by the controller
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006449 *
6450 * Returns
6451 * IRQ_HANDLED - If interrupt is valid
6452 * IRQ_NONE - If invalid interrupt
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306453 */
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006454static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306455{
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006456 irqreturn_t retval = IRQ_NONE;
6457
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05306458 if (intr_status & UFSHCD_UIC_MASK)
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006459 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306460
Can Guoa45f9372021-05-24 01:36:57 -07006461 if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6462 retval |= ufshcd_check_errors(hba, intr_status);
6463
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306464 if (intr_status & UTP_TASK_REQ_COMPL)
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006465 retval |= ufshcd_tmc_handler(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306466
6467 if (intr_status & UTP_TRANSFER_REQ_COMPL)
Bart Van Assche11682522021-10-20 14:40:15 -07006468 retval |= ufshcd_transfer_req_compl(hba);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006469
6470 return retval;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306471}
6472
6473/**
6474 * ufshcd_intr - Main interrupt service routine
6475 * @irq: irq number
6476 * @__hba: pointer to adapter instance
6477 *
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006478 * Returns
6479 * IRQ_HANDLED - If interrupt is valid
6480 * IRQ_NONE - If invalid interrupt
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306481 */
6482static irqreturn_t ufshcd_intr(int irq, void *__hba)
6483{
Adrian Hunter127d5f72020-08-11 16:39:36 +03006484 u32 intr_status, enabled_intr_status = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306485 irqreturn_t retval = IRQ_NONE;
6486 struct ufs_hba *hba = __hba;
Venkat Gopalakrishnan7f6ba4f2018-05-03 16:37:20 +05306487 int retries = hba->nutrs;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306488
Seungwon Jeonb873a2752013-06-26 22:39:26 +05306489 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
Can Guo3f8af602020-08-09 05:15:50 -07006490 hba->ufs_stats.last_intr_status = intr_status;
6491 hba->ufs_stats.last_intr_ts = ktime_get();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306492
Venkat Gopalakrishnan7f6ba4f2018-05-03 16:37:20 +05306493 /*
6494 * There could be max of hba->nutrs reqs in flight and in worst case
6495 * if the reqs get finished 1 by 1 after the interrupt status is
6496 * read, make sure we handle them by checking the interrupt status
6497 * again in a loop until we process all of the reqs before returning.
6498 */
Adrian Hunter127d5f72020-08-11 16:39:36 +03006499 while (intr_status && retries--) {
Venkat Gopalakrishnan7f6ba4f2018-05-03 16:37:20 +05306500 enabled_intr_status =
6501 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
Bean Huo60ec3752021-01-18 21:12:33 +01006502 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006503 if (enabled_intr_status)
6504 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02006505
Venkat Gopalakrishnan7f6ba4f2018-05-03 16:37:20 +05306506 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
Adrian Hunter127d5f72020-08-11 16:39:36 +03006507 }
Venkat Gopalakrishnan7f6ba4f2018-05-03 16:37:20 +05306508
Jaegeuk Kimeeb1b552021-01-07 10:53:16 -08006509 if (enabled_intr_status && retval == IRQ_NONE &&
Bart Van Assche40d2fd02021-05-19 13:20:57 -07006510 (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
6511 hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
Jaegeuk Kimeeb1b552021-01-07 10:53:16 -08006512 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6513 __func__,
6514 intr_status,
6515 hba->ufs_stats.last_intr_status,
6516 enabled_intr_status);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08006517 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6518 }
6519
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306520 return retval;
6521}
6522
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306523static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6524{
6525 int err = 0;
6526 u32 mask = 1 << tag;
6527 unsigned long flags;
6528
6529 if (!test_bit(tag, &hba->outstanding_tasks))
6530 goto out;
6531
6532 spin_lock_irqsave(hba->host->host_lock, flags);
Alim Akhtar1399c5b2018-05-06 15:44:15 +05306533 ufshcd_utmrl_clear(hba, tag);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306534 spin_unlock_irqrestore(hba->host->host_lock, flags);
6535
6536 /* poll for max. 1 sec to clear door bell register by h/w */
6537 err = ufshcd_wait_for_register(hba,
6538 REG_UTP_TASK_REQ_DOOR_BELL,
Bart Van Assche5cac1092020-05-07 15:27:50 -07006539 mask, 0, 1000, 1000);
Bart Van Assche4693fad2021-10-20 14:40:18 -07006540
6541 dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
6542		tag, err ? "failed" : "succeeded");
6543
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306544out:
6545 return err;
6546}
6547
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03006548static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6549 struct utp_task_req_desc *treq, u8 tm_function)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306550{
Bart Van Assche69a6c262019-12-09 10:13:09 -08006551 struct request_queue *q = hba->tmf_queue;
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03006552 struct Scsi_Host *host = hba->host;
Bart Van Assche69a6c262019-12-09 10:13:09 -08006553 DECLARE_COMPLETION_ONSTACK(wait);
6554 struct request *req;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306555 unsigned long flags;
Can Guo4b42d5572021-04-01 00:39:09 -07006556 int task_tag, err;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306557
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306558 /*
Christoph Hellwig0bf6d962021-10-25 09:05:07 +02006559 * blk_mq_alloc_request() is used here only to get a free tag.
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306560 */
Christoph Hellwig0bf6d962021-10-25 09:05:07 +02006561 req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
Jaegeuk Kimeeb1b552021-01-07 10:53:16 -08006562 if (IS_ERR(req))
6563 return PTR_ERR(req);
6564
Bart Van Assche69a6c262019-12-09 10:13:09 -08006565 req->end_io_data = &wait;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006566 ufshcd_hold(hba, false);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306567
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306568 spin_lock_irqsave(host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306569
Can Guo4b42d5572021-04-01 00:39:09 -07006570 task_tag = req->tag;
Adrian Hunterf5ef3362021-09-22 12:10:59 +03006571 hba->tmf_rqs[req->tag] = req;
Gustavo A. R. Silva1352eec2021-03-31 17:43:38 -05006572 treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03006573
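	/*
	 * Copy the request into the UTP task management request descriptor
	 * list slot that corresponds to this tag.
	 */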
Can Guo4b42d5572021-04-01 00:39:09 -07006574 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
6575 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
Kiwoong Kimd2877be2016-11-10 21:16:15 +09006576
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306577 /* send command to the controller */
Can Guo4b42d5572021-04-01 00:39:09 -07006578 __set_bit(task_tag, &hba->outstanding_tasks);
Yaniv Gardi897efe62016-02-01 15:02:48 +02006579
Can Guo4b42d5572021-04-01 00:39:09 -07006580 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
Gilad Bronerad1a1b92016-10-17 17:09:36 -07006581 /* Make sure that doorbell is committed immediately */
6582 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306583
6584 spin_unlock_irqrestore(host->host_lock, flags);
6585
Bean Huo28fa68f2021-01-05 12:34:42 +01006586 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
Ohad Sharabi6667e6d2018-03-28 12:42:18 +03006587
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306588 /* wait until the task management command is completed */
Bart Van Assche69a6c262019-12-09 10:13:09 -08006589 err = wait_for_completion_io_timeout(&wait,
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306590 msecs_to_jiffies(TM_CMD_TIMEOUT));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306591 if (!err) {
Bart Van Assche69a6c262019-12-09 10:13:09 -08006592 /*
6593 * Make sure that ufshcd_compl_tm() does not trigger a
6594 * use-after-free.
6595 */
6596 req->end_io_data = NULL;
Bean Huo28fa68f2021-01-05 12:34:42 +01006597 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306598 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6599 __func__, tm_function);
Can Guo4b42d5572021-04-01 00:39:09 -07006600 if (ufshcd_clear_tm_cmd(hba, task_tag))
6601 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6602 __func__, task_tag);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306603 err = -ETIMEDOUT;
6604 } else {
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03006605 err = 0;
Can Guo4b42d5572021-04-01 00:39:09 -07006606 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03006607
Bean Huo28fa68f2021-01-05 12:34:42 +01006608 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306609 }
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306610
Stanley Chub5572172019-08-19 21:43:28 +08006611 spin_lock_irqsave(hba->host->host_lock, flags);
Adrian Hunterf5ef3362021-09-22 12:10:59 +03006612 hba->tmf_rqs[req->tag] = NULL;
Can Guo4b42d5572021-04-01 00:39:09 -07006613 __clear_bit(task_tag, &hba->outstanding_tasks);
Stanley Chub5572172019-08-19 21:43:28 +08006614 spin_unlock_irqrestore(hba->host->host_lock, flags);
6615
Can Guo4b42d5572021-04-01 00:39:09 -07006616 ufshcd_release(hba);
Christoph Hellwig0bf6d962021-10-25 09:05:07 +02006617 blk_mq_free_request(req);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306618
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306619 return err;
6620}
6621
6622/**
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03006623 * ufshcd_issue_tm_cmd - issues task management commands to controller
6624 * @hba: per adapter instance
6625 * @lun_id: LUN ID to which TM command is sent
6626 * @task_id: task ID to which the TM command is applicable
6627 * @tm_function: task management function opcode
6628 * @tm_response: task management service response return value
6629 *
6630 * Returns non-zero value on error, zero on success.
6631 */
6632static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6633 u8 tm_function, u8 *tm_response)
6634{
6635 struct utp_task_req_desc treq = { { 0 }, };
Bart Van Assche957d63e2021-10-20 14:40:17 -07006636 enum utp_ocs ocs_value;
6637 int err;
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03006638
6639 /* Configure task request descriptor */
6640 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6641 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6642
6643 /* Configure task request UPIU */
Gustavo A. R. Silva1352eec2021-03-31 17:43:38 -05006644 treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03006645 cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
Gustavo A. R. Silva1352eec2021-03-31 17:43:38 -05006646 treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03006647
6648 /*
6649 * The host shall provide the same value for LUN field in the basic
6650 * header and for Input Parameter.
6651 */
Gustavo A. R. Silva1352eec2021-03-31 17:43:38 -05006652 treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
6653 treq.upiu_req.input_param2 = cpu_to_be32(task_id);
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03006654
6655 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6656 if (err == -ETIMEDOUT)
6657 return err;
6658
6659 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6660 if (ocs_value != OCS_SUCCESS)
6661 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6662 __func__, ocs_value);
6663 else if (tm_response)
Gustavo A. R. Silva1352eec2021-03-31 17:43:38 -05006664 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03006665 MASK_TM_SERVICE_RESP;
6666 return err;
6667}
6668
6669/**
Avri Altman5e0a86e2018-10-07 17:30:37 +03006670 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6671 * @hba: per-adapter instance
6672 * @req_upiu: upiu request
6673 * @rsp_upiu: upiu reply
Avri Altman5e0a86e2018-10-07 17:30:37 +03006674 * @desc_buff: pointer to descriptor buffer, NULL if NA
6675 * @buff_len: descriptor size, 0 if NA
Bart Van Assched0e97602019-10-29 16:07:08 -07006676 * @cmd_type: specifies the type (NOP, Query...)
Avri Altman5e0a86e2018-10-07 17:30:37 +03006677 * @desc_op: descriptor operation
6678 *
6679 * These types of requests use the UTP Transfer Request Descriptor (UTRD).
6680 * Therefore, they "ride" the device management infrastructure: they use its
6681 * tag and task work queues.
6682 *
6683 * Since there is only one available tag for device management commands,
6684 * the caller is expected to hold the hba->dev_cmd.lock mutex.
6685 */
6686static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6687 struct utp_upiu_req *req_upiu,
6688 struct utp_upiu_req *rsp_upiu,
6689 u8 *desc_buff, int *buff_len,
Bart Van Assche7f674c32019-10-29 16:07:09 -07006690 enum dev_cmd_type cmd_type,
Avri Altman5e0a86e2018-10-07 17:30:37 +03006691 enum query_opcode desc_op)
6692{
Bart Van Assche8a686f22021-07-21 20:34:26 -07006693 DECLARE_COMPLETION_ONSTACK(wait);
Bart Van Assche945c3cc2021-12-03 15:19:42 -08006694 const u32 tag = hba->reserved_slot;
Avri Altman5e0a86e2018-10-07 17:30:37 +03006695 struct ufshcd_lrb *lrbp;
6696 int err = 0;
Bean Huoa23064c2020-07-06 14:39:36 +02006697 u8 upiu_flags;
Avri Altman5e0a86e2018-10-07 17:30:37 +03006698
Bart Van Assche945c3cc2021-12-03 15:19:42 -08006699 /* Protects use of hba->reserved_slot. */
6700 lockdep_assert_held(&hba->dev_cmd.lock);
Avri Altman5e0a86e2018-10-07 17:30:37 +03006701
Bart Van Assche945c3cc2021-12-03 15:19:42 -08006702 down_read(&hba->clk_scaling_lock);
Avri Altman5e0a86e2018-10-07 17:30:37 +03006703
Can Guoa45f9372021-05-24 01:36:57 -07006704 lrbp = &hba->lrb[tag];
Can Guo7a7e66c2020-12-02 04:04:02 -08006705 WARN_ON(lrbp->cmd);
Avri Altman5e0a86e2018-10-07 17:30:37 +03006706 lrbp->cmd = NULL;
6707 lrbp->sense_bufflen = 0;
6708 lrbp->sense_buffer = NULL;
6709 lrbp->task_tag = tag;
6710 lrbp->lun = 0;
6711 lrbp->intr_cmd = true;
Satya Tangiraladf043c742020-07-06 20:04:14 +00006712 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
Avri Altman5e0a86e2018-10-07 17:30:37 +03006713 hba->dev_cmd.type = cmd_type;
6714
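	/*
	 * UFSHCI 1.1 and earlier use a dedicated device-management command
	 * type; later host controllers use the generic UFS storage type.
	 */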
Caleb Connolly51428812021-03-10 15:33:42 +00006715 if (hba->ufs_version <= ufshci_version(1, 1))
Avri Altman5e0a86e2018-10-07 17:30:37 +03006716 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
Caleb Connolly51428812021-03-10 15:33:42 +00006717 else
Avri Altman5e0a86e2018-10-07 17:30:37 +03006718 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
Avri Altman5e0a86e2018-10-07 17:30:37 +03006719
6720 /* update the task tag in the request upiu */
6721 req_upiu->header.dword_0 |= cpu_to_be32(tag);
6722
6723 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6724
6725 /* just copy the upiu request as it is */
6726 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6727 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6728 /* The Data Segment Area is optional depending upon the query
6729 * function value. for WRITE DESCRIPTOR, the data segment
6730 * follows right after the tsf.
6731 */
6732 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6733 *buff_len = 0;
6734 }
6735
6736 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
6737
6738 hba->dev_cmd.complete = &wait;
6739
Bean Huo10542482021-05-31 12:43:08 +02006740 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
Avri Altman5e0a86e2018-10-07 17:30:37 +03006741
Can Guoa45f9372021-05-24 01:36:57 -07006742 ufshcd_send_command(hba, tag);
Avri Altman5e0a86e2018-10-07 17:30:37 +03006743 /*
6744 * ignore the returning value here - ufshcd_check_query_response is
6745 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
6746 * read the response directly ignoring all errors.
6747 */
6748 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6749
6750 /* just copy the upiu response as it is */
6751 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
Avri Altman4bbbe242019-02-20 09:11:13 +02006752 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6753 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6754 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6755 MASK_QUERY_DATA_SEG_LEN;
6756
6757 if (*buff_len >= resp_len) {
6758 memcpy(desc_buff, descp, resp_len);
6759 *buff_len = resp_len;
6760 } else {
Bean Huo3d4881d2019-11-12 23:34:35 +01006761 dev_warn(hba->dev,
6762 "%s: rsp size %d is bigger than buffer size %d",
6763 __func__, resp_len, *buff_len);
Avri Altman4bbbe242019-02-20 09:11:13 +02006764 *buff_len = 0;
6765 err = -EINVAL;
6766 }
6767 }
Bean Huo10542482021-05-31 12:43:08 +02006768 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
6769 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
Avri Altman5e0a86e2018-10-07 17:30:37 +03006770
Avri Altman5e0a86e2018-10-07 17:30:37 +03006771 up_read(&hba->clk_scaling_lock);
6772 return err;
6773}
6774
6775/**
6776 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
6777 * @hba: per-adapter instance
6778 * @req_upiu: upiu request
6779 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
6780 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
6781 * @desc_buff: pointer to descriptor buffer, NULL if NA
6782 * @buff_len: descriptor size, 0 if NA
6783 * @desc_op: descriptor operation
6784 *
6785 * Supports UTP Transfer requests (nop and query), and UTP Task
6786 * Management requests.
6787 * It is up to the caller to fill the UPIU content properly, as it will
6788 * be copied without any further input validation.
6789 */
6790int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6791 struct utp_upiu_req *req_upiu,
6792 struct utp_upiu_req *rsp_upiu,
6793 int msgcode,
6794 u8 *desc_buff, int *buff_len,
6795 enum query_opcode desc_op)
6796{
6797 int err;
Bart Van Assche7f674c32019-10-29 16:07:09 -07006798 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
Avri Altman5e0a86e2018-10-07 17:30:37 +03006799 struct utp_task_req_desc treq = { { 0 }, };
Bart Van Assche957d63e2021-10-20 14:40:17 -07006800 enum utp_ocs ocs_value;
Avri Altman5e0a86e2018-10-07 17:30:37 +03006801 u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6802
Avri Altman5e0a86e2018-10-07 17:30:37 +03006803 switch (msgcode) {
6804 case UPIU_TRANSACTION_NOP_OUT:
6805 cmd_type = DEV_CMD_TYPE_NOP;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05006806 fallthrough;
Avri Altman5e0a86e2018-10-07 17:30:37 +03006807 case UPIU_TRANSACTION_QUERY_REQ:
6808 ufshcd_hold(hba, false);
6809 mutex_lock(&hba->dev_cmd.lock);
6810 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6811 desc_buff, buff_len,
6812 cmd_type, desc_op);
6813 mutex_unlock(&hba->dev_cmd.lock);
6814 ufshcd_release(hba);
6815
6816 break;
6817 case UPIU_TRANSACTION_TASK_REQ:
6818 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6819 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6820
Gustavo A. R. Silva1352eec2021-03-31 17:43:38 -05006821 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
Avri Altman5e0a86e2018-10-07 17:30:37 +03006822
6823 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6824 if (err == -ETIMEDOUT)
6825 break;
6826
6827 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6828 if (ocs_value != OCS_SUCCESS) {
6829 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6830 ocs_value);
6831 break;
6832 }
6833
Gustavo A. R. Silva1352eec2021-03-31 17:43:38 -05006834 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
Avri Altman5e0a86e2018-10-07 17:30:37 +03006835
6836 break;
6837 default:
6838 err = -EINVAL;
6839
6840 break;
6841 }
6842
Avri Altman5e0a86e2018-10-07 17:30:37 +03006843 return err;
6844}
6845
6846/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306847 * ufshcd_eh_device_reset_handler - device reset handler registered to
6848 * scsi layer.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306849 * @cmd: SCSI command pointer
6850 *
6851 * Returns SUCCESS/FAILED
6852 */
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306853static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306854{
6855 struct Scsi_Host *host;
6856 struct ufs_hba *hba;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306857 u32 pos;
6858 int err;
Can Guo35fc4cd2020-12-28 04:04:36 -08006859 u8 resp = 0xF, lun;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306860
6861 host = cmd->device->host;
6862 hba = shost_priv(host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306863
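	/*
	 * Issue a LOGICAL UNIT RESET task management request to the LUN of
	 * the failed command.
	 */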
Can Guo35fc4cd2020-12-28 04:04:36 -08006864 lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
6865 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306866 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306867 if (!err)
6868 err = resp;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306869 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306870 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306871
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306872 /* clear the commands that were pending for corresponding LUN */
6873 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
Can Guo35fc4cd2020-12-28 04:04:36 -08006874 if (hba->lrb[pos].lun == lun) {
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306875 err = ufshcd_clear_cmd(hba, pos);
6876 if (err)
6877 break;
Bart Van Assche11682522021-10-20 14:40:15 -07006878 __ufshcd_transfer_req_compl(hba, 1U << pos);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306879 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306880 }
Gilad Broner7fabb772017-02-03 16:56:50 -08006881
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306882out:
Gilad Broner7fabb772017-02-03 16:56:50 -08006883 hba->req_abort_count = 0;
Stanley Chue965e5e2020-12-05 19:58:59 +08006884 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306885 if (!err) {
6886 err = SUCCESS;
6887 } else {
6888 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6889 err = FAILED;
6890 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306891 return err;
6892}
6893
Gilad Bronere0b299e2017-02-03 16:56:40 -08006894static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6895{
6896 struct ufshcd_lrb *lrbp;
6897 int tag;
6898
6899 for_each_set_bit(tag, &bitmap, hba->nutrs) {
6900 lrbp = &hba->lrb[tag];
6901 lrbp->req_abort_skip = true;
6902 }
6903}
6904
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306905/**
Can Guo307348f2020-08-24 19:07:05 -07006906 * ufshcd_try_to_abort_task - abort a specific task
Lee Jonesd23ec0b2020-11-02 14:23:51 +00006907 * @hba: Pointer to adapter instance
6908 * @tag: Task tag/index to be aborted
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306909 *
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306910 * Abort the pending command in device by sending UFS_ABORT_TASK task management
6911 * command, and in host controller by clearing the door-bell register. There can
6912 * be race between controller sending the command to the device while abort is
6913 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
6914 * really issued and then try to abort it.
6915 *
Can Guo307348f2020-08-24 19:07:05 -07006916 * Returns zero on success, non-zero on failure
6917 */
6918static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
6919{
6920 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
6921 int err = 0;
6922 int poll_cnt;
6923 u8 resp = 0xF;
6924 u32 reg;
6925
6926 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6927 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6928 UFS_QUERY_TASK, &resp);
6929 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6930 /* cmd pending in the device */
6931 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6932 __func__, tag);
6933 break;
6934 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6935 /*
6936 * cmd not pending in the device, check if it is
6937 * in transition.
6938 */
6939 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6940 __func__, tag);
6941 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6942 if (reg & (1 << tag)) {
6943 /* sleep for max. 200us to stabilize */
6944 usleep_range(100, 200);
6945 continue;
6946 }
6947 /* command completed already */
6948 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6949 __func__, tag);
6950 goto out;
6951 } else {
6952 dev_err(hba->dev,
6953 "%s: no response from device. tag = %d, err %d\n",
6954 __func__, tag, err);
6955 if (!err)
6956 err = resp; /* service response error */
6957 goto out;
6958 }
6959 }
6960
6961 if (!poll_cnt) {
6962 err = -EBUSY;
6963 goto out;
6964 }
6965
6966 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6967 UFS_ABORT_TASK, &resp);
6968 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6969 if (!err) {
6970 err = resp; /* service response error */
6971 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6972 __func__, tag, err);
6973 }
6974 goto out;
6975 }
6976
6977 err = ufshcd_clear_cmd(hba, tag);
6978 if (err)
6979 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6980 __func__, tag, err);
6981
6982out:
6983 return err;
6984}
6985
6986/**
6987 * ufshcd_abort - scsi host template eh_abort_handler callback
6988 * @cmd: SCSI command pointer
6989 *
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306990 * Returns SUCCESS/FAILED
6991 */
6992static int ufshcd_abort(struct scsi_cmnd *cmd)
6993{
Bart Van Assche4728ab42021-07-21 20:34:27 -07006994 struct Scsi_Host *host = cmd->device->host;
6995 struct ufs_hba *hba = shost_priv(host);
Bart Van Assche3f2c1002021-08-09 16:03:50 -07006996 int tag = scsi_cmd_to_rq(cmd)->tag;
Bart Van Assche4728ab42021-07-21 20:34:27 -07006997 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306998 unsigned long flags;
Bart Van Assche64180742021-07-21 20:34:35 -07006999 int err = FAILED;
Bart Van Assche1fbaa02d2021-12-03 15:19:46 -08007000 bool outstanding;
Dolev Ravive9d501b2014-07-01 12:22:37 +03007001 u32 reg;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307002
Bart Van Assche4728ab42021-07-21 20:34:27 -07007003 WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307004
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007005 ufshcd_hold(hba, false);
Dolev Ravive9d501b2014-07-01 12:22:37 +03007006 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Bart Van Assche64180742021-07-21 20:34:35 -07007007 /* If command is already aborted/completed, return FAILED. */
Yaniv Gardi14497322016-02-01 15:02:39 +02007008 if (!(test_bit(tag, &hba->outstanding_reqs))) {
7009 dev_err(hba->dev,
7010 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7011 __func__, tag, hba->outstanding_reqs, reg);
Bart Van Assche64180742021-07-21 20:34:35 -07007012 goto release;
Yaniv Gardi14497322016-02-01 15:02:39 +02007013 }
7014
Dolev Raviv66cc8202016-12-22 18:39:42 -08007015 /* Print Transfer Request of aborted task */
Bean Huod87a1f62020-08-11 16:18:59 +02007016 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
Dolev Raviv66cc8202016-12-22 18:39:42 -08007017
Gilad Broner7fabb772017-02-03 16:56:50 -08007018 /*
7019 * Print detailed info about aborted request.
7020 * As more than one request might get aborted at the same time,
7021 * print full information only for the first aborted request in order
7022 * to reduce repeated printouts. For other aborted requests only print
7023 * basic details.
7024 */
Can Guo7a7e66c2020-12-02 04:04:02 -08007025 scsi_print_command(cmd);
Gilad Broner7fabb772017-02-03 16:56:50 -08007026 if (!hba->req_abort_count) {
Stanley Chue965e5e2020-12-05 19:58:59 +08007027 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
7028 ufshcd_print_evt_hist(hba);
Gilad Broner6ba65582017-02-03 16:57:28 -08007029 ufshcd_print_host_state(hba);
Gilad Broner7fabb772017-02-03 16:56:50 -08007030 ufshcd_print_pwr_info(hba);
7031 ufshcd_print_trs(hba, 1 << tag, true);
7032 } else {
7033 ufshcd_print_trs(hba, 1 << tag, false);
7034 }
7035 hba->req_abort_count++;
Gilad Bronere0b299e2017-02-03 16:56:40 -08007036
Bean Huod87a1f62020-08-11 16:18:59 +02007037 if (!(reg & (1 << tag))) {
7038 dev_err(hba->dev,
7039 "%s: cmd was completed, but without a notifying intr, tag = %d",
7040 __func__, tag);
Bart Van Assche11682522021-10-20 14:40:15 -07007041 __ufshcd_transfer_req_compl(hba, 1UL << tag);
Bart Van Assche64180742021-07-21 20:34:35 -07007042 goto release;
Bean Huod87a1f62020-08-11 16:18:59 +02007043 }
7044
Can Guo7a7e66c2020-12-02 04:04:02 -08007045 /*
7046 * Aborting a task issued to the device W-LUN is illegal. When such a
7047 * command fails due to this spec violation, the next step of SCSI error
7048 * handling would be a LU reset, which is again a spec violation. To
7049 * avoid these unnecessary/illegal steps, the command is left
Can Guoa45f9372021-05-24 01:36:57 -07007050 * outstanding, the host is flagged for a full reset, the eh_work is
Adrian Hunter88b099002021-09-17 17:43:49 +03007051 * queued and we bail.
Can Guo7a7e66c2020-12-02 04:04:02 -08007052 */
7053 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
7054 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
Bart Van Assche64180742021-07-21 20:34:35 -07007055
Can Guo7a7e66c2020-12-02 04:04:02 -08007056 spin_lock_irqsave(host->host_lock, flags);
Can Guoa45f9372021-05-24 01:36:57 -07007057 hba->force_reset = true;
Adrian Hunter88b099002021-09-17 17:43:49 +03007058 ufshcd_schedule_eh_work(hba);
Can Guo7a7e66c2020-12-02 04:04:02 -08007059 spin_unlock_irqrestore(host->host_lock, flags);
Bart Van Assche64180742021-07-21 20:34:35 -07007060 goto release;
Can Guo7a7e66c2020-12-02 04:04:02 -08007061 }
7062
Gilad Bronere0b299e2017-02-03 16:56:40 -08007063 /* Skip task abort in case previous aborts failed and report failure */
Bart Van Assche64180742021-07-21 20:34:35 -07007064 if (lrbp->req_abort_skip) {
7065 dev_err(hba->dev, "%s: skipping abort\n", __func__);
7066 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7067 goto release;
7068 }
Gilad Bronere0b299e2017-02-03 16:56:40 -08007069
Bart Van Assche64180742021-07-21 20:34:35 -07007070 err = ufshcd_try_to_abort_task(hba, tag);
7071 if (err) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307072 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
Gilad Bronere0b299e2017-02-03 16:56:40 -08007073 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307074 err = FAILED;
Bart Van Assche64180742021-07-21 20:34:35 -07007075 goto release;
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307076 }
7077
Bart Van Assche1fbaa02d2021-12-03 15:19:46 -08007078 /*
7079 * Clear the corresponding bit from outstanding_reqs since the command
7080 * has been aborted successfully.
7081 */
7082 spin_lock_irqsave(&hba->outstanding_lock, flags);
7083 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
7084 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7085
7086 if (outstanding)
7087 ufshcd_release_scsi_cmd(hba, lrbp);
7088
Bart Van Assche64180742021-07-21 20:34:35 -07007089 err = SUCCESS;
7090
7091release:
7092 /* Matches the ufshcd_hold() call at the start of this function. */
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007093 ufshcd_release(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307094 return err;
7095}
7096
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307097/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307098 * ufshcd_host_reset_and_restore - reset and restore host controller
7099 * @hba: per-adapter instance
7100 *
7101 * Note that host controller reset may issue DME_RESET to
7102 * local and remote (device) Uni-Pro stack and the attributes
7103 * are reset to default state.
7104 *
7105 * Returns zero on success, non-zero on failure
7106 */
7107static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7108{
7109 int err;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307110
Can Guo2df74b62019-11-25 22:53:33 -08007111 /*
7112 * Stop the host controller and complete the requests
7113 * cleared by h/w
7114 */
Daejun Parkf02bc972021-07-12 17:58:30 +09007115 ufshpb_reset_host(hba);
Bart Van Assche5cac1092020-05-07 15:27:50 -07007116 ufshcd_hba_stop(hba);
Can Guo2df74b62019-11-25 22:53:33 -08007117 hba->silence_err_logs = true;
Bart Van Assche11682522021-10-20 14:40:15 -07007118 ufshcd_complete_requests(hba);
Can Guo2df74b62019-11-25 22:53:33 -08007119 hba->silence_err_logs = false;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307120
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08007121 /* scale up clocks to max frequency before full reinitialization */
Subhash Jadavani394b9492020-03-26 02:25:40 -07007122 ufshcd_set_clk_freq(hba, true);
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08007123
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307124 err = ufshcd_hba_enable(hba);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307125
7126 /* Establish the link again and restore the device */
Randall Huang19186512020-11-30 20:14:02 -08007127 if (!err)
Jaegeuk Kim4ee7ee52021-01-07 10:53:15 -08007128 err = ufshcd_probe_hba(hba, false);
7129
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307130 if (err)
7131 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
Stanley Chue965e5e2020-12-05 19:58:59 +08007132 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307133 return err;
7134}
7135
7136/**
7137 * ufshcd_reset_and_restore - reset and re-initialize host/device
7138 * @hba: per-adapter instance
7139 *
7140 * Reset and recover device, host and re-establish link. This
7141 * is helpful to recover the communication in fatal error conditions.
7142 *
7143 * Returns zero on success, non-zero on failure
7144 */
7145static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7146{
Adrian Hunter54a40452021-10-02 18:45:49 +03007147 u32 saved_err = 0;
7148 u32 saved_uic_err = 0;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307149 int err = 0;
Can Guo4db7a232020-08-09 05:15:51 -07007150 unsigned long flags;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007151 int retries = MAX_HOST_RESET_RETRIES;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307152
Can Guo4db7a232020-08-09 05:15:51 -07007153 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007154 do {
Adrian Hunter54a40452021-10-02 18:45:49 +03007155 /*
7156 * This is a fresh start, cache and clear saved error first,
7157 * in case new error generated during reset and restore.
7158 */
7159 saved_err |= hba->saved_err;
7160 saved_uic_err |= hba->saved_uic_err;
7161 hba->saved_err = 0;
7162 hba->saved_uic_err = 0;
7163 hba->force_reset = false;
7164 hba->ufshcd_state = UFSHCD_STATE_RESET;
7165 spin_unlock_irqrestore(hba->host->host_lock, flags);
7166
Bjorn Anderssond8d9f792019-08-28 12:17:54 -07007167 /* Reset the attached device */
Stanley Chu31a5d9c2020-12-08 21:56:35 +08007168 ufshcd_device_reset(hba);
Bjorn Anderssond8d9f792019-08-28 12:17:54 -07007169
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007170 err = ufshcd_host_reset_and_restore(hba);
Adrian Hunter54a40452021-10-02 18:45:49 +03007171
7172 spin_lock_irqsave(hba->host->host_lock, flags);
7173 if (err)
7174 continue;
7175 /* Do not exit unless operational or dead */
7176 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
7177 hba->ufshcd_state != UFSHCD_STATE_ERROR &&
7178 hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
7179 err = -EAGAIN;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007180 } while (err && --retries);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307181
Can Guo4db7a232020-08-09 05:15:51 -07007182 /*
7183 * Inform scsi mid-layer that we did reset and allow to handle
7184 * Unit Attention properly.
7185 */
7186 scsi_report_bus_reset(hba->host, 0);
7187 if (err) {
Can Guo88a92d62020-12-02 04:04:01 -08007188 hba->ufshcd_state = UFSHCD_STATE_ERROR;
Can Guo4db7a232020-08-09 05:15:51 -07007189 hba->saved_err |= saved_err;
7190 hba->saved_uic_err |= saved_uic_err;
7191 }
7192 spin_unlock_irqrestore(hba->host->host_lock, flags);
7193
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307194 return err;
7195}
7196
7197/**
7198 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
Bart Van Assche8aa29f12018-03-01 15:07:20 -08007199 * @cmd: SCSI command pointer
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307200 *
7201 * Returns SUCCESS/FAILED
7202 */
7203static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7204{
Can Guo4db7a232020-08-09 05:15:51 -07007205 int err = SUCCESS;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307206 unsigned long flags;
7207 struct ufs_hba *hba;
7208
7209 hba = shost_priv(cmd->device->host);
7210
Can Guo4db7a232020-08-09 05:15:51 -07007211 spin_lock_irqsave(hba->host->host_lock, flags);
7212 hba->force_reset = true;
Adrian Hunter88b099002021-09-17 17:43:49 +03007213 ufshcd_schedule_eh_work(hba);
Can Guo4db7a232020-08-09 05:15:51 -07007214 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307215 spin_unlock_irqrestore(hba->host->host_lock, flags);
7216
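	/*
	 * Wait for the error handler scheduled above to finish before
	 * checking whether it left the HBA operational.
	 */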
Adrian Hunter88b099002021-09-17 17:43:49 +03007217 flush_work(&hba->eh_work);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307218
7219 spin_lock_irqsave(hba->host->host_lock, flags);
Can Guo4db7a232020-08-09 05:15:51 -07007220 if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307221 err = FAILED;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307222 spin_unlock_irqrestore(hba->host->host_lock, flags);
7223
7224 return err;
7225}
7226
7227/**
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007228 * ufshcd_get_max_icc_level - calculate the ICC level
7229 * @sup_curr_uA: max. current supported by the regulator
7230 * @start_scan: row at the desc table to start scan from
7231 * @buff: power descriptor buffer
7232 *
7233 * Returns calculated max ICC level for specific regulator
7234 */
7235static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
7236{
7237 int i;
7238 int curr_uA;
7239 u16 data;
7240 u16 unit;
7241
7242 for (i = start_scan; i >= 0; i--) {
Tomas Winklerd79713f2017-01-05 10:45:11 +02007243 data = be16_to_cpup((__be16 *)&buff[2 * i]);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007244 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7245 ATTR_ICC_LVL_UNIT_OFFSET;
7246 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
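		/*
		 * Normalize the descriptor value to microamps before comparing
		 * it against the regulator limit.
		 */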
7247 switch (unit) {
7248 case UFSHCD_NANO_AMP:
7249 curr_uA = curr_uA / 1000;
7250 break;
7251 case UFSHCD_MILI_AMP:
7252 curr_uA = curr_uA * 1000;
7253 break;
7254 case UFSHCD_AMP:
7255 curr_uA = curr_uA * 1000 * 1000;
7256 break;
7257 case UFSHCD_MICRO_AMP:
7258 default:
7259 break;
7260 }
7261 if (sup_curr_uA >= curr_uA)
7262 break;
7263 }
7264 if (i < 0) {
7265 i = 0;
7266 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7267 }
7268
7269 return (u32)i;
7270}
7271
7272/**
Lee Jones11eea9b2021-03-12 09:47:10 +00007273 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007274 * In case regulators are not initialized we'll return 0
7275 * @hba: per-adapter instance
7276 * @desc_buf: power descriptor buffer to extract ICC levels from.
7277 * @len: length of desc_buff
7278 *
7279 * Returns calculated ICC level
7280 */
7281static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7282 u8 *desc_buf, int len)
7283{
7284 u32 icc_level = 0;
7285
7286 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7287 !hba->vreg_info.vccq2) {
7288 dev_err(hba->dev,
7289 "%s: Regulator capability was not set, actvIccLevel=%d",
7290 __func__, icc_level);
7291 goto out;
7292 }
7293
Yue Hu08730452021-03-19 15:09:16 +08007294 if (hba->vreg_info.vcc->max_uA)
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007295 icc_level = ufshcd_get_max_icc_level(
7296 hba->vreg_info.vcc->max_uA,
7297 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7298 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7299
Yue Hu08730452021-03-19 15:09:16 +08007300 if (hba->vreg_info.vccq->max_uA)
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007301 icc_level = ufshcd_get_max_icc_level(
7302 hba->vreg_info.vccq->max_uA,
7303 icc_level,
7304 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7305
Yue Hu08730452021-03-19 15:09:16 +08007306 if (hba->vreg_info.vccq2->max_uA)
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007307 icc_level = ufshcd_get_max_icc_level(
7308 hba->vreg_info.vccq2->max_uA,
7309 icc_level,
7310 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7311out:
7312 return icc_level;
7313}
7314
Can Guoe89860f2020-03-26 02:25:41 -07007315static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007316{
7317 int ret;
Bean Huo7a0bf852020-06-03 11:19:58 +02007318 int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
Kees Cookbbe21d72018-05-02 16:58:09 -07007319 u8 *desc_buf;
Can Guoe89860f2020-03-26 02:25:41 -07007320 u32 icc_level;
Kees Cookbbe21d72018-05-02 16:58:09 -07007321
7322 desc_buf = kmalloc(buff_len, GFP_KERNEL);
7323 if (!desc_buf)
7324 return;
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007325
Bean Huoc4607a02020-06-03 11:19:56 +02007326 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7327 desc_buf, buff_len);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007328 if (ret) {
7329 dev_err(hba->dev,
7330 "%s: Failed reading power descriptor.len = %d ret = %d",
7331 __func__, buff_len, ret);
Kees Cookbbe21d72018-05-02 16:58:09 -07007332 goto out;
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007333 }
7334
Can Guoe89860f2020-03-26 02:25:41 -07007335 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
7336 buff_len);
7337 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007338
Szymon Mielczarekdbd34a62017-03-29 08:19:21 +02007339 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Can Guoe89860f2020-03-26 02:25:41 -07007340 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007341
7342 if (ret)
7343 dev_err(hba->dev,
7344 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
Can Guoe89860f2020-03-26 02:25:41 -07007345 __func__, icc_level, ret);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007346
Kees Cookbbe21d72018-05-02 16:58:09 -07007347out:
7348 kfree(desc_buf);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007349}
7350
Can Guofb276f72020-03-25 18:09:59 -07007351static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
7352{
7353 scsi_autopm_get_device(sdev);
7354 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
7355 if (sdev->rpm_autosuspend)
7356 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
7357 RPM_AUTOSUSPEND_DELAY_MS);
7358 scsi_autopm_put_device(sdev);
7359}
7360
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007361/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007362 * ufshcd_scsi_add_wlus - Adds required W-LUs
7363 * @hba: per-adapter instance
7364 *
7365 * UFS device specification requires the UFS devices to support 4 well known
7366 * logical units:
7367 * "REPORT_LUNS" (address: 01h)
7368 * "UFS Device" (address: 50h)
7369 * "RPMB" (address: 44h)
7370 * "BOOT" (address: 30h)
7371 * UFS device's power management needs to be controlled by "POWER CONDITION"
7372 * field of SSU (START STOP UNIT) command. But this "power condition" field
7373 * will take effect only when it is sent to the "UFS device" well known logical unit,
7374 * hence we require the scsi_device instance to represent this logical unit in
7375 * order for the UFS host driver to send the SSU command for power management.
Bart Van Assche8aa29f12018-03-01 15:07:20 -08007376 *
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007377 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
7378 * Block) LU so that a user space process can control this LU. User space may also
7379 * want to have access to BOOT LU.
Bart Van Assche8aa29f12018-03-01 15:07:20 -08007380 *
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007381 * This function adds scsi device instances for each of all well known LUs
7382 * (except "REPORT LUNS" LU).
7383 *
7384 * Returns zero on success (all required W-LUs are added successfully),
7385 * non-zero error value on failure (if failed to add any of the required W-LU).
7386 */
7387static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7388{
7389 int ret = 0;
Bart Van Assche59830c02021-12-03 15:19:37 -08007390 struct scsi_device *sdev_boot, *sdev_rpmb;
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007391
7392 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
7393 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7394 if (IS_ERR(hba->sdev_ufs_device)) {
7395 ret = PTR_ERR(hba->sdev_ufs_device);
7396 hba->sdev_ufs_device = NULL;
7397 goto out;
7398 }
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03007399 scsi_device_put(hba->sdev_ufs_device);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007400
Bart Van Assche59830c02021-12-03 15:19:37 -08007401 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007402 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
Bart Van Assche59830c02021-12-03 15:19:37 -08007403 if (IS_ERR(sdev_rpmb)) {
7404 ret = PTR_ERR(sdev_rpmb);
Huanlin Ke3d21fbd2017-09-22 18:31:47 +08007405 goto remove_sdev_ufs_device;
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007406 }
Bart Van Assche59830c02021-12-03 15:19:37 -08007407 ufshcd_blk_pm_runtime_init(sdev_rpmb);
7408 scsi_device_put(sdev_rpmb);
Huanlin Ke3d21fbd2017-09-22 18:31:47 +08007409
7410 sdev_boot = __scsi_add_device(hba->host, 0, 0,
7411 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
Can Guofb276f72020-03-25 18:09:59 -07007412 if (IS_ERR(sdev_boot)) {
Huanlin Ke3d21fbd2017-09-22 18:31:47 +08007413 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
Can Guofb276f72020-03-25 18:09:59 -07007414 } else {
7415 ufshcd_blk_pm_runtime_init(sdev_boot);
Huanlin Ke3d21fbd2017-09-22 18:31:47 +08007416 scsi_device_put(sdev_boot);
Can Guofb276f72020-03-25 18:09:59 -07007417 }
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007418 goto out;
7419
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007420remove_sdev_ufs_device:
7421 scsi_remove_device(hba->sdev_ufs_device);
7422out:
7423 return ret;
7424}
7425
Asutosh Das3d17b9b2020-04-22 14:41:42 -07007426static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
7427{
Stanley Chua7f1e692020-06-25 11:04:30 +08007428 struct ufs_dev_info *dev_info = &hba->dev_info;
Stanley Chu6f8d5a62020-05-08 16:01:13 +08007429 u8 lun;
7430 u32 d_lu_wb_buf_alloc;
Bean Huoe8d03812021-01-19 17:38:45 +01007431 u32 ext_ufs_feature;
Stanley Chu6f8d5a62020-05-08 16:01:13 +08007432
Stanley Chu817d7e12020-05-08 16:01:08 +08007433 if (!ufshcd_is_wb_allowed(hba))
7434 return;
Stanley Chua7f1e692020-06-25 11:04:30 +08007435 /*
7436 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
7437 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
7438 * enabled
7439 */
7440 if (!(dev_info->wspecversion >= 0x310 ||
7441 dev_info->wspecversion == 0x220 ||
7442 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
7443 goto wb_disabled;
Stanley Chu817d7e12020-05-08 16:01:08 +08007444
Bean Huo7a0bf852020-06-03 11:19:58 +02007445 if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
7446 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
Stanley Chu817d7e12020-05-08 16:01:08 +08007447 goto wb_disabled;
7448
Bean Huoe8d03812021-01-19 17:38:45 +01007449 ext_ufs_feature = get_unaligned_be32(desc_buf +
7450 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
Stanley Chu817d7e12020-05-08 16:01:08 +08007451
Bean Huoe8d03812021-01-19 17:38:45 +01007452 if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
Stanley Chu817d7e12020-05-08 16:01:08 +08007453 goto wb_disabled;
7454
Asutosh Das3d17b9b2020-04-22 14:41:42 -07007455 /*
Bean Huoae1ce1f2021-01-19 17:38:44 +01007456 * WB may be supported but not configured during provisioning. The spec
7457 * says that in dedicated WB buffer mode at most one LUN will have a WB
7458 * buffer configured.
Asutosh Das3d17b9b2020-04-22 14:41:42 -07007459 */
Bean Huo4cd48992021-01-19 17:38:46 +01007460 dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
Asutosh Das3d17b9b2020-04-22 14:41:42 -07007461
Stanley Chua7f1e692020-06-25 11:04:30 +08007462 dev_info->b_presrv_uspc_en =
Asutosh Das3d17b9b2020-04-22 14:41:42 -07007463 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
7464
Bean Huo4cd48992021-01-19 17:38:46 +01007465 if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
Bean Huoe8d03812021-01-19 17:38:45 +01007466 if (!get_unaligned_be32(desc_buf +
7467 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
Stanley Chu6f8d5a62020-05-08 16:01:13 +08007468 goto wb_disabled;
7469 } else {
7470 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
7471 d_lu_wb_buf_alloc = 0;
7472 ufshcd_read_unit_desc_param(hba,
7473 lun,
7474 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
7475 (u8 *)&d_lu_wb_buf_alloc,
7476 sizeof(d_lu_wb_buf_alloc));
7477 if (d_lu_wb_buf_alloc) {
Stanley Chua7f1e692020-06-25 11:04:30 +08007478 dev_info->wb_dedicated_lu = lun;
Stanley Chu6f8d5a62020-05-08 16:01:13 +08007479 break;
7480 }
7481 }
Stanley Chu817d7e12020-05-08 16:01:08 +08007482
Stanley Chu6f8d5a62020-05-08 16:01:13 +08007483 if (!d_lu_wb_buf_alloc)
7484 goto wb_disabled;
7485 }
Stanley Chu817d7e12020-05-08 16:01:08 +08007486 return;
7487
7488wb_disabled:
7489 hba->caps &= ~UFSHCD_CAP_WB_EN;
7490}
7491
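/**
 * ufshcd_temp_notif_probe - probe device temperature notification support
 * @hba: per-adapter instance
 * @desc_buf: buffer holding the device descriptor
 *
 * Enables the too-high/too-low temperature exception events and registers
 * the hwmon interface if the device advertises the capability.
 */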
Avri Altmane88e2d32021-09-15 09:04:06 +03007492static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf)
7493{
7494 struct ufs_dev_info *dev_info = &hba->dev_info;
7495 u32 ext_ufs_feature;
7496 u8 mask = 0;
7497
7498 if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
7499 return;
7500
7501 ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7502
7503 if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
7504 mask |= MASK_EE_TOO_LOW_TEMP;
7505
7506 if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
7507 mask |= MASK_EE_TOO_HIGH_TEMP;
7508
7509 if (mask) {
7510 ufshcd_enable_ee(hba, mask);
7511 ufs_hwmon_probe(hba, mask);
7512 }
7513}
7514
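/**
 * ufshcd_fixup_dev_quirks - apply a table of device quirk fixups
 * @hba: per-adapter instance
 * @fixups: array of fixups, terminated by an entry with a zero quirk field
 *
 * Quirks whose manufacturer ID and model string match the attached device
 * are OR-ed into hba->dev_quirks.
 */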
Stanley Chu8db269a2020-05-08 16:01:10 +08007515void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
Stanley Chu817d7e12020-05-08 16:01:08 +08007516{
7517 struct ufs_dev_fix *f;
7518 struct ufs_dev_info *dev_info = &hba->dev_info;
7519
Stanley Chu8db269a2020-05-08 16:01:10 +08007520 if (!fixups)
7521 return;
7522
7523 for (f = fixups; f->quirk; f++) {
Stanley Chu817d7e12020-05-08 16:01:08 +08007524 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
7525 f->wmanufacturerid == UFS_ANY_VENDOR) &&
7526 ((dev_info->model &&
7527 STR_PRFX_EQUAL(f->model, dev_info->model)) ||
7528 !strcmp(f->model, UFS_ANY_MODEL)))
7529 hba->dev_quirks |= f->quirk;
7530 }
Asutosh Das3d17b9b2020-04-22 14:41:42 -07007531}
Stanley Chu8db269a2020-05-08 16:01:10 +08007532EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
Asutosh Das3d17b9b2020-04-22 14:41:42 -07007533
Stanley Chuc28c00b2020-05-08 16:01:09 +08007534static void ufs_fixup_device_setup(struct ufs_hba *hba)
7535{
 7536	 /* apply fixes from the general quirk table */
Stanley Chu8db269a2020-05-08 16:01:10 +08007537 ufshcd_fixup_dev_quirks(hba, ufs_fixups);
Stanley Chuc28c00b2020-05-08 16:01:09 +08007538
7539 /* allow vendors to fix quirks */
7540 ufshcd_vops_fixup_dev_quirks(hba);
7541}
7542
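/**
 * ufs_get_device_desc - read the device descriptor and cache device info
 * @hba: per-adapter instance
 *
 * Reads the device descriptor and the product name string descriptor,
 * fills in hba->dev_info and probes optional features such as HPB,
 * WriteBooster and temperature notification.
 *
 * Returns zero on success, non-zero error value on failure.
 */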
Bean Huo09750062020-01-20 14:08:14 +01007543static int ufs_get_device_desc(struct ufs_hba *hba)
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02007544{
7545 int err;
7546 u8 model_index;
Daejun Parkf02bc972021-07-12 17:58:30 +09007547 u8 b_ufs_feature_sup;
Kees Cookbbe21d72018-05-02 16:58:09 -07007548 u8 *desc_buf;
Bean Huo09750062020-01-20 14:08:14 +01007549 struct ufs_dev_info *dev_info = &hba->dev_info;
Tomas Winkler4b828fe2019-07-30 08:55:17 +03007550
Bean Huo458a45f2020-06-03 11:19:55 +02007551 desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
Kees Cookbbe21d72018-05-02 16:58:09 -07007552 if (!desc_buf) {
7553 err = -ENOMEM;
7554 goto out;
7555 }
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02007556
Bean Huoc4607a02020-06-03 11:19:56 +02007557 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
Bean Huo7a0bf852020-06-03 11:19:58 +02007558 hba->desc_size[QUERY_DESC_IDN_DEVICE]);
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02007559 if (err) {
7560 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
7561 __func__, err);
7562 goto out;
7563 }
7564
7565 /*
 7566	 * get the vendor (manufacturer ID) in big endian
 7567	 * format
7568 */
Bean Huo09750062020-01-20 14:08:14 +01007569 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02007570 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
7571
Can Guo09f17792020-02-10 19:40:49 -08007572 /* getting Specification Version in big endian format */
7573 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
7574 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
Daejun Parkf02bc972021-07-12 17:58:30 +09007575 b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
Can Guo09f17792020-02-10 19:40:49 -08007576
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02007577 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
Asutosh Das3d17b9b2020-04-22 14:41:42 -07007578
Daejun Parkf02bc972021-07-12 17:58:30 +09007579 if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
7580 (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
Daejun Park41d8a932021-07-12 18:00:25 +09007581 bool hpb_en = false;
7582
Daejun Parkf02bc972021-07-12 17:58:30 +09007583 ufshpb_get_dev_info(hba, desc_buf);
Daejun Park41d8a932021-07-12 18:00:25 +09007584
7585 if (!ufshpb_is_legacy(hba))
7586 err = ufshcd_query_flag_retry(hba,
7587 UPIU_QUERY_OPCODE_READ_FLAG,
7588 QUERY_FLAG_IDN_HPB_EN, 0,
7589 &hpb_en);
7590
7591 if (ufshpb_is_legacy(hba) || (!err && hpb_en))
7592 dev_info->hpb_enabled = true;
Daejun Parkf02bc972021-07-12 17:58:30 +09007593 }
7594
Tomas Winkler4b828fe2019-07-30 08:55:17 +03007595 err = ufshcd_read_string_desc(hba, model_index,
Bean Huo09750062020-01-20 14:08:14 +01007596 &dev_info->model, SD_ASCII_STD);
Tomas Winkler4b828fe2019-07-30 08:55:17 +03007597 if (err < 0) {
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02007598 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
7599 __func__, err);
7600 goto out;
7601 }
7602
Asutosh Dasb294ff32021-04-23 17:20:16 -07007603 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
7604 desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
7605
Stanley Chu817d7e12020-05-08 16:01:08 +08007606 ufs_fixup_device_setup(hba);
7607
Stanley Chua7f1e692020-06-25 11:04:30 +08007608 ufshcd_wb_probe(hba, desc_buf);
Stanley Chu817d7e12020-05-08 16:01:08 +08007609
Avri Altmane88e2d32021-09-15 09:04:06 +03007610 ufshcd_temp_notif_probe(hba, desc_buf);
7611
Tomas Winkler4b828fe2019-07-30 08:55:17 +03007612 /*
 7613	 * ufshcd_read_string_desc returns the size of the string on success,
 7614	 * so reset the error value.
7615 */
7616 err = 0;
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02007617
7618out:
Kees Cookbbe21d72018-05-02 16:58:09 -07007619 kfree(desc_buf);
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02007620 return err;
7621}
7622
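/**
 * ufs_put_device_desc - free the model name cached by ufs_get_device_desc()
 * @hba: per-adapter instance
 */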
Bean Huo09750062020-01-20 14:08:14 +01007623static void ufs_put_device_desc(struct ufs_hba *hba)
Tomas Winkler4b828fe2019-07-30 08:55:17 +03007624{
Bean Huo09750062020-01-20 14:08:14 +01007625 struct ufs_dev_info *dev_info = &hba->dev_info;
7626
7627 kfree(dev_info->model);
7628 dev_info->model = NULL;
Tomas Winkler4b828fe2019-07-30 08:55:17 +03007629}
7630
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007631/**
Yaniv Gardi37113102016-03-10 17:37:16 +02007632 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
7633 * @hba: per-adapter instance
7634 *
7635 * PA_TActivate parameter can be tuned manually if UniPro version is less than
 7636	 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
7637 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
7638 * the hibern8 exit latency.
7639 *
7640 * Returns zero on success, non-zero error value on failure.
7641 */
7642static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7643{
7644 int ret = 0;
7645 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
7646
7647 ret = ufshcd_dme_peer_get(hba,
7648 UIC_ARG_MIB_SEL(
7649 RX_MIN_ACTIVATETIME_CAPABILITY,
7650 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7651 &peer_rx_min_activatetime);
7652 if (ret)
7653 goto out;
7654
7655 /* make sure proper unit conversion is applied */
7656 tuned_pa_tactivate =
7657 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7658 / PA_TACTIVATE_TIME_UNIT_US);
7659 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7660 tuned_pa_tactivate);
7661
7662out:
7663 return ret;
7664}
7665
7666/**
7667 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
7668 * @hba: per-adapter instance
7669 *
7670 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
 7671	 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
7672 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
7673 * This optimal value can help reduce the hibern8 exit latency.
7674 *
7675 * Returns zero on success, non-zero error value on failure.
7676 */
7677static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7678{
7679 int ret = 0;
7680 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7681 u32 max_hibern8_time, tuned_pa_hibern8time;
7682
7683 ret = ufshcd_dme_get(hba,
7684 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7685 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7686 &local_tx_hibern8_time_cap);
7687 if (ret)
7688 goto out;
7689
7690 ret = ufshcd_dme_peer_get(hba,
7691 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7692 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7693 &peer_rx_hibern8_time_cap);
7694 if (ret)
7695 goto out;
7696
7697 max_hibern8_time = max(local_tx_hibern8_time_cap,
7698 peer_rx_hibern8_time_cap);
7699 /* make sure proper unit conversion is applied */
7700 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7701 / PA_HIBERN8_TIME_UNIT_US);
7702 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7703 tuned_pa_hibern8time);
7704out:
7705 return ret;
7706}
7707
subhashj@codeaurora.orgc6a6db42016-11-23 16:32:08 -08007708/**
7709 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7710 * less than device PA_TACTIVATE time.
7711 * @hba: per-adapter instance
7712 *
 7713	 * Some UFS devices require host PA_TACTIVATE to be lower than device
 7714	 * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
 7715	 * enabled for such devices.
7716 *
7717 * Returns zero on success, non-zero error value on failure.
7718 */
7719static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7720{
7721 int ret = 0;
7722 u32 granularity, peer_granularity;
7723 u32 pa_tactivate, peer_pa_tactivate;
7724 u32 pa_tactivate_us, peer_pa_tactivate_us;
7725 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7726
7727 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7728 &granularity);
7729 if (ret)
7730 goto out;
7731
7732 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7733 &peer_granularity);
7734 if (ret)
7735 goto out;
7736
7737 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7738 (granularity > PA_GRANULARITY_MAX_VAL)) {
7739 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7740 __func__, granularity);
7741 return -EINVAL;
7742 }
7743
7744 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7745 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7746 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7747 __func__, peer_granularity);
7748 return -EINVAL;
7749 }
7750
7751 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7752 if (ret)
7753 goto out;
7754
7755 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7756 &peer_pa_tactivate);
7757 if (ret)
7758 goto out;
7759
7760 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7761 peer_pa_tactivate_us = peer_pa_tactivate *
7762 gran_to_us_table[peer_granularity - 1];
7763
7764 if (pa_tactivate_us > peer_pa_tactivate_us) {
7765 u32 new_peer_pa_tactivate;
7766
7767 new_peer_pa_tactivate = pa_tactivate_us /
7768 gran_to_us_table[peer_granularity - 1];
7769 new_peer_pa_tactivate++;
7770 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7771 new_peer_pa_tactivate);
7772 }
7773
7774out:
7775 return ret;
7776}
7777
Bean Huo09750062020-01-20 14:08:14 +01007778static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
Yaniv Gardi37113102016-03-10 17:37:16 +02007779{
7780 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7781 ufshcd_tune_pa_tactivate(hba);
7782 ufshcd_tune_pa_hibern8time(hba);
7783 }
7784
Can Guoe91ed9e2020-02-23 20:09:21 -08007785 ufshcd_vops_apply_dev_quirks(hba);
7786
Yaniv Gardi37113102016-03-10 17:37:16 +02007787 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7788 /* set 1ms timeout for PA_TACTIVATE */
7789 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
subhashj@codeaurora.orgc6a6db42016-11-23 16:32:08 -08007790
7791 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7792 ufshcd_quirk_tune_host_pa_tactivate(hba);
Yaniv Gardi37113102016-03-10 17:37:16 +02007793}
7794
Dolev Ravivff8e20c2016-12-22 18:42:18 -08007795static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7796{
Dolev Ravivff8e20c2016-12-22 18:42:18 -08007797 hba->ufs_stats.hibern8_exit_cnt = 0;
7798 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
Gilad Broner7fabb772017-02-03 16:56:50 -08007799 hba->req_abort_count = 0;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08007800}
7801
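/**
 * ufshcd_device_geo_params_init - read the geometry descriptor
 * @hba: per-adapter instance
 *
 * Determines the maximum number of logical units supported by the device
 * and, when the descriptor is large enough, reads the HPB geometry info.
 *
 * Returns zero on success, non-zero error value on failure.
 */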
Bean Huo731f0622020-01-20 14:08:19 +01007802static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
7803{
7804 int err;
7805 size_t buff_len;
7806 u8 *desc_buf;
7807
Bean Huo7a0bf852020-06-03 11:19:58 +02007808 buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
Bean Huo731f0622020-01-20 14:08:19 +01007809 desc_buf = kmalloc(buff_len, GFP_KERNEL);
7810 if (!desc_buf) {
7811 err = -ENOMEM;
7812 goto out;
7813 }
7814
Bean Huoc4607a02020-06-03 11:19:56 +02007815 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
7816 desc_buf, buff_len);
Bean Huo731f0622020-01-20 14:08:19 +01007817 if (err) {
7818 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7819 __func__, err);
7820 goto out;
7821 }
7822
7823 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7824 hba->dev_info.max_lu_supported = 32;
7825 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
7826 hba->dev_info.max_lu_supported = 8;
7827
Daejun Parkf02bc972021-07-12 17:58:30 +09007828 if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
7829 GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
7830 ufshpb_get_geo_info(hba, desc_buf);
7831
Bean Huo731f0622020-01-20 14:08:19 +01007832out:
7833 kfree(desc_buf);
7834 return err;
7835}
7836
Subhash Jadavani9e1e8a72018-10-16 14:29:41 +05307837static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
7838 {19200000, REF_CLK_FREQ_19_2_MHZ},
7839 {26000000, REF_CLK_FREQ_26_MHZ},
7840 {38400000, REF_CLK_FREQ_38_4_MHZ},
7841 {52000000, REF_CLK_FREQ_52_MHZ},
7842 {0, REF_CLK_FREQ_INVAL},
7843};
7844
7845static enum ufs_ref_clk_freq
7846ufs_get_bref_clk_from_hz(unsigned long freq)
7847{
7848 int i;
7849
7850 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
7851 if (ufs_ref_clk_freqs[i].freq_hz == freq)
7852 return ufs_ref_clk_freqs[i].val;
7853
7854 return REF_CLK_FREQ_INVAL;
7855}
7856
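/**
 * ufshcd_parse_dev_ref_clk_freq - derive bRefClkFreq from the reference clock
 * @hba: per-adapter instance
 * @refclk: the "ref_clk" clock provided by the platform
 *
 * Translates the reference clock rate into one of the bRefClkFreq values
 * defined by the UFS specification and stores it in hba->dev_ref_clk_freq.
 */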
7857void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7858{
7859 unsigned long freq;
7860
7861 freq = clk_get_rate(refclk);
7862
7863 hba->dev_ref_clk_freq =
7864 ufs_get_bref_clk_from_hz(freq);
7865
7866 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7867 dev_err(hba->dev,
7868 "invalid ref_clk setting = %ld\n", freq);
7869}
7870
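/**
 * ufshcd_set_dev_ref_clk - program the bRefClkFreq attribute on the device
 * @hba: per-adapter instance
 *
 * Writes hba->dev_ref_clk_freq to the device only if it differs from the
 * currently programmed value.
 *
 * Returns zero on success, non-zero error value on failure.
 */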
7871static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7872{
7873 int err;
7874 u32 ref_clk;
7875 u32 freq = hba->dev_ref_clk_freq;
7876
7877 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7878 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7879
7880 if (err) {
7881 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7882 err);
7883 goto out;
7884 }
7885
7886 if (ref_clk == freq)
7887 goto out; /* nothing to update */
7888
7889 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7890 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
7891
7892 if (err) {
7893 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
7894 ufs_ref_clk_freqs[freq].freq_hz);
7895 goto out;
7896 }
7897
7898 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
7899 ufs_ref_clk_freqs[freq].freq_hz);
7900
7901out:
7902 return err;
7903}
7904
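/**
 * ufshcd_device_params_init - initialize UFS device related parameters
 * @hba: per-adapter instance
 *
 * Initializes the descriptor sizes, reads the geometry and device
 * descriptors, applies device quirks and probes the maximum power mode
 * supported by both the host and the device.
 *
 * Returns zero on success, non-zero error value on failure.
 */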
Bean Huo1b9e2142020-01-20 14:08:15 +01007905static int ufshcd_device_params_init(struct ufs_hba *hba)
7906{
7907 bool flag;
Bean Huo7a0bf852020-06-03 11:19:58 +02007908 int ret, i;
Bean Huo1b9e2142020-01-20 14:08:15 +01007909
Bean Huo7a0bf852020-06-03 11:19:58 +02007910 /* Init device descriptor sizes */
7911 for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
7912 hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
Bean Huo1b9e2142020-01-20 14:08:15 +01007913
Bean Huo731f0622020-01-20 14:08:19 +01007914 /* Init UFS geometry descriptor related parameters */
7915 ret = ufshcd_device_geo_params_init(hba);
7916 if (ret)
7917 goto out;
7918
Bean Huo1b9e2142020-01-20 14:08:15 +01007919 /* Check and apply UFS device quirks */
7920 ret = ufs_get_device_desc(hba);
7921 if (ret) {
7922 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
7923 __func__, ret);
7924 goto out;
7925 }
7926
Can Guo09f17792020-02-10 19:40:49 -08007927 ufshcd_get_ref_clk_gating_wait(hba);
7928
Bean Huo1b9e2142020-01-20 14:08:15 +01007929 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
Stanley Chu1f34eed2020-05-08 16:01:12 +08007930 QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
Bean Huo1b9e2142020-01-20 14:08:15 +01007931 hba->dev_info.f_power_on_wp_en = flag;
7932
Bean Huo2b35b2a2020-01-20 14:08:16 +01007933 /* Probe maximum power mode co-supported by both UFS host and device */
7934 if (ufshcd_get_max_pwr_mode(hba))
7935 dev_err(hba->dev,
7936 "%s: Failed getting max supported power mode\n",
7937 __func__);
Bean Huo1b9e2142020-01-20 14:08:15 +01007938out:
7939 return ret;
7940}
7941
7942/**
7943 * ufshcd_add_lus - probe and add UFS logical units
7944 * @hba: per-adapter instance
7945 */
7946static int ufshcd_add_lus(struct ufs_hba *hba)
7947{
7948 int ret;
7949
Bean Huo1b9e2142020-01-20 14:08:15 +01007950 /* Add required well known logical units to scsi mid layer */
7951 ret = ufshcd_scsi_add_wlus(hba);
7952 if (ret)
7953 goto out;
7954
7955 /* Initialize devfreq after UFS device is detected */
7956 if (ufshcd_is_clkscaling_supported(hba)) {
7957 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7958 &hba->pwr_info,
7959 sizeof(struct ufs_pa_layer_attr));
7960 hba->clk_scaling.saved_pwr_info.is_valid = true;
Bean Huo1b9e2142020-01-20 14:08:15 +01007961 hba->clk_scaling.is_allowed = true;
Bean Huo1b9e2142020-01-20 14:08:15 +01007962
Stanley Chub058fa82021-01-20 23:01:41 +08007963 ret = ufshcd_devfreq_init(hba);
7964 if (ret)
7965 goto out;
7966
7967 hba->clk_scaling.is_enabled = true;
7968 ufshcd_init_clk_scaling_sysfs(hba);
Bean Huo1b9e2142020-01-20 14:08:15 +01007969 }
7970
7971 ufs_bsg_probe(hba);
Daejun Parkf02bc972021-07-12 17:58:30 +09007972 ufshpb_init(hba);
Bean Huo1b9e2142020-01-20 14:08:15 +01007973 scsi_scan_host(hba->host);
7974 pm_runtime_put_sync(hba->dev);
7975
Bean Huo1b9e2142020-01-20 14:08:15 +01007976out:
7977 return ret;
7978}
7979
Yaniv Gardi37113102016-03-10 17:37:16 +02007980/**
Bart Van Assche568dd992021-07-21 20:34:25 -07007981 * ufshcd_probe_hba - probe hba to detect device and initialize it
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007982 * @hba: per-adapter instance
Bart Van Assche568dd992021-07-21 20:34:25 -07007983 * @init_dev_params: whether or not to call ufshcd_device_params_init().
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007984 *
7985 * Execute link-startup and verify device initialization
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307986 */
Bart Van Assche568dd992021-07-21 20:34:25 -07007987static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307988{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307989 int ret;
Can Guo4db7a232020-08-09 05:15:51 -07007990 unsigned long flags;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007991 ktime_t start = ktime_get();
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307992
Can Guoaa53f582021-02-23 21:36:47 -08007993 hba->ufshcd_state = UFSHCD_STATE_RESET;
7994
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307995 ret = ufshcd_link_startup(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05307996 if (ret)
7997 goto out;
7998
jongmin jeong10fb4f82021-10-18 21:42:03 +09007999 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
8000 goto out;
8001
Dolev Ravivff8e20c2016-12-22 18:42:18 -08008002 /* Debug counters initialization */
8003 ufshcd_clear_dbg_ufs_stats(hba);
8004
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008005 /* UniPro link is active now */
8006 ufshcd_set_link_active(hba);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05308007
Bean Huo1b9e2142020-01-20 14:08:15 +01008008 /* Verify device initialization by sending NOP OUT UPIU */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05308009 ret = ufshcd_verify_dev_init(hba);
8010 if (ret)
8011 goto out;
8012
Bean Huo1b9e2142020-01-20 14:08:15 +01008013	 /* Initiate UFS initialization, and wait until completion */
Dolev Raviv68078d52013-07-30 00:35:58 +05308014 ret = ufshcd_complete_dev_init(hba);
8015 if (ret)
8016 goto out;
8017
Bean Huo1b9e2142020-01-20 14:08:15 +01008018 /*
8019 * Initialize UFS device parameters used by driver, these
8020 * parameters are associated with UFS descriptors.
8021 */
Bart Van Assche568dd992021-07-21 20:34:25 -07008022 if (init_dev_params) {
Bean Huo1b9e2142020-01-20 14:08:15 +01008023 ret = ufshcd_device_params_init(hba);
8024 if (ret)
8025 goto out;
Tomas Winkler93fdd5a2017-01-05 10:45:12 +02008026 }
8027
Bean Huo09750062020-01-20 14:08:14 +01008028 ufshcd_tune_unipro_params(hba);
Tomas Winkler4b828fe2019-07-30 08:55:17 +03008029
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008030 /* UFS device is also active now */
8031 ufshcd_set_ufs_dev_active(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308032 ufshcd_force_reset_auto_bkops(hba);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05308033
Bean Huo2b35b2a2020-01-20 14:08:16 +01008034 /* Gear up to HS gear if supported */
8035 if (hba->max_pwr_info.is_valid) {
Subhash Jadavani9e1e8a72018-10-16 14:29:41 +05308036 /*
8037 * Set the right value to bRefClkFreq before attempting to
8038 * switch to HS gears.
8039 */
8040 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
8041 ufshcd_set_dev_ref_clk(hba);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03008042 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
Dov Levenglick8643ae62016-10-17 17:10:14 -07008043 if (ret) {
Dolev Raviv7eb584d2014-09-25 15:32:31 +03008044 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
8045 __func__, ret);
Dov Levenglick8643ae62016-10-17 17:10:14 -07008046 goto out;
8047 }
Can Guo6a9df812020-02-11 21:38:28 -08008048 ufshcd_print_pwr_info(hba);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03008049 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008050
Can Guoe89860f2020-03-26 02:25:41 -07008051 /*
 8052	 * bActiveICCLevel is volatile for the UFS device (as per the latest v2.1 spec)
 8053	 * and for removable UFS cards as well, hence always set the parameter.
 8054	 * Note: the error handler may issue a device reset, which resets
 8055	 * bActiveICCLevel as well, so it is always safe to set this here.
8056 */
8057 ufshcd_set_active_icc_lvl(hba);
8058
Asutosh Das3d17b9b2020-04-22 14:41:42 -07008059 ufshcd_wb_config(hba);
Adrian Huntercd469472021-02-09 08:24:36 +02008060 if (hba->ee_usr_mask)
8061 ufshcd_write_ee_control(hba);
Can Guo71d848b2019-11-14 22:09:26 -08008062 /* Enable Auto-Hibernate if configured */
8063 ufshcd_auto_hibern8_enable(hba);
8064
Daejun Parkf02bc972021-07-12 17:58:30 +09008065 ufshpb_reset(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05308066out:
Can Guo4db7a232020-08-09 05:15:51 -07008067 spin_lock_irqsave(hba->host->host_lock, flags);
8068 if (ret)
8069 hba->ufshcd_state = UFSHCD_STATE_ERROR;
8070 else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
8071 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
8072 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008073
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008074 trace_ufshcd_init(dev_name(hba->dev), ret,
8075 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08008076 hba->curr_dev_pwr_mode, hba->uic_link_state);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008077 return ret;
8078}
8079
8080/**
8081 * ufshcd_async_scan - asynchronous execution for probing hba
8082 * @data: data pointer to pass to this function
8083 * @cookie: cookie data
8084 */
8085static void ufshcd_async_scan(void *data, async_cookie_t cookie)
8086{
8087 struct ufs_hba *hba = (struct ufs_hba *)data;
Bean Huo1b9e2142020-01-20 14:08:15 +01008088 int ret;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008089
Can Guo9cd20d32021-01-13 19:13:28 -08008090 down(&hba->host_sem);
Bean Huo1b9e2142020-01-20 14:08:15 +01008091 /* Initialize hba, detect and initialize UFS device */
8092 ret = ufshcd_probe_hba(hba, true);
Can Guo9cd20d32021-01-13 19:13:28 -08008093 up(&hba->host_sem);
Bean Huo1b9e2142020-01-20 14:08:15 +01008094 if (ret)
8095 goto out;
8096
8097 /* Probe and add UFS logical units */
8098 ret = ufshcd_add_lus(hba);
8099out:
8100 /*
8101 * If we failed to initialize the device or the device is not
8102 * present, turn off the power/clocks etc.
8103 */
8104 if (ret) {
8105 pm_runtime_put_sync(hba->dev);
Bean Huo1b9e2142020-01-20 14:08:15 +01008106 ufshcd_hba_exit(hba);
8107 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05308108}
8109
Stanislav Nijnikovd829fc82018-02-15 14:14:09 +02008110static const struct attribute_group *ufshcd_driver_groups[] = {
8111 &ufs_sysfs_unit_descriptor_group,
Stanislav Nijnikovec92b592018-02-15 14:14:11 +02008112 &ufs_sysfs_lun_attributes_group,
Daejun Parkf02bc972021-07-12 17:58:30 +09008113#ifdef CONFIG_SCSI_UFS_HPB
8114 &ufs_sysfs_hpb_stat_group,
Daejun Park41d8a932021-07-12 18:00:25 +09008115 &ufs_sysfs_hpb_param_group,
Daejun Parkf02bc972021-07-12 17:58:30 +09008116#endif
Stanislav Nijnikovd829fc82018-02-15 14:14:09 +02008117 NULL,
8118};
8119
Stanley Chu90b84912020-05-09 17:37:13 +08008120static struct ufs_hba_variant_params ufs_hba_vps = {
8121 .hba_enable_delay_us = 1000,
Stanley Chud14734ae2020-05-09 17:37:15 +08008122 .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
Stanley Chu90b84912020-05-09 17:37:13 +08008123 .devfreq_profile.polling_ms = 100,
8124 .devfreq_profile.target = ufshcd_devfreq_target,
8125 .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
8126 .ondemand_data.upthreshold = 70,
8127 .ondemand_data.downdifferential = 5,
8128};
8129
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308130static struct scsi_host_template ufshcd_driver_template = {
8131 .module = THIS_MODULE,
8132 .name = UFSHCD,
8133 .proc_name = UFSHCD,
8134 .queuecommand = ufshcd_queuecommand,
8135 .slave_alloc = ufshcd_slave_alloc,
Akinobu Mitaeeda4742014-07-01 23:00:32 +09008136 .slave_configure = ufshcd_slave_configure,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308137 .slave_destroy = ufshcd_slave_destroy,
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03008138 .change_queue_depth = ufshcd_change_queue_depth,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308139 .eh_abort_handler = ufshcd_abort,
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05308140 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
8141 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308142 .this_id = -1,
8143 .sg_tablesize = SG_ALL,
8144 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
8145 .can_queue = UFSHCD_CAN_QUEUE,
Christoph Hellwig552a9902019-06-17 14:19:55 +02008146 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008147 .max_host_blocked = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01008148 .track_queue_depth = 1,
Stanislav Nijnikovd829fc82018-02-15 14:14:09 +02008149 .sdev_groups = ufshcd_driver_groups,
Christoph Hellwig4af14d12018-12-13 16:17:09 +01008150 .dma_boundary = PAGE_SIZE - 1,
Stanley Chu49615ba2019-09-16 23:56:50 +08008151 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308152};
8153
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008154static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
8155 int ua)
8156{
Bjorn Andersson7b16a072015-02-11 19:35:28 -08008157 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008158
Bjorn Andersson7b16a072015-02-11 19:35:28 -08008159 if (!vreg)
8160 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008161
Stanley Chu0487fff2019-03-28 17:16:25 +08008162 /*
 8163	 * The "set_load" operation is only required for regulators that have
 8164	 * a current limit configured. Otherwise a zero max_uA may cause
 8165	 * unexpected behavior when the regulator is enabled or set to high
 8166	 * power mode.
8167 */
8168 if (!vreg->max_uA)
8169 return 0;
8170
Bjorn Andersson7b16a072015-02-11 19:35:28 -08008171 ret = regulator_set_load(vreg->reg, ua);
8172 if (ret < 0) {
8173 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
8174 __func__, vreg->name, ua, ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008175 }
8176
8177 return ret;
8178}
8179
8180static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
8181 struct ufs_vreg *vreg)
8182{
Marc Gonzalez73067982019-02-27 11:41:45 +01008183 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008184}
8185
8186static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
8187 struct ufs_vreg *vreg)
8188{
Adrian Hunter7c7cfdc2019-08-14 15:59:50 +03008189 if (!vreg)
8190 return 0;
8191
Marc Gonzalez73067982019-02-27 11:41:45 +01008192 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008193}
8194
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008195static int ufshcd_config_vreg(struct device *dev,
8196 struct ufs_vreg *vreg, bool on)
8197{
8198 int ret = 0;
Gustavo A. R. Silva72753592017-11-20 08:12:29 -06008199 struct regulator *reg;
8200 const char *name;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008201 int min_uV, uA_load;
8202
8203 BUG_ON(!vreg);
8204
Gustavo A. R. Silva72753592017-11-20 08:12:29 -06008205 reg = vreg->reg;
8206 name = vreg->name;
8207
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008208 if (regulator_count_voltages(reg) > 0) {
Asutosh Das90d88f42020-02-10 19:40:45 -08008209 uA_load = on ? vreg->max_uA : 0;
8210 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
8211 if (ret)
8212 goto out;
8213
Stanley Chu3b141e82019-03-28 17:16:24 +08008214 if (vreg->min_uV && vreg->max_uV) {
8215 min_uV = on ? vreg->min_uV : 0;
8216 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
Bean Huob0008622020-08-14 11:50:34 +02008217 if (ret)
Stanley Chu3b141e82019-03-28 17:16:24 +08008218 dev_err(dev,
8219 "%s: %s set voltage failed, err=%d\n",
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008220 __func__, name, ret);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008221 }
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008222 }
8223out:
8224 return ret;
8225}
8226
8227static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
8228{
8229 int ret = 0;
8230
Marc Gonzalez73067982019-02-27 11:41:45 +01008231 if (!vreg || vreg->enabled)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008232 goto out;
8233
8234 ret = ufshcd_config_vreg(dev, vreg, true);
8235 if (!ret)
8236 ret = regulator_enable(vreg->reg);
8237
8238 if (!ret)
8239 vreg->enabled = true;
8240 else
8241 dev_err(dev, "%s: %s enable failed, err=%d\n",
8242 __func__, vreg->name, ret);
8243out:
8244 return ret;
8245}
8246
8247static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
8248{
8249 int ret = 0;
8250
Stanley Chuf8162ac2020-12-07 13:49:54 +08008251 if (!vreg || !vreg->enabled || vreg->always_on)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008252 goto out;
8253
8254 ret = regulator_disable(vreg->reg);
8255
8256 if (!ret) {
8257 /* ignore errors on applying disable config */
8258 ufshcd_config_vreg(dev, vreg, false);
8259 vreg->enabled = false;
8260 } else {
8261 dev_err(dev, "%s: %s disable failed, err=%d\n",
8262 __func__, vreg->name, ret);
8263 }
8264out:
8265 return ret;
8266}
8267
8268static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
8269{
8270 int ret = 0;
8271 struct device *dev = hba->dev;
8272 struct ufs_vreg_info *info = &hba->vreg_info;
8273
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008274 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
8275 if (ret)
8276 goto out;
8277
8278 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
8279 if (ret)
8280 goto out;
8281
8282 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008283
8284out:
8285 if (ret) {
8286 ufshcd_toggle_vreg(dev, info->vccq2, false);
8287 ufshcd_toggle_vreg(dev, info->vccq, false);
8288 ufshcd_toggle_vreg(dev, info->vcc, false);
8289 }
8290 return ret;
8291}
8292
Raviv Shvili6a771a62014-09-25 15:32:24 +03008293static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
8294{
8295 struct ufs_vreg_info *info = &hba->vreg_info;
8296
Zeng Guangyue60b7b822019-03-30 17:03:13 +08008297 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
Raviv Shvili6a771a62014-09-25 15:32:24 +03008298}
8299
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008300static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
8301{
8302 int ret = 0;
8303
8304 if (!vreg)
8305 goto out;
8306
8307 vreg->reg = devm_regulator_get(dev, vreg->name);
8308 if (IS_ERR(vreg->reg)) {
8309 ret = PTR_ERR(vreg->reg);
8310 dev_err(dev, "%s: %s get failed, err=%d\n",
8311 __func__, vreg->name, ret);
8312 }
8313out:
8314 return ret;
8315}
8316
8317static int ufshcd_init_vreg(struct ufs_hba *hba)
8318{
8319 int ret = 0;
8320 struct device *dev = hba->dev;
8321 struct ufs_vreg_info *info = &hba->vreg_info;
8322
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008323 ret = ufshcd_get_vreg(dev, info->vcc);
8324 if (ret)
8325 goto out;
8326
8327 ret = ufshcd_get_vreg(dev, info->vccq);
Bean Huob0008622020-08-14 11:50:34 +02008328 if (!ret)
8329 ret = ufshcd_get_vreg(dev, info->vccq2);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008330out:
8331 return ret;
8332}
8333
Raviv Shvili6a771a62014-09-25 15:32:24 +03008334static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8335{
8336 struct ufs_vreg_info *info = &hba->vreg_info;
8337
8338 if (info)
8339 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
8340
8341 return 0;
8342}
8343
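/**
 * ufshcd_setup_clocks - enable or disable the controller clocks
 * @hba: per-adapter instance
 * @on: true to enable the clocks, false to disable them
 *
 * Clocks that are needed to keep the link active are left untouched when
 * disabling. The vendor setup_clocks callback is invoked before and after
 * the clock state change.
 *
 * Returns zero on success, non-zero error value on failure.
 */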
Can Guo81309c22020-11-25 18:01:00 -08008344static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008345{
8346 int ret = 0;
8347 struct ufs_clk_info *clki;
8348 struct list_head *head = &hba->clk_list_head;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008349 unsigned long flags;
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08008350 ktime_t start = ktime_get();
8351 bool clk_state_changed = false;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008352
Szymon Mielczarek566ec9a2017-06-05 11:36:54 +03008353 if (list_empty(head))
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008354 goto out;
8355
Can Guo38f32422020-02-10 19:40:47 -08008356 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
8357 if (ret)
8358 return ret;
Subhash Jadavani1e879e82016-10-06 21:48:22 -07008359
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008360 list_for_each_entry(clki, head, list) {
8361 if (!IS_ERR_OR_NULL(clki->clk)) {
Can Guo81309c22020-11-25 18:01:00 -08008362 /*
8363 * Don't disable clocks which are needed
8364 * to keep the link active.
8365 */
8366 if (ufshcd_is_link_active(hba) &&
8367 clki->keep_link_active)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008368 continue;
8369
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08008370 clk_state_changed = on ^ clki->enabled;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008371 if (on && !clki->enabled) {
8372 ret = clk_prepare_enable(clki->clk);
8373 if (ret) {
8374 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8375 __func__, clki->name, ret);
8376 goto out;
8377 }
8378 } else if (!on && clki->enabled) {
8379 clk_disable_unprepare(clki->clk);
8380 }
8381 clki->enabled = on;
8382 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8383 clki->name, on ? "en" : "dis");
8384 }
8385 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008386
Can Guo38f32422020-02-10 19:40:47 -08008387 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
8388 if (ret)
8389 return ret;
Subhash Jadavani1e879e82016-10-06 21:48:22 -07008390
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008391out:
8392 if (ret) {
8393 list_for_each_entry(clki, head, list) {
8394 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
8395 clk_disable_unprepare(clki->clk);
8396 }
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008397 } else if (!ret && on) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008398 spin_lock_irqsave(hba->host->host_lock, flags);
8399 hba->clk_gating.state = CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008400 trace_ufshcd_clk_gating(dev_name(hba->dev),
8401 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008402 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008403 }
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008404
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08008405 if (clk_state_changed)
8406 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
8407 (on ? "on" : "off"),
8408 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008409 return ret;
8410}
8411
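/**
 * ufshcd_init_clocks - acquire and set up the clocks listed for this host
 * @hba: per-adapter instance
 *
 * Gets each clock by name, sets it to its maximum frequency and parses the
 * device reference clock frequency from the "ref_clk" entry.
 *
 * Returns zero on success, non-zero error value on failure.
 */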
8412static int ufshcd_init_clocks(struct ufs_hba *hba)
8413{
8414 int ret = 0;
8415 struct ufs_clk_info *clki;
8416 struct device *dev = hba->dev;
8417 struct list_head *head = &hba->clk_list_head;
8418
Szymon Mielczarek566ec9a2017-06-05 11:36:54 +03008419 if (list_empty(head))
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008420 goto out;
8421
8422 list_for_each_entry(clki, head, list) {
8423 if (!clki->name)
8424 continue;
8425
8426 clki->clk = devm_clk_get(dev, clki->name);
8427 if (IS_ERR(clki->clk)) {
8428 ret = PTR_ERR(clki->clk);
8429 dev_err(dev, "%s: %s clk get failed, %d\n",
8430 __func__, clki->name, ret);
8431 goto out;
8432 }
8433
Subhash Jadavani9e1e8a72018-10-16 14:29:41 +05308434 /*
8435 * Parse device ref clk freq as per device tree "ref_clk".
8436 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
8437 * in ufshcd_alloc_host().
8438 */
8439 if (!strcmp(clki->name, "ref_clk"))
8440 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
8441
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008442 if (clki->max_freq) {
8443 ret = clk_set_rate(clki->clk, clki->max_freq);
8444 if (ret) {
8445 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
8446 __func__, clki->name,
8447 clki->max_freq, ret);
8448 goto out;
8449 }
Sahitya Tummala856b3482014-09-25 15:32:34 +03008450 clki->curr_freq = clki->max_freq;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008451 }
8452 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
8453 clki->name, clk_get_rate(clki->clk));
8454 }
8455out:
8456 return ret;
8457}
8458
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008459static int ufshcd_variant_hba_init(struct ufs_hba *hba)
8460{
8461 int err = 0;
8462
8463 if (!hba->vops)
8464 goto out;
8465
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008466 err = ufshcd_vops_init(hba);
8467 if (err)
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008468 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008469 __func__, ufshcd_get_var_name(hba), err);
Stanley Chuade921a2020-12-05 20:00:38 +08008470out:
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008471 return err;
8472}
8473
8474static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
8475{
8476 if (!hba->vops)
8477 return;
8478
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008479 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008480}
8481
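/**
 * ufshcd_hba_init - initialize host controller power, clocks and regulators
 * @hba: per-adapter instance
 *
 * Returns zero on success, non-zero error value on failure.
 */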
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008482static int ufshcd_hba_init(struct ufs_hba *hba)
8483{
8484 int err;
8485
Raviv Shvili6a771a62014-09-25 15:32:24 +03008486 /*
8487 * Handle host controller power separately from the UFS device power
 8488	 * rails as it helps to control the UFS host controller power
 8489	 * collapse easily, which is different from UFS device power collapse.
 8490	 * Also, enable the host controller power before we go ahead with the rest
8491 * of the initialization here.
8492 */
8493 err = ufshcd_init_hba_vreg(hba);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008494 if (err)
8495 goto out;
8496
Raviv Shvili6a771a62014-09-25 15:32:24 +03008497 err = ufshcd_setup_hba_vreg(hba, true);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008498 if (err)
8499 goto out;
8500
Raviv Shvili6a771a62014-09-25 15:32:24 +03008501 err = ufshcd_init_clocks(hba);
8502 if (err)
8503 goto out_disable_hba_vreg;
8504
8505 err = ufshcd_setup_clocks(hba, true);
8506 if (err)
8507 goto out_disable_hba_vreg;
8508
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008509 err = ufshcd_init_vreg(hba);
8510 if (err)
8511 goto out_disable_clks;
8512
8513 err = ufshcd_setup_vreg(hba, true);
8514 if (err)
8515 goto out_disable_clks;
8516
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008517 err = ufshcd_variant_hba_init(hba);
8518 if (err)
8519 goto out_disable_vreg;
8520
Adrian Hunterb6cacaf2021-01-07 09:25:38 +02008521 ufs_debugfs_hba_init(hba);
8522
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008523 hba->is_powered = true;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008524 goto out;
8525
8526out_disable_vreg:
8527 ufshcd_setup_vreg(hba, false);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008528out_disable_clks:
8529 ufshcd_setup_clocks(hba, false);
Raviv Shvili6a771a62014-09-25 15:32:24 +03008530out_disable_hba_vreg:
8531 ufshcd_setup_hba_vreg(hba, false);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008532out:
8533 return err;
8534}
8535
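/**
 * ufshcd_hba_exit - undo the initialization done by ufshcd_hba_init()
 * @hba: per-adapter instance
 */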
8536static void ufshcd_hba_exit(struct ufs_hba *hba)
8537{
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008538 if (hba->is_powered) {
Can Guo4543d9d2021-01-20 02:04:22 -08008539 ufshcd_exit_clk_scaling(hba);
8540 ufshcd_exit_clk_gating(hba);
Adrian Hunter88b099002021-09-17 17:43:49 +03008541 if (hba->eh_wq)
8542 destroy_workqueue(hba->eh_wq);
Adrian Hunterb6cacaf2021-01-07 09:25:38 +02008543 ufs_debugfs_hba_exit(hba);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008544 ufshcd_variant_hba_exit(hba);
8545 ufshcd_setup_vreg(hba, false);
8546 ufshcd_setup_clocks(hba, false);
8547 ufshcd_setup_hba_vreg(hba, false);
8548 hba->is_powered = false;
Bean Huo09750062020-01-20 14:08:14 +01008549 ufs_put_device_desc(hba);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008550 }
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008551}
8552
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308553/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008554 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
8555 * power mode
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308556 * @hba: per adapter instance
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008557 * @pwr_mode: device power mode to set
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308558 *
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008559 * Returns 0 if requested power mode is set successfully
8560 * Returns non-zero if failed to set the requested power mode
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308561 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008562static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
8563 enum ufs_dev_pwr_mode pwr_mode)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308564{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008565 unsigned char cmd[6] = { START_STOP };
8566 struct scsi_sense_hdr sshdr;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03008567 struct scsi_device *sdp;
8568 unsigned long flags;
Jaegeuk Kimaf21c3f2021-10-01 11:20:14 -07008569 int ret, retries;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008570
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03008571 spin_lock_irqsave(hba->host->host_lock, flags);
8572 sdp = hba->sdev_ufs_device;
8573 if (sdp) {
8574 ret = scsi_device_get(sdp);
8575 if (!ret && !scsi_device_online(sdp)) {
8576 ret = -ENODEV;
8577 scsi_device_put(sdp);
8578 }
8579 } else {
8580 ret = -ENODEV;
8581 }
8582 spin_unlock_irqrestore(hba->host->host_lock, flags);
8583
8584 if (ret)
8585 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008586
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308587 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008588 * If scsi commands fail, the scsi mid-layer schedules scsi error-
8589 * handling, which would wait for host to be resumed. Since we know
8590 * we are functional while we are here, skip host resume in error
8591 * handling context.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308592 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008593 hba->host->eh_noresume = 1;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308594
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008595 cmd[4] = pwr_mode << 4;
8596
8597 /*
 8598	 * This function is generally called from the power management
Christoph Hellwige8064022016-10-20 15:12:13 +02008599	 * callbacks, hence set the RQF_PM flag so that it doesn't resume the
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008600	 * already suspended children.
8601 */
Jaegeuk Kimaf21c3f2021-10-01 11:20:14 -07008602 for (retries = 3; retries > 0; --retries) {
8603 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
8604 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
8605 if (!scsi_status_is_check_condition(ret) ||
8606 !scsi_sense_valid(&sshdr) ||
8607 sshdr.sense_key != UNIT_ATTENTION)
8608 break;
8609 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008610 if (ret) {
8611 sdev_printk(KERN_WARNING, sdp,
Hannes Reineckeef613292014-10-24 14:27:00 +02008612 "START_STOP failed for power mode: %d, result %x\n",
8613 pwr_mode, ret);
Hannes Reinecke464a00c2021-04-27 10:30:15 +02008614 if (ret > 0 && scsi_sense_valid(&sshdr))
Hannes Reinecke21045512015-01-08 07:43:46 +01008615 scsi_print_sense_hdr(sdp, NULL, &sshdr);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008616 }
8617
8618 if (!ret)
8619 hba->curr_dev_pwr_mode = pwr_mode;
Randall Huang19186512020-11-30 20:14:02 -08008620
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03008621 scsi_device_put(sdp);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008622 hba->host->eh_noresume = 0;
8623 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308624}
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308625
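/**
 * ufshcd_link_state_transition - put the UniPro link in the requested state
 * @hba: per-adapter instance
 * @req_link_state: the link state to transition to
 * @check_for_bkops: whether auto-bkops must be considered before turning
 *                   the link off
 *
 * Returns zero on success, non-zero error value on failure.
 */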
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008626static int ufshcd_link_state_transition(struct ufs_hba *hba,
8627 enum uic_link_state req_link_state,
8628 int check_for_bkops)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308629{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008630 int ret = 0;
8631
8632 if (req_link_state == hba->uic_link_state)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308633 return 0;
8634
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008635 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8636 ret = ufshcd_uic_hibern8_enter(hba);
Can Guo4db7a232020-08-09 05:15:51 -07008637 if (!ret) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008638 ufshcd_set_link_hibern8(hba);
Can Guo4db7a232020-08-09 05:15:51 -07008639 } else {
8640 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8641 __func__, ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008642 goto out;
Can Guo4db7a232020-08-09 05:15:51 -07008643 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008644 }
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308645 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008646 * If autobkops is enabled, link can't be turned off because
Adrian Hunterfe1d4c22020-11-03 16:14:02 +02008647 * turning off the link would also turn off the device, except in the
8648 * case of DeepSleep where the device is expected to remain powered.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308649 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008650 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
Dan Carpenterdc30c9e2019-12-13 13:49:35 +03008651 (!check_for_bkops || !hba->auto_bkops_enabled)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008652 /*
Yaniv Gardif3099fb2016-03-10 17:37:17 +02008653	 * Let's make sure that the link is in low power mode; we currently
 8654	 * do this by putting the link in Hibern8. Another way to put the
 8655	 * link in low power mode is to send the DME end point to the device
 8656	 * and then send the DME reset command to local UniPro. But putting
 8657	 * the link in Hibern8 is much faster.
Adrian Hunterfe1d4c22020-11-03 16:14:02 +02008658 *
8659 * Note also that putting the link in Hibern8 is a requirement
8660 * for entering DeepSleep.
Yaniv Gardif3099fb2016-03-10 17:37:17 +02008661 */
8662 ret = ufshcd_uic_hibern8_enter(hba);
Can Guo4db7a232020-08-09 05:15:51 -07008663 if (ret) {
8664 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8665 __func__, ret);
Yaniv Gardif3099fb2016-03-10 17:37:17 +02008666 goto out;
Can Guo4db7a232020-08-09 05:15:51 -07008667 }
Yaniv Gardif3099fb2016-03-10 17:37:17 +02008668 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008669 * Change controller state to "reset state" which
8670 * should also put the link in off/reset state
8671 */
Bart Van Assche5cac1092020-05-07 15:27:50 -07008672 ufshcd_hba_stop(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008673 /*
8674 * TODO: Check if we need any delay to make sure that
8675 * controller is reset
8676 */
8677 ufshcd_set_link_off(hba);
8678 }
8679
8680out:
8681 return ret;
8682}
8683
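/**
 * ufshcd_vreg_set_lpm - put the UFS device regulators in low power mode
 * @hba: per-adapter instance
 */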
8684static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8685{
Stanley Chuc4df6ee2020-07-29 13:18:39 +08008686 bool vcc_off = false;
8687
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008688 /*
Yaniv Gardib799fdf2016-03-10 17:37:18 +02008689 * It seems some UFS devices may keep drawing more than sleep current
 8690	 * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
 8691	 * To avoid this situation, add a 2ms delay before putting these UFS
8692 * rails in LPM mode.
8693 */
8694 if (!ufshcd_is_link_active(hba) &&
8695 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
8696 usleep_range(2000, 2100);
8697
8698 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008699	 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to save some
8700 * power.
8701 *
8702 * If UFS device and link is in OFF state, all power supplies (VCC,
8703 * VCCQ, VCCQ2) can be turned off if power on write protect is not
8704 * required. If UFS link is inactive (Hibern8 or OFF state) and device
8705 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
8706 *
8707 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
8708 * in low power state which would save some power.
Asutosh Das3d17b9b2020-04-22 14:41:42 -07008709 *
8710 * If Write Booster is enabled and the device needs to flush the WB
8711 * buffer OR if bkops status is urgent for WB, keep Vcc on.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008712 */
8713 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8714 !hba->dev_info.is_lu_power_on_wp) {
8715 ufshcd_setup_vreg(hba, false);
Stanley Chuc4df6ee2020-07-29 13:18:39 +08008716 vcc_off = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008717 } else if (!ufshcd_is_ufs_dev_active(hba)) {
Stanley Chu51dd9052020-05-22 16:32:12 +08008718 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
Stanley Chuc4df6ee2020-07-29 13:18:39 +08008719 vcc_off = true;
Can Guo23043dd2021-04-25 20:48:38 -07008720 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008721 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8722 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8723 }
8724 }
Stanley Chuc4df6ee2020-07-29 13:18:39 +08008725
8726 /*
 8727	 * Some UFS devices require a delay after the VCC power rail is turned off.
8728 */
8729 if (vcc_off && hba->vreg_info.vcc &&
8730 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
8731 usleep_range(5000, 5100);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008732}
8733
Bart Van Assche9bb25e52021-07-21 20:34:24 -07008734#ifdef CONFIG_PM
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008735static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8736{
8737 int ret = 0;
8738
8739 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8740 !hba->dev_info.is_lu_power_on_wp) {
8741 ret = ufshcd_setup_vreg(hba, true);
8742 } else if (!ufshcd_is_ufs_dev_active(hba)) {
Can Guo23043dd2021-04-25 20:48:38 -07008743 if (!ufshcd_is_link_active(hba)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008744 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8745 if (ret)
8746 goto vcc_disable;
8747 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8748 if (ret)
8749 goto vccq_lpm;
8750 }
Subhash Jadavani69d72ac2016-10-27 17:26:24 -07008751 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008752 }
8753 goto out;
8754
8755vccq_lpm:
8756 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8757vcc_disable:
8758 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8759out:
8760 return ret;
8761}
Bart Van Assche9bb25e52021-07-21 20:34:24 -07008762#endif /* CONFIG_PM */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008763
8764static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8765{
Can Guodd7143e2020-10-27 12:10:36 -07008766 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008767 ufshcd_setup_hba_vreg(hba, false);
8768}
8769
8770static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8771{
Can Guodd7143e2020-10-27 12:10:36 -07008772 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008773 ufshcd_setup_hba_vreg(hba, true);
8774}
8775
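/**
 * __ufshcd_wl_suspend - suspend the UFS device WLUN
 * @hba: per-adapter instance
 * @pm_op: runtime PM, system PM or shutdown
 *
 * Puts the device and the UniPro link in the low power state derived from
 * the requested power management level.
 *
 * Returns zero on success, non-zero error value on failure.
 */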
Asutosh Dasb294ff32021-04-23 17:20:16 -07008776static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008777{
8778 int ret = 0;
Adrian Hunterfe1d4c22020-11-03 16:14:02 +02008779 int check_for_bkops;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008780 enum ufs_pm_level pm_lvl;
8781 enum ufs_dev_pwr_mode req_dev_pwr_mode;
8782 enum uic_link_state req_link_state;
8783
Asutosh Dasb294ff32021-04-23 17:20:16 -07008784 hba->pm_op_in_progress = true;
Bart Van Assche4c6cb9e2021-05-13 10:12:29 -07008785 if (pm_op != UFS_SHUTDOWN_PM) {
8786 pm_lvl = pm_op == UFS_RUNTIME_PM ?
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008787 hba->rpm_lvl : hba->spm_lvl;
8788 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8789 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8790 } else {
8791 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8792 req_link_state = UIC_LINK_OFF_STATE;
8793 }
8794
Daejun Parkf02bc972021-07-12 17:58:30 +09008795 ufshpb_suspend(hba);
8796
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008797 /*
8798 * If we can't transition into any of the low power modes
8799 * just gate the clocks.
8800 */
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008801 ufshcd_hold(hba, false);
8802 hba->clk_gating.is_suspended = true;
8803
Stanley Chu348e1bc2021-01-20 23:01:42 +08008804 if (ufshcd_is_clkscaling_supported(hba))
8805 ufshcd_clk_scaling_suspend(hba, true);
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07008806
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008807 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8808 req_link_state == UIC_LINK_ACTIVE_STATE) {
Asutosh Dasb294ff32021-04-23 17:20:16 -07008809 goto vops_suspend;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008810 }
8811
8812 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8813 (req_link_state == hba->uic_link_state))
Asutosh Dasb294ff32021-04-23 17:20:16 -07008814 goto enable_scaling;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008815
8816 /* UFS device & link must be active before we enter in this function */
8817 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8818 ret = -EINVAL;
Asutosh Dasb294ff32021-04-23 17:20:16 -07008819 goto enable_scaling;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008820 }
8821
Bart Van Assche4c6cb9e2021-05-13 10:12:29 -07008822 if (pm_op == UFS_RUNTIME_PM) {
Subhash Jadavani374a2462014-09-25 15:32:35 +03008823 if (ufshcd_can_autobkops_during_suspend(hba)) {
8824 /*
8825			 * The device is idle with no requests in the queue;
8826			 * allow background operations if the bkops status shows
8827			 * that performance might be impacted.
8828 */
8829 ret = ufshcd_urgent_bkops(hba);
8830 if (ret)
Asutosh Dasb294ff32021-04-23 17:20:16 -07008831 goto enable_scaling;
Subhash Jadavani374a2462014-09-25 15:32:35 +03008832 } else {
8833 /* make sure that auto bkops is disabled */
8834 ufshcd_disable_auto_bkops(hba);
8835 }
Asutosh Das3d17b9b2020-04-22 14:41:42 -07008836 /*
Stanley Chu51dd9052020-05-22 16:32:12 +08008837		 * If the device needs to do BKOPs or a WB buffer flush during
8838		 * Hibern8, keep the device power mode as "active power mode"
8839		 * and keep VCC supplied.
Asutosh Das3d17b9b2020-04-22 14:41:42 -07008840 */
Stanley Chu51dd9052020-05-22 16:32:12 +08008841 hba->dev_info.b_rpm_dev_flush_capable =
8842 hba->auto_bkops_enabled ||
8843 (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
8844 ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
8845 ufshcd_is_auto_hibern8_enabled(hba))) &&
8846 ufshcd_wb_need_flush(hba));
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008847 }
8848
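	/* Make sure any pending exception event handling has completed. */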
Kiwoong Kim6948a962020-12-19 15:40:39 +09008849 flush_work(&hba->eeh_work);
8850
Peter Wang9561f582021-10-06 13:47:05 +08008851 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
8852 if (ret)
8853 goto enable_scaling;
8854
Stanley Chu51dd9052020-05-22 16:32:12 +08008855 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
Bart Van Assche4c6cb9e2021-05-13 10:12:29 -07008856 if (pm_op != UFS_RUNTIME_PM)
Stanley Chu51dd9052020-05-22 16:32:12 +08008857 /* ensure that bkops is disabled */
8858 ufshcd_disable_auto_bkops(hba);
Stanley Chu51dd9052020-05-22 16:32:12 +08008859
8860 if (!hba->dev_info.b_rpm_dev_flush_capable) {
8861 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8862 if (ret)
Asutosh Dasb294ff32021-04-23 17:20:16 -07008863 goto enable_scaling;
Stanley Chu51dd9052020-05-22 16:32:12 +08008864 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008865 }
8866
Adrian Hunterfe1d4c22020-11-03 16:14:02 +02008867 /*
8868 * In the case of DeepSleep, the device is expected to remain powered
8869 * with the link off, so do not check for bkops.
8870 */
8871 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
8872 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008873 if (ret)
8874 goto set_dev_active;
8875
Asutosh Dasb294ff32021-04-23 17:20:16 -07008876vops_suspend:
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008877 /*
8878 * Call vendor specific suspend callback. As these callbacks may access
8879	 * vendor specific host controller register space, call them before the
8880	 * host clocks are turned off.
8881 */
Peter Wang9561f582021-10-06 13:47:05 +08008882 ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008883 if (ret)
8884 goto set_link_active;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008885 goto out;
8886
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008887set_link_active:
Adrian Hunterfe1d4c22020-11-03 16:14:02 +02008888 /*
8889 * Device hardware reset is required to exit DeepSleep. Also, for
8890 * DeepSleep, the link is off so host reset and restore will be done
8891 * further below.
8892 */
8893 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
Stanley Chu31a5d9c2020-12-08 21:56:35 +08008894 ufshcd_device_reset(hba);
Adrian Hunterfe1d4c22020-11-03 16:14:02 +02008895 WARN_ON(!ufshcd_is_link_off(hba));
8896 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008897 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
8898 ufshcd_set_link_active(hba);
8899 else if (ufshcd_is_link_off(hba))
8900 ufshcd_host_reset_and_restore(hba);
8901set_dev_active:
Adrian Hunterfe1d4c22020-11-03 16:14:02 +02008902 /* Can also get here needing to exit DeepSleep */
8903 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
Stanley Chu31a5d9c2020-12-08 21:56:35 +08008904 ufshcd_device_reset(hba);
Adrian Hunterfe1d4c22020-11-03 16:14:02 +02008905 ufshcd_host_reset_and_restore(hba);
8906 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008907 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8908 ufshcd_disable_auto_bkops(hba);
Asutosh Dasb294ff32021-04-23 17:20:16 -07008909enable_scaling:
Stanley Chu348e1bc2021-01-20 23:01:42 +08008910 if (ufshcd_is_clkscaling_supported(hba))
8911 ufshcd_clk_scaling_suspend(hba, false);
8912
Stanley Chu51dd9052020-05-22 16:32:12 +08008913 hba->dev_info.b_rpm_dev_flush_capable = false;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008914out:
Stanley Chu51dd9052020-05-22 16:32:12 +08008915 if (hba->dev_info.b_rpm_dev_flush_capable) {
8916 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
8917 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
8918 }
8919
Asutosh Dasb294ff32021-04-23 17:20:16 -07008920 if (ret) {
8921 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
8922 hba->clk_gating.is_suspended = false;
8923 ufshcd_release(hba);
Daejun Parkf02bc972021-07-12 17:58:30 +09008924 ufshpb_resume(hba);
Asutosh Dasb294ff32021-04-23 17:20:16 -07008925 }
8926 hba->pm_op_in_progress = false;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008927 return ret;
8928}
8929
YueHaibing75d645a2021-06-17 11:13:26 +08008930#ifdef CONFIG_PM
Asutosh Dasb294ff32021-04-23 17:20:16 -07008931static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008932{
8933 int ret;
Asutosh Dasb294ff32021-04-23 17:20:16 -07008934 enum uic_link_state old_link_state = hba->uic_link_state;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008935
Asutosh Dasb294ff32021-04-23 17:20:16 -07008936 hba->pm_op_in_progress = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008937
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008938 /*
8939 * Call vendor specific resume callback. As these callbacks may access
8940	 * vendor specific host controller register space, call them when the
8941 * host clocks are ON.
8942 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008943 ret = ufshcd_vops_resume(hba, pm_op);
8944 if (ret)
Asutosh Dasb294ff32021-04-23 17:20:16 -07008945 goto out;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008946
Adrian Hunterfe1d4c22020-11-03 16:14:02 +02008947 /* For DeepSleep, the only supported option is to have the link off */
8948 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
8949
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008950 if (ufshcd_is_link_hibern8(hba)) {
8951 ret = ufshcd_uic_hibern8_exit(hba);
Can Guo4db7a232020-08-09 05:15:51 -07008952 if (!ret) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008953 ufshcd_set_link_active(hba);
Can Guo4db7a232020-08-09 05:15:51 -07008954 } else {
8955 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
8956 __func__, ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008957 goto vendor_suspend;
Can Guo4db7a232020-08-09 05:15:51 -07008958 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008959 } else if (ufshcd_is_link_off(hba)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008960 /*
Asutosh Das089f5b62020-04-13 23:14:48 -07008961 * A full initialization of the host and the device is
8962 * required since the link was put to off during suspend.
Adrian Hunterfe1d4c22020-11-03 16:14:02 +02008963 * Note, in the case of DeepSleep, the device will exit
8964 * DeepSleep due to device reset.
Asutosh Das089f5b62020-04-13 23:14:48 -07008965 */
8966 ret = ufshcd_reset_and_restore(hba);
8967 /*
8968 * ufshcd_reset_and_restore() should have already
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008969 * set the link state as active
8970 */
8971 if (ret || !ufshcd_is_link_active(hba))
8972 goto vendor_suspend;
8973 }
8974
8975 if (!ufshcd_is_ufs_dev_active(hba)) {
8976 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8977 if (ret)
8978 goto set_old_link_state;
8979 }
8980
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08008981 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8982 ufshcd_enable_auto_bkops(hba);
8983 else
8984 /*
8985 * If BKOPs operations are urgently needed at this moment then
8986 * keep auto-bkops enabled or else disable it.
8987 */
8988 ufshcd_urgent_bkops(hba);
8989
Adrian Huntercd469472021-02-09 08:24:36 +02008990 if (hba->ee_usr_mask)
8991 ufshcd_write_ee_control(hba);
8992
Stanley Chu348e1bc2021-01-20 23:01:42 +08008993 if (ufshcd_is_clkscaling_supported(hba))
8994 ufshcd_clk_scaling_suspend(hba, false);
Sahitya Tummala856b3482014-09-25 15:32:34 +03008995
Stanley Chu51dd9052020-05-22 16:32:12 +08008996 if (hba->dev_info.b_rpm_dev_flush_capable) {
8997 hba->dev_info.b_rpm_dev_flush_capable = false;
8998 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
8999 }
9000
Asutosh Dasb294ff32021-04-23 17:20:16 -07009001 /* Enable Auto-Hibernate if configured */
9002 ufshcd_auto_hibern8_enable(hba);
Daejun Parkf02bc972021-07-12 17:58:30 +09009003
9004 ufshpb_resume(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009005 goto out;
9006
9007set_old_link_state:
9008 ufshcd_link_state_transition(hba, old_link_state, 0);
9009vendor_suspend:
Peter Wang9561f582021-10-06 13:47:05 +08009010 ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9011 ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
Asutosh Dasb294ff32021-04-23 17:20:16 -07009012out:
9013 if (ret)
9014 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
9015 hba->clk_gating.is_suspended = false;
9016 ufshcd_release(hba);
9017 hba->pm_op_in_progress = false;
9018 return ret;
9019}
9020
9021static int ufshcd_wl_runtime_suspend(struct device *dev)
9022{
9023 struct scsi_device *sdev = to_scsi_device(dev);
9024 struct ufs_hba *hba;
9025 int ret;
9026 ktime_t start = ktime_get();
9027
9028 hba = shost_priv(sdev->host);
9029
9030 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
9031 if (ret)
9032 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9033
9034 trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
9035 ktime_to_us(ktime_sub(ktime_get(), start)),
9036 hba->curr_dev_pwr_mode, hba->uic_link_state);
9037
9038 return ret;
9039}
9040
9041static int ufshcd_wl_runtime_resume(struct device *dev)
9042{
9043 struct scsi_device *sdev = to_scsi_device(dev);
9044 struct ufs_hba *hba;
9045 int ret = 0;
9046 ktime_t start = ktime_get();
9047
9048 hba = shost_priv(sdev->host);
9049
9050 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
9051 if (ret)
9052 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9053
9054 trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
9055 ktime_to_us(ktime_sub(ktime_get(), start)),
9056 hba->curr_dev_pwr_mode, hba->uic_link_state);
9057
9058 return ret;
9059}
YueHaibing75d645a2021-06-17 11:13:26 +08009060#endif
Asutosh Dasb294ff32021-04-23 17:20:16 -07009061
9062#ifdef CONFIG_PM_SLEEP
9063static int ufshcd_wl_suspend(struct device *dev)
9064{
9065 struct scsi_device *sdev = to_scsi_device(dev);
9066 struct ufs_hba *hba;
9067 int ret = 0;
9068 ktime_t start = ktime_get();
9069
9070 hba = shost_priv(sdev->host);
9071 down(&hba->host_sem);
9072
9073 if (pm_runtime_suspended(dev))
9074 goto out;
9075
9076 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
9077 if (ret) {
9078 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9079 up(&hba->host_sem);
9080 }
9081
9082out:
9083 if (!ret)
9084 hba->is_sys_suspended = true;
9085 trace_ufshcd_wl_suspend(dev_name(dev), ret,
9086 ktime_to_us(ktime_sub(ktime_get(), start)),
9087 hba->curr_dev_pwr_mode, hba->uic_link_state);
9088
9089 return ret;
9090}
9091
9092static int ufshcd_wl_resume(struct device *dev)
9093{
9094 struct scsi_device *sdev = to_scsi_device(dev);
9095 struct ufs_hba *hba;
9096 int ret = 0;
9097 ktime_t start = ktime_get();
9098
9099 hba = shost_priv(sdev->host);
9100
9101 if (pm_runtime_suspended(dev))
9102 goto out;
9103
9104 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
9105 if (ret)
9106 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9107out:
9108 trace_ufshcd_wl_resume(dev_name(dev), ret,
9109 ktime_to_us(ktime_sub(ktime_get(), start)),
9110 hba->curr_dev_pwr_mode, hba->uic_link_state);
9111 if (!ret)
9112 hba->is_sys_suspended = false;
9113 up(&hba->host_sem);
9114 return ret;
9115}
9116#endif
9117
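/*
 * Quiesce all SCSI devices and put the UFS device into power-down mode with
 * the link off before the platform powers off.
 */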
9118static void ufshcd_wl_shutdown(struct device *dev)
9119{
9120 struct scsi_device *sdev = to_scsi_device(dev);
9121 struct ufs_hba *hba;
9122
9123 hba = shost_priv(sdev->host);
9124
9125 down(&hba->host_sem);
9126 hba->shutting_down = true;
9127 up(&hba->host_sem);
9128
9129 /* Turn on everything while shutting down */
9130 ufshcd_rpm_get_sync(hba);
9131 scsi_device_quiesce(sdev);
9132 shost_for_each_device(sdev, hba->host) {
9133 if (sdev == hba->sdev_ufs_device)
9134 continue;
9135 scsi_device_quiesce(sdev);
9136 }
9137 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
9138}
9139
9140/**
9141 * ufshcd_suspend - helper function for suspend operations
9142 * @hba: per adapter instance
9143 *
9144 * This function will disable irqs, turn off clocks
9145 * and set vreg and hba-vreg in lpm mode.
Asutosh Dasb294ff32021-04-23 17:20:16 -07009146 */
9147static int ufshcd_suspend(struct ufs_hba *hba)
9148{
9149 int ret;
9150
9151 if (!hba->is_powered)
9152 return 0;
9153 /*
9154	 * Disable the host irq as there won't be any host controller
9155	 * transaction expected till resume.
9156 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009157 ufshcd_disable_irq(hba);
Asutosh Dasb294ff32021-04-23 17:20:16 -07009158 ret = ufshcd_setup_clocks(hba, false);
9159 if (ret) {
9160 ufshcd_enable_irq(hba);
9161 return ret;
9162 }
Can Guo2dec9472020-08-09 05:15:47 -07009163 if (ufshcd_is_clkgating_allowed(hba)) {
9164 hba->clk_gating.state = CLKS_OFF;
9165 trace_ufshcd_clk_gating(dev_name(hba->dev),
9166 hba->clk_gating.state);
9167 }
Asutosh Dasb294ff32021-04-23 17:20:16 -07009168
9169 ufshcd_vreg_set_lpm(hba);
9170 /* Put the host controller in low power mode if possible */
9171 ufshcd_hba_vreg_set_lpm(hba);
9172 return ret;
9173}
9174
Bart Van Assche9bb25e52021-07-21 20:34:24 -07009175#ifdef CONFIG_PM
Asutosh Dasb294ff32021-04-23 17:20:16 -07009176/**
9177 * ufshcd_resume - helper function for resume operations
9178 * @hba: per adapter instance
9179 *
9180 * This function basically turns on the regulators, clocks and
9181 * irqs of the hba.
Asutosh Dasb294ff32021-04-23 17:20:16 -07009182 *
9183 * Returns 0 for success and non-zero for failure
9184 */
9185static int ufshcd_resume(struct ufs_hba *hba)
9186{
9187 int ret;
9188
9189 if (!hba->is_powered)
9190 return 0;
9191
9192 ufshcd_hba_vreg_set_hpm(hba);
9193 ret = ufshcd_vreg_set_hpm(hba);
9194 if (ret)
9195 goto out;
9196
9197 /* Make sure clocks are enabled before accessing controller */
9198 ret = ufshcd_setup_clocks(hba, true);
9199 if (ret)
9200 goto disable_vreg;
9201
9202	/* enable the host irq as the host controller will be active soon */
9203 ufshcd_enable_irq(hba);
9204 goto out;
9205
Ziqi Chen528db9e2021-01-08 18:56:24 +08009206disable_vreg:
9207 ufshcd_vreg_set_lpm(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009208out:
Stanley Chu8808b4e2019-07-10 21:38:21 +08009209 if (ret)
Stanley Chue965e5e2020-12-05 19:58:59 +08009210 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009211 return ret;
9212}
Bart Van Assche9bb25e52021-07-21 20:34:24 -07009213#endif /* CONFIG_PM */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009214
Bart Van Assche9bb25e52021-07-21 20:34:24 -07009215#ifdef CONFIG_PM_SLEEP
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009216/**
Bart Van Asschef1ecbe12021-07-21 20:34:23 -07009217 * ufshcd_system_suspend - system suspend callback
9218 * @dev: Device associated with the UFS controller.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009219 *
Bart Van Asschef1ecbe12021-07-21 20:34:23 -07009220 * Executed before putting the system into a sleep state in which the contents
9221 * of main memory are preserved.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009222 *
9223 * Returns 0 for success and non-zero for failure
9224 */
Bart Van Asschef1ecbe12021-07-21 20:34:23 -07009225int ufshcd_system_suspend(struct device *dev)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009226{
Bart Van Asschef1ecbe12021-07-21 20:34:23 -07009227 struct ufs_hba *hba = dev_get_drvdata(dev);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009228 int ret = 0;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08009229 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009230
Asutosh Dasb294ff32021-04-23 17:20:16 -07009231 if (pm_runtime_suspended(hba->dev))
subhashj@codeaurora.org0b257732016-11-23 16:33:08 -08009232 goto out;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009233
Asutosh Dasb294ff32021-04-23 17:20:16 -07009234 ret = ufshcd_suspend(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009235out:
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08009236 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
9237 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08009238 hba->curr_dev_pwr_mode, hba->uic_link_state);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009239 return ret;
9240}
9241EXPORT_SYMBOL(ufshcd_system_suspend);
9242
9243/**
Bart Van Asschef1ecbe12021-07-21 20:34:23 -07009244 * ufshcd_system_resume - system resume callback
9245 * @dev: Device associated with the UFS controller.
9246 *
9247 * Executed after waking the system up from a sleep state in which the contents
9248 * of main memory were preserved.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009249 *
9250 * Returns 0 for success and non-zero for failure
9251 */
Bart Van Asschef1ecbe12021-07-21 20:34:23 -07009252int ufshcd_system_resume(struct device *dev)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009253{
Bart Van Asschef1ecbe12021-07-21 20:34:23 -07009254 struct ufs_hba *hba = dev_get_drvdata(dev);
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08009255 ktime_t start = ktime_get();
Bart Van Asschef1ecbe12021-07-21 20:34:23 -07009256 int ret = 0;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08009257
Asutosh Dasb294ff32021-04-23 17:20:16 -07009258 if (pm_runtime_suspended(hba->dev))
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08009259 goto out;
Asutosh Dasb294ff32021-04-23 17:20:16 -07009260
9261 ret = ufshcd_resume(hba);
9262
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08009263out:
9264 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
9265 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08009266 hba->curr_dev_pwr_mode, hba->uic_link_state);
Asutosh Dasb294ff32021-04-23 17:20:16 -07009267
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08009268 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009269}
9270EXPORT_SYMBOL(ufshcd_system_resume);
Bart Van Assche9bb25e52021-07-21 20:34:24 -07009271#endif /* CONFIG_PM_SLEEP */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009272
Bart Van Assche9bb25e52021-07-21 20:34:24 -07009273#ifdef CONFIG_PM
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009274/**
Bart Van Asschef1ecbe12021-07-21 20:34:23 -07009275 * ufshcd_runtime_suspend - runtime suspend callback
9276 * @dev: Device associated with the UFS controller.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009277 *
9278 * Check the description of ufshcd_suspend() function for more details.
9279 *
9280 * Returns 0 for success and non-zero for failure
9281 */
Bart Van Asschef1ecbe12021-07-21 20:34:23 -07009282int ufshcd_runtime_suspend(struct device *dev)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009283{
Bart Van Asschef1ecbe12021-07-21 20:34:23 -07009284 struct ufs_hba *hba = dev_get_drvdata(dev);
Asutosh Dasb294ff32021-04-23 17:20:16 -07009285 int ret;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08009286 ktime_t start = ktime_get();
9287
Asutosh Dasb294ff32021-04-23 17:20:16 -07009288 ret = ufshcd_suspend(hba);
9289
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08009290 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
9291 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08009292 hba->curr_dev_pwr_mode, hba->uic_link_state);
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08009293 return ret;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05309294}
9295EXPORT_SYMBOL(ufshcd_runtime_suspend);
9296
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009297/**
9298 * ufshcd_runtime_resume - runtime resume routine
Bart Van Asschef1ecbe12021-07-21 20:34:23 -07009299 * @dev: Device associated with the UFS controller.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009300 *
Asutosh Dasb294ff32021-04-23 17:20:16 -07009301 * This function basically brings the controller
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009302 * to active state. Following operations are done in this function:
9303 *
9304 * 1. Turn on all the controller related clocks
Asutosh Dasb294ff32021-04-23 17:20:16 -07009305 * 2. Turn ON VCC rail
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009306 */
Bart Van Asschef1ecbe12021-07-21 20:34:23 -07009307int ufshcd_runtime_resume(struct device *dev)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05309308{
Bart Van Asschef1ecbe12021-07-21 20:34:23 -07009309 struct ufs_hba *hba = dev_get_drvdata(dev);
Asutosh Dasb294ff32021-04-23 17:20:16 -07009310 int ret;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08009311 ktime_t start = ktime_get();
9312
Asutosh Dasb294ff32021-04-23 17:20:16 -07009313 ret = ufshcd_resume(hba);
9314
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08009315 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
9316 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08009317 hba->curr_dev_pwr_mode, hba->uic_link_state);
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08009318 return ret;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05309319}
9320EXPORT_SYMBOL(ufshcd_runtime_resume);
Bart Van Assche9bb25e52021-07-21 20:34:24 -07009321#endif /* CONFIG_PM */
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05309322
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309323/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009324 * ufshcd_shutdown - shutdown routine
9325 * @hba: per adapter instance
9326 *
Asutosh Dasb294ff32021-04-23 17:20:16 -07009327 * This function would turn off both UFS device and UFS hba
9328 * regulators. It would also disable clocks.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009329 *
9330 * Returns 0 always to allow force shutdown even in case of errors.
9331 */
9332int ufshcd_shutdown(struct ufs_hba *hba)
9333{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009334 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
9335 goto out;
9336
Stanley Chue92643d2020-11-19 14:29:16 +08009337 pm_runtime_get_sync(hba->dev);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009338
Asutosh Dasb294ff32021-04-23 17:20:16 -07009339 ufshcd_suspend(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009340out:
Can Guo88a92d62020-12-02 04:04:01 -08009341 hba->is_powered = false;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009342 /* allow force shutdown even in case of errors */
9343 return 0;
9344}
9345EXPORT_SYMBOL(ufshcd_shutdown);
9346
9347/**
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309348 * ufshcd_remove - de-allocate SCSI host and host memory space
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309349 * data structures
Bart Van Assche8aa29f12018-03-01 15:07:20 -08009350 * @hba: per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309351 */
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309352void ufshcd_remove(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309353{
Asutosh Dasb294ff32021-04-23 17:20:16 -07009354 if (hba->sdev_ufs_device)
9355 ufshcd_rpm_get_sync(hba);
Avri Altmane88e2d32021-09-15 09:04:06 +03009356 ufs_hwmon_remove(hba);
Avri Altmandf032bf2018-10-07 17:30:35 +03009357 ufs_bsg_remove(hba);
Daejun Park4b5f4902021-07-12 17:58:59 +09009358 ufshpb_remove(hba);
Stanislav Nijnikovcbb68132018-02-15 14:14:01 +02009359 ufs_sysfs_remove_nodes(hba->dev);
Bart Van Assche69a6c262019-12-09 10:13:09 -08009360 blk_cleanup_queue(hba->tmf_queue);
9361 blk_mq_free_tag_set(&hba->tmf_tag_set);
Akinobu Mitacfdf9c92013-07-30 00:36:03 +05309362 scsi_remove_host(hba->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309363 /* disable interrupts */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05309364 ufshcd_disable_intr(hba, hba->intr_mask);
Bart Van Assche5cac1092020-05-07 15:27:50 -07009365 ufshcd_hba_stop(hba);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03009366 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309367}
9368EXPORT_SYMBOL_GPL(ufshcd_remove);
9369
9370/**
Yaniv Gardi47555a52015-10-28 13:15:49 +02009371 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
9372 * @hba: pointer to Host Bus Adapter (HBA)
9373 */
9374void ufshcd_dealloc_host(struct ufs_hba *hba)
9375{
9376 scsi_host_put(hba->host);
9377}
9378EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
9379
9380/**
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09009381 * ufshcd_set_dma_mask - Set dma mask based on the controller
9382 * addressing capability
9383 * @hba: per adapter instance
9384 *
9385 * Returns 0 for success, non-zero for failure
9386 */
9387static int ufshcd_set_dma_mask(struct ufs_hba *hba)
9388{
9389 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
9390 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
9391 return 0;
9392 }
9393 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
9394}
9395
9396/**
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03009397 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309398 * @dev: pointer to device handle
9399 * @hba_handle: driver private handle
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309400 * Returns 0 on success, non-zero value on failure
9401 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03009402int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309403{
9404 struct Scsi_Host *host;
9405 struct ufs_hba *hba;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03009406 int err = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309407
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309408 if (!dev) {
9409 dev_err(dev,
9410 "Invalid memory reference for dev is NULL\n");
9411 err = -ENODEV;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309412 goto out_error;
9413 }
9414
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309415 host = scsi_host_alloc(&ufshcd_driver_template,
9416 sizeof(struct ufs_hba));
9417 if (!host) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309418 dev_err(dev, "scsi_host_alloc failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309419 err = -ENOMEM;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309420 goto out_error;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309421 }
9422 hba = shost_priv(host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309423 hba->host = host;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309424 hba->dev = dev;
Subhash Jadavani9e1e8a72018-10-16 14:29:41 +05309425 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
Adrian Hunter1cbc9ad2021-08-31 17:53:17 +03009426 hba->nop_out_timeout = NOP_OUT_TIMEOUT;
Szymon Mielczarek566ec9a2017-06-05 11:36:54 +03009427 INIT_LIST_HEAD(&hba->clk_list_head);
Bart Van Assche169f5eb2021-07-21 20:34:34 -07009428 spin_lock_init(&hba->outstanding_lock);
9429
9430 *hba_handle = hba;
Szymon Mielczarek566ec9a2017-06-05 11:36:54 +03009431
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03009432out_error:
9433 return err;
9434}
9435EXPORT_SYMBOL(ufshcd_alloc_host);
9436
Bart Van Assche69a6c262019-12-09 10:13:09 -08009437/* This function exists because blk_mq_alloc_tag_set() requires this. */
9438static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
9439 const struct blk_mq_queue_data *qd)
9440{
9441 WARN_ON_ONCE(true);
9442 return BLK_STS_NOTSUPP;
9443}
9444
9445static const struct blk_mq_ops ufshcd_tmf_ops = {
9446 .queue_rq = ufshcd_queue_tmf,
9447};
9448
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03009449/**
9450 * ufshcd_init - Driver initialization routine
9451 * @hba: per-adapter instance
9452 * @mmio_base: base register address
9453 * @irq: Interrupt line of device
9454 * Returns 0 on success, non-zero value on failure
9455 */
9456int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
9457{
9458 int err;
9459 struct Scsi_Host *host = hba->host;
9460 struct device *dev = hba->dev;
Adrian Hunter88b099002021-09-17 17:43:49 +03009461 char eh_wq_name[sizeof("ufs_eh_wq_00")];
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03009462
Bart Van Assche21ad0e42021-12-03 15:19:39 -08009463 /*
9464 * dev_set_drvdata() must be called before any callbacks are registered
9465 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
9466 * sysfs).
9467 */
9468 dev_set_drvdata(dev, hba);
9469
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03009470 if (!mmio_base) {
9471 dev_err(hba->dev,
9472 "Invalid memory reference for mmio_base is NULL\n");
9473 err = -ENODEV;
9474 goto out_error;
9475 }
9476
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309477 hba->mmio_base = mmio_base;
9478 hba->irq = irq;
Stanley Chu90b84912020-05-09 17:37:13 +08009479 hba->vps = &ufs_hba_vps;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309480
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03009481 err = ufshcd_hba_init(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03009482 if (err)
9483 goto out_error;
9484
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309485 /* Read capabilities registers */
Satya Tangiraladf043c742020-07-06 20:04:14 +00009486 err = ufshcd_hba_capabilities(hba);
9487 if (err)
9488 goto out_disable;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309489
9490 /* Get UFS version supported by the controller */
9491 hba->ufs_version = ufshcd_get_ufs_version(hba);
9492
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05309493 /* Get Interrupt bit mask per version */
9494 hba->intr_mask = ufshcd_get_intr_mask(hba);
9495
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09009496 err = ufshcd_set_dma_mask(hba);
9497 if (err) {
9498 dev_err(hba->dev, "set dma mask failed\n");
9499 goto out_disable;
9500 }
9501
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309502 /* Allocate memory for host memory space */
9503 err = ufshcd_memory_alloc(hba);
9504 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309505 dev_err(hba->dev, "Memory allocation failed\n");
9506 goto out_disable;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309507 }
9508
9509 /* Configure LRB */
9510 ufshcd_host_memory_configure(hba);
9511
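	/* Reserved slots are kept back from the SCSI midlayer's tag budget. */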
Bart Van Assche945c3cc2021-12-03 15:19:42 -08009512 host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
9513 host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309514 host->max_id = UFSHCD_MAX_ID;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03009515 host->max_lun = UFS_MAX_LUNS;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309516 host->max_channel = UFSHCD_MAX_CHANNEL;
9517 host->unique_id = host->host_no;
Avri Altmana851b2b2018-10-07 17:30:34 +03009518 host->max_cmd_len = UFS_CDB_SIZE;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309519
Dolev Raviv7eb584d2014-09-25 15:32:31 +03009520 hba->max_pwr_info.is_valid = false;
9521
Adrian Hunter88b099002021-09-17 17:43:49 +03009522 /* Initialize work queues */
9523 snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
9524 hba->host->host_no);
9525 hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
9526 if (!hba->eh_wq) {
9527 dev_err(hba->dev, "%s: failed to create eh workqueue\n",
9528 __func__);
9529 err = -ENOMEM;
9530 goto out_disable;
9531 }
9532 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05309533 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309534
Can Guo9cd20d32021-01-13 19:13:28 -08009535 sema_init(&hba->host_sem, 1);
Can Guo88a92d62020-12-02 04:04:01 -08009536
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05309537 /* Initialize UIC command mutex */
9538 mutex_init(&hba->uic_cmd_mutex);
9539
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05309540 /* Initialize mutex for device management commands */
9541 mutex_init(&hba->dev_cmd.lock);
9542
Adrian Huntercd469472021-02-09 08:24:36 +02009543 /* Initialize mutex for exception event control */
9544 mutex_init(&hba->ee_ctrl_mutex);
9545
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08009546 init_rwsem(&hba->clk_scaling_lock);
9547
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03009548 ufshcd_init_clk_gating(hba);
Yaniv Gardi199ef132016-03-10 17:37:06 +02009549
Vivek Gautameebcc192018-08-07 23:17:39 +05309550 ufshcd_init_clk_scaling(hba);
9551
Yaniv Gardi199ef132016-03-10 17:37:06 +02009552 /*
9553 * In order to avoid any spurious interrupt immediately after
9554 * registering UFS controller interrupt handler, clear any pending UFS
9555 * interrupt status and disable all the UFS interrupts.
9556 */
9557 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
9558 REG_INTERRUPT_STATUS);
9559 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
9560 /*
9561 * Make sure that UFS interrupts are disabled and any pending interrupt
9562 * status is cleared before registering UFS interrupt handler.
9563 */
9564 mb();
9565
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309566 /* IRQ registration */
Seungwon Jeon2953f852013-06-27 13:31:54 +09009567 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309568 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309569 dev_err(hba->dev, "request irq failed\n");
Can Guo4543d9d2021-01-20 02:04:22 -08009570 goto out_disable;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009571 } else {
9572 hba->is_irq_enabled = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309573 }
9574
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309575 err = scsi_add_host(host, hba->dev);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309576 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309577 dev_err(hba->dev, "scsi_add_host failed\n");
Can Guo4543d9d2021-01-20 02:04:22 -08009578 goto out_disable;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309579 }
9580
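	/*
	 * Task management requests borrow their tags from a private blk-mq tag
	 * set; the queue itself never carries real I/O (see ufshcd_queue_tmf()).
	 */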
Bart Van Assche69a6c262019-12-09 10:13:09 -08009581 hba->tmf_tag_set = (struct blk_mq_tag_set) {
9582 .nr_hw_queues = 1,
9583 .queue_depth = hba->nutmrs,
9584 .ops = &ufshcd_tmf_ops,
9585 .flags = BLK_MQ_F_NO_SCHED,
9586 };
9587 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
9588 if (err < 0)
Bart Van Assche511a0832021-12-03 15:19:43 -08009589 goto out_remove_scsi_host;
Bart Van Assche69a6c262019-12-09 10:13:09 -08009590 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
9591 if (IS_ERR(hba->tmf_queue)) {
9592 err = PTR_ERR(hba->tmf_queue);
9593 goto free_tmf_tag_set;
9594 }
Adrian Hunterf5ef3362021-09-22 12:10:59 +03009595 hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
9596 sizeof(*hba->tmf_rqs), GFP_KERNEL);
9597 if (!hba->tmf_rqs) {
9598 err = -ENOMEM;
9599 goto free_tmf_queue;
9600 }
Bart Van Assche69a6c262019-12-09 10:13:09 -08009601
Bjorn Anderssond8d9f792019-08-28 12:17:54 -07009602 /* Reset the attached device */
Stanley Chu31a5d9c2020-12-08 21:56:35 +08009603 ufshcd_device_reset(hba);
Bjorn Anderssond8d9f792019-08-28 12:17:54 -07009604
Satya Tangiraladf043c742020-07-06 20:04:14 +00009605 ufshcd_init_crypto(hba);
9606
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05309607 /* Host controller enable */
9608 err = ufshcd_hba_enable(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309609 if (err) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05309610 dev_err(hba->dev, "Host controller enable failed\n");
Stanley Chue965e5e2020-12-05 19:58:59 +08009611 ufshcd_print_evt_hist(hba);
Gilad Broner6ba65582017-02-03 16:57:28 -08009612 ufshcd_print_host_state(hba);
Bart Van Assche69a6c262019-12-09 10:13:09 -08009613 goto free_tmf_queue;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309614 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05309615
subhashj@codeaurora.org0c8f7582016-12-22 18:41:11 -08009616 /*
9617 * Set the default power management level for runtime and system PM.
9618 * Default power saving mode is to keep UFS link in Hibern8 state
9619 * and UFS device in sleep state.
9620 */
9621 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9622 UFS_SLEEP_PWR_MODE,
9623 UIC_LINK_HIBERN8_STATE);
9624 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9625 UFS_SLEEP_PWR_MODE,
9626 UIC_LINK_HIBERN8_STATE);
9627
Stanley Chu51dd9052020-05-22 16:32:12 +08009628 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
9629 ufshcd_rpm_dev_flush_recheck_work);
9630
Adrian Hunterad448372018-03-20 15:07:38 +02009631	/* Set the default auto-hibernate idle timer value to 150 ms */
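	/* Timer value 150 with scale 3 (1 ms units in the AHIT encoding) gives 150 ms. */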
Stanley Chuf571b372019-05-21 14:44:53 +08009632 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
Adrian Hunterad448372018-03-20 15:07:38 +02009633 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
9634 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
9635 }
9636
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05309637 /* Hold auto suspend until async scan completes */
9638 pm_runtime_get_sync(dev);
Subhash Jadavani38135532018-05-03 16:37:18 +05309639 atomic_set(&hba->scsi_block_reqs_cnt, 0);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009640 /*
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08009641	 * We are assuming that the device wasn't put in a sleep/power-down
9642	 * state during the boot stage before the kernel took over.
9643 * This assumption helps avoid doing link startup twice during
9644 * ufshcd_probe_hba().
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009645 */
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08009646 ufshcd_set_ufs_dev_active(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009647
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05309648 async_schedule(ufshcd_async_scan, hba);
Stanislav Nijnikovcbb68132018-02-15 14:14:01 +02009649 ufs_sysfs_add_nodes(hba->dev);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05309650
Vincent Palomares10845142021-07-27 18:27:43 -07009651 device_enable_async_suspend(dev);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309652 return 0;
9653
Bart Van Assche69a6c262019-12-09 10:13:09 -08009654free_tmf_queue:
9655 blk_cleanup_queue(hba->tmf_queue);
9656free_tmf_tag_set:
9657 blk_mq_free_tag_set(&hba->tmf_tag_set);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309658out_remove_scsi_host:
9659 scsi_remove_host(hba->host);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309660out_disable:
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009661 hba->is_irq_enabled = false;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03009662 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309663out_error:
9664 return err;
9665}
9666EXPORT_SYMBOL_GPL(ufshcd_init);
9667
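/*
 * Drop the runtime PM reference taken in ufshcd_suspend_prepare() once the
 * system resume sequence has completed.
 */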
Asutosh Dasb294ff32021-04-23 17:20:16 -07009668void ufshcd_resume_complete(struct device *dev)
9669{
9670 struct ufs_hba *hba = dev_get_drvdata(dev);
9671
9672 if (hba->complete_put) {
9673 ufshcd_rpm_put(hba);
9674 hba->complete_put = false;
9675 }
Asutosh Dasb294ff32021-04-23 17:20:16 -07009676}
9677EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
9678
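/*
 * Check whether the device WLUN is already runtime suspended in the same
 * device power mode and link state that system suspend (spm_lvl) would
 * target, so a runtime resume before system suspend can be skipped.
 */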
Adrian Hunterddba1cf2021-10-27 16:06:14 +03009679static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
9680{
9681 struct device *dev = &hba->sdev_ufs_device->sdev_gendev;
9682 enum ufs_dev_pwr_mode dev_pwr_mode;
9683 enum uic_link_state link_state;
9684 unsigned long flags;
9685 bool res;
9686
9687 spin_lock_irqsave(&dev->power.lock, flags);
9688 dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
9689 link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
9690 res = pm_runtime_suspended(dev) &&
9691 hba->curr_dev_pwr_mode == dev_pwr_mode &&
9692 hba->uic_link_state == link_state &&
9693 !hba->dev_info.b_rpm_dev_flush_capable;
9694 spin_unlock_irqrestore(&dev->power.lock, flags);
9695
9696 return res;
9697}
9698
9699int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
Asutosh Dasb294ff32021-04-23 17:20:16 -07009700{
9701 struct ufs_hba *hba = dev_get_drvdata(dev);
9702 int ret;
9703
9704 /*
9705	 * SCSI assumes that runtime PM and system PM for SCSI drivers are the
9706	 * same, so it doesn't wake up the device for system suspend if it is
9707	 * already runtime suspended. But UFS doesn't follow that.
9708	 * Refer to ufshcd_resume_complete().
9709 */
9710 if (hba->sdev_ufs_device) {
Adrian Hunterddba1cf2021-10-27 16:06:14 +03009711 /* Prevent runtime suspend */
9712 ufshcd_rpm_get_noresume(hba);
9713 /*
9714 * Check if already runtime suspended in same state as system
9715 * suspend would be.
9716 */
9717 if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
9718 /* RPM state is not ok for SPM, so runtime resume */
9719 ret = ufshcd_rpm_resume(hba);
9720 if (ret < 0 && ret != -EACCES) {
9721 ufshcd_rpm_put(hba);
9722 return ret;
9723 }
Asutosh Dasb294ff32021-04-23 17:20:16 -07009724 }
9725 hba->complete_put = true;
9726 }
Asutosh Dasb294ff32021-04-23 17:20:16 -07009727 return 0;
9728}
Adrian Hunterddba1cf2021-10-27 16:06:14 +03009729EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
9730
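/* By default a matching runtime-suspended state may satisfy system suspend. */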
9731int ufshcd_suspend_prepare(struct device *dev)
9732{
9733 return __ufshcd_suspend_prepare(dev, true);
9734}
Asutosh Dasb294ff32021-04-23 17:20:16 -07009735EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
9736
9737#ifdef CONFIG_PM_SLEEP
9738static int ufshcd_wl_poweroff(struct device *dev)
9739{
9740 struct scsi_device *sdev = to_scsi_device(dev);
9741 struct ufs_hba *hba = shost_priv(sdev->host);
9742
9743 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
9744 return 0;
9745}
9746#endif
9747
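/*
 * Bind only to the UFS device WLUN and set up runtime PM (with a zero
 * autosuspend delay) on its request queue.
 */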
9748static int ufshcd_wl_probe(struct device *dev)
9749{
9750 struct scsi_device *sdev = to_scsi_device(dev);
9751
9752 if (!is_device_wlun(sdev))
9753 return -ENODEV;
9754
9755 blk_pm_runtime_init(sdev->request_queue, dev);
9756 pm_runtime_set_autosuspend_delay(dev, 0);
9757 pm_runtime_allow(dev);
9758
9759 return 0;
9760}
9761
9762static int ufshcd_wl_remove(struct device *dev)
9763{
9764 pm_runtime_forbid(dev);
9765 return 0;
9766}
9767
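/*
 * PM callbacks for the UFS device WLUN. SET_RUNTIME_PM_OPS() expands to
 * nothing when CONFIG_PM is unset, matching the #ifdef CONFIG_PM guards
 * around the runtime handlers above.
 */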
9768static const struct dev_pm_ops ufshcd_wl_pm_ops = {
9769#ifdef CONFIG_PM_SLEEP
9770 .suspend = ufshcd_wl_suspend,
9771 .resume = ufshcd_wl_resume,
9772 .freeze = ufshcd_wl_suspend,
9773 .thaw = ufshcd_wl_resume,
9774 .poweroff = ufshcd_wl_poweroff,
9775 .restore = ufshcd_wl_resume,
9776#endif
9777 SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
9778};
9779
9780/*
9781 * ufs_dev_wlun_template - describes ufs device wlun
9782 * ufs-device wlun - used to send pm commands
9783 * All luns are consumers of ufs-device wlun.
9784 *
9785 * Currently, no sd driver is present for wluns.
9786 * Hence no specific pm operations are performed.
9787 * With ufs design, SSU should be sent to ufs-device wlun.
9788 * Hence register a scsi driver for ufs wluns only.
9789 */
9790static struct scsi_driver ufs_dev_wlun_template = {
9791 .gendrv = {
9792 .name = "ufs_device_wlun",
9793 .owner = THIS_MODULE,
9794 .probe = ufshcd_wl_probe,
9795 .remove = ufshcd_wl_remove,
9796 .pm = &ufshcd_wl_pm_ops,
9797 .shutdown = ufshcd_wl_shutdown,
9798 },
9799};
9800
Adrian Hunterb6cacaf2021-01-07 09:25:38 +02009801static int __init ufshcd_core_init(void)
9802{
Asutosh Dasb294ff32021-04-23 17:20:16 -07009803 int ret;
9804
Bart Van Assche9a868c82021-10-20 14:40:23 -07009805 /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
9806 static_assert(sizeof(struct utp_transfer_cmd_desc) ==
9807 2 * ALIGNED_UPIU_SIZE +
9808 SG_ALL * sizeof(struct ufshcd_sg_entry));
9809
Adrian Hunterb6cacaf2021-01-07 09:25:38 +02009810 ufs_debugfs_init();
Asutosh Dasb294ff32021-04-23 17:20:16 -07009811
9812 ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
9813 if (ret)
Bart Van Asscheedc05962021-10-01 11:20:15 -07009814 ufs_debugfs_exit();
Asutosh Dasb294ff32021-04-23 17:20:16 -07009815 return ret;
Adrian Hunterb6cacaf2021-01-07 09:25:38 +02009816}
9817
9818static void __exit ufshcd_core_exit(void)
9819{
9820 ufs_debugfs_exit();
Asutosh Dasb294ff32021-04-23 17:20:16 -07009821 scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
Adrian Hunterb6cacaf2021-01-07 09:25:38 +02009822}
9823
9824module_init(ufshcd_core_init);
9825module_exit(ufshcd_core_exit);
9826
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309827MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
9828MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
Vinayak Holikattie0eca632013-02-25 21:44:33 +05309829MODULE_DESCRIPTION("Generic UFS host controller driver Core");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309830MODULE_LICENSE("GPL");
9831MODULE_VERSION(UFSHCD_DRIVER_VERSION);