/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs_bsg.h"
#include <asm/unaligned.h>
#include <linux/blkdev.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */

#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({                                                              \
		int _ret;                                               \
		if (_on)                                                \
			_ret = ufshcd_enable_vreg(_dev, _vreg);         \
		else                                                    \
			_ret = ufshcd_disable_vreg(_dev, _vreg);        \
		_ret;                                                   \
	})

#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
	size_t __len = (len);                                            \
	print_hex_dump(KERN_ERR, prefix_str,                             \
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);                        \
} while (0)

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4)
		regs[pos / 4] = ufshcd_readl(hba, offset + pos);

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
			(ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}

static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
	UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
		UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),

	END_FIX
};

static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static bool ufshcd_wb_sup(struct ufs_hba *hba);
static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);

static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}

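/**
 * ufshcd_wb_config - enable WriteBooster and its flush handling
 * @hba: per adapter instance
 *
 * WriteBooster is an optional feature, so failures here are only logged
 * and do not fail controller initialization.
 */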
static inline void ufshcd_wb_config(struct ufs_hba *hba)
{
	int ret;

	if (!ufshcd_wb_sup(hba))
		return;

	ret = ufshcd_wb_ctrl(hba, true);
	if (ret)
		dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
	else
		dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
	ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
	if (ret)
		dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
			__func__, ret);
	ufshcd_wb_toggle_flush(hba, true);
}

static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}

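/**
 * ufshcd_add_cmd_upiu_trace - trace the command UPIU of a request
 * @hba: per adapter instance
 * @tag: doorbell tag of the request
 * @str: event label emitted with the trace point
 */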
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
		const char *str)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
		const char *str)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
}

static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
		const char *str)
{
	int off = (int)tag - hba->nutrs;
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];

	trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
			&descp->input_param1);
}

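/**
 * ufshcd_add_command_trace - emit command trace points for a SCSI command
 * @hba: per adapter instance
 * @tag: doorbell tag of the request
 * @str: event label emitted with the trace points
 *
 * LBA and transfer length are only decoded for READ(10) and WRITE(10);
 * other opcodes are traced with the default value of -1.
 */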
static void ufshcd_add_command_trace(struct ufs_hba *hba,
		unsigned int tag, const char *str)
{
	sector_t lba = -1;
	u8 opcode = 0;
	u32 intr, doorbell;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	int transfer_len = -1;

	if (!trace_ufshcd_command_enabled()) {
		/* trace UPIU W/O tracing command */
		if (cmd)
			ufshcd_add_cmd_upiu_trace(hba, tag, str);
		return;
	}

	if (cmd) { /* data phase exists */
		/* trace UPIU also */
		ufshcd_add_cmd_upiu_trace(hba, tag, str);
		opcode = cmd->cmnd[0];
		if ((opcode == READ_10) || (opcode == WRITE_10)) {
			/*
			 * Currently we only fully trace read(10) and write(10)
			 * commands
			 */
			if (cmd->request && cmd->request->bio)
				lba = cmd->request->bio->bi_iter.bi_sector;
			transfer_len = be32_to_cpu(
				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		}
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	trace_ufshcd_command(dev_name(hba->dev), str, tag,
				doorbell, transfer_len, intr, lba, opcode);
}

static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}

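/**
 * ufshcd_print_err_hist - dump one error history ring to the kernel log
 * @hba: per adapter instance
 * @err_hist: error history ring buffer to print
 * @err_name: name printed with each entry
 *
 * Entries are printed oldest first, starting at the current ring position;
 * a placeholder message is printed if the ring holds no records.
 */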
static void ufshcd_print_err_hist(struct ufs_hba *hba,
				  struct ufs_err_reg_hist *err_hist,
				  char *err_name)
{
	int i;
	bool found = false;

	for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
		int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;

		if (err_hist->tstamp[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s\n", err_name);
}

static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
		hba->ufs_version, hba->capabilities);
	dev_err(hba->dev,
		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
		hba->ufs_stats.hibern8_exit_cnt);

	ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
			      "auto_hibern8_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
			      "link_startup_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
			      "suspend_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");

	ufshcd_print_clk_freqs(hba);

	ufshcd_vops_dbg_register_dump(hba);
}

static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
				tag, ktime_to_us(lrbp->issue_time_stamp));
		dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
				tag, ktime_to_us(lrbp->compl_time_stamp));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
			tag, (u64)lrbp->utrd_dma_addr);

		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));

		prdt_length = le16_to_cpu(
			lrbp->utr_descriptor_ptr->prd_table_length);
		dev_err(hba->dev,
			"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
			tag, prdt_length,
			(u64)lrbp->ucd_prdt_dma_addr);

		if (pr_prdt)
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				sizeof(struct ufshcd_sg_entry) * prdt_length);
	}
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}

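/**
 * ufshcd_print_host_state - dump the driver's view of the host state
 * @hba: per adapter instance
 */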
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
		hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}

void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
	if (!us)
		return;

	if (us < 10)
		udelay(us);
	else
		usleep_range(us, us + tolerance);
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
		break;
	}

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? true : false;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 * When run-stop registers are set to 1, it indicates the
 * host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns false if controller is active, true otherwise
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
		? false : true;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}

/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If True, set max possible frequency otherwise set low frequency
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

out:
	return ret;
}

/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	ktime_t start = ktime_get();

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_set_clk_freq(hba, scale_up);
	if (ret)
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
	if (ret)
		ufshcd_set_clk_freq(hba, !scale_up);

out:
	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}

/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}

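/**
 * ufshcd_wait_for_doorbell_clr - wait for task and transfer doorbells to clear
 * @hba: per adapter instance
 * @wait_timeout_us: maximum time to wait, in microseconds
 *
 * Returns 0 once both doorbell registers read as zero, or -EBUSY if the
 * controller is not operational or the timeout expires first.
 */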
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_doorbell;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		if (!tm_doorbell && !tr_doorbell) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		schedule();
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_doorbell);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_doorbell);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}

/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	#define UFS_MIN_GEAR_TO_SCALE_DOWN	UFS_HS_G1
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
		    || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info.info,
				&hba->pwr_info,
				sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
			new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
		}
	}

	/* check if the power mode needs to be changed or not? */
	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}

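/**
 * ufshcd_clock_scaling_prepare - quiesce the host before clock scaling
 * @hba: per adapter instance
 *
 * Blocks new SCSI requests, takes clk_scaling_lock for writing and waits for
 * the doorbells to drain. On failure the lock and request blocking are
 * released again and -EBUSY is returned.
 */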
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
{
	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	ufshcd_scsi_block_requests(hba);
	down_write(&hba->clk_scaling_lock);
	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		ufshcd_scsi_unblock_requests(hba);
	}

	return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
	up_write(&hba->clk_scaling_lock);
	ufshcd_scsi_unblock_requests(hba);
}

/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba, false);

	ret = ufshcd_clock_scaling_prepare(hba);
	if (ret)
		goto out;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out_unprepare;
	}

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out_unprepare;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, false);
			goto out_unprepare;
		}
	}

	/* Enable Write Booster if we have scaled up else disable it */
	up_write(&hba->clk_scaling_lock);
	ufshcd_wb_ctrl(hba, scale_up);
	down_write(&hba->clk_scaling_lock);

out_unprepare:
	ufshcd_clock_scaling_unprepare(hba);
out:
	ufshcd_release(hba);
	return ret;
}

static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}

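/**
 * ufshcd_devfreq_target - devfreq callback that scales UFS clocks and gear
 * @dev: pointer to the UFS host controller device
 * @freq: requested frequency, rounded here to a supported clock rate
 * @flags: devfreq flags (unused)
 *
 * Returns 0 if no change is needed or scaling succeeds, an error code
 * otherwise.
 */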
static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up, sched_clk_scaling_suspend_work = false;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
	/* Override with the closest supported frequency */
	*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	if (list_empty(clk_list)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		goto out;
	}

	/* Decide based on the rounded-off frequency and update */
	scale_up = (*freq == clki->max_freq) ? true : false;
	if (!scale_up)
		*freq = clki->min_freq;
	/* Update the frequency */
	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, scale_up);

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}

static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
{
	int *busy = priv;

	WARN_ON_ONCE(reserved);
	(*busy)++;
	return false;
}

/* Whether or not any tag is in use by a request that is in progress. */
static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
{
	struct request_queue *q = hba->cmd_queue;
	int busy = 0;

	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
	return busy;
}

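/**
 * ufshcd_devfreq_get_dev_status - devfreq callback reporting UFS load
 * @dev: pointer to the UFS host controller device
 * @stat: devfreq statistics (busy time vs. total time) to fill in
 *
 * Returns 0 on success, -EINVAL if clock scaling is not supported.
 */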
1310static int ufshcd_devfreq_get_dev_status(struct device *dev,
1311 struct devfreq_dev_status *stat)
1312{
1313 struct ufs_hba *hba = dev_get_drvdata(dev);
1314 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1315 unsigned long flags;
Asutosh Das91831d32020-03-25 11:29:00 -07001316 struct list_head *clk_list = &hba->clk_list_head;
1317 struct ufs_clk_info *clki;
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001318
1319 if (!ufshcd_is_clkscaling_supported(hba))
1320 return -EINVAL;
1321
1322 memset(stat, 0, sizeof(*stat));
1323
1324 spin_lock_irqsave(hba->host->host_lock, flags);
1325 if (!scaling->window_start_t)
1326 goto start_window;
1327
Asutosh Das91831d32020-03-25 11:29:00 -07001328 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1329 /*
	 1330	 * If the current frequency is 0, the ondemand governor assumes that
	 1331	 * no initial frequency has been set and always requests the maximum
	 1332	 * frequency.
1333 */
1334 stat->current_frequency = clki->curr_freq;
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001335 if (scaling->is_busy_started)
1336 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1337 scaling->busy_start_t));
1338
1339 stat->total_time = jiffies_to_usecs((long)jiffies -
1340 (long)scaling->window_start_t);
1341 stat->busy_time = scaling->tot_busy_t;
1342start_window:
1343 scaling->window_start_t = jiffies;
1344 scaling->tot_busy_t = 0;
1345
1346 if (hba->outstanding_reqs) {
1347 scaling->busy_start_t = ktime_get();
1348 scaling->is_busy_started = true;
1349 } else {
1350 scaling->busy_start_t = 0;
1351 scaling->is_busy_started = false;
1352 }
1353 spin_unlock_irqrestore(hba->host->host_lock, flags);
1354 return 0;
1355}
1356
1357static struct devfreq_dev_profile ufs_devfreq_profile = {
1358 .polling_ms = 100,
1359 .target = ufshcd_devfreq_target,
1360 .get_dev_status = ufshcd_devfreq_get_dev_status,
1361};
1362
Asutosh Das2c75f9a52020-03-25 11:29:01 -07001363#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
1364static struct devfreq_simple_ondemand_data ufs_ondemand_data = {
1365 .upthreshold = 70,
1366 .downdifferential = 5,
1367};
1368
1369static void *gov_data = &ufs_ondemand_data;
1370#else
1371static void *gov_data; /* NULL */
1372#endif
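/*
 * Worked example (illustrative): with the 100 ms polling interval from
 * ufs_devfreq_profile, a window in which the doorbell was busy for 80 ms
 * reports an 80% load. That exceeds the 70% upthreshold, so the
 * simple-ondemand governor asks for the maximum frequency and
 * ufshcd_devfreq_target() scales the clocks up; the 5% downdifferential
 * means the load must fall below 65% before the governor lowers the
 * frequency again.
 */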
1373
Bjorn Anderssondeac4442018-05-17 23:26:36 -07001374static int ufshcd_devfreq_init(struct ufs_hba *hba)
1375{
Bjorn Andersson092b4552018-05-17 23:26:37 -07001376 struct list_head *clk_list = &hba->clk_list_head;
1377 struct ufs_clk_info *clki;
Bjorn Anderssondeac4442018-05-17 23:26:36 -07001378 struct devfreq *devfreq;
1379 int ret;
1380
Bjorn Andersson092b4552018-05-17 23:26:37 -07001381 /* Skip devfreq if we don't have any clocks in the list */
1382 if (list_empty(clk_list))
1383 return 0;
1384
1385 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1386 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1387 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1388
Asutosh Das2c75f9a52020-03-25 11:29:01 -07001389 ufshcd_vops_config_scaling_param(hba, &ufs_devfreq_profile,
1390 gov_data);
Bjorn Andersson092b4552018-05-17 23:26:37 -07001391 devfreq = devfreq_add_device(hba->dev,
Bjorn Anderssondeac4442018-05-17 23:26:36 -07001392 &ufs_devfreq_profile,
1393 DEVFREQ_GOV_SIMPLE_ONDEMAND,
Asutosh Das2c75f9a52020-03-25 11:29:01 -07001394 gov_data);
Bjorn Anderssondeac4442018-05-17 23:26:36 -07001395 if (IS_ERR(devfreq)) {
1396 ret = PTR_ERR(devfreq);
1397 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
Bjorn Andersson092b4552018-05-17 23:26:37 -07001398
1399 dev_pm_opp_remove(hba->dev, clki->min_freq);
1400 dev_pm_opp_remove(hba->dev, clki->max_freq);
Bjorn Anderssondeac4442018-05-17 23:26:36 -07001401 return ret;
1402 }
1403
1404 hba->devfreq = devfreq;
1405
1406 return 0;
1407}
1408
Bjorn Andersson092b4552018-05-17 23:26:37 -07001409static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1410{
1411 struct list_head *clk_list = &hba->clk_list_head;
1412 struct ufs_clk_info *clki;
1413
1414 if (!hba->devfreq)
1415 return;
1416
1417 devfreq_remove_device(hba->devfreq);
1418 hba->devfreq = NULL;
1419
1420 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1421 dev_pm_opp_remove(hba->dev, clki->min_freq);
1422 dev_pm_opp_remove(hba->dev, clki->max_freq);
1423}
1424
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001425static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1426{
1427 unsigned long flags;
1428
1429 devfreq_suspend_device(hba->devfreq);
1430 spin_lock_irqsave(hba->host->host_lock, flags);
1431 hba->clk_scaling.window_start_t = 0;
1432 spin_unlock_irqrestore(hba->host->host_lock, flags);
1433}
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001434
Gilad Bronera5082532016-10-17 17:10:00 -07001435static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1436{
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001437 unsigned long flags;
1438 bool suspend = false;
1439
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001440 if (!ufshcd_is_clkscaling_supported(hba))
1441 return;
1442
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001443 spin_lock_irqsave(hba->host->host_lock, flags);
1444 if (!hba->clk_scaling.is_suspended) {
1445 suspend = true;
1446 hba->clk_scaling.is_suspended = true;
1447 }
1448 spin_unlock_irqrestore(hba->host->host_lock, flags);
1449
1450 if (suspend)
1451 __ufshcd_suspend_clkscaling(hba);
Gilad Bronera5082532016-10-17 17:10:00 -07001452}
1453
1454static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1455{
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001456 unsigned long flags;
1457 bool resume = false;
1458
1459 if (!ufshcd_is_clkscaling_supported(hba))
1460 return;
1461
1462 spin_lock_irqsave(hba->host->host_lock, flags);
1463 if (hba->clk_scaling.is_suspended) {
1464 resume = true;
1465 hba->clk_scaling.is_suspended = false;
1466 }
1467 spin_unlock_irqrestore(hba->host->host_lock, flags);
1468
1469 if (resume)
1470 devfreq_resume_device(hba->devfreq);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001471}
1472
1473static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1474 struct device_attribute *attr, char *buf)
1475{
1476 struct ufs_hba *hba = dev_get_drvdata(dev);
1477
1478 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1479}
1480
1481static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1482 struct device_attribute *attr, const char *buf, size_t count)
1483{
1484 struct ufs_hba *hba = dev_get_drvdata(dev);
1485 u32 value;
1486 int err;
1487
1488 if (kstrtou32(buf, 0, &value))
1489 return -EINVAL;
1490
1491 value = !!value;
1492 if (value == hba->clk_scaling.is_allowed)
1493 goto out;
1494
1495 pm_runtime_get_sync(hba->dev);
1496 ufshcd_hold(hba, false);
1497
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001498 cancel_work_sync(&hba->clk_scaling.suspend_work);
1499 cancel_work_sync(&hba->clk_scaling.resume_work);
1500
1501 hba->clk_scaling.is_allowed = value;
1502
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001503 if (value) {
1504 ufshcd_resume_clkscaling(hba);
1505 } else {
1506 ufshcd_suspend_clkscaling(hba);
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001507 err = ufshcd_devfreq_scale(hba, true);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001508 if (err)
1509 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1510 __func__, err);
1511 }
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001512
1513 ufshcd_release(hba);
1514 pm_runtime_put_sync(hba->dev);
1515out:
1516 return count;
Gilad Bronera5082532016-10-17 17:10:00 -07001517}
1518
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001519static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1520{
1521 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1522 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1523 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1524 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1525 hba->clk_scaling.enable_attr.attr.mode = 0644;
1526 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1527 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1528}
1529
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001530static void ufshcd_ungate_work(struct work_struct *work)
1531{
1532 int ret;
1533 unsigned long flags;
1534 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1535 clk_gating.ungate_work);
1536
1537 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1538
1539 spin_lock_irqsave(hba->host->host_lock, flags);
1540 if (hba->clk_gating.state == CLKS_ON) {
1541 spin_unlock_irqrestore(hba->host->host_lock, flags);
1542 goto unblock_reqs;
1543 }
1544
1545 spin_unlock_irqrestore(hba->host->host_lock, flags);
1546 ufshcd_setup_clocks(hba, true);
1547
Stanley Chu8b0bbf02019-12-07 20:22:01 +08001548 ufshcd_enable_irq(hba);
1549
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001550 /* Exit from hibern8 */
1551 if (ufshcd_can_hibern8_during_gating(hba)) {
1552 /* Prevent gating in this path */
1553 hba->clk_gating.is_suspended = true;
1554 if (ufshcd_is_link_hibern8(hba)) {
1555 ret = ufshcd_uic_hibern8_exit(hba);
1556 if (ret)
1557 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1558 __func__, ret);
1559 else
1560 ufshcd_set_link_active(hba);
1561 }
1562 hba->clk_gating.is_suspended = false;
1563 }
1564unblock_reqs:
Subhash Jadavani38135532018-05-03 16:37:18 +05301565 ufshcd_scsi_unblock_requests(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001566}
1567
1568/**
1569 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1570 * Also, exit from hibern8 mode and set the link as active.
1571 * @hba: per adapter instance
1572 * @async: This indicates whether caller should ungate clocks asynchronously.
1573 */
1574int ufshcd_hold(struct ufs_hba *hba, bool async)
1575{
1576 int rc = 0;
1577 unsigned long flags;
1578
1579 if (!ufshcd_is_clkgating_allowed(hba))
1580 goto out;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001581 spin_lock_irqsave(hba->host->host_lock, flags);
1582 hba->clk_gating.active_reqs++;
1583
Yaniv Gardi53c12d02016-02-01 15:02:45 +02001584 if (ufshcd_eh_in_progress(hba)) {
1585 spin_unlock_irqrestore(hba->host->host_lock, flags);
1586 return 0;
1587 }
1588
Sahitya Tummala856b3482014-09-25 15:32:34 +03001589start:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001590 switch (hba->clk_gating.state) {
1591 case CLKS_ON:
Venkat Gopalakrishnanf2a785a2016-10-17 17:10:53 -07001592 /*
1593 * Wait for the ungate work to complete if in progress.
1594 * Though the clocks may be in ON state, the link could
	 1595		 * still be in hibern8 state if hibern8 is allowed
	 1596		 * during clock gating.
	 1597		 * Make sure we exit hibern8 state in addition to the
	 1598		 * clocks being ON.
1599 */
1600 if (ufshcd_can_hibern8_during_gating(hba) &&
1601 ufshcd_is_link_hibern8(hba)) {
Can Guoc63d6092020-02-10 19:40:48 -08001602 if (async) {
1603 rc = -EAGAIN;
1604 hba->clk_gating.active_reqs--;
1605 break;
1606 }
Venkat Gopalakrishnanf2a785a2016-10-17 17:10:53 -07001607 spin_unlock_irqrestore(hba->host->host_lock, flags);
1608 flush_work(&hba->clk_gating.ungate_work);
1609 spin_lock_irqsave(hba->host->host_lock, flags);
1610 goto start;
1611 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001612 break;
1613 case REQ_CLKS_OFF:
1614 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1615 hba->clk_gating.state = CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001616 trace_ufshcd_clk_gating(dev_name(hba->dev),
1617 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001618 break;
1619 }
1620 /*
Tomohiro Kusumi9c490d22017-03-28 16:49:26 +03001621 * If we are here, it means gating work is either done or
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001622 * currently running. Hence, fall through to cancel gating
1623 * work and to enable clocks.
1624 */
Tomas Winkler30eb2e42018-11-26 10:10:34 +02001625 /* fallthrough */
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001626 case CLKS_OFF:
Subhash Jadavani38135532018-05-03 16:37:18 +05301627 ufshcd_scsi_block_requests(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001628 hba->clk_gating.state = REQ_CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001629 trace_ufshcd_clk_gating(dev_name(hba->dev),
1630 hba->clk_gating.state);
Vijay Viswanath10e5e372018-05-03 16:37:22 +05301631 queue_work(hba->clk_gating.clk_gating_workq,
1632 &hba->clk_gating.ungate_work);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001633 /*
1634 * fall through to check if we should wait for this
1635 * work to be done or not.
1636 */
Tomas Winkler30eb2e42018-11-26 10:10:34 +02001637 /* fallthrough */
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001638 case REQ_CLKS_ON:
1639 if (async) {
1640 rc = -EAGAIN;
1641 hba->clk_gating.active_reqs--;
1642 break;
1643 }
1644
1645 spin_unlock_irqrestore(hba->host->host_lock, flags);
1646 flush_work(&hba->clk_gating.ungate_work);
1647 /* Make sure state is CLKS_ON before returning */
Sahitya Tummala856b3482014-09-25 15:32:34 +03001648 spin_lock_irqsave(hba->host->host_lock, flags);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001649 goto start;
1650 default:
1651 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1652 __func__, hba->clk_gating.state);
1653 break;
1654 }
1655 spin_unlock_irqrestore(hba->host->host_lock, flags);
1656out:
1657 return rc;
1658}
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02001659EXPORT_SYMBOL_GPL(ufshcd_hold);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001660
1661static void ufshcd_gate_work(struct work_struct *work)
1662{
1663 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1664 clk_gating.gate_work.work);
1665 unsigned long flags;
1666
1667 spin_lock_irqsave(hba->host->host_lock, flags);
Venkat Gopalakrishnan3f0c06d2016-10-17 17:11:07 -07001668 /*
1669 * In case you are here to cancel this work the gating state
1670 * would be marked as REQ_CLKS_ON. In this case save time by
1671 * skipping the gating work and exit after changing the clock
1672 * state to CLKS_ON.
1673 */
1674 if (hba->clk_gating.is_suspended ||
Asutosh Das18f013742019-11-14 22:09:29 -08001675 (hba->clk_gating.state != REQ_CLKS_OFF)) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001676 hba->clk_gating.state = CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001677 trace_ufshcd_clk_gating(dev_name(hba->dev),
1678 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001679 goto rel_lock;
1680 }
1681
1682 if (hba->clk_gating.active_reqs
1683 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
Bart Van Assche7252a362019-12-09 10:13:08 -08001684 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001685 || hba->active_uic_cmd || hba->uic_async_done)
1686 goto rel_lock;
1687
1688 spin_unlock_irqrestore(hba->host->host_lock, flags);
1689
1690 /* put the link into hibern8 mode before turning off clocks */
1691 if (ufshcd_can_hibern8_during_gating(hba)) {
1692 if (ufshcd_uic_hibern8_enter(hba)) {
1693 hba->clk_gating.state = CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001694 trace_ufshcd_clk_gating(dev_name(hba->dev),
1695 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001696 goto out;
1697 }
1698 ufshcd_set_link_hibern8(hba);
1699 }
1700
Stanley Chu8b0bbf02019-12-07 20:22:01 +08001701 ufshcd_disable_irq(hba);
1702
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001703 if (!ufshcd_is_link_active(hba))
1704 ufshcd_setup_clocks(hba, false);
1705 else
1706 /* If link is active, device ref_clk can't be switched off */
1707 __ufshcd_setup_clocks(hba, false, true);
1708
1709 /*
1710 * In case you are here to cancel this work the gating state
1711 * would be marked as REQ_CLKS_ON. In this case keep the state
1712 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 1713	 * and a request to turn them on is pending. This way we keep the
	 1714	 * state machine intact and ultimately avoid running the cancel work
	 1715	 * multiple times when new requests arrive before the current cancel
	 1716	 * work is done.
1717 */
1718 spin_lock_irqsave(hba->host->host_lock, flags);
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001719 if (hba->clk_gating.state == REQ_CLKS_OFF) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001720 hba->clk_gating.state = CLKS_OFF;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001721 trace_ufshcd_clk_gating(dev_name(hba->dev),
1722 hba->clk_gating.state);
1723 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001724rel_lock:
1725 spin_unlock_irqrestore(hba->host->host_lock, flags);
1726out:
1727 return;
1728}
1729
1730/* host lock must be held before calling this variant */
1731static void __ufshcd_release(struct ufs_hba *hba)
1732{
1733 if (!ufshcd_is_clkgating_allowed(hba))
1734 return;
1735
1736 hba->clk_gating.active_reqs--;
1737
1738 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1739 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
Bart Van Assche7252a362019-12-09 10:13:08 -08001740 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
Yaniv Gardi53c12d02016-02-01 15:02:45 +02001741 || hba->active_uic_cmd || hba->uic_async_done
1742 || ufshcd_eh_in_progress(hba))
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001743 return;
1744
1745 hba->clk_gating.state = REQ_CLKS_OFF;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001746 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
Evan Greenf4bb7702018-10-05 10:27:32 -07001747 queue_delayed_work(hba->clk_gating.clk_gating_workq,
1748 &hba->clk_gating.gate_work,
1749 msecs_to_jiffies(hba->clk_gating.delay_ms));
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001750}
1751
1752void ufshcd_release(struct ufs_hba *hba)
1753{
1754 unsigned long flags;
1755
1756 spin_lock_irqsave(hba->host->host_lock, flags);
1757 __ufshcd_release(hba);
1758 spin_unlock_irqrestore(hba->host->host_lock, flags);
1759}
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02001760EXPORT_SYMBOL_GPL(ufshcd_release);
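/*
 * Illustrative sketch, not part of the driver: a caller that needs the host
 * clocks running brackets its register accesses with the hold/release pair,
 * e.g.
 *
 *	ufshcd_hold(hba, false);	// blocks until the clocks are ungated
 *	ver = ufshcd_readl(hba, REG_UFS_VERSION);
 *	ufshcd_release(hba);
 *
 * Passing async == true instead returns -EAGAIN without sleeping while an
 * ungate is still in progress, which is how ufshcd_queuecommand() avoids
 * blocking in the I/O path.
 */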
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001761
1762static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1763 struct device_attribute *attr, char *buf)
1764{
1765 struct ufs_hba *hba = dev_get_drvdata(dev);
1766
1767 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1768}
1769
1770static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1771 struct device_attribute *attr, const char *buf, size_t count)
1772{
1773 struct ufs_hba *hba = dev_get_drvdata(dev);
1774 unsigned long flags, value;
1775
1776 if (kstrtoul(buf, 0, &value))
1777 return -EINVAL;
1778
1779 spin_lock_irqsave(hba->host->host_lock, flags);
1780 hba->clk_gating.delay_ms = value;
1781 spin_unlock_irqrestore(hba->host->host_lock, flags);
1782 return count;
1783}
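/*
 * Illustrative usage from user space (the sysfs path is abbreviated and
 * assumed): writing a new value to the "clkgate_delay_ms" attribute created
 * in ufshcd_init_clk_gating() changes how long the host must stay idle
 * before the clocks are gated, e.g.
 *
 *	echo 200 > /sys/devices/.../clkgate_delay_ms
 *
 * raises the window from the 150 ms default to 200 ms.
 */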
1784
Sahitya Tummalab4274112016-12-22 18:40:39 -08001785static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1786 struct device_attribute *attr, char *buf)
1787{
1788 struct ufs_hba *hba = dev_get_drvdata(dev);
1789
1790 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1791}
1792
1793static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1794 struct device_attribute *attr, const char *buf, size_t count)
1795{
1796 struct ufs_hba *hba = dev_get_drvdata(dev);
1797 unsigned long flags;
1798 u32 value;
1799
1800 if (kstrtou32(buf, 0, &value))
1801 return -EINVAL;
1802
1803 value = !!value;
1804 if (value == hba->clk_gating.is_enabled)
1805 goto out;
1806
1807 if (value) {
1808 ufshcd_release(hba);
1809 } else {
1810 spin_lock_irqsave(hba->host->host_lock, flags);
1811 hba->clk_gating.active_reqs++;
1812 spin_unlock_irqrestore(hba->host->host_lock, flags);
1813 }
1814
1815 hba->clk_gating.is_enabled = value;
1816out:
1817 return count;
1818}
1819
Vivek Gautameebcc192018-08-07 23:17:39 +05301820static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1821{
1822 char wq_name[sizeof("ufs_clkscaling_00")];
1823
1824 if (!ufshcd_is_clkscaling_supported(hba))
1825 return;
1826
1827 INIT_WORK(&hba->clk_scaling.suspend_work,
1828 ufshcd_clk_scaling_suspend_work);
1829 INIT_WORK(&hba->clk_scaling.resume_work,
1830 ufshcd_clk_scaling_resume_work);
1831
1832 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1833 hba->host->host_no);
1834 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1835
1836 ufshcd_clkscaling_init_sysfs(hba);
1837}
1838
1839static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1840{
1841 if (!ufshcd_is_clkscaling_supported(hba))
1842 return;
1843
1844 destroy_workqueue(hba->clk_scaling.workq);
1845 ufshcd_devfreq_remove(hba);
1846}
1847
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001848static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1849{
Vijay Viswanath10e5e372018-05-03 16:37:22 +05301850 char wq_name[sizeof("ufs_clk_gating_00")];
1851
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001852 if (!ufshcd_is_clkgating_allowed(hba))
1853 return;
1854
1855 hba->clk_gating.delay_ms = 150;
1856 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1857 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1858
Vijay Viswanath10e5e372018-05-03 16:37:22 +05301859 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1860 hba->host->host_no);
1861 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1862 WQ_MEM_RECLAIM);
1863
Sahitya Tummalab4274112016-12-22 18:40:39 -08001864 hba->clk_gating.is_enabled = true;
1865
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001866 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1867 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1868 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1869 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
Sahitya Tummalab4274112016-12-22 18:40:39 -08001870 hba->clk_gating.delay_attr.attr.mode = 0644;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001871 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1872 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
Sahitya Tummalab4274112016-12-22 18:40:39 -08001873
1874 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1875 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1876 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1877 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1878 hba->clk_gating.enable_attr.attr.mode = 0644;
1879 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1880 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001881}
1882
1883static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1884{
1885 if (!ufshcd_is_clkgating_allowed(hba))
1886 return;
1887 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
Sahitya Tummalab4274112016-12-22 18:40:39 -08001888 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
Akinobu Mita97cd6802014-11-24 14:24:18 +09001889 cancel_work_sync(&hba->clk_gating.ungate_work);
1890 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
Vijay Viswanath10e5e372018-05-03 16:37:22 +05301891 destroy_workqueue(hba->clk_gating.clk_gating_workq);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001892}
1893
Sahitya Tummala856b3482014-09-25 15:32:34 +03001894/* Must be called with host lock acquired */
1895static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1896{
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001897 bool queue_resume_work = false;
1898
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001899 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03001900 return;
1901
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001902 if (!hba->clk_scaling.active_reqs++)
1903 queue_resume_work = true;
1904
1905 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1906 return;
1907
1908 if (queue_resume_work)
1909 queue_work(hba->clk_scaling.workq,
1910 &hba->clk_scaling.resume_work);
1911
1912 if (!hba->clk_scaling.window_start_t) {
1913 hba->clk_scaling.window_start_t = jiffies;
1914 hba->clk_scaling.tot_busy_t = 0;
1915 hba->clk_scaling.is_busy_started = false;
1916 }
1917
Sahitya Tummala856b3482014-09-25 15:32:34 +03001918 if (!hba->clk_scaling.is_busy_started) {
1919 hba->clk_scaling.busy_start_t = ktime_get();
1920 hba->clk_scaling.is_busy_started = true;
1921 }
1922}
1923
1924static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1925{
1926 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1927
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001928 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03001929 return;
1930
1931 if (!hba->outstanding_reqs && scaling->is_busy_started) {
1932 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1933 scaling->busy_start_t));
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01001934 scaling->busy_start_t = 0;
Sahitya Tummala856b3482014-09-25 15:32:34 +03001935 scaling->is_busy_started = false;
1936 }
1937}
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301938/**
1939 * ufshcd_send_command - Send SCSI or device management commands
1940 * @hba: per adapter instance
1941 * @task_tag: Task tag of the command
1942 */
1943static inline
1944void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1945{
Dolev Ravivff8e20c2016-12-22 18:42:18 -08001946 hba->lrb[task_tag].issue_time_stamp = ktime_get();
Zang Leigang09017182017-09-27 10:06:06 +08001947 hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
Bart Van Asscheeacf36f2019-12-24 14:02:46 -08001948 ufshcd_add_command_trace(hba, task_tag, "send");
Sahitya Tummala856b3482014-09-25 15:32:34 +03001949 ufshcd_clk_scaling_start_busy(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301950 __set_bit(task_tag, &hba->outstanding_reqs);
Seungwon Jeonb873a2752013-06-26 22:39:26 +05301951 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Gilad Bronerad1a1b92016-10-17 17:09:36 -07001952 /* Make sure that doorbell is committed immediately */
1953 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301954}
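/*
 * Note (paraphrasing UFSHCI doorbell semantics): the transfer request
 * doorbell register is set-only from the host side, so writing
 * (1 << task_tag) rings just that slot and leaves the other bits untouched;
 * the controller clears the bit once the request completes, which is why
 * outstanding_reqs is tracked alongside the register.
 */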
1955
1956/**
1957 * ufshcd_copy_sense_data - Copy sense data in case of check condition
Bart Van Assche8aa29f12018-03-01 15:07:20 -08001958 * @lrbp: pointer to local reference block
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301959 */
1960static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1961{
1962 int len;
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05301963 if (lrbp->sense_buffer &&
1964 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07001965 int len_to_copy;
1966
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301967 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
Avri Altman09a5a242018-11-22 20:04:56 +02001968 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07001969
Avri Altman09a5a242018-11-22 20:04:56 +02001970 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
1971 len_to_copy);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301972 }
1973}
1974
1975/**
Dolev Raviv68078d52013-07-30 00:35:58 +05301976 * ufshcd_copy_query_response() - Copy the Query Response and the data
1977 * descriptor
1978 * @hba: per adapter instance
Bart Van Assche8aa29f12018-03-01 15:07:20 -08001979 * @lrbp: pointer to local reference block
Dolev Raviv68078d52013-07-30 00:35:58 +05301980 */
1981static
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001982int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Dolev Raviv68078d52013-07-30 00:35:58 +05301983{
1984 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1985
Dolev Raviv68078d52013-07-30 00:35:58 +05301986 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
Dolev Raviv68078d52013-07-30 00:35:58 +05301987
Dolev Raviv68078d52013-07-30 00:35:58 +05301988 /* Get the descriptor */
Avri Altman1c908362019-05-21 11:24:22 +03001989 if (hba->dev_cmd.query.descriptor &&
1990 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001991 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
Dolev Raviv68078d52013-07-30 00:35:58 +05301992 GENERAL_UPIU_REQUEST_SIZE;
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001993 u16 resp_len;
1994 u16 buf_len;
Dolev Raviv68078d52013-07-30 00:35:58 +05301995
1996 /* data segment length */
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001997 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
Dolev Raviv68078d52013-07-30 00:35:58 +05301998 MASK_QUERY_DATA_SEG_LEN;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03001999 buf_len = be16_to_cpu(
2000 hba->dev_cmd.query.request.upiu_req.length);
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002001 if (likely(buf_len >= resp_len)) {
2002 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2003 } else {
2004 dev_warn(hba->dev,
Bean Huo3d4881d2019-11-12 23:34:35 +01002005 "%s: rsp size %d is bigger than buffer size %d",
2006 __func__, resp_len, buf_len);
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002007 return -EINVAL;
2008 }
Dolev Raviv68078d52013-07-30 00:35:58 +05302009 }
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002010
2011 return 0;
Dolev Raviv68078d52013-07-30 00:35:58 +05302012}
2013
2014/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302015 * ufshcd_hba_capabilities - Read controller capabilities
2016 * @hba: per adapter instance
2017 */
2018static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
2019{
Seungwon Jeonb873a2752013-06-26 22:39:26 +05302020 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302021
2022 /* nutrs and nutmrs are 0 based values */
2023 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2024 hba->nutmrs =
2025 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2026}
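/*
 * Worked example (register value assumed for illustration): a controller
 * reporting 31 in the NUTRS field and 7 in the NUTMRS field of the
 * capabilities register supports 32 transfer request slots and 8 task
 * management request slots, since both fields are 0 based.
 */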
2027
2028/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302029 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2030 * to accept UIC commands
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302031 * @hba: per adapter instance
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302032 * Return true on success, else false
2033 */
2034static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2035{
	 2036	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
2040}
2041
2042/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302043 * ufshcd_get_upmcrs - Get the power mode change request status
2044 * @hba: Pointer to adapter instance
2045 *
2046 * This function gets the UPMCRS field of HCS register
2047 * Returns value of UPMCRS field
2048 */
2049static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2050{
2051 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2052}
2053
2054/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302055 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2056 * @hba: per adapter instance
2057 * @uic_cmd: UIC command
2058 *
2059 * Mutex must be held.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302060 */
2061static inline void
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302062ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302063{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302064 WARN_ON(hba->active_uic_cmd);
2065
2066 hba->active_uic_cmd = uic_cmd;
2067
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302068 /* Write Args */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302069 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2070 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2071 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302072
2073 /* Write UIC Cmd */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302074 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
Seungwon Jeonb873a2752013-06-26 22:39:26 +05302075 REG_UIC_COMMAND);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302076}
2077
2078/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302079 * ufshcd_wait_for_uic_cmd - Wait for the completion of a UIC command
2080 * @hba: per adapter instance
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002081 * @uic_cmd: UIC command
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302082 *
2083 * Must be called with mutex held.
2084 * Returns 0 only if success.
2085 */
2086static int
2087ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2088{
2089 int ret;
2090 unsigned long flags;
2091
2092 if (wait_for_completion_timeout(&uic_cmd->done,
2093 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2094 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2095 else
2096 ret = -ETIMEDOUT;
2097
2098 spin_lock_irqsave(hba->host->host_lock, flags);
2099 hba->active_uic_cmd = NULL;
2100 spin_unlock_irqrestore(hba->host->host_lock, flags);
2101
2102 return ret;
2103}
2104
2105/**
2106 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2107 * @hba: per adapter instance
2108 * @uic_cmd: UIC command
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002109 * @completion: initialize the completion only if this is set to true
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302110 *
	 2111 * Identical to ufshcd_send_uic_cmd() except for mutex handling. Must be called
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002112 * with mutex held and host_lock locked.
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302113 * Returns 0 only if success.
2114 */
2115static int
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002116__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2117 bool completion)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302118{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302119 if (!ufshcd_ready_for_uic_cmd(hba)) {
2120 dev_err(hba->dev,
2121 "Controller not ready to accept UIC commands\n");
2122 return -EIO;
2123 }
2124
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002125 if (completion)
2126 init_completion(&uic_cmd->done);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302127
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302128 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302129
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002130 return 0;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302131}
2132
2133/**
2134 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2135 * @hba: per adapter instance
2136 * @uic_cmd: UIC command
2137 *
2138 * Returns 0 only if success.
2139 */
Avri Altmane77044c52018-10-07 17:30:39 +03002140int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302141{
2142 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002143 unsigned long flags;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302144
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002145 ufshcd_hold(hba, false);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302146 mutex_lock(&hba->uic_cmd_mutex);
Yaniv Gardicad2e032015-03-31 17:37:14 +03002147 ufshcd_add_delay_before_dme_cmd(hba);
2148
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002149 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002150 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002151 spin_unlock_irqrestore(hba->host->host_lock, flags);
2152 if (!ret)
2153 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2154
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302155 mutex_unlock(&hba->uic_cmd_mutex);
2156
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002157 ufshcd_release(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302158 return ret;
2159}
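/*
 * Illustrative sketch, not the driver's usual path (the ufshcd_dme_get() /
 * ufshcd_dme_set() helpers wrap this): a DME attribute can be read by
 * hand-building a UIC command and submitting it here, e.g.
 *
 *	struct uic_command uic_cmd = {0};
 *	u32 tx_lanes;
 *
 *	uic_cmd.command = UIC_CMD_DME_GET;
 *	uic_cmd.argument1 = UIC_ARG_MIB(PA_AVAILTXDATALANES);
 *	if (!ufshcd_send_uic_cmd(hba, &uic_cmd))
 *		tx_lanes = uic_cmd.argument3;
 */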
2160
2161/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302162 * ufshcd_map_sg - Map scatter-gather list to prdt
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002163 * @hba: per adapter instance
2164 * @lrbp: pointer to local reference block
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302165 *
2166 * Returns 0 in case of success, non-zero value in case of failure
2167 */
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002168static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302169{
2170 struct ufshcd_sg_entry *prd_table;
2171 struct scatterlist *sg;
2172 struct scsi_cmnd *cmd;
2173 int sg_segments;
2174 int i;
2175
2176 cmd = lrbp->cmd;
2177 sg_segments = scsi_dma_map(cmd);
2178 if (sg_segments < 0)
2179 return sg_segments;
2180
2181 if (sg_segments) {
Christoph Hellwig492001992020-02-21 06:08:11 -08002182 lrbp->utr_descriptor_ptr->prd_table_length =
2183 cpu_to_le16((u16)sg_segments);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302184
2185 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2186
2187 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2188 prd_table[i].size =
2189 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2190 prd_table[i].base_addr =
2191 cpu_to_le32(lower_32_bits(sg->dma_address));
2192 prd_table[i].upper_addr =
2193 cpu_to_le32(upper_32_bits(sg->dma_address));
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002194 prd_table[i].reserved = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302195 }
2196 } else {
2197 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2198 }
2199
2200 return 0;
2201}
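/*
 * Worked example (illustrative): the PRDT "size" field holds the segment
 * byte count minus one, so a 4 KiB scatter-gather element is encoded as
 * 0xFFF (4095), with its 64-bit DMA address split across the base_addr and
 * upper_addr words.
 */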
2202
2203/**
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302204 * ufshcd_enable_intr - enable interrupts
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302205 * @hba: per adapter instance
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302206 * @intrs: interrupt bits
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302207 */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302208static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302209{
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302210 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2211
2212 if (hba->ufs_version == UFSHCI_VERSION_10) {
2213 u32 rw;
2214 rw = set & INTERRUPT_MASK_RW_VER_10;
2215 set = rw | ((set ^ intrs) & intrs);
2216 } else {
2217 set |= intrs;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302218 }
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302219
2220 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2221}
2222
2223/**
2224 * ufshcd_disable_intr - disable interrupts
2225 * @hba: per adapter instance
2226 * @intrs: interrupt bits
2227 */
2228static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2229{
2230 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2231
2232 if (hba->ufs_version == UFSHCI_VERSION_10) {
2233 u32 rw;
2234 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2235 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2236 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2237
2238 } else {
2239 set &= ~intrs;
2240 }
2241
2242 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302243}
2244
2245/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302246 * ufshcd_prepare_req_desc_hdr() - Fill the request descriptor header
	 2247 * according to the request
2248 * @lrbp: pointer to local reference block
2249 * @upiu_flags: flags required in the header
	 2250 * @cmd_dir: request's data direction
2251 */
2252static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
Joao Pinto300bb132016-05-11 12:21:27 +01002253 u32 *upiu_flags, enum dma_data_direction cmd_dir)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302254{
2255 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2256 u32 data_direction;
2257 u32 dword_0;
2258
2259 if (cmd_dir == DMA_FROM_DEVICE) {
2260 data_direction = UTP_DEVICE_TO_HOST;
2261 *upiu_flags = UPIU_CMD_FLAGS_READ;
2262 } else if (cmd_dir == DMA_TO_DEVICE) {
2263 data_direction = UTP_HOST_TO_DEVICE;
2264 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2265 } else {
2266 data_direction = UTP_NO_DATA_TRANSFER;
2267 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2268 }
2269
2270 dword_0 = data_direction | (lrbp->command_type
2271 << UPIU_COMMAND_TYPE_OFFSET);
2272 if (lrbp->intr_cmd)
2273 dword_0 |= UTP_REQ_DESC_INT_CMD;
2274
2275 /* Transfer request descriptor header fields */
2276 req_desc->header.dword_0 = cpu_to_le32(dword_0);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002277 /* dword_1 is reserved, hence it is set to 0 */
2278 req_desc->header.dword_1 = 0;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302279 /*
2280 * assigning invalid value for command status. Controller
2281 * updates OCS on command completion, with the command
2282 * status
2283 */
2284 req_desc->header.dword_2 =
2285 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002286 /* dword_3 is reserved, hence it is set to 0 */
2287 req_desc->header.dword_3 = 0;
Yaniv Gardi51047262016-02-01 15:02:38 +02002288
2289 req_desc->prd_table_length = 0;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302290}
2291
2292/**
2293 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2294 * for scsi commands
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002295 * @lrbp: local reference block pointer
2296 * @upiu_flags: flags
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302297 */
2298static
2299void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2300{
Bart Van Assche1b21b8f2019-12-24 14:02:45 -08002301 struct scsi_cmnd *cmd = lrbp->cmd;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302302 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002303 unsigned short cdb_len;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302304
2305 /* command descriptor fields */
2306 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2307 UPIU_TRANSACTION_COMMAND, upiu_flags,
2308 lrbp->lun, lrbp->task_tag);
2309 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2310 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2311
2312 /* Total EHS length and Data segment length will be zero */
2313 ucd_req_ptr->header.dword_2 = 0;
2314
Bart Van Assche1b21b8f2019-12-24 14:02:45 -08002315 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302316
Bart Van Assche1b21b8f2019-12-24 14:02:45 -08002317 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
Avri Altmana851b2b2018-10-07 17:30:34 +03002318 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
Bart Van Assche1b21b8f2019-12-24 14:02:45 -08002319 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002320
2321 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302322}
2323
Dolev Raviv68078d52013-07-30 00:35:58 +05302324/**
2325 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
	 2326 * for query requests
2327 * @hba: UFS hba
2328 * @lrbp: local reference block pointer
2329 * @upiu_flags: flags
2330 */
2331static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2332 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2333{
2334 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2335 struct ufs_query *query = &hba->dev_cmd.query;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302336 u16 len = be16_to_cpu(query->request.upiu_req.length);
Dolev Raviv68078d52013-07-30 00:35:58 +05302337
2338 /* Query request header */
2339 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2340 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2341 lrbp->lun, lrbp->task_tag);
2342 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2343 0, query->request.query_func, 0, 0);
2344
Zang Leigang68612852016-08-25 17:39:19 +08002345	/* Data segment length is only needed for WRITE_DESC */
2346 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2347 ucd_req_ptr->header.dword_2 =
2348 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2349 else
2350 ucd_req_ptr->header.dword_2 = 0;
Dolev Raviv68078d52013-07-30 00:35:58 +05302351
2352 /* Copy the Query Request buffer as is */
2353 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2354 QUERY_OSF_SIZE);
Dolev Raviv68078d52013-07-30 00:35:58 +05302355
2356 /* Copy the Descriptor */
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002357 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
Avri Altman220d17a62018-10-07 17:30:36 +03002358 memcpy(ucd_req_ptr + 1, query->descriptor, len);
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002359
Yaniv Gardi51047262016-02-01 15:02:38 +02002360 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Dolev Raviv68078d52013-07-30 00:35:58 +05302361}
2362
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302363static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2364{
2365 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2366
2367 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2368
2369 /* command descriptor fields */
2370 ucd_req_ptr->header.dword_0 =
2371 UPIU_HEADER_DWORD(
2372 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
Yaniv Gardi51047262016-02-01 15:02:38 +02002373 /* clear rest of the fields of basic header */
2374 ucd_req_ptr->header.dword_1 = 0;
2375 ucd_req_ptr->header.dword_2 = 0;
2376
2377 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302378}
2379
2380/**
Joao Pinto300bb132016-05-11 12:21:27 +01002381 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit (UPIU)
2382 * for Device Management Purposes
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002383 * @hba: per adapter instance
2384 * @lrbp: pointer to local reference block
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302385 */
Joao Pinto300bb132016-05-11 12:21:27 +01002386static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302387{
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302388 u32 upiu_flags;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302389 int ret = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302390
kehuanlin83dc7e32017-09-06 17:58:39 +08002391 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2392 (hba->ufs_version == UFSHCI_VERSION_11))
Joao Pinto300bb132016-05-11 12:21:27 +01002393 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
kehuanlin83dc7e32017-09-06 17:58:39 +08002394 else
2395 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
Joao Pinto300bb132016-05-11 12:21:27 +01002396
2397 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2398 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2399 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2400 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2401 ufshcd_prepare_utp_nop_upiu(lrbp);
2402 else
2403 ret = -EINVAL;
2404
2405 return ret;
2406}
2407
2408/**
	 2409 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
2410 * for SCSI Purposes
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002411 * @hba: per adapter instance
2412 * @lrbp: pointer to local reference block
Joao Pinto300bb132016-05-11 12:21:27 +01002413 */
2414static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2415{
2416 u32 upiu_flags;
2417 int ret = 0;
2418
kehuanlin83dc7e32017-09-06 17:58:39 +08002419 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2420 (hba->ufs_version == UFSHCI_VERSION_11))
Joao Pinto300bb132016-05-11 12:21:27 +01002421 lrbp->command_type = UTP_CMD_TYPE_SCSI;
kehuanlin83dc7e32017-09-06 17:58:39 +08002422 else
2423 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
Joao Pinto300bb132016-05-11 12:21:27 +01002424
2425 if (likely(lrbp->cmd)) {
2426 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2427 lrbp->cmd->sc_data_direction);
2428 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2429 } else {
2430 ret = -EINVAL;
2431 }
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302432
2433 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302434}
2435
2436/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03002437 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002438 * @upiu_wlun_id: UPIU W-LUN id
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03002439 *
2440 * Returns SCSI W-LUN id
2441 */
2442static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2443{
2444 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2445}
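/*
 * Worked example (illustrative): the UPIU REPORT LUNS well-known LUN 0x81
 * has the UFS_UPIU_WLUN_ID bit (0x80) cleared and SCSI_W_LUN_BASE (0xc100)
 * OR-ed in, yielding 0xc101, the SCSI well-known LUN the midlayer expects.
 */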
2446
Bart Van Assche4d2b8d42020-01-22 19:56:35 -08002447static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2448{
2449 struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2450 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2451 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2452 i * sizeof(struct utp_transfer_cmd_desc);
2453 u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2454 response_upiu);
2455 u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2456
2457 lrb->utr_descriptor_ptr = utrdlp + i;
2458 lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2459 i * sizeof(struct utp_transfer_req_desc);
2460 lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2461 lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2462 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2463 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2464 lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2465 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2466}
2467
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03002468/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302469 * ufshcd_queuecommand - main entry point for SCSI requests
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002470 * @host: SCSI host pointer
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302471 * @cmd: command from SCSI Midlayer
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302472 *
2473 * Returns 0 for success, non-zero in case of failure
2474 */
2475static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2476{
2477 struct ufshcd_lrb *lrbp;
2478 struct ufs_hba *hba;
2479 unsigned long flags;
2480 int tag;
2481 int err = 0;
2482
2483 hba = shost_priv(host);
2484
2485 tag = cmd->request->tag;
Yaniv Gardi14497322016-02-01 15:02:39 +02002486 if (!ufshcd_valid_tag(hba, tag)) {
2487 dev_err(hba->dev,
2488 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2489 __func__, tag, cmd, cmd->request);
2490 BUG();
2491 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302492
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08002493 if (!down_read_trylock(&hba->clk_scaling_lock))
2494 return SCSI_MLQUEUE_HOST_BUSY;
2495
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302496 spin_lock_irqsave(hba->host->host_lock, flags);
2497 switch (hba->ufshcd_state) {
2498 case UFSHCD_STATE_OPERATIONAL:
2499 break;
Zang Leigang141f8162016-11-16 11:29:37 +08002500 case UFSHCD_STATE_EH_SCHEDULED:
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302501 case UFSHCD_STATE_RESET:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302502 err = SCSI_MLQUEUE_HOST_BUSY;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302503 goto out_unlock;
2504 case UFSHCD_STATE_ERROR:
2505 set_host_byte(cmd, DID_ERROR);
2506 cmd->scsi_done(cmd);
2507 goto out_unlock;
2508 default:
2509 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2510 __func__, hba->ufshcd_state);
2511 set_host_byte(cmd, DID_BAD_TARGET);
2512 cmd->scsi_done(cmd);
2513 goto out_unlock;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302514 }
Yaniv Gardi53c12d02016-02-01 15:02:45 +02002515
2516 /* if error handling is in progress, don't issue commands */
2517 if (ufshcd_eh_in_progress(hba)) {
2518 set_host_byte(cmd, DID_ERROR);
2519 cmd->scsi_done(cmd);
2520 goto out_unlock;
2521 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302522 spin_unlock_irqrestore(hba->host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302523
Gilad Broner7fabb772017-02-03 16:56:50 -08002524 hba->req_abort_count = 0;
2525
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002526 err = ufshcd_hold(hba, true);
2527 if (err) {
2528 err = SCSI_MLQUEUE_HOST_BUSY;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002529 goto out;
2530 }
2531 WARN_ON(hba->clk_gating.state != CLKS_ON);
2532
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302533 lrbp = &hba->lrb[tag];
2534
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302535 WARN_ON(lrbp->cmd);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302536 lrbp->cmd = cmd;
Avri Altman09a5a242018-11-22 20:04:56 +02002537 lrbp->sense_bufflen = UFS_SENSE_SIZE;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302538 lrbp->sense_buffer = cmd->sense_buffer;
2539 lrbp->task_tag = tag;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03002540 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
Yaniv Gardib8521902015-05-17 18:54:57 +03002541	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
Gilad Bronere0b299e2017-02-03 16:56:40 -08002542 lrbp->req_abort_skip = false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302543
Joao Pinto300bb132016-05-11 12:21:27 +01002544 ufshcd_comp_scsi_upiu(hba, lrbp);
2545
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002546 err = ufshcd_map_sg(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302547 if (err) {
2548 lrbp->cmd = NULL;
Can Guo17c7d352019-12-05 02:14:33 +00002549 ufshcd_release(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302550 goto out;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302551 }
Gilad Bronerad1a1b92016-10-17 17:09:36 -07002552 /* Make sure descriptors are ready before ringing the doorbell */
2553 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302554
2555 /* issue command to the controller */
2556 spin_lock_irqsave(hba->host->host_lock, flags);
Bart Van Assche5905d462020-01-22 19:56:36 -08002557 ufshcd_vops_setup_xfer_req(hba, tag, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302558 ufshcd_send_command(hba, tag);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302559out_unlock:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302560 spin_unlock_irqrestore(hba->host->host_lock, flags);
2561out:
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08002562 up_read(&hba->clk_scaling_lock);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302563 return err;
2564}
2565
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302566static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2567 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2568{
2569 lrbp->cmd = NULL;
2570 lrbp->sense_bufflen = 0;
2571 lrbp->sense_buffer = NULL;
2572 lrbp->task_tag = tag;
2573 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302574 lrbp->intr_cmd = true; /* No interrupt aggregation */
2575 hba->dev_cmd.type = cmd_type;
2576
Joao Pinto300bb132016-05-11 12:21:27 +01002577 return ufshcd_comp_devman_upiu(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302578}
2579
2580static int
2581ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2582{
2583 int err = 0;
2584 unsigned long flags;
2585 u32 mask = 1 << tag;
2586
2587 /* clear outstanding transaction before retry */
2588 spin_lock_irqsave(hba->host->host_lock, flags);
2589 ufshcd_utrl_clear(hba, tag);
2590 spin_unlock_irqrestore(hba->host->host_lock, flags);
2591
2592 /*
	 2593	 * wait for h/w to clear the corresponding bit in the door-bell.
2594 * max. wait is 1 sec.
2595 */
2596 err = ufshcd_wait_for_register(hba,
2597 REG_UTP_TRANSFER_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02002598 mask, ~mask, 1000, 1000, true);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302599
2600 return err;
2601}
2602
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002603static int
2604ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2605{
2606 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2607
2608 /* Get the UPIU response */
2609 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2610 UPIU_RSP_CODE_OFFSET;
2611 return query_res->response;
2612}
2613
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302614/**
2615 * ufshcd_dev_cmd_completion() - handles device management command responses
2616 * @hba: per adapter instance
2617 * @lrbp: pointer to local reference block
2618 */
2619static int
2620ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2621{
2622 int resp;
2623 int err = 0;
2624
Dolev Ravivff8e20c2016-12-22 18:42:18 -08002625 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302626 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2627
2628 switch (resp) {
2629 case UPIU_TRANSACTION_NOP_IN:
2630 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2631 err = -EINVAL;
2632 dev_err(hba->dev, "%s: unexpected response %x\n",
2633 __func__, resp);
2634 }
2635 break;
Dolev Raviv68078d52013-07-30 00:35:58 +05302636 case UPIU_TRANSACTION_QUERY_RSP:
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002637 err = ufshcd_check_query_response(hba, lrbp);
2638 if (!err)
2639 err = ufshcd_copy_query_response(hba, lrbp);
Dolev Raviv68078d52013-07-30 00:35:58 +05302640 break;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302641 case UPIU_TRANSACTION_REJECT_UPIU:
2642 /* TODO: handle Reject UPIU Response */
2643 err = -EPERM;
2644 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2645 __func__);
2646 break;
2647 default:
2648 err = -EINVAL;
2649 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2650 __func__, resp);
2651 break;
2652 }
2653
2654 return err;
2655}
2656
2657static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2658 struct ufshcd_lrb *lrbp, int max_timeout)
2659{
2660 int err = 0;
2661 unsigned long time_left;
2662 unsigned long flags;
2663
2664 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2665 msecs_to_jiffies(max_timeout));
2666
Gilad Bronerad1a1b92016-10-17 17:09:36 -07002667 /* Make sure descriptors are ready before ringing the doorbell */
2668 wmb();
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302669 spin_lock_irqsave(hba->host->host_lock, flags);
2670 hba->dev_cmd.complete = NULL;
2671 if (likely(time_left)) {
2672 err = ufshcd_get_tr_ocs(lrbp);
2673 if (!err)
2674 err = ufshcd_dev_cmd_completion(hba, lrbp);
2675 }
2676 spin_unlock_irqrestore(hba->host->host_lock, flags);
2677
2678 if (!time_left) {
2679 err = -ETIMEDOUT;
Yaniv Gardia48353f2016-02-01 15:02:40 +02002680	dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
2681 __func__, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302682 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
Yaniv Gardia48353f2016-02-01 15:02:40 +02002683 /* successfully cleared the command, retry if needed */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302684 err = -EAGAIN;
Yaniv Gardia48353f2016-02-01 15:02:40 +02002685 /*
2686 * in case of an error, after clearing the doorbell,
2687 * we also need to clear the outstanding_request
2688 * field in hba
2689 */
2690 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302691 }
2692
2693 return err;
2694}
2695
2696/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302697 * ufshcd_exec_dev_cmd - API for sending device management requests
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002698 * @hba: UFS hba
2699 * @cmd_type: specifies the type (NOP, Query...)
2700 * @timeout: timeout in milliseconds
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302701 *
Dolev Raviv68078d52013-07-30 00:35:58 +05302702 * NOTE: Since there is only one available tag for device management commands,
2703 * it is expected that the caller holds the hba->dev_cmd.lock mutex.
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302704 */
2705static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2706 enum dev_cmd_type cmd_type, int timeout)
2707{
Bart Van Assche7252a362019-12-09 10:13:08 -08002708 struct request_queue *q = hba->cmd_queue;
2709 struct request *req;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302710 struct ufshcd_lrb *lrbp;
2711 int err;
2712 int tag;
2713 struct completion wait;
2714 unsigned long flags;
2715
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08002716 down_read(&hba->clk_scaling_lock);
2717
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302718 /*
2719	 * Get a free slot; blk_get_request() sleeps until a tag is
2720	 * available, and in practice the wait is bounded by the SCSI
2721	 * request timeout of the commands holding those tags.
2722 */
Bart Van Assche7252a362019-12-09 10:13:08 -08002723 req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
Dan Carpenterbb14dd12019-12-13 13:48:28 +03002724 if (IS_ERR(req)) {
2725 err = PTR_ERR(req);
2726 goto out_unlock;
2727 }
Bart Van Assche7252a362019-12-09 10:13:08 -08002728 tag = req->tag;
2729 WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302730
2731 init_completion(&wait);
2732 lrbp = &hba->lrb[tag];
2733 WARN_ON(lrbp->cmd);
2734 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2735 if (unlikely(err))
2736 goto out_put_tag;
2737
2738 hba->dev_cmd.complete = &wait;
2739
Ohad Sharabi6667e6d2018-03-28 12:42:18 +03002740 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
Yaniv Gardie3dfdc52016-02-01 15:02:49 +02002741 /* Make sure descriptors are ready before ringing the doorbell */
2742 wmb();
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302743 spin_lock_irqsave(hba->host->host_lock, flags);
Bart Van Assche5905d462020-01-22 19:56:36 -08002744 ufshcd_vops_setup_xfer_req(hba, tag, false);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302745 ufshcd_send_command(hba, tag);
2746 spin_unlock_irqrestore(hba->host->host_lock, flags);
2747
2748 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2749
Ohad Sharabi6667e6d2018-03-28 12:42:18 +03002750 ufshcd_add_query_upiu_trace(hba, tag,
2751 err ? "query_complete_err" : "query_complete");
2752
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302753out_put_tag:
Bart Van Assche7252a362019-12-09 10:13:08 -08002754 blk_put_request(req);
Dan Carpenterbb14dd12019-12-13 13:48:28 +03002755out_unlock:
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08002756 up_read(&hba->clk_scaling_lock);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302757 return err;
2758}
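/*
 * Illustrative sketch, not part of the driver: a caller issuing a NOP OUT
 * through ufshcd_exec_dev_cmd() is expected to take hba->dev_cmd.lock and
 * keep the clocks alive for the duration of the command. The timeout macro
 * name (NOP_OUT_TIMEOUT) is assumed from the defines earlier in this file.
 *
 *	int err;
 *
 *	ufshcd_hold(hba, false);
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 *	ufshcd_release(hba);
 */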
2759
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302760/**
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002761 * ufshcd_init_query() - init the query response and request parameters
2762 * @hba: per-adapter instance
2763 * @request: address of the request pointer to be initialized
2764 * @response: address of the response pointer to be initialized
2765 * @opcode: operation to perform
2766 * @idn: flag idn to access
2767 * @index: LU number to access
2768 * @selector: query/flag/descriptor further identification
2769 */
2770static inline void ufshcd_init_query(struct ufs_hba *hba,
2771 struct ufs_query_req **request, struct ufs_query_res **response,
2772 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2773{
2774 *request = &hba->dev_cmd.query.request;
2775 *response = &hba->dev_cmd.query.response;
2776 memset(*request, 0, sizeof(struct ufs_query_req));
2777 memset(*response, 0, sizeof(struct ufs_query_res));
2778 (*request)->upiu_req.opcode = opcode;
2779 (*request)->upiu_req.idn = idn;
2780 (*request)->upiu_req.index = index;
2781 (*request)->upiu_req.selector = selector;
2782}
2783
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02002784static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2785 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2786{
2787 int ret;
2788 int retries;
2789
2790 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2791 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2792 if (ret)
2793 dev_dbg(hba->dev,
2794 "%s: failed with error %d, retries %d\n",
2795 __func__, ret, retries);
2796 else
2797 break;
2798 }
2799
2800 if (ret)
2801 dev_err(hba->dev,
2802 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
2803 __func__, opcode, idn, ret, retries);
2804 return ret;
2805}
2806
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002807/**
Dolev Raviv68078d52013-07-30 00:35:58 +05302808 * ufshcd_query_flag() - API function for sending flag query requests
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002809 * @hba: per-adapter instance
2810 * @opcode: flag query to perform
2811 * @idn: flag idn to access
2812 * @flag_res: the flag value after the query request completes
Dolev Raviv68078d52013-07-30 00:35:58 +05302813 *
2814 * Returns 0 for success, non-zero in case of failure
2815 */
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02002816int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
Dolev Raviv68078d52013-07-30 00:35:58 +05302817 enum flag_idn idn, bool *flag_res)
2818{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002819 struct ufs_query_req *request = NULL;
2820 struct ufs_query_res *response = NULL;
2821 int err, index = 0, selector = 0;
Yaniv Gardie5ad4062016-02-01 15:02:41 +02002822 int timeout = QUERY_REQ_TIMEOUT;
Dolev Raviv68078d52013-07-30 00:35:58 +05302823
2824 BUG_ON(!hba);
2825
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002826 ufshcd_hold(hba, false);
Dolev Raviv68078d52013-07-30 00:35:58 +05302827 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002828 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2829 selector);
Dolev Raviv68078d52013-07-30 00:35:58 +05302830
2831 switch (opcode) {
2832 case UPIU_QUERY_OPCODE_SET_FLAG:
2833 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2834 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2835 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2836 break;
2837 case UPIU_QUERY_OPCODE_READ_FLAG:
2838 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2839 if (!flag_res) {
2840 /* No dummy reads */
2841 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2842 __func__);
2843 err = -EINVAL;
2844 goto out_unlock;
2845 }
2846 break;
2847 default:
2848 dev_err(hba->dev,
2849 "%s: Expected query flag opcode but got = %d\n",
2850 __func__, opcode);
2851 err = -EINVAL;
2852 goto out_unlock;
2853 }
Dolev Raviv68078d52013-07-30 00:35:58 +05302854
Yaniv Gardie5ad4062016-02-01 15:02:41 +02002855 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
Dolev Raviv68078d52013-07-30 00:35:58 +05302856
2857 if (err) {
2858 dev_err(hba->dev,
2859 "%s: Sending flag query for idn %d failed, err = %d\n",
2860 __func__, idn, err);
2861 goto out_unlock;
2862 }
2863
2864 if (flag_res)
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302865 *flag_res = (be32_to_cpu(response->upiu_res.value) &
Dolev Raviv68078d52013-07-30 00:35:58 +05302866 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2867
2868out_unlock:
2869 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002870 ufshcd_release(hba);
Dolev Raviv68078d52013-07-30 00:35:58 +05302871 return err;
2872}
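/*
 * Illustrative sketch, not part of the driver: reading the fDeviceInit flag
 * through the retry wrapper above. QUERY_FLAG_IDN_FDEVICEINIT is assumed to
 * be the flag IDN defined in ufs.h.
 *
 *	bool flag_res = false;
 *	int err;
 *
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				      QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 *	if (!err)
 *		dev_dbg(hba->dev, "fDeviceInit = %d\n", flag_res);
 */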
2873
2874/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302875 * ufshcd_query_attr - API function for sending attribute requests
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002876 * @hba: per-adapter instance
2877 * @opcode: attribute opcode
2878 * @idn: attribute idn to access
2879 * @index: index field
2880 * @selector: selector field
2881 * @attr_val: the attribute value after the query request completes
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302882 *
2883 * Returns 0 for success, non-zero in case of failure
2884 */
Stanislav Nijnikovec92b592018-02-15 14:14:11 +02002885int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2886 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302887{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002888 struct ufs_query_req *request = NULL;
2889 struct ufs_query_res *response = NULL;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302890 int err;
2891
2892 BUG_ON(!hba);
2893
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002894 ufshcd_hold(hba, false);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302895 if (!attr_val) {
2896 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2897 __func__, opcode);
2898 err = -EINVAL;
2899 goto out;
2900 }
2901
2902 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002903 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2904 selector);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302905
2906 switch (opcode) {
2907 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2908 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302909 request->upiu_req.value = cpu_to_be32(*attr_val);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302910 break;
2911 case UPIU_QUERY_OPCODE_READ_ATTR:
2912 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2913 break;
2914 default:
2915 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2916 __func__, opcode);
2917 err = -EINVAL;
2918 goto out_unlock;
2919 }
2920
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002921 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302922
2923 if (err) {
Yaniv Gardi4b761b52016-11-23 16:31:18 -08002924 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2925 __func__, opcode, idn, index, err);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302926 goto out_unlock;
2927 }
2928
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302929 *attr_val = be32_to_cpu(response->upiu_res.value);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302930
2931out_unlock:
2932 mutex_unlock(&hba->dev_cmd.lock);
2933out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002934 ufshcd_release(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302935 return err;
2936}
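/*
 * Illustrative sketch, not part of the driver: reading the bBkOpsStatus
 * attribute with index and selector 0. QUERY_ATTR_IDN_BKOPS_STATUS is
 * assumed to be the attribute IDN defined in ufs.h.
 *
 *	u32 bkops_status = 0;
 *	int err;
 *
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0,
 *				&bkops_status);
 *	if (!err)
 *		dev_dbg(hba->dev, "bBkOpsStatus = 0x%x\n", bkops_status);
 */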
2937
2938/**
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02002939 * ufshcd_query_attr_retry() - API function for sending query
2940 * attribute with retries
2941 * @hba: per-adapter instance
2942 * @opcode: attribute opcode
2943 * @idn: attribute idn to access
2944 * @index: index field
2945 * @selector: selector field
2946 * @attr_val: the attribute value after the query request
2947 * completes
2948 *
2949 * Returns 0 for success, non-zero in case of failure
2950 */
2951static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2952 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2953 u32 *attr_val)
2954{
2955 int ret = 0;
2956 u32 retries;
2957
Bart Van Assche68c9fcf2019-12-24 14:02:43 -08002958 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02002959 ret = ufshcd_query_attr(hba, opcode, idn, index,
2960 selector, attr_val);
2961 if (ret)
2962 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2963 __func__, ret, retries);
2964 else
2965 break;
2966 }
2967
2968 if (ret)
2969 dev_err(hba->dev,
2970 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
2971 __func__, idn, ret, QUERY_REQ_RETRIES);
2972 return ret;
2973}
2974
Yaniv Gardia70e91b2016-03-10 17:37:14 +02002975static int __ufshcd_query_descriptor(struct ufs_hba *hba,
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002976 enum query_opcode opcode, enum desc_idn idn, u8 index,
2977 u8 selector, u8 *desc_buf, int *buf_len)
2978{
2979 struct ufs_query_req *request = NULL;
2980 struct ufs_query_res *response = NULL;
2981 int err;
2982
2983 BUG_ON(!hba);
2984
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002985 ufshcd_hold(hba, false);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002986 if (!desc_buf) {
2987 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2988 __func__, opcode);
2989 err = -EINVAL;
2990 goto out;
2991 }
2992
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00002993 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002994 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2995 __func__, *buf_len);
2996 err = -EINVAL;
2997 goto out;
2998 }
2999
3000 mutex_lock(&hba->dev_cmd.lock);
3001 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3002 selector);
3003 hba->dev_cmd.query.descriptor = desc_buf;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03003004 request->upiu_req.length = cpu_to_be16(*buf_len);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003005
3006 switch (opcode) {
3007 case UPIU_QUERY_OPCODE_WRITE_DESC:
3008 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3009 break;
3010 case UPIU_QUERY_OPCODE_READ_DESC:
3011 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3012 break;
3013 default:
3014 dev_err(hba->dev,
3015 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3016 __func__, opcode);
3017 err = -EINVAL;
3018 goto out_unlock;
3019 }
3020
3021 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3022
3023 if (err) {
Yaniv Gardi4b761b52016-11-23 16:31:18 -08003024 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3025 __func__, opcode, idn, index, err);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003026 goto out_unlock;
3027 }
3028
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03003029 *buf_len = be16_to_cpu(response->upiu_res.length);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003030
3031out_unlock:
Bean Huocfcbae32019-11-12 23:34:36 +01003032 hba->dev_cmd.query.descriptor = NULL;
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003033 mutex_unlock(&hba->dev_cmd.lock);
3034out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003035 ufshcd_release(hba);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003036 return err;
3037}
3038
3039/**
Bart Van Assche8aa29f12018-03-01 15:07:20 -08003040 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3041 * @hba: per-adapter instance
3042 * @opcode: descriptor opcode
3043 * @idn: descriptor idn to access
3044 * @index: index field
3045 * @selector: selector field
3046 * @desc_buf: the buffer that contains the descriptor
3047 * @buf_len: length parameter passed to the device
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003048 *
3049 * Returns 0 for success, non-zero in case of failure.
3050 * The buf_len parameter will contain, on return, the length parameter
3051 * received on the response.
3052 */
Stanislav Nijnikov2238d312018-02-15 14:14:07 +02003053int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3054 enum query_opcode opcode,
3055 enum desc_idn idn, u8 index,
3056 u8 selector,
3057 u8 *desc_buf, int *buf_len)
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003058{
3059 int err;
3060 int retries;
3061
3062 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3063 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3064 selector, desc_buf, buf_len);
3065 if (!err || err == -EINVAL)
3066 break;
3067 }
3068
3069 return err;
3070}
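/*
 * Illustrative sketch, not part of the driver: reading the whole device
 * descriptor into a local buffer. The buffer length is taken from the
 * per-descriptor sizes cached in hba->desc_size at probe time.
 *
 *	u8 desc_buf[QUERY_DESC_MAX_SIZE];
 *	int buf_len = hba->desc_size.dev_desc;
 *	int err;
 *
 *	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *					    QUERY_DESC_IDN_DEVICE, 0, 0,
 *					    desc_buf, &buf_len);
 *	if (!err)
 *		dev_dbg(hba->dev, "device descriptor length = %d\n", buf_len);
 */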
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003071
3072/**
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003073 * ufshcd_read_desc_length - read the specified descriptor length from header
3074 * @hba: Pointer to adapter instance
3075 * @desc_id: descriptor idn value
3076 * @desc_index: descriptor index
3077 * @desc_length: pointer to variable to read the length of descriptor
3078 *
3079 * Return 0 in case of success, non-zero otherwise
3080 */
3081static int ufshcd_read_desc_length(struct ufs_hba *hba,
3082 enum desc_idn desc_id,
3083 int desc_index,
3084 int *desc_length)
3085{
3086 int ret;
3087 u8 header[QUERY_DESC_HDR_SIZE];
3088 int header_len = QUERY_DESC_HDR_SIZE;
3089
3090 if (desc_id >= QUERY_DESC_IDN_MAX)
3091 return -EINVAL;
3092
3093 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3094 desc_id, desc_index, 0, header,
3095 &header_len);
3096
3097 if (ret) {
3098 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3099 __func__, desc_id);
3100 return ret;
3101 } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3102 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3103 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3104 desc_id);
3105 ret = -EINVAL;
3106 }
3107
3108 *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3109 return ret;
3110
3111}
3112
3113/**
3114 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3115 * @hba: Pointer to adapter instance
3116 * @desc_id: descriptor idn value
3117 * @desc_len: mapped desc length (out)
3118 *
3119 * Return 0 in case of success, non-zero otherwise
3120 */
3121int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3122 enum desc_idn desc_id, int *desc_len)
3123{
3124 switch (desc_id) {
3125 case QUERY_DESC_IDN_DEVICE:
3126 *desc_len = hba->desc_size.dev_desc;
3127 break;
3128 case QUERY_DESC_IDN_POWER:
3129 *desc_len = hba->desc_size.pwr_desc;
3130 break;
3131 case QUERY_DESC_IDN_GEOMETRY:
3132 *desc_len = hba->desc_size.geom_desc;
3133 break;
3134 case QUERY_DESC_IDN_CONFIGURATION:
3135 *desc_len = hba->desc_size.conf_desc;
3136 break;
3137 case QUERY_DESC_IDN_UNIT:
3138 *desc_len = hba->desc_size.unit_desc;
3139 break;
3140 case QUERY_DESC_IDN_INTERCONNECT:
3141 *desc_len = hba->desc_size.interc_desc;
3142 break;
3143 case QUERY_DESC_IDN_STRING:
3144 *desc_len = QUERY_DESC_MAX_SIZE;
3145 break;
Stanislav Nijnikovc648c2d2018-02-15 14:14:05 +02003146 case QUERY_DESC_IDN_HEALTH:
3147 *desc_len = hba->desc_size.hlth_desc;
3148 break;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003149 case QUERY_DESC_IDN_RFU_0:
3150 case QUERY_DESC_IDN_RFU_1:
3151 *desc_len = 0;
3152 break;
3153 default:
3154 *desc_len = 0;
3155 return -EINVAL;
3156 }
3157 return 0;
3158}
3159EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
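/*
 * Illustrative sketch, not part of the driver: a caller sizing a read buffer
 * before issuing a descriptor query.
 *
 *	int geom_len = 0;
 *
 *	if (!ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_GEOMETRY,
 *					  &geom_len))
 *		dev_dbg(hba->dev, "geometry descriptor is %d bytes\n",
 *			geom_len);
 */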
3160
3161/**
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003162 * ufshcd_read_desc_param - read the specified descriptor parameter
3163 * @hba: Pointer to adapter instance
3164 * @desc_id: descriptor idn value
3165 * @desc_index: descriptor index
3166 * @param_offset: offset of the parameter to read
3167 * @param_read_buf: pointer to buffer where parameter would be read
3168 * @param_size: sizeof(param_read_buf)
3169 *
3170 * Return 0 in case of success, non-zero otherwise
3171 */
Stanislav Nijnikov45bced82018-02-15 14:14:02 +02003172int ufshcd_read_desc_param(struct ufs_hba *hba,
3173 enum desc_idn desc_id,
3174 int desc_index,
3175 u8 param_offset,
3176 u8 *param_read_buf,
3177 u8 param_size)
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003178{
3179 int ret;
3180 u8 *desc_buf;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003181 int buff_len;
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003182 bool is_kmalloc = true;
3183
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003184 /* Safety check */
3185 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003186 return -EINVAL;
3187
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003188 /* Get the max length of descriptor from structure filled up at probe
3189 * time.
3190 */
3191 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003192
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003193 /* Sanity checks */
3194 if (ret || !buff_len) {
3195 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3196 __func__);
3197 return ret;
3198 }
3199
3200 /* Check whether we need temp memory */
3201 if (param_offset != 0 || param_size < buff_len) {
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003202 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3203 if (!desc_buf)
3204 return -ENOMEM;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003205 } else {
3206 desc_buf = param_read_buf;
3207 is_kmalloc = false;
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003208 }
3209
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003210 /* Request for full descriptor */
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003211 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003212 desc_id, desc_index, 0,
3213 desc_buf, &buff_len);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003214
subhashj@codeaurora.orgbde44bb2016-11-23 16:31:41 -08003215 if (ret) {
3216 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3217 __func__, desc_id, desc_index, param_offset, ret);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003218 goto out;
3219 }
3220
subhashj@codeaurora.orgbde44bb2016-11-23 16:31:41 -08003221 /* Sanity check */
3222 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3223 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3224 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3225 ret = -EINVAL;
3226 goto out;
3227 }
3228
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003229	/* Check whether we will not copy more data than is available */
3230 if (is_kmalloc && param_size > buff_len)
3231 param_size = buff_len;
subhashj@codeaurora.orgbde44bb2016-11-23 16:31:41 -08003232
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003233 if (is_kmalloc)
3234 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3235out:
3236 if (is_kmalloc)
3237 kfree(desc_buf);
3238 return ret;
3239}
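/*
 * Illustrative sketch, not part of the driver: reading the two byte
 * wSpecVersion field of the device descriptor. DEVICE_DESC_PARAM_SPEC_VER is
 * assumed to be the parameter offset defined in ufs.h.
 *
 *	u8 spec_ver[2];
 *	int err;
 *
 *	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
 *				     DEVICE_DESC_PARAM_SPEC_VER,
 *				     spec_ver, sizeof(spec_ver));
 *	if (!err)
 *		dev_dbg(hba->dev, "wSpecVersion = 0x%04x\n",
 *			get_unaligned_be16(spec_ver));
 */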
3240
3241static inline int ufshcd_read_desc(struct ufs_hba *hba,
3242 enum desc_idn desc_id,
3243 int desc_index,
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003244 void *buf,
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003245 u32 size)
3246{
3247 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3248}
3249
Yaniv Gardib573d482016-03-10 17:37:09 +02003250
3251/**
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003252 * struct uc_string_id - unicode string
3253 *
3254 * @len: size of this descriptor inclusive
3255 * @type: descriptor type
3256 * @uc: unicode string character
3257 */
3258struct uc_string_id {
3259 u8 len;
3260 u8 type;
Gustavo A. R. Silvaec38c0a2020-05-07 14:25:50 -05003261 wchar_t uc[];
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003262} __packed;
3263
3264/* replace non-printable or non-ASCII characters with spaces */
3265static inline char ufshcd_remove_non_printable(u8 ch)
3266{
3267 return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3268}
3269
3270/**
Yaniv Gardib573d482016-03-10 17:37:09 +02003271 * ufshcd_read_string_desc - read string descriptor
3272 * @hba: pointer to adapter instance
3273 * @desc_index: descriptor index
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003274 * @buf: pointer to buffer where descriptor would be read,
3275 * the caller should free the memory.
Yaniv Gardib573d482016-03-10 17:37:09 +02003276 * @ascii: if true convert from unicode to ascii characters
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003277 * null terminated string.
Yaniv Gardib573d482016-03-10 17:37:09 +02003278 *
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003279 * Return:
3280 * * string size on success.
3281 * * -ENOMEM: on allocation failure
3282 * * -EINVAL: on a wrong parameter
Yaniv Gardib573d482016-03-10 17:37:09 +02003283 */
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003284int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3285 u8 **buf, bool ascii)
Yaniv Gardib573d482016-03-10 17:37:09 +02003286{
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003287 struct uc_string_id *uc_str;
3288 u8 *str;
3289 int ret;
Yaniv Gardib573d482016-03-10 17:37:09 +02003290
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003291 if (!buf)
3292 return -EINVAL;
Yaniv Gardib573d482016-03-10 17:37:09 +02003293
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003294 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3295 if (!uc_str)
3296 return -ENOMEM;
3297
3298 ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING,
3299 desc_index, uc_str,
3300 QUERY_DESC_MAX_SIZE);
3301 if (ret < 0) {
3302 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3303 QUERY_REQ_RETRIES, ret);
3304 str = NULL;
3305 goto out;
3306 }
3307
3308 if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3309 dev_dbg(hba->dev, "String Desc is of zero length\n");
3310 str = NULL;
3311 ret = 0;
Yaniv Gardib573d482016-03-10 17:37:09 +02003312 goto out;
3313 }
3314
3315 if (ascii) {
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003316 ssize_t ascii_len;
Yaniv Gardib573d482016-03-10 17:37:09 +02003317 int i;
Yaniv Gardib573d482016-03-10 17:37:09 +02003318 /* remove header and divide by 2 to move from UTF16 to UTF8 */
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003319 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3320 str = kzalloc(ascii_len, GFP_KERNEL);
3321 if (!str) {
3322 ret = -ENOMEM;
Tiezhu Yangfcbefc32016-06-25 12:35:22 +08003323 goto out;
Yaniv Gardib573d482016-03-10 17:37:09 +02003324 }
3325
3326 /*
3327 * the descriptor contains string in UTF16 format
3328 * we need to convert to utf-8 so it can be displayed
3329 */
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003330 ret = utf16s_to_utf8s(uc_str->uc,
3331 uc_str->len - QUERY_DESC_HDR_SIZE,
3332 UTF16_BIG_ENDIAN, str, ascii_len);
Yaniv Gardib573d482016-03-10 17:37:09 +02003333
3334 /* replace non-printable or non-ASCII characters with spaces */
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003335 for (i = 0; i < ret; i++)
3336 str[i] = ufshcd_remove_non_printable(str[i]);
Yaniv Gardib573d482016-03-10 17:37:09 +02003337
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003338 str[ret++] = '\0';
3339
3340 } else {
YueHaibing5f577042019-08-31 12:44:24 +00003341 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003342 if (!str) {
3343 ret = -ENOMEM;
3344 goto out;
3345 }
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003346 ret = uc_str->len;
Yaniv Gardib573d482016-03-10 17:37:09 +02003347 }
3348out:
Tomas Winkler4b828fe2019-07-30 08:55:17 +03003349 *buf = str;
3350 kfree(uc_str);
3351 return ret;
Yaniv Gardib573d482016-03-10 17:37:09 +02003352}
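/*
 * Illustrative sketch, not part of the driver: reading the product name in
 * ASCII. The string descriptor index (model_index here) is assumed to have
 * been taken from the device descriptor; the returned buffer must be freed
 * by the caller.
 *
 *	u8 *model = NULL;
 *	int ret;
 *
 *	ret = ufshcd_read_string_desc(hba, model_index, &model, true);
 *	if (ret > 0)
 *		dev_dbg(hba->dev, "product name: %s\n", model);
 *	kfree(model);
 */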
Yaniv Gardib573d482016-03-10 17:37:09 +02003353
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003354/**
3355 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3356 * @hba: Pointer to adapter instance
3357 * @lun: lun id
3358 * @param_offset: offset of the parameter to read
3359 * @param_read_buf: pointer to buffer where parameter would be read
3360 * @param_size: sizeof(param_read_buf)
3361 *
3362 * Return 0 in case of success, non-zero otherwise
3363 */
3364static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3365 int lun,
3366 enum unit_desc_param param_offset,
3367 u8 *param_read_buf,
3368 u32 param_size)
3369{
3370 /*
3371 * Unit descriptors are only available for general purpose LUs (LUN id
3372 * from 0 to 7) and RPMB Well known LU.
3373 */
Bean Huo1baa8012020-01-20 14:08:20 +01003374 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003375 return -EOPNOTSUPP;
3376
3377 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3378 param_offset, param_read_buf, param_size);
3379}
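/*
 * Illustrative sketch, not part of the driver: reading the queue depth of a
 * general purpose LU. UNIT_DESC_PARAM_LU_Q_DEPTH is assumed to be the unit
 * descriptor offset defined in ufs.h, and lun an already validated LU number.
 *
 *	u8 lun_qdepth = 0;
 *	int ret;
 *
 *	ret = ufshcd_read_unit_desc_param(hba, lun, UNIT_DESC_PARAM_LU_Q_DEPTH,
 *					  &lun_qdepth, sizeof(lun_qdepth));
 *	if (!ret)
 *		dev_dbg(hba->dev, "LU %d queue depth = %d\n", lun, lun_qdepth);
 */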
3380
Can Guo09f17792020-02-10 19:40:49 -08003381static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3382{
3383 int err = 0;
3384 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3385
3386 if (hba->dev_info.wspecversion >= 0x300) {
3387 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3388 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3389 &gating_wait);
3390 if (err)
3391 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3392 err, gating_wait);
3393
3394 if (gating_wait == 0) {
3395 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3396 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3397 gating_wait);
3398 }
3399
3400 hba->dev_info.clk_gating_wait_us = gating_wait;
3401 }
3402
3403 return err;
3404}
3405
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003406/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303407 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3408 * @hba: per adapter instance
3409 *
3410 * 1. Allocate DMA memory for Command Descriptor array
3411 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3412 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3413 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3414 * (UTMRDL)
3415 * 4. Allocate memory for local reference block(lrb).
3416 *
3417 * Returns 0 for success, non-zero in case of failure
3418 */
3419static int ufshcd_memory_alloc(struct ufs_hba *hba)
3420{
3421 size_t utmrdl_size, utrdl_size, ucdl_size;
3422
3423 /* Allocate memory for UTP command descriptors */
3424 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09003425 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3426 ucdl_size,
3427 &hba->ucdl_dma_addr,
3428 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303429
3430 /*
3431 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3432 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
3433 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3434 * be aligned to 128 bytes as well
3435 */
3436 if (!hba->ucdl_base_addr ||
3437 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303438 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303439 "Command Descriptor Memory allocation failed\n");
3440 goto out;
3441 }
3442
3443 /*
3444 * Allocate memory for UTP Transfer descriptors
3445 * UFSHCI requires 1024 byte alignment of UTRD
3446 */
3447 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09003448 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3449 utrdl_size,
3450 &hba->utrdl_dma_addr,
3451 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303452 if (!hba->utrdl_base_addr ||
3453 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303454 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303455 "Transfer Descriptor Memory allocation failed\n");
3456 goto out;
3457 }
3458
3459 /*
3460 * Allocate memory for UTP Task Management descriptors
3461 * UFSHCI requires 1024 byte alignment of UTMRD
3462 */
3463 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
Seungwon Jeon2953f852013-06-27 13:31:54 +09003464 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3465 utmrdl_size,
3466 &hba->utmrdl_dma_addr,
3467 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303468 if (!hba->utmrdl_base_addr ||
3469 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303470 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303471 "Task Management Descriptor Memory allocation failed\n");
3472 goto out;
3473 }
3474
3475 /* Allocate memory for local reference block */
Kees Cooka86854d2018-06-12 14:07:58 -07003476 hba->lrb = devm_kcalloc(hba->dev,
3477 hba->nutrs, sizeof(struct ufshcd_lrb),
Seungwon Jeon2953f852013-06-27 13:31:54 +09003478 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303479 if (!hba->lrb) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303480 dev_err(hba->dev, "LRB Memory allocation failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303481 goto out;
3482 }
3483 return 0;
3484out:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303485 return -ENOMEM;
3486}
3487
3488/**
3489 * ufshcd_host_memory_configure - configure local reference block with
3490 * memory offsets
3491 * @hba: per adapter instance
3492 *
3493 * Configure Host memory space
3494 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3495 * address.
3496 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3497 * and PRDT offset.
3498 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3499 * into local reference block.
3500 */
3501static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3502{
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303503 struct utp_transfer_req_desc *utrdlp;
3504 dma_addr_t cmd_desc_dma_addr;
3505 dma_addr_t cmd_desc_element_addr;
3506 u16 response_offset;
3507 u16 prdt_offset;
3508 int cmd_desc_size;
3509 int i;
3510
3511 utrdlp = hba->utrdl_base_addr;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303512
3513 response_offset =
3514 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3515 prdt_offset =
3516 offsetof(struct utp_transfer_cmd_desc, prd_table);
3517
3518 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3519 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3520
3521 for (i = 0; i < hba->nutrs; i++) {
3522 /* Configure UTRD with command descriptor base address */
3523 cmd_desc_element_addr =
3524 (cmd_desc_dma_addr + (cmd_desc_size * i));
3525 utrdlp[i].command_desc_base_addr_lo =
3526 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3527 utrdlp[i].command_desc_base_addr_hi =
3528 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3529
3530 /* Response upiu and prdt offset should be in double words */
Christoph Hellwig492001992020-02-21 06:08:11 -08003531 utrdlp[i].response_upiu_offset =
3532 cpu_to_le16(response_offset >> 2);
3533 utrdlp[i].prd_table_offset = cpu_to_le16(prdt_offset >> 2);
3534 utrdlp[i].response_upiu_length =
3535 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303536
Bart Van Assche4d2b8d42020-01-22 19:56:35 -08003537 ufshcd_init_lrb(hba, &hba->lrb[i], i);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303538 }
3539}
3540
3541/**
3542 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3543 * @hba: per adapter instance
3544 *
3545 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3546 * in order to initialize the Unipro link startup procedure.
3547 * Once the Unipro links are up, the device connected to the controller
3548 * is detected.
3549 *
3550 * Returns 0 on success, non-zero value on failure
3551 */
3552static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3553{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303554 struct uic_command uic_cmd = {0};
3555 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303556
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303557 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3558
3559 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3560 if (ret)
Dolev Ravivff8e20c2016-12-22 18:42:18 -08003561 dev_dbg(hba->dev,
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303562 "dme-link-startup: error code %d\n", ret);
3563 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303564}
3565
Yaniv Gardicad2e032015-03-31 17:37:14 +03003566static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3567{
3568 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3569 unsigned long min_sleep_time_us;
3570
3571 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3572 return;
3573
3574 /*
3575 * last_dme_cmd_tstamp will be 0 only for 1st call to
3576 * this function
3577 */
3578 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3579 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3580 } else {
3581 unsigned long delta =
3582 (unsigned long) ktime_to_us(
3583 ktime_sub(ktime_get(),
3584 hba->last_dme_cmd_tstamp));
3585
3586 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3587 min_sleep_time_us =
3588 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3589 else
3590 return; /* no more delay required */
3591 }
3592
3593 /* allow sleep for extra 50us if needed */
3594 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3595}
3596
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303597/**
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303598 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3599 * @hba: per adapter instance
3600 * @attr_sel: uic command argument1
3601 * @attr_set: attribute set type as uic command argument2
3602 * @mib_val: setting value as uic command argument3
3603 * @peer: indicate whether peer or local
3604 *
3605 * Returns 0 on success, non-zero value on failure
3606 */
3607int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3608 u8 attr_set, u32 mib_val, u8 peer)
3609{
3610 struct uic_command uic_cmd = {0};
3611 static const char *const action[] = {
3612 "dme-set",
3613 "dme-peer-set"
3614 };
3615 const char *set = action[!!peer];
3616 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003617 int retries = UFS_UIC_COMMAND_RETRIES;
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303618
3619 uic_cmd.command = peer ?
3620 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3621 uic_cmd.argument1 = attr_sel;
3622 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3623 uic_cmd.argument3 = mib_val;
3624
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003625 do {
3626 /* for peer attributes we retry upon failure */
3627 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3628 if (ret)
3629 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3630 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3631 } while (ret && peer && --retries);
3632
Yaniv Gardif37e9f82016-11-23 16:32:49 -08003633 if (ret)
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003634 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
Yaniv Gardif37e9f82016-11-23 16:32:49 -08003635 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3636 UFS_UIC_COMMAND_RETRIES - retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303637
3638 return ret;
3639}
3640EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
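/*
 * Illustrative sketch, not part of the driver: most callers go through the
 * ufshcd_dme_set()/ufshcd_dme_peer_set() wrappers from ufshcd.h rather than
 * calling ufshcd_dme_set_attr() directly, e.g. when forcing RX termination
 * during a power mode change.
 *
 *	int ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
 *
 *	if (ret)
 *		dev_dbg(hba->dev, "failed to set PA_RXTERMINATION: %d\n", ret);
 */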
3641
3642/**
3643 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3644 * @hba: per adapter instance
3645 * @attr_sel: uic command argument1
3646 * @mib_val: the value of the attribute as returned by the UIC command
3647 * @peer: indicate whether peer or local
3648 *
3649 * Returns 0 on success, non-zero value on failure
3650 */
3651int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3652 u32 *mib_val, u8 peer)
3653{
3654 struct uic_command uic_cmd = {0};
3655 static const char *const action[] = {
3656 "dme-get",
3657 "dme-peer-get"
3658 };
3659 const char *get = action[!!peer];
3660 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003661 int retries = UFS_UIC_COMMAND_RETRIES;
Yaniv Gardi874237f2015-05-17 18:55:03 +03003662 struct ufs_pa_layer_attr orig_pwr_info;
3663 struct ufs_pa_layer_attr temp_pwr_info;
3664 bool pwr_mode_change = false;
3665
3666 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3667 orig_pwr_info = hba->pwr_info;
3668 temp_pwr_info = orig_pwr_info;
3669
3670 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3671 orig_pwr_info.pwr_rx == FAST_MODE) {
3672 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3673 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3674 pwr_mode_change = true;
3675 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3676 orig_pwr_info.pwr_rx == SLOW_MODE) {
3677 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3678 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3679 pwr_mode_change = true;
3680 }
3681 if (pwr_mode_change) {
3682 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3683 if (ret)
3684 goto out;
3685 }
3686 }
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303687
3688 uic_cmd.command = peer ?
3689 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3690 uic_cmd.argument1 = attr_sel;
3691
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003692 do {
3693 /* for peer attributes we retry upon failure */
3694 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3695 if (ret)
3696 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3697 get, UIC_GET_ATTR_ID(attr_sel), ret);
3698 } while (ret && peer && --retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303699
Yaniv Gardif37e9f82016-11-23 16:32:49 -08003700 if (ret)
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003701 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
Yaniv Gardif37e9f82016-11-23 16:32:49 -08003702 get, UIC_GET_ATTR_ID(attr_sel),
3703 UFS_UIC_COMMAND_RETRIES - retries);
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003704
3705 if (mib_val && !ret)
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303706 *mib_val = uic_cmd.argument3;
Yaniv Gardi874237f2015-05-17 18:55:03 +03003707
3708 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3709 && pwr_mode_change)
3710 ufshcd_change_power_mode(hba, &orig_pwr_info);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303711out:
3712 return ret;
3713}
3714EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
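/*
 * Illustrative sketch, not part of the driver: reading a local attribute
 * through ufshcd_dme_get() and a peer (device side) attribute through
 * ufshcd_dme_peer_get(), much like ufshcd_get_max_pwr_mode() below.
 *
 *	u32 tx_lanes = 0, peer_rx_gear = 0;
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &tx_lanes);
 *	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &peer_rx_gear);
 */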
3715
3716/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003717 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3718 * state) and waits for it to take effect.
3719 *
3720 * @hba: per adapter instance
3721 * @cmd: UIC command to execute
3722 *
3723 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3724 * DME_HIBERNATE_EXIT commands take some time to take its effect on both host
3725 * and device UniPro link and hence its final completion would be indicated by
3726 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3727 * addition to normal UIC command completion Status (UCCS). This function only
3728 * returns after the relevant status bits indicate the completion.
3729 *
3730 * Returns 0 on success, non-zero value on failure
3731 */
3732static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3733{
3734 struct completion uic_async_done;
3735 unsigned long flags;
3736 u8 status;
3737 int ret;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003738 bool reenable_intr = false;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003739
3740 mutex_lock(&hba->uic_cmd_mutex);
3741 init_completion(&uic_async_done);
Yaniv Gardicad2e032015-03-31 17:37:14 +03003742 ufshcd_add_delay_before_dme_cmd(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003743
3744 spin_lock_irqsave(hba->host->host_lock, flags);
3745 hba->uic_async_done = &uic_async_done;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003746 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3747 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3748 /*
3749 * Make sure UIC command completion interrupt is disabled before
3750 * issuing UIC command.
3751 */
3752 wmb();
3753 reenable_intr = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003754 }
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003755 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3756 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003757 if (ret) {
3758 dev_err(hba->dev,
3759 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3760 cmd->command, cmd->argument3, ret);
3761 goto out;
3762 }
3763
3764 if (!wait_for_completion_timeout(hba->uic_async_done,
3765 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3766 dev_err(hba->dev,
3767 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3768 cmd->command, cmd->argument3);
3769 ret = -ETIMEDOUT;
3770 goto out;
3771 }
3772
3773 status = ufshcd_get_upmcrs(hba);
3774 if (status != PWR_LOCAL) {
3775 dev_err(hba->dev,
Zang Leigang479da362017-09-19 16:50:30 +08003776 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003777 cmd->command, status);
3778 ret = (status != PWR_OK) ? status : -1;
3779 }
3780out:
Venkat Gopalakrishnan7942f7b2017-02-03 16:58:24 -08003781 if (ret) {
3782 ufshcd_print_host_state(hba);
3783 ufshcd_print_pwr_info(hba);
3784 ufshcd_print_host_regs(hba);
3785 }
3786
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003787 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003788 hba->active_uic_cmd = NULL;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003789 hba->uic_async_done = NULL;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003790 if (reenable_intr)
3791 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003792 spin_unlock_irqrestore(hba->host->host_lock, flags);
3793 mutex_unlock(&hba->uic_cmd_mutex);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003794
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003795 return ret;
3796}
3797
3798/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303799 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3800 * using DME_SET primitives.
3801 * @hba: per adapter instance
3802 * @mode: power mode value
3803 *
3804 * Returns 0 on success, non-zero value on failure
3805 */
Sujit Reddy Thummabdbe5d22014-05-26 10:59:11 +05303806static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303807{
3808 struct uic_command uic_cmd = {0};
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003809 int ret;
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303810
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03003811 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3812 ret = ufshcd_dme_set(hba,
3813 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3814 if (ret) {
3815 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3816 __func__, ret);
3817 goto out;
3818 }
3819 }
3820
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303821 uic_cmd.command = UIC_CMD_DME_SET;
3822 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3823 uic_cmd.argument3 = mode;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003824 ufshcd_hold(hba, false);
3825 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3826 ufshcd_release(hba);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303827
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03003828out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003829 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003830}
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303831
Stanley Chu087c5ef2020-03-27 17:53:28 +08003832int ufshcd_link_recovery(struct ufs_hba *hba)
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003833{
3834 int ret;
3835 unsigned long flags;
3836
3837 spin_lock_irqsave(hba->host->host_lock, flags);
3838 hba->ufshcd_state = UFSHCD_STATE_RESET;
3839 ufshcd_set_eh_in_progress(hba);
3840 spin_unlock_irqrestore(hba->host->host_lock, flags);
3841
Can Guoebdd1df2019-11-14 22:09:24 -08003842 /* Reset the attached device */
3843 ufshcd_vops_device_reset(hba);
3844
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003845 ret = ufshcd_host_reset_and_restore(hba);
3846
3847 spin_lock_irqsave(hba->host->host_lock, flags);
3848 if (ret)
3849 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3850 ufshcd_clear_eh_in_progress(hba);
3851 spin_unlock_irqrestore(hba->host->host_lock, flags);
3852
3853 if (ret)
3854 dev_err(hba->dev, "%s: link recovery failed, err %d",
3855 __func__, ret);
3856
3857 return ret;
3858}
Stanley Chu087c5ef2020-03-27 17:53:28 +08003859EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003860
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003861static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003862{
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003863 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003864 struct uic_command uic_cmd = {0};
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08003865 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003866
Kiwoong Kimee32c902016-11-10 21:17:43 +09003867 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3868
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003869 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003870 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08003871 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3872 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003873
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003874 if (ret) {
Subhash Jadavani6d303e42019-11-14 22:09:30 -08003875 int err;
3876
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003877 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3878 __func__, ret);
3879
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003880 /*
Subhash Jadavani6d303e42019-11-14 22:09:30 -08003881 * If link recovery fails then return error code returned from
3882 * ufshcd_link_recovery().
3883 * If link recovery succeeds then return -EAGAIN to attempt
3884 * hibern8 enter retry again.
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003885 */
Subhash Jadavani6d303e42019-11-14 22:09:30 -08003886 err = ufshcd_link_recovery(hba);
3887 if (err) {
3888 dev_err(hba->dev, "%s: link recovery failed", __func__);
3889 ret = err;
3890 } else {
3891 ret = -EAGAIN;
3892 }
Kiwoong Kimee32c902016-11-10 21:17:43 +09003893 } else
3894 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3895 POST_CHANGE);
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003896
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003897 return ret;
3898}
3899
3900static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3901{
3902 int ret = 0, retries;
3903
3904 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3905 ret = __ufshcd_uic_hibern8_enter(hba);
Subhash Jadavani6d303e42019-11-14 22:09:30 -08003906 if (!ret)
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003907 goto out;
3908 }
3909out:
3910 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003911}
3912
Stanley Chu9d19bf7a2020-01-17 11:51:07 +08003913int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003914{
3915 struct uic_command uic_cmd = {0};
3916 int ret;
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08003917 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003918
Kiwoong Kimee32c902016-11-10 21:17:43 +09003919 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3920
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003921 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3922 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08003923 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3924 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3925
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303926 if (ret) {
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003927 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3928 __func__, ret);
3929 ret = ufshcd_link_recovery(hba);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08003930 } else {
Kiwoong Kimee32c902016-11-10 21:17:43 +09003931 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3932 POST_CHANGE);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08003933 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3934 hba->ufs_stats.hibern8_exit_cnt++;
3935 }
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303936
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303937 return ret;
3938}
Stanley Chu9d19bf7a2020-01-17 11:51:07 +08003939EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303940
Stanley Chuba7af5e2019-12-30 13:32:28 +08003941void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
3942{
3943 unsigned long flags;
Can Guobe7594a2020-03-05 00:53:07 -08003944 bool update = false;
Stanley Chuba7af5e2019-12-30 13:32:28 +08003945
Can Guobe7594a2020-03-05 00:53:07 -08003946 if (!ufshcd_is_auto_hibern8_supported(hba))
Stanley Chuba7af5e2019-12-30 13:32:28 +08003947 return;
3948
3949 spin_lock_irqsave(hba->host->host_lock, flags);
Can Guobe7594a2020-03-05 00:53:07 -08003950 if (hba->ahit != ahit) {
3951 hba->ahit = ahit;
3952 update = true;
3953 }
Stanley Chuba7af5e2019-12-30 13:32:28 +08003954 spin_unlock_irqrestore(hba->host->host_lock, flags);
Can Guobe7594a2020-03-05 00:53:07 -08003955
3956 if (update && !pm_runtime_suspended(hba->dev)) {
3957 pm_runtime_get_sync(hba->dev);
3958 ufshcd_hold(hba, false);
3959 ufshcd_auto_hibern8_enable(hba);
3960 ufshcd_release(hba);
3961 pm_runtime_put(hba->dev);
3962 }
Stanley Chuba7af5e2019-12-30 13:32:28 +08003963}
3964EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
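/*
 * Illustrative sketch, not part of the driver: composing a new AHIT value.
 * The UFSHCI_AHIBERN8_TIMER_MASK/UFSHCI_AHIBERN8_SCALE_MASK field macros are
 * assumed to come from ufshci.h, and the scale code of 2 is assumed to mean
 * 100 us units, i.e. an idle timer of roughly 15 ms.
 *
 *	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
 *		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 2);
 *
 *	ufshcd_auto_hibern8_update(hba, ahit);
 */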
3965
Can Guo71d848b2019-11-14 22:09:26 -08003966void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
Adrian Hunterad448372018-03-20 15:07:38 +02003967{
3968 unsigned long flags;
3969
Stanley Chuee5f1042019-05-21 14:44:52 +08003970 if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
Adrian Hunterad448372018-03-20 15:07:38 +02003971 return;
3972
3973 spin_lock_irqsave(hba->host->host_lock, flags);
3974 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
3975 spin_unlock_irqrestore(hba->host->host_lock, flags);
3976}
3977
Yaniv Gardi50646362014-10-23 13:25:13 +03003978 /**
3979 * ufshcd_init_pwr_info - setting the POR (power on reset)
3980 * values in hba power info
3981 * @hba: per-adapter instance
3982 */
3983static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3984{
3985 hba->pwr_info.gear_rx = UFS_PWM_G1;
3986 hba->pwr_info.gear_tx = UFS_PWM_G1;
3987 hba->pwr_info.lane_rx = 1;
3988 hba->pwr_info.lane_tx = 1;
3989 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3990 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3991 hba->pwr_info.hs_rate = 0;
3992}
3993
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303994/**
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003995 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
3996 * @hba: per-adapter instance
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303997 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003998static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303999{
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004000 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4001
4002 if (hba->max_pwr_info.is_valid)
4003 return 0;
4004
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08004005 pwr_info->pwr_tx = FAST_MODE;
4006 pwr_info->pwr_rx = FAST_MODE;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004007 pwr_info->hs_rate = PA_HS_MODE_B;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304008
4009 /* Get the connected lane count */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004010 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4011 &pwr_info->lane_rx);
4012 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4013 &pwr_info->lane_tx);
4014
4015 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4016 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4017 __func__,
4018 pwr_info->lane_rx,
4019 pwr_info->lane_tx);
4020 return -EINVAL;
4021 }
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304022
4023 /*
4024 * First, get the maximum gears of HS speed.
4025 * If a zero value, it means there is no HSGEAR capability.
4026 * Then, get the maximum gears of PWM speed.
4027 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004028 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4029 if (!pwr_info->gear_rx) {
4030 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4031 &pwr_info->gear_rx);
4032 if (!pwr_info->gear_rx) {
4033 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4034 __func__, pwr_info->gear_rx);
4035 return -EINVAL;
4036 }
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08004037 pwr_info->pwr_rx = SLOW_MODE;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304038 }
4039
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004040 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4041 &pwr_info->gear_tx);
4042 if (!pwr_info->gear_tx) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304043 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004044 &pwr_info->gear_tx);
4045 if (!pwr_info->gear_tx) {
4046 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4047 __func__, pwr_info->gear_tx);
4048 return -EINVAL;
4049 }
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08004050 pwr_info->pwr_tx = SLOW_MODE;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004051 }
4052
4053 hba->max_pwr_info.is_valid = true;
4054 return 0;
4055}
4056
4057static int ufshcd_change_power_mode(struct ufs_hba *hba,
4058 struct ufs_pa_layer_attr *pwr_mode)
4059{
4060 int ret;
4061
4062 /* if already configured to the requested pwr_mode */
4063 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4064 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4065 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4066 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4067 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4068 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4069 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4070 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4071 return 0;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304072 }
4073
4074 /*
4075 * Configure attributes for power mode change with below.
4076 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4077 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4078 * - PA_HSSERIES
4079 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004080 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4081 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4082 pwr_mode->lane_rx);
4083 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4084 pwr_mode->pwr_rx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304085 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004086 else
4087 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304088
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004089 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4090 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4091 pwr_mode->lane_tx);
4092 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4093 pwr_mode->pwr_tx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304094 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004095 else
4096 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304097
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004098 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4099 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4100 pwr_mode->pwr_rx == FAST_MODE ||
4101 pwr_mode->pwr_tx == FAST_MODE)
4102 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4103 pwr_mode->hs_rate);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304104
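	/*
	 * Program the data link layer timeout values (FC protection, TC replay
	 * and AFC request) to their defaults: PA_PWRMODEUSERDATA0-5 hold the
	 * values applied with the power mode change, while the DME_Local*
	 * attributes below program the local side directly.
	 */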
Can Guo08342532019-12-05 02:14:42 +00004105 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4106 DL_FC0ProtectionTimeOutVal_Default);
4107 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4108 DL_TC0ReplayTimeOutVal_Default);
4109 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4110 DL_AFC0ReqTimeOutVal_Default);
4111 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4112 DL_FC1ProtectionTimeOutVal_Default);
4113 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4114 DL_TC1ReplayTimeOutVal_Default);
4115 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4116 DL_AFC1ReqTimeOutVal_Default);
4117
4118 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4119 DL_FC0ProtectionTimeOutVal_Default);
4120 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4121 DL_TC0ReplayTimeOutVal_Default);
4122 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4123 DL_AFC0ReqTimeOutVal_Default);
4124
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004125 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4126 | pwr_mode->pwr_tx);
4127
4128 if (ret) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304129 dev_err(hba->dev,
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004130 "%s: power mode change failed %d\n", __func__, ret);
4131 } else {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004132 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4133 pwr_mode);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004134
4135 memcpy(&hba->pwr_info, pwr_mode,
4136 sizeof(struct ufs_pa_layer_attr));
4137 }
4138
4139 return ret;
4140}
4141
4142/**
4143 * ufshcd_config_pwr_mode - configure a new power mode
4144 * @hba: per-adapter instance
4145 * @desired_pwr_mode: desired power configuration
4146 */
Alim Akhtar0d846e72018-05-06 15:44:18 +05304147int ufshcd_config_pwr_mode(struct ufs_hba *hba,
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004148 struct ufs_pa_layer_attr *desired_pwr_mode)
4149{
4150 struct ufs_pa_layer_attr final_params = { 0 };
4151 int ret;
4152
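	/*
	 * Give the vendor driver a chance to adjust the requested settings;
	 * if the notify call fails or is not implemented, use the caller's
	 * desired parameters unchanged.
	 */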
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004153 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4154 desired_pwr_mode, &final_params);
4155
4156 if (ret)
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004157 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4158
4159 ret = ufshcd_change_power_mode(hba, &final_params);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304160
4161 return ret;
4162}
Alim Akhtar0d846e72018-05-06 15:44:18 +05304163EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304164
4165/**
Dolev Raviv68078d52013-07-30 00:35:58 +05304166 * ufshcd_complete_dev_init() - checks device readiness
Bart Van Assche8aa29f12018-03-01 15:07:20 -08004167 * @hba: per-adapter instance
Dolev Raviv68078d52013-07-30 00:35:58 +05304168 *
4169 * Set fDeviceInit flag and poll until device toggles it.
4170 */
4171static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4172{
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004173 int i;
4174 int err;
Jason Yan7dfdcc32020-04-26 17:43:05 +08004175 bool flag_res = true;
Dolev Raviv68078d52013-07-30 00:35:58 +05304176
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004177 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4178 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
Dolev Raviv68078d52013-07-30 00:35:58 +05304179 if (err) {
4180 dev_err(hba->dev,
4181 "%s setting fDeviceInit flag failed with error %d\n",
4182 __func__, err);
4183 goto out;
4184 }
4185
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004186 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4187 for (i = 0; i < 1000 && !err && flag_res; i++)
4188 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4189 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4190
Dolev Raviv68078d52013-07-30 00:35:58 +05304191 if (err)
4192 dev_err(hba->dev,
4193 "%s reading fDeviceInit flag failed with error %d\n",
4194 __func__, err);
4195 else if (flag_res)
4196 dev_err(hba->dev,
4197 "%s fDeviceInit was not cleared by the device\n",
4198 __func__);
4199
4200out:
4201 return err;
4202}
4203
4204/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304205 * ufshcd_make_hba_operational - Make UFS controller operational
4206 * @hba: per adapter instance
4207 *
4208 * To bring UFS host controller to operational state,
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004209 * 1. Enable required interrupts
4210 * 2. Configure interrupt aggregation
Yaniv Gardi897efe62016-02-01 15:02:48 +02004211 * 3. Program UTRL and UTMRL base address
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004212 * 4. Configure run-stop-registers
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304213 *
4214 * Returns 0 on success, non-zero value on failure
4215 */
Stanley Chu9d19bf7a2020-01-17 11:51:07 +08004216int ufshcd_make_hba_operational(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304217{
4218 int err = 0;
4219 u32 reg;
4220
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304221 /* Enable required interrupts */
4222 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4223
4224 /* Configure interrupt aggregation */
Yaniv Gardib8521902015-05-17 18:54:57 +03004225 if (ufshcd_is_intr_aggr_allowed(hba))
4226 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4227 else
4228 ufshcd_disable_intr_aggr(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304229
4230 /* Configure UTRL and UTMRL base address registers */
4231 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4232 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4233 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4234 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4235 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4236 REG_UTP_TASK_REQ_LIST_BASE_L);
4237 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4238 REG_UTP_TASK_REQ_LIST_BASE_H);
4239
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304240 /*
Yaniv Gardi897efe62016-02-01 15:02:48 +02004241 * Make sure base address and interrupt setup are updated before
4242 * enabling the run/stop registers below.
4243 */
4244 wmb();
4245
4246 /*
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304247 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304248 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004249 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304250 if (!(ufshcd_get_lists_status(reg))) {
4251 ufshcd_enable_run_stop_reg(hba);
4252 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304253 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304254 "Host controller not ready to process requests");
4255 err = -EIO;
4256 goto out;
4257 }
4258
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304259out:
4260 return err;
4261}
Stanley Chu9d19bf7a2020-01-17 11:51:07 +08004262EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304263
4264/**
Yaniv Gardi596585a2016-03-10 17:37:08 +02004265 * ufshcd_hba_stop - Send controller to reset state
4266 * @hba: per adapter instance
4267 * @can_sleep: perform sleep or just spin
4268 */
4269static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4270{
4271 int err;
4272
4273 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4274 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4275 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4276 10, 1, can_sleep);
4277 if (err)
4278 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4279}
4280
4281/**
Christoph Hellwig492001992020-02-21 06:08:11 -08004282 * ufshcd_hba_enable - initialize the controller
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304283 * @hba: per adapter instance
4284 *
4285 * The controller resets itself and controller firmware initialization
4286 * sequence kicks off. When controller is ready it will set
4287 * the Host Controller Enable bit to 1.
4288 *
4289 * Returns 0 on success, non-zero value on failure
4290 */
Christoph Hellwig492001992020-02-21 06:08:11 -08004291int ufshcd_hba_enable(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304292{
4293 int retry;
4294
Yaniv Gardi596585a2016-03-10 17:37:08 +02004295 if (!ufshcd_is_hba_active(hba))
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304296 /* change controller state to "reset state" */
Yaniv Gardi596585a2016-03-10 17:37:08 +02004297 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304298
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004299 /* UniPro link is disabled at this point */
4300 ufshcd_set_link_off(hba);
4301
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004302 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004303
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304304 /* start controller initialization sequence */
4305 ufshcd_hba_start(hba);
4306
4307 /*
4308 * To initialize a UFS host controller HCE bit must be set to 1.
4309 * During initialization the HCE bit value changes from 1->0->1.
4310 * When the host controller completes initialization sequence
4311 * it sets the value of HCE bit to 1. The same HCE bit is read back
4312 * to check if the controller has completed initialization sequence.
4313	 * So without this delay, the value HCE = 1 set by the previous
4314	 * instruction might be read back.
4315 * This delay can be changed based on the controller.
4316 */
Stanley Chub9dc8ac2020-03-18 18:40:14 +08004317 ufshcd_delay_us(hba->hba_enable_delay_us, 100);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304318
4319 /* wait for the host controller to complete initialization */
Stanley Chu9fc305e2020-03-18 18:40:15 +08004320 retry = 50;
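	/* poll HCE for up to ~50 ms (50 iterations, ~1 ms sleep each) */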
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304321 while (ufshcd_is_hba_active(hba)) {
4322 if (retry) {
4323 retry--;
4324 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304325 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304326 "Controller enable failed\n");
4327 return -EIO;
4328 }
Stanley Chu9fc305e2020-03-18 18:40:15 +08004329 usleep_range(1000, 1100);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304330 }
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004331
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004332 /* enable UIC related interrupts */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004333 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004334
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004335 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004336
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304337 return 0;
4338}
Stanley Chu9d19bf7a2020-01-17 11:51:07 +08004339EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4340
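/*
 * ufshcd_disable_tx_lcc - clear TX_LCC_ENABLE on every connected TX data lane,
 * either on the local (host) side or on the peer (device) side.
 */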
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03004341static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4342{
Stanley Chuba0320f2020-03-18 18:40:10 +08004343 int tx_lanes = 0, i, err = 0;
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03004344
4345 if (!peer)
4346 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4347 &tx_lanes);
4348 else
4349 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4350 &tx_lanes);
4351 for (i = 0; i < tx_lanes; i++) {
4352 if (!peer)
4353 err = ufshcd_dme_set(hba,
4354 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4355 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4356 0);
4357 else
4358 err = ufshcd_dme_peer_set(hba,
4359 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4360 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4361 0);
4362 if (err) {
4363 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4364 __func__, peer, i, err);
4365 break;
4366 }
4367 }
4368
4369 return err;
4370}
4371
4372static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4373{
4374 return ufshcd_disable_tx_lcc(hba, true);
4375}
4376
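/**
 * ufshcd_update_reg_hist - record an error value in an error history ring
 * @reg_hist: history to update
 * @reg: error value to record
 *
 * Stores the value together with a timestamp; the history wraps around
 * after UFS_ERR_REG_HIST_LENGTH entries, overwriting the oldest entry.
 */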
Stanley Chua5fe372d2020-01-04 22:26:07 +08004377void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
4378 u32 reg)
Stanley Chu8808b4e2019-07-10 21:38:21 +08004379{
4380 reg_hist->reg[reg_hist->pos] = reg;
4381 reg_hist->tstamp[reg_hist->pos] = ktime_get();
4382 reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
4383}
Stanley Chua5fe372d2020-01-04 22:26:07 +08004384EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist);
Stanley Chu8808b4e2019-07-10 21:38:21 +08004385
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304386/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304387 * ufshcd_link_startup - Initialize unipro link startup
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304388 * @hba: per adapter instance
4389 *
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304390 * Returns 0 for success, non-zero in case of failure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304391 */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304392static int ufshcd_link_startup(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304393{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304394 int ret;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004395 int retries = DME_LINKSTARTUP_RETRIES;
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08004396 bool link_startup_again = false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304397
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08004398 /*
4399 * If UFS device isn't active then we will have to issue link startup
4400 * 2 times to make sure the device state move to active.
4401 */
4402 if (!ufshcd_is_ufs_dev_active(hba))
4403 link_startup_again = true;
4404
4405link_startup:
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004406 do {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004407 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304408
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004409 ret = ufshcd_dme_link_startup(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004410
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004411 /* check if device is detected by inter-connect layer */
4412 if (!ret && !ufshcd_is_device_present(hba)) {
Stanley Chu8808b4e2019-07-10 21:38:21 +08004413 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4414 0);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004415 dev_err(hba->dev, "%s: Device not present\n", __func__);
4416 ret = -ENXIO;
4417 goto out;
4418 }
4419
4420 /*
4421 * DME link lost indication is only received when link is up,
4422 * but we can't be sure if the link is up until link startup
4423 * succeeds. So reset the local Uni-Pro and try again.
4424 */
Stanley Chu8808b4e2019-07-10 21:38:21 +08004425 if (ret && ufshcd_hba_enable(hba)) {
4426 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4427 (u32)ret);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004428 goto out;
Stanley Chu8808b4e2019-07-10 21:38:21 +08004429 }
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004430 } while (ret && retries--);
4431
Stanley Chu8808b4e2019-07-10 21:38:21 +08004432 if (ret) {
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004433		/* failed to get the link up... give up */
Stanley Chu8808b4e2019-07-10 21:38:21 +08004434 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4435 (u32)ret);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304436 goto out;
Stanley Chu8808b4e2019-07-10 21:38:21 +08004437 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304438
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08004439 if (link_startup_again) {
4440 link_startup_again = false;
4441 retries = DME_LINKSTARTUP_RETRIES;
4442 goto link_startup;
4443 }
4444
subhashj@codeaurora.orgd2aebb92016-12-22 18:41:33 -08004445 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4446 ufshcd_init_pwr_info(hba);
4447 ufshcd_print_pwr_info(hba);
4448
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03004449 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4450 ret = ufshcd_disable_device_tx_lcc(hba);
4451 if (ret)
4452 goto out;
4453 }
4454
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004455 /* Include any host controller configuration via UIC commands */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004456 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4457 if (ret)
4458 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004459
4460 ret = ufshcd_make_hba_operational(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304461out:
Venkat Gopalakrishnan7942f7b2017-02-03 16:58:24 -08004462 if (ret) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304463 dev_err(hba->dev, "link startup failed %d\n", ret);
Venkat Gopalakrishnan7942f7b2017-02-03 16:58:24 -08004464 ufshcd_print_host_state(hba);
4465 ufshcd_print_pwr_info(hba);
4466 ufshcd_print_host_regs(hba);
4467 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304468 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304469}
4470
4471/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304472 * ufshcd_verify_dev_init() - Verify device initialization
4473 * @hba: per-adapter instance
4474 *
4475 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4476 * device Transport Protocol (UTP) layer is ready after a reset.
4477 * If the UTP layer at the device side is not initialized, it may
4478 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4479 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4480 */
4481static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4482{
4483 int err = 0;
4484 int retries;
4485
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004486 ufshcd_hold(hba, false);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304487 mutex_lock(&hba->dev_cmd.lock);
4488 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4489 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4490 NOP_OUT_TIMEOUT);
4491
4492 if (!err || err == -ETIMEDOUT)
4493 break;
4494
4495 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4496 }
4497 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004498 ufshcd_release(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304499
4500 if (err)
4501 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4502 return err;
4503}
4504
4505/**
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004506 * ufshcd_set_queue_depth - set lun queue depth
4507 * @sdev: pointer to SCSI device
4508 *
4509 * Read bLUQueueDepth value and activate scsi tagged command
4510 * queueing. For WLUN, queue depth is set to 1. For best-effort
4511 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
4512 * value that the host can queue.
4513 */
4514static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4515{
4516 int ret = 0;
4517 u8 lun_qdepth;
4518 struct ufs_hba *hba;
4519
4520 hba = shost_priv(sdev->host);
4521
4522 lun_qdepth = hba->nutrs;
Szymon Mielczarekdbd34a62017-03-29 08:19:21 +02004523 ret = ufshcd_read_unit_desc_param(hba,
4524 ufshcd_scsi_to_upiu_lun(sdev->lun),
4525 UNIT_DESC_PARAM_LU_Q_DEPTH,
4526 &lun_qdepth,
4527 sizeof(lun_qdepth));
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004528
4529 /* Some WLUN doesn't support unit descriptor */
4530 if (ret == -EOPNOTSUPP)
4531 lun_qdepth = 1;
4532 else if (!lun_qdepth)
4533 /* eventually, we can figure out the real queue depth */
4534 lun_qdepth = hba->nutrs;
4535 else
4536 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4537
4538 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4539 __func__, lun_qdepth);
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004540 scsi_change_queue_depth(sdev, lun_qdepth);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004541}
4542
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004543/*
4544 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4545 * @hba: per-adapter instance
4546 * @lun: UFS device lun id
4547 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4548 *
4549 * Returns 0 in case of success and the b_lu_write_protect status is returned
4550 * in the @b_lu_write_protect parameter.
4551 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4552 * Returns -EINVAL in case of invalid parameters passed to this function.
4553 */
4554static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4555 u8 lun,
4556 u8 *b_lu_write_protect)
4557{
4558 int ret;
4559
4560 if (!b_lu_write_protect)
4561 ret = -EINVAL;
4562 /*
4563 * According to UFS device spec, RPMB LU can't be write
4564 * protected so skip reading bLUWriteProtect parameter for
4565 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4566 */
Bean Huo1baa8012020-01-20 14:08:20 +01004567 else if (lun >= hba->dev_info.max_lu_supported)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004568 ret = -ENOTSUPP;
4569 else
4570 ret = ufshcd_read_unit_desc_param(hba,
4571 lun,
4572 UNIT_DESC_PARAM_LU_WR_PROTECT,
4573 b_lu_write_protect,
4574 sizeof(*b_lu_write_protect));
4575 return ret;
4576}
4577
4578/**
4579 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4580 * status
4581 * @hba: per-adapter instance
4582 * @sdev: pointer to SCSI device
4583 *
4584 */
4585static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4586 struct scsi_device *sdev)
4587{
4588 if (hba->dev_info.f_power_on_wp_en &&
4589 !hba->dev_info.is_lu_power_on_wp) {
4590 u8 b_lu_write_protect;
4591
4592 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4593 &b_lu_write_protect) &&
4594 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4595 hba->dev_info.is_lu_power_on_wp = true;
4596 }
4597}
4598
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004599/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304600 * ufshcd_slave_alloc - handle initial SCSI device configurations
4601 * @sdev: pointer to SCSI device
4602 *
4603 * Returns success
4604 */
4605static int ufshcd_slave_alloc(struct scsi_device *sdev)
4606{
4607 struct ufs_hba *hba;
4608
4609 hba = shost_priv(sdev->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304610
4611 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4612 sdev->use_10_for_ms = 1;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304613
Can Guoa3a76392019-12-05 02:14:30 +00004614 /* DBD field should be set to 1 in mode sense(10) */
4615 sdev->set_dbd_for_ms = 1;
4616
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304617 /* allow SCSI layer to restart the device in case of errors */
4618 sdev->allow_restart = 1;
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004619
Sujit Reddy Thummab2a6c522014-07-01 12:22:38 +03004620 /* REPORT SUPPORTED OPERATION CODES is not supported */
4621 sdev->no_report_opcodes = 1;
4622
Sujit Reddy Thumma84af7e82018-01-24 09:52:35 +05304623 /* WRITE_SAME command is not supported */
4624 sdev->no_write_same = 1;
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004625
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004626 ufshcd_set_queue_depth(sdev);
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004627
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004628 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4629
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004630 return 0;
4631}
4632
4633/**
4634 * ufshcd_change_queue_depth - change queue depth
4635 * @sdev: pointer to SCSI device
4636 * @depth: required depth to set
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004637 *
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004638 * Change queue depth and make sure the max. limits are not crossed.
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004639 */
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004640static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004641{
4642 struct ufs_hba *hba = shost_priv(sdev->host);
4643
4644 if (depth > hba->nutrs)
4645 depth = hba->nutrs;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004646 return scsi_change_queue_depth(sdev, depth);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304647}
4648
4649/**
Akinobu Mitaeeda4742014-07-01 23:00:32 +09004650 * ufshcd_slave_configure - adjust SCSI device configurations
4651 * @sdev: pointer to SCSI device
4652 */
4653static int ufshcd_slave_configure(struct scsi_device *sdev)
4654{
Stanley Chu49615ba2019-09-16 23:56:50 +08004655 struct ufs_hba *hba = shost_priv(sdev->host);
Akinobu Mitaeeda4742014-07-01 23:00:32 +09004656 struct request_queue *q = sdev->request_queue;
4657
4658 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
Stanley Chu49615ba2019-09-16 23:56:50 +08004659
4660 if (ufshcd_is_rpm_autosuspend_allowed(hba))
4661 sdev->rpm_autosuspend = 1;
4662
Akinobu Mitaeeda4742014-07-01 23:00:32 +09004663 return 0;
4664}
4665
4666/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304667 * ufshcd_slave_destroy - remove SCSI device configurations
4668 * @sdev: pointer to SCSI device
4669 */
4670static void ufshcd_slave_destroy(struct scsi_device *sdev)
4671{
4672 struct ufs_hba *hba;
4673
4674 hba = shost_priv(sdev->host);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004675 /* Drop the reference as it won't be needed anymore */
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004676 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4677 unsigned long flags;
4678
4679 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004680 hba->sdev_ufs_device = NULL;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004681 spin_unlock_irqrestore(hba->host->host_lock, flags);
4682 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304683}
4684
4685/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304686 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
Bart Van Assche8aa29f12018-03-01 15:07:20 -08004687 * @lrbp: pointer to local reference block of completed command
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304688 * @scsi_status: SCSI command status
4689 *
4690 * Returns value base on SCSI command status
4691 */
4692static inline int
4693ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4694{
4695 int result = 0;
4696
4697 switch (scsi_status) {
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05304698 case SAM_STAT_CHECK_CONDITION:
4699 ufshcd_copy_sense_data(lrbp);
Tomas Winkler30eb2e42018-11-26 10:10:34 +02004700 /* fallthrough */
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304701 case SAM_STAT_GOOD:
4702 result |= DID_OK << 16 |
4703 COMMAND_COMPLETE << 8 |
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05304704 scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304705 break;
4706 case SAM_STAT_TASK_SET_FULL:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05304707 case SAM_STAT_BUSY:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304708 case SAM_STAT_TASK_ABORTED:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05304709 ufshcd_copy_sense_data(lrbp);
4710 result |= scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304711 break;
4712 default:
4713 result |= DID_ERROR << 16;
4714 break;
4715 } /* end of switch */
4716
4717 return result;
4718}
4719
4720/**
4721 * ufshcd_transfer_rsp_status - Get overall status of the response
4722 * @hba: per adapter instance
Bart Van Assche8aa29f12018-03-01 15:07:20 -08004723 * @lrbp: pointer to local reference block of completed command
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304724 *
4725 * Returns result of the command to notify SCSI midlayer
4726 */
4727static inline int
4728ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4729{
4730 int result = 0;
4731 int scsi_status;
4732 int ocs;
4733
4734 /* overall command status of utrd */
4735 ocs = ufshcd_get_tr_ocs(lrbp);
4736
4737 switch (ocs) {
4738 case OCS_SUCCESS:
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304739 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08004740 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304741 switch (result) {
4742 case UPIU_TRANSACTION_RESPONSE:
4743 /*
4744 * get the response UPIU result to extract
4745 * the SCSI command status
4746 */
4747 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4748
4749 /*
4750 * get the result based on SCSI status response
4751 * to notify the SCSI midlayer of the command status
4752 */
4753 scsi_status = result & MASK_SCSI_STATUS;
4754 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304755
Yaniv Gardif05ac2e2016-02-01 15:02:42 +02004756 /*
4757 * Currently we are only supporting BKOPs exception
4758 * events hence we can ignore BKOPs exception event
4759 * during power management callbacks. BKOPs exception
4760 * event is not expected to be raised in runtime suspend
4761 * callback as it allows the urgent bkops.
4762 * During system suspend, we are anyway forcefully
4763 * disabling the bkops and if urgent bkops is needed
4764 * it will be enabled on system resume. Long term
4765 * solution could be to abort the system suspend if
4766 * UFS device needs urgent BKOPs.
4767 */
4768 if (!hba->pm_op_in_progress &&
Sayali Lokhande2824ec92020-02-10 19:40:44 -08004769 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
4770 schedule_work(&hba->eeh_work)) {
4771 /*
4772 * Prevent suspend once eeh_work is scheduled
4773 * to avoid deadlock between ufshcd_suspend
4774 * and exception event handler.
4775 */
4776 pm_runtime_get_noresume(hba->dev);
4777 }
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304778 break;
4779 case UPIU_TRANSACTION_REJECT_UPIU:
4780 /* TODO: handle Reject UPIU Response */
4781 result = DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304782 dev_err(hba->dev,
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304783 "Reject UPIU not fully implemented\n");
4784 break;
4785 default:
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304786 dev_err(hba->dev,
4787 "Unexpected request response code = %x\n",
4788 result);
Stanley Chue0347d82019-04-15 20:23:38 +08004789 result = DID_ERROR << 16;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304790 break;
4791 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304792 break;
4793 case OCS_ABORTED:
4794 result |= DID_ABORT << 16;
4795 break;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304796 case OCS_INVALID_COMMAND_STATUS:
4797 result |= DID_REQUEUE << 16;
4798 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304799 case OCS_INVALID_CMD_TABLE_ATTR:
4800 case OCS_INVALID_PRDT_ATTR:
4801 case OCS_MISMATCH_DATA_BUF_SIZE:
4802 case OCS_MISMATCH_RESP_UPIU_SIZE:
4803 case OCS_PEER_COMM_FAILURE:
4804 case OCS_FATAL_ERROR:
4805 default:
4806 result |= DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304807 dev_err(hba->dev,
Dolev Ravivff8e20c2016-12-22 18:42:18 -08004808 "OCS error from controller = %x for tag %d\n",
4809 ocs, lrbp->task_tag);
4810 ufshcd_print_host_regs(hba);
Gilad Broner6ba65582017-02-03 16:57:28 -08004811 ufshcd_print_host_state(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304812 break;
4813 } /* end of switch */
4814
Can Guo2df74b62019-11-25 22:53:33 -08004815 if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
Dolev Raviv66cc8202016-12-22 18:39:42 -08004816 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304817 return result;
4818}
4819
4820/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304821 * ufshcd_uic_cmd_compl - handle completion of uic command
4822 * @hba: per adapter instance
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304823 * @intr_status: interrupt status generated by the controller
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08004824 *
4825 * Returns
4826 * IRQ_HANDLED - If interrupt is valid
4827 * IRQ_NONE - If invalid interrupt
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304828 */
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08004829static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304830{
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08004831 irqreturn_t retval = IRQ_NONE;
4832
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304833 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304834 hba->active_uic_cmd->argument2 |=
4835 ufshcd_get_uic_cmd_result(hba);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05304836 hba->active_uic_cmd->argument3 =
4837 ufshcd_get_dme_attr_val(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304838 complete(&hba->active_uic_cmd->done);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08004839 retval = IRQ_HANDLED;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304840 }
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304841
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08004842 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004843 complete(hba->uic_async_done);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08004844 retval = IRQ_HANDLED;
4845 }
4846 return retval;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304847}
4848
4849/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004850 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304851 * @hba: per adapter instance
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004852 * @completed_reqs: requests to complete
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304853 */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004854static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4855 unsigned long completed_reqs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304856{
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304857 struct ufshcd_lrb *lrbp;
4858 struct scsi_cmnd *cmd;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304859 int result;
4860 int index;
Dolev Ravive9d501b2014-07-01 12:22:37 +03004861
Dolev Ravive9d501b2014-07-01 12:22:37 +03004862 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4863 lrbp = &hba->lrb[index];
4864 cmd = lrbp->cmd;
4865 if (cmd) {
Lee Susman1a07f2d2016-12-22 18:42:03 -08004866 ufshcd_add_command_trace(hba, index, "complete");
Dolev Ravive9d501b2014-07-01 12:22:37 +03004867 result = ufshcd_transfer_rsp_status(hba, lrbp);
4868 scsi_dma_unmap(cmd);
4869 cmd->result = result;
4870 /* Mark completed command as NULL in LRB */
4871 lrbp->cmd = NULL;
Can Guo74a527a2019-11-25 22:53:32 -08004872 lrbp->compl_time_stamp = ktime_get();
Dolev Ravive9d501b2014-07-01 12:22:37 +03004873 /* Do not touch lrbp after scsi done */
4874 cmd->scsi_done(cmd);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004875 __ufshcd_release(hba);
Joao Pinto300bb132016-05-11 12:21:27 +01004876 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4877 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
Can Guo74a527a2019-11-25 22:53:32 -08004878 lrbp->compl_time_stamp = ktime_get();
Lee Susman1a07f2d2016-12-22 18:42:03 -08004879 if (hba->dev_cmd.complete) {
4880 ufshcd_add_command_trace(hba, index,
4881 "dev_complete");
Dolev Ravive9d501b2014-07-01 12:22:37 +03004882 complete(hba->dev_cmd.complete);
Lee Susman1a07f2d2016-12-22 18:42:03 -08004883 }
Dolev Ravive9d501b2014-07-01 12:22:37 +03004884 }
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08004885 if (ufshcd_is_clkscaling_supported(hba))
4886 hba->clk_scaling.active_reqs--;
Dolev Ravive9d501b2014-07-01 12:22:37 +03004887 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304888
4889 /* clear corresponding bits of completed commands */
4890 hba->outstanding_reqs ^= completed_reqs;
4891
Sahitya Tummala856b3482014-09-25 15:32:34 +03004892 ufshcd_clk_scaling_update_busy(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304893}
4894
4895/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004896 * ufshcd_transfer_req_compl - handle SCSI and query command completion
4897 * @hba: per adapter instance
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08004898 *
4899 * Returns
4900 * IRQ_HANDLED - If interrupt is valid
4901 * IRQ_NONE - If invalid interrupt
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004902 */
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08004903static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004904{
4905 unsigned long completed_reqs;
4906 u32 tr_doorbell;
4907
4908 /* Resetting interrupt aggregation counters first and reading the
4909 * DOOR_BELL afterward allows us to handle all the completed requests.
4910	 * In order to prevent starvation of other interrupts the DB is read once
4911	 * after reset. The downside of this solution is the possibility of a
4912	 * false interrupt if the device completes another request after resetting
4913 * aggregation and before reading the DB.
4914 */
Christoph Hellwig492001992020-02-21 06:08:11 -08004915 if (ufshcd_is_intr_aggr_allowed(hba))
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004916 ufshcd_reset_intr_aggr(hba);
4917
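	/*
	 * Bits that are set in outstanding_reqs but already cleared in the
	 * doorbell register correspond to completed requests.
	 */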
4918 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4919 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4920
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08004921 if (completed_reqs) {
4922 __ufshcd_transfer_req_compl(hba, completed_reqs);
4923 return IRQ_HANDLED;
4924 } else {
4925 return IRQ_NONE;
4926 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004927}
4928
4929/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304930 * ufshcd_disable_ee - disable exception event
4931 * @hba: per-adapter instance
4932 * @mask: exception event to disable
4933 *
4934 * Disables exception event in the device so that the EVENT_ALERT
4935 * bit is not set.
4936 *
4937 * Returns zero on success, non-zero error value on failure.
4938 */
4939static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4940{
4941 int err = 0;
4942 u32 val;
4943
4944 if (!(hba->ee_ctrl_mask & mask))
4945 goto out;
4946
4947 val = hba->ee_ctrl_mask & ~mask;
Tomohiro Kusumid7e2ddd2017-04-20 15:01:44 +03004948 val &= MASK_EE_STATUS;
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02004949 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304950 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4951 if (!err)
4952 hba->ee_ctrl_mask &= ~mask;
4953out:
4954 return err;
4955}
4956
4957/**
4958 * ufshcd_enable_ee - enable exception event
4959 * @hba: per-adapter instance
4960 * @mask: exception event to enable
4961 *
4962 * Enable corresponding exception event in the device to allow
4963 * device to alert host in critical scenarios.
4964 *
4965 * Returns zero on success, non-zero error value on failure.
4966 */
4967static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4968{
4969 int err = 0;
4970 u32 val;
4971
4972 if (hba->ee_ctrl_mask & mask)
4973 goto out;
4974
4975 val = hba->ee_ctrl_mask | mask;
Tomohiro Kusumid7e2ddd2017-04-20 15:01:44 +03004976 val &= MASK_EE_STATUS;
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02004977 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304978 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4979 if (!err)
4980 hba->ee_ctrl_mask |= mask;
4981out:
4982 return err;
4983}
4984
4985/**
4986 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
4987 * @hba: per-adapter instance
4988 *
4989 * Allow device to manage background operations on its own. Enabling
4990 * this might lead to inconsistent latencies during normal data transfers
4991 * as the device is allowed to manage its own way of handling background
4992 * operations.
4993 *
4994 * Returns zero on success, non-zero on failure.
4995 */
4996static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4997{
4998 int err = 0;
4999
5000 if (hba->auto_bkops_enabled)
5001 goto out;
5002
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02005003 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305004 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5005 if (err) {
5006 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5007 __func__, err);
5008 goto out;
5009 }
5010
5011 hba->auto_bkops_enabled = true;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08005012 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305013
5014 /* No need of URGENT_BKOPS exception from the device */
5015 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5016 if (err)
5017 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5018 __func__, err);
5019out:
5020 return err;
5021}
5022
5023/**
5024 * ufshcd_disable_auto_bkops - block device in doing background operations
5025 * @hba: per-adapter instance
5026 *
5027 * Disabling background operations improves command response latency but
5028 * has the drawback of the device moving into a critical state where it is
5029 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5030 * host is idle so that BKOPS are managed effectively without any negative
5031 * impacts.
5032 *
5033 * Returns zero on success, non-zero on failure.
5034 */
5035static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5036{
5037 int err = 0;
5038
5039 if (!hba->auto_bkops_enabled)
5040 goto out;
5041
5042 /*
5043 * If host assisted BKOPs is to be enabled, make sure
5044 * urgent bkops exception is allowed.
5045 */
5046 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5047 if (err) {
5048 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5049 __func__, err);
5050 goto out;
5051 }
5052
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02005053 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305054 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5055 if (err) {
5056 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5057 __func__, err);
5058 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5059 goto out;
5060 }
5061
5062 hba->auto_bkops_enabled = false;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08005063 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
Asutosh Das24366c2a2019-11-25 22:53:30 -08005064 hba->is_urgent_bkops_lvl_checked = false;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305065out:
5066 return err;
5067}
5068
5069/**
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08005070 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305071 * @hba: per adapter instance
5072 *
5073 * After a device reset the device may toggle the BKOPS_EN flag
5074 * to default value. The s/w tracking variables should be updated
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08005075 * as well. This function would change the auto-bkops state based on
5076 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305077 */
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08005078static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305079{
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08005080 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5081 hba->auto_bkops_enabled = false;
5082 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5083 ufshcd_enable_auto_bkops(hba);
5084 } else {
5085 hba->auto_bkops_enabled = true;
5086 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5087 ufshcd_disable_auto_bkops(hba);
5088 }
Asutosh Das24366c2a2019-11-25 22:53:30 -08005089 hba->is_urgent_bkops_lvl_checked = false;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305090}
5091
5092static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5093{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02005094 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305095 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5096}
5097
5098/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005099 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5100 * @hba: per-adapter instance
5101 * @status: bkops_status value
5102 *
5103 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5104 * flag in the device to permit background operations if the device
5105 * bkops_status is greater than or equal to "status" argument passed to
5106 * this function, disable otherwise.
5107 *
5108 * Returns 0 for success, non-zero in case of failure.
5109 *
5110 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5111 * to know whether auto bkops is enabled or disabled after this function
5112 * returns control to it.
5113 */
5114static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5115 enum bkops_status status)
5116{
5117 int err;
5118 u32 curr_status = 0;
5119
5120 err = ufshcd_get_bkops_status(hba, &curr_status);
5121 if (err) {
5122 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5123 __func__, err);
5124 goto out;
5125 } else if (curr_status > BKOPS_STATUS_MAX) {
5126 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5127 __func__, curr_status);
5128 err = -EINVAL;
5129 goto out;
5130 }
5131
5132 if (curr_status >= status)
5133 err = ufshcd_enable_auto_bkops(hba);
5134 else
5135 err = ufshcd_disable_auto_bkops(hba);
Asutosh Das24366c2a2019-11-25 22:53:30 -08005136 hba->urgent_bkops_lvl = curr_status;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005137out:
5138 return err;
5139}
5140
5141/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305142 * ufshcd_urgent_bkops - handle urgent bkops exception event
5143 * @hba: per-adapter instance
5144 *
5145 * Enable fBackgroundOpsEn flag in the device to permit background
5146 * operations.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005147 *
5148 * Returns 0 if BKOPs is enabled, 1 if BKOPs is not enabled, and a negative
5149 * error value for any other failure.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305150 */
5151static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5152{
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005153 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305154}
5155
5156static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5157{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02005158 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305159 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5160}
5161
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005162static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5163{
5164 int err;
5165 u32 curr_status = 0;
5166
5167 if (hba->is_urgent_bkops_lvl_checked)
5168 goto enable_auto_bkops;
5169
5170 err = ufshcd_get_bkops_status(hba, &curr_status);
5171 if (err) {
5172 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5173 __func__, err);
5174 goto out;
5175 }
5176
5177 /*
5178 * We are seeing that some devices are raising the urgent bkops
5179	 * exception events even when the BKOPS status doesn't indicate performance
5180	 * impacted or critical. Handle these devices by determining their urgent
5181 * bkops status at runtime.
5182 */
5183 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5184 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5185 __func__, curr_status);
5186 /* update the current status as the urgent bkops level */
5187 hba->urgent_bkops_lvl = curr_status;
5188 hba->is_urgent_bkops_lvl_checked = true;
5189 }
5190
5191enable_auto_bkops:
5192 err = ufshcd_enable_auto_bkops(hba);
5193out:
5194 if (err < 0)
5195 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5196 __func__, err);
5197}
5198
Asutosh Das3d17b9b2020-04-22 14:41:42 -07005199static bool ufshcd_wb_sup(struct ufs_hba *hba)
5200{
5201 return ufshcd_is_wb_allowed(hba);
5202}
5203
5204static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
5205{
5206 int ret;
5207 enum query_opcode opcode;
5208
5209 if (!ufshcd_wb_sup(hba))
5210 return 0;
5211
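	/* Nothing to do if WriteBooster is already in the requested state. */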
5212 if (!(enable ^ hba->wb_enabled))
5213 return 0;
5214 if (enable)
5215 opcode = UPIU_QUERY_OPCODE_SET_FLAG;
5216 else
5217 opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5218
5219 ret = ufshcd_query_flag_retry(hba, opcode,
5220 QUERY_FLAG_IDN_WB_EN, NULL);
5221 if (ret) {
5222 dev_err(hba->dev, "%s write booster %s failed %d\n",
5223 __func__, enable ? "enable" : "disable", ret);
5224 return ret;
5225 }
5226
5227 hba->wb_enabled = enable;
5228 dev_dbg(hba->dev, "%s write booster %s %d\n",
5229 __func__, enable ? "enable" : "disable", ret);
5230
5231 return ret;
5232}
5233
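/*
 * ufshcd_wb_toggle_flush_during_h8 - set or clear the flag that allows the
 * device to flush the WriteBooster buffer while the link is in hibern8.
 */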
5234static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
5235{
5236 int val;
5237
5238 if (set)
5239 val = UPIU_QUERY_OPCODE_SET_FLAG;
5240 else
5241 val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5242
5243 return ufshcd_query_flag_retry(hba, val,
5244 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
5245 NULL);
5246}
5247
5248static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5249{
5250 if (enable)
5251 ufshcd_wb_buf_flush_enable(hba);
5252 else
5253 ufshcd_wb_buf_flush_disable(hba);
5254
5255}
5256
5257static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
5258{
5259 int ret;
5260
5261 if (!ufshcd_wb_sup(hba) || hba->wb_buf_flush_enabled)
5262 return 0;
5263
5264 ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5265 QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN, NULL);
5266 if (ret)
5267 dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
5268 __func__, ret);
5269 else
5270 hba->wb_buf_flush_enabled = true;
5271
5272 dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
5273 return ret;
5274}
5275
5276static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
5277{
5278 int ret;
5279
5280 if (!ufshcd_wb_sup(hba) || !hba->wb_buf_flush_enabled)
5281 return 0;
5282
5283 ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5284 QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN, NULL);
5285 if (ret) {
5286 dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
5287 __func__, ret);
5288 } else {
5289 hba->wb_buf_flush_enabled = false;
5290 dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
5291 }
5292
5293 return ret;
5294}
5295
5296static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5297 u32 avail_buf)
5298{
5299 u32 cur_buf;
5300 int ret;
5301
5302 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5303 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
5304 0, 0, &cur_buf);
5305 if (ret) {
5306 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5307 __func__, ret);
5308 return false;
5309 }
5310
5311 if (!cur_buf) {
5312 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5313 cur_buf);
5314 return false;
5315 }
5316 /* Let it continue to flush when >60% full */
5317 if (avail_buf < UFS_WB_40_PERCENT_BUF_REMAIN)
5318 return true;
5319
5320 return false;
5321}
5322
5323static bool ufshcd_wb_keep_vcc_on(struct ufs_hba *hba)
5324{
5325 int ret;
5326 u32 avail_buf;
5327
5328 if (!ufshcd_wb_sup(hba))
5329 return false;
5330 /*
5331 * The ufs device needs the vcc to be ON to flush.
5332 * With user-space reduction enabled, it's enough to enable flush
5333 * by checking only the available buffer. The threshold
5334 * defined here is > 90% full.
5335 * With user-space preserved enabled, the current-buffer
5336 * should be checked too because the wb buffer size can reduce
5337 * when disk tends to be full. This info is provided by current
5338 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
5339 * keeping vcc on when current buffer is empty.
5340 */
5341 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5342 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
5343 0, 0, &avail_buf);
5344 if (ret) {
5345 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5346 __func__, ret);
5347 return false;
5348 }
5349
5350 if (!hba->dev_info.b_presrv_uspc_en) {
5351 if (avail_buf <= UFS_WB_10_PERCENT_BUF_REMAIN)
5352 return true;
5353 return false;
5354 }
5355
5356 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
5357}
5358
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305359/**
5360 * ufshcd_exception_event_handler - handle exceptions raised by device
5361 * @work: pointer to work data
5362 *
5363 * Read bExceptionEventStatus attribute from the device and handle the
5364 * exception event accordingly.
5365 */
5366static void ufshcd_exception_event_handler(struct work_struct *work)
5367{
5368 struct ufs_hba *hba;
5369 int err;
5370 u32 status = 0;
5371 hba = container_of(work, struct ufs_hba, eeh_work);
5372
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05305373 pm_runtime_get_sync(hba->dev);
Stanley Chu03e1d282019-12-24 21:01:05 +08005374 ufshcd_scsi_block_requests(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305375 err = ufshcd_get_ee_status(hba, &status);
5376 if (err) {
5377 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5378 __func__, err);
5379 goto out;
5380 }
5381
5382 status &= hba->ee_ctrl_mask;
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005383
5384 if (status & MASK_EE_URGENT_BKOPS)
5385 ufshcd_bkops_exception_event_handler(hba);
5386
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305387out:
Stanley Chu03e1d282019-12-24 21:01:05 +08005388 ufshcd_scsi_unblock_requests(hba);
Sayali Lokhande2824ec92020-02-10 19:40:44 -08005389 /*
5390 * pm_runtime_get_noresume is called while scheduling
5391 * eeh_work to avoid suspend racing with exception work.
5392 * Hence decrement usage counter using pm_runtime_put_noidle
5393 * to allow suspend on completion of exception event handler.
5394 */
5395 pm_runtime_put_noidle(hba->dev);
5396 pm_runtime_put(hba->dev);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305397 return;
5398}
5399
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005400/* Complete requests that have door-bell cleared */
5401static void ufshcd_complete_requests(struct ufs_hba *hba)
5402{
5403 ufshcd_transfer_req_compl(hba);
5404 ufshcd_tmc_handler(hba);
5405}
5406
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305407/**
Yaniv Gardi583fa622016-03-10 17:37:13 +02005408 * ufshcd_quirk_dl_nac_errors - This function checks whether error handling is
5409 * required to recover from the DL NAC errors.
5410 * @hba: per-adapter instance
5411 *
5412 * Returns true if error handling is required, false otherwise
5413 */
5414static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5415{
5416 unsigned long flags;
5417 bool err_handling = true;
5418
5419 spin_lock_irqsave(hba->host->host_lock, flags);
5420 /*
5421	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
5422	 * device fatal errors and/or DL NAC & REPLAY timeout errors.
5423 */
5424 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5425 goto out;
5426
5427 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5428 ((hba->saved_err & UIC_ERROR) &&
5429 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5430 goto out;
5431
5432 if ((hba->saved_err & UIC_ERROR) &&
5433 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5434 int err;
5435 /*
5436		 * Wait for 50ms to see if any other errors are reported.
5437 */
5438 spin_unlock_irqrestore(hba->host->host_lock, flags);
5439 msleep(50);
5440 spin_lock_irqsave(hba->host->host_lock, flags);
5441
5442 /*
5443		 * Now check if any other severe errors have been reported besides
5444		 * the DL NAC error.
5445 */
5446 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5447 ((hba->saved_err & UIC_ERROR) &&
5448 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5449 goto out;
5450
5451 /*
5452 * As DL NAC is the only error received so far, send out NOP
5453 * command to confirm if link is still active or not.
5454 * - If we don't get any response then do error recovery.
5455 * - If we get response then clear the DL NAC error bit.
5456 */
5457
5458 spin_unlock_irqrestore(hba->host->host_lock, flags);
5459 err = ufshcd_verify_dev_init(hba);
5460 spin_lock_irqsave(hba->host->host_lock, flags);
5461
5462 if (err)
5463 goto out;
5464
5465 /* Link seems to be alive hence ignore the DL NAC errors */
5466 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5467 hba->saved_err &= ~UIC_ERROR;
5468 /* clear NAC error */
5469 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5470 if (!hba->saved_uic_err) {
5471 err_handling = false;
5472 goto out;
5473 }
5474 }
5475out:
5476 spin_unlock_irqrestore(hba->host->host_lock, flags);
5477 return err_handling;
5478}
5479
5480/**
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305481 * ufshcd_err_handler - handle UFS errors that require s/w attention
5482 * @work: pointer to work structure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305483 */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305484static void ufshcd_err_handler(struct work_struct *work)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305485{
5486 struct ufs_hba *hba;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305487 unsigned long flags;
5488 u32 err_xfer = 0;
5489 u32 err_tm = 0;
5490 int err = 0;
5491 int tag;
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005492 bool needs_reset = false;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305493
5494 hba = container_of(work, struct ufs_hba, eh_work);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305495
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05305496 pm_runtime_get_sync(hba->dev);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005497 ufshcd_hold(hba, false);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305498
5499 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005500 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305501 goto out;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305502
5503 hba->ufshcd_state = UFSHCD_STATE_RESET;
5504 ufshcd_set_eh_in_progress(hba);
5505
5506 /* Complete requests that have door-bell cleared by h/w */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005507 ufshcd_complete_requests(hba);
Yaniv Gardi583fa622016-03-10 17:37:13 +02005508
5509 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5510 bool ret;
5511
5512 spin_unlock_irqrestore(hba->host->host_lock, flags);
5513 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5514 ret = ufshcd_quirk_dl_nac_errors(hba);
5515 spin_lock_irqsave(hba->host->host_lock, flags);
5516 if (!ret)
5517 goto skip_err_handling;
5518 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005519 if ((hba->saved_err & INT_FATAL_ERRORS) ||
Stanley Chu82174442019-05-21 14:44:54 +08005520 (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005521 ((hba->saved_err & UIC_ERROR) &&
5522 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5523 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5524 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5525 needs_reset = true;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305526
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005527 /*
5528	 * If a host reset is required then skip forcefully clearing the
Can Guo2df74b62019-11-25 22:53:33 -08005529	 * pending transfers; they will get cleared during the
5530	 * host reset and restore
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005531 */
5532 if (needs_reset)
5533 goto skip_pending_xfer_clear;
5534
5535 /* release lock as clear command might sleep */
5536 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305537 /* Clear pending transfer requests */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005538 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5539 if (ufshcd_clear_cmd(hba, tag)) {
5540 err_xfer = true;
5541 goto lock_skip_pending_xfer_clear;
5542 }
5543 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305544
5545 /* Clear pending task management requests */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005546 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5547 if (ufshcd_clear_tm_cmd(hba, tag)) {
5548 err_tm = true;
5549 goto lock_skip_pending_xfer_clear;
5550 }
5551 }
5552
5553lock_skip_pending_xfer_clear:
5554 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305555
5556 /* Complete the requests that are cleared by s/w */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005557 ufshcd_complete_requests(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305558
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005559 if (err_xfer || err_tm)
5560 needs_reset = true;
5561
5562skip_pending_xfer_clear:
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305563 /* Fatal errors need reset */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005564 if (needs_reset) {
5565 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5566
5567 /*
5568		 * ufshcd_reset_and_restore() does the link reinitialization
5569		 * which needs at least one empty doorbell slot to send the
5570		 * device management commands (NOP and query commands).
5571		 * If no slot is empty at this moment then forcefully free up
5572		 * the last slot.
5573 */
5574 if (hba->outstanding_reqs == max_doorbells)
5575 __ufshcd_transfer_req_compl(hba,
5576 (1UL << (hba->nutrs - 1)));
5577
5578 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305579 err = ufshcd_reset_and_restore(hba);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005580 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305581 if (err) {
5582 dev_err(hba->dev, "%s: reset and restore failed\n",
5583 __func__);
5584 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5585 }
5586 /*
5587 * Inform scsi mid-layer that we did reset and allow to handle
5588 * Unit Attention properly.
5589 */
5590 scsi_report_bus_reset(hba->host, 0);
5591 hba->saved_err = 0;
5592 hba->saved_uic_err = 0;
5593 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005594
Yaniv Gardi583fa622016-03-10 17:37:13 +02005595skip_err_handling:
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005596 if (!needs_reset) {
5597 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5598 if (hba->saved_err || hba->saved_uic_err)
5599 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5600 __func__, hba->saved_err, hba->saved_uic_err);
5601 }
5602
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305603 ufshcd_clear_eh_in_progress(hba);
5604
5605out:
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005606 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavani38135532018-05-03 16:37:18 +05305607 ufshcd_scsi_unblock_requests(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005608 ufshcd_release(hba);
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05305609 pm_runtime_put_sync(hba->dev);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305610}
5611
5612/**
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305613 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5614 * @hba: per-adapter instance
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005615 *
5616 * Returns
5617 * IRQ_HANDLED - If interrupt is valid
5618 * IRQ_NONE - If invalid interrupt
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305619 */
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005620static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305621{
5622 u32 reg;
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005623 irqreturn_t retval = IRQ_NONE;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305624
Dolev Ravivfb7b45f2016-11-23 16:32:32 -08005625 /* PHY layer lane error */
5626 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5627 /* Ignore LINERESET indication, as this is not an error */
5628 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005629 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
Dolev Ravivfb7b45f2016-11-23 16:32:32 -08005630 /*
5631 * To know whether this error is fatal or not, DB timeout
5632 * must be checked but this error is handled separately.
5633 */
5634 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
Stanley Chu48d5b972019-07-10 21:38:18 +08005635 ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005636 retval |= IRQ_HANDLED;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005637 }
Dolev Ravivfb7b45f2016-11-23 16:32:32 -08005638
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305639 /* PA_INIT_ERROR is fatal and needs UIC reset */
5640 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005641 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
5642 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
Stanley Chu48d5b972019-07-10 21:38:18 +08005643 ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005644
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005645 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5646 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5647 else if (hba->dev_quirks &
5648 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5649 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5650 hba->uic_error |=
5651 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5652 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5653 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5654 }
5655 retval |= IRQ_HANDLED;
Yaniv Gardi583fa622016-03-10 17:37:13 +02005656 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305657
5658 /* UIC NL/TL/DME errors needs software retry */
5659 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005660 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
5661 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
Stanley Chu48d5b972019-07-10 21:38:18 +08005662 ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305663 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005664 retval |= IRQ_HANDLED;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005665 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305666
5667 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005668 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
5669 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
Stanley Chu48d5b972019-07-10 21:38:18 +08005670 ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305671 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005672 retval |= IRQ_HANDLED;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005673 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305674
5675 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005676 if ((reg & UIC_DME_ERROR) &&
5677 (reg & UIC_DME_ERROR_CODE_MASK)) {
Stanley Chu48d5b972019-07-10 21:38:18 +08005678 ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305679 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005680 retval |= IRQ_HANDLED;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005681 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305682
5683 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5684 __func__, hba->uic_error);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005685 return retval;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305686}
5687
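/*
 * ufshcd_is_auto_hibern8_error - check if the interrupt is an Auto-Hibern8 error
 * @hba: per-adapter instance
 * @intr_mask: interrupt status read from the controller
 *
 * Returns true only when Auto-Hibern8 is supported and enabled, the hibern8
 * error bits are set in @intr_mask, and no manual DME_HIBER_ENTER/EXIT UIC
 * command is currently active.
 */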
Stanley Chu82174442019-05-21 14:44:54 +08005688static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5689 u32 intr_mask)
5690{
Stanley Chu5a244e02020-01-29 18:52:50 +08005691 if (!ufshcd_is_auto_hibern8_supported(hba) ||
5692 !ufshcd_is_auto_hibern8_enabled(hba))
Stanley Chu82174442019-05-21 14:44:54 +08005693 return false;
5694
5695 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5696 return false;
5697
5698 if (hba->active_uic_cmd &&
5699 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5700 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5701 return false;
5702
5703 return true;
5704}
5705
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305706/**
5707 * ufshcd_check_errors - Check for errors that need s/w attention
5708 * @hba: per-adapter instance
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005709 *
5710 * Returns
5711 * IRQ_HANDLED - If interrupt is valid
5712 * IRQ_NONE - If invalid interrupt
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305713 */
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005714static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305715{
5716 bool queue_eh_work = false;
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005717 irqreturn_t retval = IRQ_NONE;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305718
Stanley Chud3c615b2019-07-10 21:38:19 +08005719 if (hba->errors & INT_FATAL_ERRORS) {
5720 ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305721 queue_eh_work = true;
Stanley Chud3c615b2019-07-10 21:38:19 +08005722 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305723
5724 if (hba->errors & UIC_ERROR) {
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305725 hba->uic_error = 0;
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005726 retval = ufshcd_update_uic_error(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305727 if (hba->uic_error)
5728 queue_eh_work = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305729 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305730
Stanley Chu82174442019-05-21 14:44:54 +08005731 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
5732 dev_err(hba->dev,
5733 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
5734 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
5735 "Enter" : "Exit",
5736 hba->errors, ufshcd_get_upmcrs(hba));
Stanley Chud3c615b2019-07-10 21:38:19 +08005737 ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
5738 hba->errors);
Stanley Chu82174442019-05-21 14:44:54 +08005739 queue_eh_work = true;
5740 }
5741
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305742 if (queue_eh_work) {
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005743 /*
5744		 * Update the transfer error masks to sticky bits; do this
5745		 * irrespective of the current ufshcd_state.
5746 */
5747 hba->saved_err |= hba->errors;
5748 hba->saved_uic_err |= hba->uic_error;
5749
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305750 /* handle fatal errors only when link is functional */
5751 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5752 /* block commands from scsi mid-layer */
Subhash Jadavani38135532018-05-03 16:37:18 +05305753 ufshcd_scsi_block_requests(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305754
Zang Leigang141f8162016-11-16 11:29:37 +08005755 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
Dolev Raviv66cc8202016-12-22 18:39:42 -08005756
5757 /* dump controller state before resetting */
5758 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5759 bool pr_prdt = !!(hba->saved_err &
5760 SYSTEM_BUS_FATAL_ERROR);
5761
5762 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5763 __func__, hba->saved_err,
5764 hba->saved_uic_err);
5765
5766 ufshcd_print_host_regs(hba);
5767 ufshcd_print_pwr_info(hba);
5768 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5769 ufshcd_print_trs(hba, hba->outstanding_reqs,
5770 pr_prdt);
5771 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305772 schedule_work(&hba->eh_work);
5773 }
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005774 retval |= IRQ_HANDLED;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305775 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305776 /*
5777 * if (!queue_eh_work) -
5778	 * Other errors are either non-fatal, where the host recovers by
5779	 * itself without s/w intervention, or errors that will be
5780	 * handled by the SCSI core layer.
5781 */
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005782 return retval;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305783}
5784
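/*
 * struct ctm_info - context used when iterating over TMF requests
 * @hba: per-adapter instance
 * @pending: snapshot of the UTP task request doorbell register
 * @ncpl: number of task management requests completed by the iterator
 */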
Bart Van Assche69a6c262019-12-09 10:13:09 -08005785struct ctm_info {
5786 struct ufs_hba *hba;
5787 unsigned long pending;
5788 unsigned int ncpl;
5789};
5790
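/*
 * ufshcd_compl_tm - blk_mq_tagset_busy_iter() callback that completes a task
 * management request unless its doorbell bit is still set in ci->pending.
 */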
5791static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
5792{
5793 struct ctm_info *const ci = priv;
5794 struct completion *c;
5795
5796 WARN_ON_ONCE(reserved);
5797 if (test_bit(req->tag, &ci->pending))
5798 return true;
5799 ci->ncpl++;
5800 c = req->end_io_data;
5801 if (c)
5802 complete(c);
5803 return true;
5804}
5805
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305806/**
5807 * ufshcd_tmc_handler - handle task management function completion
5808 * @hba: per adapter instance
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005809 *
5810 * Returns
5811 * IRQ_HANDLED - If interrupt is valid
5812 * IRQ_NONE - If invalid interrupt
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305813 */
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005814static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305815{
Bart Van Assche69a6c262019-12-09 10:13:09 -08005816 struct request_queue *q = hba->tmf_queue;
5817 struct ctm_info ci = {
5818 .hba = hba,
5819 .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
5820 };
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305821
Bart Van Assche69a6c262019-12-09 10:13:09 -08005822 blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
5823 return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305824}
5825
5826/**
5827 * ufshcd_sl_intr - Interrupt service routine
5828 * @hba: per adapter instance
5829 * @intr_status: contains interrupts generated by the controller
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005830 *
5831 * Returns
5832 * IRQ_HANDLED - If interrupt is valid
5833 * IRQ_NONE - If invalid interrupt
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305834 */
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005835static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305836{
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005837 irqreturn_t retval = IRQ_NONE;
5838
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305839 hba->errors = UFSHCD_ERROR_MASK & intr_status;
Stanley Chu82174442019-05-21 14:44:54 +08005840
5841 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5842 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5843
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305844 if (hba->errors)
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005845 retval |= ufshcd_check_errors(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305846
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05305847 if (intr_status & UFSHCD_UIC_MASK)
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005848 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305849
5850 if (intr_status & UTP_TASK_REQ_COMPL)
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005851 retval |= ufshcd_tmc_handler(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305852
5853 if (intr_status & UTP_TRANSFER_REQ_COMPL)
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005854 retval |= ufshcd_transfer_req_compl(hba);
5855
5856 return retval;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305857}
5858
5859/**
5860 * ufshcd_intr - Main interrupt service routine
5861 * @irq: irq number
5862 * @__hba: pointer to adapter instance
5863 *
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005864 * Returns
5865 * IRQ_HANDLED - If interrupt is valid
5866 * IRQ_NONE - If invalid interrupt
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305867 */
5868static irqreturn_t ufshcd_intr(int irq, void *__hba)
5869{
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02005870 u32 intr_status, enabled_intr_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305871 irqreturn_t retval = IRQ_NONE;
5872 struct ufs_hba *hba = __hba;
Venkat Gopalakrishnan7f6ba4f2018-05-03 16:37:20 +05305873 int retries = hba->nutrs;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305874
5875 spin_lock(hba->host->host_lock);
Seungwon Jeonb873a2752013-06-26 22:39:26 +05305876 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305877
Venkat Gopalakrishnan7f6ba4f2018-05-03 16:37:20 +05305878 /*
5879	 * There could be up to hba->nutrs requests in flight. In the worst
5880	 * case they complete one by one right after the interrupt status is
5881	 * read, so keep re-reading the interrupt status in a loop and handle
5882	 * them until all requests are processed before returning.
5883 */
5884 do {
5885 enabled_intr_status =
5886 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5887 if (intr_status)
5888 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005889 if (enabled_intr_status)
5890 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02005891
Venkat Gopalakrishnan7f6ba4f2018-05-03 16:37:20 +05305892 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5893 } while (intr_status && --retries);
5894
Venkat Gopalakrishnan9333d772019-11-14 22:09:28 -08005895 if (retval == IRQ_NONE) {
5896 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
5897 __func__, intr_status);
5898 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
5899 }
5900
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305901 spin_unlock(hba->host->host_lock);
5902 return retval;
5903}
5904
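/*
 * ufshcd_clear_tm_cmd - clear an outstanding task management request via
 * ufshcd_utmrl_clear() and poll the task doorbell for up to one second.
 */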
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305905static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5906{
5907 int err = 0;
5908 u32 mask = 1 << tag;
5909 unsigned long flags;
5910
5911 if (!test_bit(tag, &hba->outstanding_tasks))
5912 goto out;
5913
5914 spin_lock_irqsave(hba->host->host_lock, flags);
Alim Akhtar1399c5b2018-05-06 15:44:15 +05305915 ufshcd_utmrl_clear(hba, tag);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305916 spin_unlock_irqrestore(hba->host->host_lock, flags);
5917
5918 /* poll for max. 1 sec to clear door bell register by h/w */
5919 err = ufshcd_wait_for_register(hba,
5920 REG_UTP_TASK_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02005921 mask, 0, 1000, 1000, true);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305922out:
5923 return err;
5924}
5925
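/**
 * __ufshcd_issue_tm_cmd - issue a pre-built task management request
 * @hba: per adapter instance
 * @treq: filled-in UTP task request descriptor
 * @tm_function: task management function opcode
 *
 * Allocates a reserved tag from the TMF queue, copies @treq into the matching
 * task management descriptor slot, rings the doorbell and waits up to
 * %TM_CMD_TIMEOUT for completion. On success the response descriptor is
 * copied back into @treq.
 *
 * Returns zero on success, -ETIMEDOUT if the request timed out.
 */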
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03005926static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
5927 struct utp_task_req_desc *treq, u8 tm_function)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305928{
Bart Van Assche69a6c262019-12-09 10:13:09 -08005929 struct request_queue *q = hba->tmf_queue;
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03005930 struct Scsi_Host *host = hba->host;
Bart Van Assche69a6c262019-12-09 10:13:09 -08005931 DECLARE_COMPLETION_ONSTACK(wait);
5932 struct request *req;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305933 unsigned long flags;
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03005934 int free_slot, task_tag, err;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305935
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305936 /*
5937	 * Get a free slot from the reserved tag set; blk_get_request() may
5938	 * sleep until a tag becomes available. The wait for the command
5939	 * completion itself is bounded by %TM_CMD_TIMEOUT.
5940 */
Bart Van Assche69a6c262019-12-09 10:13:09 -08005941 req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
5942 req->end_io_data = &wait;
5943 free_slot = req->tag;
5944 WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005945 ufshcd_hold(hba, false);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305946
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305947 spin_lock_irqsave(host->host_lock, flags);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305948 task_tag = hba->nutrs + free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305949
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03005950 treq->req_header.dword_0 |= cpu_to_be32(task_tag);
5951
5952 memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
Kiwoong Kimd2877be2016-11-10 21:16:15 +09005953 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5954
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305955 /* send command to the controller */
5956 __set_bit(free_slot, &hba->outstanding_tasks);
Yaniv Gardi897efe62016-02-01 15:02:48 +02005957
5958 /* Make sure descriptors are ready before ringing the task doorbell */
5959 wmb();
5960
Seungwon Jeonb873a2752013-06-26 22:39:26 +05305961 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
Gilad Bronerad1a1b92016-10-17 17:09:36 -07005962 /* Make sure that doorbell is committed immediately */
5963 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305964
5965 spin_unlock_irqrestore(host->host_lock, flags);
5966
Ohad Sharabi6667e6d2018-03-28 12:42:18 +03005967 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
5968
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305969 /* wait until the task management command is completed */
Bart Van Assche69a6c262019-12-09 10:13:09 -08005970 err = wait_for_completion_io_timeout(&wait,
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305971 msecs_to_jiffies(TM_CMD_TIMEOUT));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305972 if (!err) {
Bart Van Assche69a6c262019-12-09 10:13:09 -08005973 /*
5974 * Make sure that ufshcd_compl_tm() does not trigger a
5975 * use-after-free.
5976 */
5977 req->end_io_data = NULL;
Ohad Sharabi6667e6d2018-03-28 12:42:18 +03005978 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305979 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5980 __func__, tm_function);
5981 if (ufshcd_clear_tm_cmd(hba, free_slot))
5982			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
5983 __func__, free_slot);
5984 err = -ETIMEDOUT;
5985 } else {
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03005986 err = 0;
5987 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
5988
Ohad Sharabi6667e6d2018-03-28 12:42:18 +03005989 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305990 }
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305991
Stanley Chub5572172019-08-19 21:43:28 +08005992 spin_lock_irqsave(hba->host->host_lock, flags);
5993 __clear_bit(free_slot, &hba->outstanding_tasks);
5994 spin_unlock_irqrestore(hba->host->host_lock, flags);
5995
Bart Van Assche69a6c262019-12-09 10:13:09 -08005996 blk_put_request(req);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305997
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005998 ufshcd_release(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305999 return err;
6000}
6001
6002/**
Christoph Hellwigc6049cd2018-10-07 17:30:33 +03006003 * ufshcd_issue_tm_cmd - issues task management commands to controller
6004 * @hba: per adapter instance
6005 * @lun_id: LUN ID to which TM command is sent
6006 * @task_id: task ID to which the TM command is applicable
6007 * @tm_function: task management function opcode
6008 * @tm_response: task management service response return value
6009 *
6010 * Returns non-zero value on error, zero on success.
6011 */
6012static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6013 u8 tm_function, u8 *tm_response)
6014{
6015 struct utp_task_req_desc treq = { { 0 }, };
6016 int ocs_value, err;
6017
6018 /* Configure task request descriptor */
6019 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6020 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6021
6022 /* Configure task request UPIU */
6023 treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
6024 cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
6025 treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6026
6027 /*
6028 * The host shall provide the same value for LUN field in the basic
6029 * header and for Input Parameter.
6030 */
6031 treq.input_param1 = cpu_to_be32(lun_id);
6032 treq.input_param2 = cpu_to_be32(task_id);
6033
6034 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6035 if (err == -ETIMEDOUT)
6036 return err;
6037
6038 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6039 if (ocs_value != OCS_SUCCESS)
6040 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6041 __func__, ocs_value);
6042 else if (tm_response)
6043 *tm_response = be32_to_cpu(treq.output_param1) &
6044 MASK_TM_SERVICE_RESP;
6045 return err;
6046}
6047
6048/**
Avri Altman5e0a86e2018-10-07 17:30:37 +03006049 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6050 * @hba: per-adapter instance
6051 * @req_upiu: upiu request
6052 * @rsp_upiu: upiu reply
Avri Altman5e0a86e2018-10-07 17:30:37 +03006053 * @desc_buff: pointer to descriptor buffer, NULL if NA
6054 * @buff_len: descriptor size, 0 if NA
Bart Van Assched0e97602019-10-29 16:07:08 -07006055 * @cmd_type: specifies the type (NOP, Query...)
Avri Altman5e0a86e2018-10-07 17:30:37 +03006056 * @desc_op: descriptor operation
6057 *
6058 * These types of requests use the UTP Transfer Request Descriptor (UTRD).
6059 * Therefore, they "ride" the device management infrastructure: they use its
6060 * tag and its work queues.
6061 *
6062 * Since there is only one available tag for device management commands,
6063 * the caller is expected to hold the hba->dev_cmd.lock mutex.
6064 */
6065static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6066 struct utp_upiu_req *req_upiu,
6067 struct utp_upiu_req *rsp_upiu,
6068 u8 *desc_buff, int *buff_len,
Bart Van Assche7f674c32019-10-29 16:07:09 -07006069 enum dev_cmd_type cmd_type,
Avri Altman5e0a86e2018-10-07 17:30:37 +03006070 enum query_opcode desc_op)
6071{
Bart Van Assche7252a362019-12-09 10:13:08 -08006072 struct request_queue *q = hba->cmd_queue;
6073 struct request *req;
Avri Altman5e0a86e2018-10-07 17:30:37 +03006074 struct ufshcd_lrb *lrbp;
6075 int err = 0;
6076 int tag;
6077 struct completion wait;
6078 unsigned long flags;
6079 u32 upiu_flags;
6080
6081 down_read(&hba->clk_scaling_lock);
6082
Bart Van Assche7252a362019-12-09 10:13:08 -08006083 req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
Dan Carpenterbb14dd12019-12-13 13:48:28 +03006084 if (IS_ERR(req)) {
6085 err = PTR_ERR(req);
6086 goto out_unlock;
6087 }
Bart Van Assche7252a362019-12-09 10:13:08 -08006088 tag = req->tag;
6089 WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
Avri Altman5e0a86e2018-10-07 17:30:37 +03006090
6091 init_completion(&wait);
6092 lrbp = &hba->lrb[tag];
6093 WARN_ON(lrbp->cmd);
6094
6095 lrbp->cmd = NULL;
6096 lrbp->sense_bufflen = 0;
6097 lrbp->sense_buffer = NULL;
6098 lrbp->task_tag = tag;
6099 lrbp->lun = 0;
6100 lrbp->intr_cmd = true;
6101 hba->dev_cmd.type = cmd_type;
6102
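	/*
	 * UFSHCI 1.0/1.1 use the dedicated device management command type;
	 * newer host controllers use the generic UFS storage command type.
	 */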
6103 switch (hba->ufs_version) {
6104 case UFSHCI_VERSION_10:
6105 case UFSHCI_VERSION_11:
6106 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
6107 break;
6108 default:
6109 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
6110 break;
6111 }
6112
6113 /* update the task tag in the request upiu */
6114 req_upiu->header.dword_0 |= cpu_to_be32(tag);
6115
6116 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6117
6118 /* just copy the upiu request as it is */
6119 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6120 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6121		/* The Data Segment Area is optional depending upon the query
6122		 * function value. For WRITE DESCRIPTOR, the data segment
6123		 * follows right after the Transaction Specific Fields (TSF).
6124 */
6125 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6126 *buff_len = 0;
6127 }
6128
6129 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
6130
6131 hba->dev_cmd.complete = &wait;
6132
6133 /* Make sure descriptors are ready before ringing the doorbell */
6134 wmb();
6135 spin_lock_irqsave(hba->host->host_lock, flags);
6136 ufshcd_send_command(hba, tag);
6137 spin_unlock_irqrestore(hba->host->host_lock, flags);
6138
6139 /*
6140 * ignore the returning value here - ufshcd_check_query_response is
6141 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
6142 * read the response directly ignoring all errors.
6143 */
6144 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6145
6146 /* just copy the upiu response as it is */
6147 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
Avri Altman4bbbe242019-02-20 09:11:13 +02006148 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6149 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6150 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6151 MASK_QUERY_DATA_SEG_LEN;
6152
6153 if (*buff_len >= resp_len) {
6154 memcpy(desc_buff, descp, resp_len);
6155 *buff_len = resp_len;
6156 } else {
Bean Huo3d4881d2019-11-12 23:34:35 +01006157 dev_warn(hba->dev,
6158 "%s: rsp size %d is bigger than buffer size %d",
6159 __func__, resp_len, *buff_len);
Avri Altman4bbbe242019-02-20 09:11:13 +02006160 *buff_len = 0;
6161 err = -EINVAL;
6162 }
6163 }
Avri Altman5e0a86e2018-10-07 17:30:37 +03006164
Bart Van Assche7252a362019-12-09 10:13:08 -08006165 blk_put_request(req);
Dan Carpenterbb14dd12019-12-13 13:48:28 +03006166out_unlock:
Avri Altman5e0a86e2018-10-07 17:30:37 +03006167 up_read(&hba->clk_scaling_lock);
6168 return err;
6169}
6170
6171/**
6172 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
6173 * @hba: per-adapter instance
6174 * @req_upiu: upiu request
6175 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
6176 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
6177 * @desc_buff: pointer to descriptor buffer, NULL if NA
6178 * @buff_len: descriptor size, 0 if NA
6179 * @desc_op: descriptor operation
6180 *
6181 * Supports UTP Transfer requests (nop and query), and UTP Task
6182 * Management requests.
6183 * It is up to the caller to fill the upiu content properly, as it will
6184 * be copied without any further input validation.
6185 */
6186int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6187 struct utp_upiu_req *req_upiu,
6188 struct utp_upiu_req *rsp_upiu,
6189 int msgcode,
6190 u8 *desc_buff, int *buff_len,
6191 enum query_opcode desc_op)
6192{
6193 int err;
Bart Van Assche7f674c32019-10-29 16:07:09 -07006194 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
Avri Altman5e0a86e2018-10-07 17:30:37 +03006195 struct utp_task_req_desc treq = { { 0 }, };
6196 int ocs_value;
6197 u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6198
Avri Altman5e0a86e2018-10-07 17:30:37 +03006199 switch (msgcode) {
6200 case UPIU_TRANSACTION_NOP_OUT:
6201 cmd_type = DEV_CMD_TYPE_NOP;
6202 /* fall through */
6203 case UPIU_TRANSACTION_QUERY_REQ:
6204 ufshcd_hold(hba, false);
6205 mutex_lock(&hba->dev_cmd.lock);
6206 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6207 desc_buff, buff_len,
6208 cmd_type, desc_op);
6209 mutex_unlock(&hba->dev_cmd.lock);
6210 ufshcd_release(hba);
6211
6212 break;
6213 case UPIU_TRANSACTION_TASK_REQ:
6214 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6215 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6216
6217 memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
6218
6219 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6220 if (err == -ETIMEDOUT)
6221 break;
6222
6223 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6224 if (ocs_value != OCS_SUCCESS) {
6225 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6226 ocs_value);
6227 break;
6228 }
6229
6230 memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
6231
6232 break;
6233 default:
6234 err = -EINVAL;
6235
6236 break;
6237 }
6238
Avri Altman5e0a86e2018-10-07 17:30:37 +03006239 return err;
6240}
6241
6242/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306243 * ufshcd_eh_device_reset_handler - device reset handler registered to
6244 * scsi layer.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306245 * @cmd: SCSI command pointer
6246 *
6247 * Returns SUCCESS/FAILED
6248 */
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306249static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306250{
6251 struct Scsi_Host *host;
6252 struct ufs_hba *hba;
6253 unsigned int tag;
6254 u32 pos;
6255 int err;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306256 u8 resp = 0xF;
6257 struct ufshcd_lrb *lrbp;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306258 unsigned long flags;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306259
6260 host = cmd->device->host;
6261 hba = shost_priv(host);
6262 tag = cmd->request->tag;
6263
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306264 lrbp = &hba->lrb[tag];
6265 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
6266 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306267 if (!err)
6268 err = resp;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306269 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306270 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306271
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306272 /* clear the commands that were pending for corresponding LUN */
6273 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6274 if (hba->lrb[pos].lun == lrbp->lun) {
6275 err = ufshcd_clear_cmd(hba, pos);
6276 if (err)
6277 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306278 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306279 }
6280 spin_lock_irqsave(host->host_lock, flags);
6281 ufshcd_transfer_req_compl(hba);
6282 spin_unlock_irqrestore(host->host_lock, flags);
Gilad Broner7fabb772017-02-03 16:56:50 -08006283
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306284out:
Gilad Broner7fabb772017-02-03 16:56:50 -08006285 hba->req_abort_count = 0;
Stanley Chu8808b4e2019-07-10 21:38:21 +08006286 ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306287 if (!err) {
6288 err = SUCCESS;
6289 } else {
6290 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6291 err = FAILED;
6292 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306293 return err;
6294}
6295
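/*
 * ufshcd_set_req_abort_skip - mark every request in @bitmap so that a later
 * abort attempt skips the task abort and fails the command directly.
 */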
Gilad Bronere0b299e2017-02-03 16:56:40 -08006296static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6297{
6298 struct ufshcd_lrb *lrbp;
6299 int tag;
6300
6301 for_each_set_bit(tag, &bitmap, hba->nutrs) {
6302 lrbp = &hba->lrb[tag];
6303 lrbp->req_abort_skip = true;
6304 }
6305}
6306
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306307/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306308 * ufshcd_abort - abort a specific command
6309 * @cmd: SCSI command pointer
6310 *
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306311 * Abort the pending command in the device by sending the UFS_ABORT_TASK task
6312 * management command, and in the host controller by clearing the door-bell
6313 * register. There can be a race where the controller sends the command to the
6314 * device while the abort is issued. To avoid that, first issue UFS_QUERY_TASK
6315 * to check if the command was really issued and only then try to abort it.
6316 *
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306317 * Returns SUCCESS/FAILED
6318 */
6319static int ufshcd_abort(struct scsi_cmnd *cmd)
6320{
6321 struct Scsi_Host *host;
6322 struct ufs_hba *hba;
6323 unsigned long flags;
6324 unsigned int tag;
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306325 int err = 0;
6326 int poll_cnt;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306327 u8 resp = 0xF;
6328 struct ufshcd_lrb *lrbp;
Dolev Ravive9d501b2014-07-01 12:22:37 +03006329 u32 reg;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306330
6331 host = cmd->device->host;
6332 hba = shost_priv(host);
6333 tag = cmd->request->tag;
Dolev Ravive7d38252016-12-22 18:40:07 -08006334 lrbp = &hba->lrb[tag];
Yaniv Gardi14497322016-02-01 15:02:39 +02006335 if (!ufshcd_valid_tag(hba, tag)) {
6336 dev_err(hba->dev,
6337 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6338 __func__, tag, cmd, cmd->request);
6339 BUG();
6340 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306341
Dolev Ravive7d38252016-12-22 18:40:07 -08006342 /*
6343	 * Task abort to the device W-LUN is illegal. When this command
6344	 * fails, due to the spec violation, the next step of SCSI error
6345	 * handling would be to send a LU reset which, again, is a spec
6346	 * violation. To avoid these unnecessary/illegal steps we skip to
6347	 * the last error handling stage: reset and restore.
6348 */
6349 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6350 return ufshcd_eh_host_reset_handler(cmd);
6351
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006352 ufshcd_hold(hba, false);
Dolev Ravive9d501b2014-07-01 12:22:37 +03006353 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Yaniv Gardi14497322016-02-01 15:02:39 +02006354 /* If command is already aborted/completed, return SUCCESS */
6355 if (!(test_bit(tag, &hba->outstanding_reqs))) {
6356 dev_err(hba->dev,
6357 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6358 __func__, tag, hba->outstanding_reqs, reg);
6359 goto out;
6360 }
6361
Dolev Ravive9d501b2014-07-01 12:22:37 +03006362 if (!(reg & (1 << tag))) {
6363 dev_err(hba->dev,
6364 "%s: cmd was completed, but without a notifying intr, tag = %d",
6365 __func__, tag);
6366 }
6367
Dolev Raviv66cc8202016-12-22 18:39:42 -08006368 /* Print Transfer Request of aborted task */
6369 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
Dolev Raviv66cc8202016-12-22 18:39:42 -08006370
Gilad Broner7fabb772017-02-03 16:56:50 -08006371 /*
6372 * Print detailed info about aborted request.
6373 * As more than one request might get aborted at the same time,
6374 * print full information only for the first aborted request in order
6375 * to reduce repeated printouts. For other aborted requests only print
6376 * basic details.
6377 */
6378 scsi_print_command(hba->lrb[tag].cmd);
6379 if (!hba->req_abort_count) {
Stanley Chu8808b4e2019-07-10 21:38:21 +08006380 ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
Gilad Broner7fabb772017-02-03 16:56:50 -08006381 ufshcd_print_host_regs(hba);
Gilad Broner6ba65582017-02-03 16:57:28 -08006382 ufshcd_print_host_state(hba);
Gilad Broner7fabb772017-02-03 16:56:50 -08006383 ufshcd_print_pwr_info(hba);
6384 ufshcd_print_trs(hba, 1 << tag, true);
6385 } else {
6386 ufshcd_print_trs(hba, 1 << tag, false);
6387 }
6388 hba->req_abort_count++;
Gilad Bronere0b299e2017-02-03 16:56:40 -08006389
6390 /* Skip task abort in case previous aborts failed and report failure */
6391 if (lrbp->req_abort_skip) {
6392 err = -EIO;
6393 goto out;
6394 }
6395
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306396 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6397 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6398 UFS_QUERY_TASK, &resp);
6399 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6400 /* cmd pending in the device */
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006401 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6402 __func__, tag);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306403 break;
6404 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306405 /*
6406 * cmd not pending in the device, check if it is
6407 * in transition.
6408 */
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006409 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6410 __func__, tag);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306411 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6412 if (reg & (1 << tag)) {
6413 /* sleep for max. 200us to stabilize */
6414 usleep_range(100, 200);
6415 continue;
6416 }
6417 /* command completed already */
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006418 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6419 __func__, tag);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306420 goto out;
6421 } else {
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006422 dev_err(hba->dev,
6423 "%s: no response from device. tag = %d, err %d\n",
6424 __func__, tag, err);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306425 if (!err)
6426 err = resp; /* service response error */
6427 goto out;
6428 }
6429 }
6430
6431 if (!poll_cnt) {
6432 err = -EBUSY;
6433 goto out;
6434 }
6435
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306436 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6437 UFS_ABORT_TASK, &resp);
6438 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006439 if (!err) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306440 err = resp; /* service response error */
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006441 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6442 __func__, tag, err);
6443 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306444 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306445 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306446
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306447 err = ufshcd_clear_cmd(hba, tag);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006448 if (err) {
6449 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6450 __func__, tag, err);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306451 goto out;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006452 }
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306453
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306454 scsi_dma_unmap(cmd);
6455
6456 spin_lock_irqsave(host->host_lock, flags);
Yaniv Gardia48353f2016-02-01 15:02:40 +02006457 ufshcd_outstanding_req_clear(hba, tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306458 hba->lrb[tag].cmd = NULL;
6459 spin_unlock_irqrestore(host->host_lock, flags);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05306460
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306461out:
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306462 if (!err) {
6463 err = SUCCESS;
6464 } else {
6465 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
Gilad Bronere0b299e2017-02-03 16:56:40 -08006466 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306467 err = FAILED;
6468 }
6469
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006470 /*
6471 * This ufshcd_release() corresponds to the original scsi cmd that got
6472 * aborted here (as we won't get any IRQ for it).
6473 */
6474 ufshcd_release(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306475 return err;
6476}
6477
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306478/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306479 * ufshcd_host_reset_and_restore - reset and restore host controller
6480 * @hba: per-adapter instance
6481 *
6482 * Note that host controller reset may issue DME_RESET to
6483 * the local and remote (device) UniPro stacks, and the attributes
6484 * are reset to their default state.
6485 *
6486 * Returns zero on success, non-zero on failure
6487 */
6488static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6489{
6490 int err;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306491 unsigned long flags;
6492
Can Guo2df74b62019-11-25 22:53:33 -08006493 /*
6494 * Stop the host controller and complete the requests
6495 * cleared by h/w
6496 */
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306497 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardi596585a2016-03-10 17:37:08 +02006498 ufshcd_hba_stop(hba, false);
Can Guo2df74b62019-11-25 22:53:33 -08006499 hba->silence_err_logs = true;
6500 ufshcd_complete_requests(hba);
6501 hba->silence_err_logs = false;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306502 spin_unlock_irqrestore(hba->host->host_lock, flags);
6503
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08006504 /* scale up clocks to max frequency before full reinitialization */
Subhash Jadavani394b9492020-03-26 02:25:40 -07006505 ufshcd_set_clk_freq(hba, true);
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08006506
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306507 err = ufshcd_hba_enable(hba);
6508 if (err)
6509 goto out;
6510
6511 /* Establish the link again and restore the device */
Bean Huo1b9e2142020-01-20 14:08:15 +01006512 err = ufshcd_probe_hba(hba, false);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006513
6514 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306515 err = -EIO;
6516out:
6517 if (err)
6518 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
Stanley Chu8808b4e2019-07-10 21:38:21 +08006519 ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306520 return err;
6521}
6522
6523/**
6524 * ufshcd_reset_and_restore - reset and re-initialize host/device
6525 * @hba: per-adapter instance
6526 *
6527 * Reset and recover device, host and re-establish link. This
6528 * is helpful to recover the communication in fatal error conditions.
6529 *
6530 * Returns zero on success, non-zero on failure
6531 */
6532static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6533{
6534 int err = 0;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006535 int retries = MAX_HOST_RESET_RETRIES;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306536
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006537 do {
Bjorn Anderssond8d9f792019-08-28 12:17:54 -07006538 /* Reset the attached device */
6539 ufshcd_vops_device_reset(hba);
6540
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006541 err = ufshcd_host_reset_and_restore(hba);
6542 } while (err && --retries);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306543
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306544 return err;
6545}
6546
6547/**
6548 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
Bart Van Assche8aa29f12018-03-01 15:07:20 -08006549 * @cmd: SCSI command pointer
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306550 *
6551 * Returns SUCCESS/FAILED
6552 */
6553static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6554{
6555 int err;
6556 unsigned long flags;
6557 struct ufs_hba *hba;
6558
6559 hba = shost_priv(cmd->device->host);
6560
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006561 ufshcd_hold(hba, false);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306562 /*
6563 * Check if there is any race with fatal error handling.
6564 * If so, wait for it to complete. Even though fatal error
6565 * handling does reset and restore in some cases, don't assume
6566 * anything out of it. We are just avoiding race here.
6567 */
6568 do {
6569 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306570 if (!(work_pending(&hba->eh_work) ||
Zang Leigang8dc0da72017-06-24 19:14:32 +08006571 hba->ufshcd_state == UFSHCD_STATE_RESET ||
6572 hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306573 break;
6574 spin_unlock_irqrestore(hba->host->host_lock, flags);
6575 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306576 flush_work(&hba->eh_work);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306577 } while (1);
6578
6579 hba->ufshcd_state = UFSHCD_STATE_RESET;
6580 ufshcd_set_eh_in_progress(hba);
6581 spin_unlock_irqrestore(hba->host->host_lock, flags);
6582
6583 err = ufshcd_reset_and_restore(hba);
6584
6585 spin_lock_irqsave(hba->host->host_lock, flags);
6586 if (!err) {
6587 err = SUCCESS;
6588 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6589 } else {
6590 err = FAILED;
6591 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6592 }
6593 ufshcd_clear_eh_in_progress(hba);
6594 spin_unlock_irqrestore(hba->host->host_lock, flags);
6595
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006596 ufshcd_release(hba);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306597 return err;
6598}
6599
6600/**
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006601 * ufshcd_get_max_icc_level - calculate the ICC level
6602 * @sup_curr_uA: max. current supported by the regulator
6603 * @start_scan: row at the desc table to start scan from
6604 * @buff: power descriptor buffer
6605 *
6606 * Returns calculated max ICC level for specific regulator
6607 */
6608static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6609{
6610 int i;
6611 int curr_uA;
6612 u16 data;
6613 u16 unit;
6614
6615 for (i = start_scan; i >= 0; i--) {
Tomas Winklerd79713f2017-01-05 10:45:11 +02006616 data = be16_to_cpup((__be16 *)&buff[2 * i]);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006617 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6618 ATTR_ICC_LVL_UNIT_OFFSET;
6619 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
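		/* Normalize the reported value to micro-amps before comparing */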
6620 switch (unit) {
6621 case UFSHCD_NANO_AMP:
6622 curr_uA = curr_uA / 1000;
6623 break;
6624 case UFSHCD_MILI_AMP:
6625 curr_uA = curr_uA * 1000;
6626 break;
6627 case UFSHCD_AMP:
6628 curr_uA = curr_uA * 1000 * 1000;
6629 break;
6630 case UFSHCD_MICRO_AMP:
6631 default:
6632 break;
6633 }
6634 if (sup_curr_uA >= curr_uA)
6635 break;
6636 }
6637 if (i < 0) {
6638 i = 0;
6639 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
6640 }
6641
6642 return (u32)i;
6643}
6644
6645/**
6646 * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
6647 * ICC level. In case regulators are not initialized we'll return 0.
6648 * @hba: per-adapter instance
6649 * @desc_buf: power descriptor buffer to extract ICC levels from.
6650 * @len: length of desc_buf
6651 *
6652 * Returns calculated ICC level
6653 */
6654static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6655 u8 *desc_buf, int len)
6656{
6657 u32 icc_level = 0;
6658
6659 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6660 !hba->vreg_info.vccq2) {
6661 dev_err(hba->dev,
6662 "%s: Regulator capability was not set, actvIccLevel=%d",
6663 __func__, icc_level);
6664 goto out;
6665 }
6666
Stanley Chu0487fff2019-03-28 17:16:25 +08006667 if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006668 icc_level = ufshcd_get_max_icc_level(
6669 hba->vreg_info.vcc->max_uA,
6670 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6671 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6672
Stanley Chu0487fff2019-03-28 17:16:25 +08006673 if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006674 icc_level = ufshcd_get_max_icc_level(
6675 hba->vreg_info.vccq->max_uA,
6676 icc_level,
6677 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6678
Stanley Chu0487fff2019-03-28 17:16:25 +08006679 if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006680 icc_level = ufshcd_get_max_icc_level(
6681 hba->vreg_info.vccq2->max_uA,
6682 icc_level,
6683 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6684out:
6685 return icc_level;
6686}
6687
Can Guoe89860f2020-03-26 02:25:41 -07006688static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006689{
6690 int ret;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00006691 int buff_len = hba->desc_size.pwr_desc;
Kees Cookbbe21d72018-05-02 16:58:09 -07006692 u8 *desc_buf;
Can Guoe89860f2020-03-26 02:25:41 -07006693 u32 icc_level;
Kees Cookbbe21d72018-05-02 16:58:09 -07006694
6695 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6696 if (!desc_buf)
6697 return;
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006698
Bean Huo8c9a51b2020-01-20 14:08:17 +01006699 ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0,
6700 desc_buf, buff_len);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006701 if (ret) {
6702 dev_err(hba->dev,
6703 "%s: Failed reading power descriptor.len = %d ret = %d",
6704 __func__, buff_len, ret);
Kees Cookbbe21d72018-05-02 16:58:09 -07006705 goto out;
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006706 }
6707
Can Guoe89860f2020-03-26 02:25:41 -07006708 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
6709 buff_len);
6710 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006711
Szymon Mielczarekdbd34a62017-03-29 08:19:21 +02006712 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Can Guoe89860f2020-03-26 02:25:41 -07006713 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006714
6715 if (ret)
6716 dev_err(hba->dev,
6717 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
Can Guoe89860f2020-03-26 02:25:41 -07006718 __func__, icc_level, ret);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006719
Kees Cookbbe21d72018-05-02 16:58:09 -07006720out:
6721 kfree(desc_buf);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006722}
6723
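/*
 * Enable block layer runtime PM for a W-LU's request queue and, when the
 * device is flagged for runtime autosuspend, apply the default autosuspend
 * delay. The scsi_autopm_get/put pair keeps the device resumed while the
 * queue is being set up.
 */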
Can Guofb276f72020-03-25 18:09:59 -07006724static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
6725{
6726 scsi_autopm_get_device(sdev);
6727 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
6728 if (sdev->rpm_autosuspend)
6729 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
6730 RPM_AUTOSUSPEND_DELAY_MS);
6731 scsi_autopm_put_device(sdev);
6732}
6733
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006734/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006735 * ufshcd_scsi_add_wlus - Adds required W-LUs
6736 * @hba: per-adapter instance
6737 *
6738 * UFS device specification requires the UFS devices to support 4 well known
6739 * logical units:
6740 * "REPORT_LUNS" (address: 01h)
6741 * "UFS Device" (address: 50h)
6742 * "RPMB" (address: 44h)
6743 * "BOOT" (address: 30h)
6744 * UFS device's power management needs to be controlled by "POWER CONDITION"
6745 * field of SSU (START STOP UNIT) command. But this "power condition" field
6746 * will take effect only when it's sent to the "UFS device" well known logical unit,
6747 * hence we require the scsi_device instance to represent this logical unit in
6748 * order for the UFS host driver to send the SSU command for power management.
Bart Van Assche8aa29f12018-03-01 15:07:20 -08006749 *
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006750 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
6751 * Block) LU so user space process can control this LU. User space may also
6752 * want to have access to BOOT LU.
Bart Van Assche8aa29f12018-03-01 15:07:20 -08006753 *
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006754 * This function adds scsi device instances for each of the well known LUs
6755 * (except "REPORT LUNS" LU).
6756 *
6757 * Returns zero on success (all required W-LUs are added successfully),
6758 * non-zero error value on failure (if failed to add any of the required W-LU).
6759 */
6760static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6761{
6762 int ret = 0;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006763 struct scsi_device *sdev_rpmb;
6764 struct scsi_device *sdev_boot;
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006765
6766 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6767 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6768 if (IS_ERR(hba->sdev_ufs_device)) {
6769 ret = PTR_ERR(hba->sdev_ufs_device);
6770 hba->sdev_ufs_device = NULL;
6771 goto out;
6772 }
Can Guofb276f72020-03-25 18:09:59 -07006773 ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006774 scsi_device_put(hba->sdev_ufs_device);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006775
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006776 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006777 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006778 if (IS_ERR(sdev_rpmb)) {
6779 ret = PTR_ERR(sdev_rpmb);
Huanlin Ke3d21fbd2017-09-22 18:31:47 +08006780 goto remove_sdev_ufs_device;
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006781 }
Can Guofb276f72020-03-25 18:09:59 -07006782 ufshcd_blk_pm_runtime_init(sdev_rpmb);
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006783 scsi_device_put(sdev_rpmb);
Huanlin Ke3d21fbd2017-09-22 18:31:47 +08006784
6785 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6786 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
Can Guofb276f72020-03-25 18:09:59 -07006787 if (IS_ERR(sdev_boot)) {
Huanlin Ke3d21fbd2017-09-22 18:31:47 +08006788 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
Can Guofb276f72020-03-25 18:09:59 -07006789 } else {
6790 ufshcd_blk_pm_runtime_init(sdev_boot);
Huanlin Ke3d21fbd2017-09-22 18:31:47 +08006791 scsi_device_put(sdev_boot);
Can Guofb276f72020-03-25 18:09:59 -07006792 }
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006793 goto out;
6794
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006795remove_sdev_ufs_device:
6796 scsi_remove_device(hba->sdev_ufs_device);
6797out:
6798 return ret;
6799}
6800
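/**
 * ufshcd_wb_probe - probe WriteBooster support from the device descriptor
 * @hba: per-adapter instance
 * @desc_buf: device descriptor buffer
 *
 * Reads the WriteBooster related fields of the device descriptor and clears
 * UFSHCD_CAP_WB_EN if WriteBooster is not supported or not provisioned.
 */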
Asutosh Das3d17b9b2020-04-22 14:41:42 -07006801static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
6802{
Stanley Chu817d7e12020-05-08 16:01:08 +08006803 if (!ufshcd_is_wb_allowed(hba))
6804 return;
6805
6806 if (hba->desc_size.dev_desc < DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
6807 goto wb_disabled;
6808
Asutosh Das3d17b9b2020-04-22 14:41:42 -07006809 hba->dev_info.d_ext_ufs_feature_sup =
6810 get_unaligned_be32(desc_buf +
6811 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
Stanley Chu817d7e12020-05-08 16:01:08 +08006812
6813 if (!(hba->dev_info.d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
6814 goto wb_disabled;
6815
Asutosh Das3d17b9b2020-04-22 14:41:42 -07006816 /*
6817 * WB may be supported but not configured while provisioning.
6818 * The spec says that in dedicated WB buffer mode,
6819 * at most one LUN would have a WB buffer configured.
6820 * Currently only the shared buffer mode is supported.
6821 */
6822 hba->dev_info.b_wb_buffer_type =
6823 desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
6824
6825 hba->dev_info.d_wb_alloc_units =
6826 get_unaligned_be32(desc_buf +
6827 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
6828 hba->dev_info.b_presrv_uspc_en =
6829 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
6830
Stanley Chu817d7e12020-05-08 16:01:08 +08006831 if (!(hba->dev_info.b_wb_buffer_type &&
Asutosh Das3d17b9b2020-04-22 14:41:42 -07006832 hba->dev_info.d_wb_alloc_units))
Stanley Chu817d7e12020-05-08 16:01:08 +08006833 goto wb_disabled;
6834
6835 return;
6836
6837wb_disabled:
6838 hba->caps &= ~UFSHCD_CAP_WB_EN;
6839}
6840
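/**
 * ufshcd_fixup_dev_quirks - add device quirks from a fixup table
 * @hba: per-adapter instance
 * @fixups: fixup table, terminated by an entry with a zero quirk field
 *
 * Entries match on manufacturer id (or UFS_ANY_VENDOR) and model string
 * (or UFS_ANY_MODEL); matching quirks are OR-ed into hba->dev_quirks.
 */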
Stanley Chu8db269a2020-05-08 16:01:10 +08006841void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
Stanley Chu817d7e12020-05-08 16:01:08 +08006842{
6843 struct ufs_dev_fix *f;
6844 struct ufs_dev_info *dev_info = &hba->dev_info;
6845
Stanley Chu8db269a2020-05-08 16:01:10 +08006846 if (!fixups)
6847 return;
6848
6849 for (f = fixups; f->quirk; f++) {
Stanley Chu817d7e12020-05-08 16:01:08 +08006850 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
6851 f->wmanufacturerid == UFS_ANY_VENDOR) &&
6852 ((dev_info->model &&
6853 STR_PRFX_EQUAL(f->model, dev_info->model)) ||
6854 !strcmp(f->model, UFS_ANY_MODEL)))
6855 hba->dev_quirks |= f->quirk;
6856 }
Asutosh Das3d17b9b2020-04-22 14:41:42 -07006857}
Stanley Chu8db269a2020-05-08 16:01:10 +08006858EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
Asutosh Das3d17b9b2020-04-22 14:41:42 -07006859
Stanley Chuc28c00b2020-05-08 16:01:09 +08006860static void ufs_fixup_device_setup(struct ufs_hba *hba)
6861{
6862 /* fix by general quirk table */
Stanley Chu8db269a2020-05-08 16:01:10 +08006863 ufshcd_fixup_dev_quirks(hba, ufs_fixups);
Stanley Chuc28c00b2020-05-08 16:01:09 +08006864
6865 /* allow vendors to fix quirks */
6866 ufshcd_vops_fixup_dev_quirks(hba);
6867}
6868
Bean Huo09750062020-01-20 14:08:14 +01006869static int ufs_get_device_desc(struct ufs_hba *hba)
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006870{
6871 int err;
Kees Cookbbe21d72018-05-02 16:58:09 -07006872 size_t buff_len;
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006873 u8 model_index;
Kees Cookbbe21d72018-05-02 16:58:09 -07006874 u8 *desc_buf;
Bean Huo09750062020-01-20 14:08:14 +01006875 struct ufs_dev_info *dev_info = &hba->dev_info;
Tomas Winkler4b828fe2019-07-30 08:55:17 +03006876
Kees Cookbbe21d72018-05-02 16:58:09 -07006877 buff_len = max_t(size_t, hba->desc_size.dev_desc,
6878 QUERY_DESC_MAX_SIZE + 1);
6879 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6880 if (!desc_buf) {
6881 err = -ENOMEM;
6882 goto out;
6883 }
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006884
Bean Huo8c9a51b2020-01-20 14:08:17 +01006885 err = ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf,
6886 hba->desc_size.dev_desc);
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006887 if (err) {
6888 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6889 __func__, err);
6890 goto out;
6891 }
6892
6893 /*
6894 * getting vendor (manufacturerID) in big endian
6895 * format
6896 */
Bean Huo09750062020-01-20 14:08:14 +01006897 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006898 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6899
Can Guo09f17792020-02-10 19:40:49 -08006900 /* getting Specification Version in big endian format */
6901 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
6902 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
6903
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006904 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
Asutosh Das3d17b9b2020-04-22 14:41:42 -07006905
Tomas Winkler4b828fe2019-07-30 08:55:17 +03006906 err = ufshcd_read_string_desc(hba, model_index,
Bean Huo09750062020-01-20 14:08:14 +01006907 &dev_info->model, SD_ASCII_STD);
Tomas Winkler4b828fe2019-07-30 08:55:17 +03006908 if (err < 0) {
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006909 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6910 __func__, err);
6911 goto out;
6912 }
6913
Stanley Chu817d7e12020-05-08 16:01:08 +08006914 ufs_fixup_device_setup(hba);
6915
6916 /*
6917 * Probe WB only for UFS-3.1 devices or UFS devices with quirk
6918 * UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES enabled
6919 */
6920 if (dev_info->wspecversion >= 0x310 ||
6921 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))
6922 ufshcd_wb_probe(hba, desc_buf);
6923
Tomas Winkler4b828fe2019-07-30 08:55:17 +03006924 /*
6925 * ufshcd_read_string_desc returns the size of the string on success;
6926 * reset the error value
6927 */
6928 err = 0;
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006929
6930out:
Kees Cookbbe21d72018-05-02 16:58:09 -07006931 kfree(desc_buf);
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006932 return err;
6933}
6934
Bean Huo09750062020-01-20 14:08:14 +01006935static void ufs_put_device_desc(struct ufs_hba *hba)
Tomas Winkler4b828fe2019-07-30 08:55:17 +03006936{
Bean Huo09750062020-01-20 14:08:14 +01006937 struct ufs_dev_info *dev_info = &hba->dev_info;
6938
6939 kfree(dev_info->model);
6940 dev_info->model = NULL;
Tomas Winkler4b828fe2019-07-30 08:55:17 +03006941}
6942
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006943/**
Yaniv Gardi37113102016-03-10 17:37:16 +02006944 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6945 * @hba: per-adapter instance
6946 *
6947 * PA_TActivate parameter can be tuned manually if UniPro version is less than
6948 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
6949 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
6950 * the hibern8 exit latency.
6951 *
6952 * Returns zero on success, non-zero error value on failure.
6953 */
6954static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6955{
6956 int ret = 0;
6957 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6958
6959 ret = ufshcd_dme_peer_get(hba,
6960 UIC_ARG_MIB_SEL(
6961 RX_MIN_ACTIVATETIME_CAPABILITY,
6962 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6963 &peer_rx_min_activatetime);
6964 if (ret)
6965 goto out;
6966
6967 /* make sure proper unit conversion is applied */
6968 tuned_pa_tactivate =
6969 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6970 / PA_TACTIVATE_TIME_UNIT_US);
6971 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6972 tuned_pa_tactivate);
6973
6974out:
6975 return ret;
6976}
6977
6978/**
6979 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
6980 * @hba: per-adapter instance
6981 *
6982 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
6983 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
6984 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
6985 * This optimal value can help reduce the hibern8 exit latency.
6986 *
6987 * Returns zero on success, non-zero error value on failure.
6988 */
6989static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6990{
6991 int ret = 0;
6992 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6993 u32 max_hibern8_time, tuned_pa_hibern8time;
6994
6995 ret = ufshcd_dme_get(hba,
6996 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6997 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6998 &local_tx_hibern8_time_cap);
6999 if (ret)
7000 goto out;
7001
7002 ret = ufshcd_dme_peer_get(hba,
7003 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7004 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7005 &peer_rx_hibern8_time_cap);
7006 if (ret)
7007 goto out;
7008
7009 max_hibern8_time = max(local_tx_hibern8_time_cap,
7010 peer_rx_hibern8_time_cap);
7011 /* make sure proper unit conversion is applied */
7012 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7013 / PA_HIBERN8_TIME_UNIT_US);
7014 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7015 tuned_pa_hibern8time);
7016out:
7017 return ret;
7018}
7019
subhashj@codeaurora.orgc6a6db42016-11-23 16:32:08 -08007020/**
7021 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7022 * less than device PA_TACTIVATE time.
7023 * @hba: per-adapter instance
7024 *
7025 * Some UFS devices require host PA_TACTIVATE to be lower than device
7026 * PA_TACTIVATE; we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
7027 * for such devices.
7028 *
7029 * Returns zero on success, non-zero error value on failure.
7030 */
7031static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7032{
7033 int ret = 0;
7034 u32 granularity, peer_granularity;
7035 u32 pa_tactivate, peer_pa_tactivate;
7036 u32 pa_tactivate_us, peer_pa_tactivate_us;
7037 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7038
7039 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7040 &granularity);
7041 if (ret)
7042 goto out;
7043
7044 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7045 &peer_granularity);
7046 if (ret)
7047 goto out;
7048
7049 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7050 (granularity > PA_GRANULARITY_MAX_VAL)) {
7051 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7052 __func__, granularity);
7053 return -EINVAL;
7054 }
7055
7056 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7057 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7058 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7059 __func__, peer_granularity);
7060 return -EINVAL;
7061 }
7062
7063 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7064 if (ret)
7065 goto out;
7066
7067 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7068 &peer_pa_tactivate);
7069 if (ret)
7070 goto out;
7071
7072 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7073 peer_pa_tactivate_us = peer_pa_tactivate *
7074 gran_to_us_table[peer_granularity - 1];
7075
7076 if (pa_tactivate_us > peer_pa_tactivate_us) {
7077 u32 new_peer_pa_tactivate;
7078
7079 new_peer_pa_tactivate = pa_tactivate_us /
7080 gran_to_us_table[peer_granularity - 1];
7081 new_peer_pa_tactivate++;
7082 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7083 new_peer_pa_tactivate);
7084 }
7085
7086out:
7087 return ret;
7088}
7089
Bean Huo09750062020-01-20 14:08:14 +01007090static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
Yaniv Gardi37113102016-03-10 17:37:16 +02007091{
7092 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7093 ufshcd_tune_pa_tactivate(hba);
7094 ufshcd_tune_pa_hibern8time(hba);
7095 }
7096
Can Guoe91ed9e2020-02-23 20:09:21 -08007097 ufshcd_vops_apply_dev_quirks(hba);
7098
Yaniv Gardi37113102016-03-10 17:37:16 +02007099 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7100 /* set 1ms timeout for PA_TACTIVATE */
7101 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
subhashj@codeaurora.orgc6a6db42016-11-23 16:32:08 -08007102
7103 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7104 ufshcd_quirk_tune_host_pa_tactivate(hba);
Yaniv Gardi37113102016-03-10 17:37:16 +02007105}
7106
Dolev Ravivff8e20c2016-12-22 18:42:18 -08007107static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7108{
Dolev Ravivff8e20c2016-12-22 18:42:18 -08007109 hba->ufs_stats.hibern8_exit_cnt = 0;
7110 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
Gilad Broner7fabb772017-02-03 16:56:50 -08007111 hba->req_abort_count = 0;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08007112}
7113
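/*
 * Query the actual size of each descriptor from the device and fall back to
 * the default size defined by the specification when the query fails.
 */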
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00007114static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
7115{
7116 int err;
7117
7118 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
7119 &hba->desc_size.dev_desc);
7120 if (err)
7121 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
7122
7123 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
7124 &hba->desc_size.pwr_desc);
7125 if (err)
7126 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
7127
7128 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
7129 &hba->desc_size.interc_desc);
7130 if (err)
7131 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
7132
7133 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
7134 &hba->desc_size.conf_desc);
7135 if (err)
7136 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
7137
7138 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
7139 &hba->desc_size.unit_desc);
7140 if (err)
7141 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
7142
7143 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
7144 &hba->desc_size.geom_desc);
7145 if (err)
7146 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
Bean Huo059efd82019-10-29 14:22:45 +00007147
Stanislav Nijnikovc648c2d2018-02-15 14:14:05 +02007148 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
7149 &hba->desc_size.hlth_desc);
7150 if (err)
7151 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00007152}
7153
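/*
 * Read the geometry descriptor and derive the maximum number of logical
 * units the device supports (8 or 32).
 */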
Bean Huo731f0622020-01-20 14:08:19 +01007154static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
7155{
7156 int err;
7157 size_t buff_len;
7158 u8 *desc_buf;
7159
7160 buff_len = hba->desc_size.geom_desc;
7161 desc_buf = kmalloc(buff_len, GFP_KERNEL);
7162 if (!desc_buf) {
7163 err = -ENOMEM;
7164 goto out;
7165 }
7166
7167 err = ufshcd_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0,
7168 desc_buf, buff_len);
7169 if (err) {
7170 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7171 __func__, err);
7172 goto out;
7173 }
7174
7175 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7176 hba->dev_info.max_lu_supported = 32;
7177 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
7178 hba->dev_info.max_lu_supported = 8;
7179
7180out:
7181 kfree(desc_buf);
7182 return err;
7183}
7184
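/* Map of supported device reference clock rates (Hz) to bRefClkFreq attribute values */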
Subhash Jadavani9e1e8a72018-10-16 14:29:41 +05307185static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
7186 {19200000, REF_CLK_FREQ_19_2_MHZ},
7187 {26000000, REF_CLK_FREQ_26_MHZ},
7188 {38400000, REF_CLK_FREQ_38_4_MHZ},
7189 {52000000, REF_CLK_FREQ_52_MHZ},
7190 {0, REF_CLK_FREQ_INVAL},
7191};
7192
7193static enum ufs_ref_clk_freq
7194ufs_get_bref_clk_from_hz(unsigned long freq)
7195{
7196 int i;
7197
7198 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
7199 if (ufs_ref_clk_freqs[i].freq_hz == freq)
7200 return ufs_ref_clk_freqs[i].val;
7201
7202 return REF_CLK_FREQ_INVAL;
7203}
7204
7205void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7206{
7207 unsigned long freq;
7208
7209 freq = clk_get_rate(refclk);
7210
7211 hba->dev_ref_clk_freq =
7212 ufs_get_bref_clk_from_hz(freq);
7213
7214 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7215 dev_err(hba->dev,
7216 "invalid ref_clk setting = %ld\n", freq);
7217}
7218
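/*
 * Program the device's bRefClkFreq attribute to match the reference clock
 * supplied by the host, skipping the write when it already matches.
 */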
7219static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7220{
7221 int err;
7222 u32 ref_clk;
7223 u32 freq = hba->dev_ref_clk_freq;
7224
7225 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7226 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7227
7228 if (err) {
7229 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7230 err);
7231 goto out;
7232 }
7233
7234 if (ref_clk == freq)
7235 goto out; /* nothing to update */
7236
7237 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7238 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
7239
7240 if (err) {
7241 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
7242 ufs_ref_clk_freqs[freq].freq_hz);
7243 goto out;
7244 }
7245
7246 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
7247 ufs_ref_clk_freqs[freq].freq_hz);
7248
7249out:
7250 return err;
7251}
7252
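/**
 * ufshcd_device_params_init - initialize UFS device parameters
 * @hba: per-adapter instance
 *
 * Reads descriptor sizes, geometry and device descriptors, applies device
 * quirks, reads the power-on write-protect flag and probes the maximum
 * supported power mode.
 */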
Bean Huo1b9e2142020-01-20 14:08:15 +01007253static int ufshcd_device_params_init(struct ufs_hba *hba)
7254{
7255 bool flag;
7256 int ret;
7257
Bean Huo731f0622020-01-20 14:08:19 +01007258 /* Clear any previous UFS device information */
7259 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
7260
Bean Huo1b9e2142020-01-20 14:08:15 +01007261 /* Init check for device descriptor sizes */
7262 ufshcd_init_desc_sizes(hba);
7263
Bean Huo731f0622020-01-20 14:08:19 +01007264 /* Init UFS geometry descriptor related parameters */
7265 ret = ufshcd_device_geo_params_init(hba);
7266 if (ret)
7267 goto out;
7268
Bean Huo1b9e2142020-01-20 14:08:15 +01007269 /* Check and apply UFS device quirks */
7270 ret = ufs_get_device_desc(hba);
7271 if (ret) {
7272 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
7273 __func__, ret);
7274 goto out;
7275 }
7276
Can Guo09f17792020-02-10 19:40:49 -08007277 ufshcd_get_ref_clk_gating_wait(hba);
7278
Bean Huo1b9e2142020-01-20 14:08:15 +01007279 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7280 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
7281 hba->dev_info.f_power_on_wp_en = flag;
7282
Bean Huo2b35b2a2020-01-20 14:08:16 +01007283 /* Probe maximum power mode co-supported by both UFS host and device */
7284 if (ufshcd_get_max_pwr_mode(hba))
7285 dev_err(hba->dev,
7286 "%s: Failed getting max supported power mode\n",
7287 __func__);
Bean Huo1b9e2142020-01-20 14:08:15 +01007288out:
7289 return ret;
7290}
7291
7292/**
7293 * ufshcd_add_lus - probe and add UFS logical units
7294 * @hba: per-adapter instance
7295 */
7296static int ufshcd_add_lus(struct ufs_hba *hba)
7297{
7298 int ret;
7299
Bean Huo1b9e2142020-01-20 14:08:15 +01007300 /* Add required well known logical units to scsi mid layer */
7301 ret = ufshcd_scsi_add_wlus(hba);
7302 if (ret)
7303 goto out;
7304
7305 /* Initialize devfreq after UFS device is detected */
7306 if (ufshcd_is_clkscaling_supported(hba)) {
7307 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7308 &hba->pwr_info,
7309 sizeof(struct ufs_pa_layer_attr));
7310 hba->clk_scaling.saved_pwr_info.is_valid = true;
7311 if (!hba->devfreq) {
7312 ret = ufshcd_devfreq_init(hba);
7313 if (ret)
7314 goto out;
7315 }
7316
7317 hba->clk_scaling.is_allowed = true;
7318 }
7319
7320 ufs_bsg_probe(hba);
7321 scsi_scan_host(hba->host);
7322 pm_runtime_put_sync(hba->dev);
7323
Bean Huo1b9e2142020-01-20 14:08:15 +01007324out:
7325 return ret;
7326}
7327
Yaniv Gardi37113102016-03-10 17:37:16 +02007328/**
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007329 * ufshcd_probe_hba - probe hba to detect device and initialize
7330 * @hba: per-adapter instance
Bean Huo1b9e2142020-01-20 14:08:15 +01007331 * @async: asynchronous execution or not
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007332 *
7333 * Execute link-startup and verify device initialization
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307334 */
Bean Huo1b9e2142020-01-20 14:08:15 +01007335static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307336{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307337 int ret;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007338 ktime_t start = ktime_get();
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307339
7340 ret = ufshcd_link_startup(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05307341 if (ret)
7342 goto out;
7343
Yaniv Gardiafdfff52016-03-10 17:37:15 +02007344 /* set the default level for urgent bkops */
7345 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
7346 hba->is_urgent_bkops_lvl_checked = false;
7347
Dolev Ravivff8e20c2016-12-22 18:42:18 -08007348 /* Debug counters initialization */
7349 ufshcd_clear_dbg_ufs_stats(hba);
7350
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007351 /* UniPro link is active now */
7352 ufshcd_set_link_active(hba);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05307353
Bean Huo1b9e2142020-01-20 14:08:15 +01007354 /* Verify device initialization by sending NOP OUT UPIU */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05307355 ret = ufshcd_verify_dev_init(hba);
7356 if (ret)
7357 goto out;
7358
Bean Huo1b9e2142020-01-20 14:08:15 +01007359 /* Initiate UFS initialization and wait until completion */
Dolev Raviv68078d52013-07-30 00:35:58 +05307360 ret = ufshcd_complete_dev_init(hba);
7361 if (ret)
7362 goto out;
7363
Bean Huo1b9e2142020-01-20 14:08:15 +01007364 /*
7365 * Initialize UFS device parameters used by the driver; these
7366 * parameters are associated with UFS descriptors.
7367 */
7368 if (async) {
7369 ret = ufshcd_device_params_init(hba);
7370 if (ret)
7371 goto out;
Tomas Winkler93fdd5a2017-01-05 10:45:12 +02007372 }
7373
Bean Huo09750062020-01-20 14:08:14 +01007374 ufshcd_tune_unipro_params(hba);
Tomas Winkler4b828fe2019-07-30 08:55:17 +03007375
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007376 /* UFS device is also active now */
7377 ufshcd_set_ufs_dev_active(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05307378 ufshcd_force_reset_auto_bkops(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007379 hba->wlun_dev_clr_ua = true;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307380
Bean Huo2b35b2a2020-01-20 14:08:16 +01007381 /* Gear up to HS gear if supported */
7382 if (hba->max_pwr_info.is_valid) {
Subhash Jadavani9e1e8a72018-10-16 14:29:41 +05307383 /*
7384 * Set the right value to bRefClkFreq before attempting to
7385 * switch to HS gears.
7386 */
7387 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
7388 ufshcd_set_dev_ref_clk(hba);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03007389 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
Dov Levenglick8643ae62016-10-17 17:10:14 -07007390 if (ret) {
Dolev Raviv7eb584d2014-09-25 15:32:31 +03007391 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
7392 __func__, ret);
Dov Levenglick8643ae62016-10-17 17:10:14 -07007393 goto out;
7394 }
Can Guo6a9df812020-02-11 21:38:28 -08007395 ufshcd_print_pwr_info(hba);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03007396 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007397
Can Guoe89860f2020-03-26 02:25:41 -07007398 /*
7399 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
7400 * and for removable UFS card as well, hence always set the parameter.
7401 * Note: Error handler may issue the device reset hence resetting
7402 * bActiveICCLevel as well so it is always safe to set this here.
7403 */
7404 ufshcd_set_active_icc_lvl(hba);
7405
Yaniv Gardi53c12d02016-02-01 15:02:45 +02007406 /* set the state as operational after switching to desired gear */
7407 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00007408
Asutosh Das3d17b9b2020-04-22 14:41:42 -07007409 ufshcd_wb_config(hba);
Can Guo71d848b2019-11-14 22:09:26 -08007410 /* Enable Auto-Hibernate if configured */
7411 ufshcd_auto_hibern8_enable(hba);
7412
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05307413out:
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007414
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007415 trace_ufshcd_init(dev_name(hba->dev), ret,
7416 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08007417 hba->curr_dev_pwr_mode, hba->uic_link_state);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007418 return ret;
7419}
7420
7421/**
7422 * ufshcd_async_scan - asynchronous execution for probing hba
7423 * @data: data pointer to pass to this function
7424 * @cookie: cookie data
7425 */
7426static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7427{
7428 struct ufs_hba *hba = (struct ufs_hba *)data;
Bean Huo1b9e2142020-01-20 14:08:15 +01007429 int ret;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007430
Bean Huo1b9e2142020-01-20 14:08:15 +01007431 /* Initialize hba, detect and initialize UFS device */
7432 ret = ufshcd_probe_hba(hba, true);
7433 if (ret)
7434 goto out;
7435
7436 /* Probe and add UFS logical units */
7437 ret = ufshcd_add_lus(hba);
7438out:
7439 /*
7440 * If we failed to initialize the device or the device is not
7441 * present, turn off the power/clocks etc.
7442 */
7443 if (ret) {
7444 pm_runtime_put_sync(hba->dev);
7445 ufshcd_exit_clk_scaling(hba);
7446 ufshcd_hba_exit(hba);
7447 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307448}
7449
Stanislav Nijnikovd829fc82018-02-15 14:14:09 +02007450static const struct attribute_group *ufshcd_driver_groups[] = {
7451 &ufs_sysfs_unit_descriptor_group,
Stanislav Nijnikovec92b592018-02-15 14:14:11 +02007452 &ufs_sysfs_lun_attributes_group,
Stanislav Nijnikovd829fc82018-02-15 14:14:09 +02007453 NULL,
7454};
7455
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307456static struct scsi_host_template ufshcd_driver_template = {
7457 .module = THIS_MODULE,
7458 .name = UFSHCD,
7459 .proc_name = UFSHCD,
7460 .queuecommand = ufshcd_queuecommand,
7461 .slave_alloc = ufshcd_slave_alloc,
Akinobu Mitaeeda4742014-07-01 23:00:32 +09007462 .slave_configure = ufshcd_slave_configure,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307463 .slave_destroy = ufshcd_slave_destroy,
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03007464 .change_queue_depth = ufshcd_change_queue_depth,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307465 .eh_abort_handler = ufshcd_abort,
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307466 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7467 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307468 .this_id = -1,
7469 .sg_tablesize = SG_ALL,
7470 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
7471 .can_queue = UFSHCD_CAN_QUEUE,
Christoph Hellwig552a9902019-06-17 14:19:55 +02007472 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007473 .max_host_blocked = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01007474 .track_queue_depth = 1,
Stanislav Nijnikovd829fc82018-02-15 14:14:09 +02007475 .sdev_groups = ufshcd_driver_groups,
Christoph Hellwig4af14d12018-12-13 16:17:09 +01007476 .dma_boundary = PAGE_SIZE - 1,
Stanley Chu49615ba2019-09-16 23:56:50 +08007477 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307478};
7479
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007480static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7481 int ua)
7482{
Bjorn Andersson7b16a072015-02-11 19:35:28 -08007483 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007484
Bjorn Andersson7b16a072015-02-11 19:35:28 -08007485 if (!vreg)
7486 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007487
Stanley Chu0487fff2019-03-28 17:16:25 +08007488 /*
7489 * "set_load" operation shall be required on those regulators
7490 * which specifically configured current limitation. Otherwise
7491 * zero max_uA may cause unexpected behavior when regulator is
7492 * enabled or set as high power mode.
7493 */
7494 if (!vreg->max_uA)
7495 return 0;
7496
Bjorn Andersson7b16a072015-02-11 19:35:28 -08007497 ret = regulator_set_load(vreg->reg, ua);
7498 if (ret < 0) {
7499 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7500 __func__, vreg->name, ua, ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007501 }
7502
7503 return ret;
7504}
7505
7506static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7507 struct ufs_vreg *vreg)
7508{
Marc Gonzalez73067982019-02-27 11:41:45 +01007509 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007510}
7511
7512static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7513 struct ufs_vreg *vreg)
7514{
Adrian Hunter7c7cfdc2019-08-14 15:59:50 +03007515 if (!vreg)
7516 return 0;
7517
Marc Gonzalez73067982019-02-27 11:41:45 +01007518 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007519}
7520
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007521static int ufshcd_config_vreg(struct device *dev,
7522 struct ufs_vreg *vreg, bool on)
7523{
7524 int ret = 0;
Gustavo A. R. Silva72753592017-11-20 08:12:29 -06007525 struct regulator *reg;
7526 const char *name;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007527 int min_uV, uA_load;
7528
7529 BUG_ON(!vreg);
7530
Gustavo A. R. Silva72753592017-11-20 08:12:29 -06007531 reg = vreg->reg;
7532 name = vreg->name;
7533
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007534 if (regulator_count_voltages(reg) > 0) {
Asutosh Das90d88f42020-02-10 19:40:45 -08007535 uA_load = on ? vreg->max_uA : 0;
7536 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
7537 if (ret)
7538 goto out;
7539
Stanley Chu3b141e82019-03-28 17:16:24 +08007540 if (vreg->min_uV && vreg->max_uV) {
7541 min_uV = on ? vreg->min_uV : 0;
7542 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
7543 if (ret) {
7544 dev_err(dev,
7545 "%s: %s set voltage failed, err=%d\n",
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007546 __func__, name, ret);
Stanley Chu3b141e82019-03-28 17:16:24 +08007547 goto out;
7548 }
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007549 }
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007550 }
7551out:
7552 return ret;
7553}
7554
7555static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
7556{
7557 int ret = 0;
7558
Marc Gonzalez73067982019-02-27 11:41:45 +01007559 if (!vreg || vreg->enabled)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007560 goto out;
7561
7562 ret = ufshcd_config_vreg(dev, vreg, true);
7563 if (!ret)
7564 ret = regulator_enable(vreg->reg);
7565
7566 if (!ret)
7567 vreg->enabled = true;
7568 else
7569 dev_err(dev, "%s: %s enable failed, err=%d\n",
7570 __func__, vreg->name, ret);
7571out:
7572 return ret;
7573}
7574
7575static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
7576{
7577 int ret = 0;
7578
Marc Gonzalez73067982019-02-27 11:41:45 +01007579 if (!vreg || !vreg->enabled)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007580 goto out;
7581
7582 ret = regulator_disable(vreg->reg);
7583
7584 if (!ret) {
7585 /* ignore errors on applying disable config */
7586 ufshcd_config_vreg(dev, vreg, false);
7587 vreg->enabled = false;
7588 } else {
7589 dev_err(dev, "%s: %s disable failed, err=%d\n",
7590 __func__, vreg->name, ret);
7591 }
7592out:
7593 return ret;
7594}
7595
7596static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
7597{
7598 int ret = 0;
7599 struct device *dev = hba->dev;
7600 struct ufs_vreg_info *info = &hba->vreg_info;
7601
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007602 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
7603 if (ret)
7604 goto out;
7605
7606 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
7607 if (ret)
7608 goto out;
7609
7610 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7611 if (ret)
7612 goto out;
7613
7614out:
7615 if (ret) {
7616 ufshcd_toggle_vreg(dev, info->vccq2, false);
7617 ufshcd_toggle_vreg(dev, info->vccq, false);
7618 ufshcd_toggle_vreg(dev, info->vcc, false);
7619 }
7620 return ret;
7621}
7622
Raviv Shvili6a771a62014-09-25 15:32:24 +03007623static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7624{
7625 struct ufs_vreg_info *info = &hba->vreg_info;
7626
Zeng Guangyue60b7b822019-03-30 17:03:13 +08007627 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
Raviv Shvili6a771a62014-09-25 15:32:24 +03007628}
7629
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007630static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
7631{
7632 int ret = 0;
7633
7634 if (!vreg)
7635 goto out;
7636
7637 vreg->reg = devm_regulator_get(dev, vreg->name);
7638 if (IS_ERR(vreg->reg)) {
7639 ret = PTR_ERR(vreg->reg);
7640 dev_err(dev, "%s: %s get failed, err=%d\n",
7641 __func__, vreg->name, ret);
7642 }
7643out:
7644 return ret;
7645}
7646
7647static int ufshcd_init_vreg(struct ufs_hba *hba)
7648{
7649 int ret = 0;
7650 struct device *dev = hba->dev;
7651 struct ufs_vreg_info *info = &hba->vreg_info;
7652
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007653 ret = ufshcd_get_vreg(dev, info->vcc);
7654 if (ret)
7655 goto out;
7656
7657 ret = ufshcd_get_vreg(dev, info->vccq);
7658 if (ret)
7659 goto out;
7660
7661 ret = ufshcd_get_vreg(dev, info->vccq2);
7662out:
7663 return ret;
7664}
7665
Raviv Shvili6a771a62014-09-25 15:32:24 +03007666static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
7667{
7668 struct ufs_vreg_info *info = &hba->vreg_info;
7669
7670 if (info)
7671 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
7672
7673 return 0;
7674}
7675
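/*
 * Enable or disable all clocks listed in hba->clk_list_head. When
 * skip_ref_clk is true the device reference clock ("ref_clk") is left
 * untouched. Vendor hooks are notified before and after the change.
 */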
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007676static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7677 bool skip_ref_clk)
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007678{
7679 int ret = 0;
7680 struct ufs_clk_info *clki;
7681 struct list_head *head = &hba->clk_list_head;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007682 unsigned long flags;
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08007683 ktime_t start = ktime_get();
7684 bool clk_state_changed = false;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007685
Szymon Mielczarek566ec9a2017-06-05 11:36:54 +03007686 if (list_empty(head))
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007687 goto out;
7688
Can Guo38f32422020-02-10 19:40:47 -08007689 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
7690 if (ret)
7691 return ret;
Subhash Jadavani1e879e82016-10-06 21:48:22 -07007692
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007693 list_for_each_entry(clki, head, list) {
7694 if (!IS_ERR_OR_NULL(clki->clk)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007695 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
7696 continue;
7697
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08007698 clk_state_changed = on ^ clki->enabled;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007699 if (on && !clki->enabled) {
7700 ret = clk_prepare_enable(clki->clk);
7701 if (ret) {
7702 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7703 __func__, clki->name, ret);
7704 goto out;
7705 }
7706 } else if (!on && clki->enabled) {
7707 clk_disable_unprepare(clki->clk);
7708 }
7709 clki->enabled = on;
7710 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7711 clki->name, on ? "en" : "dis");
7712 }
7713 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007714
Can Guo38f32422020-02-10 19:40:47 -08007715 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7716 if (ret)
7717 return ret;
Subhash Jadavani1e879e82016-10-06 21:48:22 -07007718
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007719out:
7720 if (ret) {
7721 list_for_each_entry(clki, head, list) {
7722 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7723 clk_disable_unprepare(clki->clk);
7724 }
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007725 } else if (!ret && on) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007726 spin_lock_irqsave(hba->host->host_lock, flags);
7727 hba->clk_gating.state = CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007728 trace_ufshcd_clk_gating(dev_name(hba->dev),
7729 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007730 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007731 }
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007732
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08007733 if (clk_state_changed)
7734 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7735 (on ? "on" : "off"),
7736 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007737 return ret;
7738}
7739
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007740static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7741{
7742 return __ufshcd_setup_clocks(hba, on, false);
7743}
7744
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007745static int ufshcd_init_clocks(struct ufs_hba *hba)
7746{
7747 int ret = 0;
7748 struct ufs_clk_info *clki;
7749 struct device *dev = hba->dev;
7750 struct list_head *head = &hba->clk_list_head;
7751
Szymon Mielczarek566ec9a2017-06-05 11:36:54 +03007752 if (list_empty(head))
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007753 goto out;
7754
7755 list_for_each_entry(clki, head, list) {
7756 if (!clki->name)
7757 continue;
7758
7759 clki->clk = devm_clk_get(dev, clki->name);
7760 if (IS_ERR(clki->clk)) {
7761 ret = PTR_ERR(clki->clk);
7762 dev_err(dev, "%s: %s clk get failed, %d\n",
7763 __func__, clki->name, ret);
7764 goto out;
7765 }
7766
Subhash Jadavani9e1e8a72018-10-16 14:29:41 +05307767 /*
7768 * Parse device ref clk freq as per device tree "ref_clk".
7769 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
7770 * in ufshcd_alloc_host().
7771 */
7772 if (!strcmp(clki->name, "ref_clk"))
7773 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
7774
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007775 if (clki->max_freq) {
7776 ret = clk_set_rate(clki->clk, clki->max_freq);
7777 if (ret) {
7778 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7779 __func__, clki->name,
7780 clki->max_freq, ret);
7781 goto out;
7782 }
Sahitya Tummala856b3482014-09-25 15:32:34 +03007783 clki->curr_freq = clki->max_freq;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007784 }
7785 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7786 clki->name, clk_get_rate(clki->clk));
7787 }
7788out:
7789 return ret;
7790}
7791
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007792static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7793{
7794 int err = 0;
7795
7796 if (!hba->vops)
7797 goto out;
7798
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007799 err = ufshcd_vops_init(hba);
7800 if (err)
7801 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007802
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007803 err = ufshcd_vops_setup_regulators(hba, true);
7804 if (err)
7805 goto out_exit;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007806
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007807 goto out;
7808
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007809out_exit:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007810 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007811out:
7812 if (err)
7813 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007814 __func__, ufshcd_get_var_name(hba), err);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007815 return err;
7816}
7817
7818static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7819{
7820 if (!hba->vops)
7821 return;
7822
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007823 ufshcd_vops_setup_regulators(hba, false);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007824
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007825 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007826}
7827
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007828static int ufshcd_hba_init(struct ufs_hba *hba)
7829{
7830 int err;
7831
Raviv Shvili6a771a62014-09-25 15:32:24 +03007832 /*
7833 * Handle host controller power separately from the UFS device power
7834 * rails, as it helps control the UFS host controller power collapse
7835 * independently, which is different from the UFS device power collapse.
7836 * Also, enable the host controller power before we go ahead with the rest
7837 * of the initialization here.
7838 */
7839 err = ufshcd_init_hba_vreg(hba);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007840 if (err)
7841 goto out;
7842
Raviv Shvili6a771a62014-09-25 15:32:24 +03007843 err = ufshcd_setup_hba_vreg(hba, true);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007844 if (err)
7845 goto out;
7846
Raviv Shvili6a771a62014-09-25 15:32:24 +03007847 err = ufshcd_init_clocks(hba);
7848 if (err)
7849 goto out_disable_hba_vreg;
7850
7851 err = ufshcd_setup_clocks(hba, true);
7852 if (err)
7853 goto out_disable_hba_vreg;
7854
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007855 err = ufshcd_init_vreg(hba);
7856 if (err)
7857 goto out_disable_clks;
7858
7859 err = ufshcd_setup_vreg(hba, true);
7860 if (err)
7861 goto out_disable_clks;
7862
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007863 err = ufshcd_variant_hba_init(hba);
7864 if (err)
7865 goto out_disable_vreg;
7866
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007867 hba->is_powered = true;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007868 goto out;
7869
7870out_disable_vreg:
7871 ufshcd_setup_vreg(hba, false);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007872out_disable_clks:
7873 ufshcd_setup_clocks(hba, false);
Raviv Shvili6a771a62014-09-25 15:32:24 +03007874out_disable_hba_vreg:
7875 ufshcd_setup_hba_vreg(hba, false);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007876out:
7877 return err;
7878}
7879
7880static void ufshcd_hba_exit(struct ufs_hba *hba)
7881{
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007882 if (hba->is_powered) {
7883 ufshcd_variant_hba_exit(hba);
7884 ufshcd_setup_vreg(hba, false);
Gilad Bronera5082532016-10-17 17:10:00 -07007885 ufshcd_suspend_clkscaling(hba);
Vivek Gautameebcc192018-08-07 23:17:39 +05307886 if (ufshcd_is_clkscaling_supported(hba))
subhashj@codeaurora.org0701e492017-02-03 16:58:01 -08007887 if (hba->devfreq)
7888 ufshcd_suspend_clkscaling(hba);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007889 ufshcd_setup_clocks(hba, false);
7890 ufshcd_setup_hba_vreg(hba, false);
7891 hba->is_powered = false;
Bean Huo09750062020-01-20 14:08:14 +01007892 ufs_put_device_desc(hba);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007893 }
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007894}
7895
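/*
 * Send a REQUEST SENSE command to the given W-LU; used to clear a pending
 * unit attention condition before issuing power management commands.
 */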
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007896static int
7897ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307898{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007899 unsigned char cmd[6] = {REQUEST_SENSE,
7900 0,
7901 0,
7902 0,
Avri Altman09a5a242018-11-22 20:04:56 +02007903 UFS_SENSE_SIZE,
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007904 0};
7905 char *buffer;
7906 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307907
Avri Altman09a5a242018-11-22 20:04:56 +02007908 buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007909 if (!buffer) {
7910 ret = -ENOMEM;
7911 goto out;
7912 }
7913
Christoph Hellwigfcbfffe2017-02-23 16:02:37 +01007914 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
Avri Altman09a5a242018-11-22 20:04:56 +02007915 UFS_SENSE_SIZE, NULL, NULL,
Christoph Hellwigfcbfffe2017-02-23 16:02:37 +01007916 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007917 if (ret)
7918 pr_err("%s: failed with err %d\n", __func__, ret);
7919
7920 kfree(buffer);
7921out:
7922 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307923}
7924
7925/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007926 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7927 * power mode
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307928 * @hba: per adapter instance
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007929 * @pwr_mode: device power mode to set
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307930 *
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007931 * Returns 0 if requested power mode is set successfully
7932 * Returns non-zero if failed to set the requested power mode
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307933 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007934static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7935 enum ufs_dev_pwr_mode pwr_mode)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307936{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007937 unsigned char cmd[6] = { START_STOP };
7938 struct scsi_sense_hdr sshdr;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03007939 struct scsi_device *sdp;
7940 unsigned long flags;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007941 int ret;
7942
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03007943 spin_lock_irqsave(hba->host->host_lock, flags);
7944 sdp = hba->sdev_ufs_device;
7945 if (sdp) {
7946 ret = scsi_device_get(sdp);
7947 if (!ret && !scsi_device_online(sdp)) {
7948 ret = -ENODEV;
7949 scsi_device_put(sdp);
7950 }
7951 } else {
7952 ret = -ENODEV;
7953 }
7954 spin_unlock_irqrestore(hba->host->host_lock, flags);
7955
7956 if (ret)
7957 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007958
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307959 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007960 * If scsi commands fail, the scsi mid-layer schedules scsi error-
7961 * handling, which would wait for host to be resumed. Since we know
7962 * we are functional while we are here, skip host resume in error
7963 * handling context.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307964 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007965 hba->host->eh_noresume = 1;
7966 if (hba->wlun_dev_clr_ua) {
7967 ret = ufshcd_send_request_sense(hba, sdp);
7968 if (ret)
7969 goto out;
7970 /* Unit attention condition is cleared now */
7971 hba->wlun_dev_clr_ua = false;
7972 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307973
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007974 cmd[4] = pwr_mode << 4;
7975
7976 /*
7977 * Current function would be generally called from the power management
Christoph Hellwige8064022016-10-20 15:12:13 +02007978 * callbacks hence set the RQF_PM flag so that it doesn't resume the
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007979 * already suspended children.
7980 */
Christoph Hellwigfcbfffe2017-02-23 16:02:37 +01007981 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7982 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007983 if (ret) {
7984 sdev_printk(KERN_WARNING, sdp,
Hannes Reineckeef613292014-10-24 14:27:00 +02007985 "START_STOP failed for power mode: %d, result %x\n",
7986 pwr_mode, ret);
Johannes Thumshirnc65be1a2018-06-25 13:20:58 +02007987 if (driver_byte(ret) == DRIVER_SENSE)
Hannes Reinecke21045512015-01-08 07:43:46 +01007988 scsi_print_sense_hdr(sdp, NULL, &sshdr);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007989 }
7990
7991 if (!ret)
7992 hba->curr_dev_pwr_mode = pwr_mode;
7993out:
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03007994 scsi_device_put(sdp);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007995 hba->host->eh_noresume = 0;
7996 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307997}
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307998
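/*
 * Move the UniPro link to the requested power state. Hibern8 is entered
 * both for the HIBERN8 target state and, as a faster alternative to a DME
 * reset, before forcing the link off. The link is left active when
 * auto-bkops must keep the device reachable.
 */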
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007999static int ufshcd_link_state_transition(struct ufs_hba *hba,
8000 enum uic_link_state req_link_state,
8001 int check_for_bkops)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308002{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008003 int ret = 0;
8004
8005 if (req_link_state == hba->uic_link_state)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308006 return 0;
8007
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008008 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8009 ret = ufshcd_uic_hibern8_enter(hba);
8010 if (!ret)
8011 ufshcd_set_link_hibern8(hba);
8012 else
8013 goto out;
8014 }
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308015 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008016 * If autobkops is enabled, link can't be turned off because
8017 * turning off the link would also turn off the device.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308018 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008019 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
Dan Carpenterdc30c9e2019-12-13 13:49:35 +03008020 (!check_for_bkops || !hba->auto_bkops_enabled)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008021 /*
Yaniv Gardif3099fb2016-03-10 17:37:17 +02008022 * Let's make sure that the link is in low power mode; we do
8023 * this currently by putting the link in Hibern8. Another way to
8024 * put the link in low power mode is to send a DME end point reset
8025 * to the device and then a DME reset command to the local
8026 * UniPro. But putting the link in Hibern8 is much faster.
8027 */
8028 ret = ufshcd_uic_hibern8_enter(hba);
8029 if (ret)
8030 goto out;
8031 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008032 * Change controller state to "reset state" which
8033 * should also put the link in off/reset state
8034 */
Yaniv Gardi596585a2016-03-10 17:37:08 +02008035 ufshcd_hba_stop(hba, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008036 /*
8037 * TODO: Check if we need any delay to make sure that
8038 * controller is reset
8039 */
8040 ufshcd_set_link_off(hba);
8041 }
8042
8043out:
8044 return ret;
8045}
8046
8047static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8048{
8049 /*
Yaniv Gardib799fdf2016-03-10 17:37:18 +02008050 * It seems some UFS devices may keep drawing more than sleep current
8051 * (at least for 500us) from UFS rails (especially from VCCQ rail).
8052 * To avoid this situation, add 2ms delay before putting these UFS
8053 * rails in LPM mode.
8054 */
8055 if (!ufshcd_is_link_active(hba) &&
8056 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
8057 usleep_range(2000, 2100);
8058
8059 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008060 * If the UFS device is in UFS_Sleep, turn off the VCC rail to save some
8061 * power.
8062 *
8063 * If UFS device and link is in OFF state, all power supplies (VCC,
8064 * VCCQ, VCCQ2) can be turned off if power on write protect is not
8065 * required. If UFS link is inactive (Hibern8 or OFF state) and device
8066 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
8067 *
8068 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
8069 * in low power state which would save some power.
Asutosh Das3d17b9b2020-04-22 14:41:42 -07008070 *
8071 * If Write Booster is enabled and the device needs to flush the WB
8072 * buffer OR if bkops status is urgent for WB, keep Vcc on.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008073 */
8074 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8075 !hba->dev_info.is_lu_power_on_wp) {
8076 ufshcd_setup_vreg(hba, false);
8077 } else if (!ufshcd_is_ufs_dev_active(hba)) {
Asutosh Das3d17b9b2020-04-22 14:41:42 -07008078 if (!hba->dev_info.keep_vcc_on)
8079 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008080 if (!ufshcd_is_link_active(hba)) {
8081 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8082 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8083 }
8084 }
8085}
8086
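/*
 * Bring the regulators back to high power mode, undoing what
 * ufshcd_vreg_set_lpm() relaxed during suspend.
 */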
8087static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8088{
8089 int ret = 0;
8090
8091 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8092 !hba->dev_info.is_lu_power_on_wp) {
8093 ret = ufshcd_setup_vreg(hba, true);
8094 } else if (!ufshcd_is_ufs_dev_active(hba)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008095 if (!ret && !ufshcd_is_link_active(hba)) {
8096 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8097 if (ret)
8098 goto vcc_disable;
8099 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8100 if (ret)
8101 goto vccq_lpm;
8102 }
Subhash Jadavani69d72ac2016-10-27 17:26:24 -07008103 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008104 }
8105 goto out;
8106
8107vccq_lpm:
8108 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8109vcc_disable:
8110 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8111out:
8112 return ret;
8113}
8114
8115static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8116{
8117 if (ufshcd_is_link_off(hba))
8118 ufshcd_setup_hba_vreg(hba, false);
8119}
8120
8121static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8122{
8123 if (ufshcd_is_link_off(hba))
8124 ufshcd_setup_hba_vreg(hba, true);
8125}
8126
8127/**
8128 * ufshcd_suspend - helper function for suspend operations
8129 * @hba: per adapter instance
8130 * @pm_op: desired low power operation type
8131 *
8132 * This function will try to put the UFS device and link into low power
8133 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
8134 * (System PM level).
8135 *
8136 * If this function is called during shutdown, it will make sure that
8137 * both the UFS device and UFS link are powered off.
8138 *
8139 * NOTE: UFS device & link must be active before we enter this function.
8140 *
8141 * Returns 0 for success and non-zero for failure
8142 */
8143static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8144{
8145 int ret = 0;
8146 enum ufs_pm_level pm_lvl;
8147 enum ufs_dev_pwr_mode req_dev_pwr_mode;
8148 enum uic_link_state req_link_state;
8149
8150 hba->pm_op_in_progress = 1;
8151 if (!ufshcd_is_shutdown_pm(pm_op)) {
8152 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
8153 hba->rpm_lvl : hba->spm_lvl;
8154 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8155 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8156 } else {
8157 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8158 req_link_state = UIC_LINK_OFF_STATE;
8159 }
8160
8161 /*
8162 * If we can't transition into any of the low power modes
8163 * just gate the clocks.
8164 */
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008165 ufshcd_hold(hba, false);
8166 hba->clk_gating.is_suspended = true;
8167
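	/*
	 * Stop any pending or ongoing clock scaling work so that it does not
	 * race with the device power mode and link state changes below.
	 */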
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08008168 if (hba->clk_scaling.is_allowed) {
8169 cancel_work_sync(&hba->clk_scaling.suspend_work);
8170 cancel_work_sync(&hba->clk_scaling.resume_work);
8171 ufshcd_suspend_clkscaling(hba);
8172 }
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07008173
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008174 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8175 req_link_state == UIC_LINK_ACTIVE_STATE) {
8176 goto disable_clks;
8177 }
8178
8179 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8180 (req_link_state == hba->uic_link_state))
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07008181 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008182
8183	/* UFS device & link must be active before we enter this function */
8184 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8185 ret = -EINVAL;
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07008186 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008187 }
8188
8189 if (ufshcd_is_runtime_pm(pm_op)) {
Subhash Jadavani374a2462014-09-25 15:32:35 +03008190 if (ufshcd_can_autobkops_during_suspend(hba)) {
8191 /*
8192 * The device is idle with no requests in the queue,
8193			 * so allow background operations if bkops status shows
8194 * that performance might be impacted.
8195 */
8196 ret = ufshcd_urgent_bkops(hba);
8197 if (ret)
8198 goto enable_gating;
8199 } else {
8200 /* make sure that auto bkops is disabled */
8201 ufshcd_disable_auto_bkops(hba);
8202 }
Asutosh Das3d17b9b2020-04-22 14:41:42 -07008203 /*
8204		 * With WB enabled, keep VCC ON if bkops is enabled or if the
8205		 * configured WB buffer is 70% full, so that the device can
8206		 * flush the WB buffer.
8207 */
8208 if ((hba->auto_bkops_enabled && ufshcd_wb_sup(hba)) ||
8209 ufshcd_wb_keep_vcc_on(hba))
8210 hba->dev_info.keep_vcc_on = true;
8211 else
8212 hba->dev_info.keep_vcc_on = false;
8213	} else {
8214 hba->dev_info.keep_vcc_on = false;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008215 }
8216
8217 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
Asutosh Das3d17b9b2020-04-22 14:41:42 -07008218 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
8219 !ufshcd_is_runtime_pm(pm_op))) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008220 /* ensure that bkops is disabled */
8221 ufshcd_disable_auto_bkops(hba);
8222 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8223 if (ret)
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008224 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008225 }
8226
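	/* Make sure any pending exception event handling has completed */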
Sayali Lokhande2824ec92020-02-10 19:40:44 -08008227 flush_work(&hba->eeh_work);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008228 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
8229 if (ret)
8230 goto set_dev_active;
8231
8232 ufshcd_vreg_set_lpm(hba);
8233
8234disable_clks:
8235 /*
8236 * Call vendor specific suspend callback. As these callbacks may access
8237	 * vendor specific host controller register space, call them while the
8238	 * host clocks are still ON.
8239 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008240 ret = ufshcd_vops_suspend(hba, pm_op);
8241 if (ret)
8242 goto set_link_active;
Stanley Chudcb6cec2019-12-07 20:22:00 +08008243 /*
8244	 * Disable the host irq as there won't be any host controller
8245	 * transactions expected till resume.
8246 */
8247 ufshcd_disable_irq(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008248
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008249 if (!ufshcd_is_link_active(hba))
8250 ufshcd_setup_clocks(hba, false);
8251 else
8252 /* If link is active, device ref_clk can't be switched off */
8253 __ufshcd_setup_clocks(hba, false, true);
8254
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008255 hba->clk_gating.state = CLKS_OFF;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008256 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
Stanley Chudcb6cec2019-12-07 20:22:00 +08008257
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008258 /* Put the host controller in low power mode if possible */
8259 ufshcd_hba_vreg_set_lpm(hba);
8260 goto out;
8261
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008262set_link_active:
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08008263 if (hba->clk_scaling.is_allowed)
8264 ufshcd_resume_clkscaling(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008265 ufshcd_vreg_set_hpm(hba);
8266 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
8267 ufshcd_set_link_active(hba);
8268 else if (ufshcd_is_link_off(hba))
8269 ufshcd_host_reset_and_restore(hba);
8270set_dev_active:
8271 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8272 ufshcd_disable_auto_bkops(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008273enable_gating:
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08008274 if (hba->clk_scaling.is_allowed)
8275 ufshcd_resume_clkscaling(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008276 hba->clk_gating.is_suspended = false;
8277 ufshcd_release(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008278out:
8279 hba->pm_op_in_progress = 0;
Stanley Chu8808b4e2019-07-10 21:38:21 +08008280 if (ret)
8281 ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008282 return ret;
8283}
8284
8285/**
8286 * ufshcd_resume - helper function for resume operations
8287 * @hba: per adapter instance
8288 * @pm_op: runtime PM or system PM
8289 *
8290 * This function basically brings the UFS device, UniPro link and controller
8291 * to active state.
8292 *
8293 * Returns 0 for success and non-zero for failure
8294 */
8295static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8296{
8297 int ret;
8298 enum uic_link_state old_link_state;
8299
8300 hba->pm_op_in_progress = 1;
8301 old_link_state = hba->uic_link_state;
8302
8303 ufshcd_hba_vreg_set_hpm(hba);
8304 /* Make sure clocks are enabled before accessing controller */
8305 ret = ufshcd_setup_clocks(hba, true);
8306 if (ret)
8307 goto out;
8308
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008309 /* enable the host irq as host controller would be active soon */
Can Guo5231d382019-12-05 02:14:46 +00008310 ufshcd_enable_irq(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008311
8312 ret = ufshcd_vreg_set_hpm(hba);
8313 if (ret)
8314 goto disable_irq_and_vops_clks;
8315
8316 /*
8317 * Call vendor specific resume callback. As these callbacks may access
8318 * vendor specific host controller register space call them when the
8319 * host clocks are ON.
8320 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008321 ret = ufshcd_vops_resume(hba, pm_op);
8322 if (ret)
8323 goto disable_vreg;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008324
8325 if (ufshcd_is_link_hibern8(hba)) {
8326 ret = ufshcd_uic_hibern8_exit(hba);
8327 if (!ret)
8328 ufshcd_set_link_active(hba);
8329 else
8330 goto vendor_suspend;
8331 } else if (ufshcd_is_link_off(hba)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008332 /*
Asutosh Das089f5b62020-04-13 23:14:48 -07008333 * A full initialization of the host and the device is
8334 * required since the link was put to off during suspend.
8335 */
8336 ret = ufshcd_reset_and_restore(hba);
8337 /*
8338 * ufshcd_reset_and_restore() should have already
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008339 * set the link state as active
8340 */
8341 if (ret || !ufshcd_is_link_active(hba))
8342 goto vendor_suspend;
8343 }
8344
8345 if (!ufshcd_is_ufs_dev_active(hba)) {
8346 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8347 if (ret)
8348 goto set_old_link_state;
8349 }
8350
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08008351 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8352 ufshcd_enable_auto_bkops(hba);
8353 else
8354 /*
8355 * If BKOPs operations are urgently needed at this moment then
8356 * keep auto-bkops enabled or else disable it.
8357 */
8358 ufshcd_urgent_bkops(hba);
8359
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008360 hba->clk_gating.is_suspended = false;
8361
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08008362 if (hba->clk_scaling.is_allowed)
8363 ufshcd_resume_clkscaling(hba);
Sahitya Tummala856b3482014-09-25 15:32:34 +03008364
Adrian Hunterad448372018-03-20 15:07:38 +02008365 /* Enable Auto-Hibernate if configured */
8366 ufshcd_auto_hibern8_enable(hba);
8367
Can Guo71d848b2019-11-14 22:09:26 -08008368 /* Schedule clock gating in case of no access to UFS device yet */
8369 ufshcd_release(hba);
8370
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008371 goto out;
8372
8373set_old_link_state:
8374 ufshcd_link_state_transition(hba, old_link_state, 0);
8375vendor_suspend:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008376 ufshcd_vops_suspend(hba, pm_op);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008377disable_vreg:
8378 ufshcd_vreg_set_lpm(hba);
8379disable_irq_and_vops_clks:
8380 ufshcd_disable_irq(hba);
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08008381 if (hba->clk_scaling.is_allowed)
8382 ufshcd_suspend_clkscaling(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008383 ufshcd_setup_clocks(hba, false);
8384out:
8385 hba->pm_op_in_progress = 0;
Stanley Chu8808b4e2019-07-10 21:38:21 +08008386 if (ret)
8387 ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008388 return ret;
8389}
8390
8391/**
8392 * ufshcd_system_suspend - system suspend routine
8393 * @hba: per adapter instance
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008394 *
8395 * Check the description of ufshcd_suspend() function for more details.
8396 *
8397 * Returns 0 for success and non-zero for failure
8398 */
8399int ufshcd_system_suspend(struct ufs_hba *hba)
8400{
8401 int ret = 0;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008402 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008403
8404 if (!hba || !hba->is_powered)
Dolev Raviv233b5942014-10-23 13:25:14 +03008405 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008406
subhashj@codeaurora.org0b257732016-11-23 16:33:08 -08008407 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8408 hba->curr_dev_pwr_mode) &&
8409 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8410 hba->uic_link_state))
8411 goto out;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008412
subhashj@codeaurora.org0b257732016-11-23 16:33:08 -08008413 if (pm_runtime_suspended(hba->dev)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008414 /*
8415 * UFS device and/or UFS link low power states during runtime
8416		 * suspend seem to be different from what is expected during
8417		 * system suspend. Hence runtime resume the device & link and
8418		 * let the system suspend low power states take effect.
8419		 * TODO: If resume takes a long time, we might optimize it in
8420		 * the future by not resuming everything if possible.
8421 */
8422 ret = ufshcd_runtime_resume(hba);
8423 if (ret)
8424 goto out;
8425 }
8426
8427 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8428out:
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008429 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8430 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08008431 hba->curr_dev_pwr_mode, hba->uic_link_state);
Dolev Ravive7850602014-09-25 15:32:36 +03008432 if (!ret)
8433 hba->is_sys_suspended = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008434 return ret;
8435}
8436EXPORT_SYMBOL(ufshcd_system_suspend);
8437
8438/**
8439 * ufshcd_system_resume - system resume routine
8440 * @hba: per adapter instance
8441 *
8442 * Returns 0 for success and non-zero for failure
8443 */
8444
8445int ufshcd_system_resume(struct ufs_hba *hba)
8446{
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008447 int ret = 0;
8448 ktime_t start = ktime_get();
8449
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07008450 if (!hba)
8451 return -EINVAL;
8452
8453 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008454 /*
8455 * Let the runtime resume take care of resuming
8456 * if runtime suspended.
8457 */
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008458 goto out;
8459 else
8460 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8461out:
8462 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8463 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08008464 hba->curr_dev_pwr_mode, hba->uic_link_state);
Stanley Chuce9e7bc2019-01-07 22:19:34 +08008465 if (!ret)
8466 hba->is_sys_suspended = false;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008467 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008468}
8469EXPORT_SYMBOL(ufshcd_system_resume);
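/*
 * Illustrative sketch (not part of this driver): a hypothetical bus glue
 * driver would typically wire the system PM helpers above into its
 * dev_pm_ops roughly as follows, assuming the ufs_hba pointer was stored
 * as driver data during probe:
 *
 *	static int ufs_glue_system_suspend(struct device *dev)
 *	{
 *		return ufshcd_system_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int ufs_glue_system_resume(struct device *dev)
 *	{
 *		return ufshcd_system_resume(dev_get_drvdata(dev));
 *	}
 */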
8470
8471/**
8472 * ufshcd_runtime_suspend - runtime suspend routine
8473 * @hba: per adapter instance
8474 *
8475 * Check the description of ufshcd_suspend() function for more details.
8476 *
8477 * Returns 0 for success and non-zero for failure
8478 */
8479int ufshcd_runtime_suspend(struct ufs_hba *hba)
8480{
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008481 int ret = 0;
8482 ktime_t start = ktime_get();
8483
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07008484 if (!hba)
8485 return -EINVAL;
8486
8487 if (!hba->is_powered)
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008488 goto out;
8489 else
8490 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8491out:
8492 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8493 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08008494 hba->curr_dev_pwr_mode, hba->uic_link_state);
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008495 return ret;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308496}
8497EXPORT_SYMBOL(ufshcd_runtime_suspend);
8498
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008499/**
8500 * ufshcd_runtime_resume - runtime resume routine
8501 * @hba: per adapter instance
8502 *
8503 * This function basically brings the UFS device, UniPro link and controller
8504 * to active state. Following operations are done in this function:
8505 *
8506 * 1. Turn on all the controller related clocks
8507 * 2. Bring the UniPro link out of Hibernate state
8508 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
8509 * to active state.
8510 * 4. If auto-bkops is enabled on the device, disable it.
8511 *
8512 * So the following would be the possible power state after this function
8513 * returns successfully:
8514 * S1: UFS device in Active state with VCC rail ON
8515 * UniPro link in Active state
8516 * All the UFS/UniPro controller clocks are ON
8517 *
8518 * Returns 0 for success and non-zero for failure
8519 */
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308520int ufshcd_runtime_resume(struct ufs_hba *hba)
8521{
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008522 int ret = 0;
8523 ktime_t start = ktime_get();
8524
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07008525 if (!hba)
8526 return -EINVAL;
8527
8528 if (!hba->is_powered)
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008529 goto out;
8530 else
8531 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
8532out:
8533 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
8534 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08008535 hba->curr_dev_pwr_mode, hba->uic_link_state);
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08008536 return ret;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308537}
8538EXPORT_SYMBOL(ufshcd_runtime_resume);
8539
8540int ufshcd_runtime_idle(struct ufs_hba *hba)
8541{
8542 return 0;
8543}
8544EXPORT_SYMBOL(ufshcd_runtime_idle);
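/*
 * Illustrative sketch (not part of this driver): the runtime PM helpers
 * above are meant to be called from a glue driver's runtime PM callbacks,
 * e.g. (hypothetical function names, hba stored as driver data):
 *
 *	static int ufs_glue_runtime_suspend(struct device *dev)
 *	{
 *		return ufshcd_runtime_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int ufs_glue_runtime_resume(struct device *dev)
 *	{
 *		return ufshcd_runtime_resume(dev_get_drvdata(dev));
 *	}
 */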
8545
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308546/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008547 * ufshcd_shutdown - shutdown routine
8548 * @hba: per adapter instance
8549 *
8550 * This function would power off both UFS device and UFS link.
8551 *
8552 * Returns 0 always to allow force shutdown even in case of errors.
8553 */
8554int ufshcd_shutdown(struct ufs_hba *hba)
8555{
8556 int ret = 0;
8557
Stanley Chuf51913e2019-09-18 12:20:38 +08008558 if (!hba->is_powered)
8559 goto out;
8560
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008561 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
8562 goto out;
8563
8564 if (pm_runtime_suspended(hba->dev)) {
8565 ret = ufshcd_runtime_resume(hba);
8566 if (ret)
8567 goto out;
8568 }
8569
8570 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
8571out:
8572 if (ret)
8573 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
8574 /* allow force shutdown even in case of errors */
8575 return 0;
8576}
8577EXPORT_SYMBOL(ufshcd_shutdown);
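/*
 * Note: bus glue drivers typically call ufshcd_shutdown() from their
 * .shutdown() hook so that the UFS device and link are powered off cleanly
 * on system power-off or reboot.
 */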
8578
8579/**
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308580 * ufshcd_remove - de-allocate SCSI host and host memory space
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308581 * data structures
Bart Van Assche8aa29f12018-03-01 15:07:20 -08008582 * @hba: per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308583 */
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308584void ufshcd_remove(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308585{
Avri Altmandf032bf2018-10-07 17:30:35 +03008586 ufs_bsg_remove(hba);
Stanislav Nijnikovcbb68132018-02-15 14:14:01 +02008587 ufs_sysfs_remove_nodes(hba->dev);
Bart Van Assche69a6c262019-12-09 10:13:09 -08008588 blk_cleanup_queue(hba->tmf_queue);
8589 blk_mq_free_tag_set(&hba->tmf_tag_set);
Bart Van Assche7252a362019-12-09 10:13:08 -08008590 blk_cleanup_queue(hba->cmd_queue);
Akinobu Mitacfdf9c92013-07-30 00:36:03 +05308591 scsi_remove_host(hba->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308592 /* disable interrupts */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05308593 ufshcd_disable_intr(hba, hba->intr_mask);
Yaniv Gardi596585a2016-03-10 17:37:08 +02008594 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308595
Vivek Gautameebcc192018-08-07 23:17:39 +05308596 ufshcd_exit_clk_scaling(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008597 ufshcd_exit_clk_gating(hba);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08008598 if (ufshcd_is_clkscaling_supported(hba))
8599 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008600 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308601}
8602EXPORT_SYMBOL_GPL(ufshcd_remove);
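/*
 * Note: callers of ufshcd_init() typically pair ufshcd_remove() with
 * ufshcd_dealloc_host() (below) in their remove path to drop the SCSI host
 * reference taken by ufshcd_alloc_host().
 */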
8603
8604/**
Yaniv Gardi47555a52015-10-28 13:15:49 +02008605 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
8606 * @hba: pointer to Host Bus Adapter (HBA)
8607 */
8608void ufshcd_dealloc_host(struct ufs_hba *hba)
8609{
8610 scsi_host_put(hba->host);
8611}
8612EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
8613
8614/**
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09008615 * ufshcd_set_dma_mask - Set dma mask based on the controller
8616 * addressing capability
8617 * @hba: per adapter instance
8618 *
8619 * Returns 0 for success, non-zero for failure
8620 */
8621static int ufshcd_set_dma_mask(struct ufs_hba *hba)
8622{
8623 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
8624 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
8625 return 0;
8626 }
8627 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
8628}
8629
8630/**
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008631 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308632 * @dev: pointer to device handle
8633 * @hba_handle: driver private handle
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308634 * Returns 0 on success, non-zero value on failure
8635 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008636int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308637{
8638 struct Scsi_Host *host;
8639 struct ufs_hba *hba;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008640 int err = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308641
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308642 if (!dev) {
8643 dev_err(dev,
8644 "Invalid memory reference for dev is NULL\n");
8645 err = -ENODEV;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308646 goto out_error;
8647 }
8648
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308649 host = scsi_host_alloc(&ufshcd_driver_template,
8650 sizeof(struct ufs_hba));
8651 if (!host) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308652 dev_err(dev, "scsi_host_alloc failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308653 err = -ENOMEM;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308654 goto out_error;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308655 }
8656 hba = shost_priv(host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308657 hba->host = host;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308658 hba->dev = dev;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008659 *hba_handle = hba;
Subhash Jadavani9e1e8a72018-10-16 14:29:41 +05308660 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008661
Szymon Mielczarek566ec9a2017-06-05 11:36:54 +03008662 INIT_LIST_HEAD(&hba->clk_list_head);
8663
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008664out_error:
8665 return err;
8666}
8667EXPORT_SYMBOL(ufshcd_alloc_host);
8668
Bart Van Assche69a6c262019-12-09 10:13:09 -08008669/* This function exists because blk_mq_alloc_tag_set() requires this. */
8670static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
8671 const struct blk_mq_queue_data *qd)
8672{
8673 WARN_ON_ONCE(true);
8674 return BLK_STS_NOTSUPP;
8675}
8676
8677static const struct blk_mq_ops ufshcd_tmf_ops = {
8678 .queue_rq = ufshcd_queue_tmf,
8679};
8680
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008681/**
8682 * ufshcd_init - Driver initialization routine
8683 * @hba: per-adapter instance
8684 * @mmio_base: base register address
8685 * @irq: Interrupt line of device
8686 * Returns 0 on success, non-zero value on failure
8687 */
8688int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8689{
8690 int err;
8691 struct Scsi_Host *host = hba->host;
8692 struct device *dev = hba->dev;
8693
8694 if (!mmio_base) {
8695 dev_err(hba->dev,
8696 "Invalid memory reference for mmio_base is NULL\n");
8697 err = -ENODEV;
8698 goto out_error;
8699 }
8700
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308701 hba->mmio_base = mmio_base;
8702 hba->irq = irq;
Stanley Chub9dc8ac2020-03-18 18:40:14 +08008703 hba->hba_enable_delay_us = 1000;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308704
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008705 err = ufshcd_hba_init(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008706 if (err)
8707 goto out_error;
8708
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308709 /* Read capabilities registers */
8710 ufshcd_hba_capabilities(hba);
8711
8712 /* Get UFS version supported by the controller */
8713 hba->ufs_version = ufshcd_get_ufs_version(hba);
8714
Yaniv Gardic01848c2016-12-05 19:25:02 -08008715 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8716 (hba->ufs_version != UFSHCI_VERSION_11) &&
8717 (hba->ufs_version != UFSHCI_VERSION_20) &&
8718 (hba->ufs_version != UFSHCI_VERSION_21))
8719 dev_err(hba->dev, "invalid UFS version 0x%x\n",
8720 hba->ufs_version);
8721
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05308722 /* Get Interrupt bit mask per version */
8723 hba->intr_mask = ufshcd_get_intr_mask(hba);
8724
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09008725 err = ufshcd_set_dma_mask(hba);
8726 if (err) {
8727 dev_err(hba->dev, "set dma mask failed\n");
8728 goto out_disable;
8729 }
8730
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308731 /* Allocate memory for host memory space */
8732 err = ufshcd_memory_alloc(hba);
8733 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308734 dev_err(hba->dev, "Memory allocation failed\n");
8735 goto out_disable;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308736 }
8737
8738 /* Configure LRB */
8739 ufshcd_host_memory_configure(hba);
8740
8741 host->can_queue = hba->nutrs;
8742 host->cmd_per_lun = hba->nutrs;
8743 host->max_id = UFSHCD_MAX_ID;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03008744 host->max_lun = UFS_MAX_LUNS;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308745 host->max_channel = UFSHCD_MAX_CHANNEL;
8746 host->unique_id = host->host_no;
Avri Altmana851b2b2018-10-07 17:30:34 +03008747 host->max_cmd_len = UFS_CDB_SIZE;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308748
Dolev Raviv7eb584d2014-09-25 15:32:31 +03008749 hba->max_pwr_info.is_valid = false;
8750
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308751 /* Initialize work queues */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05308752 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308753 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308754
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05308755 /* Initialize UIC command mutex */
8756 mutex_init(&hba->uic_cmd_mutex);
8757
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05308758 /* Initialize mutex for device management commands */
8759 mutex_init(&hba->dev_cmd.lock);
8760
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08008761 init_rwsem(&hba->clk_scaling_lock);
8762
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008763 ufshcd_init_clk_gating(hba);
Yaniv Gardi199ef132016-03-10 17:37:06 +02008764
Vivek Gautameebcc192018-08-07 23:17:39 +05308765 ufshcd_init_clk_scaling(hba);
8766
Yaniv Gardi199ef132016-03-10 17:37:06 +02008767 /*
8768 * In order to avoid any spurious interrupt immediately after
8769 * registering UFS controller interrupt handler, clear any pending UFS
8770 * interrupt status and disable all the UFS interrupts.
8771 */
8772 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8773 REG_INTERRUPT_STATUS);
8774 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8775 /*
8776 * Make sure that UFS interrupts are disabled and any pending interrupt
8777 * status is cleared before registering UFS interrupt handler.
8778 */
8779 mb();
8780
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308781 /* IRQ registration */
Seungwon Jeon2953f852013-06-27 13:31:54 +09008782 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308783 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308784 dev_err(hba->dev, "request irq failed\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008785 goto exit_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008786 } else {
8787 hba->is_irq_enabled = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308788 }
8789
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308790 err = scsi_add_host(host, hba->dev);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308791 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308792 dev_err(hba->dev, "scsi_add_host failed\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008793 goto exit_gating;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308794 }
8795
Bart Van Assche7252a362019-12-09 10:13:08 -08008796 hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
8797 if (IS_ERR(hba->cmd_queue)) {
8798 err = PTR_ERR(hba->cmd_queue);
8799 goto out_remove_scsi_host;
8800 }
8801
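	/*
	 * The tmf_tag_set/tmf_queue pair below is only used to allocate unique
	 * tags for Task Management Functions; requests are never dispatched on
	 * it (see ufshcd_queue_tmf() above).
	 */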
Bart Van Assche69a6c262019-12-09 10:13:09 -08008802 hba->tmf_tag_set = (struct blk_mq_tag_set) {
8803 .nr_hw_queues = 1,
8804 .queue_depth = hba->nutmrs,
8805 .ops = &ufshcd_tmf_ops,
8806 .flags = BLK_MQ_F_NO_SCHED,
8807 };
8808 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
8809 if (err < 0)
8810 goto free_cmd_queue;
8811 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
8812 if (IS_ERR(hba->tmf_queue)) {
8813 err = PTR_ERR(hba->tmf_queue);
8814 goto free_tmf_tag_set;
8815 }
8816
Bjorn Anderssond8d9f792019-08-28 12:17:54 -07008817 /* Reset the attached device */
8818 ufshcd_vops_device_reset(hba);
8819
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05308820 /* Host controller enable */
8821 err = ufshcd_hba_enable(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308822 if (err) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05308823 dev_err(hba->dev, "Host controller enable failed\n");
Dolev Raviv66cc8202016-12-22 18:39:42 -08008824 ufshcd_print_host_regs(hba);
Gilad Broner6ba65582017-02-03 16:57:28 -08008825 ufshcd_print_host_state(hba);
Bart Van Assche69a6c262019-12-09 10:13:09 -08008826 goto free_tmf_queue;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308827 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05308828
subhashj@codeaurora.org0c8f7582016-12-22 18:41:11 -08008829 /*
8830 * Set the default power management level for runtime and system PM.
8831 * Default power saving mode is to keep UFS link in Hibern8 state
8832 * and UFS device in sleep state.
8833 */
8834 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8835 UFS_SLEEP_PWR_MODE,
8836 UIC_LINK_HIBERN8_STATE);
8837 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8838 UFS_SLEEP_PWR_MODE,
8839 UIC_LINK_HIBERN8_STATE);
8840
Adrian Hunterad448372018-03-20 15:07:38 +02008841	/* Set the default auto-hibernate idle timer value to 150 ms */
Stanley Chuf571b372019-05-21 14:44:53 +08008842 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
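		/* 150 units with timer scale 3 (1 ms granularity) => 150 ms */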
Adrian Hunterad448372018-03-20 15:07:38 +02008843 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
8844 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
8845 }
8846
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05308847 /* Hold auto suspend until async scan completes */
8848 pm_runtime_get_sync(dev);
Subhash Jadavani38135532018-05-03 16:37:18 +05308849 atomic_set(&hba->scsi_block_reqs_cnt, 0);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008850 /*
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08008851	 * We are assuming that the device wasn't put in sleep/power-down
8852	 * state during the boot stage before the kernel took over.
8853 * This assumption helps avoid doing link startup twice during
8854 * ufshcd_probe_hba().
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008855 */
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08008856 ufshcd_set_ufs_dev_active(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008857
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05308858 async_schedule(ufshcd_async_scan, hba);
Stanislav Nijnikovcbb68132018-02-15 14:14:01 +02008859 ufs_sysfs_add_nodes(hba->dev);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05308860
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308861 return 0;
8862
Bart Van Assche69a6c262019-12-09 10:13:09 -08008863free_tmf_queue:
8864 blk_cleanup_queue(hba->tmf_queue);
8865free_tmf_tag_set:
8866 blk_mq_free_tag_set(&hba->tmf_tag_set);
Bart Van Assche7252a362019-12-09 10:13:08 -08008867free_cmd_queue:
8868 blk_cleanup_queue(hba->cmd_queue);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308869out_remove_scsi_host:
8870 scsi_remove_host(hba->host);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008871exit_gating:
Vivek Gautameebcc192018-08-07 23:17:39 +05308872 ufshcd_exit_clk_scaling(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008873 ufshcd_exit_clk_gating(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308874out_disable:
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008875 hba->is_irq_enabled = false;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008876 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308877out_error:
8878 return err;
8879}
8880EXPORT_SYMBOL_GPL(ufshcd_init);
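/*
 * Illustrative sketch (not part of this driver): a hypothetical glue
 * driver probe path would typically use the allocation/init helpers
 * roughly as follows (dev, pdev, mmio_base and irq are assumed to come
 * from the bus probe code; my_ufs_hba_vops is a hypothetical vendor ops
 * table):
 *
 *	struct ufs_hba *hba;
 *	int err;
 *
 *	err = ufshcd_alloc_host(dev, &hba);
 *	if (err)
 *		return err;
 *	hba->vops = &my_ufs_hba_vops;
 *	err = ufshcd_init(hba, mmio_base, irq);
 *	if (err) {
 *		ufshcd_dealloc_host(hba);
 *		return err;
 *	}
 *	platform_set_drvdata(pdev, hba);
 */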
8881
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308882MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
8883MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
Vinayak Holikattie0eca632013-02-25 21:44:33 +05308884MODULE_DESCRIPTION("Generic UFS host controller driver Core");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308885MODULE_LICENSE("GPL");
8886MODULE_VERSION(UFSHCD_DRIVER_VERSION);