blob: 533886233649add87a8087fd3e34a2ff568ed812 [file] [log] [blame]
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301/*
Vinayak Holikattie0eca632013-02-25 21:44:33 +05302 * Universal Flash Storage Host controller driver Core
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303 *
4 * This code is based on drivers/scsi/ufs/ufshcd.c
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05305 * Copyright (C) 2011-2013 Samsung India Software Operations
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02006 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307 *
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308 * Authors:
9 * Santosh Yaraganavi <santosh.sy@samsung.com>
10 * Vinayak Holikatti <h.vinayak@samsung.com>
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053011 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053016 * See the COPYING file in the top-level directory or visit
17 * <http://www.gnu.org/licenses/gpl-2.0.html>
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053018 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053024 * This program is provided "AS IS" and "WITH ALL FAULTS" and
25 * without warranty of any kind. You are solely responsible for
26 * determining the appropriateness of using and distributing
27 * the program and assume all risks associated with your exercise
28 * of rights with respect to the program, including but not limited
29 * to infringement of third party rights, the risks and costs of
30 * program errors, damage to or loss of data, programs or equipment,
31 * and unavailability or interruption of operations. Under no
32 * circumstances will the contributor of this Program be liable for
33 * any damages of any kind arising from your use or distribution of
34 * this program.
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +030035 *
36 * The Linux Foundation chooses to take subject only to the GPLv2
37 * license terms, and distributes only under these terms.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053038 */
39
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +053040#include <linux/async.h>
Sahitya Tummala856b3482014-09-25 15:32:34 +030041#include <linux/devfreq.h>
Yaniv Gardib573d482016-03-10 17:37:09 +020042#include <linux/nls.h>
Yaniv Gardi54b879b2016-03-10 17:37:05 +020043#include <linux/of.h>
Adrian Hunterad448372018-03-20 15:07:38 +020044#include <linux/bitfield.h>
Vinayak Holikattie0eca632013-02-25 21:44:33 +053045#include "ufshcd.h"
Yaniv Gardic58ab7a2016-03-10 17:37:10 +020046#include "ufs_quirks.h"
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +053047#include "unipro.h"
Stanislav Nijnikovcbb68132018-02-15 14:14:01 +020048#include "ufs-sysfs.h"
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053049
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -080050#define CREATE_TRACE_POINTS
51#include <trace/events/ufs.h>
52
Gilad Bronerdcea0bf2016-10-17 17:09:48 -070053#define UFSHCD_REQ_SENSE_SIZE 18
54
Seungwon Jeon2fbd0092013-06-26 22:39:27 +053055#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
56 UTP_TASK_REQ_COMPL |\
57 UFSHCD_ERROR_MASK)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +053058/* UIC command timeout, unit: ms */
59#define UIC_CMD_TIMEOUT 500
Seungwon Jeon2fbd0092013-06-26 22:39:27 +053060
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +053061/* NOP OUT retries waiting for NOP IN response */
62#define NOP_OUT_RETRIES 10
63/* Timeout after 30 msecs if NOP OUT hangs without response */
64#define NOP_OUT_TIMEOUT 30 /* msecs */
65
Dolev Raviv68078d52013-07-30 00:35:58 +053066/* Query request retries */
subhashj@codeaurora.org10fe5882016-11-23 16:31:52 -080067#define QUERY_REQ_RETRIES 3
Dolev Raviv68078d52013-07-30 00:35:58 +053068/* Query request timeout */
subhashj@codeaurora.org10fe5882016-11-23 16:31:52 -080069#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
Dolev Raviv68078d52013-07-30 00:35:58 +053070
Sujit Reddy Thummae2933132014-05-26 10:59:12 +053071/* Task management command timeout */
72#define TM_CMD_TIMEOUT 100 /* msecs */
73
Yaniv Gardi64238fb2016-02-01 15:02:43 +020074/* maximum number of retries for a general UIC command */
75#define UFS_UIC_COMMAND_RETRIES 3
76
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +030077/* maximum number of link-startup retries */
78#define DME_LINKSTARTUP_RETRIES 3
79
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +020080/* Maximum retries for Hibern8 enter */
81#define UIC_HIBERN8_ENTER_RETRIES 3
82
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +030083/* maximum number of reset retries before giving up */
84#define MAX_HOST_RESET_RETRIES 5
85
Dolev Raviv68078d52013-07-30 00:35:58 +053086/* Expose the flag value from utp_upiu_query.value */
87#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
88
Seungwon Jeon7d568652013-08-31 21:40:20 +053089/* Interrupt aggregation default timeout, unit: 40us */
90#define INT_AGGR_DEF_TO 0x02
91
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +030092#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
93 ({ \
94 int _ret; \
95 if (_on) \
96 _ret = ufshcd_enable_vreg(_dev, _vreg); \
97 else \
98 _ret = ufshcd_disable_vreg(_dev, _vreg); \
99 _ret; \
100 })
101
Tomas Winklerba809172018-06-14 11:14:09 +0300102#define ufshcd_hex_dump(prefix_str, buf, len) do { \
103 size_t __len = (len); \
104 print_hex_dump(KERN_ERR, prefix_str, \
105 __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
106 16, 4, buf, __len, false); \
107} while (0)
108
109int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
110 const char *prefix)
111{
112 u8 *regs;
113
114 regs = kzalloc(len, GFP_KERNEL);
115 if (!regs)
116 return -ENOMEM;
117
118 memcpy_fromio(regs, hba->mmio_base + offset, len);
119 ufshcd_hex_dump(prefix, regs, len);
120 kfree(regs);
121
122 return 0;
123}
124EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
Dolev Raviv66cc8202016-12-22 18:39:42 -0800125
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530126enum {
127 UFSHCD_MAX_CHANNEL = 0,
128 UFSHCD_MAX_ID = 1,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530129 UFSHCD_CMD_PER_LUN = 32,
130 UFSHCD_CAN_QUEUE = 32,
131};
132
133/* UFSHCD states */
134enum {
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530135 UFSHCD_STATE_RESET,
136 UFSHCD_STATE_ERROR,
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +0530137 UFSHCD_STATE_OPERATIONAL,
Zang Leigang141f8162016-11-16 11:29:37 +0800138 UFSHCD_STATE_EH_SCHEDULED,
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +0530139};
140
141/* UFSHCD error handling flags */
142enum {
143 UFSHCD_EH_IN_PROGRESS = (1 << 0),
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530144};
145
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +0530146/* UFSHCD UIC layer error flags */
147enum {
148 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +0200149 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
150 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
151 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
152 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
153 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +0530154};
155
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +0530156#define ufshcd_set_eh_in_progress(h) \
Tomohiro Kusumi9c490d22017-03-28 16:49:26 +0300157 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +0530158#define ufshcd_eh_in_progress(h) \
Tomohiro Kusumi9c490d22017-03-28 16:49:26 +0300159 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +0530160#define ufshcd_clear_eh_in_progress(h) \
Tomohiro Kusumi9c490d22017-03-28 16:49:26 +0300161 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +0530162
Subhash Jadavani57d104c2014-09-25 15:32:30 +0300163#define ufshcd_set_ufs_dev_active(h) \
164 ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
165#define ufshcd_set_ufs_dev_sleep(h) \
166 ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
167#define ufshcd_set_ufs_dev_poweroff(h) \
168 ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
169#define ufshcd_is_ufs_dev_active(h) \
170 ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
171#define ufshcd_is_ufs_dev_sleep(h) \
172 ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
173#define ufshcd_is_ufs_dev_poweroff(h) \
174 ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
175
Stanislav Nijnikovcbb68132018-02-15 14:14:01 +0200176struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
Subhash Jadavani57d104c2014-09-25 15:32:30 +0300177 {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
178 {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
179 {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
180 {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
181 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
182 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
183};
184
185static inline enum ufs_dev_pwr_mode
186ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
187{
188 return ufs_pm_lvl_states[lvl].dev_state;
189}
190
191static inline enum uic_link_state
192ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
193{
194 return ufs_pm_lvl_states[lvl].link_state;
195}
196
subhashj@codeaurora.org0c8f7582016-12-22 18:41:11 -0800197static inline enum ufs_pm_level
198ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
199 enum uic_link_state link_state)
200{
201 enum ufs_pm_level lvl;
202
203 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
204 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
205 (ufs_pm_lvl_states[lvl].link_state == link_state))
206 return lvl;
207 }
208
209 /* if no match found, return the level 0 */
210 return UFS_PM_LVL_0;
211}
212
Subhash Jadavani56d4a182016-12-05 19:25:32 -0800213static struct ufs_dev_fix ufs_fixups[] = {
214 /* UFS cards deviations table */
215 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
216 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
217 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
218 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
219 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
220 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
221 UFS_DEVICE_NO_FASTAUTO),
222 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
223 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
224 UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
225 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
226 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
227 UFS_DEVICE_QUIRK_PA_TACTIVATE),
228 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
229 UFS_DEVICE_QUIRK_PA_TACTIVATE),
230 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
231 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
232 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
233
234 END_FIX
235};
236
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +0530237static void ufshcd_tmc_handler(struct ufs_hba *hba);
238static void ufshcd_async_scan(void *data, async_cookie_t cookie);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +0530239static int ufshcd_reset_and_restore(struct ufs_hba *hba);
Dolev Ravive7d38252016-12-22 18:40:07 -0800240static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +0530241static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +0300242static void ufshcd_hba_exit(struct ufs_hba *hba);
243static int ufshcd_probe_hba(struct ufs_hba *hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +0300244static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
245 bool skip_ref_clk);
246static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
Yaniv Gardi60f01872016-03-10 17:37:11 +0200247static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +0300248static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
249static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
Yaniv Gardicad2e032015-03-31 17:37:14 +0300250static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +0300251static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -0800252static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
253static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -0800254static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -0800255static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
Subhash Jadavani57d104c2014-09-25 15:32:30 +0300256static irqreturn_t ufshcd_intr(int irq, void *__hba);
Yaniv Gardi874237f2015-05-17 18:55:03 +0300257static int ufshcd_change_power_mode(struct ufs_hba *hba,
258 struct ufs_pa_layer_attr *pwr_mode);
Yaniv Gardi14497322016-02-01 15:02:39 +0200259static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
260{
261 return tag >= 0 && tag < hba->nutrs;
262}
Subhash Jadavani57d104c2014-09-25 15:32:30 +0300263
264static inline int ufshcd_enable_irq(struct ufs_hba *hba)
265{
266 int ret = 0;
267
268 if (!hba->is_irq_enabled) {
269 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
270 hba);
271 if (ret)
272 dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
273 __func__, ret);
274 hba->is_irq_enabled = true;
275 }
276
277 return ret;
278}
279
280static inline void ufshcd_disable_irq(struct ufs_hba *hba)
281{
282 if (hba->is_irq_enabled) {
283 free_irq(hba->irq, hba);
284 hba->is_irq_enabled = false;
285 }
286}
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +0530287
Subhash Jadavani38135532018-05-03 16:37:18 +0530288static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
289{
290 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
291 scsi_unblock_requests(hba->host);
292}
293
294static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
295{
296 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
297 scsi_block_requests(hba->host);
298}
299
Yaniv Gardib573d482016-03-10 17:37:09 +0200300/* replace non-printable or non-ASCII characters with spaces */
301static inline void ufshcd_remove_non_printable(char *val)
302{
303 if (!val)
304 return;
305
306 if (*val < 0x20 || *val > 0x7e)
307 *val = ' ';
308}
309
Ohad Sharabi6667e6d2018-03-28 12:42:18 +0300310static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
311 const char *str)
312{
313 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
314
315 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
316}
317
318static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
319 const char *str)
320{
321 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
322
323 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
324}
325
326static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
327 const char *str)
328{
329 struct utp_task_req_desc *descp;
330 struct utp_upiu_task_req *task_req;
331 int off = (int)tag - hba->nutrs;
332
333 descp = &hba->utmrdl_base_addr[off];
334 task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
335 trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
336 &task_req->input_param1);
337}
338
Lee Susman1a07f2d2016-12-22 18:42:03 -0800339static void ufshcd_add_command_trace(struct ufs_hba *hba,
340 unsigned int tag, const char *str)
341{
342 sector_t lba = -1;
343 u8 opcode = 0;
344 u32 intr, doorbell;
Ohad Sharabie7c3b372018-08-05 16:26:23 +0300345 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
Lee Susman1a07f2d2016-12-22 18:42:03 -0800346 int transfer_len = -1;
347
Ohad Sharabie7c3b372018-08-05 16:26:23 +0300348 if (!trace_ufshcd_command_enabled()) {
349 /* trace UPIU W/O tracing command */
350 if (lrbp->cmd)
351 ufshcd_add_cmd_upiu_trace(hba, tag, str);
Lee Susman1a07f2d2016-12-22 18:42:03 -0800352 return;
Ohad Sharabie7c3b372018-08-05 16:26:23 +0300353 }
Lee Susman1a07f2d2016-12-22 18:42:03 -0800354
355 if (lrbp->cmd) { /* data phase exists */
Ohad Sharabie7c3b372018-08-05 16:26:23 +0300356 /* trace UPIU also */
357 ufshcd_add_cmd_upiu_trace(hba, tag, str);
Lee Susman1a07f2d2016-12-22 18:42:03 -0800358 opcode = (u8)(*lrbp->cmd->cmnd);
359 if ((opcode == READ_10) || (opcode == WRITE_10)) {
360 /*
361 * Currently we only fully trace read(10) and write(10)
362 * commands
363 */
364 if (lrbp->cmd->request && lrbp->cmd->request->bio)
365 lba =
366 lrbp->cmd->request->bio->bi_iter.bi_sector;
367 transfer_len = be32_to_cpu(
368 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
369 }
370 }
371
372 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
373 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
374 trace_ufshcd_command(dev_name(hba->dev), str, tag,
375 doorbell, transfer_len, intr, lba, opcode);
376}
377
Dolev Ravivff8e20c2016-12-22 18:42:18 -0800378static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
379{
380 struct ufs_clk_info *clki;
381 struct list_head *head = &hba->clk_list_head;
382
Szymon Mielczarek566ec9a2017-06-05 11:36:54 +0300383 if (list_empty(head))
Dolev Ravivff8e20c2016-12-22 18:42:18 -0800384 return;
385
386 list_for_each_entry(clki, head, list) {
387 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
388 clki->max_freq)
389 dev_err(hba->dev, "clk: %s, rate: %u\n",
390 clki->name, clki->curr_freq);
391 }
392}
393
394static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
395 struct ufs_uic_err_reg_hist *err_hist, char *err_name)
396{
397 int i;
398
399 for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
400 int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
401
402 if (err_hist->reg[p] == 0)
403 continue;
404 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
405 err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
406 }
407}
408
Dolev Raviv66cc8202016-12-22 18:39:42 -0800409static void ufshcd_print_host_regs(struct ufs_hba *hba)
410{
Tomas Winklerba809172018-06-14 11:14:09 +0300411 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
Dolev Raviv66cc8202016-12-22 18:39:42 -0800412 dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
413 hba->ufs_version, hba->capabilities);
414 dev_err(hba->dev,
415 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
416 (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
Dolev Ravivff8e20c2016-12-22 18:42:18 -0800417 dev_err(hba->dev,
418 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
419 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
420 hba->ufs_stats.hibern8_exit_cnt);
421
422 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
423 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
424 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
425 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
426 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
427
428 ufshcd_print_clk_freqs(hba);
429
430 if (hba->vops && hba->vops->dbg_register_dump)
431 hba->vops->dbg_register_dump(hba);
Dolev Raviv66cc8202016-12-22 18:39:42 -0800432}
433
434static
435void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
436{
437 struct ufshcd_lrb *lrbp;
Gilad Broner7fabb772017-02-03 16:56:50 -0800438 int prdt_length;
Dolev Raviv66cc8202016-12-22 18:39:42 -0800439 int tag;
440
441 for_each_set_bit(tag, &bitmap, hba->nutrs) {
442 lrbp = &hba->lrb[tag];
443
Dolev Ravivff8e20c2016-12-22 18:42:18 -0800444 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
445 tag, ktime_to_us(lrbp->issue_time_stamp));
Zang Leigang09017182017-09-27 10:06:06 +0800446 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
447 tag, ktime_to_us(lrbp->compl_time_stamp));
Dolev Ravivff8e20c2016-12-22 18:42:18 -0800448 dev_err(hba->dev,
449 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
450 tag, (u64)lrbp->utrd_dma_addr);
451
Dolev Raviv66cc8202016-12-22 18:39:42 -0800452 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
453 sizeof(struct utp_transfer_req_desc));
Dolev Ravivff8e20c2016-12-22 18:42:18 -0800454 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
455 (u64)lrbp->ucd_req_dma_addr);
Dolev Raviv66cc8202016-12-22 18:39:42 -0800456 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
457 sizeof(struct utp_upiu_req));
Dolev Ravivff8e20c2016-12-22 18:42:18 -0800458 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
459 (u64)lrbp->ucd_rsp_dma_addr);
Dolev Raviv66cc8202016-12-22 18:39:42 -0800460 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
461 sizeof(struct utp_upiu_rsp));
Dolev Raviv66cc8202016-12-22 18:39:42 -0800462
Gilad Broner7fabb772017-02-03 16:56:50 -0800463 prdt_length = le16_to_cpu(
464 lrbp->utr_descriptor_ptr->prd_table_length);
465 dev_err(hba->dev,
466 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
467 tag, prdt_length,
468 (u64)lrbp->ucd_prdt_dma_addr);
469
470 if (pr_prdt)
Dolev Raviv66cc8202016-12-22 18:39:42 -0800471 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
Gilad Broner7fabb772017-02-03 16:56:50 -0800472 sizeof(struct ufshcd_sg_entry) * prdt_length);
Dolev Raviv66cc8202016-12-22 18:39:42 -0800473 }
474}
475
476static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
477{
478 struct utp_task_req_desc *tmrdp;
479 int tag;
480
481 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
482 tmrdp = &hba->utmrdl_base_addr[tag];
483 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
484 ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
485 sizeof(struct request_desc_header));
486 dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
487 tag);
488 ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
489 sizeof(struct utp_upiu_req));
490 dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
491 tag);
492 ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
493 sizeof(struct utp_task_req_desc));
494 }
495}
496
Gilad Broner6ba65582017-02-03 16:57:28 -0800497static void ufshcd_print_host_state(struct ufs_hba *hba)
498{
499 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
500 dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
Zang Leigange002e652017-08-24 10:57:15 +0800501 hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
Gilad Broner6ba65582017-02-03 16:57:28 -0800502 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
503 hba->saved_err, hba->saved_uic_err);
504 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
505 hba->curr_dev_pwr_mode, hba->uic_link_state);
506 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
507 hba->pm_op_in_progress, hba->is_sys_suspended);
508 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
509 hba->auto_bkops_enabled, hba->host->host_self_blocked);
510 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
511 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
512 hba->eh_flags, hba->req_abort_count);
513 dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
514 hba->capabilities, hba->caps);
515 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
516 hba->dev_quirks);
517}
518
Dolev Ravivff8e20c2016-12-22 18:42:18 -0800519/**
520 * ufshcd_print_pwr_info - print power params as saved in hba
521 * power info
522 * @hba: per-adapter instance
523 */
524static void ufshcd_print_pwr_info(struct ufs_hba *hba)
525{
526 static const char * const names[] = {
527 "INVALID MODE",
528 "FAST MODE",
529 "SLOW_MODE",
530 "INVALID MODE",
531 "FASTAUTO_MODE",
532 "SLOWAUTO_MODE",
533 "INVALID MODE",
534 };
535
536 dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
537 __func__,
538 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
539 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
540 names[hba->pwr_info.pwr_rx],
541 names[hba->pwr_info.pwr_tx],
542 hba->pwr_info.hs_rate);
543}
544
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530545/*
546 * ufshcd_wait_for_register - wait for register value to change
547 * @hba - per-adapter interface
548 * @reg - mmio register offset
549 * @mask - mask to apply to read register value
550 * @val - wait condition
551 * @interval_us - polling interval in microsecs
552 * @timeout_ms - timeout in millisecs
Yaniv Gardi596585a2016-03-10 17:37:08 +0200553 * @can_sleep - perform sleep or just spin
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530554 *
555 * Returns -ETIMEDOUT on error, zero on success
556 */
Yaniv Gardi596585a2016-03-10 17:37:08 +0200557int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
558 u32 val, unsigned long interval_us,
559 unsigned long timeout_ms, bool can_sleep)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530560{
561 int err = 0;
562 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
563
564 /* ignore bits that we don't intend to wait on */
565 val = val & mask;
566
567 while ((ufshcd_readl(hba, reg) & mask) != val) {
Yaniv Gardi596585a2016-03-10 17:37:08 +0200568 if (can_sleep)
569 usleep_range(interval_us, interval_us + 50);
570 else
571 udelay(interval_us);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530572 if (time_after(jiffies, timeout)) {
573 if ((ufshcd_readl(hba, reg) & mask) != val)
574 err = -ETIMEDOUT;
575 break;
576 }
577 }
578
579 return err;
580}
581
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530582/**
Seungwon Jeon2fbd0092013-06-26 22:39:27 +0530583 * ufshcd_get_intr_mask - Get the interrupt bit mask
Bart Van Assche8aa29f12018-03-01 15:07:20 -0800584 * @hba: Pointer to adapter instance
Seungwon Jeon2fbd0092013-06-26 22:39:27 +0530585 *
586 * Returns interrupt bit mask per version
587 */
588static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
589{
Yaniv Gardic01848c2016-12-05 19:25:02 -0800590 u32 intr_mask = 0;
591
592 switch (hba->ufs_version) {
593 case UFSHCI_VERSION_10:
594 intr_mask = INTERRUPT_MASK_ALL_VER_10;
595 break;
Yaniv Gardic01848c2016-12-05 19:25:02 -0800596 case UFSHCI_VERSION_11:
597 case UFSHCI_VERSION_20:
598 intr_mask = INTERRUPT_MASK_ALL_VER_11;
599 break;
Yaniv Gardic01848c2016-12-05 19:25:02 -0800600 case UFSHCI_VERSION_21:
601 default:
602 intr_mask = INTERRUPT_MASK_ALL_VER_21;
Tomohiro Kusumi031d1e02017-03-23 12:49:04 +0200603 break;
Yaniv Gardic01848c2016-12-05 19:25:02 -0800604 }
605
606 return intr_mask;
Seungwon Jeon2fbd0092013-06-26 22:39:27 +0530607}
608
609/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530610 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
Bart Van Assche8aa29f12018-03-01 15:07:20 -0800611 * @hba: Pointer to adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530612 *
613 * Returns UFSHCI version supported by the controller
614 */
615static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
616{
Yaniv Gardi0263bcd2015-10-28 13:15:48 +0200617 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
618 return ufshcd_vops_get_ufs_hci_version(hba);
Yaniv Gardi9949e702015-05-17 18:55:05 +0300619
Seungwon Jeonb873a2752013-06-26 22:39:26 +0530620 return ufshcd_readl(hba, REG_UFS_VERSION);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530621}
622
623/**
624 * ufshcd_is_device_present - Check if any device connected to
625 * the host controller
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +0300626 * @hba: pointer to adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530627 *
Tomohiro Kusumic9e60102017-03-28 16:49:24 +0300628 * Returns true if device present, false if no device detected
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530629 */
Tomohiro Kusumic9e60102017-03-28 16:49:24 +0300630static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530631{
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +0300632 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
Tomohiro Kusumic9e60102017-03-28 16:49:24 +0300633 DEVICE_PRESENT) ? true : false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530634}
635
636/**
637 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
Bart Van Assche8aa29f12018-03-01 15:07:20 -0800638 * @lrbp: pointer to local command reference block
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530639 *
640 * This function is used to get the OCS field from UTRD
641 * Returns the OCS field in the UTRD
642 */
643static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
644{
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +0530645 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530646}
647
648/**
649 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
650 * @task_req_descp: pointer to utp_task_req_desc structure
651 *
652 * This function is used to get the OCS field from UTMRD
653 * Returns the OCS field in the UTMRD
654 */
655static inline int
656ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
657{
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +0530658 return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530659}
660
661/**
662 * ufshcd_get_tm_free_slot - get a free slot for task management request
663 * @hba: per adapter instance
Sujit Reddy Thummae2933132014-05-26 10:59:12 +0530664 * @free_slot: pointer to variable with available slot value
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530665 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +0530666 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
667 * Returns 0 if free slot is not available, else return 1 with tag value
668 * in @free_slot.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530669 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +0530670static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530671{
Sujit Reddy Thummae2933132014-05-26 10:59:12 +0530672 int tag;
673 bool ret = false;
674
675 if (!free_slot)
676 goto out;
677
678 do {
679 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
680 if (tag >= hba->nutmrs)
681 goto out;
682 } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
683
684 *free_slot = tag;
685 ret = true;
686out:
687 return ret;
688}
689
690static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
691{
692 clear_bit_unlock(slot, &hba->tm_slots_in_use);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530693}
694
695/**
696 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
697 * @hba: per adapter instance
698 * @pos: position of the bit to be cleared
699 */
700static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
701{
Alim Akhtar1399c5b2018-05-06 15:44:15 +0530702 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
703 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
704 else
705 ufshcd_writel(hba, ~(1 << pos),
706 REG_UTP_TRANSFER_REQ_LIST_CLEAR);
707}
708
709/**
710 * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
711 * @hba: per adapter instance
712 * @pos: position of the bit to be cleared
713 */
714static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
715{
716 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
717 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
718 else
719 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530720}
721
722/**
Yaniv Gardia48353f2016-02-01 15:02:40 +0200723 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
724 * @hba: per adapter instance
725 * @tag: position of the bit to be cleared
726 */
727static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
728{
729 __clear_bit(tag, &hba->outstanding_reqs);
730}
731
732/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530733 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
734 * @reg: Register value of host controller status
735 *
736 * Returns integer, 0 on Success and positive value if failed
737 */
738static inline int ufshcd_get_lists_status(u32 reg)
739{
Tomohiro Kusumi6cf16112017-04-26 20:28:58 +0300740 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530741}
742
743/**
744 * ufshcd_get_uic_cmd_result - Get the UIC command result
745 * @hba: Pointer to adapter instance
746 *
747 * This function gets the result of UIC command completion
748 * Returns 0 on success, non zero value on error
749 */
750static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
751{
Seungwon Jeonb873a2752013-06-26 22:39:26 +0530752 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530753 MASK_UIC_COMMAND_RESULT;
754}
755
756/**
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +0530757 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
758 * @hba: Pointer to adapter instance
759 *
760 * This function gets UIC command argument3
761 * Returns 0 on success, non zero value on error
762 */
763static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
764{
765 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
766}
767
768/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530769 * ufshcd_get_req_rsp - returns the TR response transaction type
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530770 * @ucd_rsp_ptr: pointer to response UPIU
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530771 */
772static inline int
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530773ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530774{
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530775 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530776}
777
778/**
779 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
780 * @ucd_rsp_ptr: pointer to response UPIU
781 *
782 * This function gets the response status and scsi_status from response UPIU
783 * Returns the response result code.
784 */
785static inline int
786ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
787{
788 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
789}
790
Seungwon Jeon1c2623c2013-08-31 21:40:19 +0530791/*
792 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
793 * from response UPIU
794 * @ucd_rsp_ptr: pointer to response UPIU
795 *
796 * Return the data segment length.
797 */
798static inline unsigned int
799ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
800{
801 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
802 MASK_RSP_UPIU_DATA_SEG_LEN;
803}
804
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530805/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +0530806 * ufshcd_is_exception_event - Check if the device raised an exception event
807 * @ucd_rsp_ptr: pointer to response UPIU
808 *
809 * The function checks if the device raised an exception event indicated in
810 * the Device Information field of response UPIU.
811 *
812 * Returns true if exception is raised, false otherwise.
813 */
814static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
815{
816 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
817 MASK_RSP_EXCEPTION_EVENT ? true : false;
818}
819
820/**
Seungwon Jeon7d568652013-08-31 21:40:20 +0530821 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530822 * @hba: per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530823 */
824static inline void
Seungwon Jeon7d568652013-08-31 21:40:20 +0530825ufshcd_reset_intr_aggr(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530826{
Seungwon Jeon7d568652013-08-31 21:40:20 +0530827 ufshcd_writel(hba, INT_AGGR_ENABLE |
828 INT_AGGR_COUNTER_AND_TIMER_RESET,
829 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
830}
831
832/**
833 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
834 * @hba: per adapter instance
835 * @cnt: Interrupt aggregation counter threshold
836 * @tmout: Interrupt aggregation timeout value
837 */
838static inline void
839ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
840{
841 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
842 INT_AGGR_COUNTER_THLD_VAL(cnt) |
843 INT_AGGR_TIMEOUT_VAL(tmout),
844 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530845}
846
847/**
Yaniv Gardib8521902015-05-17 18:54:57 +0300848 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
849 * @hba: per adapter instance
850 */
851static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
852{
853 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
854}
855
856/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530857 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
858 * When run-stop registers are set to 1, it indicates the
859 * host controller that it can process the requests
860 * @hba: per adapter instance
861 */
862static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
863{
Seungwon Jeonb873a2752013-06-26 22:39:26 +0530864 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
865 REG_UTP_TASK_REQ_LIST_RUN_STOP);
866 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
867 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530868}
869
870/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530871 * ufshcd_hba_start - Start controller initialization sequence
872 * @hba: per adapter instance
873 */
874static inline void ufshcd_hba_start(struct ufs_hba *hba)
875{
Seungwon Jeonb873a2752013-06-26 22:39:26 +0530876 ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530877}
878
879/**
880 * ufshcd_is_hba_active - Get controller state
881 * @hba: per adapter instance
882 *
Tomohiro Kusumic9e60102017-03-28 16:49:24 +0300883 * Returns false if controller is active, true otherwise
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530884 */
Tomohiro Kusumic9e60102017-03-28 16:49:24 +0300885static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530886{
Tomohiro Kusumi4a8eec22017-03-28 16:49:25 +0300887 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
888 ? false : true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530889}
890
Yaniv Gardi37113102016-03-10 17:37:16 +0200891u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
892{
893 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
894 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
895 (hba->ufs_version == UFSHCI_VERSION_11))
896 return UFS_UNIPRO_VER_1_41;
897 else
898 return UFS_UNIPRO_VER_1_6;
899}
900EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
901
902static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
903{
904 /*
905 * If both host and device support UniPro ver1.6 or later, PA layer
906 * parameters tuning happens during link startup itself.
907 *
908 * We can manually tune PA layer parameters if either host or device
909 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
910 * logic simple, we will only do manual tuning if local unipro version
911 * doesn't support ver1.6 or later.
912 */
913 if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
914 return true;
915 else
916 return false;
917}
918
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -0800919static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
920{
921 int ret = 0;
922 struct ufs_clk_info *clki;
923 struct list_head *head = &hba->clk_list_head;
924 ktime_t start = ktime_get();
925 bool clk_state_changed = false;
926
Szymon Mielczarek566ec9a2017-06-05 11:36:54 +0300927 if (list_empty(head))
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -0800928 goto out;
929
930 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
931 if (ret)
932 return ret;
933
934 list_for_each_entry(clki, head, list) {
935 if (!IS_ERR_OR_NULL(clki->clk)) {
936 if (scale_up && clki->max_freq) {
937 if (clki->curr_freq == clki->max_freq)
938 continue;
939
940 clk_state_changed = true;
941 ret = clk_set_rate(clki->clk, clki->max_freq);
942 if (ret) {
943 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
944 __func__, clki->name,
945 clki->max_freq, ret);
946 break;
947 }
948 trace_ufshcd_clk_scaling(dev_name(hba->dev),
949 "scaled up", clki->name,
950 clki->curr_freq,
951 clki->max_freq);
952
953 clki->curr_freq = clki->max_freq;
954
955 } else if (!scale_up && clki->min_freq) {
956 if (clki->curr_freq == clki->min_freq)
957 continue;
958
959 clk_state_changed = true;
960 ret = clk_set_rate(clki->clk, clki->min_freq);
961 if (ret) {
962 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
963 __func__, clki->name,
964 clki->min_freq, ret);
965 break;
966 }
967 trace_ufshcd_clk_scaling(dev_name(hba->dev),
968 "scaled down", clki->name,
969 clki->curr_freq,
970 clki->min_freq);
971 clki->curr_freq = clki->min_freq;
972 }
973 }
974 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
975 clki->name, clk_get_rate(clki->clk));
976 }
977
978 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
979
980out:
981 if (clk_state_changed)
982 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
983 (scale_up ? "up" : "down"),
984 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
985 return ret;
986}
987
988/**
989 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
990 * @hba: per adapter instance
991 * @scale_up: True if scaling up and false if scaling down
992 *
993 * Returns true if scaling is required, false otherwise.
994 */
995static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
996 bool scale_up)
997{
998 struct ufs_clk_info *clki;
999 struct list_head *head = &hba->clk_list_head;
1000
Szymon Mielczarek566ec9a2017-06-05 11:36:54 +03001001 if (list_empty(head))
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001002 return false;
1003
1004 list_for_each_entry(clki, head, list) {
1005 if (!IS_ERR_OR_NULL(clki->clk)) {
1006 if (scale_up && clki->max_freq) {
1007 if (clki->curr_freq == clki->max_freq)
1008 continue;
1009 return true;
1010 } else if (!scale_up && clki->min_freq) {
1011 if (clki->curr_freq == clki->min_freq)
1012 continue;
1013 return true;
1014 }
1015 }
1016 }
1017
1018 return false;
1019}
1020
1021static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1022 u64 wait_timeout_us)
1023{
1024 unsigned long flags;
1025 int ret = 0;
1026 u32 tm_doorbell;
1027 u32 tr_doorbell;
1028 bool timeout = false, do_last_check = false;
1029 ktime_t start;
1030
1031 ufshcd_hold(hba, false);
1032 spin_lock_irqsave(hba->host->host_lock, flags);
1033 /*
1034 * Wait for all the outstanding tasks/transfer requests.
1035 * Verify by checking the doorbell registers are clear.
1036 */
1037 start = ktime_get();
1038 do {
1039 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1040 ret = -EBUSY;
1041 goto out;
1042 }
1043
1044 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1045 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1046 if (!tm_doorbell && !tr_doorbell) {
1047 timeout = false;
1048 break;
1049 } else if (do_last_check) {
1050 break;
1051 }
1052
1053 spin_unlock_irqrestore(hba->host->host_lock, flags);
1054 schedule();
1055 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1056 wait_timeout_us) {
1057 timeout = true;
1058 /*
1059 * We might have scheduled out for long time so make
1060 * sure to check if doorbells are cleared by this time
1061 * or not.
1062 */
1063 do_last_check = true;
1064 }
1065 spin_lock_irqsave(hba->host->host_lock, flags);
1066 } while (tm_doorbell || tr_doorbell);
1067
1068 if (timeout) {
1069 dev_err(hba->dev,
1070 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1071 __func__, tm_doorbell, tr_doorbell);
1072 ret = -EBUSY;
1073 }
1074out:
1075 spin_unlock_irqrestore(hba->host->host_lock, flags);
1076 ufshcd_release(hba);
1077 return ret;
1078}
1079
1080/**
1081 * ufshcd_scale_gear - scale up/down UFS gear
1082 * @hba: per adapter instance
1083 * @scale_up: True for scaling up gear and false for scaling down
1084 *
1085 * Returns 0 for success,
1086 * Returns -EBUSY if scaling can't happen at this time
1087 * Returns non-zero for any other errors
1088 */
1089static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1090{
1091 #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
1092 int ret = 0;
1093 struct ufs_pa_layer_attr new_pwr_info;
1094
1095 if (scale_up) {
1096 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1097 sizeof(struct ufs_pa_layer_attr));
1098 } else {
1099 memcpy(&new_pwr_info, &hba->pwr_info,
1100 sizeof(struct ufs_pa_layer_attr));
1101
1102 if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1103 || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1104 /* save the current power mode */
1105 memcpy(&hba->clk_scaling.saved_pwr_info.info,
1106 &hba->pwr_info,
1107 sizeof(struct ufs_pa_layer_attr));
1108
1109 /* scale down gear */
1110 new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1111 new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1112 }
1113 }
1114
1115 /* check if the power mode needs to be changed or not? */
1116 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
1117
1118 if (ret)
1119 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1120 __func__, ret,
1121 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1122 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1123
1124 return ret;
1125}
1126
1127static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1128{
1129 #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
1130 int ret = 0;
1131 /*
1132 * make sure that there are no outstanding requests when
1133 * clock scaling is in progress
1134 */
Subhash Jadavani38135532018-05-03 16:37:18 +05301135 ufshcd_scsi_block_requests(hba);
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001136 down_write(&hba->clk_scaling_lock);
1137 if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1138 ret = -EBUSY;
1139 up_write(&hba->clk_scaling_lock);
Subhash Jadavani38135532018-05-03 16:37:18 +05301140 ufshcd_scsi_unblock_requests(hba);
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001141 }
1142
1143 return ret;
1144}
1145
1146static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1147{
1148 up_write(&hba->clk_scaling_lock);
Subhash Jadavani38135532018-05-03 16:37:18 +05301149 ufshcd_scsi_unblock_requests(hba);
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001150}
1151
1152/**
1153 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1154 * @hba: per adapter instance
1155 * @scale_up: True for scaling up and false for scalin down
1156 *
1157 * Returns 0 for success,
1158 * Returns -EBUSY if scaling can't happen at this time
1159 * Returns non-zero for any other errors
1160 */
1161static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1162{
1163 int ret = 0;
1164
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001165 /* let's not get into low power until clock scaling is completed */
1166 ufshcd_hold(hba, false);
1167
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001168 ret = ufshcd_clock_scaling_prepare(hba);
1169 if (ret)
1170 return ret;
1171
1172 /* scale down the gear before scaling down clocks */
1173 if (!scale_up) {
1174 ret = ufshcd_scale_gear(hba, false);
1175 if (ret)
1176 goto out;
1177 }
1178
1179 ret = ufshcd_scale_clks(hba, scale_up);
1180 if (ret) {
1181 if (!scale_up)
1182 ufshcd_scale_gear(hba, true);
1183 goto out;
1184 }
1185
1186 /* scale up the gear after scaling up clocks */
1187 if (scale_up) {
1188 ret = ufshcd_scale_gear(hba, true);
1189 if (ret) {
1190 ufshcd_scale_clks(hba, false);
1191 goto out;
1192 }
1193 }
1194
1195 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1196
1197out:
1198 ufshcd_clock_scaling_unprepare(hba);
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001199 ufshcd_release(hba);
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001200 return ret;
1201}
1202
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001203static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1204{
1205 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1206 clk_scaling.suspend_work);
1207 unsigned long irq_flags;
1208
1209 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1210 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1211 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1212 return;
1213 }
1214 hba->clk_scaling.is_suspended = true;
1215 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1216
1217 __ufshcd_suspend_clkscaling(hba);
1218}
1219
1220static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1221{
1222 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1223 clk_scaling.resume_work);
1224 unsigned long irq_flags;
1225
1226 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1227 if (!hba->clk_scaling.is_suspended) {
1228 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1229 return;
1230 }
1231 hba->clk_scaling.is_suspended = false;
1232 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1233
1234 devfreq_resume_device(hba->devfreq);
1235}
1236
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001237static int ufshcd_devfreq_target(struct device *dev,
1238 unsigned long *freq, u32 flags)
1239{
1240 int ret = 0;
1241 struct ufs_hba *hba = dev_get_drvdata(dev);
1242 ktime_t start;
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001243 bool scale_up, sched_clk_scaling_suspend_work = false;
Bjorn Andersson092b4552018-05-17 23:26:37 -07001244 struct list_head *clk_list = &hba->clk_list_head;
1245 struct ufs_clk_info *clki;
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001246 unsigned long irq_flags;
1247
1248 if (!ufshcd_is_clkscaling_supported(hba))
1249 return -EINVAL;
1250
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001251 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1252 if (ufshcd_eh_in_progress(hba)) {
1253 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1254 return 0;
1255 }
1256
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001257 if (!hba->clk_scaling.active_reqs)
1258 sched_clk_scaling_suspend_work = true;
1259
Bjorn Andersson092b4552018-05-17 23:26:37 -07001260 if (list_empty(clk_list)) {
1261 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1262 goto out;
1263 }
1264
1265 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1266 scale_up = (*freq == clki->max_freq) ? true : false;
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001267 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1268 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1269 ret = 0;
1270 goto out; /* no state change required */
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001271 }
1272 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1273
1274 start = ktime_get();
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001275 ret = ufshcd_devfreq_scale(hba, scale_up);
1276
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001277 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1278 (scale_up ? "up" : "down"),
1279 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1280
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001281out:
1282 if (sched_clk_scaling_suspend_work)
1283 queue_work(hba->clk_scaling.workq,
1284 &hba->clk_scaling.suspend_work);
1285
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001286 return ret;
1287}
1288
1289
1290static int ufshcd_devfreq_get_dev_status(struct device *dev,
1291 struct devfreq_dev_status *stat)
1292{
1293 struct ufs_hba *hba = dev_get_drvdata(dev);
1294 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1295 unsigned long flags;
1296
1297 if (!ufshcd_is_clkscaling_supported(hba))
1298 return -EINVAL;
1299
1300 memset(stat, 0, sizeof(*stat));
1301
1302 spin_lock_irqsave(hba->host->host_lock, flags);
1303 if (!scaling->window_start_t)
1304 goto start_window;
1305
1306 if (scaling->is_busy_started)
1307 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1308 scaling->busy_start_t));
1309
1310 stat->total_time = jiffies_to_usecs((long)jiffies -
1311 (long)scaling->window_start_t);
1312 stat->busy_time = scaling->tot_busy_t;
1313start_window:
1314 scaling->window_start_t = jiffies;
1315 scaling->tot_busy_t = 0;
1316
1317 if (hba->outstanding_reqs) {
1318 scaling->busy_start_t = ktime_get();
1319 scaling->is_busy_started = true;
1320 } else {
1321 scaling->busy_start_t = 0;
1322 scaling->is_busy_started = false;
1323 }
1324 spin_unlock_irqrestore(hba->host->host_lock, flags);
1325 return 0;
1326}
1327
1328static struct devfreq_dev_profile ufs_devfreq_profile = {
1329 .polling_ms = 100,
1330 .target = ufshcd_devfreq_target,
1331 .get_dev_status = ufshcd_devfreq_get_dev_status,
1332};
1333
Bjorn Anderssondeac4442018-05-17 23:26:36 -07001334static int ufshcd_devfreq_init(struct ufs_hba *hba)
1335{
Bjorn Andersson092b4552018-05-17 23:26:37 -07001336 struct list_head *clk_list = &hba->clk_list_head;
1337 struct ufs_clk_info *clki;
Bjorn Anderssondeac4442018-05-17 23:26:36 -07001338 struct devfreq *devfreq;
1339 int ret;
1340
Bjorn Andersson092b4552018-05-17 23:26:37 -07001341 /* Skip devfreq if we don't have any clocks in the list */
1342 if (list_empty(clk_list))
1343 return 0;
1344
1345 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1346 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1347 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1348
1349 devfreq = devfreq_add_device(hba->dev,
Bjorn Anderssondeac4442018-05-17 23:26:36 -07001350 &ufs_devfreq_profile,
1351 DEVFREQ_GOV_SIMPLE_ONDEMAND,
1352 NULL);
1353 if (IS_ERR(devfreq)) {
1354 ret = PTR_ERR(devfreq);
1355 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
Bjorn Andersson092b4552018-05-17 23:26:37 -07001356
1357 dev_pm_opp_remove(hba->dev, clki->min_freq);
1358 dev_pm_opp_remove(hba->dev, clki->max_freq);
Bjorn Anderssondeac4442018-05-17 23:26:36 -07001359 return ret;
1360 }
1361
1362 hba->devfreq = devfreq;
1363
1364 return 0;
1365}
1366
Bjorn Andersson092b4552018-05-17 23:26:37 -07001367static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1368{
1369 struct list_head *clk_list = &hba->clk_list_head;
1370 struct ufs_clk_info *clki;
1371
1372 if (!hba->devfreq)
1373 return;
1374
1375 devfreq_remove_device(hba->devfreq);
1376 hba->devfreq = NULL;
1377
1378 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1379 dev_pm_opp_remove(hba->dev, clki->min_freq);
1380 dev_pm_opp_remove(hba->dev, clki->max_freq);
1381}
1382
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001383static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1384{
1385 unsigned long flags;
1386
1387 devfreq_suspend_device(hba->devfreq);
1388 spin_lock_irqsave(hba->host->host_lock, flags);
1389 hba->clk_scaling.window_start_t = 0;
1390 spin_unlock_irqrestore(hba->host->host_lock, flags);
1391}
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001392
Gilad Bronera5082532016-10-17 17:10:00 -07001393static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1394{
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001395 unsigned long flags;
1396 bool suspend = false;
1397
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001398 if (!ufshcd_is_clkscaling_supported(hba))
1399 return;
1400
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001401 spin_lock_irqsave(hba->host->host_lock, flags);
1402 if (!hba->clk_scaling.is_suspended) {
1403 suspend = true;
1404 hba->clk_scaling.is_suspended = true;
1405 }
1406 spin_unlock_irqrestore(hba->host->host_lock, flags);
1407
1408 if (suspend)
1409 __ufshcd_suspend_clkscaling(hba);
Gilad Bronera5082532016-10-17 17:10:00 -07001410}
1411
1412static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1413{
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001414 unsigned long flags;
1415 bool resume = false;
1416
1417 if (!ufshcd_is_clkscaling_supported(hba))
1418 return;
1419
1420 spin_lock_irqsave(hba->host->host_lock, flags);
1421 if (hba->clk_scaling.is_suspended) {
1422 resume = true;
1423 hba->clk_scaling.is_suspended = false;
1424 }
1425 spin_unlock_irqrestore(hba->host->host_lock, flags);
1426
1427 if (resume)
1428 devfreq_resume_device(hba->devfreq);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001429}
1430
1431static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1432 struct device_attribute *attr, char *buf)
1433{
1434 struct ufs_hba *hba = dev_get_drvdata(dev);
1435
1436 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1437}
1438
1439static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1440 struct device_attribute *attr, const char *buf, size_t count)
1441{
1442 struct ufs_hba *hba = dev_get_drvdata(dev);
1443 u32 value;
1444 int err;
1445
1446 if (kstrtou32(buf, 0, &value))
1447 return -EINVAL;
1448
1449 value = !!value;
1450 if (value == hba->clk_scaling.is_allowed)
1451 goto out;
1452
1453 pm_runtime_get_sync(hba->dev);
1454 ufshcd_hold(hba, false);
1455
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001456 cancel_work_sync(&hba->clk_scaling.suspend_work);
1457 cancel_work_sync(&hba->clk_scaling.resume_work);
1458
1459 hba->clk_scaling.is_allowed = value;
1460
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001461 if (value) {
1462 ufshcd_resume_clkscaling(hba);
1463 } else {
1464 ufshcd_suspend_clkscaling(hba);
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001465 err = ufshcd_devfreq_scale(hba, true);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001466 if (err)
1467 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1468 __func__, err);
1469 }
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001470
1471 ufshcd_release(hba);
1472 pm_runtime_put_sync(hba->dev);
1473out:
1474 return count;
Gilad Bronera5082532016-10-17 17:10:00 -07001475}
1476
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08001477static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1478{
1479 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1480 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1481 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1482 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1483 hba->clk_scaling.enable_attr.attr.mode = 0644;
1484 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1485 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1486}
1487
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001488static void ufshcd_ungate_work(struct work_struct *work)
1489{
1490 int ret;
1491 unsigned long flags;
1492 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1493 clk_gating.ungate_work);
1494
1495 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1496
1497 spin_lock_irqsave(hba->host->host_lock, flags);
1498 if (hba->clk_gating.state == CLKS_ON) {
1499 spin_unlock_irqrestore(hba->host->host_lock, flags);
1500 goto unblock_reqs;
1501 }
1502
1503 spin_unlock_irqrestore(hba->host->host_lock, flags);
1504 ufshcd_setup_clocks(hba, true);
1505
1506 /* Exit from hibern8 */
1507 if (ufshcd_can_hibern8_during_gating(hba)) {
1508 /* Prevent gating in this path */
1509 hba->clk_gating.is_suspended = true;
1510 if (ufshcd_is_link_hibern8(hba)) {
1511 ret = ufshcd_uic_hibern8_exit(hba);
1512 if (ret)
1513 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1514 __func__, ret);
1515 else
1516 ufshcd_set_link_active(hba);
1517 }
1518 hba->clk_gating.is_suspended = false;
1519 }
1520unblock_reqs:
Subhash Jadavani38135532018-05-03 16:37:18 +05301521 ufshcd_scsi_unblock_requests(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001522}
1523
1524/**
1525 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1526 * Also, exit from hibern8 mode and set the link as active.
1527 * @hba: per adapter instance
1528 * @async: This indicates whether caller should ungate clocks asynchronously.
1529 */
1530int ufshcd_hold(struct ufs_hba *hba, bool async)
1531{
1532 int rc = 0;
1533 unsigned long flags;
1534
1535 if (!ufshcd_is_clkgating_allowed(hba))
1536 goto out;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001537 spin_lock_irqsave(hba->host->host_lock, flags);
1538 hba->clk_gating.active_reqs++;
1539
Yaniv Gardi53c12d02016-02-01 15:02:45 +02001540 if (ufshcd_eh_in_progress(hba)) {
1541 spin_unlock_irqrestore(hba->host->host_lock, flags);
1542 return 0;
1543 }
1544
Sahitya Tummala856b3482014-09-25 15:32:34 +03001545start:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001546 switch (hba->clk_gating.state) {
1547 case CLKS_ON:
Venkat Gopalakrishnanf2a785a2016-10-17 17:10:53 -07001548 /*
1549 * Wait for the ungate work to complete if in progress.
1550 * Though the clocks may be in ON state, the link could
1551 * still be in hibner8 state if hibern8 is allowed
1552 * during clock gating.
1553 * Make sure we exit hibern8 state also in addition to
1554 * clocks being ON.
1555 */
1556 if (ufshcd_can_hibern8_during_gating(hba) &&
1557 ufshcd_is_link_hibern8(hba)) {
1558 spin_unlock_irqrestore(hba->host->host_lock, flags);
1559 flush_work(&hba->clk_gating.ungate_work);
1560 spin_lock_irqsave(hba->host->host_lock, flags);
1561 goto start;
1562 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001563 break;
1564 case REQ_CLKS_OFF:
1565 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1566 hba->clk_gating.state = CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001567 trace_ufshcd_clk_gating(dev_name(hba->dev),
1568 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001569 break;
1570 }
1571 /*
Tomohiro Kusumi9c490d22017-03-28 16:49:26 +03001572 * If we are here, it means gating work is either done or
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001573 * currently running. Hence, fall through to cancel gating
1574 * work and to enable clocks.
1575 */
1576 case CLKS_OFF:
Subhash Jadavani38135532018-05-03 16:37:18 +05301577 ufshcd_scsi_block_requests(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001578 hba->clk_gating.state = REQ_CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001579 trace_ufshcd_clk_gating(dev_name(hba->dev),
1580 hba->clk_gating.state);
Vijay Viswanath10e5e372018-05-03 16:37:22 +05301581 queue_work(hba->clk_gating.clk_gating_workq,
1582 &hba->clk_gating.ungate_work);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001583 /*
1584 * fall through to check if we should wait for this
1585 * work to be done or not.
1586 */
1587 case REQ_CLKS_ON:
1588 if (async) {
1589 rc = -EAGAIN;
1590 hba->clk_gating.active_reqs--;
1591 break;
1592 }
1593
1594 spin_unlock_irqrestore(hba->host->host_lock, flags);
1595 flush_work(&hba->clk_gating.ungate_work);
1596 /* Make sure state is CLKS_ON before returning */
Sahitya Tummala856b3482014-09-25 15:32:34 +03001597 spin_lock_irqsave(hba->host->host_lock, flags);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001598 goto start;
1599 default:
1600 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1601 __func__, hba->clk_gating.state);
1602 break;
1603 }
1604 spin_unlock_irqrestore(hba->host->host_lock, flags);
1605out:
1606 return rc;
1607}
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02001608EXPORT_SYMBOL_GPL(ufshcd_hold);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001609
1610static void ufshcd_gate_work(struct work_struct *work)
1611{
1612 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1613 clk_gating.gate_work.work);
1614 unsigned long flags;
1615
1616 spin_lock_irqsave(hba->host->host_lock, flags);
Venkat Gopalakrishnan3f0c06d2016-10-17 17:11:07 -07001617 /*
1618 * In case you are here to cancel this work the gating state
1619 * would be marked as REQ_CLKS_ON. In this case save time by
1620 * skipping the gating work and exit after changing the clock
1621 * state to CLKS_ON.
1622 */
1623 if (hba->clk_gating.is_suspended ||
1624 (hba->clk_gating.state == REQ_CLKS_ON)) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001625 hba->clk_gating.state = CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001626 trace_ufshcd_clk_gating(dev_name(hba->dev),
1627 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001628 goto rel_lock;
1629 }
1630
1631 if (hba->clk_gating.active_reqs
1632 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1633 || hba->lrb_in_use || hba->outstanding_tasks
1634 || hba->active_uic_cmd || hba->uic_async_done)
1635 goto rel_lock;
1636
1637 spin_unlock_irqrestore(hba->host->host_lock, flags);
1638
1639 /* put the link into hibern8 mode before turning off clocks */
1640 if (ufshcd_can_hibern8_during_gating(hba)) {
1641 if (ufshcd_uic_hibern8_enter(hba)) {
1642 hba->clk_gating.state = CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001643 trace_ufshcd_clk_gating(dev_name(hba->dev),
1644 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001645 goto out;
1646 }
1647 ufshcd_set_link_hibern8(hba);
1648 }
1649
1650 if (!ufshcd_is_link_active(hba))
1651 ufshcd_setup_clocks(hba, false);
1652 else
1653 /* If link is active, device ref_clk can't be switched off */
1654 __ufshcd_setup_clocks(hba, false, true);
1655
1656 /*
1657 * In case you are here to cancel this work the gating state
1658 * would be marked as REQ_CLKS_ON. In this case keep the state
1659 * as REQ_CLKS_ON which would anyway imply that clocks are off
1660 * and a request to turn them on is pending. By doing this way,
1661 * we keep the state machine in tact and this would ultimately
1662 * prevent from doing cancel work multiple times when there are
1663 * new requests arriving before the current cancel work is done.
1664 */
1665 spin_lock_irqsave(hba->host->host_lock, flags);
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001666 if (hba->clk_gating.state == REQ_CLKS_OFF) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001667 hba->clk_gating.state = CLKS_OFF;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001668 trace_ufshcd_clk_gating(dev_name(hba->dev),
1669 hba->clk_gating.state);
1670 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001671rel_lock:
1672 spin_unlock_irqrestore(hba->host->host_lock, flags);
1673out:
1674 return;
1675}
1676
1677/* host lock must be held before calling this variant */
1678static void __ufshcd_release(struct ufs_hba *hba)
1679{
1680 if (!ufshcd_is_clkgating_allowed(hba))
1681 return;
1682
1683 hba->clk_gating.active_reqs--;
1684
1685 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1686 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1687 || hba->lrb_in_use || hba->outstanding_tasks
Yaniv Gardi53c12d02016-02-01 15:02:45 +02001688 || hba->active_uic_cmd || hba->uic_async_done
1689 || ufshcd_eh_in_progress(hba))
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001690 return;
1691
1692 hba->clk_gating.state = REQ_CLKS_OFF;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08001693 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001694 schedule_delayed_work(&hba->clk_gating.gate_work,
1695 msecs_to_jiffies(hba->clk_gating.delay_ms));
1696}
1697
1698void ufshcd_release(struct ufs_hba *hba)
1699{
1700 unsigned long flags;
1701
1702 spin_lock_irqsave(hba->host->host_lock, flags);
1703 __ufshcd_release(hba);
1704 spin_unlock_irqrestore(hba->host->host_lock, flags);
1705}
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02001706EXPORT_SYMBOL_GPL(ufshcd_release);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001707
1708static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1709 struct device_attribute *attr, char *buf)
1710{
1711 struct ufs_hba *hba = dev_get_drvdata(dev);
1712
1713 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1714}
1715
1716static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1717 struct device_attribute *attr, const char *buf, size_t count)
1718{
1719 struct ufs_hba *hba = dev_get_drvdata(dev);
1720 unsigned long flags, value;
1721
1722 if (kstrtoul(buf, 0, &value))
1723 return -EINVAL;
1724
1725 spin_lock_irqsave(hba->host->host_lock, flags);
1726 hba->clk_gating.delay_ms = value;
1727 spin_unlock_irqrestore(hba->host->host_lock, flags);
1728 return count;
1729}
1730
Sahitya Tummalab4274112016-12-22 18:40:39 -08001731static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1732 struct device_attribute *attr, char *buf)
1733{
1734 struct ufs_hba *hba = dev_get_drvdata(dev);
1735
1736 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1737}
1738
1739static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1740 struct device_attribute *attr, const char *buf, size_t count)
1741{
1742 struct ufs_hba *hba = dev_get_drvdata(dev);
1743 unsigned long flags;
1744 u32 value;
1745
1746 if (kstrtou32(buf, 0, &value))
1747 return -EINVAL;
1748
1749 value = !!value;
1750 if (value == hba->clk_gating.is_enabled)
1751 goto out;
1752
1753 if (value) {
1754 ufshcd_release(hba);
1755 } else {
1756 spin_lock_irqsave(hba->host->host_lock, flags);
1757 hba->clk_gating.active_reqs++;
1758 spin_unlock_irqrestore(hba->host->host_lock, flags);
1759 }
1760
1761 hba->clk_gating.is_enabled = value;
1762out:
1763 return count;
1764}
1765
Vivek Gautameebcc192018-08-07 23:17:39 +05301766static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1767{
1768 char wq_name[sizeof("ufs_clkscaling_00")];
1769
1770 if (!ufshcd_is_clkscaling_supported(hba))
1771 return;
1772
1773 INIT_WORK(&hba->clk_scaling.suspend_work,
1774 ufshcd_clk_scaling_suspend_work);
1775 INIT_WORK(&hba->clk_scaling.resume_work,
1776 ufshcd_clk_scaling_resume_work);
1777
1778 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1779 hba->host->host_no);
1780 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1781
1782 ufshcd_clkscaling_init_sysfs(hba);
1783}
1784
1785static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1786{
1787 if (!ufshcd_is_clkscaling_supported(hba))
1788 return;
1789
1790 destroy_workqueue(hba->clk_scaling.workq);
1791 ufshcd_devfreq_remove(hba);
1792}
1793
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001794static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1795{
Vijay Viswanath10e5e372018-05-03 16:37:22 +05301796 char wq_name[sizeof("ufs_clk_gating_00")];
1797
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001798 if (!ufshcd_is_clkgating_allowed(hba))
1799 return;
1800
1801 hba->clk_gating.delay_ms = 150;
1802 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1803 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1804
Vijay Viswanath10e5e372018-05-03 16:37:22 +05301805 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1806 hba->host->host_no);
1807 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1808 WQ_MEM_RECLAIM);
1809
Sahitya Tummalab4274112016-12-22 18:40:39 -08001810 hba->clk_gating.is_enabled = true;
1811
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001812 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1813 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1814 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1815 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
Sahitya Tummalab4274112016-12-22 18:40:39 -08001816 hba->clk_gating.delay_attr.attr.mode = 0644;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001817 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1818 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
Sahitya Tummalab4274112016-12-22 18:40:39 -08001819
1820 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1821 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1822 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1823 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1824 hba->clk_gating.enable_attr.attr.mode = 0644;
1825 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1826 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001827}
1828
1829static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1830{
1831 if (!ufshcd_is_clkgating_allowed(hba))
1832 return;
1833 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
Sahitya Tummalab4274112016-12-22 18:40:39 -08001834 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
Akinobu Mita97cd6802014-11-24 14:24:18 +09001835 cancel_work_sync(&hba->clk_gating.ungate_work);
1836 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
Vijay Viswanath10e5e372018-05-03 16:37:22 +05301837 destroy_workqueue(hba->clk_gating.clk_gating_workq);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001838}
1839
Sahitya Tummala856b3482014-09-25 15:32:34 +03001840/* Must be called with host lock acquired */
1841static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1842{
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001843 bool queue_resume_work = false;
1844
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001845 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03001846 return;
1847
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08001848 if (!hba->clk_scaling.active_reqs++)
1849 queue_resume_work = true;
1850
1851 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1852 return;
1853
1854 if (queue_resume_work)
1855 queue_work(hba->clk_scaling.workq,
1856 &hba->clk_scaling.resume_work);
1857
1858 if (!hba->clk_scaling.window_start_t) {
1859 hba->clk_scaling.window_start_t = jiffies;
1860 hba->clk_scaling.tot_busy_t = 0;
1861 hba->clk_scaling.is_busy_started = false;
1862 }
1863
Sahitya Tummala856b3482014-09-25 15:32:34 +03001864 if (!hba->clk_scaling.is_busy_started) {
1865 hba->clk_scaling.busy_start_t = ktime_get();
1866 hba->clk_scaling.is_busy_started = true;
1867 }
1868}
1869
1870static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1871{
1872 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1873
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08001874 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03001875 return;
1876
1877 if (!hba->outstanding_reqs && scaling->is_busy_started) {
1878 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1879 scaling->busy_start_t));
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01001880 scaling->busy_start_t = 0;
Sahitya Tummala856b3482014-09-25 15:32:34 +03001881 scaling->is_busy_started = false;
1882 }
1883}
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301884/**
1885 * ufshcd_send_command - Send SCSI or device management commands
1886 * @hba: per adapter instance
1887 * @task_tag: Task tag of the command
1888 */
1889static inline
1890void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1891{
Dolev Ravivff8e20c2016-12-22 18:42:18 -08001892 hba->lrb[task_tag].issue_time_stamp = ktime_get();
Zang Leigang09017182017-09-27 10:06:06 +08001893 hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
Sahitya Tummala856b3482014-09-25 15:32:34 +03001894 ufshcd_clk_scaling_start_busy(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301895 __set_bit(task_tag, &hba->outstanding_reqs);
Seungwon Jeonb873a2752013-06-26 22:39:26 +05301896 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Gilad Bronerad1a1b92016-10-17 17:09:36 -07001897 /* Make sure that doorbell is committed immediately */
1898 wmb();
Lee Susman1a07f2d2016-12-22 18:42:03 -08001899 ufshcd_add_command_trace(hba, task_tag, "send");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301900}
1901
1902/**
1903 * ufshcd_copy_sense_data - Copy sense data in case of check condition
Bart Van Assche8aa29f12018-03-01 15:07:20 -08001904 * @lrbp: pointer to local reference block
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301905 */
1906static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1907{
1908 int len;
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05301909 if (lrbp->sense_buffer &&
1910 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07001911 int len_to_copy;
1912
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301913 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07001914 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
1915
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301916 memcpy(lrbp->sense_buffer,
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301917 lrbp->ucd_rsp_ptr->sr.sense_data,
Gilad Bronerdcea0bf2016-10-17 17:09:48 -07001918 min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301919 }
1920}
1921
1922/**
Dolev Raviv68078d52013-07-30 00:35:58 +05301923 * ufshcd_copy_query_response() - Copy the Query Response and the data
1924 * descriptor
1925 * @hba: per adapter instance
Bart Van Assche8aa29f12018-03-01 15:07:20 -08001926 * @lrbp: pointer to local reference block
Dolev Raviv68078d52013-07-30 00:35:58 +05301927 */
1928static
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001929int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Dolev Raviv68078d52013-07-30 00:35:58 +05301930{
1931 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1932
Dolev Raviv68078d52013-07-30 00:35:58 +05301933 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
Dolev Raviv68078d52013-07-30 00:35:58 +05301934
Dolev Raviv68078d52013-07-30 00:35:58 +05301935 /* Get the descriptor */
1936 if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001937 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
Dolev Raviv68078d52013-07-30 00:35:58 +05301938 GENERAL_UPIU_REQUEST_SIZE;
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001939 u16 resp_len;
1940 u16 buf_len;
Dolev Raviv68078d52013-07-30 00:35:58 +05301941
1942 /* data segment length */
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001943 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
Dolev Raviv68078d52013-07-30 00:35:58 +05301944 MASK_QUERY_DATA_SEG_LEN;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03001945 buf_len = be16_to_cpu(
1946 hba->dev_cmd.query.request.upiu_req.length);
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001947 if (likely(buf_len >= resp_len)) {
1948 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1949 } else {
1950 dev_warn(hba->dev,
1951 "%s: Response size is bigger than buffer",
1952 __func__);
1953 return -EINVAL;
1954 }
Dolev Raviv68078d52013-07-30 00:35:58 +05301955 }
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001956
1957 return 0;
Dolev Raviv68078d52013-07-30 00:35:58 +05301958}
1959
1960/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301961 * ufshcd_hba_capabilities - Read controller capabilities
1962 * @hba: per adapter instance
1963 */
1964static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
1965{
Seungwon Jeonb873a2752013-06-26 22:39:26 +05301966 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301967
1968 /* nutrs and nutmrs are 0 based values */
1969 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
1970 hba->nutmrs =
1971 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
1972}
1973
1974/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301975 * ufshcd_ready_for_uic_cmd - Check if controller is ready
1976 * to accept UIC commands
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301977 * @hba: per adapter instance
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301978 * Return true on success, else false
1979 */
1980static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
1981{
1982 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
1983 return true;
1984 else
1985 return false;
1986}
1987
1988/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05301989 * ufshcd_get_upmcrs - Get the power mode change request status
1990 * @hba: Pointer to adapter instance
1991 *
1992 * This function gets the UPMCRS field of HCS register
1993 * Returns value of UPMCRS field
1994 */
1995static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
1996{
1997 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1998}
1999
2000/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302001 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2002 * @hba: per adapter instance
2003 * @uic_cmd: UIC command
2004 *
2005 * Mutex must be held.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302006 */
2007static inline void
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302008ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302009{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302010 WARN_ON(hba->active_uic_cmd);
2011
2012 hba->active_uic_cmd = uic_cmd;
2013
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302014 /* Write Args */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302015 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2016 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2017 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302018
2019 /* Write UIC Cmd */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302020 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
Seungwon Jeonb873a2752013-06-26 22:39:26 +05302021 REG_UIC_COMMAND);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302022}
2023
2024/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302025 * ufshcd_wait_for_uic_cmd - Wait complectioin of UIC command
2026 * @hba: per adapter instance
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002027 * @uic_cmd: UIC command
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302028 *
2029 * Must be called with mutex held.
2030 * Returns 0 only if success.
2031 */
2032static int
2033ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2034{
2035 int ret;
2036 unsigned long flags;
2037
2038 if (wait_for_completion_timeout(&uic_cmd->done,
2039 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2040 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2041 else
2042 ret = -ETIMEDOUT;
2043
2044 spin_lock_irqsave(hba->host->host_lock, flags);
2045 hba->active_uic_cmd = NULL;
2046 spin_unlock_irqrestore(hba->host->host_lock, flags);
2047
2048 return ret;
2049}
2050
2051/**
2052 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2053 * @hba: per adapter instance
2054 * @uic_cmd: UIC command
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002055 * @completion: initialize the completion only if this is set to true
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302056 *
2057 * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002058 * with mutex held and host_lock locked.
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302059 * Returns 0 only if success.
2060 */
2061static int
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002062__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2063 bool completion)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302064{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302065 if (!ufshcd_ready_for_uic_cmd(hba)) {
2066 dev_err(hba->dev,
2067 "Controller not ready to accept UIC commands\n");
2068 return -EIO;
2069 }
2070
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002071 if (completion)
2072 init_completion(&uic_cmd->done);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302073
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302074 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302075
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002076 return 0;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302077}
2078
2079/**
2080 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2081 * @hba: per adapter instance
2082 * @uic_cmd: UIC command
2083 *
2084 * Returns 0 only if success.
2085 */
2086static int
2087ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2088{
2089 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002090 unsigned long flags;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302091
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002092 ufshcd_hold(hba, false);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302093 mutex_lock(&hba->uic_cmd_mutex);
Yaniv Gardicad2e032015-03-31 17:37:14 +03002094 ufshcd_add_delay_before_dme_cmd(hba);
2095
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002096 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002097 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002098 spin_unlock_irqrestore(hba->host->host_lock, flags);
2099 if (!ret)
2100 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2101
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302102 mutex_unlock(&hba->uic_cmd_mutex);
2103
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002104 ufshcd_release(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302105 return ret;
2106}
2107
2108/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302109 * ufshcd_map_sg - Map scatter-gather list to prdt
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002110 * @hba: per adapter instance
2111 * @lrbp: pointer to local reference block
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302112 *
2113 * Returns 0 in case of success, non-zero value in case of failure
2114 */
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002115static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302116{
2117 struct ufshcd_sg_entry *prd_table;
2118 struct scatterlist *sg;
2119 struct scsi_cmnd *cmd;
2120 int sg_segments;
2121 int i;
2122
2123 cmd = lrbp->cmd;
2124 sg_segments = scsi_dma_map(cmd);
2125 if (sg_segments < 0)
2126 return sg_segments;
2127
2128 if (sg_segments) {
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002129 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2130 lrbp->utr_descriptor_ptr->prd_table_length =
2131 cpu_to_le16((u16)(sg_segments *
2132 sizeof(struct ufshcd_sg_entry)));
2133 else
2134 lrbp->utr_descriptor_ptr->prd_table_length =
2135 cpu_to_le16((u16) (sg_segments));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302136
2137 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2138
2139 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2140 prd_table[i].size =
2141 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2142 prd_table[i].base_addr =
2143 cpu_to_le32(lower_32_bits(sg->dma_address));
2144 prd_table[i].upper_addr =
2145 cpu_to_le32(upper_32_bits(sg->dma_address));
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002146 prd_table[i].reserved = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302147 }
2148 } else {
2149 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2150 }
2151
2152 return 0;
2153}
2154
2155/**
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302156 * ufshcd_enable_intr - enable interrupts
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302157 * @hba: per adapter instance
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302158 * @intrs: interrupt bits
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302159 */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302160static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302161{
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302162 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2163
2164 if (hba->ufs_version == UFSHCI_VERSION_10) {
2165 u32 rw;
2166 rw = set & INTERRUPT_MASK_RW_VER_10;
2167 set = rw | ((set ^ intrs) & intrs);
2168 } else {
2169 set |= intrs;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302170 }
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302171
2172 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2173}
2174
2175/**
2176 * ufshcd_disable_intr - disable interrupts
2177 * @hba: per adapter instance
2178 * @intrs: interrupt bits
2179 */
2180static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2181{
2182 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2183
2184 if (hba->ufs_version == UFSHCI_VERSION_10) {
2185 u32 rw;
2186 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2187 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2188 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2189
2190 } else {
2191 set &= ~intrs;
2192 }
2193
2194 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302195}
2196
2197/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302198 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
2199 * descriptor according to request
2200 * @lrbp: pointer to local reference block
2201 * @upiu_flags: flags required in the header
2202 * @cmd_dir: requests data direction
2203 */
2204static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
Joao Pinto300bb132016-05-11 12:21:27 +01002205 u32 *upiu_flags, enum dma_data_direction cmd_dir)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302206{
2207 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2208 u32 data_direction;
2209 u32 dword_0;
2210
2211 if (cmd_dir == DMA_FROM_DEVICE) {
2212 data_direction = UTP_DEVICE_TO_HOST;
2213 *upiu_flags = UPIU_CMD_FLAGS_READ;
2214 } else if (cmd_dir == DMA_TO_DEVICE) {
2215 data_direction = UTP_HOST_TO_DEVICE;
2216 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2217 } else {
2218 data_direction = UTP_NO_DATA_TRANSFER;
2219 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2220 }
2221
2222 dword_0 = data_direction | (lrbp->command_type
2223 << UPIU_COMMAND_TYPE_OFFSET);
2224 if (lrbp->intr_cmd)
2225 dword_0 |= UTP_REQ_DESC_INT_CMD;
2226
2227 /* Transfer request descriptor header fields */
2228 req_desc->header.dword_0 = cpu_to_le32(dword_0);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002229 /* dword_1 is reserved, hence it is set to 0 */
2230 req_desc->header.dword_1 = 0;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302231 /*
2232 * assigning invalid value for command status. Controller
2233 * updates OCS on command completion, with the command
2234 * status
2235 */
2236 req_desc->header.dword_2 =
2237 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002238 /* dword_3 is reserved, hence it is set to 0 */
2239 req_desc->header.dword_3 = 0;
Yaniv Gardi51047262016-02-01 15:02:38 +02002240
2241 req_desc->prd_table_length = 0;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302242}
2243
2244/**
2245 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2246 * for scsi commands
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002247 * @lrbp: local reference block pointer
2248 * @upiu_flags: flags
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302249 */
2250static
2251void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2252{
2253 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002254 unsigned short cdb_len;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302255
2256 /* command descriptor fields */
2257 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2258 UPIU_TRANSACTION_COMMAND, upiu_flags,
2259 lrbp->lun, lrbp->task_tag);
2260 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2261 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2262
2263 /* Total EHS length and Data segment length will be zero */
2264 ucd_req_ptr->header.dword_2 = 0;
2265
2266 ucd_req_ptr->sc.exp_data_transfer_len =
2267 cpu_to_be32(lrbp->cmd->sdb.length);
2268
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002269 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
2270 memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
2271 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2272
2273 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302274}
2275
Dolev Raviv68078d52013-07-30 00:35:58 +05302276/**
2277 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2278 * for query requsts
2279 * @hba: UFS hba
2280 * @lrbp: local reference block pointer
2281 * @upiu_flags: flags
2282 */
2283static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2284 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2285{
2286 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2287 struct ufs_query *query = &hba->dev_cmd.query;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302288 u16 len = be16_to_cpu(query->request.upiu_req.length);
Dolev Raviv68078d52013-07-30 00:35:58 +05302289 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2290
2291 /* Query request header */
2292 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2293 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2294 lrbp->lun, lrbp->task_tag);
2295 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2296 0, query->request.query_func, 0, 0);
2297
Zang Leigang68612852016-08-25 17:39:19 +08002298 /* Data segment length only need for WRITE_DESC */
2299 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2300 ucd_req_ptr->header.dword_2 =
2301 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2302 else
2303 ucd_req_ptr->header.dword_2 = 0;
Dolev Raviv68078d52013-07-30 00:35:58 +05302304
2305 /* Copy the Query Request buffer as is */
2306 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2307 QUERY_OSF_SIZE);
Dolev Raviv68078d52013-07-30 00:35:58 +05302308
2309 /* Copy the Descriptor */
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002310 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2311 memcpy(descp, query->descriptor, len);
2312
Yaniv Gardi51047262016-02-01 15:02:38 +02002313 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Dolev Raviv68078d52013-07-30 00:35:58 +05302314}
2315
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302316static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2317{
2318 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2319
2320 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2321
2322 /* command descriptor fields */
2323 ucd_req_ptr->header.dword_0 =
2324 UPIU_HEADER_DWORD(
2325 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
Yaniv Gardi51047262016-02-01 15:02:38 +02002326 /* clear rest of the fields of basic header */
2327 ucd_req_ptr->header.dword_1 = 0;
2328 ucd_req_ptr->header.dword_2 = 0;
2329
2330 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302331}
2332
2333/**
Joao Pinto300bb132016-05-11 12:21:27 +01002334 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
2335 * for Device Management Purposes
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002336 * @hba: per adapter instance
2337 * @lrbp: pointer to local reference block
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302338 */
Joao Pinto300bb132016-05-11 12:21:27 +01002339static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302340{
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302341 u32 upiu_flags;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302342 int ret = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302343
kehuanlin83dc7e32017-09-06 17:58:39 +08002344 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2345 (hba->ufs_version == UFSHCI_VERSION_11))
Joao Pinto300bb132016-05-11 12:21:27 +01002346 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
kehuanlin83dc7e32017-09-06 17:58:39 +08002347 else
2348 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
Joao Pinto300bb132016-05-11 12:21:27 +01002349
2350 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2351 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2352 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2353 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2354 ufshcd_prepare_utp_nop_upiu(lrbp);
2355 else
2356 ret = -EINVAL;
2357
2358 return ret;
2359}
2360
2361/**
2362 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
2363 * for SCSI Purposes
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002364 * @hba: per adapter instance
2365 * @lrbp: pointer to local reference block
Joao Pinto300bb132016-05-11 12:21:27 +01002366 */
2367static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2368{
2369 u32 upiu_flags;
2370 int ret = 0;
2371
kehuanlin83dc7e32017-09-06 17:58:39 +08002372 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2373 (hba->ufs_version == UFSHCI_VERSION_11))
Joao Pinto300bb132016-05-11 12:21:27 +01002374 lrbp->command_type = UTP_CMD_TYPE_SCSI;
kehuanlin83dc7e32017-09-06 17:58:39 +08002375 else
2376 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
Joao Pinto300bb132016-05-11 12:21:27 +01002377
2378 if (likely(lrbp->cmd)) {
2379 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2380 lrbp->cmd->sc_data_direction);
2381 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2382 } else {
2383 ret = -EINVAL;
2384 }
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302385
2386 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302387}
2388
2389/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03002390 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002391 * @upiu_wlun_id: UPIU W-LUN id
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03002392 *
2393 * Returns SCSI W-LUN id
2394 */
2395static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2396{
2397 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2398}
2399
2400/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302401 * ufshcd_queuecommand - main entry point for SCSI requests
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002402 * @host: SCSI host pointer
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302403 * @cmd: command from SCSI Midlayer
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302404 *
2405 * Returns 0 for success, non-zero in case of failure
2406 */
2407static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2408{
2409 struct ufshcd_lrb *lrbp;
2410 struct ufs_hba *hba;
2411 unsigned long flags;
2412 int tag;
2413 int err = 0;
2414
2415 hba = shost_priv(host);
2416
2417 tag = cmd->request->tag;
Yaniv Gardi14497322016-02-01 15:02:39 +02002418 if (!ufshcd_valid_tag(hba, tag)) {
2419 dev_err(hba->dev,
2420 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2421 __func__, tag, cmd, cmd->request);
2422 BUG();
2423 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302424
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08002425 if (!down_read_trylock(&hba->clk_scaling_lock))
2426 return SCSI_MLQUEUE_HOST_BUSY;
2427
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302428 spin_lock_irqsave(hba->host->host_lock, flags);
2429 switch (hba->ufshcd_state) {
2430 case UFSHCD_STATE_OPERATIONAL:
2431 break;
Zang Leigang141f8162016-11-16 11:29:37 +08002432 case UFSHCD_STATE_EH_SCHEDULED:
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302433 case UFSHCD_STATE_RESET:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302434 err = SCSI_MLQUEUE_HOST_BUSY;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302435 goto out_unlock;
2436 case UFSHCD_STATE_ERROR:
2437 set_host_byte(cmd, DID_ERROR);
2438 cmd->scsi_done(cmd);
2439 goto out_unlock;
2440 default:
2441 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2442 __func__, hba->ufshcd_state);
2443 set_host_byte(cmd, DID_BAD_TARGET);
2444 cmd->scsi_done(cmd);
2445 goto out_unlock;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302446 }
Yaniv Gardi53c12d02016-02-01 15:02:45 +02002447
2448 /* if error handling is in progress, don't issue commands */
2449 if (ufshcd_eh_in_progress(hba)) {
2450 set_host_byte(cmd, DID_ERROR);
2451 cmd->scsi_done(cmd);
2452 goto out_unlock;
2453 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302454 spin_unlock_irqrestore(hba->host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302455
Gilad Broner7fabb772017-02-03 16:56:50 -08002456 hba->req_abort_count = 0;
2457
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302458 /* acquire the tag to make sure device cmds don't use it */
2459 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
2460 /*
2461 * Dev manage command in progress, requeue the command.
2462 * Requeuing the command helps in cases where the request *may*
2463 * find different tag instead of waiting for dev manage command
2464 * completion.
2465 */
2466 err = SCSI_MLQUEUE_HOST_BUSY;
2467 goto out;
2468 }
2469
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002470 err = ufshcd_hold(hba, true);
2471 if (err) {
2472 err = SCSI_MLQUEUE_HOST_BUSY;
2473 clear_bit_unlock(tag, &hba->lrb_in_use);
2474 goto out;
2475 }
2476 WARN_ON(hba->clk_gating.state != CLKS_ON);
2477
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302478 lrbp = &hba->lrb[tag];
2479
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302480 WARN_ON(lrbp->cmd);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302481 lrbp->cmd = cmd;
Gilad Bronerdcea0bf2016-10-17 17:09:48 -07002482 lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302483 lrbp->sense_buffer = cmd->sense_buffer;
2484 lrbp->task_tag = tag;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03002485 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
Yaniv Gardib8521902015-05-17 18:54:57 +03002486 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
Gilad Bronere0b299e2017-02-03 16:56:40 -08002487 lrbp->req_abort_skip = false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302488
Joao Pinto300bb132016-05-11 12:21:27 +01002489 ufshcd_comp_scsi_upiu(hba, lrbp);
2490
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002491 err = ufshcd_map_sg(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302492 if (err) {
2493 lrbp->cmd = NULL;
2494 clear_bit_unlock(tag, &hba->lrb_in_use);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302495 goto out;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302496 }
Gilad Bronerad1a1b92016-10-17 17:09:36 -07002497 /* Make sure descriptors are ready before ringing the doorbell */
2498 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302499
2500 /* issue command to the controller */
2501 spin_lock_irqsave(hba->host->host_lock, flags);
Kiwoong Kim0e675ef2016-11-10 21:14:36 +09002502 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302503 ufshcd_send_command(hba, tag);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302504out_unlock:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302505 spin_unlock_irqrestore(hba->host->host_lock, flags);
2506out:
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08002507 up_read(&hba->clk_scaling_lock);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302508 return err;
2509}
2510
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302511static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2512 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2513{
2514 lrbp->cmd = NULL;
2515 lrbp->sense_bufflen = 0;
2516 lrbp->sense_buffer = NULL;
2517 lrbp->task_tag = tag;
2518 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302519 lrbp->intr_cmd = true; /* No interrupt aggregation */
2520 hba->dev_cmd.type = cmd_type;
2521
Joao Pinto300bb132016-05-11 12:21:27 +01002522 return ufshcd_comp_devman_upiu(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302523}
2524
2525static int
2526ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2527{
2528 int err = 0;
2529 unsigned long flags;
2530 u32 mask = 1 << tag;
2531
2532 /* clear outstanding transaction before retry */
2533 spin_lock_irqsave(hba->host->host_lock, flags);
2534 ufshcd_utrl_clear(hba, tag);
2535 spin_unlock_irqrestore(hba->host->host_lock, flags);
2536
2537 /*
2538 * wait for for h/w to clear corresponding bit in door-bell.
2539 * max. wait is 1 sec.
2540 */
2541 err = ufshcd_wait_for_register(hba,
2542 REG_UTP_TRANSFER_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02002543 mask, ~mask, 1000, 1000, true);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302544
2545 return err;
2546}
2547
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002548static int
2549ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2550{
2551 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2552
2553 /* Get the UPIU response */
2554 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2555 UPIU_RSP_CODE_OFFSET;
2556 return query_res->response;
2557}
2558
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302559/**
2560 * ufshcd_dev_cmd_completion() - handles device management command responses
2561 * @hba: per adapter instance
2562 * @lrbp: pointer to local reference block
2563 */
2564static int
2565ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2566{
2567 int resp;
2568 int err = 0;
2569
Dolev Ravivff8e20c2016-12-22 18:42:18 -08002570 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302571 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2572
2573 switch (resp) {
2574 case UPIU_TRANSACTION_NOP_IN:
2575 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2576 err = -EINVAL;
2577 dev_err(hba->dev, "%s: unexpected response %x\n",
2578 __func__, resp);
2579 }
2580 break;
Dolev Raviv68078d52013-07-30 00:35:58 +05302581 case UPIU_TRANSACTION_QUERY_RSP:
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002582 err = ufshcd_check_query_response(hba, lrbp);
2583 if (!err)
2584 err = ufshcd_copy_query_response(hba, lrbp);
Dolev Raviv68078d52013-07-30 00:35:58 +05302585 break;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302586 case UPIU_TRANSACTION_REJECT_UPIU:
2587 /* TODO: handle Reject UPIU Response */
2588 err = -EPERM;
2589 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2590 __func__);
2591 break;
2592 default:
2593 err = -EINVAL;
2594 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2595 __func__, resp);
2596 break;
2597 }
2598
2599 return err;
2600}
2601
2602static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2603 struct ufshcd_lrb *lrbp, int max_timeout)
2604{
2605 int err = 0;
2606 unsigned long time_left;
2607 unsigned long flags;
2608
2609 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2610 msecs_to_jiffies(max_timeout));
2611
Gilad Bronerad1a1b92016-10-17 17:09:36 -07002612 /* Make sure descriptors are ready before ringing the doorbell */
2613 wmb();
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302614 spin_lock_irqsave(hba->host->host_lock, flags);
2615 hba->dev_cmd.complete = NULL;
2616 if (likely(time_left)) {
2617 err = ufshcd_get_tr_ocs(lrbp);
2618 if (!err)
2619 err = ufshcd_dev_cmd_completion(hba, lrbp);
2620 }
2621 spin_unlock_irqrestore(hba->host->host_lock, flags);
2622
2623 if (!time_left) {
2624 err = -ETIMEDOUT;
Yaniv Gardia48353f2016-02-01 15:02:40 +02002625 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2626 __func__, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302627 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
Yaniv Gardia48353f2016-02-01 15:02:40 +02002628 /* successfully cleared the command, retry if needed */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302629 err = -EAGAIN;
Yaniv Gardia48353f2016-02-01 15:02:40 +02002630 /*
2631 * in case of an error, after clearing the doorbell,
2632 * we also need to clear the outstanding_request
2633 * field in hba
2634 */
2635 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302636 }
2637
2638 return err;
2639}
2640
2641/**
2642 * ufshcd_get_dev_cmd_tag - Get device management command tag
2643 * @hba: per-adapter instance
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002644 * @tag_out: pointer to variable with available slot value
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302645 *
2646 * Get a free slot and lock it until device management command
2647 * completes.
2648 *
2649 * Returns false if free slot is unavailable for locking, else
2650 * return true with tag value in @tag.
2651 */
2652static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2653{
2654 int tag;
2655 bool ret = false;
2656 unsigned long tmp;
2657
2658 if (!tag_out)
2659 goto out;
2660
2661 do {
2662 tmp = ~hba->lrb_in_use;
2663 tag = find_last_bit(&tmp, hba->nutrs);
2664 if (tag >= hba->nutrs)
2665 goto out;
2666 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2667
2668 *tag_out = tag;
2669 ret = true;
2670out:
2671 return ret;
2672}
2673
2674static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2675{
2676 clear_bit_unlock(tag, &hba->lrb_in_use);
2677}
2678
2679/**
2680 * ufshcd_exec_dev_cmd - API for sending device management requests
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002681 * @hba: UFS hba
2682 * @cmd_type: specifies the type (NOP, Query...)
2683 * @timeout: time in seconds
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302684 *
Dolev Raviv68078d52013-07-30 00:35:58 +05302685 * NOTE: Since there is only one available tag for device management commands,
2686 * it is expected you hold the hba->dev_cmd.lock mutex.
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302687 */
2688static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2689 enum dev_cmd_type cmd_type, int timeout)
2690{
2691 struct ufshcd_lrb *lrbp;
2692 int err;
2693 int tag;
2694 struct completion wait;
2695 unsigned long flags;
2696
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08002697 down_read(&hba->clk_scaling_lock);
2698
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302699 /*
2700 * Get free slot, sleep if slots are unavailable.
2701 * Even though we use wait_event() which sleeps indefinitely,
2702 * the maximum wait time is bounded by SCSI request timeout.
2703 */
2704 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2705
2706 init_completion(&wait);
2707 lrbp = &hba->lrb[tag];
2708 WARN_ON(lrbp->cmd);
2709 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2710 if (unlikely(err))
2711 goto out_put_tag;
2712
2713 hba->dev_cmd.complete = &wait;
2714
Ohad Sharabi6667e6d2018-03-28 12:42:18 +03002715 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
Yaniv Gardie3dfdc52016-02-01 15:02:49 +02002716 /* Make sure descriptors are ready before ringing the doorbell */
2717 wmb();
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302718 spin_lock_irqsave(hba->host->host_lock, flags);
Kiwoong Kim0e675ef2016-11-10 21:14:36 +09002719 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302720 ufshcd_send_command(hba, tag);
2721 spin_unlock_irqrestore(hba->host->host_lock, flags);
2722
2723 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2724
Ohad Sharabi6667e6d2018-03-28 12:42:18 +03002725 ufshcd_add_query_upiu_trace(hba, tag,
2726 err ? "query_complete_err" : "query_complete");
2727
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302728out_put_tag:
2729 ufshcd_put_dev_cmd_tag(hba, tag);
2730 wake_up(&hba->dev_cmd.tag_wq);
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08002731 up_read(&hba->clk_scaling_lock);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302732 return err;
2733}
2734
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302735/**
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002736 * ufshcd_init_query() - init the query response and request parameters
2737 * @hba: per-adapter instance
2738 * @request: address of the request pointer to be initialized
2739 * @response: address of the response pointer to be initialized
2740 * @opcode: operation to perform
2741 * @idn: flag idn to access
2742 * @index: LU number to access
2743 * @selector: query/flag/descriptor further identification
2744 */
2745static inline void ufshcd_init_query(struct ufs_hba *hba,
2746 struct ufs_query_req **request, struct ufs_query_res **response,
2747 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2748{
2749 *request = &hba->dev_cmd.query.request;
2750 *response = &hba->dev_cmd.query.response;
2751 memset(*request, 0, sizeof(struct ufs_query_req));
2752 memset(*response, 0, sizeof(struct ufs_query_res));
2753 (*request)->upiu_req.opcode = opcode;
2754 (*request)->upiu_req.idn = idn;
2755 (*request)->upiu_req.index = index;
2756 (*request)->upiu_req.selector = selector;
2757}
2758
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02002759static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2760 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2761{
2762 int ret;
2763 int retries;
2764
2765 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2766 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2767 if (ret)
2768 dev_dbg(hba->dev,
2769 "%s: failed with error %d, retries %d\n",
2770 __func__, ret, retries);
2771 else
2772 break;
2773 }
2774
2775 if (ret)
2776 dev_err(hba->dev,
2777 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
2778 __func__, opcode, idn, ret, retries);
2779 return ret;
2780}
2781
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002782/**
Dolev Raviv68078d52013-07-30 00:35:58 +05302783 * ufshcd_query_flag() - API function for sending flag query requests
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002784 * @hba: per-adapter instance
2785 * @opcode: flag query to perform
2786 * @idn: flag idn to access
2787 * @flag_res: the flag value after the query request completes
Dolev Raviv68078d52013-07-30 00:35:58 +05302788 *
2789 * Returns 0 for success, non-zero in case of failure
2790 */
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02002791int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
Dolev Raviv68078d52013-07-30 00:35:58 +05302792 enum flag_idn idn, bool *flag_res)
2793{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002794 struct ufs_query_req *request = NULL;
2795 struct ufs_query_res *response = NULL;
2796 int err, index = 0, selector = 0;
Yaniv Gardie5ad4062016-02-01 15:02:41 +02002797 int timeout = QUERY_REQ_TIMEOUT;
Dolev Raviv68078d52013-07-30 00:35:58 +05302798
2799 BUG_ON(!hba);
2800
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002801 ufshcd_hold(hba, false);
Dolev Raviv68078d52013-07-30 00:35:58 +05302802 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002803 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2804 selector);
Dolev Raviv68078d52013-07-30 00:35:58 +05302805
2806 switch (opcode) {
2807 case UPIU_QUERY_OPCODE_SET_FLAG:
2808 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2809 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2810 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2811 break;
2812 case UPIU_QUERY_OPCODE_READ_FLAG:
2813 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2814 if (!flag_res) {
2815 /* No dummy reads */
2816 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2817 __func__);
2818 err = -EINVAL;
2819 goto out_unlock;
2820 }
2821 break;
2822 default:
2823 dev_err(hba->dev,
2824 "%s: Expected query flag opcode but got = %d\n",
2825 __func__, opcode);
2826 err = -EINVAL;
2827 goto out_unlock;
2828 }
Dolev Raviv68078d52013-07-30 00:35:58 +05302829
Yaniv Gardie5ad4062016-02-01 15:02:41 +02002830 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
Dolev Raviv68078d52013-07-30 00:35:58 +05302831
2832 if (err) {
2833 dev_err(hba->dev,
2834 "%s: Sending flag query for idn %d failed, err = %d\n",
2835 __func__, idn, err);
2836 goto out_unlock;
2837 }
2838
2839 if (flag_res)
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302840 *flag_res = (be32_to_cpu(response->upiu_res.value) &
Dolev Raviv68078d52013-07-30 00:35:58 +05302841 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2842
2843out_unlock:
2844 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002845 ufshcd_release(hba);
Dolev Raviv68078d52013-07-30 00:35:58 +05302846 return err;
2847}
2848
2849/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302850 * ufshcd_query_attr - API function for sending attribute requests
Bart Van Assche8aa29f12018-03-01 15:07:20 -08002851 * @hba: per-adapter instance
2852 * @opcode: attribute opcode
2853 * @idn: attribute idn to access
2854 * @index: index field
2855 * @selector: selector field
2856 * @attr_val: the attribute value after the query request completes
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302857 *
2858 * Returns 0 for success, non-zero in case of failure
2859*/
Stanislav Nijnikovec92b592018-02-15 14:14:11 +02002860int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2861 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302862{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002863 struct ufs_query_req *request = NULL;
2864 struct ufs_query_res *response = NULL;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302865 int err;
2866
2867 BUG_ON(!hba);
2868
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002869 ufshcd_hold(hba, false);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302870 if (!attr_val) {
2871 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2872 __func__, opcode);
2873 err = -EINVAL;
2874 goto out;
2875 }
2876
2877 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002878 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2879 selector);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302880
2881 switch (opcode) {
2882 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2883 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302884 request->upiu_req.value = cpu_to_be32(*attr_val);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302885 break;
2886 case UPIU_QUERY_OPCODE_READ_ATTR:
2887 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2888 break;
2889 default:
2890 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2891 __func__, opcode);
2892 err = -EINVAL;
2893 goto out_unlock;
2894 }
2895
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002896 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302897
2898 if (err) {
Yaniv Gardi4b761b52016-11-23 16:31:18 -08002899 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2900 __func__, opcode, idn, index, err);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302901 goto out_unlock;
2902 }
2903
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302904 *attr_val = be32_to_cpu(response->upiu_res.value);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302905
2906out_unlock:
2907 mutex_unlock(&hba->dev_cmd.lock);
2908out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002909 ufshcd_release(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302910 return err;
2911}
2912
2913/**
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02002914 * ufshcd_query_attr_retry() - API function for sending query
2915 * attribute with retries
2916 * @hba: per-adapter instance
2917 * @opcode: attribute opcode
2918 * @idn: attribute idn to access
2919 * @index: index field
2920 * @selector: selector field
2921 * @attr_val: the attribute value after the query request
2922 * completes
2923 *
2924 * Returns 0 for success, non-zero in case of failure
2925*/
2926static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2927 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2928 u32 *attr_val)
2929{
2930 int ret = 0;
2931 u32 retries;
2932
2933 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2934 ret = ufshcd_query_attr(hba, opcode, idn, index,
2935 selector, attr_val);
2936 if (ret)
2937 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2938 __func__, ret, retries);
2939 else
2940 break;
2941 }
2942
2943 if (ret)
2944 dev_err(hba->dev,
2945 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
2946 __func__, idn, ret, QUERY_REQ_RETRIES);
2947 return ret;
2948}
2949
Yaniv Gardia70e91b2016-03-10 17:37:14 +02002950static int __ufshcd_query_descriptor(struct ufs_hba *hba,
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002951 enum query_opcode opcode, enum desc_idn idn, u8 index,
2952 u8 selector, u8 *desc_buf, int *buf_len)
2953{
2954 struct ufs_query_req *request = NULL;
2955 struct ufs_query_res *response = NULL;
2956 int err;
2957
2958 BUG_ON(!hba);
2959
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002960 ufshcd_hold(hba, false);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002961 if (!desc_buf) {
2962 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2963 __func__, opcode);
2964 err = -EINVAL;
2965 goto out;
2966 }
2967
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00002968 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002969 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2970 __func__, *buf_len);
2971 err = -EINVAL;
2972 goto out;
2973 }
2974
2975 mutex_lock(&hba->dev_cmd.lock);
2976 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2977 selector);
2978 hba->dev_cmd.query.descriptor = desc_buf;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03002979 request->upiu_req.length = cpu_to_be16(*buf_len);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002980
2981 switch (opcode) {
2982 case UPIU_QUERY_OPCODE_WRITE_DESC:
2983 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2984 break;
2985 case UPIU_QUERY_OPCODE_READ_DESC:
2986 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2987 break;
2988 default:
2989 dev_err(hba->dev,
2990 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
2991 __func__, opcode);
2992 err = -EINVAL;
2993 goto out_unlock;
2994 }
2995
2996 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2997
2998 if (err) {
Yaniv Gardi4b761b52016-11-23 16:31:18 -08002999 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3000 __func__, opcode, idn, index, err);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003001 goto out_unlock;
3002 }
3003
3004 hba->dev_cmd.query.descriptor = NULL;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03003005 *buf_len = be16_to_cpu(response->upiu_res.length);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003006
3007out_unlock:
3008 mutex_unlock(&hba->dev_cmd.lock);
3009out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003010 ufshcd_release(hba);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003011 return err;
3012}
3013
3014/**
Bart Van Assche8aa29f12018-03-01 15:07:20 -08003015 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3016 * @hba: per-adapter instance
3017 * @opcode: attribute opcode
3018 * @idn: attribute idn to access
3019 * @index: index field
3020 * @selector: selector field
3021 * @desc_buf: the buffer that contains the descriptor
3022 * @buf_len: length parameter passed to the device
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003023 *
3024 * Returns 0 for success, non-zero in case of failure.
3025 * The buf_len parameter will contain, on return, the length parameter
3026 * received on the response.
3027 */
Stanislav Nijnikov2238d312018-02-15 14:14:07 +02003028int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3029 enum query_opcode opcode,
3030 enum desc_idn idn, u8 index,
3031 u8 selector,
3032 u8 *desc_buf, int *buf_len)
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003033{
3034 int err;
3035 int retries;
3036
3037 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3038 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3039 selector, desc_buf, buf_len);
3040 if (!err || err == -EINVAL)
3041 break;
3042 }
3043
3044 return err;
3045}
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003046
3047/**
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003048 * ufshcd_read_desc_length - read the specified descriptor length from header
3049 * @hba: Pointer to adapter instance
3050 * @desc_id: descriptor idn value
3051 * @desc_index: descriptor index
3052 * @desc_length: pointer to variable to read the length of descriptor
3053 *
3054 * Return 0 in case of success, non-zero otherwise
3055 */
3056static int ufshcd_read_desc_length(struct ufs_hba *hba,
3057 enum desc_idn desc_id,
3058 int desc_index,
3059 int *desc_length)
3060{
3061 int ret;
3062 u8 header[QUERY_DESC_HDR_SIZE];
3063 int header_len = QUERY_DESC_HDR_SIZE;
3064
3065 if (desc_id >= QUERY_DESC_IDN_MAX)
3066 return -EINVAL;
3067
3068 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3069 desc_id, desc_index, 0, header,
3070 &header_len);
3071
3072 if (ret) {
3073 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3074 __func__, desc_id);
3075 return ret;
3076 } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3077 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3078 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3079 desc_id);
3080 ret = -EINVAL;
3081 }
3082
3083 *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3084 return ret;
3085
3086}
3087
3088/**
3089 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3090 * @hba: Pointer to adapter instance
3091 * @desc_id: descriptor idn value
3092 * @desc_len: mapped desc length (out)
3093 *
3094 * Return 0 in case of success, non-zero otherwise
3095 */
3096int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3097 enum desc_idn desc_id, int *desc_len)
3098{
3099 switch (desc_id) {
3100 case QUERY_DESC_IDN_DEVICE:
3101 *desc_len = hba->desc_size.dev_desc;
3102 break;
3103 case QUERY_DESC_IDN_POWER:
3104 *desc_len = hba->desc_size.pwr_desc;
3105 break;
3106 case QUERY_DESC_IDN_GEOMETRY:
3107 *desc_len = hba->desc_size.geom_desc;
3108 break;
3109 case QUERY_DESC_IDN_CONFIGURATION:
3110 *desc_len = hba->desc_size.conf_desc;
3111 break;
3112 case QUERY_DESC_IDN_UNIT:
3113 *desc_len = hba->desc_size.unit_desc;
3114 break;
3115 case QUERY_DESC_IDN_INTERCONNECT:
3116 *desc_len = hba->desc_size.interc_desc;
3117 break;
3118 case QUERY_DESC_IDN_STRING:
3119 *desc_len = QUERY_DESC_MAX_SIZE;
3120 break;
Stanislav Nijnikovc648c2d2018-02-15 14:14:05 +02003121 case QUERY_DESC_IDN_HEALTH:
3122 *desc_len = hba->desc_size.hlth_desc;
3123 break;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003124 case QUERY_DESC_IDN_RFU_0:
3125 case QUERY_DESC_IDN_RFU_1:
3126 *desc_len = 0;
3127 break;
3128 default:
3129 *desc_len = 0;
3130 return -EINVAL;
3131 }
3132 return 0;
3133}
3134EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3135
3136/**
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003137 * ufshcd_read_desc_param - read the specified descriptor parameter
3138 * @hba: Pointer to adapter instance
3139 * @desc_id: descriptor idn value
3140 * @desc_index: descriptor index
3141 * @param_offset: offset of the parameter to read
3142 * @param_read_buf: pointer to buffer where parameter would be read
3143 * @param_size: sizeof(param_read_buf)
3144 *
3145 * Return 0 in case of success, non-zero otherwise
3146 */
Stanislav Nijnikov45bced82018-02-15 14:14:02 +02003147int ufshcd_read_desc_param(struct ufs_hba *hba,
3148 enum desc_idn desc_id,
3149 int desc_index,
3150 u8 param_offset,
3151 u8 *param_read_buf,
3152 u8 param_size)
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003153{
3154 int ret;
3155 u8 *desc_buf;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003156 int buff_len;
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003157 bool is_kmalloc = true;
3158
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003159 /* Safety check */
3160 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003161 return -EINVAL;
3162
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003163 /* Get the max length of descriptor from structure filled up at probe
3164 * time.
3165 */
3166 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003167
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003168 /* Sanity checks */
3169 if (ret || !buff_len) {
3170 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3171 __func__);
3172 return ret;
3173 }
3174
3175 /* Check whether we need temp memory */
3176 if (param_offset != 0 || param_size < buff_len) {
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003177 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3178 if (!desc_buf)
3179 return -ENOMEM;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003180 } else {
3181 desc_buf = param_read_buf;
3182 is_kmalloc = false;
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003183 }
3184
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003185 /* Request for full descriptor */
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003186 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003187 desc_id, desc_index, 0,
3188 desc_buf, &buff_len);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003189
subhashj@codeaurora.orgbde44bb2016-11-23 16:31:41 -08003190 if (ret) {
3191 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3192 __func__, desc_id, desc_index, param_offset, ret);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003193 goto out;
3194 }
3195
subhashj@codeaurora.orgbde44bb2016-11-23 16:31:41 -08003196 /* Sanity check */
3197 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3198 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3199 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3200 ret = -EINVAL;
3201 goto out;
3202 }
3203
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00003204 /* Check wherher we will not copy more data, than available */
3205 if (is_kmalloc && param_size > buff_len)
3206 param_size = buff_len;
subhashj@codeaurora.orgbde44bb2016-11-23 16:31:41 -08003207
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003208 if (is_kmalloc)
3209 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3210out:
3211 if (is_kmalloc)
3212 kfree(desc_buf);
3213 return ret;
3214}
3215
3216static inline int ufshcd_read_desc(struct ufs_hba *hba,
3217 enum desc_idn desc_id,
3218 int desc_index,
3219 u8 *buf,
3220 u32 size)
3221{
3222 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3223}
3224
3225static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3226 u8 *buf,
3227 u32 size)
3228{
Szymon Mielczarekdbd34a62017-03-29 08:19:21 +02003229 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003230}
3231
Tomas Winkler8209b6d2017-01-05 10:45:10 +02003232static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
Yaniv Gardib573d482016-03-10 17:37:09 +02003233{
3234 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3235}
Yaniv Gardib573d482016-03-10 17:37:09 +02003236
3237/**
3238 * ufshcd_read_string_desc - read string descriptor
3239 * @hba: pointer to adapter instance
3240 * @desc_index: descriptor index
3241 * @buf: pointer to buffer where descriptor would be read
3242 * @size: size of buf
3243 * @ascii: if true convert from unicode to ascii characters
3244 *
3245 * Return 0 in case of success, non-zero otherwise
3246 */
Stanislav Nijnikov2238d312018-02-15 14:14:07 +02003247int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
3248 u8 *buf, u32 size, bool ascii)
Yaniv Gardib573d482016-03-10 17:37:09 +02003249{
3250 int err = 0;
3251
3252 err = ufshcd_read_desc(hba,
3253 QUERY_DESC_IDN_STRING, desc_index, buf, size);
3254
3255 if (err) {
3256 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3257 __func__, QUERY_REQ_RETRIES, err);
3258 goto out;
3259 }
3260
3261 if (ascii) {
3262 int desc_len;
3263 int ascii_len;
3264 int i;
3265 char *buff_ascii;
3266
3267 desc_len = buf[0];
3268 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3269 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3270 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3271 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3272 __func__);
3273 err = -ENOMEM;
3274 goto out;
3275 }
3276
3277 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
3278 if (!buff_ascii) {
3279 err = -ENOMEM;
Tiezhu Yangfcbefc32016-06-25 12:35:22 +08003280 goto out;
Yaniv Gardib573d482016-03-10 17:37:09 +02003281 }
3282
3283 /*
3284 * the descriptor contains string in UTF16 format
3285 * we need to convert to utf-8 so it can be displayed
3286 */
3287 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3288 desc_len - QUERY_DESC_HDR_SIZE,
3289 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3290
3291 /* replace non-printable or non-ASCII characters with spaces */
3292 for (i = 0; i < ascii_len; i++)
3293 ufshcd_remove_non_printable(&buff_ascii[i]);
3294
3295 memset(buf + QUERY_DESC_HDR_SIZE, 0,
3296 size - QUERY_DESC_HDR_SIZE);
3297 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3298 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
Yaniv Gardib573d482016-03-10 17:37:09 +02003299 kfree(buff_ascii);
3300 }
3301out:
3302 return err;
3303}
Yaniv Gardib573d482016-03-10 17:37:09 +02003304
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003305/**
3306 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3307 * @hba: Pointer to adapter instance
3308 * @lun: lun id
3309 * @param_offset: offset of the parameter to read
3310 * @param_read_buf: pointer to buffer where parameter would be read
3311 * @param_size: sizeof(param_read_buf)
3312 *
3313 * Return 0 in case of success, non-zero otherwise
3314 */
3315static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3316 int lun,
3317 enum unit_desc_param param_offset,
3318 u8 *param_read_buf,
3319 u32 param_size)
3320{
3321 /*
3322 * Unit descriptors are only available for general purpose LUs (LUN id
3323 * from 0 to 7) and RPMB Well known LU.
3324 */
Stanislav Nijnikovd829fc82018-02-15 14:14:09 +02003325 if (!ufs_is_valid_unit_desc_lun(lun))
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003326 return -EOPNOTSUPP;
3327
3328 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3329 param_offset, param_read_buf, param_size);
3330}
3331
3332/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303333 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3334 * @hba: per adapter instance
3335 *
3336 * 1. Allocate DMA memory for Command Descriptor array
3337 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3338 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3339 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3340 * (UTMRDL)
3341 * 4. Allocate memory for local reference block(lrb).
3342 *
3343 * Returns 0 for success, non-zero in case of failure
3344 */
3345static int ufshcd_memory_alloc(struct ufs_hba *hba)
3346{
3347 size_t utmrdl_size, utrdl_size, ucdl_size;
3348
3349 /* Allocate memory for UTP command descriptors */
3350 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09003351 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3352 ucdl_size,
3353 &hba->ucdl_dma_addr,
3354 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303355
3356 /*
3357 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3358 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
3359 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3360 * be aligned to 128 bytes as well
3361 */
3362 if (!hba->ucdl_base_addr ||
3363 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303364 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303365 "Command Descriptor Memory allocation failed\n");
3366 goto out;
3367 }
3368
3369 /*
3370 * Allocate memory for UTP Transfer descriptors
3371 * UFSHCI requires 1024 byte alignment of UTRD
3372 */
3373 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09003374 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3375 utrdl_size,
3376 &hba->utrdl_dma_addr,
3377 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303378 if (!hba->utrdl_base_addr ||
3379 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303380 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303381 "Transfer Descriptor Memory allocation failed\n");
3382 goto out;
3383 }
3384
3385 /*
3386 * Allocate memory for UTP Task Management descriptors
3387 * UFSHCI requires 1024 byte alignment of UTMRD
3388 */
3389 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
Seungwon Jeon2953f852013-06-27 13:31:54 +09003390 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3391 utmrdl_size,
3392 &hba->utmrdl_dma_addr,
3393 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303394 if (!hba->utmrdl_base_addr ||
3395 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303396 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303397 "Task Management Descriptor Memory allocation failed\n");
3398 goto out;
3399 }
3400
3401 /* Allocate memory for local reference block */
Kees Cooka86854d2018-06-12 14:07:58 -07003402 hba->lrb = devm_kcalloc(hba->dev,
3403 hba->nutrs, sizeof(struct ufshcd_lrb),
Seungwon Jeon2953f852013-06-27 13:31:54 +09003404 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303405 if (!hba->lrb) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303406 dev_err(hba->dev, "LRB Memory allocation failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303407 goto out;
3408 }
3409 return 0;
3410out:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303411 return -ENOMEM;
3412}
3413
3414/**
3415 * ufshcd_host_memory_configure - configure local reference block with
3416 * memory offsets
3417 * @hba: per adapter instance
3418 *
3419 * Configure Host memory space
3420 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3421 * address.
3422 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3423 * and PRDT offset.
3424 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3425 * into local reference block.
3426 */
3427static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3428{
3429 struct utp_transfer_cmd_desc *cmd_descp;
3430 struct utp_transfer_req_desc *utrdlp;
3431 dma_addr_t cmd_desc_dma_addr;
3432 dma_addr_t cmd_desc_element_addr;
3433 u16 response_offset;
3434 u16 prdt_offset;
3435 int cmd_desc_size;
3436 int i;
3437
3438 utrdlp = hba->utrdl_base_addr;
3439 cmd_descp = hba->ucdl_base_addr;
3440
3441 response_offset =
3442 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3443 prdt_offset =
3444 offsetof(struct utp_transfer_cmd_desc, prd_table);
3445
3446 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3447 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3448
3449 for (i = 0; i < hba->nutrs; i++) {
3450 /* Configure UTRD with command descriptor base address */
3451 cmd_desc_element_addr =
3452 (cmd_desc_dma_addr + (cmd_desc_size * i));
3453 utrdlp[i].command_desc_base_addr_lo =
3454 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3455 utrdlp[i].command_desc_base_addr_hi =
3456 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3457
3458 /* Response upiu and prdt offset should be in double words */
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09003459 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3460 utrdlp[i].response_upiu_offset =
3461 cpu_to_le16(response_offset);
3462 utrdlp[i].prd_table_offset =
3463 cpu_to_le16(prdt_offset);
3464 utrdlp[i].response_upiu_length =
3465 cpu_to_le16(ALIGNED_UPIU_SIZE);
3466 } else {
3467 utrdlp[i].response_upiu_offset =
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303468 cpu_to_le16((response_offset >> 2));
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09003469 utrdlp[i].prd_table_offset =
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303470 cpu_to_le16((prdt_offset >> 2));
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09003471 utrdlp[i].response_upiu_length =
Sujit Reddy Thumma3ca316c2013-06-26 22:39:30 +05303472 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09003473 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303474
3475 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08003476 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3477 (i * sizeof(struct utp_transfer_req_desc));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303478 hba->lrb[i].ucd_req_ptr =
3479 (struct utp_upiu_req *)(cmd_descp + i);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08003480 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303481 hba->lrb[i].ucd_rsp_ptr =
3482 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08003483 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3484 response_offset;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303485 hba->lrb[i].ucd_prdt_ptr =
3486 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08003487 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3488 prdt_offset;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303489 }
3490}
3491
3492/**
3493 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3494 * @hba: per adapter instance
3495 *
3496 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3497 * in order to initialize the Unipro link startup procedure.
3498 * Once the Unipro links are up, the device connected to the controller
3499 * is detected.
3500 *
3501 * Returns 0 on success, non-zero value on failure
3502 */
3503static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3504{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303505 struct uic_command uic_cmd = {0};
3506 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303507
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303508 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3509
3510 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3511 if (ret)
Dolev Ravivff8e20c2016-12-22 18:42:18 -08003512 dev_dbg(hba->dev,
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303513 "dme-link-startup: error code %d\n", ret);
3514 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303515}
Alim Akhtar4404c5d2018-05-06 15:44:17 +05303516/**
3517 * ufshcd_dme_reset - UIC command for DME_RESET
3518 * @hba: per adapter instance
3519 *
3520 * DME_RESET command is issued in order to reset UniPro stack.
3521 * This function now deal with cold reset.
3522 *
3523 * Returns 0 on success, non-zero value on failure
3524 */
3525static int ufshcd_dme_reset(struct ufs_hba *hba)
3526{
3527 struct uic_command uic_cmd = {0};
3528 int ret;
3529
3530 uic_cmd.command = UIC_CMD_DME_RESET;
3531
3532 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3533 if (ret)
3534 dev_err(hba->dev,
3535 "dme-reset: error code %d\n", ret);
3536
3537 return ret;
3538}
3539
3540/**
3541 * ufshcd_dme_enable - UIC command for DME_ENABLE
3542 * @hba: per adapter instance
3543 *
3544 * DME_ENABLE command is issued in order to enable UniPro stack.
3545 *
3546 * Returns 0 on success, non-zero value on failure
3547 */
3548static int ufshcd_dme_enable(struct ufs_hba *hba)
3549{
3550 struct uic_command uic_cmd = {0};
3551 int ret;
3552
3553 uic_cmd.command = UIC_CMD_DME_ENABLE;
3554
3555 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3556 if (ret)
3557 dev_err(hba->dev,
3558 "dme-reset: error code %d\n", ret);
3559
3560 return ret;
3561}
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303562
Yaniv Gardicad2e032015-03-31 17:37:14 +03003563static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3564{
3565 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3566 unsigned long min_sleep_time_us;
3567
3568 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3569 return;
3570
3571 /*
3572 * last_dme_cmd_tstamp will be 0 only for 1st call to
3573 * this function
3574 */
3575 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3576 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3577 } else {
3578 unsigned long delta =
3579 (unsigned long) ktime_to_us(
3580 ktime_sub(ktime_get(),
3581 hba->last_dme_cmd_tstamp));
3582
3583 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3584 min_sleep_time_us =
3585 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3586 else
3587 return; /* no more delay required */
3588 }
3589
3590 /* allow sleep for extra 50us if needed */
3591 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3592}
3593
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303594/**
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303595 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3596 * @hba: per adapter instance
3597 * @attr_sel: uic command argument1
3598 * @attr_set: attribute set type as uic command argument2
3599 * @mib_val: setting value as uic command argument3
3600 * @peer: indicate whether peer or local
3601 *
3602 * Returns 0 on success, non-zero value on failure
3603 */
3604int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3605 u8 attr_set, u32 mib_val, u8 peer)
3606{
3607 struct uic_command uic_cmd = {0};
3608 static const char *const action[] = {
3609 "dme-set",
3610 "dme-peer-set"
3611 };
3612 const char *set = action[!!peer];
3613 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003614 int retries = UFS_UIC_COMMAND_RETRIES;
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303615
3616 uic_cmd.command = peer ?
3617 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3618 uic_cmd.argument1 = attr_sel;
3619 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3620 uic_cmd.argument3 = mib_val;
3621
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003622 do {
3623 /* for peer attributes we retry upon failure */
3624 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3625 if (ret)
3626 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3627 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3628 } while (ret && peer && --retries);
3629
Yaniv Gardif37e9f82016-11-23 16:32:49 -08003630 if (ret)
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003631 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
Yaniv Gardif37e9f82016-11-23 16:32:49 -08003632 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3633 UFS_UIC_COMMAND_RETRIES - retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303634
3635 return ret;
3636}
3637EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3638
3639/**
3640 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3641 * @hba: per adapter instance
3642 * @attr_sel: uic command argument1
3643 * @mib_val: the value of the attribute as returned by the UIC command
3644 * @peer: indicate whether peer or local
3645 *
3646 * Returns 0 on success, non-zero value on failure
3647 */
3648int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3649 u32 *mib_val, u8 peer)
3650{
3651 struct uic_command uic_cmd = {0};
3652 static const char *const action[] = {
3653 "dme-get",
3654 "dme-peer-get"
3655 };
3656 const char *get = action[!!peer];
3657 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003658 int retries = UFS_UIC_COMMAND_RETRIES;
Yaniv Gardi874237f2015-05-17 18:55:03 +03003659 struct ufs_pa_layer_attr orig_pwr_info;
3660 struct ufs_pa_layer_attr temp_pwr_info;
3661 bool pwr_mode_change = false;
3662
3663 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3664 orig_pwr_info = hba->pwr_info;
3665 temp_pwr_info = orig_pwr_info;
3666
3667 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3668 orig_pwr_info.pwr_rx == FAST_MODE) {
3669 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3670 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3671 pwr_mode_change = true;
3672 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3673 orig_pwr_info.pwr_rx == SLOW_MODE) {
3674 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3675 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3676 pwr_mode_change = true;
3677 }
3678 if (pwr_mode_change) {
3679 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3680 if (ret)
3681 goto out;
3682 }
3683 }
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303684
3685 uic_cmd.command = peer ?
3686 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3687 uic_cmd.argument1 = attr_sel;
3688
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003689 do {
3690 /* for peer attributes we retry upon failure */
3691 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3692 if (ret)
3693 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3694 get, UIC_GET_ATTR_ID(attr_sel), ret);
3695 } while (ret && peer && --retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303696
Yaniv Gardif37e9f82016-11-23 16:32:49 -08003697 if (ret)
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003698 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
Yaniv Gardif37e9f82016-11-23 16:32:49 -08003699 get, UIC_GET_ATTR_ID(attr_sel),
3700 UFS_UIC_COMMAND_RETRIES - retries);
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003701
3702 if (mib_val && !ret)
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303703 *mib_val = uic_cmd.argument3;
Yaniv Gardi874237f2015-05-17 18:55:03 +03003704
3705 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3706 && pwr_mode_change)
3707 ufshcd_change_power_mode(hba, &orig_pwr_info);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303708out:
3709 return ret;
3710}
3711EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3712
3713/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003714 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3715 * state) and waits for it to take effect.
3716 *
3717 * @hba: per adapter instance
3718 * @cmd: UIC command to execute
3719 *
3720 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3721 * DME_HIBERNATE_EXIT commands take some time to take its effect on both host
3722 * and device UniPro link and hence it's final completion would be indicated by
3723 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3724 * addition to normal UIC command completion Status (UCCS). This function only
3725 * returns after the relevant status bits indicate the completion.
3726 *
3727 * Returns 0 on success, non-zero value on failure
3728 */
3729static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3730{
3731 struct completion uic_async_done;
3732 unsigned long flags;
3733 u8 status;
3734 int ret;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003735 bool reenable_intr = false;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003736
3737 mutex_lock(&hba->uic_cmd_mutex);
3738 init_completion(&uic_async_done);
Yaniv Gardicad2e032015-03-31 17:37:14 +03003739 ufshcd_add_delay_before_dme_cmd(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003740
3741 spin_lock_irqsave(hba->host->host_lock, flags);
3742 hba->uic_async_done = &uic_async_done;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003743 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3744 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3745 /*
3746 * Make sure UIC command completion interrupt is disabled before
3747 * issuing UIC command.
3748 */
3749 wmb();
3750 reenable_intr = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003751 }
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003752 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3753 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003754 if (ret) {
3755 dev_err(hba->dev,
3756 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3757 cmd->command, cmd->argument3, ret);
3758 goto out;
3759 }
3760
3761 if (!wait_for_completion_timeout(hba->uic_async_done,
3762 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3763 dev_err(hba->dev,
3764 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3765 cmd->command, cmd->argument3);
3766 ret = -ETIMEDOUT;
3767 goto out;
3768 }
3769
3770 status = ufshcd_get_upmcrs(hba);
3771 if (status != PWR_LOCAL) {
3772 dev_err(hba->dev,
Zang Leigang479da362017-09-19 16:50:30 +08003773 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003774 cmd->command, status);
3775 ret = (status != PWR_OK) ? status : -1;
3776 }
3777out:
Venkat Gopalakrishnan7942f7b2017-02-03 16:58:24 -08003778 if (ret) {
3779 ufshcd_print_host_state(hba);
3780 ufshcd_print_pwr_info(hba);
3781 ufshcd_print_host_regs(hba);
3782 }
3783
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003784 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003785 hba->active_uic_cmd = NULL;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003786 hba->uic_async_done = NULL;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003787 if (reenable_intr)
3788 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003789 spin_unlock_irqrestore(hba->host->host_lock, flags);
3790 mutex_unlock(&hba->uic_cmd_mutex);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003791
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003792 return ret;
3793}
3794
3795/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303796 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage
3797 * using DME_SET primitives.
3798 * @hba: per adapter instance
3799 * @mode: powr mode value
3800 *
3801 * Returns 0 on success, non-zero value on failure
3802 */
Sujit Reddy Thummabdbe5d22014-05-26 10:59:11 +05303803static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303804{
3805 struct uic_command uic_cmd = {0};
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003806 int ret;
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303807
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03003808 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3809 ret = ufshcd_dme_set(hba,
3810 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3811 if (ret) {
3812 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3813 __func__, ret);
3814 goto out;
3815 }
3816 }
3817
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303818 uic_cmd.command = UIC_CMD_DME_SET;
3819 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3820 uic_cmd.argument3 = mode;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003821 ufshcd_hold(hba, false);
3822 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3823 ufshcd_release(hba);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303824
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03003825out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003826 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003827}
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303828
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003829static int ufshcd_link_recovery(struct ufs_hba *hba)
3830{
3831 int ret;
3832 unsigned long flags;
3833
3834 spin_lock_irqsave(hba->host->host_lock, flags);
3835 hba->ufshcd_state = UFSHCD_STATE_RESET;
3836 ufshcd_set_eh_in_progress(hba);
3837 spin_unlock_irqrestore(hba->host->host_lock, flags);
3838
3839 ret = ufshcd_host_reset_and_restore(hba);
3840
3841 spin_lock_irqsave(hba->host->host_lock, flags);
3842 if (ret)
3843 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3844 ufshcd_clear_eh_in_progress(hba);
3845 spin_unlock_irqrestore(hba->host->host_lock, flags);
3846
3847 if (ret)
3848 dev_err(hba->dev, "%s: link recovery failed, err %d",
3849 __func__, ret);
3850
3851 return ret;
3852}
3853
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003854static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003855{
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003856 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003857 struct uic_command uic_cmd = {0};
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08003858 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003859
Kiwoong Kimee32c902016-11-10 21:17:43 +09003860 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3861
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003862 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003863 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08003864 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3865 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003866
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003867 if (ret) {
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003868 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3869 __func__, ret);
3870
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003871 /*
3872 * If link recovery fails then return error so that caller
3873 * don't retry the hibern8 enter again.
3874 */
3875 if (ufshcd_link_recovery(hba))
3876 ret = -ENOLINK;
Kiwoong Kimee32c902016-11-10 21:17:43 +09003877 } else
3878 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3879 POST_CHANGE);
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003880
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003881 return ret;
3882}
3883
3884static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3885{
3886 int ret = 0, retries;
3887
3888 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3889 ret = __ufshcd_uic_hibern8_enter(hba);
3890 if (!ret || ret == -ENOLINK)
3891 goto out;
3892 }
3893out:
3894 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003895}
3896
3897static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3898{
3899 struct uic_command uic_cmd = {0};
3900 int ret;
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08003901 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003902
Kiwoong Kimee32c902016-11-10 21:17:43 +09003903 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3904
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003905 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3906 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08003907 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3908 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3909
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303910 if (ret) {
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003911 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3912 __func__, ret);
3913 ret = ufshcd_link_recovery(hba);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08003914 } else {
Kiwoong Kimee32c902016-11-10 21:17:43 +09003915 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3916 POST_CHANGE);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08003917 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3918 hba->ufs_stats.hibern8_exit_cnt++;
3919 }
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303920
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303921 return ret;
3922}
3923
Adrian Hunterad448372018-03-20 15:07:38 +02003924static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
3925{
3926 unsigned long flags;
3927
3928 if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) || !hba->ahit)
3929 return;
3930
3931 spin_lock_irqsave(hba->host->host_lock, flags);
3932 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
3933 spin_unlock_irqrestore(hba->host->host_lock, flags);
3934}
3935
Yaniv Gardi50646362014-10-23 13:25:13 +03003936 /**
3937 * ufshcd_init_pwr_info - setting the POR (power on reset)
3938 * values in hba power info
3939 * @hba: per-adapter instance
3940 */
3941static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3942{
3943 hba->pwr_info.gear_rx = UFS_PWM_G1;
3944 hba->pwr_info.gear_tx = UFS_PWM_G1;
3945 hba->pwr_info.lane_rx = 1;
3946 hba->pwr_info.lane_tx = 1;
3947 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3948 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3949 hba->pwr_info.hs_rate = 0;
3950}
3951
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303952/**
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003953 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
3954 * @hba: per-adapter instance
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303955 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003956static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303957{
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003958 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3959
3960 if (hba->max_pwr_info.is_valid)
3961 return 0;
3962
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08003963 pwr_info->pwr_tx = FAST_MODE;
3964 pwr_info->pwr_rx = FAST_MODE;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003965 pwr_info->hs_rate = PA_HS_MODE_B;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303966
3967 /* Get the connected lane count */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003968 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3969 &pwr_info->lane_rx);
3970 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3971 &pwr_info->lane_tx);
3972
3973 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3974 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
3975 __func__,
3976 pwr_info->lane_rx,
3977 pwr_info->lane_tx);
3978 return -EINVAL;
3979 }
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303980
3981 /*
3982 * First, get the maximum gears of HS speed.
3983 * If a zero value, it means there is no HSGEAR capability.
3984 * Then, get the maximum gears of PWM speed.
3985 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003986 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
3987 if (!pwr_info->gear_rx) {
3988 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3989 &pwr_info->gear_rx);
3990 if (!pwr_info->gear_rx) {
3991 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
3992 __func__, pwr_info->gear_rx);
3993 return -EINVAL;
3994 }
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08003995 pwr_info->pwr_rx = SLOW_MODE;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303996 }
3997
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003998 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
3999 &pwr_info->gear_tx);
4000 if (!pwr_info->gear_tx) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304001 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004002 &pwr_info->gear_tx);
4003 if (!pwr_info->gear_tx) {
4004 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4005 __func__, pwr_info->gear_tx);
4006 return -EINVAL;
4007 }
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08004008 pwr_info->pwr_tx = SLOW_MODE;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004009 }
4010
4011 hba->max_pwr_info.is_valid = true;
4012 return 0;
4013}
4014
4015static int ufshcd_change_power_mode(struct ufs_hba *hba,
4016 struct ufs_pa_layer_attr *pwr_mode)
4017{
4018 int ret;
4019
4020 /* if already configured to the requested pwr_mode */
4021 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4022 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4023 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4024 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4025 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4026 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4027 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4028 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4029 return 0;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304030 }
4031
4032 /*
4033 * Configure attributes for power mode change with below.
4034 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4035 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4036 * - PA_HSSERIES
4037 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004038 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4039 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4040 pwr_mode->lane_rx);
4041 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4042 pwr_mode->pwr_rx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304043 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004044 else
4045 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304046
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004047 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4048 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4049 pwr_mode->lane_tx);
4050 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4051 pwr_mode->pwr_tx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304052 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004053 else
4054 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304055
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004056 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4057 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4058 pwr_mode->pwr_rx == FAST_MODE ||
4059 pwr_mode->pwr_tx == FAST_MODE)
4060 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4061 pwr_mode->hs_rate);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304062
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004063 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4064 | pwr_mode->pwr_tx);
4065
4066 if (ret) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304067 dev_err(hba->dev,
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004068 "%s: power mode change failed %d\n", __func__, ret);
4069 } else {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004070 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4071 pwr_mode);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004072
4073 memcpy(&hba->pwr_info, pwr_mode,
4074 sizeof(struct ufs_pa_layer_attr));
4075 }
4076
4077 return ret;
4078}
4079
4080/**
4081 * ufshcd_config_pwr_mode - configure a new power mode
4082 * @hba: per-adapter instance
4083 * @desired_pwr_mode: desired power configuration
4084 */
Alim Akhtar0d846e72018-05-06 15:44:18 +05304085int ufshcd_config_pwr_mode(struct ufs_hba *hba,
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004086 struct ufs_pa_layer_attr *desired_pwr_mode)
4087{
4088 struct ufs_pa_layer_attr final_params = { 0 };
4089 int ret;
4090
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004091 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4092 desired_pwr_mode, &final_params);
4093
4094 if (ret)
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004095 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4096
4097 ret = ufshcd_change_power_mode(hba, &final_params);
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08004098 if (!ret)
4099 ufshcd_print_pwr_info(hba);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304100
4101 return ret;
4102}
Alim Akhtar0d846e72018-05-06 15:44:18 +05304103EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304104
4105/**
Dolev Raviv68078d52013-07-30 00:35:58 +05304106 * ufshcd_complete_dev_init() - checks device readiness
Bart Van Assche8aa29f12018-03-01 15:07:20 -08004107 * @hba: per-adapter instance
Dolev Raviv68078d52013-07-30 00:35:58 +05304108 *
4109 * Set fDeviceInit flag and poll until device toggles it.
4110 */
4111static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4112{
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004113 int i;
4114 int err;
Dolev Raviv68078d52013-07-30 00:35:58 +05304115 bool flag_res = 1;
4116
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004117 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4118 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
Dolev Raviv68078d52013-07-30 00:35:58 +05304119 if (err) {
4120 dev_err(hba->dev,
4121 "%s setting fDeviceInit flag failed with error %d\n",
4122 __func__, err);
4123 goto out;
4124 }
4125
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004126 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4127 for (i = 0; i < 1000 && !err && flag_res; i++)
4128 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4129 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4130
Dolev Raviv68078d52013-07-30 00:35:58 +05304131 if (err)
4132 dev_err(hba->dev,
4133 "%s reading fDeviceInit flag failed with error %d\n",
4134 __func__, err);
4135 else if (flag_res)
4136 dev_err(hba->dev,
4137 "%s fDeviceInit was not cleared by the device\n",
4138 __func__);
4139
4140out:
4141 return err;
4142}
4143
4144/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304145 * ufshcd_make_hba_operational - Make UFS controller operational
4146 * @hba: per adapter instance
4147 *
4148 * To bring UFS host controller to operational state,
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004149 * 1. Enable required interrupts
4150 * 2. Configure interrupt aggregation
Yaniv Gardi897efe62016-02-01 15:02:48 +02004151 * 3. Program UTRL and UTMRL base address
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004152 * 4. Configure run-stop-registers
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304153 *
4154 * Returns 0 on success, non-zero value on failure
4155 */
4156static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4157{
4158 int err = 0;
4159 u32 reg;
4160
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304161 /* Enable required interrupts */
4162 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4163
4164 /* Configure interrupt aggregation */
Yaniv Gardib8521902015-05-17 18:54:57 +03004165 if (ufshcd_is_intr_aggr_allowed(hba))
4166 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4167 else
4168 ufshcd_disable_intr_aggr(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304169
4170 /* Configure UTRL and UTMRL base address registers */
4171 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4172 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4173 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4174 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4175 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4176 REG_UTP_TASK_REQ_LIST_BASE_L);
4177 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4178 REG_UTP_TASK_REQ_LIST_BASE_H);
4179
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304180 /*
Yaniv Gardi897efe62016-02-01 15:02:48 +02004181 * Make sure base address and interrupt setup are updated before
4182 * enabling the run/stop registers below.
4183 */
4184 wmb();
4185
4186 /*
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304187 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304188 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004189 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304190 if (!(ufshcd_get_lists_status(reg))) {
4191 ufshcd_enable_run_stop_reg(hba);
4192 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304193 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304194 "Host controller not ready to process requests");
4195 err = -EIO;
4196 goto out;
4197 }
4198
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304199out:
4200 return err;
4201}
4202
4203/**
Yaniv Gardi596585a2016-03-10 17:37:08 +02004204 * ufshcd_hba_stop - Send controller to reset state
4205 * @hba: per adapter instance
4206 * @can_sleep: perform sleep or just spin
4207 */
4208static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4209{
4210 int err;
4211
4212 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4213 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4214 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4215 10, 1, can_sleep);
4216 if (err)
4217 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4218}
4219
4220/**
Alim Akhtar4404c5d2018-05-06 15:44:17 +05304221 * ufshcd_hba_execute_hce - initialize the controller
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304222 * @hba: per adapter instance
4223 *
4224 * The controller resets itself and controller firmware initialization
4225 * sequence kicks off. When controller is ready it will set
4226 * the Host Controller Enable bit to 1.
4227 *
4228 * Returns 0 on success, non-zero value on failure
4229 */
Alim Akhtar4404c5d2018-05-06 15:44:17 +05304230static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304231{
4232 int retry;
4233
4234 /*
4235 * msleep of 1 and 5 used in this function might result in msleep(20),
4236 * but it was necessary to send the UFS FPGA to reset mode during
4237 * development and testing of this driver. msleep can be changed to
4238 * mdelay and retry count can be reduced based on the controller.
4239 */
Yaniv Gardi596585a2016-03-10 17:37:08 +02004240 if (!ufshcd_is_hba_active(hba))
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304241 /* change controller state to "reset state" */
Yaniv Gardi596585a2016-03-10 17:37:08 +02004242 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304243
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004244 /* UniPro link is disabled at this point */
4245 ufshcd_set_link_off(hba);
4246
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004247 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004248
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304249 /* start controller initialization sequence */
4250 ufshcd_hba_start(hba);
4251
4252 /*
4253 * To initialize a UFS host controller HCE bit must be set to 1.
4254 * During initialization the HCE bit value changes from 1->0->1.
4255 * When the host controller completes initialization sequence
4256 * it sets the value of HCE bit to 1. The same HCE bit is read back
4257 * to check if the controller has completed initialization sequence.
4258 * So without this delay the value HCE = 1, set in the previous
4259 * instruction might be read back.
4260 * This delay can be changed based on the controller.
4261 */
4262 msleep(1);
4263
4264 /* wait for the host controller to complete initialization */
4265 retry = 10;
4266 while (ufshcd_is_hba_active(hba)) {
4267 if (retry) {
4268 retry--;
4269 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304270 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304271 "Controller enable failed\n");
4272 return -EIO;
4273 }
4274 msleep(5);
4275 }
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004276
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004277 /* enable UIC related interrupts */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004278 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004279
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004280 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004281
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304282 return 0;
4283}
4284
Alim Akhtar4404c5d2018-05-06 15:44:17 +05304285static int ufshcd_hba_enable(struct ufs_hba *hba)
4286{
4287 int ret;
4288
4289 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4290 ufshcd_set_link_off(hba);
4291 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4292
4293 /* enable UIC related interrupts */
4294 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4295 ret = ufshcd_dme_reset(hba);
4296 if (!ret) {
4297 ret = ufshcd_dme_enable(hba);
4298 if (!ret)
4299 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4300 if (ret)
4301 dev_err(hba->dev,
4302 "Host controller enable failed with non-hce\n");
4303 }
4304 } else {
4305 ret = ufshcd_hba_execute_hce(hba);
4306 }
4307
4308 return ret;
4309}
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03004310static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4311{
4312 int tx_lanes, i, err = 0;
4313
4314 if (!peer)
4315 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4316 &tx_lanes);
4317 else
4318 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4319 &tx_lanes);
4320 for (i = 0; i < tx_lanes; i++) {
4321 if (!peer)
4322 err = ufshcd_dme_set(hba,
4323 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4324 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4325 0);
4326 else
4327 err = ufshcd_dme_peer_set(hba,
4328 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4329 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4330 0);
4331 if (err) {
4332 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4333 __func__, peer, i, err);
4334 break;
4335 }
4336 }
4337
4338 return err;
4339}
4340
4341static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4342{
4343 return ufshcd_disable_tx_lcc(hba, true);
4344}
4345
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304346/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304347 * ufshcd_link_startup - Initialize unipro link startup
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304348 * @hba: per adapter instance
4349 *
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304350 * Returns 0 for success, non-zero in case of failure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304351 */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304352static int ufshcd_link_startup(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304353{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304354 int ret;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004355 int retries = DME_LINKSTARTUP_RETRIES;
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08004356 bool link_startup_again = false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304357
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08004358 /*
4359 * If UFS device isn't active then we will have to issue link startup
4360 * 2 times to make sure the device state move to active.
4361 */
4362 if (!ufshcd_is_ufs_dev_active(hba))
4363 link_startup_again = true;
4364
4365link_startup:
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004366 do {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004367 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304368
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004369 ret = ufshcd_dme_link_startup(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004370
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004371 /* check if device is detected by inter-connect layer */
4372 if (!ret && !ufshcd_is_device_present(hba)) {
4373 dev_err(hba->dev, "%s: Device not present\n", __func__);
4374 ret = -ENXIO;
4375 goto out;
4376 }
4377
4378 /*
4379 * DME link lost indication is only received when link is up,
4380 * but we can't be sure if the link is up until link startup
4381 * succeeds. So reset the local Uni-Pro and try again.
4382 */
4383 if (ret && ufshcd_hba_enable(hba))
4384 goto out;
4385 } while (ret && retries--);
4386
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304387 if (ret)
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004388 /* failed to get the link up... retire */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304389 goto out;
4390
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08004391 if (link_startup_again) {
4392 link_startup_again = false;
4393 retries = DME_LINKSTARTUP_RETRIES;
4394 goto link_startup;
4395 }
4396
subhashj@codeaurora.orgd2aebb92016-12-22 18:41:33 -08004397 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4398 ufshcd_init_pwr_info(hba);
4399 ufshcd_print_pwr_info(hba);
4400
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03004401 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4402 ret = ufshcd_disable_device_tx_lcc(hba);
4403 if (ret)
4404 goto out;
4405 }
4406
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004407 /* Include any host controller configuration via UIC commands */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004408 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4409 if (ret)
4410 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004411
4412 ret = ufshcd_make_hba_operational(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304413out:
Venkat Gopalakrishnan7942f7b2017-02-03 16:58:24 -08004414 if (ret) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304415 dev_err(hba->dev, "link startup failed %d\n", ret);
Venkat Gopalakrishnan7942f7b2017-02-03 16:58:24 -08004416 ufshcd_print_host_state(hba);
4417 ufshcd_print_pwr_info(hba);
4418 ufshcd_print_host_regs(hba);
4419 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304420 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304421}
4422
4423/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304424 * ufshcd_verify_dev_init() - Verify device initialization
4425 * @hba: per-adapter instance
4426 *
4427 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4428 * device Transport Protocol (UTP) layer is ready after a reset.
4429 * If the UTP layer at the device side is not initialized, it may
4430 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4431 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4432 */
4433static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4434{
4435 int err = 0;
4436 int retries;
4437
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004438 ufshcd_hold(hba, false);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304439 mutex_lock(&hba->dev_cmd.lock);
4440 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4441 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4442 NOP_OUT_TIMEOUT);
4443
4444 if (!err || err == -ETIMEDOUT)
4445 break;
4446
4447 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4448 }
4449 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004450 ufshcd_release(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304451
4452 if (err)
4453 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4454 return err;
4455}
4456
4457/**
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004458 * ufshcd_set_queue_depth - set lun queue depth
4459 * @sdev: pointer to SCSI device
4460 *
4461 * Read bLUQueueDepth value and activate scsi tagged command
4462 * queueing. For WLUN, queue depth is set to 1. For best-effort
4463 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
4464 * value that host can queue.
4465 */
4466static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4467{
4468 int ret = 0;
4469 u8 lun_qdepth;
4470 struct ufs_hba *hba;
4471
4472 hba = shost_priv(sdev->host);
4473
4474 lun_qdepth = hba->nutrs;
Szymon Mielczarekdbd34a62017-03-29 08:19:21 +02004475 ret = ufshcd_read_unit_desc_param(hba,
4476 ufshcd_scsi_to_upiu_lun(sdev->lun),
4477 UNIT_DESC_PARAM_LU_Q_DEPTH,
4478 &lun_qdepth,
4479 sizeof(lun_qdepth));
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004480
4481 /* Some WLUN doesn't support unit descriptor */
4482 if (ret == -EOPNOTSUPP)
4483 lun_qdepth = 1;
4484 else if (!lun_qdepth)
4485 /* eventually, we can figure out the real queue depth */
4486 lun_qdepth = hba->nutrs;
4487 else
4488 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4489
4490 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4491 __func__, lun_qdepth);
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004492 scsi_change_queue_depth(sdev, lun_qdepth);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004493}
4494
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004495/*
4496 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4497 * @hba: per-adapter instance
4498 * @lun: UFS device lun id
4499 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4500 *
4501 * Returns 0 in case of success and b_lu_write_protect status would be returned
4502 * @b_lu_write_protect parameter.
4503 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4504 * Returns -EINVAL in case of invalid parameters passed to this function.
4505 */
4506static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4507 u8 lun,
4508 u8 *b_lu_write_protect)
4509{
4510 int ret;
4511
4512 if (!b_lu_write_protect)
4513 ret = -EINVAL;
4514 /*
4515 * According to UFS device spec, RPMB LU can't be write
4516 * protected so skip reading bLUWriteProtect parameter for
4517 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4518 */
4519 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4520 ret = -ENOTSUPP;
4521 else
4522 ret = ufshcd_read_unit_desc_param(hba,
4523 lun,
4524 UNIT_DESC_PARAM_LU_WR_PROTECT,
4525 b_lu_write_protect,
4526 sizeof(*b_lu_write_protect));
4527 return ret;
4528}
4529
4530/**
4531 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4532 * status
4533 * @hba: per-adapter instance
4534 * @sdev: pointer to SCSI device
4535 *
4536 */
4537static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4538 struct scsi_device *sdev)
4539{
4540 if (hba->dev_info.f_power_on_wp_en &&
4541 !hba->dev_info.is_lu_power_on_wp) {
4542 u8 b_lu_write_protect;
4543
4544 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4545 &b_lu_write_protect) &&
4546 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4547 hba->dev_info.is_lu_power_on_wp = true;
4548 }
4549}
4550
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004551/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304552 * ufshcd_slave_alloc - handle initial SCSI device configurations
4553 * @sdev: pointer to SCSI device
4554 *
4555 * Returns success
4556 */
4557static int ufshcd_slave_alloc(struct scsi_device *sdev)
4558{
4559 struct ufs_hba *hba;
4560
4561 hba = shost_priv(sdev->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304562
4563 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4564 sdev->use_10_for_ms = 1;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304565
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304566 /* allow SCSI layer to restart the device in case of errors */
4567 sdev->allow_restart = 1;
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004568
Sujit Reddy Thummab2a6c522014-07-01 12:22:38 +03004569 /* REPORT SUPPORTED OPERATION CODES is not supported */
4570 sdev->no_report_opcodes = 1;
4571
Sujit Reddy Thumma84af7e82018-01-24 09:52:35 +05304572 /* WRITE_SAME command is not supported */
4573 sdev->no_write_same = 1;
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004574
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004575 ufshcd_set_queue_depth(sdev);
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004576
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004577 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4578
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004579 return 0;
4580}
4581
4582/**
4583 * ufshcd_change_queue_depth - change queue depth
4584 * @sdev: pointer to SCSI device
4585 * @depth: required depth to set
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004586 *
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004587 * Change queue depth and make sure the max. limits are not crossed.
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004588 */
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004589static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004590{
4591 struct ufs_hba *hba = shost_priv(sdev->host);
4592
4593 if (depth > hba->nutrs)
4594 depth = hba->nutrs;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004595 return scsi_change_queue_depth(sdev, depth);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304596}
4597
4598/**
Akinobu Mitaeeda4742014-07-01 23:00:32 +09004599 * ufshcd_slave_configure - adjust SCSI device configurations
4600 * @sdev: pointer to SCSI device
4601 */
4602static int ufshcd_slave_configure(struct scsi_device *sdev)
4603{
4604 struct request_queue *q = sdev->request_queue;
4605
4606 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4607 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
4608
4609 return 0;
4610}
4611
4612/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304613 * ufshcd_slave_destroy - remove SCSI device configurations
4614 * @sdev: pointer to SCSI device
4615 */
4616static void ufshcd_slave_destroy(struct scsi_device *sdev)
4617{
4618 struct ufs_hba *hba;
4619
4620 hba = shost_priv(sdev->host);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004621 /* Drop the reference as it won't be needed anymore */
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004622 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4623 unsigned long flags;
4624
4625 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004626 hba->sdev_ufs_device = NULL;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004627 spin_unlock_irqrestore(hba->host->host_lock, flags);
4628 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304629}
4630
4631/**
4632 * ufshcd_task_req_compl - handle task management request completion
4633 * @hba: per adapter instance
4634 * @index: index of the completed request
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304635 * @resp: task management service response
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304636 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304637 * Returns non-zero value on error, zero on success
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304638 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304639static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304640{
4641 struct utp_task_req_desc *task_req_descp;
4642 struct utp_upiu_task_rsp *task_rsp_upiup;
4643 unsigned long flags;
4644 int ocs_value;
4645 int task_result;
4646
4647 spin_lock_irqsave(hba->host->host_lock, flags);
4648
4649 /* Clear completed tasks from outstanding_tasks */
4650 __clear_bit(index, &hba->outstanding_tasks);
4651
4652 task_req_descp = hba->utmrdl_base_addr;
4653 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
4654
4655 if (ocs_value == OCS_SUCCESS) {
4656 task_rsp_upiup = (struct utp_upiu_task_rsp *)
4657 task_req_descp[index].task_rsp_upiu;
Kiwoong Kim8794ee02016-09-09 08:22:22 +09004658 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
4659 task_result = task_result & MASK_TM_SERVICE_RESP;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304660 if (resp)
4661 *resp = (u8)task_result;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304662 } else {
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304663 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
4664 __func__, ocs_value);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304665 }
4666 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304667
4668 return ocs_value;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304669}
4670
4671/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304672 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
Bart Van Assche8aa29f12018-03-01 15:07:20 -08004673 * @lrbp: pointer to local reference block of completed command
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304674 * @scsi_status: SCSI command status
4675 *
4676 * Returns value base on SCSI command status
4677 */
4678static inline int
4679ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4680{
4681 int result = 0;
4682
4683 switch (scsi_status) {
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05304684 case SAM_STAT_CHECK_CONDITION:
4685 ufshcd_copy_sense_data(lrbp);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304686 case SAM_STAT_GOOD:
4687 result |= DID_OK << 16 |
4688 COMMAND_COMPLETE << 8 |
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05304689 scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304690 break;
4691 case SAM_STAT_TASK_SET_FULL:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05304692 case SAM_STAT_BUSY:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304693 case SAM_STAT_TASK_ABORTED:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05304694 ufshcd_copy_sense_data(lrbp);
4695 result |= scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304696 break;
4697 default:
4698 result |= DID_ERROR << 16;
4699 break;
4700 } /* end of switch */
4701
4702 return result;
4703}
4704
4705/**
4706 * ufshcd_transfer_rsp_status - Get overall status of the response
4707 * @hba: per adapter instance
Bart Van Assche8aa29f12018-03-01 15:07:20 -08004708 * @lrbp: pointer to local reference block of completed command
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304709 *
4710 * Returns result of the command to notify SCSI midlayer
4711 */
4712static inline int
4713ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4714{
4715 int result = 0;
4716 int scsi_status;
4717 int ocs;
4718
4719 /* overall command status of utrd */
4720 ocs = ufshcd_get_tr_ocs(lrbp);
4721
4722 switch (ocs) {
4723 case OCS_SUCCESS:
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304724 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08004725 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304726 switch (result) {
4727 case UPIU_TRANSACTION_RESPONSE:
4728 /*
4729 * get the response UPIU result to extract
4730 * the SCSI command status
4731 */
4732 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4733
4734 /*
4735 * get the result based on SCSI status response
4736 * to notify the SCSI midlayer of the command status
4737 */
4738 scsi_status = result & MASK_SCSI_STATUS;
4739 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304740
Yaniv Gardif05ac2e2016-02-01 15:02:42 +02004741 /*
4742 * Currently we are only supporting BKOPs exception
4743 * events hence we can ignore BKOPs exception event
4744 * during power management callbacks. BKOPs exception
4745 * event is not expected to be raised in runtime suspend
4746 * callback as it allows the urgent bkops.
4747 * During system suspend, we are anyway forcefully
4748 * disabling the bkops and if urgent bkops is needed
4749 * it will be enabled on system resume. Long term
4750 * solution could be to abort the system suspend if
4751 * UFS device needs urgent BKOPs.
4752 */
4753 if (!hba->pm_op_in_progress &&
4754 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304755 schedule_work(&hba->eeh_work);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304756 break;
4757 case UPIU_TRANSACTION_REJECT_UPIU:
4758 /* TODO: handle Reject UPIU Response */
4759 result = DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304760 dev_err(hba->dev,
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304761 "Reject UPIU not fully implemented\n");
4762 break;
4763 default:
4764 result = DID_ERROR << 16;
4765 dev_err(hba->dev,
4766 "Unexpected request response code = %x\n",
4767 result);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304768 break;
4769 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304770 break;
4771 case OCS_ABORTED:
4772 result |= DID_ABORT << 16;
4773 break;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304774 case OCS_INVALID_COMMAND_STATUS:
4775 result |= DID_REQUEUE << 16;
4776 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304777 case OCS_INVALID_CMD_TABLE_ATTR:
4778 case OCS_INVALID_PRDT_ATTR:
4779 case OCS_MISMATCH_DATA_BUF_SIZE:
4780 case OCS_MISMATCH_RESP_UPIU_SIZE:
4781 case OCS_PEER_COMM_FAILURE:
4782 case OCS_FATAL_ERROR:
4783 default:
4784 result |= DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304785 dev_err(hba->dev,
Dolev Ravivff8e20c2016-12-22 18:42:18 -08004786 "OCS error from controller = %x for tag %d\n",
4787 ocs, lrbp->task_tag);
4788 ufshcd_print_host_regs(hba);
Gilad Broner6ba65582017-02-03 16:57:28 -08004789 ufshcd_print_host_state(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304790 break;
4791 } /* end of switch */
4792
Dolev Raviv66cc8202016-12-22 18:39:42 -08004793 if (host_byte(result) != DID_OK)
4794 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304795 return result;
4796}
4797
4798/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304799 * ufshcd_uic_cmd_compl - handle completion of uic command
4800 * @hba: per adapter instance
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304801 * @intr_status: interrupt status generated by the controller
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304802 */
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304803static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304804{
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304805 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304806 hba->active_uic_cmd->argument2 |=
4807 ufshcd_get_uic_cmd_result(hba);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05304808 hba->active_uic_cmd->argument3 =
4809 ufshcd_get_dme_attr_val(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304810 complete(&hba->active_uic_cmd->done);
4811 }
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304812
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004813 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
4814 complete(hba->uic_async_done);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304815}
4816
4817/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004818 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304819 * @hba: per adapter instance
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004820 * @completed_reqs: requests to complete
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304821 */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004822static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4823 unsigned long completed_reqs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304824{
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304825 struct ufshcd_lrb *lrbp;
4826 struct scsi_cmnd *cmd;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304827 int result;
4828 int index;
Dolev Ravive9d501b2014-07-01 12:22:37 +03004829
Dolev Ravive9d501b2014-07-01 12:22:37 +03004830 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4831 lrbp = &hba->lrb[index];
4832 cmd = lrbp->cmd;
4833 if (cmd) {
Lee Susman1a07f2d2016-12-22 18:42:03 -08004834 ufshcd_add_command_trace(hba, index, "complete");
Dolev Ravive9d501b2014-07-01 12:22:37 +03004835 result = ufshcd_transfer_rsp_status(hba, lrbp);
4836 scsi_dma_unmap(cmd);
4837 cmd->result = result;
4838 /* Mark completed command as NULL in LRB */
4839 lrbp->cmd = NULL;
4840 clear_bit_unlock(index, &hba->lrb_in_use);
4841 /* Do not touch lrbp after scsi done */
4842 cmd->scsi_done(cmd);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004843 __ufshcd_release(hba);
Joao Pinto300bb132016-05-11 12:21:27 +01004844 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4845 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
Lee Susman1a07f2d2016-12-22 18:42:03 -08004846 if (hba->dev_cmd.complete) {
4847 ufshcd_add_command_trace(hba, index,
4848 "dev_complete");
Dolev Ravive9d501b2014-07-01 12:22:37 +03004849 complete(hba->dev_cmd.complete);
Lee Susman1a07f2d2016-12-22 18:42:03 -08004850 }
Dolev Ravive9d501b2014-07-01 12:22:37 +03004851 }
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08004852 if (ufshcd_is_clkscaling_supported(hba))
4853 hba->clk_scaling.active_reqs--;
Zang Leigang09017182017-09-27 10:06:06 +08004854
4855 lrbp->compl_time_stamp = ktime_get();
Dolev Ravive9d501b2014-07-01 12:22:37 +03004856 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304857
4858 /* clear corresponding bits of completed commands */
4859 hba->outstanding_reqs ^= completed_reqs;
4860
Sahitya Tummala856b3482014-09-25 15:32:34 +03004861 ufshcd_clk_scaling_update_busy(hba);
4862
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304863 /* we might have free'd some tags above */
4864 wake_up(&hba->dev_cmd.tag_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304865}
4866
4867/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004868 * ufshcd_transfer_req_compl - handle SCSI and query command completion
4869 * @hba: per adapter instance
4870 */
4871static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
4872{
4873 unsigned long completed_reqs;
4874 u32 tr_doorbell;
4875
4876 /* Resetting interrupt aggregation counters first and reading the
4877 * DOOR_BELL afterward allows us to handle all the completed requests.
4878 * In order to prevent other interrupts starvation the DB is read once
4879 * after reset. The down side of this solution is the possibility of
4880 * false interrupt if device completes another request after resetting
4881 * aggregation and before reading the DB.
4882 */
Alim Akhtar5ac6abc2018-05-06 15:44:16 +05304883 if (ufshcd_is_intr_aggr_allowed(hba) &&
4884 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004885 ufshcd_reset_intr_aggr(hba);
4886
4887 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4888 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4889
4890 __ufshcd_transfer_req_compl(hba, completed_reqs);
4891}
4892
4893/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304894 * ufshcd_disable_ee - disable exception event
4895 * @hba: per-adapter instance
4896 * @mask: exception event to disable
4897 *
4898 * Disables exception event in the device so that the EVENT_ALERT
4899 * bit is not set.
4900 *
4901 * Returns zero on success, non-zero error value on failure.
4902 */
4903static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4904{
4905 int err = 0;
4906 u32 val;
4907
4908 if (!(hba->ee_ctrl_mask & mask))
4909 goto out;
4910
4911 val = hba->ee_ctrl_mask & ~mask;
Tomohiro Kusumid7e2ddd2017-04-20 15:01:44 +03004912 val &= MASK_EE_STATUS;
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02004913 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304914 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4915 if (!err)
4916 hba->ee_ctrl_mask &= ~mask;
4917out:
4918 return err;
4919}
4920
4921/**
4922 * ufshcd_enable_ee - enable exception event
4923 * @hba: per-adapter instance
4924 * @mask: exception event to enable
4925 *
4926 * Enable corresponding exception event in the device to allow
4927 * device to alert host in critical scenarios.
4928 *
4929 * Returns zero on success, non-zero error value on failure.
4930 */
4931static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4932{
4933 int err = 0;
4934 u32 val;
4935
4936 if (hba->ee_ctrl_mask & mask)
4937 goto out;
4938
4939 val = hba->ee_ctrl_mask | mask;
Tomohiro Kusumid7e2ddd2017-04-20 15:01:44 +03004940 val &= MASK_EE_STATUS;
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02004941 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304942 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4943 if (!err)
4944 hba->ee_ctrl_mask |= mask;
4945out:
4946 return err;
4947}
4948
4949/**
4950 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
4951 * @hba: per-adapter instance
4952 *
4953 * Allow device to manage background operations on its own. Enabling
4954 * this might lead to inconsistent latencies during normal data transfers
4955 * as the device is allowed to manage its own way of handling background
4956 * operations.
4957 *
4958 * Returns zero on success, non-zero on failure.
4959 */
4960static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4961{
4962 int err = 0;
4963
4964 if (hba->auto_bkops_enabled)
4965 goto out;
4966
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004967 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304968 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4969 if (err) {
4970 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
4971 __func__, err);
4972 goto out;
4973 }
4974
4975 hba->auto_bkops_enabled = true;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08004976 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304977
4978 /* No need of URGENT_BKOPS exception from the device */
4979 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4980 if (err)
4981 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
4982 __func__, err);
4983out:
4984 return err;
4985}
4986
4987/**
4988 * ufshcd_disable_auto_bkops - block device in doing background operations
4989 * @hba: per-adapter instance
4990 *
4991 * Disabling background operations improves command response latency but
4992 * has drawback of device moving into critical state where the device is
4993 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
4994 * host is idle so that BKOPS are managed effectively without any negative
4995 * impacts.
4996 *
4997 * Returns zero on success, non-zero on failure.
4998 */
4999static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5000{
5001 int err = 0;
5002
5003 if (!hba->auto_bkops_enabled)
5004 goto out;
5005
5006 /*
5007 * If host assisted BKOPs is to be enabled, make sure
5008 * urgent bkops exception is allowed.
5009 */
5010 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5011 if (err) {
5012 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5013 __func__, err);
5014 goto out;
5015 }
5016
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02005017 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305018 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5019 if (err) {
5020 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5021 __func__, err);
5022 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5023 goto out;
5024 }
5025
5026 hba->auto_bkops_enabled = false;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08005027 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305028out:
5029 return err;
5030}
5031
5032/**
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08005033 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305034 * @hba: per adapter instance
5035 *
5036 * After a device reset the device may toggle the BKOPS_EN flag
5037 * to default value. The s/w tracking variables should be updated
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08005038 * as well. This function would change the auto-bkops state based on
5039 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305040 */
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08005041static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305042{
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08005043 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5044 hba->auto_bkops_enabled = false;
5045 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5046 ufshcd_enable_auto_bkops(hba);
5047 } else {
5048 hba->auto_bkops_enabled = true;
5049 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5050 ufshcd_disable_auto_bkops(hba);
5051 }
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305052}
5053
5054static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5055{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02005056 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305057 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5058}
5059
5060/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005061 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5062 * @hba: per-adapter instance
5063 * @status: bkops_status value
5064 *
5065 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
5066 * flag in the device to permit background operations if the device
5067 * bkops_status is greater than or equal to "status" argument passed to
5068 * this function, disable otherwise.
5069 *
5070 * Returns 0 for success, non-zero in case of failure.
5071 *
5072 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5073 * to know whether auto bkops is enabled or disabled after this function
5074 * returns control to it.
5075 */
5076static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5077 enum bkops_status status)
5078{
5079 int err;
5080 u32 curr_status = 0;
5081
5082 err = ufshcd_get_bkops_status(hba, &curr_status);
5083 if (err) {
5084 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5085 __func__, err);
5086 goto out;
5087 } else if (curr_status > BKOPS_STATUS_MAX) {
5088 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5089 __func__, curr_status);
5090 err = -EINVAL;
5091 goto out;
5092 }
5093
5094 if (curr_status >= status)
5095 err = ufshcd_enable_auto_bkops(hba);
5096 else
5097 err = ufshcd_disable_auto_bkops(hba);
5098out:
5099 return err;
5100}
5101
5102/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305103 * ufshcd_urgent_bkops - handle urgent bkops exception event
5104 * @hba: per-adapter instance
5105 *
5106 * Enable fBackgroundOpsEn flag in the device to permit background
5107 * operations.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005108 *
5109 * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled
5110 * and negative error value for any other failure.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305111 */
5112static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5113{
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005114 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305115}
5116
5117static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5118{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02005119 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305120 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5121}
5122
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005123static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5124{
5125 int err;
5126 u32 curr_status = 0;
5127
5128 if (hba->is_urgent_bkops_lvl_checked)
5129 goto enable_auto_bkops;
5130
5131 err = ufshcd_get_bkops_status(hba, &curr_status);
5132 if (err) {
5133 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5134 __func__, err);
5135 goto out;
5136 }
5137
5138 /*
5139 * We are seeing that some devices are raising the urgent bkops
5140 * exception events even when BKOPS status doesn't indicate performace
5141 * impacted or critical. Handle these device by determining their urgent
5142 * bkops status at runtime.
5143 */
5144 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5145 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5146 __func__, curr_status);
5147 /* update the current status as the urgent bkops level */
5148 hba->urgent_bkops_lvl = curr_status;
5149 hba->is_urgent_bkops_lvl_checked = true;
5150 }
5151
5152enable_auto_bkops:
5153 err = ufshcd_enable_auto_bkops(hba);
5154out:
5155 if (err < 0)
5156 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5157 __func__, err);
5158}
5159
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305160/**
5161 * ufshcd_exception_event_handler - handle exceptions raised by device
5162 * @work: pointer to work data
5163 *
5164 * Read bExceptionEventStatus attribute from the device and handle the
5165 * exception event accordingly.
5166 */
5167static void ufshcd_exception_event_handler(struct work_struct *work)
5168{
5169 struct ufs_hba *hba;
5170 int err;
5171 u32 status = 0;
5172 hba = container_of(work, struct ufs_hba, eeh_work);
5173
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05305174 pm_runtime_get_sync(hba->dev);
Maya Erez2e3611e92018-05-03 16:37:16 +05305175 scsi_block_requests(hba->host);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305176 err = ufshcd_get_ee_status(hba, &status);
5177 if (err) {
5178 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5179 __func__, err);
5180 goto out;
5181 }
5182
5183 status &= hba->ee_ctrl_mask;
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005184
5185 if (status & MASK_EE_URGENT_BKOPS)
5186 ufshcd_bkops_exception_event_handler(hba);
5187
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305188out:
Maya Erez2e3611e92018-05-03 16:37:16 +05305189 scsi_unblock_requests(hba->host);
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05305190 pm_runtime_put_sync(hba->dev);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305191 return;
5192}
5193
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005194/* Complete requests that have door-bell cleared */
5195static void ufshcd_complete_requests(struct ufs_hba *hba)
5196{
5197 ufshcd_transfer_req_compl(hba);
5198 ufshcd_tmc_handler(hba);
5199}
5200
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305201/**
Yaniv Gardi583fa622016-03-10 17:37:13 +02005202 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
5203 * to recover from the DL NAC errors or not.
5204 * @hba: per-adapter instance
5205 *
5206 * Returns true if error handling is required, false otherwise
5207 */
5208static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5209{
5210 unsigned long flags;
5211 bool err_handling = true;
5212
5213 spin_lock_irqsave(hba->host->host_lock, flags);
5214 /*
5215 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
5216 * device fatal error and/or DL NAC & REPLAY timeout errors.
5217 */
5218 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5219 goto out;
5220
5221 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5222 ((hba->saved_err & UIC_ERROR) &&
5223 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5224 goto out;
5225
5226 if ((hba->saved_err & UIC_ERROR) &&
5227 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5228 int err;
5229 /*
5230 * wait for 50ms to see if we can get any other errors or not.
5231 */
5232 spin_unlock_irqrestore(hba->host->host_lock, flags);
5233 msleep(50);
5234 spin_lock_irqsave(hba->host->host_lock, flags);
5235
5236 /*
5237 * now check if we have got any other severe errors other than
5238 * DL NAC error?
5239 */
5240 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5241 ((hba->saved_err & UIC_ERROR) &&
5242 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5243 goto out;
5244
5245 /*
5246 * As DL NAC is the only error received so far, send out NOP
5247 * command to confirm if link is still active or not.
5248 * - If we don't get any response then do error recovery.
5249 * - If we get response then clear the DL NAC error bit.
5250 */
5251
5252 spin_unlock_irqrestore(hba->host->host_lock, flags);
5253 err = ufshcd_verify_dev_init(hba);
5254 spin_lock_irqsave(hba->host->host_lock, flags);
5255
5256 if (err)
5257 goto out;
5258
5259 /* Link seems to be alive hence ignore the DL NAC errors */
5260 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5261 hba->saved_err &= ~UIC_ERROR;
5262 /* clear NAC error */
5263 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5264 if (!hba->saved_uic_err) {
5265 err_handling = false;
5266 goto out;
5267 }
5268 }
5269out:
5270 spin_unlock_irqrestore(hba->host->host_lock, flags);
5271 return err_handling;
5272}
5273
5274/**
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305275 * ufshcd_err_handler - handle UFS errors that require s/w attention
5276 * @work: pointer to work structure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305277 */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305278static void ufshcd_err_handler(struct work_struct *work)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305279{
5280 struct ufs_hba *hba;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305281 unsigned long flags;
5282 u32 err_xfer = 0;
5283 u32 err_tm = 0;
5284 int err = 0;
5285 int tag;
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005286 bool needs_reset = false;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305287
5288 hba = container_of(work, struct ufs_hba, eh_work);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305289
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05305290 pm_runtime_get_sync(hba->dev);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005291 ufshcd_hold(hba, false);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305292
5293 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005294 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305295 goto out;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305296
5297 hba->ufshcd_state = UFSHCD_STATE_RESET;
5298 ufshcd_set_eh_in_progress(hba);
5299
5300 /* Complete requests that have door-bell cleared by h/w */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005301 ufshcd_complete_requests(hba);
Yaniv Gardi583fa622016-03-10 17:37:13 +02005302
5303 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5304 bool ret;
5305
5306 spin_unlock_irqrestore(hba->host->host_lock, flags);
5307 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5308 ret = ufshcd_quirk_dl_nac_errors(hba);
5309 spin_lock_irqsave(hba->host->host_lock, flags);
5310 if (!ret)
5311 goto skip_err_handling;
5312 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005313 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5314 ((hba->saved_err & UIC_ERROR) &&
5315 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5316 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5317 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5318 needs_reset = true;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305319
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005320 /*
5321 * if host reset is required then skip clearing the pending
5322 * transfers forcefully because they will automatically get
5323 * cleared after link startup.
5324 */
5325 if (needs_reset)
5326 goto skip_pending_xfer_clear;
5327
5328 /* release lock as clear command might sleep */
5329 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305330 /* Clear pending transfer requests */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005331 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5332 if (ufshcd_clear_cmd(hba, tag)) {
5333 err_xfer = true;
5334 goto lock_skip_pending_xfer_clear;
5335 }
5336 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305337
5338 /* Clear pending task management requests */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005339 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5340 if (ufshcd_clear_tm_cmd(hba, tag)) {
5341 err_tm = true;
5342 goto lock_skip_pending_xfer_clear;
5343 }
5344 }
5345
5346lock_skip_pending_xfer_clear:
5347 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305348
5349 /* Complete the requests that are cleared by s/w */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005350 ufshcd_complete_requests(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305351
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005352 if (err_xfer || err_tm)
5353 needs_reset = true;
5354
5355skip_pending_xfer_clear:
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305356 /* Fatal errors need reset */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005357 if (needs_reset) {
5358 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5359
5360 /*
5361 * ufshcd_reset_and_restore() does the link reinitialization
5362 * which will need atleast one empty doorbell slot to send the
5363 * device management commands (NOP and query commands).
5364 * If there is no slot empty at this moment then free up last
5365 * slot forcefully.
5366 */
5367 if (hba->outstanding_reqs == max_doorbells)
5368 __ufshcd_transfer_req_compl(hba,
5369 (1UL << (hba->nutrs - 1)));
5370
5371 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305372 err = ufshcd_reset_and_restore(hba);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005373 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305374 if (err) {
5375 dev_err(hba->dev, "%s: reset and restore failed\n",
5376 __func__);
5377 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5378 }
5379 /*
5380 * Inform scsi mid-layer that we did reset and allow to handle
5381 * Unit Attention properly.
5382 */
5383 scsi_report_bus_reset(hba->host, 0);
5384 hba->saved_err = 0;
5385 hba->saved_uic_err = 0;
5386 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005387
Yaniv Gardi583fa622016-03-10 17:37:13 +02005388skip_err_handling:
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005389 if (!needs_reset) {
5390 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5391 if (hba->saved_err || hba->saved_uic_err)
5392 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5393 __func__, hba->saved_err, hba->saved_uic_err);
5394 }
5395
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305396 ufshcd_clear_eh_in_progress(hba);
5397
5398out:
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005399 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavani38135532018-05-03 16:37:18 +05305400 ufshcd_scsi_unblock_requests(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005401 ufshcd_release(hba);
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05305402 pm_runtime_put_sync(hba->dev);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305403}
5404
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005405static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5406 u32 reg)
5407{
5408 reg_hist->reg[reg_hist->pos] = reg;
5409 reg_hist->tstamp[reg_hist->pos] = ktime_get();
5410 reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
5411}
5412
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305413/**
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305414 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5415 * @hba: per-adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305416 */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305417static void ufshcd_update_uic_error(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305418{
5419 u32 reg;
5420
Dolev Ravivfb7b45f2016-11-23 16:32:32 -08005421 /* PHY layer lane error */
5422 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5423 /* Ignore LINERESET indication, as this is not an error */
5424 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005425 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
Dolev Ravivfb7b45f2016-11-23 16:32:32 -08005426 /*
5427 * To know whether this error is fatal or not, DB timeout
5428 * must be checked but this error is handled separately.
5429 */
5430 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005431 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
5432 }
Dolev Ravivfb7b45f2016-11-23 16:32:32 -08005433
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305434 /* PA_INIT_ERROR is fatal and needs UIC reset */
5435 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005436 if (reg)
5437 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
5438
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305439 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5440 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
Yaniv Gardi583fa622016-03-10 17:37:13 +02005441 else if (hba->dev_quirks &
5442 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5443 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5444 hba->uic_error |=
5445 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5446 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5447 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5448 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305449
5450 /* UIC NL/TL/DME errors needs software retry */
5451 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005452 if (reg) {
5453 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305454 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005455 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305456
5457 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005458 if (reg) {
5459 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305460 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005461 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305462
5463 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005464 if (reg) {
5465 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305466 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005467 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305468
5469 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5470 __func__, hba->uic_error);
5471}
5472
5473/**
5474 * ufshcd_check_errors - Check for errors that need s/w attention
5475 * @hba: per-adapter instance
5476 */
5477static void ufshcd_check_errors(struct ufs_hba *hba)
5478{
5479 bool queue_eh_work = false;
5480
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305481 if (hba->errors & INT_FATAL_ERRORS)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305482 queue_eh_work = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305483
5484 if (hba->errors & UIC_ERROR) {
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305485 hba->uic_error = 0;
5486 ufshcd_update_uic_error(hba);
5487 if (hba->uic_error)
5488 queue_eh_work = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305489 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305490
5491 if (queue_eh_work) {
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005492 /*
5493 * update the transfer error masks to sticky bits, let's do this
5494 * irrespective of current ufshcd_state.
5495 */
5496 hba->saved_err |= hba->errors;
5497 hba->saved_uic_err |= hba->uic_error;
5498
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305499 /* handle fatal errors only when link is functional */
5500 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5501 /* block commands from scsi mid-layer */
Subhash Jadavani38135532018-05-03 16:37:18 +05305502 ufshcd_scsi_block_requests(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305503
Zang Leigang141f8162016-11-16 11:29:37 +08005504 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
Dolev Raviv66cc8202016-12-22 18:39:42 -08005505
5506 /* dump controller state before resetting */
5507 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5508 bool pr_prdt = !!(hba->saved_err &
5509 SYSTEM_BUS_FATAL_ERROR);
5510
5511 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5512 __func__, hba->saved_err,
5513 hba->saved_uic_err);
5514
5515 ufshcd_print_host_regs(hba);
5516 ufshcd_print_pwr_info(hba);
5517 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5518 ufshcd_print_trs(hba, hba->outstanding_reqs,
5519 pr_prdt);
5520 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305521 schedule_work(&hba->eh_work);
5522 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305523 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305524 /*
5525 * if (!queue_eh_work) -
5526 * Other errors are either non-fatal where host recovers
5527 * itself without s/w intervention or errors that will be
5528 * handled by the SCSI core layer.
5529 */
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305530}
5531
5532/**
5533 * ufshcd_tmc_handler - handle task management function completion
5534 * @hba: per adapter instance
5535 */
5536static void ufshcd_tmc_handler(struct ufs_hba *hba)
5537{
5538 u32 tm_doorbell;
5539
Seungwon Jeonb873a2752013-06-26 22:39:26 +05305540 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305541 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305542 wake_up(&hba->tm_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305543}
5544
5545/**
5546 * ufshcd_sl_intr - Interrupt service routine
5547 * @hba: per adapter instance
5548 * @intr_status: contains interrupts generated by the controller
5549 */
5550static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5551{
5552 hba->errors = UFSHCD_ERROR_MASK & intr_status;
5553 if (hba->errors)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305554 ufshcd_check_errors(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305555
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05305556 if (intr_status & UFSHCD_UIC_MASK)
5557 ufshcd_uic_cmd_compl(hba, intr_status);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305558
5559 if (intr_status & UTP_TASK_REQ_COMPL)
5560 ufshcd_tmc_handler(hba);
5561
5562 if (intr_status & UTP_TRANSFER_REQ_COMPL)
5563 ufshcd_transfer_req_compl(hba);
5564}
5565
5566/**
5567 * ufshcd_intr - Main interrupt service routine
5568 * @irq: irq number
5569 * @__hba: pointer to adapter instance
5570 *
5571 * Returns IRQ_HANDLED - If interrupt is valid
5572 * IRQ_NONE - If invalid interrupt
5573 */
5574static irqreturn_t ufshcd_intr(int irq, void *__hba)
5575{
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02005576 u32 intr_status, enabled_intr_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305577 irqreturn_t retval = IRQ_NONE;
5578 struct ufs_hba *hba = __hba;
Venkat Gopalakrishnan7f6ba4f2018-05-03 16:37:20 +05305579 int retries = hba->nutrs;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305580
5581 spin_lock(hba->host->host_lock);
Seungwon Jeonb873a2752013-06-26 22:39:26 +05305582 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305583
Venkat Gopalakrishnan7f6ba4f2018-05-03 16:37:20 +05305584 /*
5585 * There could be max of hba->nutrs reqs in flight and in worst case
5586 * if the reqs get finished 1 by 1 after the interrupt status is
5587 * read, make sure we handle them by checking the interrupt status
5588 * again in a loop until we process all of the reqs before returning.
5589 */
5590 do {
5591 enabled_intr_status =
5592 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5593 if (intr_status)
5594 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5595 if (enabled_intr_status) {
5596 ufshcd_sl_intr(hba, enabled_intr_status);
5597 retval = IRQ_HANDLED;
5598 }
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02005599
Venkat Gopalakrishnan7f6ba4f2018-05-03 16:37:20 +05305600 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5601 } while (intr_status && --retries);
5602
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305603 spin_unlock(hba->host->host_lock);
5604 return retval;
5605}
5606
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305607static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5608{
5609 int err = 0;
5610 u32 mask = 1 << tag;
5611 unsigned long flags;
5612
5613 if (!test_bit(tag, &hba->outstanding_tasks))
5614 goto out;
5615
5616 spin_lock_irqsave(hba->host->host_lock, flags);
Alim Akhtar1399c5b2018-05-06 15:44:15 +05305617 ufshcd_utmrl_clear(hba, tag);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305618 spin_unlock_irqrestore(hba->host->host_lock, flags);
5619
5620 /* poll for max. 1 sec to clear door bell register by h/w */
5621 err = ufshcd_wait_for_register(hba,
5622 REG_UTP_TASK_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02005623 mask, 0, 1000, 1000, true);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305624out:
5625 return err;
5626}
5627
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305628/**
5629 * ufshcd_issue_tm_cmd - issues task management commands to controller
5630 * @hba: per adapter instance
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305631 * @lun_id: LUN ID to which TM command is sent
5632 * @task_id: task ID to which the TM command is applicable
5633 * @tm_function: task management function opcode
5634 * @tm_response: task management service response return value
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305635 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305636 * Returns non-zero value on error, zero on success.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305637 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305638static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5639 u8 tm_function, u8 *tm_response)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305640{
5641 struct utp_task_req_desc *task_req_descp;
5642 struct utp_upiu_task_req *task_req_upiup;
5643 struct Scsi_Host *host;
5644 unsigned long flags;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305645 int free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305646 int err;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305647 int task_tag;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305648
5649 host = hba->host;
5650
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305651 /*
5652 * Get free slot, sleep if slots are unavailable.
5653 * Even though we use wait_event() which sleeps indefinitely,
5654 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
5655 */
5656 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005657 ufshcd_hold(hba, false);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305658
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305659 spin_lock_irqsave(host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305660 task_req_descp = hba->utmrdl_base_addr;
5661 task_req_descp += free_slot;
5662
5663 /* Configure task request descriptor */
5664 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5665 task_req_descp->header.dword_2 =
5666 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5667
5668 /* Configure task request UPIU */
5669 task_req_upiup =
5670 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305671 task_tag = hba->nutrs + free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305672 task_req_upiup->header.dword_0 =
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305673 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305674 lun_id, task_tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305675 task_req_upiup->header.dword_1 =
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305676 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03005677 /*
5678 * The host shall provide the same value for LUN field in the basic
5679 * header and for Input Parameter.
5680 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305681 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
5682 task_req_upiup->input_param2 = cpu_to_be32(task_id);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305683
Kiwoong Kimd2877be2016-11-10 21:16:15 +09005684 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5685
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305686 /* send command to the controller */
5687 __set_bit(free_slot, &hba->outstanding_tasks);
Yaniv Gardi897efe62016-02-01 15:02:48 +02005688
5689 /* Make sure descriptors are ready before ringing the task doorbell */
5690 wmb();
5691
Seungwon Jeonb873a2752013-06-26 22:39:26 +05305692 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
Gilad Bronerad1a1b92016-10-17 17:09:36 -07005693 /* Make sure that doorbell is committed immediately */
5694 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305695
5696 spin_unlock_irqrestore(host->host_lock, flags);
5697
Ohad Sharabi6667e6d2018-03-28 12:42:18 +03005698 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
5699
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305700 /* wait until the task management command is completed */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305701 err = wait_event_timeout(hba->tm_wq,
5702 test_bit(free_slot, &hba->tm_condition),
5703 msecs_to_jiffies(TM_CMD_TIMEOUT));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305704 if (!err) {
Ohad Sharabi6667e6d2018-03-28 12:42:18 +03005705 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305706 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5707 __func__, tm_function);
5708 if (ufshcd_clear_tm_cmd(hba, free_slot))
5709 dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
5710 __func__, free_slot);
5711 err = -ETIMEDOUT;
5712 } else {
5713 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
Ohad Sharabi6667e6d2018-03-28 12:42:18 +03005714 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305715 }
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305716
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305717 clear_bit(free_slot, &hba->tm_condition);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305718 ufshcd_put_tm_slot(hba, free_slot);
5719 wake_up(&hba->tm_tag_wq);
5720
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005721 ufshcd_release(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305722 return err;
5723}
5724
5725/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305726 * ufshcd_eh_device_reset_handler - device reset handler registered to
5727 * scsi layer.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305728 * @cmd: SCSI command pointer
5729 *
5730 * Returns SUCCESS/FAILED
5731 */
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305732static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305733{
5734 struct Scsi_Host *host;
5735 struct ufs_hba *hba;
5736 unsigned int tag;
5737 u32 pos;
5738 int err;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305739 u8 resp = 0xF;
5740 struct ufshcd_lrb *lrbp;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305741 unsigned long flags;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305742
5743 host = cmd->device->host;
5744 hba = shost_priv(host);
5745 tag = cmd->request->tag;
5746
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305747 lrbp = &hba->lrb[tag];
5748 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
5749 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305750 if (!err)
5751 err = resp;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305752 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305753 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305754
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305755 /* clear the commands that were pending for corresponding LUN */
5756 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
5757 if (hba->lrb[pos].lun == lrbp->lun) {
5758 err = ufshcd_clear_cmd(hba, pos);
5759 if (err)
5760 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305761 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305762 }
5763 spin_lock_irqsave(host->host_lock, flags);
5764 ufshcd_transfer_req_compl(hba);
5765 spin_unlock_irqrestore(host->host_lock, flags);
Gilad Broner7fabb772017-02-03 16:56:50 -08005766
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305767out:
Gilad Broner7fabb772017-02-03 16:56:50 -08005768 hba->req_abort_count = 0;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305769 if (!err) {
5770 err = SUCCESS;
5771 } else {
5772 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5773 err = FAILED;
5774 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305775 return err;
5776}
5777
Gilad Bronere0b299e2017-02-03 16:56:40 -08005778static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
5779{
5780 struct ufshcd_lrb *lrbp;
5781 int tag;
5782
5783 for_each_set_bit(tag, &bitmap, hba->nutrs) {
5784 lrbp = &hba->lrb[tag];
5785 lrbp->req_abort_skip = true;
5786 }
5787}
5788
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305789/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305790 * ufshcd_abort - abort a specific command
5791 * @cmd: SCSI command pointer
5792 *
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305793 * Abort the pending command in device by sending UFS_ABORT_TASK task management
5794 * command, and in host controller by clearing the door-bell register. There can
5795 * be race between controller sending the command to the device while abort is
5796 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
5797 * really issued and then try to abort it.
5798 *
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305799 * Returns SUCCESS/FAILED
5800 */
5801static int ufshcd_abort(struct scsi_cmnd *cmd)
5802{
5803 struct Scsi_Host *host;
5804 struct ufs_hba *hba;
5805 unsigned long flags;
5806 unsigned int tag;
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305807 int err = 0;
5808 int poll_cnt;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305809 u8 resp = 0xF;
5810 struct ufshcd_lrb *lrbp;
Dolev Ravive9d501b2014-07-01 12:22:37 +03005811 u32 reg;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305812
5813 host = cmd->device->host;
5814 hba = shost_priv(host);
5815 tag = cmd->request->tag;
Dolev Ravive7d38252016-12-22 18:40:07 -08005816 lrbp = &hba->lrb[tag];
Yaniv Gardi14497322016-02-01 15:02:39 +02005817 if (!ufshcd_valid_tag(hba, tag)) {
5818 dev_err(hba->dev,
5819 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
5820 __func__, tag, cmd, cmd->request);
5821 BUG();
5822 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305823
Dolev Ravive7d38252016-12-22 18:40:07 -08005824 /*
5825 * Task abort to the device W-LUN is illegal. When this command
5826 * will fail, due to spec violation, scsi err handling next step
5827 * will be to send LU reset which, again, is a spec violation.
5828 * To avoid these unnecessary/illegal step we skip to the last error
5829 * handling stage: reset and restore.
5830 */
5831 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
5832 return ufshcd_eh_host_reset_handler(cmd);
5833
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005834 ufshcd_hold(hba, false);
Dolev Ravive9d501b2014-07-01 12:22:37 +03005835 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Yaniv Gardi14497322016-02-01 15:02:39 +02005836 /* If command is already aborted/completed, return SUCCESS */
5837 if (!(test_bit(tag, &hba->outstanding_reqs))) {
5838 dev_err(hba->dev,
5839 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
5840 __func__, tag, hba->outstanding_reqs, reg);
5841 goto out;
5842 }
5843
Dolev Ravive9d501b2014-07-01 12:22:37 +03005844 if (!(reg & (1 << tag))) {
5845 dev_err(hba->dev,
5846 "%s: cmd was completed, but without a notifying intr, tag = %d",
5847 __func__, tag);
5848 }
5849
Dolev Raviv66cc8202016-12-22 18:39:42 -08005850 /* Print Transfer Request of aborted task */
5851 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
Dolev Raviv66cc8202016-12-22 18:39:42 -08005852
Gilad Broner7fabb772017-02-03 16:56:50 -08005853 /*
5854 * Print detailed info about aborted request.
5855 * As more than one request might get aborted at the same time,
5856 * print full information only for the first aborted request in order
5857 * to reduce repeated printouts. For other aborted requests only print
5858 * basic details.
5859 */
5860 scsi_print_command(hba->lrb[tag].cmd);
5861 if (!hba->req_abort_count) {
5862 ufshcd_print_host_regs(hba);
Gilad Broner6ba65582017-02-03 16:57:28 -08005863 ufshcd_print_host_state(hba);
Gilad Broner7fabb772017-02-03 16:56:50 -08005864 ufshcd_print_pwr_info(hba);
5865 ufshcd_print_trs(hba, 1 << tag, true);
5866 } else {
5867 ufshcd_print_trs(hba, 1 << tag, false);
5868 }
5869 hba->req_abort_count++;
Gilad Bronere0b299e2017-02-03 16:56:40 -08005870
5871 /* Skip task abort in case previous aborts failed and report failure */
5872 if (lrbp->req_abort_skip) {
5873 err = -EIO;
5874 goto out;
5875 }
5876
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305877 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
5878 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
5879 UFS_QUERY_TASK, &resp);
5880 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
5881 /* cmd pending in the device */
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005882 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
5883 __func__, tag);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305884 break;
5885 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305886 /*
5887 * cmd not pending in the device, check if it is
5888 * in transition.
5889 */
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005890 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
5891 __func__, tag);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305892 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5893 if (reg & (1 << tag)) {
5894 /* sleep for max. 200us to stabilize */
5895 usleep_range(100, 200);
5896 continue;
5897 }
5898 /* command completed already */
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005899 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
5900 __func__, tag);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305901 goto out;
5902 } else {
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005903 dev_err(hba->dev,
5904 "%s: no response from device. tag = %d, err %d\n",
5905 __func__, tag, err);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305906 if (!err)
5907 err = resp; /* service response error */
5908 goto out;
5909 }
5910 }
5911
5912 if (!poll_cnt) {
5913 err = -EBUSY;
5914 goto out;
5915 }
5916
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305917 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
5918 UFS_ABORT_TASK, &resp);
5919 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005920 if (!err) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305921 err = resp; /* service response error */
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005922 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
5923 __func__, tag, err);
5924 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305925 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305926 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305927
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305928 err = ufshcd_clear_cmd(hba, tag);
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005929 if (err) {
5930 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
5931 __func__, tag, err);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305932 goto out;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08005933 }
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305934
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305935 scsi_dma_unmap(cmd);
5936
5937 spin_lock_irqsave(host->host_lock, flags);
Yaniv Gardia48353f2016-02-01 15:02:40 +02005938 ufshcd_outstanding_req_clear(hba, tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305939 hba->lrb[tag].cmd = NULL;
5940 spin_unlock_irqrestore(host->host_lock, flags);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305941
5942 clear_bit_unlock(tag, &hba->lrb_in_use);
5943 wake_up(&hba->dev_cmd.tag_wq);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005944
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305945out:
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305946 if (!err) {
5947 err = SUCCESS;
5948 } else {
5949 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
Gilad Bronere0b299e2017-02-03 16:56:40 -08005950 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305951 err = FAILED;
5952 }
5953
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005954 /*
5955 * This ufshcd_release() corresponds to the original scsi cmd that got
5956 * aborted here (as we won't get any IRQ for it).
5957 */
5958 ufshcd_release(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305959 return err;
5960}
5961
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305962/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305963 * ufshcd_host_reset_and_restore - reset and restore host controller
5964 * @hba: per-adapter instance
5965 *
5966 * Note that host controller reset may issue DME_RESET to
5967 * local and remote (device) Uni-Pro stack and the attributes
5968 * are reset to default state.
5969 *
5970 * Returns zero on success, non-zero on failure
5971 */
5972static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
5973{
5974 int err;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305975 unsigned long flags;
5976
5977 /* Reset the host controller */
5978 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardi596585a2016-03-10 17:37:08 +02005979 ufshcd_hba_stop(hba, false);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305980 spin_unlock_irqrestore(hba->host->host_lock, flags);
5981
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08005982 /* scale up clocks to max frequency before full reinitialization */
5983 ufshcd_scale_clks(hba, true);
5984
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305985 err = ufshcd_hba_enable(hba);
5986 if (err)
5987 goto out;
5988
5989 /* Establish the link again and restore the device */
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005990 err = ufshcd_probe_hba(hba);
5991
5992 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305993 err = -EIO;
5994out:
5995 if (err)
5996 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
5997
5998 return err;
5999}
6000
6001/**
6002 * ufshcd_reset_and_restore - reset and re-initialize host/device
6003 * @hba: per-adapter instance
6004 *
6005 * Reset and recover device, host and re-establish link. This
6006 * is helpful to recover the communication in fatal error conditions.
6007 *
6008 * Returns zero on success, non-zero on failure
6009 */
6010static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6011{
6012 int err = 0;
6013 unsigned long flags;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006014 int retries = MAX_HOST_RESET_RETRIES;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306015
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006016 do {
6017 err = ufshcd_host_reset_and_restore(hba);
6018 } while (err && --retries);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306019
6020 /*
6021 * After reset the door-bell might be cleared, complete
6022 * outstanding requests in s/w here.
6023 */
6024 spin_lock_irqsave(hba->host->host_lock, flags);
6025 ufshcd_transfer_req_compl(hba);
6026 ufshcd_tmc_handler(hba);
6027 spin_unlock_irqrestore(hba->host->host_lock, flags);
6028
6029 return err;
6030}
6031
6032/**
6033 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
Bart Van Assche8aa29f12018-03-01 15:07:20 -08006034 * @cmd: SCSI command pointer
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306035 *
6036 * Returns SUCCESS/FAILED
6037 */
6038static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6039{
6040 int err;
6041 unsigned long flags;
6042 struct ufs_hba *hba;
6043
6044 hba = shost_priv(cmd->device->host);
6045
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006046 ufshcd_hold(hba, false);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306047 /*
6048 * Check if there is any race with fatal error handling.
6049 * If so, wait for it to complete. Even though fatal error
6050 * handling does reset and restore in some cases, don't assume
6051 * anything out of it. We are just avoiding race here.
6052 */
6053 do {
6054 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306055 if (!(work_pending(&hba->eh_work) ||
Zang Leigang8dc0da72017-06-24 19:14:32 +08006056 hba->ufshcd_state == UFSHCD_STATE_RESET ||
6057 hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306058 break;
6059 spin_unlock_irqrestore(hba->host->host_lock, flags);
6060 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306061 flush_work(&hba->eh_work);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306062 } while (1);
6063
6064 hba->ufshcd_state = UFSHCD_STATE_RESET;
6065 ufshcd_set_eh_in_progress(hba);
6066 spin_unlock_irqrestore(hba->host->host_lock, flags);
6067
6068 err = ufshcd_reset_and_restore(hba);
6069
6070 spin_lock_irqsave(hba->host->host_lock, flags);
6071 if (!err) {
6072 err = SUCCESS;
6073 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6074 } else {
6075 err = FAILED;
6076 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6077 }
6078 ufshcd_clear_eh_in_progress(hba);
6079 spin_unlock_irqrestore(hba->host->host_lock, flags);
6080
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006081 ufshcd_release(hba);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306082 return err;
6083}
6084
6085/**
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006086 * ufshcd_get_max_icc_level - calculate the ICC level
6087 * @sup_curr_uA: max. current supported by the regulator
6088 * @start_scan: row at the desc table to start scan from
6089 * @buff: power descriptor buffer
6090 *
6091 * Returns calculated max ICC level for specific regulator
6092 */
6093static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6094{
6095 int i;
6096 int curr_uA;
6097 u16 data;
6098 u16 unit;
6099
6100 for (i = start_scan; i >= 0; i--) {
Tomas Winklerd79713f2017-01-05 10:45:11 +02006101 data = be16_to_cpup((__be16 *)&buff[2 * i]);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006102 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6103 ATTR_ICC_LVL_UNIT_OFFSET;
6104 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6105 switch (unit) {
6106 case UFSHCD_NANO_AMP:
6107 curr_uA = curr_uA / 1000;
6108 break;
6109 case UFSHCD_MILI_AMP:
6110 curr_uA = curr_uA * 1000;
6111 break;
6112 case UFSHCD_AMP:
6113 curr_uA = curr_uA * 1000 * 1000;
6114 break;
6115 case UFSHCD_MICRO_AMP:
6116 default:
6117 break;
6118 }
6119 if (sup_curr_uA >= curr_uA)
6120 break;
6121 }
6122 if (i < 0) {
6123 i = 0;
6124 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
6125 }
6126
6127 return (u32)i;
6128}
6129
6130/**
6131 * ufshcd_calc_icc_level - calculate the max ICC level
6132 * In case regulators are not initialized we'll return 0
6133 * @hba: per-adapter instance
6134 * @desc_buf: power descriptor buffer to extract ICC levels from.
6135 * @len: length of desc_buff
6136 *
6137 * Returns calculated ICC level
6138 */
6139static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6140 u8 *desc_buf, int len)
6141{
6142 u32 icc_level = 0;
6143
6144 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6145 !hba->vreg_info.vccq2) {
6146 dev_err(hba->dev,
6147 "%s: Regulator capability was not set, actvIccLevel=%d",
6148 __func__, icc_level);
6149 goto out;
6150 }
6151
6152 if (hba->vreg_info.vcc)
6153 icc_level = ufshcd_get_max_icc_level(
6154 hba->vreg_info.vcc->max_uA,
6155 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6156 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6157
6158 if (hba->vreg_info.vccq)
6159 icc_level = ufshcd_get_max_icc_level(
6160 hba->vreg_info.vccq->max_uA,
6161 icc_level,
6162 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6163
6164 if (hba->vreg_info.vccq2)
6165 icc_level = ufshcd_get_max_icc_level(
6166 hba->vreg_info.vccq2->max_uA,
6167 icc_level,
6168 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6169out:
6170 return icc_level;
6171}
6172
6173static void ufshcd_init_icc_levels(struct ufs_hba *hba)
6174{
6175 int ret;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00006176 int buff_len = hba->desc_size.pwr_desc;
Kees Cookbbe21d72018-05-02 16:58:09 -07006177 u8 *desc_buf;
6178
6179 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6180 if (!desc_buf)
6181 return;
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006182
6183 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
6184 if (ret) {
6185 dev_err(hba->dev,
6186 "%s: Failed reading power descriptor.len = %d ret = %d",
6187 __func__, buff_len, ret);
Kees Cookbbe21d72018-05-02 16:58:09 -07006188 goto out;
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006189 }
6190
6191 hba->init_prefetch_data.icc_level =
6192 ufshcd_find_max_sup_active_icc_level(hba,
6193 desc_buf, buff_len);
6194 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
6195 __func__, hba->init_prefetch_data.icc_level);
6196
Szymon Mielczarekdbd34a62017-03-29 08:19:21 +02006197 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6198 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
6199 &hba->init_prefetch_data.icc_level);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006200
6201 if (ret)
6202 dev_err(hba->dev,
6203 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
6204 __func__, hba->init_prefetch_data.icc_level , ret);
6205
Kees Cookbbe21d72018-05-02 16:58:09 -07006206out:
6207 kfree(desc_buf);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006208}
6209
6210/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006211 * ufshcd_scsi_add_wlus - Adds required W-LUs
6212 * @hba: per-adapter instance
6213 *
6214 * UFS device specification requires the UFS devices to support 4 well known
6215 * logical units:
6216 * "REPORT_LUNS" (address: 01h)
6217 * "UFS Device" (address: 50h)
6218 * "RPMB" (address: 44h)
6219 * "BOOT" (address: 30h)
6220 * UFS device's power management needs to be controlled by "POWER CONDITION"
6221 * field of SSU (START STOP UNIT) command. But this "power condition" field
6222 * will take effect only when its sent to "UFS device" well known logical unit
6223 * hence we require the scsi_device instance to represent this logical unit in
6224 * order for the UFS host driver to send the SSU command for power management.
Bart Van Assche8aa29f12018-03-01 15:07:20 -08006225 *
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006226 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
6227 * Block) LU so user space process can control this LU. User space may also
6228 * want to have access to BOOT LU.
Bart Van Assche8aa29f12018-03-01 15:07:20 -08006229 *
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006230 * This function adds scsi device instances for each of all well known LUs
6231 * (except "REPORT LUNS" LU).
6232 *
6233 * Returns zero on success (all required W-LUs are added successfully),
6234 * non-zero error value on failure (if failed to add any of the required W-LU).
6235 */
6236static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6237{
6238 int ret = 0;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006239 struct scsi_device *sdev_rpmb;
6240 struct scsi_device *sdev_boot;
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006241
6242 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6243 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6244 if (IS_ERR(hba->sdev_ufs_device)) {
6245 ret = PTR_ERR(hba->sdev_ufs_device);
6246 hba->sdev_ufs_device = NULL;
6247 goto out;
6248 }
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006249 scsi_device_put(hba->sdev_ufs_device);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006250
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006251 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006252 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006253 if (IS_ERR(sdev_rpmb)) {
6254 ret = PTR_ERR(sdev_rpmb);
Huanlin Ke3d21fbd2017-09-22 18:31:47 +08006255 goto remove_sdev_ufs_device;
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006256 }
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006257 scsi_device_put(sdev_rpmb);
Huanlin Ke3d21fbd2017-09-22 18:31:47 +08006258
6259 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6260 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6261 if (IS_ERR(sdev_boot))
6262 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
6263 else
6264 scsi_device_put(sdev_boot);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006265 goto out;
6266
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006267remove_sdev_ufs_device:
6268 scsi_remove_device(hba->sdev_ufs_device);
6269out:
6270 return ret;
6271}
6272
Tomas Winkler93fdd5a2017-01-05 10:45:12 +02006273static int ufs_get_device_desc(struct ufs_hba *hba,
6274 struct ufs_dev_desc *dev_desc)
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006275{
6276 int err;
Kees Cookbbe21d72018-05-02 16:58:09 -07006277 size_t buff_len;
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006278 u8 model_index;
Kees Cookbbe21d72018-05-02 16:58:09 -07006279 u8 *desc_buf;
6280
6281 buff_len = max_t(size_t, hba->desc_size.dev_desc,
6282 QUERY_DESC_MAX_SIZE + 1);
6283 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6284 if (!desc_buf) {
6285 err = -ENOMEM;
6286 goto out;
6287 }
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006288
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00006289 err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006290 if (err) {
6291 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6292 __func__, err);
6293 goto out;
6294 }
6295
6296 /*
6297 * getting vendor (manufacturerID) and Bank Index in big endian
6298 * format
6299 */
Tomas Winkler93fdd5a2017-01-05 10:45:12 +02006300 dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006301 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6302
6303 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6304
Kees Cookbbe21d72018-05-02 16:58:09 -07006305 /* Zero-pad entire buffer for string termination. */
6306 memset(desc_buf, 0, buff_len);
6307
6308 err = ufshcd_read_string_desc(hba, model_index, desc_buf,
Bart Van Assche8aa29f12018-03-01 15:07:20 -08006309 QUERY_DESC_MAX_SIZE, true/*ASCII*/);
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006310 if (err) {
6311 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6312 __func__, err);
6313 goto out;
6314 }
6315
Kees Cookbbe21d72018-05-02 16:58:09 -07006316 desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
6317 strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
6318 min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006319 MAX_MODEL_LEN));
6320
6321 /* Null terminate the model string */
Tomas Winkler93fdd5a2017-01-05 10:45:12 +02006322 dev_desc->model[MAX_MODEL_LEN] = '\0';
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006323
6324out:
Kees Cookbbe21d72018-05-02 16:58:09 -07006325 kfree(desc_buf);
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006326 return err;
6327}
6328
Tomas Winkler93fdd5a2017-01-05 10:45:12 +02006329static void ufs_fixup_device_setup(struct ufs_hba *hba,
6330 struct ufs_dev_desc *dev_desc)
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006331{
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006332 struct ufs_dev_fix *f;
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006333
6334 for (f = ufs_fixups; f->quirk; f++) {
Tomas Winkler93fdd5a2017-01-05 10:45:12 +02006335 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6336 f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6337 (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02006338 !strcmp(f->card.model, UFS_ANY_MODEL)))
6339 hba->dev_quirks |= f->quirk;
6340 }
6341}
6342
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006343/**
Yaniv Gardi37113102016-03-10 17:37:16 +02006344 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6345 * @hba: per-adapter instance
6346 *
6347 * PA_TActivate parameter can be tuned manually if UniPro version is less than
6348 * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's
6349 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
6350 * the hibern8 exit latency.
6351 *
6352 * Returns zero on success, non-zero error value on failure.
6353 */
6354static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6355{
6356 int ret = 0;
6357 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6358
6359 ret = ufshcd_dme_peer_get(hba,
6360 UIC_ARG_MIB_SEL(
6361 RX_MIN_ACTIVATETIME_CAPABILITY,
6362 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6363 &peer_rx_min_activatetime);
6364 if (ret)
6365 goto out;
6366
6367 /* make sure proper unit conversion is applied */
6368 tuned_pa_tactivate =
6369 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6370 / PA_TACTIVATE_TIME_UNIT_US);
6371 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6372 tuned_pa_tactivate);
6373
6374out:
6375 return ret;
6376}
6377
6378/**
6379 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
6380 * @hba: per-adapter instance
6381 *
6382 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
6383 * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's
6384 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
6385 * This optimal value can help reduce the hibern8 exit latency.
6386 *
6387 * Returns zero on success, non-zero error value on failure.
6388 */
6389static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6390{
6391 int ret = 0;
6392 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6393 u32 max_hibern8_time, tuned_pa_hibern8time;
6394
6395 ret = ufshcd_dme_get(hba,
6396 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6397 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6398 &local_tx_hibern8_time_cap);
6399 if (ret)
6400 goto out;
6401
6402 ret = ufshcd_dme_peer_get(hba,
6403 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6404 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6405 &peer_rx_hibern8_time_cap);
6406 if (ret)
6407 goto out;
6408
6409 max_hibern8_time = max(local_tx_hibern8_time_cap,
6410 peer_rx_hibern8_time_cap);
6411 /* make sure proper unit conversion is applied */
6412 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6413 / PA_HIBERN8_TIME_UNIT_US);
6414 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6415 tuned_pa_hibern8time);
6416out:
6417 return ret;
6418}
6419
subhashj@codeaurora.orgc6a6db42016-11-23 16:32:08 -08006420/**
6421 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
6422 * less than device PA_TACTIVATE time.
6423 * @hba: per-adapter instance
6424 *
6425 * Some UFS devices require host PA_TACTIVATE to be lower than device
6426 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
6427 * for such devices.
6428 *
6429 * Returns zero on success, non-zero error value on failure.
6430 */
6431static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
6432{
6433 int ret = 0;
6434 u32 granularity, peer_granularity;
6435 u32 pa_tactivate, peer_pa_tactivate;
6436 u32 pa_tactivate_us, peer_pa_tactivate_us;
6437 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
6438
6439 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6440 &granularity);
6441 if (ret)
6442 goto out;
6443
6444 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6445 &peer_granularity);
6446 if (ret)
6447 goto out;
6448
6449 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
6450 (granularity > PA_GRANULARITY_MAX_VAL)) {
6451 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
6452 __func__, granularity);
6453 return -EINVAL;
6454 }
6455
6456 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
6457 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
6458 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
6459 __func__, peer_granularity);
6460 return -EINVAL;
6461 }
6462
6463 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
6464 if (ret)
6465 goto out;
6466
6467 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
6468 &peer_pa_tactivate);
6469 if (ret)
6470 goto out;
6471
6472 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
6473 peer_pa_tactivate_us = peer_pa_tactivate *
6474 gran_to_us_table[peer_granularity - 1];
6475
6476 if (pa_tactivate_us > peer_pa_tactivate_us) {
6477 u32 new_peer_pa_tactivate;
6478
6479 new_peer_pa_tactivate = pa_tactivate_us /
6480 gran_to_us_table[peer_granularity - 1];
6481 new_peer_pa_tactivate++;
6482 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6483 new_peer_pa_tactivate);
6484 }
6485
6486out:
6487 return ret;
6488}
6489
Yaniv Gardi37113102016-03-10 17:37:16 +02006490static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
6491{
6492 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6493 ufshcd_tune_pa_tactivate(hba);
6494 ufshcd_tune_pa_hibern8time(hba);
6495 }
6496
6497 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
6498 /* set 1ms timeout for PA_TACTIVATE */
6499 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
subhashj@codeaurora.orgc6a6db42016-11-23 16:32:08 -08006500
6501 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
6502 ufshcd_quirk_tune_host_pa_tactivate(hba);
Subhash Jadavani56d4a182016-12-05 19:25:32 -08006503
6504 ufshcd_vops_apply_dev_quirks(hba);
Yaniv Gardi37113102016-03-10 17:37:16 +02006505}
6506
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006507static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6508{
6509 int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6510
6511 hba->ufs_stats.hibern8_exit_cnt = 0;
6512 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6513
6514 memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6515 memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6516 memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6517 memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6518 memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
Gilad Broner7fabb772017-02-03 16:56:50 -08006519
6520 hba->req_abort_count = 0;
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006521}
6522
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00006523static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6524{
6525 int err;
6526
6527 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6528 &hba->desc_size.dev_desc);
6529 if (err)
6530 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6531
6532 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6533 &hba->desc_size.pwr_desc);
6534 if (err)
6535 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6536
6537 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6538 &hba->desc_size.interc_desc);
6539 if (err)
6540 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6541
6542 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6543 &hba->desc_size.conf_desc);
6544 if (err)
6545 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6546
6547 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6548 &hba->desc_size.unit_desc);
6549 if (err)
6550 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6551
6552 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6553 &hba->desc_size.geom_desc);
6554 if (err)
6555 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
Stanislav Nijnikovc648c2d2018-02-15 14:14:05 +02006556 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
6557 &hba->desc_size.hlth_desc);
6558 if (err)
6559 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00006560}
6561
6562static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
6563{
6564 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6565 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6566 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6567 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6568 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6569 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
Stanislav Nijnikovc648c2d2018-02-15 14:14:05 +02006570 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00006571}
6572
Yaniv Gardi37113102016-03-10 17:37:16 +02006573/**
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006574 * ufshcd_probe_hba - probe hba to detect device and initialize
6575 * @hba: per-adapter instance
6576 *
6577 * Execute link-startup and verify device initialization
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306578 */
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006579static int ufshcd_probe_hba(struct ufs_hba *hba)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306580{
Tomas Winkler93fdd5a2017-01-05 10:45:12 +02006581 struct ufs_dev_desc card = {0};
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306582 int ret;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006583 ktime_t start = ktime_get();
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306584
6585 ret = ufshcd_link_startup(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05306586 if (ret)
6587 goto out;
6588
Yaniv Gardiafdfff52016-03-10 17:37:15 +02006589 /* set the default level for urgent bkops */
6590 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6591 hba->is_urgent_bkops_lvl_checked = false;
6592
Dolev Ravivff8e20c2016-12-22 18:42:18 -08006593 /* Debug counters initialization */
6594 ufshcd_clear_dbg_ufs_stats(hba);
6595
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006596 /* UniPro link is active now */
6597 ufshcd_set_link_active(hba);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05306598
Adrian Hunterad448372018-03-20 15:07:38 +02006599 /* Enable Auto-Hibernate if configured */
6600 ufshcd_auto_hibern8_enable(hba);
6601
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05306602 ret = ufshcd_verify_dev_init(hba);
6603 if (ret)
6604 goto out;
6605
Dolev Raviv68078d52013-07-30 00:35:58 +05306606 ret = ufshcd_complete_dev_init(hba);
6607 if (ret)
6608 goto out;
6609
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00006610 /* Init check for device descriptor sizes */
6611 ufshcd_init_desc_sizes(hba);
6612
Tomas Winkler93fdd5a2017-01-05 10:45:12 +02006613 ret = ufs_get_device_desc(hba, &card);
6614 if (ret) {
6615 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6616 __func__, ret);
6617 goto out;
6618 }
6619
6620 ufs_fixup_device_setup(hba, &card);
Yaniv Gardi37113102016-03-10 17:37:16 +02006621 ufshcd_tune_unipro_params(hba);
Yaniv Gardi60f01872016-03-10 17:37:11 +02006622
6623 ret = ufshcd_set_vccq_rail_unused(hba,
6624 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
6625 if (ret)
6626 goto out;
6627
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006628 /* UFS device is also active now */
6629 ufshcd_set_ufs_dev_active(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306630 ufshcd_force_reset_auto_bkops(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006631 hba->wlun_dev_clr_ua = true;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306632
Dolev Raviv7eb584d2014-09-25 15:32:31 +03006633 if (ufshcd_get_max_pwr_mode(hba)) {
6634 dev_err(hba->dev,
6635 "%s: Failed getting max supported power mode\n",
6636 __func__);
6637 } else {
6638 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
Dov Levenglick8643ae62016-10-17 17:10:14 -07006639 if (ret) {
Dolev Raviv7eb584d2014-09-25 15:32:31 +03006640 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6641 __func__, ret);
Dov Levenglick8643ae62016-10-17 17:10:14 -07006642 goto out;
6643 }
Dolev Raviv7eb584d2014-09-25 15:32:31 +03006644 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006645
Yaniv Gardi53c12d02016-02-01 15:02:45 +02006646 /* set the state as operational after switching to desired gear */
6647 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00006648
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006649 /*
6650 * If we are in error handling context or in power management callbacks
6651 * context, no need to scan the host
6652 */
6653 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6654 bool flag;
6655
6656 /* clear any previous UFS device information */
6657 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02006658 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
6659 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006660 hba->dev_info.f_power_on_wp_en = flag;
6661
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006662 if (!hba->is_init_prefetch)
6663 ufshcd_init_icc_levels(hba);
6664
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006665 /* Add required well known logical units to scsi mid layer */
6666 if (ufshcd_scsi_add_wlus(hba))
6667 goto out;
6668
subhashj@codeaurora.org0701e492017-02-03 16:58:01 -08006669 /* Initialize devfreq after UFS device is detected */
6670 if (ufshcd_is_clkscaling_supported(hba)) {
6671 memcpy(&hba->clk_scaling.saved_pwr_info.info,
6672 &hba->pwr_info,
6673 sizeof(struct ufs_pa_layer_attr));
6674 hba->clk_scaling.saved_pwr_info.is_valid = true;
6675 if (!hba->devfreq) {
Bjorn Anderssondeac4442018-05-17 23:26:36 -07006676 ret = ufshcd_devfreq_init(hba);
6677 if (ret)
subhashj@codeaurora.org0701e492017-02-03 16:58:01 -08006678 goto out;
subhashj@codeaurora.org0701e492017-02-03 16:58:01 -08006679 }
6680 hba->clk_scaling.is_allowed = true;
6681 }
6682
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306683 scsi_scan_host(hba->host);
6684 pm_runtime_put_sync(hba->dev);
6685 }
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006686
6687 if (!hba->is_init_prefetch)
6688 hba->is_init_prefetch = true;
6689
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05306690out:
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006691 /*
6692 * If we failed to initialize the device or the device is not
6693 * present, turn off the power/clocks etc.
6694 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006695 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6696 pm_runtime_put_sync(hba->dev);
Vivek Gautameebcc192018-08-07 23:17:39 +05306697 ufshcd_exit_clk_scaling(hba);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006698 ufshcd_hba_exit(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006699 }
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006700
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006701 trace_ufshcd_init(dev_name(hba->dev), ret,
6702 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08006703 hba->curr_dev_pwr_mode, hba->uic_link_state);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006704 return ret;
6705}
6706
6707/**
6708 * ufshcd_async_scan - asynchronous execution for probing hba
6709 * @data: data pointer to pass to this function
6710 * @cookie: cookie data
6711 */
6712static void ufshcd_async_scan(void *data, async_cookie_t cookie)
6713{
6714 struct ufs_hba *hba = (struct ufs_hba *)data;
6715
6716 ufshcd_probe_hba(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306717}
6718
Yaniv Gardif550c652016-03-10 17:37:07 +02006719static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
6720{
6721 unsigned long flags;
6722 struct Scsi_Host *host;
6723 struct ufs_hba *hba;
6724 int index;
6725 bool found = false;
6726
6727 if (!scmd || !scmd->device || !scmd->device->host)
Christoph Hellwig66005932018-05-29 15:52:29 +02006728 return BLK_EH_DONE;
Yaniv Gardif550c652016-03-10 17:37:07 +02006729
6730 host = scmd->device->host;
6731 hba = shost_priv(host);
6732 if (!hba)
Christoph Hellwig66005932018-05-29 15:52:29 +02006733 return BLK_EH_DONE;
Yaniv Gardif550c652016-03-10 17:37:07 +02006734
6735 spin_lock_irqsave(host->host_lock, flags);
6736
6737 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
6738 if (hba->lrb[index].cmd == scmd) {
6739 found = true;
6740 break;
6741 }
6742 }
6743
6744 spin_unlock_irqrestore(host->host_lock, flags);
6745
6746 /*
6747 * Bypass SCSI error handling and reset the block layer timer if this
6748 * SCSI command was not actually dispatched to UFS driver, otherwise
6749 * let SCSI layer handle the error as usual.
6750 */
Christoph Hellwig66005932018-05-29 15:52:29 +02006751 return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
Yaniv Gardif550c652016-03-10 17:37:07 +02006752}
6753
Stanislav Nijnikovd829fc82018-02-15 14:14:09 +02006754static const struct attribute_group *ufshcd_driver_groups[] = {
6755 &ufs_sysfs_unit_descriptor_group,
Stanislav Nijnikovec92b592018-02-15 14:14:11 +02006756 &ufs_sysfs_lun_attributes_group,
Stanislav Nijnikovd829fc82018-02-15 14:14:09 +02006757 NULL,
6758};
6759
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306760static struct scsi_host_template ufshcd_driver_template = {
6761 .module = THIS_MODULE,
6762 .name = UFSHCD,
6763 .proc_name = UFSHCD,
6764 .queuecommand = ufshcd_queuecommand,
6765 .slave_alloc = ufshcd_slave_alloc,
Akinobu Mitaeeda4742014-07-01 23:00:32 +09006766 .slave_configure = ufshcd_slave_configure,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306767 .slave_destroy = ufshcd_slave_destroy,
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03006768 .change_queue_depth = ufshcd_change_queue_depth,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306769 .eh_abort_handler = ufshcd_abort,
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306770 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
6771 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
Yaniv Gardif550c652016-03-10 17:37:07 +02006772 .eh_timed_out = ufshcd_eh_timed_out,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306773 .this_id = -1,
6774 .sg_tablesize = SG_ALL,
6775 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
6776 .can_queue = UFSHCD_CAN_QUEUE,
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006777 .max_host_blocked = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01006778 .track_queue_depth = 1,
Stanislav Nijnikovd829fc82018-02-15 14:14:09 +02006779 .sdev_groups = ufshcd_driver_groups,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306780};
6781
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006782static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
6783 int ua)
6784{
Bjorn Andersson7b16a072015-02-11 19:35:28 -08006785 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006786
Bjorn Andersson7b16a072015-02-11 19:35:28 -08006787 if (!vreg)
6788 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006789
Bjorn Andersson7b16a072015-02-11 19:35:28 -08006790 ret = regulator_set_load(vreg->reg, ua);
6791 if (ret < 0) {
6792 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
6793 __func__, vreg->name, ua, ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006794 }
6795
6796 return ret;
6797}
6798
6799static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
6800 struct ufs_vreg *vreg)
6801{
Yaniv Gardi60f01872016-03-10 17:37:11 +02006802 if (!vreg)
6803 return 0;
6804 else if (vreg->unused)
6805 return 0;
6806 else
6807 return ufshcd_config_vreg_load(hba->dev, vreg,
6808 UFS_VREG_LPM_LOAD_UA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006809}
6810
6811static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
6812 struct ufs_vreg *vreg)
6813{
Yaniv Gardi60f01872016-03-10 17:37:11 +02006814 if (!vreg)
6815 return 0;
6816 else if (vreg->unused)
6817 return 0;
6818 else
6819 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006820}
6821
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006822static int ufshcd_config_vreg(struct device *dev,
6823 struct ufs_vreg *vreg, bool on)
6824{
6825 int ret = 0;
Gustavo A. R. Silva72753592017-11-20 08:12:29 -06006826 struct regulator *reg;
6827 const char *name;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006828 int min_uV, uA_load;
6829
6830 BUG_ON(!vreg);
6831
Gustavo A. R. Silva72753592017-11-20 08:12:29 -06006832 reg = vreg->reg;
6833 name = vreg->name;
6834
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006835 if (regulator_count_voltages(reg) > 0) {
6836 min_uV = on ? vreg->min_uV : 0;
6837 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
6838 if (ret) {
6839 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
6840 __func__, name, ret);
6841 goto out;
6842 }
6843
6844 uA_load = on ? vreg->max_uA : 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006845 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
6846 if (ret)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006847 goto out;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006848 }
6849out:
6850 return ret;
6851}
6852
6853static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
6854{
6855 int ret = 0;
6856
Yaniv Gardi60f01872016-03-10 17:37:11 +02006857 if (!vreg)
6858 goto out;
6859 else if (vreg->enabled || vreg->unused)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006860 goto out;
6861
6862 ret = ufshcd_config_vreg(dev, vreg, true);
6863 if (!ret)
6864 ret = regulator_enable(vreg->reg);
6865
6866 if (!ret)
6867 vreg->enabled = true;
6868 else
6869 dev_err(dev, "%s: %s enable failed, err=%d\n",
6870 __func__, vreg->name, ret);
6871out:
6872 return ret;
6873}
6874
6875static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
6876{
6877 int ret = 0;
6878
Yaniv Gardi60f01872016-03-10 17:37:11 +02006879 if (!vreg)
6880 goto out;
6881 else if (!vreg->enabled || vreg->unused)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006882 goto out;
6883
6884 ret = regulator_disable(vreg->reg);
6885
6886 if (!ret) {
6887 /* ignore errors on applying disable config */
6888 ufshcd_config_vreg(dev, vreg, false);
6889 vreg->enabled = false;
6890 } else {
6891 dev_err(dev, "%s: %s disable failed, err=%d\n",
6892 __func__, vreg->name, ret);
6893 }
6894out:
6895 return ret;
6896}
6897
6898static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
6899{
6900 int ret = 0;
6901 struct device *dev = hba->dev;
6902 struct ufs_vreg_info *info = &hba->vreg_info;
6903
6904 if (!info)
6905 goto out;
6906
6907 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
6908 if (ret)
6909 goto out;
6910
6911 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
6912 if (ret)
6913 goto out;
6914
6915 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
6916 if (ret)
6917 goto out;
6918
6919out:
6920 if (ret) {
6921 ufshcd_toggle_vreg(dev, info->vccq2, false);
6922 ufshcd_toggle_vreg(dev, info->vccq, false);
6923 ufshcd_toggle_vreg(dev, info->vcc, false);
6924 }
6925 return ret;
6926}
6927
Raviv Shvili6a771a62014-09-25 15:32:24 +03006928static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
6929{
6930 struct ufs_vreg_info *info = &hba->vreg_info;
6931
6932 if (info)
6933 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
6934
6935 return 0;
6936}
6937
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006938static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
6939{
6940 int ret = 0;
6941
6942 if (!vreg)
6943 goto out;
6944
6945 vreg->reg = devm_regulator_get(dev, vreg->name);
6946 if (IS_ERR(vreg->reg)) {
6947 ret = PTR_ERR(vreg->reg);
6948 dev_err(dev, "%s: %s get failed, err=%d\n",
6949 __func__, vreg->name, ret);
6950 }
6951out:
6952 return ret;
6953}
6954
6955static int ufshcd_init_vreg(struct ufs_hba *hba)
6956{
6957 int ret = 0;
6958 struct device *dev = hba->dev;
6959 struct ufs_vreg_info *info = &hba->vreg_info;
6960
6961 if (!info)
6962 goto out;
6963
6964 ret = ufshcd_get_vreg(dev, info->vcc);
6965 if (ret)
6966 goto out;
6967
6968 ret = ufshcd_get_vreg(dev, info->vccq);
6969 if (ret)
6970 goto out;
6971
6972 ret = ufshcd_get_vreg(dev, info->vccq2);
6973out:
6974 return ret;
6975}
6976
Raviv Shvili6a771a62014-09-25 15:32:24 +03006977static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
6978{
6979 struct ufs_vreg_info *info = &hba->vreg_info;
6980
6981 if (info)
6982 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
6983
6984 return 0;
6985}
6986
Yaniv Gardi60f01872016-03-10 17:37:11 +02006987static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
6988{
6989 int ret = 0;
6990 struct ufs_vreg_info *info = &hba->vreg_info;
6991
6992 if (!info)
6993 goto out;
6994 else if (!info->vccq)
6995 goto out;
6996
6997 if (unused) {
6998 /* shut off the rail here */
6999 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
7000 /*
7001 * Mark this rail as no longer used, so it doesn't get enabled
7002 * later by mistake
7003 */
7004 if (!ret)
7005 info->vccq->unused = true;
7006 } else {
7007 /*
7008 * rail should have been already enabled hence just make sure
7009 * that unused flag is cleared.
7010 */
7011 info->vccq->unused = false;
7012 }
7013out:
7014 return ret;
7015}
7016
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007017static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7018 bool skip_ref_clk)
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007019{
7020 int ret = 0;
7021 struct ufs_clk_info *clki;
7022 struct list_head *head = &hba->clk_list_head;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007023 unsigned long flags;
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08007024 ktime_t start = ktime_get();
7025 bool clk_state_changed = false;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007026
Szymon Mielczarek566ec9a2017-06-05 11:36:54 +03007027 if (list_empty(head))
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007028 goto out;
7029
Subhash Jadavanib3344562018-05-03 16:37:17 +05307030 /*
7031 * vendor specific setup_clocks ops may depend on clocks managed by
7032 * this standard driver hence call the vendor specific setup_clocks
7033 * before disabling the clocks managed here.
7034 */
7035 if (!on) {
7036 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
7037 if (ret)
7038 return ret;
7039 }
Subhash Jadavani1e879e82016-10-06 21:48:22 -07007040
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007041 list_for_each_entry(clki, head, list) {
7042 if (!IS_ERR_OR_NULL(clki->clk)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007043 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
7044 continue;
7045
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08007046 clk_state_changed = on ^ clki->enabled;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007047 if (on && !clki->enabled) {
7048 ret = clk_prepare_enable(clki->clk);
7049 if (ret) {
7050 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7051 __func__, clki->name, ret);
7052 goto out;
7053 }
7054 } else if (!on && clki->enabled) {
7055 clk_disable_unprepare(clki->clk);
7056 }
7057 clki->enabled = on;
7058 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7059 clki->name, on ? "en" : "dis");
7060 }
7061 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007062
Subhash Jadavanib3344562018-05-03 16:37:17 +05307063 /*
7064 * vendor specific setup_clocks ops may depend on clocks managed by
7065 * this standard driver hence call the vendor specific setup_clocks
7066 * after enabling the clocks managed here.
7067 */
7068 if (on) {
7069 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7070 if (ret)
7071 return ret;
7072 }
Subhash Jadavani1e879e82016-10-06 21:48:22 -07007073
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007074out:
7075 if (ret) {
7076 list_for_each_entry(clki, head, list) {
7077 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7078 clk_disable_unprepare(clki->clk);
7079 }
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007080 } else if (!ret && on) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007081 spin_lock_irqsave(hba->host->host_lock, flags);
7082 hba->clk_gating.state = CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007083 trace_ufshcd_clk_gating(dev_name(hba->dev),
7084 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007085 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007086 }
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007087
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08007088 if (clk_state_changed)
7089 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7090 (on ? "on" : "off"),
7091 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007092 return ret;
7093}
7094
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007095static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7096{
7097 return __ufshcd_setup_clocks(hba, on, false);
7098}
7099
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007100static int ufshcd_init_clocks(struct ufs_hba *hba)
7101{
7102 int ret = 0;
7103 struct ufs_clk_info *clki;
7104 struct device *dev = hba->dev;
7105 struct list_head *head = &hba->clk_list_head;
7106
Szymon Mielczarek566ec9a2017-06-05 11:36:54 +03007107 if (list_empty(head))
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007108 goto out;
7109
7110 list_for_each_entry(clki, head, list) {
7111 if (!clki->name)
7112 continue;
7113
7114 clki->clk = devm_clk_get(dev, clki->name);
7115 if (IS_ERR(clki->clk)) {
7116 ret = PTR_ERR(clki->clk);
7117 dev_err(dev, "%s: %s clk get failed, %d\n",
7118 __func__, clki->name, ret);
7119 goto out;
7120 }
7121
7122 if (clki->max_freq) {
7123 ret = clk_set_rate(clki->clk, clki->max_freq);
7124 if (ret) {
7125 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7126 __func__, clki->name,
7127 clki->max_freq, ret);
7128 goto out;
7129 }
Sahitya Tummala856b3482014-09-25 15:32:34 +03007130 clki->curr_freq = clki->max_freq;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007131 }
7132 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7133 clki->name, clk_get_rate(clki->clk));
7134 }
7135out:
7136 return ret;
7137}
7138
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007139static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7140{
7141 int err = 0;
7142
7143 if (!hba->vops)
7144 goto out;
7145
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007146 err = ufshcd_vops_init(hba);
7147 if (err)
7148 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007149
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007150 err = ufshcd_vops_setup_regulators(hba, true);
7151 if (err)
7152 goto out_exit;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007153
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007154 goto out;
7155
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007156out_exit:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007157 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007158out:
7159 if (err)
7160 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007161 __func__, ufshcd_get_var_name(hba), err);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007162 return err;
7163}
7164
7165static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7166{
7167 if (!hba->vops)
7168 return;
7169
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007170 ufshcd_vops_setup_regulators(hba, false);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007171
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007172 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007173}
7174
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007175static int ufshcd_hba_init(struct ufs_hba *hba)
7176{
7177 int err;
7178
Raviv Shvili6a771a62014-09-25 15:32:24 +03007179 /*
7180 * Handle host controller power separately from the UFS device power
7181 * rails as it will help controlling the UFS host controller power
7182 * collapse easily which is different than UFS device power collapse.
7183 * Also, enable the host controller power before we go ahead with rest
7184 * of the initialization here.
7185 */
7186 err = ufshcd_init_hba_vreg(hba);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007187 if (err)
7188 goto out;
7189
Raviv Shvili6a771a62014-09-25 15:32:24 +03007190 err = ufshcd_setup_hba_vreg(hba, true);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007191 if (err)
7192 goto out;
7193
Raviv Shvili6a771a62014-09-25 15:32:24 +03007194 err = ufshcd_init_clocks(hba);
7195 if (err)
7196 goto out_disable_hba_vreg;
7197
7198 err = ufshcd_setup_clocks(hba, true);
7199 if (err)
7200 goto out_disable_hba_vreg;
7201
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007202 err = ufshcd_init_vreg(hba);
7203 if (err)
7204 goto out_disable_clks;
7205
7206 err = ufshcd_setup_vreg(hba, true);
7207 if (err)
7208 goto out_disable_clks;
7209
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007210 err = ufshcd_variant_hba_init(hba);
7211 if (err)
7212 goto out_disable_vreg;
7213
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007214 hba->is_powered = true;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007215 goto out;
7216
7217out_disable_vreg:
7218 ufshcd_setup_vreg(hba, false);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007219out_disable_clks:
7220 ufshcd_setup_clocks(hba, false);
Raviv Shvili6a771a62014-09-25 15:32:24 +03007221out_disable_hba_vreg:
7222 ufshcd_setup_hba_vreg(hba, false);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007223out:
7224 return err;
7225}
7226
7227static void ufshcd_hba_exit(struct ufs_hba *hba)
7228{
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007229 if (hba->is_powered) {
7230 ufshcd_variant_hba_exit(hba);
7231 ufshcd_setup_vreg(hba, false);
Gilad Bronera5082532016-10-17 17:10:00 -07007232 ufshcd_suspend_clkscaling(hba);
Vivek Gautameebcc192018-08-07 23:17:39 +05307233 if (ufshcd_is_clkscaling_supported(hba))
subhashj@codeaurora.org0701e492017-02-03 16:58:01 -08007234 if (hba->devfreq)
7235 ufshcd_suspend_clkscaling(hba);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007236 ufshcd_setup_clocks(hba, false);
7237 ufshcd_setup_hba_vreg(hba, false);
7238 hba->is_powered = false;
7239 }
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007240}
7241
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007242static int
7243ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307244{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007245 unsigned char cmd[6] = {REQUEST_SENSE,
7246 0,
7247 0,
7248 0,
Gilad Bronerdcea0bf2016-10-17 17:09:48 -07007249 UFSHCD_REQ_SENSE_SIZE,
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007250 0};
7251 char *buffer;
7252 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307253
Gilad Bronerdcea0bf2016-10-17 17:09:48 -07007254 buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007255 if (!buffer) {
7256 ret = -ENOMEM;
7257 goto out;
7258 }
7259
Christoph Hellwigfcbfffe2017-02-23 16:02:37 +01007260 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
7261 UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
7262 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007263 if (ret)
7264 pr_err("%s: failed with err %d\n", __func__, ret);
7265
7266 kfree(buffer);
7267out:
7268 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307269}
7270
7271/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007272 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7273 * power mode
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307274 * @hba: per adapter instance
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007275 * @pwr_mode: device power mode to set
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307276 *
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007277 * Returns 0 if requested power mode is set successfully
7278 * Returns non-zero if failed to set the requested power mode
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307279 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007280static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7281 enum ufs_dev_pwr_mode pwr_mode)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307282{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007283 unsigned char cmd[6] = { START_STOP };
7284 struct scsi_sense_hdr sshdr;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03007285 struct scsi_device *sdp;
7286 unsigned long flags;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007287 int ret;
7288
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03007289 spin_lock_irqsave(hba->host->host_lock, flags);
7290 sdp = hba->sdev_ufs_device;
7291 if (sdp) {
7292 ret = scsi_device_get(sdp);
7293 if (!ret && !scsi_device_online(sdp)) {
7294 ret = -ENODEV;
7295 scsi_device_put(sdp);
7296 }
7297 } else {
7298 ret = -ENODEV;
7299 }
7300 spin_unlock_irqrestore(hba->host->host_lock, flags);
7301
7302 if (ret)
7303 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007304
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307305 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007306 * If scsi commands fail, the scsi mid-layer schedules scsi error-
7307 * handling, which would wait for host to be resumed. Since we know
7308 * we are functional while we are here, skip host resume in error
7309 * handling context.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307310 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007311 hba->host->eh_noresume = 1;
7312 if (hba->wlun_dev_clr_ua) {
7313 ret = ufshcd_send_request_sense(hba, sdp);
7314 if (ret)
7315 goto out;
7316 /* Unit attention condition is cleared now */
7317 hba->wlun_dev_clr_ua = false;
7318 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307319
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007320 cmd[4] = pwr_mode << 4;
7321
7322 /*
7323 * Current function would be generally called from the power management
Christoph Hellwige8064022016-10-20 15:12:13 +02007324 * callbacks hence set the RQF_PM flag so that it doesn't resume the
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007325 * already suspended childs.
7326 */
Christoph Hellwigfcbfffe2017-02-23 16:02:37 +01007327 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7328 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007329 if (ret) {
7330 sdev_printk(KERN_WARNING, sdp,
Hannes Reineckeef613292014-10-24 14:27:00 +02007331 "START_STOP failed for power mode: %d, result %x\n",
7332 pwr_mode, ret);
Johannes Thumshirnc65be1a2018-06-25 13:20:58 +02007333 if (driver_byte(ret) == DRIVER_SENSE)
Hannes Reinecke21045512015-01-08 07:43:46 +01007334 scsi_print_sense_hdr(sdp, NULL, &sshdr);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007335 }
7336
7337 if (!ret)
7338 hba->curr_dev_pwr_mode = pwr_mode;
7339out:
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03007340 scsi_device_put(sdp);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007341 hba->host->eh_noresume = 0;
7342 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307343}
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307344
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007345static int ufshcd_link_state_transition(struct ufs_hba *hba,
7346 enum uic_link_state req_link_state,
7347 int check_for_bkops)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05307348{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007349 int ret = 0;
7350
7351 if (req_link_state == hba->uic_link_state)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05307352 return 0;
7353
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007354 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
7355 ret = ufshcd_uic_hibern8_enter(hba);
7356 if (!ret)
7357 ufshcd_set_link_hibern8(hba);
7358 else
7359 goto out;
7360 }
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05307361 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007362 * If autobkops is enabled, link can't be turned off because
7363 * turning off the link would also turn off the device.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05307364 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007365 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
7366 (!check_for_bkops || (check_for_bkops &&
7367 !hba->auto_bkops_enabled))) {
7368 /*
Yaniv Gardif3099fb2016-03-10 17:37:17 +02007369 * Let's make sure that link is in low power mode, we are doing
7370 * this currently by putting the link in Hibern8. Otherway to
7371 * put the link in low power mode is to send the DME end point
7372 * to device and then send the DME reset command to local
7373 * unipro. But putting the link in hibern8 is much faster.
7374 */
7375 ret = ufshcd_uic_hibern8_enter(hba);
7376 if (ret)
7377 goto out;
7378 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007379 * Change controller state to "reset state" which
7380 * should also put the link in off/reset state
7381 */
Yaniv Gardi596585a2016-03-10 17:37:08 +02007382 ufshcd_hba_stop(hba, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007383 /*
7384 * TODO: Check if we need any delay to make sure that
7385 * controller is reset
7386 */
7387 ufshcd_set_link_off(hba);
7388 }
7389
7390out:
7391 return ret;
7392}
7393
7394static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
7395{
7396 /*
Yaniv Gardib799fdf2016-03-10 17:37:18 +02007397 * It seems some UFS devices may keep drawing more than sleep current
7398 * (atleast for 500us) from UFS rails (especially from VCCQ rail).
7399 * To avoid this situation, add 2ms delay before putting these UFS
7400 * rails in LPM mode.
7401 */
7402 if (!ufshcd_is_link_active(hba) &&
7403 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
7404 usleep_range(2000, 2100);
7405
7406 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007407 * If UFS device is either in UFS_Sleep turn off VCC rail to save some
7408 * power.
7409 *
7410 * If UFS device and link is in OFF state, all power supplies (VCC,
7411 * VCCQ, VCCQ2) can be turned off if power on write protect is not
7412 * required. If UFS link is inactive (Hibern8 or OFF state) and device
7413 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
7414 *
7415 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
7416 * in low power state which would save some power.
7417 */
7418 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7419 !hba->dev_info.is_lu_power_on_wp) {
7420 ufshcd_setup_vreg(hba, false);
7421 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7422 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7423 if (!ufshcd_is_link_active(hba)) {
7424 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7425 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
7426 }
7427 }
7428}
7429
7430static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
7431{
7432 int ret = 0;
7433
7434 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7435 !hba->dev_info.is_lu_power_on_wp) {
7436 ret = ufshcd_setup_vreg(hba, true);
7437 } else if (!ufshcd_is_ufs_dev_active(hba)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007438 if (!ret && !ufshcd_is_link_active(hba)) {
7439 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
7440 if (ret)
7441 goto vcc_disable;
7442 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
7443 if (ret)
7444 goto vccq_lpm;
7445 }
Subhash Jadavani69d72ac2016-10-27 17:26:24 -07007446 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007447 }
7448 goto out;
7449
7450vccq_lpm:
7451 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7452vcc_disable:
7453 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7454out:
7455 return ret;
7456}
7457
7458static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
7459{
7460 if (ufshcd_is_link_off(hba))
7461 ufshcd_setup_hba_vreg(hba, false);
7462}
7463
7464static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
7465{
7466 if (ufshcd_is_link_off(hba))
7467 ufshcd_setup_hba_vreg(hba, true);
7468}
7469
7470/**
7471 * ufshcd_suspend - helper function for suspend operations
7472 * @hba: per adapter instance
7473 * @pm_op: desired low power operation type
7474 *
7475 * This function will try to put the UFS device and link into low power
7476 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
7477 * (System PM level).
7478 *
7479 * If this function is called during shutdown, it will make sure that
7480 * both UFS device and UFS link is powered off.
7481 *
7482 * NOTE: UFS device & link must be active before we enter in this function.
7483 *
7484 * Returns 0 for success and non-zero for failure
7485 */
7486static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7487{
7488 int ret = 0;
7489 enum ufs_pm_level pm_lvl;
7490 enum ufs_dev_pwr_mode req_dev_pwr_mode;
7491 enum uic_link_state req_link_state;
7492
7493 hba->pm_op_in_progress = 1;
7494 if (!ufshcd_is_shutdown_pm(pm_op)) {
7495 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
7496 hba->rpm_lvl : hba->spm_lvl;
7497 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
7498 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
7499 } else {
7500 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
7501 req_link_state = UIC_LINK_OFF_STATE;
7502 }
7503
7504 /*
7505 * If we can't transition into any of the low power modes
7506 * just gate the clocks.
7507 */
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007508 ufshcd_hold(hba, false);
7509 hba->clk_gating.is_suspended = true;
7510
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08007511 if (hba->clk_scaling.is_allowed) {
7512 cancel_work_sync(&hba->clk_scaling.suspend_work);
7513 cancel_work_sync(&hba->clk_scaling.resume_work);
7514 ufshcd_suspend_clkscaling(hba);
7515 }
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07007516
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007517 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
7518 req_link_state == UIC_LINK_ACTIVE_STATE) {
7519 goto disable_clks;
7520 }
7521
7522 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
7523 (req_link_state == hba->uic_link_state))
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07007524 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007525
7526 /* UFS device & link must be active before we enter in this function */
7527 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
7528 ret = -EINVAL;
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07007529 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007530 }
7531
7532 if (ufshcd_is_runtime_pm(pm_op)) {
Subhash Jadavani374a2462014-09-25 15:32:35 +03007533 if (ufshcd_can_autobkops_during_suspend(hba)) {
7534 /*
7535 * The device is idle with no requests in the queue,
7536 * allow background operations if bkops status shows
7537 * that performance might be impacted.
7538 */
7539 ret = ufshcd_urgent_bkops(hba);
7540 if (ret)
7541 goto enable_gating;
7542 } else {
7543 /* make sure that auto bkops is disabled */
7544 ufshcd_disable_auto_bkops(hba);
7545 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007546 }
7547
7548 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
7549 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
7550 !ufshcd_is_runtime_pm(pm_op))) {
7551 /* ensure that bkops is disabled */
7552 ufshcd_disable_auto_bkops(hba);
7553 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
7554 if (ret)
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007555 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007556 }
7557
7558 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
7559 if (ret)
7560 goto set_dev_active;
7561
7562 ufshcd_vreg_set_lpm(hba);
7563
7564disable_clks:
7565 /*
7566 * Call vendor specific suspend callback. As these callbacks may access
7567 * vendor specific host controller register space call them before the
7568 * host clocks are ON.
7569 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007570 ret = ufshcd_vops_suspend(hba, pm_op);
7571 if (ret)
7572 goto set_link_active;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007573
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007574 if (!ufshcd_is_link_active(hba))
7575 ufshcd_setup_clocks(hba, false);
7576 else
7577 /* If link is active, device ref_clk can't be switched off */
7578 __ufshcd_setup_clocks(hba, false, true);
7579
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007580 hba->clk_gating.state = CLKS_OFF;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007581 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007582 /*
7583 * Disable the host irq as host controller as there won't be any
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007584 * host controller transaction expected till resume.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007585 */
7586 ufshcd_disable_irq(hba);
7587 /* Put the host controller in low power mode if possible */
7588 ufshcd_hba_vreg_set_lpm(hba);
7589 goto out;
7590
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007591set_link_active:
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08007592 if (hba->clk_scaling.is_allowed)
7593 ufshcd_resume_clkscaling(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007594 ufshcd_vreg_set_hpm(hba);
7595 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
7596 ufshcd_set_link_active(hba);
7597 else if (ufshcd_is_link_off(hba))
7598 ufshcd_host_reset_and_restore(hba);
7599set_dev_active:
7600 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
7601 ufshcd_disable_auto_bkops(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007602enable_gating:
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08007603 if (hba->clk_scaling.is_allowed)
7604 ufshcd_resume_clkscaling(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007605 hba->clk_gating.is_suspended = false;
7606 ufshcd_release(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007607out:
7608 hba->pm_op_in_progress = 0;
7609 return ret;
7610}
7611
7612/**
7613 * ufshcd_resume - helper function for resume operations
7614 * @hba: per adapter instance
7615 * @pm_op: runtime PM or system PM
7616 *
7617 * This function basically brings the UFS device, UniPro link and controller
7618 * to active state.
7619 *
7620 * Returns 0 for success and non-zero for failure
7621 */
7622static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7623{
7624 int ret;
7625 enum uic_link_state old_link_state;
7626
7627 hba->pm_op_in_progress = 1;
7628 old_link_state = hba->uic_link_state;
7629
7630 ufshcd_hba_vreg_set_hpm(hba);
7631 /* Make sure clocks are enabled before accessing controller */
7632 ret = ufshcd_setup_clocks(hba, true);
7633 if (ret)
7634 goto out;
7635
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007636 /* enable the host irq as host controller would be active soon */
7637 ret = ufshcd_enable_irq(hba);
7638 if (ret)
7639 goto disable_irq_and_vops_clks;
7640
7641 ret = ufshcd_vreg_set_hpm(hba);
7642 if (ret)
7643 goto disable_irq_and_vops_clks;
7644
7645 /*
7646 * Call vendor specific resume callback. As these callbacks may access
7647 * vendor specific host controller register space call them when the
7648 * host clocks are ON.
7649 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007650 ret = ufshcd_vops_resume(hba, pm_op);
7651 if (ret)
7652 goto disable_vreg;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007653
7654 if (ufshcd_is_link_hibern8(hba)) {
7655 ret = ufshcd_uic_hibern8_exit(hba);
7656 if (!ret)
7657 ufshcd_set_link_active(hba);
7658 else
7659 goto vendor_suspend;
7660 } else if (ufshcd_is_link_off(hba)) {
7661 ret = ufshcd_host_reset_and_restore(hba);
7662 /*
7663 * ufshcd_host_reset_and_restore() should have already
7664 * set the link state as active
7665 */
7666 if (ret || !ufshcd_is_link_active(hba))
7667 goto vendor_suspend;
7668 }
7669
7670 if (!ufshcd_is_ufs_dev_active(hba)) {
7671 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
7672 if (ret)
7673 goto set_old_link_state;
7674 }
7675
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08007676 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
7677 ufshcd_enable_auto_bkops(hba);
7678 else
7679 /*
7680 * If BKOPs operations are urgently needed at this moment then
7681 * keep auto-bkops enabled or else disable it.
7682 */
7683 ufshcd_urgent_bkops(hba);
7684
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007685 hba->clk_gating.is_suspended = false;
7686
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08007687 if (hba->clk_scaling.is_allowed)
7688 ufshcd_resume_clkscaling(hba);
Sahitya Tummala856b3482014-09-25 15:32:34 +03007689
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007690 /* Schedule clock gating in case of no access to UFS device yet */
7691 ufshcd_release(hba);
Adrian Hunterad448372018-03-20 15:07:38 +02007692
7693 /* Enable Auto-Hibernate if configured */
7694 ufshcd_auto_hibern8_enable(hba);
7695
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007696 goto out;
7697
7698set_old_link_state:
7699 ufshcd_link_state_transition(hba, old_link_state, 0);
7700vendor_suspend:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007701 ufshcd_vops_suspend(hba, pm_op);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007702disable_vreg:
7703 ufshcd_vreg_set_lpm(hba);
7704disable_irq_and_vops_clks:
7705 ufshcd_disable_irq(hba);
subhashj@codeaurora.org401f1e42017-02-03 16:57:39 -08007706 if (hba->clk_scaling.is_allowed)
7707 ufshcd_suspend_clkscaling(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007708 ufshcd_setup_clocks(hba, false);
7709out:
7710 hba->pm_op_in_progress = 0;
7711 return ret;
7712}
7713
7714/**
7715 * ufshcd_system_suspend - system suspend routine
7716 * @hba: per adapter instance
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007717 *
7718 * Check the description of ufshcd_suspend() function for more details.
7719 *
7720 * Returns 0 for success and non-zero for failure
7721 */
7722int ufshcd_system_suspend(struct ufs_hba *hba)
7723{
7724 int ret = 0;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007725 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007726
7727 if (!hba || !hba->is_powered)
Dolev Raviv233b5942014-10-23 13:25:14 +03007728 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007729
subhashj@codeaurora.org0b257732016-11-23 16:33:08 -08007730 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
7731 hba->curr_dev_pwr_mode) &&
7732 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
7733 hba->uic_link_state))
7734 goto out;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007735
subhashj@codeaurora.org0b257732016-11-23 16:33:08 -08007736 if (pm_runtime_suspended(hba->dev)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007737 /*
7738 * UFS device and/or UFS link low power states during runtime
7739 * suspend seems to be different than what is expected during
7740 * system suspend. Hence runtime resume the devic & link and
7741 * let the system suspend low power states to take effect.
7742 * TODO: If resume takes longer time, we might have optimize
7743 * it in future by not resuming everything if possible.
7744 */
7745 ret = ufshcd_runtime_resume(hba);
7746 if (ret)
7747 goto out;
7748 }
7749
7750 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
7751out:
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007752 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
7753 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08007754 hba->curr_dev_pwr_mode, hba->uic_link_state);
Dolev Ravive7850602014-09-25 15:32:36 +03007755 if (!ret)
7756 hba->is_sys_suspended = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007757 return ret;
7758}
7759EXPORT_SYMBOL(ufshcd_system_suspend);
7760
7761/**
7762 * ufshcd_system_resume - system resume routine
7763 * @hba: per adapter instance
7764 *
7765 * Returns 0 for success and non-zero for failure
7766 */
7767
7768int ufshcd_system_resume(struct ufs_hba *hba)
7769{
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007770 int ret = 0;
7771 ktime_t start = ktime_get();
7772
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07007773 if (!hba)
7774 return -EINVAL;
7775
7776 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007777 /*
7778 * Let the runtime resume take care of resuming
7779 * if runtime suspended.
7780 */
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007781 goto out;
7782 else
7783 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
7784out:
7785 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
7786 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08007787 hba->curr_dev_pwr_mode, hba->uic_link_state);
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007788 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007789}
7790EXPORT_SYMBOL(ufshcd_system_resume);
7791
7792/**
7793 * ufshcd_runtime_suspend - runtime suspend routine
7794 * @hba: per adapter instance
7795 *
7796 * Check the description of ufshcd_suspend() function for more details.
7797 *
7798 * Returns 0 for success and non-zero for failure
7799 */
7800int ufshcd_runtime_suspend(struct ufs_hba *hba)
7801{
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007802 int ret = 0;
7803 ktime_t start = ktime_get();
7804
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07007805 if (!hba)
7806 return -EINVAL;
7807
7808 if (!hba->is_powered)
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007809 goto out;
7810 else
7811 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
7812out:
7813 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
7814 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08007815 hba->curr_dev_pwr_mode, hba->uic_link_state);
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007816 return ret;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05307817}
7818EXPORT_SYMBOL(ufshcd_runtime_suspend);
7819
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007820/**
7821 * ufshcd_runtime_resume - runtime resume routine
7822 * @hba: per adapter instance
7823 *
7824 * This function basically brings the UFS device, UniPro link and controller
7825 * to active state. Following operations are done in this function:
7826 *
7827 * 1. Turn on all the controller related clocks
7828 * 2. Bring the UniPro link out of Hibernate state
7829 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
7830 * to active state.
7831 * 4. If auto-bkops is enabled on the device, disable it.
7832 *
7833 * So following would be the possible power state after this function return
7834 * successfully:
7835 * S1: UFS device in Active state with VCC rail ON
7836 * UniPro link in Active state
7837 * All the UFS/UniPro controller clocks are ON
7838 *
7839 * Returns 0 for success and non-zero for failure
7840 */
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05307841int ufshcd_runtime_resume(struct ufs_hba *hba)
7842{
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007843 int ret = 0;
7844 ktime_t start = ktime_get();
7845
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07007846 if (!hba)
7847 return -EINVAL;
7848
7849 if (!hba->is_powered)
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007850 goto out;
7851 else
7852 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
7853out:
7854 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
7855 ktime_to_us(ktime_sub(ktime_get(), start)),
Subhash Jadavani73eba2b2017-01-10 16:48:25 -08007856 hba->curr_dev_pwr_mode, hba->uic_link_state);
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007857 return ret;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05307858}
7859EXPORT_SYMBOL(ufshcd_runtime_resume);
7860
7861int ufshcd_runtime_idle(struct ufs_hba *hba)
7862{
7863 return 0;
7864}
7865EXPORT_SYMBOL(ufshcd_runtime_idle);
7866
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307867/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007868 * ufshcd_shutdown - shutdown routine
7869 * @hba: per adapter instance
7870 *
7871 * This function would power off both UFS device and UFS link.
7872 *
7873 * Returns 0 always to allow force shutdown even in case of errors.
7874 */
7875int ufshcd_shutdown(struct ufs_hba *hba)
7876{
7877 int ret = 0;
7878
7879 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
7880 goto out;
7881
7882 if (pm_runtime_suspended(hba->dev)) {
7883 ret = ufshcd_runtime_resume(hba);
7884 if (ret)
7885 goto out;
7886 }
7887
7888 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
7889out:
7890 if (ret)
7891 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
7892 /* allow force shutdown even in case of errors */
7893 return 0;
7894}
7895EXPORT_SYMBOL(ufshcd_shutdown);
7896
7897/**
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307898 * ufshcd_remove - de-allocate SCSI host and host memory space
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307899 * data structure memory
Bart Van Assche8aa29f12018-03-01 15:07:20 -08007900 * @hba: per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307901 */
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307902void ufshcd_remove(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307903{
Stanislav Nijnikovcbb68132018-02-15 14:14:01 +02007904 ufs_sysfs_remove_nodes(hba->dev);
Akinobu Mitacfdf9c92013-07-30 00:36:03 +05307905 scsi_remove_host(hba->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307906 /* disable interrupts */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05307907 ufshcd_disable_intr(hba, hba->intr_mask);
Yaniv Gardi596585a2016-03-10 17:37:08 +02007908 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307909
Vivek Gautameebcc192018-08-07 23:17:39 +05307910 ufshcd_exit_clk_scaling(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007911 ufshcd_exit_clk_gating(hba);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08007912 if (ufshcd_is_clkscaling_supported(hba))
7913 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007914 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307915}
7916EXPORT_SYMBOL_GPL(ufshcd_remove);
7917
7918/**
Yaniv Gardi47555a52015-10-28 13:15:49 +02007919 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
7920 * @hba: pointer to Host Bus Adapter (HBA)
7921 */
7922void ufshcd_dealloc_host(struct ufs_hba *hba)
7923{
7924 scsi_host_put(hba->host);
7925}
7926EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
7927
7928/**
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09007929 * ufshcd_set_dma_mask - Set dma mask based on the controller
7930 * addressing capability
7931 * @hba: per adapter instance
7932 *
7933 * Returns 0 for success, non-zero for failure
7934 */
7935static int ufshcd_set_dma_mask(struct ufs_hba *hba)
7936{
7937 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
7938 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
7939 return 0;
7940 }
7941 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
7942}
7943
7944/**
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007945 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307946 * @dev: pointer to device handle
7947 * @hba_handle: driver private handle
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307948 * Returns 0 on success, non-zero value on failure
7949 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007950int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307951{
7952 struct Scsi_Host *host;
7953 struct ufs_hba *hba;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007954 int err = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307955
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307956 if (!dev) {
7957 dev_err(dev,
7958 "Invalid memory reference for dev is NULL\n");
7959 err = -ENODEV;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307960 goto out_error;
7961 }
7962
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307963 host = scsi_host_alloc(&ufshcd_driver_template,
7964 sizeof(struct ufs_hba));
7965 if (!host) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307966 dev_err(dev, "scsi_host_alloc failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307967 err = -ENOMEM;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307968 goto out_error;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307969 }
7970 hba = shost_priv(host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307971 hba->host = host;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307972 hba->dev = dev;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007973 *hba_handle = hba;
7974
Szymon Mielczarek566ec9a2017-06-05 11:36:54 +03007975 INIT_LIST_HEAD(&hba->clk_list_head);
7976
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007977out_error:
7978 return err;
7979}
7980EXPORT_SYMBOL(ufshcd_alloc_host);
7981
7982/**
7983 * ufshcd_init - Driver initialization routine
7984 * @hba: per-adapter instance
7985 * @mmio_base: base register address
7986 * @irq: Interrupt line of device
7987 * Returns 0 on success, non-zero value on failure
7988 */
7989int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
7990{
7991 int err;
7992 struct Scsi_Host *host = hba->host;
7993 struct device *dev = hba->dev;
7994
7995 if (!mmio_base) {
7996 dev_err(hba->dev,
7997 "Invalid memory reference for mmio_base is NULL\n");
7998 err = -ENODEV;
7999 goto out_error;
8000 }
8001
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308002 hba->mmio_base = mmio_base;
8003 hba->irq = irq;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308004
Potomski, MichalXa4b0e8a2017-02-23 09:05:30 +00008005 /* Set descriptor lengths to specification defaults */
8006 ufshcd_def_desc_sizes(hba);
8007
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008008 err = ufshcd_hba_init(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008009 if (err)
8010 goto out_error;
8011
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308012 /* Read capabilities registers */
8013 ufshcd_hba_capabilities(hba);
8014
8015 /* Get UFS version supported by the controller */
8016 hba->ufs_version = ufshcd_get_ufs_version(hba);
8017
Yaniv Gardic01848c2016-12-05 19:25:02 -08008018 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8019 (hba->ufs_version != UFSHCI_VERSION_11) &&
8020 (hba->ufs_version != UFSHCI_VERSION_20) &&
8021 (hba->ufs_version != UFSHCI_VERSION_21))
8022 dev_err(hba->dev, "invalid UFS version 0x%x\n",
8023 hba->ufs_version);
8024
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05308025 /* Get Interrupt bit mask per version */
8026 hba->intr_mask = ufshcd_get_intr_mask(hba);
8027
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09008028 err = ufshcd_set_dma_mask(hba);
8029 if (err) {
8030 dev_err(hba->dev, "set dma mask failed\n");
8031 goto out_disable;
8032 }
8033
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308034 /* Allocate memory for host memory space */
8035 err = ufshcd_memory_alloc(hba);
8036 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308037 dev_err(hba->dev, "Memory allocation failed\n");
8038 goto out_disable;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308039 }
8040
8041 /* Configure LRB */
8042 ufshcd_host_memory_configure(hba);
8043
8044 host->can_queue = hba->nutrs;
8045 host->cmd_per_lun = hba->nutrs;
8046 host->max_id = UFSHCD_MAX_ID;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03008047 host->max_lun = UFS_MAX_LUNS;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308048 host->max_channel = UFSHCD_MAX_CHANNEL;
8049 host->unique_id = host->host_no;
8050 host->max_cmd_len = MAX_CDB_SIZE;
8051
Dolev Raviv7eb584d2014-09-25 15:32:31 +03008052 hba->max_pwr_info.is_valid = false;
8053
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308054 /* Initailize wait queue for task management */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05308055 init_waitqueue_head(&hba->tm_wq);
8056 init_waitqueue_head(&hba->tm_tag_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308057
8058 /* Initialize work queues */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05308059 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308060 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308061
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05308062 /* Initialize UIC command mutex */
8063 mutex_init(&hba->uic_cmd_mutex);
8064
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05308065 /* Initialize mutex for device management commands */
8066 mutex_init(&hba->dev_cmd.lock);
8067
subhashj@codeaurora.orga3cd5ec2017-02-03 16:57:02 -08008068 init_rwsem(&hba->clk_scaling_lock);
8069
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05308070 /* Initialize device management tag acquire wait queue */
8071 init_waitqueue_head(&hba->dev_cmd.tag_wq);
8072
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008073 ufshcd_init_clk_gating(hba);
Yaniv Gardi199ef132016-03-10 17:37:06 +02008074
Vivek Gautameebcc192018-08-07 23:17:39 +05308075 ufshcd_init_clk_scaling(hba);
8076
Yaniv Gardi199ef132016-03-10 17:37:06 +02008077 /*
8078 * In order to avoid any spurious interrupt immediately after
8079 * registering UFS controller interrupt handler, clear any pending UFS
8080 * interrupt status and disable all the UFS interrupts.
8081 */
8082 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8083 REG_INTERRUPT_STATUS);
8084 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8085 /*
8086 * Make sure that UFS interrupts are disabled and any pending interrupt
8087 * status is cleared before registering UFS interrupt handler.
8088 */
8089 mb();
8090
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308091 /* IRQ registration */
Seungwon Jeon2953f852013-06-27 13:31:54 +09008092 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308093 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308094 dev_err(hba->dev, "request irq failed\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008095 goto exit_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008096 } else {
8097 hba->is_irq_enabled = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308098 }
8099
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308100 err = scsi_add_host(host, hba->dev);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308101 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308102 dev_err(hba->dev, "scsi_add_host failed\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008103 goto exit_gating;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308104 }
8105
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05308106 /* Host controller enable */
8107 err = ufshcd_hba_enable(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308108 if (err) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05308109 dev_err(hba->dev, "Host controller enable failed\n");
Dolev Raviv66cc8202016-12-22 18:39:42 -08008110 ufshcd_print_host_regs(hba);
Gilad Broner6ba65582017-02-03 16:57:28 -08008111 ufshcd_print_host_state(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308112 goto out_remove_scsi_host;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308113 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05308114
subhashj@codeaurora.org0c8f7582016-12-22 18:41:11 -08008115 /*
8116 * Set the default power management level for runtime and system PM.
8117 * Default power saving mode is to keep UFS link in Hibern8 state
8118 * and UFS device in sleep state.
8119 */
8120 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8121 UFS_SLEEP_PWR_MODE,
8122 UIC_LINK_HIBERN8_STATE);
8123 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8124 UFS_SLEEP_PWR_MODE,
8125 UIC_LINK_HIBERN8_STATE);
8126
Adrian Hunterad448372018-03-20 15:07:38 +02008127 /* Set the default auto-hiberate idle timer value to 150 ms */
8128 if (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) {
8129 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
8130 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
8131 }
8132
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05308133 /* Hold auto suspend until async scan completes */
8134 pm_runtime_get_sync(dev);
Subhash Jadavani38135532018-05-03 16:37:18 +05308135 atomic_set(&hba->scsi_block_reqs_cnt, 0);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008136 /*
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08008137 * We are assuming that device wasn't put in sleep/power-down
8138 * state exclusively during the boot stage before kernel.
8139 * This assumption helps avoid doing link startup twice during
8140 * ufshcd_probe_hba().
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008141 */
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08008142 ufshcd_set_ufs_dev_active(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008143
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05308144 async_schedule(ufshcd_async_scan, hba);
Stanislav Nijnikovcbb68132018-02-15 14:14:01 +02008145 ufs_sysfs_add_nodes(hba->dev);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05308146
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308147 return 0;
8148
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308149out_remove_scsi_host:
8150 scsi_remove_host(hba->host);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008151exit_gating:
Vivek Gautameebcc192018-08-07 23:17:39 +05308152 ufshcd_exit_clk_scaling(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008153 ufshcd_exit_clk_gating(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308154out_disable:
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008155 hba->is_irq_enabled = false;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008156 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308157out_error:
8158 return err;
8159}
8160EXPORT_SYMBOL_GPL(ufshcd_init);
8161
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308162MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
8163MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
Vinayak Holikattie0eca632013-02-25 21:44:33 +05308164MODULE_DESCRIPTION("Generic UFS host controller driver Core");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308165MODULE_LICENSE("GPL");
8166MODULE_VERSION(UFSHCD_DRIVER_VERSION);