/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_REQ_SENSE_SIZE	18

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({								\
		int _ret;						\
		if (_on)						\
			_ret = ufshcd_enable_vreg(_dev, _vreg);		\
		else							\
			_ret = ufshcd_disable_vreg(_dev, _vreg);	\
		_ret;							\
	})

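/*
 * Usage sketch (illustrative only; info->vcc is a hypothetical regulator
 * handle): the macro is a GNU statement expression, so it yields the
 * enable/disable return code and can sit on the right of an assignment:
 *
 *	ret = ufshcd_toggle_vreg(hba->dev, info->vcc, true);
 *	if (ret)
 *		dev_err(hba->dev, "failed to enable vcc\n");
 */
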
#define ufshcd_hex_dump(prefix_str, buf, len) \
print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)

static u32 ufs_query_desc_max_size[] = {
	QUERY_DESC_DEVICE_MAX_SIZE,
	QUERY_DESC_CONFIGURAION_MAX_SIZE,
	QUERY_DESC_UNIT_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_INTERCONNECT_MAX_SIZE,
	QUERY_DESC_STRING_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_GEOMETRY_MAX_SIZE,
	QUERY_DESC_POWER_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
};

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
		    (ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match is found, fall back to level 0 (ACTIVE, ACTIVE) */
	return UFS_PM_LVL_0;
}
static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_NO_FASTAUTO),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),

	END_FIX
};

static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *pwr_mode);

static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}

static void ufshcd_add_command_trace(struct ufs_hba *hba,
		unsigned int tag, const char *str)
{
	sector_t lba = -1;
	u8 opcode = 0;
	u32 intr, doorbell;
	struct ufshcd_lrb *lrbp;
	int transfer_len = -1;

	if (!trace_ufshcd_command_enabled())
		return;

	lrbp = &hba->lrb[tag];

	if (lrbp->cmd) { /* data phase exists */
		opcode = (u8)(*lrbp->cmd->cmnd);
		if ((opcode == READ_10) || (opcode == WRITE_10)) {
			/*
			 * Currently we only fully trace read(10) and write(10)
			 * commands
			 */
			if (lrbp->cmd->request && lrbp->cmd->request->bio)
				lba =
				  lrbp->cmd->request->bio->bi_iter.bi_sector;
			transfer_len = be32_to_cpu(
				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		}
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	trace_ufshcd_command(dev_name(hba->dev), str, tag,
				doorbell, transfer_len, intr, lba, opcode);
}

static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
	/*
	 * hex_dump reads its data without the readl macro. This might
	 * cause inconsistency issues on some platforms, as the printed
	 * values may be from cache and not the most recent value.
	 * To know whether you are looking at an un-cached version, verify
	 * that the IORESOURCE_MEM flag is on when xxx_get_resource() is
	 * invoked during the platform/pci probe function.
	 */
	ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
		hba->ufs_version, hba->capabilities);
	dev_err(hba->dev,
		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
}

static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - Transfer Request Descriptor\n",
				tag);
		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU\n", tag);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU\n", tag);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));
		if (pr_prdt) {
			int prdt_length = le16_to_cpu(
				lrbp->utr_descriptor_ptr->prd_table_length);

			dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries\n", tag,
				prdt_length);
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				sizeof(struct ufshcd_sg_entry) *
				prdt_length);
		}
	}
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	struct utp_task_req_desc *tmrdp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		tmrdp = &hba->utmrdl_base_addr[tag];
		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
				sizeof(struct request_desc_header));
		dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
				tag);
		ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
				tag);
		ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
				sizeof(struct utp_task_req_desc));
	}
}

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

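/*
 * Usage sketch (illustrative only): a hypothetical caller polling the
 * transfer request doorbell until a given tag clears, sampling every
 * 100us for up to 1s and sleeping between reads:
 *
 *	err = ufshcd_wait_for_register(hba,
 *			REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *			1 << tag, 0, 100, 1000, true);
 *	if (err)
 *		dev_err(hba->dev, "tag %d did not clear\n", tag);
 */
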
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
	}

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device is connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is not available, else returns true with
 * the tag value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

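/*
 * Usage sketch (illustrative only): a task-management path claims a slot,
 * issues the request tagged with it, then unlocks the slot:
 *
 *	if (!ufshcd_get_tm_free_slot(hba, &free_slot))
 *		return -EBUSY;
 *	(build and ring the TM request for free_slot, wait for completion)
 *	ufshcd_put_tm_slot(hba, free_slot);
 */
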
/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 * 4-7		reserved
	 */
	return ((reg & 0xFF) >> 1) ^ 0x07;
}

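/*
 * Worked example (from the bit layout above): with UTRLRDY, UTMRLRDY and
 * UCRDY all set, reg & 0xFF is 0x0F or 0x0E depending on Device Present;
 * either way (reg >> 1) is 0x07 and 0x07 ^ 0x07 = 0, i.e. "ready". The
 * shift discards the Device Present bit, and any ready bit still clear
 * leaves a non-zero result.
 */
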
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function reads UIC command argument3.
 * Returns the value of UIC command argument3.
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

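/*
 * Usage sketch (illustrative only): since the timeout field counts in
 * 40us units, INT_AGGR_DEF_TO (0x02) means an 80us aggregation window.
 * One plausible configuration aggregates up to the full queue depth:
 *
 *	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 */
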
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * Setting the run-stop registers to 1 indicates to the host controller
 * that it can process requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

static const char *ufschd_uic_link_state_to_string(
			enum uic_link_state state)
{
	switch (state) {
	case UIC_LINK_OFF_STATE:	return "OFF";
	case UIC_LINK_ACTIVE_STATE:	return "ACTIVE";
	case UIC_LINK_HIBERN8_STATE:	return "HIBERN8";
	default:			return "UNKNOWN";
	}
}

static const char *ufschd_ufs_dev_pwr_mode_to_string(
			enum ufs_dev_pwr_mode state)
{
	switch (state) {
	case UFS_ACTIVE_PWR_MODE:	return "ACTIVE";
	case UFS_SLEEP_PWR_MODE:	return "SLEEP";
	case UFS_POWERDOWN_PWR_MODE:	return "POWERDOWN";
	default:			return "UNKNOWN";
	}
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	devfreq_suspend_device(hba->devfreq);
	hba->clk_scaling.window_start_t = 0;
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	devfreq_resume_device(hba->devfreq);
}

static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
}

static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_scaling.is_allowed)
		goto out;

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_scale_clks(hba, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
					__func__, err);
	}
	hba->clk_scaling.is_allowed = value;

	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
out:
	return count;
}

static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	if (hba->clk_scaling.is_allowed)
		ufshcd_resume_clkscaling(hba);
	scsi_unblock_requests(hba->host);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_work(&hba->clk_gating.ungate_work);
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		scsi_block_requests(hba->host);
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		schedule_work(&hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);

static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
	if (hba->clk_gating.is_suspended ||
		(hba->clk_gating.state == REQ_CLKS_ON)) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	ufshcd_suspend_clkscaling(hba);

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and this would ultimately
	 * prevent from doing cancel work multiple times when there are
	 * new requests arriving before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}

/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba))
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	schedule_delayed_work(&hba->clk_gating.gate_work,
			msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);

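/*
 * Usage sketch (illustrative only): ufshcd_hold()/ufshcd_release() are
 * reference counted, so a request path brackets hardware access with:
 *
 *	ufshcd_hold(hba, false);	(blocks until clocks are ungated)
 *	(issue the command)
 *	ufshcd_release(hba);		(may schedule gate_work later)
 *
 * Passing async = true instead returns -EAGAIN rather than blocking
 * while the ungate work is still pending.
 */
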
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
}

static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;
	u32 value;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_gating.is_enabled)
		goto out;

	if (value) {
		ufshcd_release(hba);
	} else {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.active_reqs++;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	hba->clk_gating.is_enabled = value;
out:
	return count;
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	hba->clk_gating.is_enabled = true;

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
	hba->clk_gating.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}

/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
}

/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();
	ufshcd_add_command_trace(hba, task_tag, "send");
}

/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp: pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;

	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		int len_to_copy;

		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);

		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
	}
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				"%s: Response size is bigger than buffer",
				__func__);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}

/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Identical to ufshcd_send_uic_cmd() except that it does not take the
 * mutex itself. Must be called with the mutex held and host_lock locked.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}

1384/**
1385 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
1386 * @hba: per adapter instance
1387 * @uic_cmd: UIC command
1388 *
1389 * Returns 0 only if success.
1390 */
1391static int
1392ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1393{
1394 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03001395 unsigned long flags;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301396
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001397 ufshcd_hold(hba, false);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301398 mutex_lock(&hba->uic_cmd_mutex);
Yaniv Gardicad2e032015-03-31 17:37:14 +03001399 ufshcd_add_delay_before_dme_cmd(hba);
1400
Subhash Jadavani57d104c2014-09-25 15:32:30 +03001401 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02001402 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03001403 spin_unlock_irqrestore(hba->host->host_lock, flags);
1404 if (!ret)
1405 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
1406
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301407 mutex_unlock(&hba->uic_cmd_mutex);
1408
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001409 ufshcd_release(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301410 return ret;
1411}
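Taken together, the helpers above form the synchronous UIC path: take a clock-gating reference, serialize on uic_cmd_mutex, dispatch under host_lock, then sleep until the ISR signals completion and copies the result registers back into the uic_command. A minimal caller sketch (hedged: example_dme_get() is hypothetical; UIC_CMD_DME_GET and UIC_ARG_MIB() are assumed from ufshci.h/ufshcd.h, and the driver's real wrappers are ufshcd_dme_get_attr()/ufshcd_dme_set_attr() further down in this file):

```c
/* Hypothetical caller of ufshcd_send_uic_cmd(); a sketch, not driver code. */
static int example_dme_get(struct ufs_hba *hba, u32 attr, u32 *val)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_GET;	/* opcode for the UIC cmd register */
	uic_cmd.argument1 = UIC_ARG_MIB(attr);	/* MIB attribute selector */

	/* Sleeps until UIC_COMMAND_COMPL fires or UIC_CMD_TIMEOUT expires */
	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (!ret && val)
		*val = uic_cmd.argument3;	/* value the ISR copied back */
	return ret;
}
```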
1412
1413/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301414 * ufshcd_map_sg - Map scatter-gather list to prdt
 1415 * @hba - per adapter instance
 * @lrbp - pointer to local reference block
1416 *
1417 * Returns 0 in case of success, non-zero value in case of failure
1418 */
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09001419static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301420{
1421 struct ufshcd_sg_entry *prd_table;
1422 struct scatterlist *sg;
1423 struct scsi_cmnd *cmd;
1424 int sg_segments;
1425 int i;
1426
1427 cmd = lrbp->cmd;
1428 sg_segments = scsi_dma_map(cmd);
1429 if (sg_segments < 0)
1430 return sg_segments;
1431
1432 if (sg_segments) {
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09001433 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
1434 lrbp->utr_descriptor_ptr->prd_table_length =
1435 cpu_to_le16((u16)(sg_segments *
1436 sizeof(struct ufshcd_sg_entry)));
1437 else
1438 lrbp->utr_descriptor_ptr->prd_table_length =
1439 cpu_to_le16((u16) (sg_segments));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301440
1441 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
1442
1443 scsi_for_each_sg(cmd, sg, sg_segments, i) {
1444 prd_table[i].size =
1445 cpu_to_le32(((u32) sg_dma_len(sg))-1);
1446 prd_table[i].base_addr =
1447 cpu_to_le32(lower_32_bits(sg->dma_address));
1448 prd_table[i].upper_addr =
1449 cpu_to_le32(upper_32_bits(sg->dma_address));
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02001450 prd_table[i].reserved = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301451 }
1452 } else {
1453 lrbp->utr_descriptor_ptr->prd_table_length = 0;
1454 }
1455
1456 return 0;
1457}
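Two details of the PRDT encoding above are easy to miss: the size field stores the segment length minus one (so 0 means one byte), and the 64-bit DMA address is split across two 32-bit words. A stand-alone sketch of that arithmetic (struct prd_entry is an illustrative stand-in for struct ufshcd_sg_entry, without the le32 byte-order wrapping):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the three fields filled in by ufshcd_map_sg() */
struct prd_entry {
	uint32_t base_addr;	/* lower 32 bits of the DMA address */
	uint32_t upper_addr;	/* upper 32 bits of the DMA address */
	uint32_t size;		/* segment length in bytes, minus one */
};

static struct prd_entry prd_encode(uint64_t dma_addr, uint32_t len_bytes)
{
	struct prd_entry e;

	e.base_addr  = (uint32_t)(dma_addr & 0xffffffffu);
	e.upper_addr = (uint32_t)(dma_addr >> 32);
	e.size       = len_bytes - 1;	/* UFSHCI: byte count is 0-based */
	return e;
}

int main(void)
{
	/* A 4 KiB segment at a bus address above 4 GiB */
	struct prd_entry e = prd_encode(0x100002000ULL, 4096);

	printf("base=0x%08x upper=0x%08x size=0x%x\n",
	       e.base_addr, e.upper_addr, e.size);
	/* prints: base=0x00002000 upper=0x00000001 size=0xfff */
	return 0;
}
```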
1458
1459/**
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05301460 * ufshcd_enable_intr - enable interrupts
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301461 * @hba: per adapter instance
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05301462 * @intrs: interrupt bits
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301463 */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05301464static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301465{
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05301466 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
1467
1468 if (hba->ufs_version == UFSHCI_VERSION_10) {
1469 u32 rw;
1470 rw = set & INTERRUPT_MASK_RW_VER_10;
1471 set = rw | ((set ^ intrs) & intrs);
1472 } else {
1473 set |= intrs;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301474 }
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05301475
1476 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
1477}
1478
1479/**
1480 * ufshcd_disable_intr - disable interrupts
1481 * @hba: per adapter instance
1482 * @intrs: interrupt bits
1483 */
1484static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
1485{
1486 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
1487
1488 if (hba->ufs_version == UFSHCI_VERSION_10) {
1489 u32 rw;
1490 rw = (set & INTERRUPT_MASK_RW_VER_10) &
1491 ~(intrs & INTERRUPT_MASK_RW_VER_10);
1492 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
1493
1494 } else {
1495 set &= ~intrs;
1496 }
1497
1498 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301499}
1500
1501/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301502 * ufshcd_prepare_req_desc_hdr() - Fills the request's header
1503 * descriptor according to request
1504 * @lrbp: pointer to local reference block
1505 * @upiu_flags: flags required in the header
 1506 * @cmd_dir: request's data direction
1507 */
1508static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
Joao Pinto300bb132016-05-11 12:21:27 +01001509 u32 *upiu_flags, enum dma_data_direction cmd_dir)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301510{
1511 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
1512 u32 data_direction;
1513 u32 dword_0;
1514
1515 if (cmd_dir == DMA_FROM_DEVICE) {
1516 data_direction = UTP_DEVICE_TO_HOST;
1517 *upiu_flags = UPIU_CMD_FLAGS_READ;
1518 } else if (cmd_dir == DMA_TO_DEVICE) {
1519 data_direction = UTP_HOST_TO_DEVICE;
1520 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
1521 } else {
1522 data_direction = UTP_NO_DATA_TRANSFER;
1523 *upiu_flags = UPIU_CMD_FLAGS_NONE;
1524 }
1525
1526 dword_0 = data_direction | (lrbp->command_type
1527 << UPIU_COMMAND_TYPE_OFFSET);
1528 if (lrbp->intr_cmd)
1529 dword_0 |= UTP_REQ_DESC_INT_CMD;
1530
1531 /* Transfer request descriptor header fields */
1532 req_desc->header.dword_0 = cpu_to_le32(dword_0);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02001533 /* dword_1 is reserved, hence it is set to 0 */
1534 req_desc->header.dword_1 = 0;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301535 /*
1536 * assigning invalid value for command status. Controller
1537 * updates OCS on command completion, with the command
1538 * status
1539 */
1540 req_desc->header.dword_2 =
1541 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02001542 /* dword_3 is reserved, hence it is set to 0 */
1543 req_desc->header.dword_3 = 0;
Yaniv Gardi51047262016-02-01 15:02:38 +02001544
1545 req_desc->prd_table_length = 0;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301546}
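The header built here is plain bit-packing: the data direction, command type and interrupt flag all land in dword_0 of the UTRD. A stand-alone demo of the composition for an interrupt-enabled SCSI read (the constant values are believed to match ufshci.h/ufs.h for this kernel version; verify against your tree before relying on them):

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed values; the authoritative definitions live in ufshci.h/ufs.h */
#define UTP_DEVICE_TO_HOST		0x04000000	/* DD field: device-to-host read */
#define UTP_REQ_DESC_INT_CMD		0x01000000	/* interrupt on completion */
#define UPIU_COMMAND_TYPE_OFFSET	28		/* CT field position */

int main(void)
{
	uint32_t command_type = 0;	/* UTP_CMD_TYPE_SCSI */
	uint32_t dword_0;

	/* Same composition as ufshcd_prepare_req_desc_hdr() */
	dword_0 = UTP_DEVICE_TO_HOST |
		  (command_type << UPIU_COMMAND_TYPE_OFFSET) |
		  UTP_REQ_DESC_INT_CMD;

	printf("UTRD dword_0 = 0x%08x\n", dword_0);	/* 0x05000000 */
	return 0;
}
```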
1547
1548/**
 1549 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
1550 * for scsi commands
1551 * @lrbp - local reference block pointer
1552 * @upiu_flags - flags
1553 */
1554static
1555void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
1556{
1557 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02001558 unsigned short cdb_len;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301559
1560 /* command descriptor fields */
1561 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
1562 UPIU_TRANSACTION_COMMAND, upiu_flags,
1563 lrbp->lun, lrbp->task_tag);
1564 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
1565 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
1566
1567 /* Total EHS length and Data segment length will be zero */
1568 ucd_req_ptr->header.dword_2 = 0;
1569
1570 ucd_req_ptr->sc.exp_data_transfer_len =
1571 cpu_to_be32(lrbp->cmd->sdb.length);
1572
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02001573 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
1574 memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
1575 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
1576
1577 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301578}
1579
Dolev Raviv68078d52013-07-30 00:35:58 +05301580/**
1581 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
1582 * for query requsts
1583 * @hba: UFS hba
1584 * @lrbp: local reference block pointer
1585 * @upiu_flags: flags
1586 */
1587static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
1588 struct ufshcd_lrb *lrbp, u32 upiu_flags)
1589{
1590 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
1591 struct ufs_query *query = &hba->dev_cmd.query;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05301592 u16 len = be16_to_cpu(query->request.upiu_req.length);
Dolev Raviv68078d52013-07-30 00:35:58 +05301593 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
1594
1595 /* Query request header */
1596 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
1597 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
1598 lrbp->lun, lrbp->task_tag);
1599 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
1600 0, query->request.query_func, 0, 0);
1601
Zang Leigang68612852016-08-25 17:39:19 +08001602 * Data segment length is only needed for WRITE_DESC
1603 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
1604 ucd_req_ptr->header.dword_2 =
1605 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
1606 else
1607 ucd_req_ptr->header.dword_2 = 0;
Dolev Raviv68078d52013-07-30 00:35:58 +05301608
1609 /* Copy the Query Request buffer as is */
1610 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
1611 QUERY_OSF_SIZE);
Dolev Raviv68078d52013-07-30 00:35:58 +05301612
1613 /* Copy the Descriptor */
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001614 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
1615 memcpy(descp, query->descriptor, len);
1616
Yaniv Gardi51047262016-02-01 15:02:38 +02001617 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Dolev Raviv68078d52013-07-30 00:35:58 +05301618}
1619
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301620static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
1621{
1622 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
1623
1624 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
1625
1626 /* command descriptor fields */
1627 ucd_req_ptr->header.dword_0 =
1628 UPIU_HEADER_DWORD(
1629 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
Yaniv Gardi51047262016-02-01 15:02:38 +02001630 /* clear rest of the fields of basic header */
1631 ucd_req_ptr->header.dword_1 = 0;
1632 ucd_req_ptr->header.dword_2 = 0;
1633
1634 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301635}
1636
1637/**
Joao Pinto300bb132016-05-11 12:21:27 +01001638 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit (UPIU)
1639 * for Device Management Purposes
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301640 * @hba - per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301641 * @lrbp - pointer to local reference block
1642 */
Joao Pinto300bb132016-05-11 12:21:27 +01001643static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301644{
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301645 u32 upiu_flags;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301646 int ret = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301647
Joao Pinto300bb132016-05-11 12:21:27 +01001648 if (hba->ufs_version == UFSHCI_VERSION_20)
1649 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
1650 else
1651 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
1652
1653 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
1654 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
1655 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
1656 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
1657 ufshcd_prepare_utp_nop_upiu(lrbp);
1658 else
1659 ret = -EINVAL;
1660
1661 return ret;
1662}
1663
1664/**
 1665 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
1666 * for SCSI Purposes
1667 * @hba - per adapter instance
 1668 * @lrbp - pointer to local reference block
1669 */
1670static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1671{
1672 u32 upiu_flags;
1673 int ret = 0;
1674
1675 if (hba->ufs_version == UFSHCI_VERSION_20)
1676 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
1677 else
1678 lrbp->command_type = UTP_CMD_TYPE_SCSI;
1679
1680 if (likely(lrbp->cmd)) {
1681 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
1682 lrbp->cmd->sc_data_direction);
1683 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
1684 } else {
1685 ret = -EINVAL;
1686 }
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301687
1688 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301689}
1690
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03001691/*
1692 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
1693 * @scsi_lun: scsi LUN id
1694 *
1695 * Returns UPIU LUN id
1696 */
1697static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
1698{
1699 if (scsi_is_wlun(scsi_lun))
1700 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
1701 | UFS_UPIU_WLUN_ID;
1702 else
1703 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
1704}
1705
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301706/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03001707 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
1708 * @scsi_lun: UPIU W-LUN id
1709 *
1710 * Returns SCSI W-LUN id
1711 */
1712static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
1713{
1714 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
1715}
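These two mappings are inverses of each other for well known LUs. A stand-alone round trip for the RPMB W-LUN (the constants are copied in so the example compiles on its own and are assumed to match ufs.h and include/scsi/scsi.h):

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed to match ufs.h / include/scsi/scsi.h; copied for self-containment */
#define UFS_UPIU_MAX_UNIT_NUM_ID	0x7f
#define UFS_UPIU_WLUN_ID		(1 << 7)
#define UFS_UPIU_RPMB_WLUN		0xc4
#define SCSI_W_LUN_BASE			0xc100

static int scsi_is_wlun(unsigned int lun)
{
	return (lun & 0xff00) == SCSI_W_LUN_BASE;
}

static uint8_t scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID) | UFS_UPIU_WLUN_ID;
	return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}

static uint16_t upiu_wlun_to_scsi_wlun(uint8_t upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

int main(void)
{
	uint16_t scsi_wlun = upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN);

	/* RPMB: UPIU 0xc4 -> SCSI 0xc144 -> back to UPIU 0xc4 */
	printf("UPIU 0x%02x -> SCSI 0x%04x -> UPIU 0x%02x\n",
	       (unsigned)UFS_UPIU_RPMB_WLUN, (unsigned)scsi_wlun,
	       (unsigned)scsi_to_upiu_lun(scsi_wlun));
	return 0;
}
```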
1716
1717/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301718 * ufshcd_queuecommand - main entry point for SCSI requests
 1719 * @host: SCSI host pointer
 1720 * @cmd: command from SCSI Midlayer
1721 *
1722 * Returns 0 for success, non-zero in case of failure
1723 */
1724static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1725{
1726 struct ufshcd_lrb *lrbp;
1727 struct ufs_hba *hba;
1728 unsigned long flags;
1729 int tag;
1730 int err = 0;
1731
1732 hba = shost_priv(host);
1733
1734 tag = cmd->request->tag;
Yaniv Gardi14497322016-02-01 15:02:39 +02001735 if (!ufshcd_valid_tag(hba, tag)) {
1736 dev_err(hba->dev,
1737 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
1738 __func__, tag, cmd, cmd->request);
1739 BUG();
1740 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301741
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301742 spin_lock_irqsave(hba->host->host_lock, flags);
1743 switch (hba->ufshcd_state) {
1744 case UFSHCD_STATE_OPERATIONAL:
1745 break;
Zang Leigang141f8162016-11-16 11:29:37 +08001746 case UFSHCD_STATE_EH_SCHEDULED:
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301747 case UFSHCD_STATE_RESET:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301748 err = SCSI_MLQUEUE_HOST_BUSY;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301749 goto out_unlock;
1750 case UFSHCD_STATE_ERROR:
1751 set_host_byte(cmd, DID_ERROR);
1752 cmd->scsi_done(cmd);
1753 goto out_unlock;
1754 default:
1755 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
1756 __func__, hba->ufshcd_state);
1757 set_host_byte(cmd, DID_BAD_TARGET);
1758 cmd->scsi_done(cmd);
1759 goto out_unlock;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301760 }
Yaniv Gardi53c12d02016-02-01 15:02:45 +02001761
1762 /* if error handling is in progress, don't issue commands */
1763 if (ufshcd_eh_in_progress(hba)) {
1764 set_host_byte(cmd, DID_ERROR);
1765 cmd->scsi_done(cmd);
1766 goto out_unlock;
1767 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301768 spin_unlock_irqrestore(hba->host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301769
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301770 /* acquire the tag to make sure device cmds don't use it */
1771 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
1772 /*
1773 * Dev manage command in progress, requeue the command.
1774 * Requeuing the command helps in cases where the request *may*
 1775 * find a different tag instead of waiting for dev manage command
1776 * completion.
1777 */
1778 err = SCSI_MLQUEUE_HOST_BUSY;
1779 goto out;
1780 }
1781
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001782 err = ufshcd_hold(hba, true);
1783 if (err) {
1784 err = SCSI_MLQUEUE_HOST_BUSY;
1785 clear_bit_unlock(tag, &hba->lrb_in_use);
1786 goto out;
1787 }
1788 WARN_ON(hba->clk_gating.state != CLKS_ON);
1789
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301790 lrbp = &hba->lrb[tag];
1791
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301792 WARN_ON(lrbp->cmd);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301793 lrbp->cmd = cmd;
Gilad Bronerdcea0bf2016-10-17 17:09:48 -07001794 lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301795 lrbp->sense_buffer = cmd->sense_buffer;
1796 lrbp->task_tag = tag;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03001797 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
Yaniv Gardib8521902015-05-17 18:54:57 +03001798 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301799
Joao Pinto300bb132016-05-11 12:21:27 +01001800 ufshcd_comp_scsi_upiu(hba, lrbp);
1801
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09001802 err = ufshcd_map_sg(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301803 if (err) {
1804 lrbp->cmd = NULL;
1805 clear_bit_unlock(tag, &hba->lrb_in_use);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301806 goto out;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301807 }
Gilad Bronerad1a1b92016-10-17 17:09:36 -07001808 /* Make sure descriptors are ready before ringing the doorbell */
1809 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301810
1811 /* issue command to the controller */
1812 spin_lock_irqsave(hba->host->host_lock, flags);
Kiwoong Kim0e675ef2016-11-10 21:14:36 +09001813 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301814 ufshcd_send_command(hba, tag);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301815out_unlock:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301816 spin_unlock_irqrestore(hba->host->host_lock, flags);
1817out:
1818 return err;
1819}
1820
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301821static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
1822 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
1823{
1824 lrbp->cmd = NULL;
1825 lrbp->sense_bufflen = 0;
1826 lrbp->sense_buffer = NULL;
1827 lrbp->task_tag = tag;
1828 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301829 lrbp->intr_cmd = true; /* No interrupt aggregation */
1830 hba->dev_cmd.type = cmd_type;
1831
Joao Pinto300bb132016-05-11 12:21:27 +01001832 return ufshcd_comp_devman_upiu(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301833}
1834
1835static int
1836ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
1837{
1838 int err = 0;
1839 unsigned long flags;
1840 u32 mask = 1 << tag;
1841
1842 /* clear outstanding transaction before retry */
1843 spin_lock_irqsave(hba->host->host_lock, flags);
1844 ufshcd_utrl_clear(hba, tag);
1845 spin_unlock_irqrestore(hba->host->host_lock, flags);
1846
1847 /*
 1848 * wait for h/w to clear the corresponding bit in the door-bell.
1849 * max. wait is 1 sec.
1850 */
1851 err = ufshcd_wait_for_register(hba,
1852 REG_UTP_TRANSFER_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02001853 mask, ~mask, 1000, 1000, true);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301854
1855 return err;
1856}
1857
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001858static int
1859ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1860{
1861 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1862
1863 /* Get the UPIU response */
1864 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
1865 UPIU_RSP_CODE_OFFSET;
1866 return query_res->response;
1867}
1868
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301869/**
1870 * ufshcd_dev_cmd_completion() - handles device management command responses
1871 * @hba: per adapter instance
1872 * @lrbp: pointer to local reference block
1873 */
1874static int
1875ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1876{
1877 int resp;
1878 int err = 0;
1879
1880 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
1881
1882 switch (resp) {
1883 case UPIU_TRANSACTION_NOP_IN:
1884 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
1885 err = -EINVAL;
1886 dev_err(hba->dev, "%s: unexpected response %x\n",
1887 __func__, resp);
1888 }
1889 break;
Dolev Raviv68078d52013-07-30 00:35:58 +05301890 case UPIU_TRANSACTION_QUERY_RSP:
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001891 err = ufshcd_check_query_response(hba, lrbp);
1892 if (!err)
1893 err = ufshcd_copy_query_response(hba, lrbp);
Dolev Raviv68078d52013-07-30 00:35:58 +05301894 break;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301895 case UPIU_TRANSACTION_REJECT_UPIU:
1896 /* TODO: handle Reject UPIU Response */
1897 err = -EPERM;
1898 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
1899 __func__);
1900 break;
1901 default:
1902 err = -EINVAL;
1903 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
1904 __func__, resp);
1905 break;
1906 }
1907
1908 return err;
1909}
1910
1911static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
1912 struct ufshcd_lrb *lrbp, int max_timeout)
1913{
1914 int err = 0;
1915 unsigned long time_left;
1916 unsigned long flags;
1917
1918 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
1919 msecs_to_jiffies(max_timeout));
1920
Gilad Bronerad1a1b92016-10-17 17:09:36 -07001921 /* Make sure descriptors are ready before ringing the doorbell */
1922 wmb();
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301923 spin_lock_irqsave(hba->host->host_lock, flags);
1924 hba->dev_cmd.complete = NULL;
1925 if (likely(time_left)) {
1926 err = ufshcd_get_tr_ocs(lrbp);
1927 if (!err)
1928 err = ufshcd_dev_cmd_completion(hba, lrbp);
1929 }
1930 spin_unlock_irqrestore(hba->host->host_lock, flags);
1931
1932 if (!time_left) {
1933 err = -ETIMEDOUT;
Yaniv Gardia48353f2016-02-01 15:02:40 +02001934 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
1935 __func__, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301936 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
Yaniv Gardia48353f2016-02-01 15:02:40 +02001937 /* successfully cleared the command, retry if needed */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301938 err = -EAGAIN;
Yaniv Gardia48353f2016-02-01 15:02:40 +02001939 /*
1940 * in case of an error, after clearing the doorbell,
1941 * we also need to clear the outstanding_request
1942 * field in hba
1943 */
1944 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301945 }
1946
1947 return err;
1948}
1949
1950/**
1951 * ufshcd_get_dev_cmd_tag - Get device management command tag
1952 * @hba: per-adapter instance
1953 * @tag: pointer to variable with available slot value
1954 *
1955 * Get a free slot and lock it until device management command
1956 * completes.
1957 *
1958 * Returns false if free slot is unavailable for locking, else
 1959 * return true with tag value in @tag_out.
1960 */
1961static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
1962{
1963 int tag;
1964 bool ret = false;
1965 unsigned long tmp;
1966
1967 if (!tag_out)
1968 goto out;
1969
1970 do {
1971 tmp = ~hba->lrb_in_use;
1972 tag = find_last_bit(&tmp, hba->nutrs);
1973 if (tag >= hba->nutrs)
1974 goto out;
1975 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
1976
1977 *tag_out = tag;
1978 ret = true;
1979out:
1980 return ret;
1981}
1982
1983static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
1984{
1985 clear_bit_unlock(tag, &hba->lrb_in_use);
1986}
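The tag allocator above is lock-free: find_last_bit() picks a candidate from a snapshot of ~lrb_in_use, and test_and_set_bit_lock() both claims the slot and detects a lost race, in which case the loop rescans. A user-space sketch of the same pattern with C11 atomics (NUTRS and the helper names are illustrative):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUTRS 32				/* assumed queue depth */

static _Atomic unsigned long lrb_in_use;	/* one bit per slot */

/* Scan a snapshot for the highest free slot, then claim it with an
 * atomic test-and-set; rescan if another thread won the race. */
static bool get_tag(int *tag_out)
{
	unsigned long snap, mask;
	int tag;

	do {
		snap = atomic_load(&lrb_in_use);
		tag = -1;
		for (int bit = 0; bit < NUTRS; bit++)	/* ~ find_last_bit(~snap) */
			if (!(snap & (1UL << bit)))
				tag = bit;
		if (tag < 0)
			return false;		/* every slot is busy */
		mask = 1UL << tag;
	} while (atomic_fetch_or(&lrb_in_use, mask) & mask);

	*tag_out = tag;
	return true;
}

static void put_tag(int tag)
{
	atomic_fetch_and(&lrb_in_use, ~(1UL << tag));	/* ~ clear_bit_unlock */
}

int main(void)
{
	int tag;

	if (get_tag(&tag)) {
		printf("claimed tag %d\n", tag);	/* 31: highest free slot */
		put_tag(tag);
	}
	return 0;
}
```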
1987
1988/**
1989 * ufshcd_exec_dev_cmd - API for sending device management requests
1990 * @hba - UFS hba
1991 * @cmd_type - specifies the type (NOP, Query...)
 1992 * @timeout - timeout in milliseconds
1993 *
Dolev Raviv68078d52013-07-30 00:35:58 +05301994 * NOTE: Since there is only one available tag for device management commands,
1995 * it is expected you hold the hba->dev_cmd.lock mutex.
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301996 */
1997static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
1998 enum dev_cmd_type cmd_type, int timeout)
1999{
2000 struct ufshcd_lrb *lrbp;
2001 int err;
2002 int tag;
2003 struct completion wait;
2004 unsigned long flags;
2005
2006 /*
2007 * Get free slot, sleep if slots are unavailable.
2008 * Even though we use wait_event() which sleeps indefinitely,
2009 * the maximum wait time is bounded by SCSI request timeout.
2010 */
2011 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2012
2013 init_completion(&wait);
2014 lrbp = &hba->lrb[tag];
2015 WARN_ON(lrbp->cmd);
2016 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2017 if (unlikely(err))
2018 goto out_put_tag;
2019
2020 hba->dev_cmd.complete = &wait;
2021
Yaniv Gardie3dfdc52016-02-01 15:02:49 +02002022 /* Make sure descriptors are ready before ringing the doorbell */
2023 wmb();
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302024 spin_lock_irqsave(hba->host->host_lock, flags);
Kiwoong Kim0e675ef2016-11-10 21:14:36 +09002025 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302026 ufshcd_send_command(hba, tag);
2027 spin_unlock_irqrestore(hba->host->host_lock, flags);
2028
2029 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2030
2031out_put_tag:
2032 ufshcd_put_dev_cmd_tag(hba, tag);
2033 wake_up(&hba->dev_cmd.tag_wq);
2034 return err;
2035}
2036
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302037/**
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002038 * ufshcd_init_query() - init the query response and request parameters
2039 * @hba: per-adapter instance
2040 * @request: address of the request pointer to be initialized
2041 * @response: address of the response pointer to be initialized
2042 * @opcode: operation to perform
 2043 * @idn: the idn to access
2044 * @index: LU number to access
2045 * @selector: query/flag/descriptor further identification
2046 */
2047static inline void ufshcd_init_query(struct ufs_hba *hba,
2048 struct ufs_query_req **request, struct ufs_query_res **response,
2049 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2050{
2051 *request = &hba->dev_cmd.query.request;
2052 *response = &hba->dev_cmd.query.response;
2053 memset(*request, 0, sizeof(struct ufs_query_req));
2054 memset(*response, 0, sizeof(struct ufs_query_res));
2055 (*request)->upiu_req.opcode = opcode;
2056 (*request)->upiu_req.idn = idn;
2057 (*request)->upiu_req.index = index;
2058 (*request)->upiu_req.selector = selector;
2059}
2060
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02002061static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2062 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2063{
2064 int ret;
2065 int retries;
2066
2067 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2068 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2069 if (ret)
2070 dev_dbg(hba->dev,
2071 "%s: failed with error %d, retries %d\n",
2072 __func__, ret, retries);
2073 else
2074 break;
2075 }
2076
2077 if (ret)
2078 dev_err(hba->dev,
2079 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
2080 __func__, opcode, idn, ret, retries);
2081 return ret;
2082}
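A representative use of this retrying wrapper, loosely modeled on the fDeviceInit handshake this driver performs at initialization time (a hedged sketch under that assumption, not a verbatim copy of the real code path):

```c
/* Hedged sketch: set fDeviceInit, then read it back; the device clears
 * the flag once its initialization completes. QUERY_FLAG_IDN_FDEVICEINIT
 * is assumed from ufs.h. */
static int example_complete_dev_init(struct ufs_hba *hba)
{
	bool flag_res = true;
	int err;

	/* Ask the device to run its initialization */
	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, NULL);
	if (err)
		return err;

	/* Read the flag back */
	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
	if (!err && flag_res)
		err = -EBUSY;	/* device is still initializing */
	return err;
}
```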
2083
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002084/**
Dolev Raviv68078d52013-07-30 00:35:58 +05302085 * ufshcd_query_flag() - API function for sending flag query requests
 2086 * @hba: per-adapter instance
 2087 * @opcode: flag query to perform
 2088 * @idn: flag idn to access
 2089 * @flag_res: the flag value after the query request completes
2090 *
2091 * Returns 0 for success, non-zero in case of failure
2092 */
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02002093int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
Dolev Raviv68078d52013-07-30 00:35:58 +05302094 enum flag_idn idn, bool *flag_res)
2095{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002096 struct ufs_query_req *request = NULL;
2097 struct ufs_query_res *response = NULL;
2098 int err, index = 0, selector = 0;
Yaniv Gardie5ad4062016-02-01 15:02:41 +02002099 int timeout = QUERY_REQ_TIMEOUT;
Dolev Raviv68078d52013-07-30 00:35:58 +05302100
2101 BUG_ON(!hba);
2102
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002103 ufshcd_hold(hba, false);
Dolev Raviv68078d52013-07-30 00:35:58 +05302104 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002105 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2106 selector);
Dolev Raviv68078d52013-07-30 00:35:58 +05302107
2108 switch (opcode) {
2109 case UPIU_QUERY_OPCODE_SET_FLAG:
2110 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2111 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2112 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2113 break;
2114 case UPIU_QUERY_OPCODE_READ_FLAG:
2115 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2116 if (!flag_res) {
2117 /* No dummy reads */
2118 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2119 __func__);
2120 err = -EINVAL;
2121 goto out_unlock;
2122 }
2123 break;
2124 default:
2125 dev_err(hba->dev,
2126 "%s: Expected query flag opcode but got = %d\n",
2127 __func__, opcode);
2128 err = -EINVAL;
2129 goto out_unlock;
2130 }
Dolev Raviv68078d52013-07-30 00:35:58 +05302131
Yaniv Gardie5ad4062016-02-01 15:02:41 +02002132 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
Dolev Raviv68078d52013-07-30 00:35:58 +05302133
2134 if (err) {
2135 dev_err(hba->dev,
2136 "%s: Sending flag query for idn %d failed, err = %d\n",
2137 __func__, idn, err);
2138 goto out_unlock;
2139 }
2140
2141 if (flag_res)
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302142 *flag_res = (be32_to_cpu(response->upiu_res.value) &
Dolev Raviv68078d52013-07-30 00:35:58 +05302143 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2144
2145out_unlock:
2146 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002147 ufshcd_release(hba);
Dolev Raviv68078d52013-07-30 00:35:58 +05302148 return err;
2149}
2150
2151/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302152 * ufshcd_query_attr - API function for sending attribute requests
 2153 * @hba: per-adapter instance
 2154 * @opcode: attribute opcode
 2155 * @idn: attribute idn to access
 2156 * @index: index field
 2157 * @selector: selector field
 2158 * @attr_val: the attribute value after the query request completes
2159 *
2160 * Returns 0 for success, non-zero in case of failure
2161*/
Sujit Reddy Thummabdbe5d22014-05-26 10:59:11 +05302162static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302163 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2164{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002165 struct ufs_query_req *request = NULL;
2166 struct ufs_query_res *response = NULL;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302167 int err;
2168
2169 BUG_ON(!hba);
2170
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002171 ufshcd_hold(hba, false);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302172 if (!attr_val) {
2173 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2174 __func__, opcode);
2175 err = -EINVAL;
2176 goto out;
2177 }
2178
2179 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002180 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2181 selector);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302182
2183 switch (opcode) {
2184 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2185 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302186 request->upiu_req.value = cpu_to_be32(*attr_val);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302187 break;
2188 case UPIU_QUERY_OPCODE_READ_ATTR:
2189 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2190 break;
2191 default:
2192 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2193 __func__, opcode);
2194 err = -EINVAL;
2195 goto out_unlock;
2196 }
2197
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002198 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302199
2200 if (err) {
Yaniv Gardi4b761b52016-11-23 16:31:18 -08002201 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2202 __func__, opcode, idn, index, err);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302203 goto out_unlock;
2204 }
2205
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302206 *attr_val = be32_to_cpu(response->upiu_res.value);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302207
2208out_unlock:
2209 mutex_unlock(&hba->dev_cmd.lock);
2210out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002211 ufshcd_release(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302212 return err;
2213}
2214
2215/**
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02002216 * ufshcd_query_attr_retry() - API function for sending query
2217 * attribute with retries
2218 * @hba: per-adapter instance
2219 * @opcode: attribute opcode
2220 * @idn: attribute idn to access
2221 * @index: index field
2222 * @selector: selector field
2223 * @attr_val: the attribute value after the query request
2224 * completes
2225 *
2226 * Returns 0 for success, non-zero in case of failure
2227*/
2228static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2229 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2230 u32 *attr_val)
2231{
2232 int ret = 0;
2233 u32 retries;
2234
2235 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2236 ret = ufshcd_query_attr(hba, opcode, idn, index,
2237 selector, attr_val);
2238 if (ret)
2239 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2240 __func__, ret, retries);
2241 else
2242 break;
2243 }
2244
2245 if (ret)
2246 dev_err(hba->dev,
2247 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
2248 __func__, idn, ret, QUERY_REQ_RETRIES);
2249 return ret;
2250}
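Usage mirrors the flag helper. For example, reading bActiveICCLevel, much as the ICC-level setup elsewhere in this driver does (a minimal sketch; QUERY_ATTR_IDN_ACTIVE_ICC_LVL is assumed from ufs.h):

```c
/* Hedged sketch: read bActiveICCLevel with the retrying helper */
static int example_read_icc_level(struct ufs_hba *hba, u32 *icc_level)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				       QUERY_ATTR_IDN_ACTIVE_ICC_LVL,
				       0, 0, icc_level);
}
```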
2251
Yaniv Gardia70e91b2016-03-10 17:37:14 +02002252static int __ufshcd_query_descriptor(struct ufs_hba *hba,
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002253 enum query_opcode opcode, enum desc_idn idn, u8 index,
2254 u8 selector, u8 *desc_buf, int *buf_len)
2255{
2256 struct ufs_query_req *request = NULL;
2257 struct ufs_query_res *response = NULL;
2258 int err;
2259
2260 BUG_ON(!hba);
2261
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002262 ufshcd_hold(hba, false);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002263 if (!desc_buf) {
2264 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2265 __func__, opcode);
2266 err = -EINVAL;
2267 goto out;
2268 }
2269
2270 if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2271 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2272 __func__, *buf_len);
2273 err = -EINVAL;
2274 goto out;
2275 }
2276
2277 mutex_lock(&hba->dev_cmd.lock);
2278 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2279 selector);
2280 hba->dev_cmd.query.descriptor = desc_buf;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03002281 request->upiu_req.length = cpu_to_be16(*buf_len);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002282
2283 switch (opcode) {
2284 case UPIU_QUERY_OPCODE_WRITE_DESC:
2285 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2286 break;
2287 case UPIU_QUERY_OPCODE_READ_DESC:
2288 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2289 break;
2290 default:
2291 dev_err(hba->dev,
2292 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
2293 __func__, opcode);
2294 err = -EINVAL;
2295 goto out_unlock;
2296 }
2297
2298 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2299
2300 if (err) {
Yaniv Gardi4b761b52016-11-23 16:31:18 -08002301 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2302 __func__, opcode, idn, index, err);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002303 goto out_unlock;
2304 }
2305
2306 hba->dev_cmd.query.descriptor = NULL;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03002307 *buf_len = be16_to_cpu(response->upiu_res.length);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002308
2309out_unlock:
2310 mutex_unlock(&hba->dev_cmd.lock);
2311out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002312 ufshcd_release(hba);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002313 return err;
2314}
2315
2316/**
Yaniv Gardia70e91b2016-03-10 17:37:14 +02002317 * ufshcd_query_descriptor_retry - API function for sending descriptor
2318 * requests
 2319 * @hba: per-adapter instance
 2320 * @opcode: query opcode (descriptor read/write)
 2321 * @idn: descriptor idn to access
 2322 * @index: index field
 2323 * @selector: selector field
 2324 * @desc_buf: the buffer that contains the descriptor
 2325 * @buf_len: length parameter passed to the device
2326 *
2327 * Returns 0 for success, non-zero in case of failure.
2328 * The buf_len parameter will contain, on return, the length parameter
2329 * received on the response.
2330 */
2331int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2332 enum query_opcode opcode, enum desc_idn idn, u8 index,
2333 u8 selector, u8 *desc_buf, int *buf_len)
2334{
2335 int err;
2336 int retries;
2337
2338 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2339 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
2340 selector, desc_buf, buf_len);
2341 if (!err || err == -EINVAL)
2342 break;
2343 }
2344
2345 return err;
2346}
2347EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
2348
2349/**
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002350 * ufshcd_read_desc_param - read the specified descriptor parameter
2351 * @hba: Pointer to adapter instance
2352 * @desc_id: descriptor idn value
2353 * @desc_index: descriptor index
2354 * @param_offset: offset of the parameter to read
2355 * @param_read_buf: pointer to buffer where parameter would be read
2356 * @param_size: sizeof(param_read_buf)
2357 *
2358 * Return 0 in case of success, non-zero otherwise
2359 */
2360static int ufshcd_read_desc_param(struct ufs_hba *hba,
2361 enum desc_idn desc_id,
2362 int desc_index,
2363 u32 param_offset,
2364 u8 *param_read_buf,
2365 u32 param_size)
2366{
2367 int ret;
2368 u8 *desc_buf;
2369 u32 buff_len;
2370 bool is_kmalloc = true;
2371
2372 /* safety checks */
2373 if (desc_id >= QUERY_DESC_IDN_MAX)
2374 return -EINVAL;
2375
2376 buff_len = ufs_query_desc_max_size[desc_id];
2377 if ((param_offset + param_size) > buff_len)
2378 return -EINVAL;
2379
2380 if (!param_offset && (param_size == buff_len)) {
2381 /* memory space already available to hold full descriptor */
2382 desc_buf = param_read_buf;
2383 is_kmalloc = false;
2384 } else {
2385 /* allocate memory to hold full descriptor */
2386 desc_buf = kmalloc(buff_len, GFP_KERNEL);
2387 if (!desc_buf)
2388 return -ENOMEM;
2389 }
2390
Yaniv Gardia70e91b2016-03-10 17:37:14 +02002391 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2392 desc_id, desc_index, 0, desc_buf,
2393 &buff_len);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002394
subhashj@codeaurora.orgbde44bb2016-11-23 16:31:41 -08002395 if (ret) {
2396 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
2397 __func__, desc_id, desc_index, param_offset, ret);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002398
2399 goto out;
2400 }
2401
subhashj@codeaurora.orgbde44bb2016-11-23 16:31:41 -08002402 /* Sanity check */
2403 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
2404 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
2405 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
2406 ret = -EINVAL;
2407 goto out;
2408 }
2409
2410 /*
2411 * While reading variable size descriptors (like string descriptor),
2412 * some UFS devices may report the "LENGTH" (field in "Transaction
2413 * Specific fields" of Query Response UPIU) same as what was requested
2414 * in Query Request UPIU instead of reporting the actual size of the
2415 * variable size descriptor.
 2416 * It is safe to ignore the "LENGTH" field for variable size
 2417 * descriptors, as the length can always be derived from the
 2418 * descriptor header fields. Hence the length match check is
 2419 * imposed only on fixed size descriptors (for which we always
 2420 * request the correct size as part of the Query Request UPIU).
2421 */
2422 if ((desc_id != QUERY_DESC_IDN_STRING) &&
2423 (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
2424 dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
2425 __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
2426 ret = -EINVAL;
2427 goto out;
2428 }
2429
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002430 if (is_kmalloc)
2431 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
2432out:
2433 if (is_kmalloc)
2434 kfree(desc_buf);
2435 return ret;
2436}
2437
2438static inline int ufshcd_read_desc(struct ufs_hba *hba,
2439 enum desc_idn desc_id,
2440 int desc_index,
2441 u8 *buf,
2442 u32 size)
2443{
2444 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
2445}
2446
2447static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
2448 u8 *buf,
2449 u32 size)
2450{
Dolev Raviv61e07352016-11-23 16:30:49 -08002451 int err = 0;
2452 int retries;
2453
2454 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 2455 /* Read descriptor */
2456 err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
2457 if (!err)
2458 break;
2459 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
2460 }
2461
2462 return err;
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002463}
2464
Yaniv Gardib573d482016-03-10 17:37:09 +02002465int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
2466{
2467 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
2468}
2469EXPORT_SYMBOL(ufshcd_read_device_desc);
2470
2471/**
2472 * ufshcd_read_string_desc - read string descriptor
2473 * @hba: pointer to adapter instance
2474 * @desc_index: descriptor index
2475 * @buf: pointer to buffer where descriptor would be read
2476 * @size: size of buf
2477 * @ascii: if true convert from unicode to ascii characters
2478 *
2479 * Return 0 in case of success, non-zero otherwise
2480 */
2481int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
2482 u32 size, bool ascii)
2483{
2484 int err = 0;
2485
2486 err = ufshcd_read_desc(hba,
2487 QUERY_DESC_IDN_STRING, desc_index, buf, size);
2488
2489 if (err) {
2490 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
2491 __func__, QUERY_REQ_RETRIES, err);
2492 goto out;
2493 }
2494
2495 if (ascii) {
2496 int desc_len;
2497 int ascii_len;
2498 int i;
2499 char *buff_ascii;
2500
2501 desc_len = buf[0];
2502 /* remove header and divide by 2 to move from UTF16 to UTF8 */
2503 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
2504 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
2505 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
2506 __func__);
2507 err = -ENOMEM;
2508 goto out;
2509 }
2510
2511 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
2512 if (!buff_ascii) {
2513 err = -ENOMEM;
Tiezhu Yangfcbefc32016-06-25 12:35:22 +08002514 goto out;
Yaniv Gardib573d482016-03-10 17:37:09 +02002515 }
2516
2517 /*
2518 * the descriptor contains string in UTF16 format
2519 * we need to convert to utf-8 so it can be displayed
2520 */
2521 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
2522 desc_len - QUERY_DESC_HDR_SIZE,
2523 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
2524
2525 /* replace non-printable or non-ASCII characters with spaces */
2526 for (i = 0; i < ascii_len; i++)
2527 ufshcd_remove_non_printable(&buff_ascii[i]);
2528
2529 memset(buf + QUERY_DESC_HDR_SIZE, 0,
2530 size - QUERY_DESC_HDR_SIZE);
2531 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
2532 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
Yaniv Gardib573d482016-03-10 17:37:09 +02002533 kfree(buff_ascii);
2534 }
2535out:
2536 return err;
2537}
2538EXPORT_SYMBOL(ufshcd_read_string_desc);
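The ascii_len arithmetic above is the part worth double-checking: strip the two descriptor header bytes, halve the UTF-16 payload, then add one byte for the NUL terminator. A tiny stand-alone check of the formula:

```c
#include <stdio.h>

#define QUERY_DESC_HDR_SIZE 2	/* bLength + bDescriptorIDN */

int main(void)
{
	/* e.g. an 8-character serial number stored as UTF-16: the raw
	 * descriptor is 2 header bytes + 8 * 2 payload bytes */
	int desc_len = QUERY_DESC_HDR_SIZE + 8 * 2;	/* 18 */

	/* same formula as ufshcd_read_string_desc(): halve the UTF-16
	 * payload and add one byte for the NUL terminator */
	int ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;

	printf("desc_len=%d -> ascii_len=%d\n", desc_len, ascii_len);	/* 9 */
	return 0;
}
```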
2539
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002540/**
2541 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
2542 * @hba: Pointer to adapter instance
2543 * @lun: lun id
2544 * @param_offset: offset of the parameter to read
2545 * @param_read_buf: pointer to buffer where parameter would be read
2546 * @param_size: sizeof(param_read_buf)
2547 *
2548 * Return 0 in case of success, non-zero otherwise
2549 */
2550static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
2551 int lun,
2552 enum unit_desc_param param_offset,
2553 u8 *param_read_buf,
2554 u32 param_size)
2555{
2556 /*
2557 * Unit descriptors are only available for general purpose LUs (LUN id
2558 * from 0 to 7) and RPMB Well known LU.
2559 */
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03002560 if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002561 return -EOPNOTSUPP;
2562
2563 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
2564 param_offset, param_read_buf, param_size);
2565}
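A typical caller, sketched after the queue-depth probing done at LUN scan time (hedged: the fallback policy shown is illustrative; UNIT_DESC_PARAM_LU_Q_DEPTH is assumed from ufs.h):

```c
/* Hedged sketch: read bLUQueueDepth from the LUN's unit descriptor and
 * fall back to the host queue depth when the LUN has no unit descriptor
 * or reports zero. */
static int example_lu_queue_depth(struct ufs_hba *hba, u8 upiu_lun)
{
	u8 lun_qdepth = 0;
	int ret;

	ret = ufshcd_read_unit_desc_param(hba, upiu_lun,
					  UNIT_DESC_PARAM_LU_Q_DEPTH,
					  &lun_qdepth, sizeof(lun_qdepth));
	if (ret == -EOPNOTSUPP || !lun_qdepth)
		lun_qdepth = hba->nutrs;	/* use the host queue depth */
	return lun_qdepth;
}
```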
2566
2567/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302568 * ufshcd_memory_alloc - allocate memory for host memory space data structures
2569 * @hba: per adapter instance
2570 *
2571 * 1. Allocate DMA memory for Command Descriptor array
2572 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
2573 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
2574 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
2575 * (UTMRDL)
2576 * 4. Allocate memory for local reference block(lrb).
2577 *
2578 * Returns 0 for success, non-zero in case of failure
2579 */
2580static int ufshcd_memory_alloc(struct ufs_hba *hba)
2581{
2582 size_t utmrdl_size, utrdl_size, ucdl_size;
2583
2584 /* Allocate memory for UTP command descriptors */
2585 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09002586 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
2587 ucdl_size,
2588 &hba->ucdl_dma_addr,
2589 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302590
2591 /*
2592 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
2593 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
2594 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
2595 * be aligned to 128 bytes as well
2596 */
2597 if (!hba->ucdl_base_addr ||
2598 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302599 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302600 "Command Descriptor Memory allocation failed\n");
2601 goto out;
2602 }
2603
2604 /*
2605 * Allocate memory for UTP Transfer descriptors
2606 * UFSHCI requires 1024 byte alignment of UTRD
2607 */
2608 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09002609 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
2610 utrdl_size,
2611 &hba->utrdl_dma_addr,
2612 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302613 if (!hba->utrdl_base_addr ||
2614 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302615 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302616 "Transfer Descriptor Memory allocation failed\n");
2617 goto out;
2618 }
2619
2620 /*
2621 * Allocate memory for UTP Task Management descriptors
2622 * UFSHCI requires 1024 byte alignment of UTMRD
2623 */
2624 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
Seungwon Jeon2953f852013-06-27 13:31:54 +09002625 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
2626 utmrdl_size,
2627 &hba->utmrdl_dma_addr,
2628 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302629 if (!hba->utmrdl_base_addr ||
2630 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302631 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302632 "Task Management Descriptor Memory allocation failed\n");
2633 goto out;
2634 }
2635
2636 /* Allocate memory for local reference block */
Seungwon Jeon2953f852013-06-27 13:31:54 +09002637 hba->lrb = devm_kzalloc(hba->dev,
2638 hba->nutrs * sizeof(struct ufshcd_lrb),
2639 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302640 if (!hba->lrb) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302641 dev_err(hba->dev, "LRB Memory allocation failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302642 goto out;
2643 }
2644 return 0;
2645out:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302646 return -ENOMEM;
2647}
2648
2649/**
Dolev Raviv66cc8202016-12-22 18:39:42 -08002650 * ufshcd_print_pwr_info - print the power parameters saved in
 2651 * hba->pwr_info
2652 * @hba: per-adapter instance
2653 */
2654static void ufshcd_print_pwr_info(struct ufs_hba *hba)
2655{
2656 static const char * const names[] = {
2657 "INVALID MODE",
2658 "FAST MODE",
2659 "SLOW_MODE",
2660 "INVALID MODE",
2661 "FASTAUTO_MODE",
2662 "SLOWAUTO_MODE",
2663 "INVALID MODE",
2664 };
2665
2666 dev_info(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
2667 __func__,
2668 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
2669 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
2670 names[hba->pwr_info.pwr_rx],
2671 names[hba->pwr_info.pwr_tx],
2672 hba->pwr_info.hs_rate);
2673}
2674
2675/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302676 * ufshcd_host_memory_configure - configure local reference block with
2677 * memory offsets
2678 * @hba: per adapter instance
2679 *
2680 * Configure Host memory space
2681 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
2682 * address.
2683 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
2684 * and PRDT offset.
2685 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
2686 * into local reference block.
2687 */
2688static void ufshcd_host_memory_configure(struct ufs_hba *hba)
2689{
2690 struct utp_transfer_cmd_desc *cmd_descp;
2691 struct utp_transfer_req_desc *utrdlp;
2692 dma_addr_t cmd_desc_dma_addr;
2693 dma_addr_t cmd_desc_element_addr;
2694 u16 response_offset;
2695 u16 prdt_offset;
2696 int cmd_desc_size;
2697 int i;
2698
2699 utrdlp = hba->utrdl_base_addr;
2700 cmd_descp = hba->ucdl_base_addr;
2701
2702 response_offset =
2703 offsetof(struct utp_transfer_cmd_desc, response_upiu);
2704 prdt_offset =
2705 offsetof(struct utp_transfer_cmd_desc, prd_table);
2706
2707 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
2708 cmd_desc_dma_addr = hba->ucdl_dma_addr;
2709
2710 for (i = 0; i < hba->nutrs; i++) {
2711 /* Configure UTRD with command descriptor base address */
2712 cmd_desc_element_addr =
2713 (cmd_desc_dma_addr + (cmd_desc_size * i));
2714 utrdlp[i].command_desc_base_addr_lo =
2715 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
2716 utrdlp[i].command_desc_base_addr_hi =
2717 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
2718
2719 /* Response upiu and prdt offset should be in double words */
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002720 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
2721 utrdlp[i].response_upiu_offset =
2722 cpu_to_le16(response_offset);
2723 utrdlp[i].prd_table_offset =
2724 cpu_to_le16(prdt_offset);
2725 utrdlp[i].response_upiu_length =
2726 cpu_to_le16(ALIGNED_UPIU_SIZE);
2727 } else {
2728 utrdlp[i].response_upiu_offset =
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302729 cpu_to_le16((response_offset >> 2));
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002730 utrdlp[i].prd_table_offset =
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302731 cpu_to_le16((prdt_offset >> 2));
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002732 utrdlp[i].response_upiu_length =
Sujit Reddy Thumma3ca316c2013-06-26 22:39:30 +05302733 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002734 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302735
2736 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302737 hba->lrb[i].ucd_req_ptr =
2738 (struct utp_upiu_req *)(cmd_descp + i);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302739 hba->lrb[i].ucd_rsp_ptr =
2740 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2741 hba->lrb[i].ucd_prdt_ptr =
2742 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2743 }
2744}
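The >> 2 conversions above are the default UFSHCI encoding, where offsets and lengths are expressed in 32-bit double words; hosts flagged with UFSHCD_QUIRK_PRDT_BYTE_GRAN keep byte units instead. A stand-alone demo of the arithmetic with a mock descriptor layout (ALIGNED_UPIU_SIZE assumed to be 512, as in ufshcd.h):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGNED_UPIU_SIZE 512	/* assumed, as in ufshcd.h */

/* Illustrative mock of struct utp_transfer_cmd_desc's layout */
struct mock_cmd_desc {
	uint8_t command_upiu[ALIGNED_UPIU_SIZE];
	uint8_t response_upiu[ALIGNED_UPIU_SIZE];
	uint8_t prd_table[];
};

int main(void)
{
	size_t response_offset = offsetof(struct mock_cmd_desc, response_upiu);
	size_t prdt_offset = offsetof(struct mock_cmd_desc, prd_table);

	/* Default encoding: offsets programmed in 32-bit double words */
	printf("response: byte %zu -> dword %zu\n",
	       response_offset, response_offset >> 2);	/* 512 -> 128 */
	/* A UFSHCD_QUIRK_PRDT_BYTE_GRAN host would program the byte values */
	printf("prdt:     byte %zu -> dword %zu\n",
	       prdt_offset, prdt_offset >> 2);		/* 1024 -> 256 */
	return 0;
}
```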
2745
2746/**
2747 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
2748 * @hba: per adapter instance
2749 *
2750 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
2751 * in order to initialize the Unipro link startup procedure.
2752 * Once the Unipro links are up, the device connected to the controller
2753 * is detected.
2754 *
2755 * Returns 0 on success, non-zero value on failure
2756 */
2757static int ufshcd_dme_link_startup(struct ufs_hba *hba)
2758{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302759 struct uic_command uic_cmd = {0};
2760 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302761
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302762 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
2763
2764 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2765 if (ret)
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302766 dev_err(hba->dev,
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302767 "dme-link-startup: error code %d\n", ret);
2768 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302769}
2770
Yaniv Gardicad2e032015-03-31 17:37:14 +03002771static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
2772{
2773 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
2774 unsigned long min_sleep_time_us;
2775
2776 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
2777 return;
2778
2779 /*
2780 * last_dme_cmd_tstamp will be 0 only for 1st call to
2781 * this function
2782 */
2783 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
2784 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
2785 } else {
2786 unsigned long delta =
2787 (unsigned long) ktime_to_us(
2788 ktime_sub(ktime_get(),
2789 hba->last_dme_cmd_tstamp));
2790
2791 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
2792 min_sleep_time_us =
2793 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
2794 else
2795 return; /* no more delay required */
2796 }
2797
2798 /* allow sleep for extra 50us if needed */
2799 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
2800}
2801
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302802/**
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302803 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
2804 * @hba: per adapter instance
2805 * @attr_sel: uic command argument1
2806 * @attr_set: attribute set type as uic command argument2
2807 * @mib_val: setting value as uic command argument3
2808 * @peer: indicate whether peer or local
2809 *
2810 * Returns 0 on success, non-zero value on failure
2811 */
2812int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
2813 u8 attr_set, u32 mib_val, u8 peer)
2814{
2815 struct uic_command uic_cmd = {0};
2816 static const char *const action[] = {
2817 "dme-set",
2818 "dme-peer-set"
2819 };
2820 const char *set = action[!!peer];
2821 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002822 int retries = UFS_UIC_COMMAND_RETRIES;
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302823
2824 uic_cmd.command = peer ?
2825 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
2826 uic_cmd.argument1 = attr_sel;
2827 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
2828 uic_cmd.argument3 = mib_val;
2829
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002830 do {
2831 /* for peer attributes we retry upon failure */
2832 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2833 if (ret)
2834 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
2835 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
2836 } while (ret && peer && --retries);
2837
Yaniv Gardif37e9f82016-11-23 16:32:49 -08002838 if (ret)
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002839 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
Yaniv Gardif37e9f82016-11-23 16:32:49 -08002840 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
2841 UFS_UIC_COMMAND_RETRIES - retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302842
2843 return ret;
2844}
2845EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
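A typical call site, similar to the power-mode programming done later in this file (a hedged sketch: ufshcd_dme_set() is the static inline wrapper from ufshcd.h, PA_ACTIVETXDATALANES comes from unipro.h, and the lane count is illustrative):

```c
/* Hedged sketch: program two active TX data lanes via DME_SET */
static int example_set_tx_lanes(struct ufs_hba *hba)
{
	int ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), 2);

	if (ret)
		dev_err(hba->dev, "failed to set active TX lanes: %d\n", ret);
	return ret;
}
```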
2846
2847/**
2848 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
2849 * @hba: per adapter instance
2850 * @attr_sel: uic command argument1
2851 * @mib_val: the value of the attribute as returned by the UIC command
2852 * @peer: indicate whether peer or local
2853 *
2854 * Returns 0 on success, non-zero value on failure
2855 */
2856int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
2857 u32 *mib_val, u8 peer)
2858{
2859 struct uic_command uic_cmd = {0};
2860 static const char *const action[] = {
2861 "dme-get",
2862 "dme-peer-get"
2863 };
2864 const char *get = action[!!peer];
2865 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002866 int retries = UFS_UIC_COMMAND_RETRIES;
Yaniv Gardi874237f2015-05-17 18:55:03 +03002867 struct ufs_pa_layer_attr orig_pwr_info;
2868 struct ufs_pa_layer_attr temp_pwr_info;
2869 bool pwr_mode_change = false;
2870
2871 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
2872 orig_pwr_info = hba->pwr_info;
2873 temp_pwr_info = orig_pwr_info;
2874
2875 if (orig_pwr_info.pwr_tx == FAST_MODE ||
2876 orig_pwr_info.pwr_rx == FAST_MODE) {
2877 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
2878 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
2879 pwr_mode_change = true;
2880 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
2881 orig_pwr_info.pwr_rx == SLOW_MODE) {
2882 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
2883 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
2884 pwr_mode_change = true;
2885 }
2886 if (pwr_mode_change) {
2887 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
2888 if (ret)
2889 goto out;
2890 }
2891 }
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302892
2893 uic_cmd.command = peer ?
2894 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
2895 uic_cmd.argument1 = attr_sel;
2896
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002897 do {
2898 /* for peer attributes we retry upon failure */
2899 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2900 if (ret)
2901 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
2902 get, UIC_GET_ATTR_ID(attr_sel), ret);
2903 } while (ret && peer && --retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302904
Yaniv Gardif37e9f82016-11-23 16:32:49 -08002905 if (ret)
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002906 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
Yaniv Gardif37e9f82016-11-23 16:32:49 -08002907 get, UIC_GET_ATTR_ID(attr_sel),
2908 UFS_UIC_COMMAND_RETRIES - retries);
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002909
2910 if (mib_val && !ret)
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302911 *mib_val = uic_cmd.argument3;
Yaniv Gardi874237f2015-05-17 18:55:03 +03002912
2913 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
2914 && pwr_mode_change)
2915 ufshcd_change_power_mode(hba, &orig_pwr_info);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302916out:
2917 return ret;
2918}
2919EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
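/*
 * Typical usage is via the ufshcd_dme_get()/ufshcd_dme_peer_get()
 * wrappers from ufshcd.h, e.g. (mirroring ufshcd_disable_tx_lcc()
 * later in this file):
 *
 *	int tx_lanes;
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &tx_lanes);
 */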
2920
2921/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002922 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
2923 * state) and waits for them to take effect.
2924 *
2925 * @hba: per adapter instance
2926 * @cmd: UIC command to execute
2927 *
2928 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
2929 * DME_HIBERNATE_EXIT take some time to take effect on both the host and
2930 * device UniPro links, and hence their final completion is indicated by
2931 * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS)
2932 * in addition to the normal UIC command completion status (UCCS). This
2933 * function only returns after the relevant status bits indicate completion.
2934 *
2935 * Returns 0 on success, non-zero value on failure
2936 */
2937static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
2938{
2939 struct completion uic_async_done;
2940 unsigned long flags;
2941 u8 status;
2942 int ret;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002943 bool reenable_intr = false;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002944
2945 mutex_lock(&hba->uic_cmd_mutex);
2946 init_completion(&uic_async_done);
Yaniv Gardicad2e032015-03-31 17:37:14 +03002947 ufshcd_add_delay_before_dme_cmd(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002948
2949 spin_lock_irqsave(hba->host->host_lock, flags);
2950 hba->uic_async_done = &uic_async_done;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002951 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
2952 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
2953 /*
2954 * Make sure UIC command completion interrupt is disabled before
2955 * issuing UIC command.
2956 */
2957 wmb();
2958 reenable_intr = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002959 }
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002960 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
2961 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002962 if (ret) {
2963 dev_err(hba->dev,
2964 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
2965 cmd->command, cmd->argument3, ret);
2966 goto out;
2967 }
2968
2969 if (!wait_for_completion_timeout(hba->uic_async_done,
2970 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2971 dev_err(hba->dev,
2972 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
2973 cmd->command, cmd->argument3);
2974 ret = -ETIMEDOUT;
2975 goto out;
2976 }
2977
2978 status = ufshcd_get_upmcrs(hba);
2979 if (status != PWR_LOCAL) {
2980 dev_err(hba->dev,
Kiwoong Kim73615422016-09-08 16:50:02 +09002981 			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002982 cmd->command, status);
2983 ret = (status != PWR_OK) ? status : -1;
2984 }
2985out:
2986 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002987 hba->active_uic_cmd = NULL;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002988 hba->uic_async_done = NULL;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002989 if (reenable_intr)
2990 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002991 spin_unlock_irqrestore(hba->host->host_lock, flags);
2992 mutex_unlock(&hba->uic_cmd_mutex);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002993
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002994 return ret;
2995}
2996
2997/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302998 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
2999 * using DME_SET primitives.
3000 * @hba: per adapter instance
3001 * @mode: power mode value
3002 *
3003 * Returns 0 on success, non-zero value on failure
3004 */
Sujit Reddy Thummabdbe5d22014-05-26 10:59:11 +05303005static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303006{
3007 struct uic_command uic_cmd = {0};
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003008 int ret;
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303009
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03003010 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3011 ret = ufshcd_dme_set(hba,
3012 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3013 if (ret) {
3014 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3015 __func__, ret);
3016 goto out;
3017 }
3018 }
3019
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303020 uic_cmd.command = UIC_CMD_DME_SET;
3021 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3022 uic_cmd.argument3 = mode;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003023 ufshcd_hold(hba, false);
3024 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3025 ufshcd_release(hba);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303026
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03003027out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003028 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003029}
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303030
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003031static int ufshcd_link_recovery(struct ufs_hba *hba)
3032{
3033 int ret;
3034 unsigned long flags;
3035
3036 spin_lock_irqsave(hba->host->host_lock, flags);
3037 hba->ufshcd_state = UFSHCD_STATE_RESET;
3038 ufshcd_set_eh_in_progress(hba);
3039 spin_unlock_irqrestore(hba->host->host_lock, flags);
3040
3041 ret = ufshcd_host_reset_and_restore(hba);
3042
3043 spin_lock_irqsave(hba->host->host_lock, flags);
3044 if (ret)
3045 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3046 ufshcd_clear_eh_in_progress(hba);
3047 spin_unlock_irqrestore(hba->host->host_lock, flags);
3048
3049 if (ret)
3050 		dev_err(hba->dev, "%s: link recovery failed, err %d\n",
3051 __func__, ret);
3052
3053 return ret;
3054}
3055
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003056static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003057{
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003058 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003059 struct uic_command uic_cmd = {0};
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08003060 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003061
Kiwoong Kimee32c902016-11-10 21:17:43 +09003062 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3063
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003064 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003065 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08003066 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3067 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003068
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003069 if (ret) {
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003070 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3071 __func__, ret);
3072
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003073 /*
3074 * If link recovery fails then return an error so that the caller
3075 * doesn't retry the hibern8 enter again.
3076 */
3077 if (ufshcd_link_recovery(hba))
3078 ret = -ENOLINK;
Kiwoong Kimee32c902016-11-10 21:17:43 +09003079 } else
3080 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3081 POST_CHANGE);
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003082
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003083 return ret;
3084}
3085
3086static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3087{
3088 int ret = 0, retries;
3089
3090 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3091 ret = __ufshcd_uic_hibern8_enter(hba);
3092 if (!ret || ret == -ENOLINK)
3093 goto out;
3094 }
3095out:
3096 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003097}
3098
3099static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3100{
3101 struct uic_command uic_cmd = {0};
3102 int ret;
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08003103 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003104
Kiwoong Kimee32c902016-11-10 21:17:43 +09003105 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3106
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003107 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3108 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08003109 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3110 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3111
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303112 if (ret) {
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003113 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3114 __func__, ret);
3115 ret = ufshcd_link_recovery(hba);
Kiwoong Kimee32c902016-11-10 21:17:43 +09003116 } else
3117 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3118 POST_CHANGE);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303119
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303120 return ret;
3121}
3122
Yaniv Gardi50646362014-10-23 13:25:13 +03003123/**
3124 * ufshcd_init_pwr_info - setting the POR (power on reset)
3125 * values in hba power info
3126 * @hba: per-adapter instance
3127 */
3128static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3129{
3130 hba->pwr_info.gear_rx = UFS_PWM_G1;
3131 hba->pwr_info.gear_tx = UFS_PWM_G1;
3132 hba->pwr_info.lane_rx = 1;
3133 hba->pwr_info.lane_tx = 1;
3134 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3135 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3136 hba->pwr_info.hs_rate = 0;
3137}
3138
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303139/**
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003140 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
3141 * @hba: per-adapter instance
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303142 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003143static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303144{
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003145 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3146
3147 if (hba->max_pwr_info.is_valid)
3148 return 0;
3149
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08003150 pwr_info->pwr_tx = FAST_MODE;
3151 pwr_info->pwr_rx = FAST_MODE;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003152 pwr_info->hs_rate = PA_HS_MODE_B;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303153
3154 /* Get the connected lane count */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003155 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3156 &pwr_info->lane_rx);
3157 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3158 &pwr_info->lane_tx);
3159
3160 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3161 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
3162 __func__,
3163 pwr_info->lane_rx,
3164 pwr_info->lane_tx);
3165 return -EINVAL;
3166 }
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303167
3168 /*
3169 * First, get the maximum gears of HS speed.
3170 * If a zero value is read, it means there is no HSGEAR capability.
3171 * Then, get the maximum gears of PWM speed.
3172 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003173 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
3174 if (!pwr_info->gear_rx) {
3175 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3176 &pwr_info->gear_rx);
3177 if (!pwr_info->gear_rx) {
3178 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
3179 __func__, pwr_info->gear_rx);
3180 return -EINVAL;
3181 }
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08003182 pwr_info->pwr_rx = SLOW_MODE;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303183 }
3184
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003185 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
3186 &pwr_info->gear_tx);
3187 if (!pwr_info->gear_tx) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303188 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003189 &pwr_info->gear_tx);
3190 if (!pwr_info->gear_tx) {
3191 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
3192 __func__, pwr_info->gear_tx);
3193 return -EINVAL;
3194 }
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08003195 pwr_info->pwr_tx = SLOW_MODE;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003196 }
3197
3198 hba->max_pwr_info.is_valid = true;
3199 return 0;
3200}
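/*
 * Illustrative outcome (hypothetical attribute values): if both sides
 * report PA_MAXRXHSGEAR == 3 and two lanes are connected per direction,
 * the cached max_pwr_info becomes HS gear 3, 2+2 lanes, FAST_MODE rx/tx,
 * rate B. A zero HS gear instead falls back to the PWM gear in SLOW_MODE.
 */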
3201
3202static int ufshcd_change_power_mode(struct ufs_hba *hba,
3203 struct ufs_pa_layer_attr *pwr_mode)
3204{
3205 int ret;
3206
3207 /* if already configured to the requested pwr_mode */
3208 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
3209 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
3210 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
3211 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
3212 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
3213 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
3214 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
3215 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
3216 return 0;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303217 }
3218
3219 /*
3220 * Configure the attributes for the power mode change as below:
3221 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
3222 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
3223 * - PA_HSSERIES
3224 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003225 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
3226 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
3227 pwr_mode->lane_rx);
3228 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
3229 pwr_mode->pwr_rx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303230 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003231 else
3232 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303233
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003234 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
3235 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
3236 pwr_mode->lane_tx);
3237 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
3238 pwr_mode->pwr_tx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303239 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003240 else
3241 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303242
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003243 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
3244 pwr_mode->pwr_tx == FASTAUTO_MODE ||
3245 pwr_mode->pwr_rx == FAST_MODE ||
3246 pwr_mode->pwr_tx == FAST_MODE)
3247 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
3248 pwr_mode->hs_rate);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303249
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003250 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
3251 | pwr_mode->pwr_tx);
3252
3253 if (ret) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303254 dev_err(hba->dev,
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003255 "%s: power mode change failed %d\n", __func__, ret);
3256 } else {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003257 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
3258 pwr_mode);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003259
3260 memcpy(&hba->pwr_info, pwr_mode,
3261 sizeof(struct ufs_pa_layer_attr));
3262 }
3263
Dolev Raviv66cc8202016-12-22 18:39:42 -08003264 ufshcd_print_pwr_info(hba);
3265
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003266 return ret;
3267}
3268
3269/**
3270 * ufshcd_config_pwr_mode - configure a new power mode
3271 * @hba: per-adapter instance
3272 * @desired_pwr_mode: desired power configuration
3273 */
3274static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
3275 struct ufs_pa_layer_attr *desired_pwr_mode)
3276{
3277 struct ufs_pa_layer_attr final_params = { 0 };
3278 int ret;
3279
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003280 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
3281 desired_pwr_mode, &final_params);
3282
3283 if (ret)
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003284 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
3285
3286 ret = ufshcd_change_power_mode(hba, &final_params);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303287
3288 return ret;
3289}
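/*
 * A caller would typically request the negotiated maximum, e.g. (sketch
 * of how the probe path is expected to use these helpers):
 *
 *	if (!ufshcd_get_max_pwr_mode(hba))
 *		ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */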
3290
3291/**
Dolev Raviv68078d52013-07-30 00:35:58 +05303292 * ufshcd_complete_dev_init() - checks device readiness
3293 * @hba: per-adapter instance
3294 *
3295 * Set fDeviceInit flag and poll until device toggles it.
3296 */
3297static int ufshcd_complete_dev_init(struct ufs_hba *hba)
3298{
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02003299 int i;
3300 int err;
Dolev Raviv68078d52013-07-30 00:35:58 +05303301 bool flag_res = 1;
3302
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02003303 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
3304 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
Dolev Raviv68078d52013-07-30 00:35:58 +05303305 if (err) {
3306 dev_err(hba->dev,
3307 "%s setting fDeviceInit flag failed with error %d\n",
3308 __func__, err);
3309 goto out;
3310 }
3311
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02003312 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
3313 for (i = 0; i < 1000 && !err && flag_res; i++)
3314 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
3315 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
3316
Dolev Raviv68078d52013-07-30 00:35:58 +05303317 if (err)
3318 dev_err(hba->dev,
3319 "%s reading fDeviceInit flag failed with error %d\n",
3320 __func__, err);
3321 else if (flag_res)
3322 dev_err(hba->dev,
3323 "%s fDeviceInit was not cleared by the device\n",
3324 __func__);
3325
3326out:
3327 return err;
3328}
3329
3330/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303331 * ufshcd_make_hba_operational - Make UFS controller operational
3332 * @hba: per adapter instance
3333 *
3334 * To bring UFS host controller to operational state,
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003335 * 1. Enable required interrupts
3336 * 2. Configure interrupt aggregation
Yaniv Gardi897efe62016-02-01 15:02:48 +02003337 * 3. Program UTRL and UTMRL base addresses
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003338 * 4. Configure run-stop-registers
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303339 *
3340 * Returns 0 on success, non-zero value on failure
3341 */
3342static int ufshcd_make_hba_operational(struct ufs_hba *hba)
3343{
3344 int err = 0;
3345 u32 reg;
3346
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303347 /* Enable required interrupts */
3348 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
3349
3350 /* Configure interrupt aggregation */
Yaniv Gardib8521902015-05-17 18:54:57 +03003351 if (ufshcd_is_intr_aggr_allowed(hba))
3352 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
3353 else
3354 ufshcd_disable_intr_aggr(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303355
3356 /* Configure UTRL and UTMRL base address registers */
3357 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
3358 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
3359 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
3360 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
3361 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
3362 REG_UTP_TASK_REQ_LIST_BASE_L);
3363 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
3364 REG_UTP_TASK_REQ_LIST_BASE_H);
3365
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303366 /*
Yaniv Gardi897efe62016-02-01 15:02:48 +02003367 * Make sure base address and interrupt setup are updated before
3368 * enabling the run/stop registers below.
3369 */
3370 wmb();
3371
3372 /*
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303373 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303374 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003375 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303376 if (!(ufshcd_get_lists_status(reg))) {
3377 ufshcd_enable_run_stop_reg(hba);
3378 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303379 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303380 			"Host controller not ready to process requests\n");
3381 err = -EIO;
3382 goto out;
3383 }
3384
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303385out:
3386 return err;
3387}
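/*
 * Example of the base address programming above: a 64-bit
 * utrdl_dma_addr of 0x0000000123456000 is written as 0x23456000 to
 * REG_UTP_TRANSFER_REQ_LIST_BASE_L and 0x00000001 to
 * REG_UTP_TRANSFER_REQ_LIST_BASE_H via lower_32_bits()/upper_32_bits().
 */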
3388
3389/**
Yaniv Gardi596585a2016-03-10 17:37:08 +02003390 * ufshcd_hba_stop - Send controller to reset state
3391 * @hba: per adapter instance
3392 * @can_sleep: perform sleep or just spin
3393 */
3394static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
3395{
3396 int err;
3397
3398 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
3399 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
3400 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
3401 10, 1, can_sleep);
3402 if (err)
3403 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
3404}
3405
3406/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303407 * ufshcd_hba_enable - initialize the controller
3408 * @hba: per adapter instance
3409 *
3410 * The controller resets itself and the controller firmware initialization
3411 * sequence kicks off. When the controller is ready it will set
3412 * the Host Controller Enable bit to 1.
3413 *
3414 * Returns 0 on success, non-zero value on failure
3415 */
3416static int ufshcd_hba_enable(struct ufs_hba *hba)
3417{
3418 int retry;
3419
3420 /*
3421 * The msleep(1) and msleep(5) used in this function might result in
3422 * msleep(20), but they were necessary to send the UFS FPGA to reset mode
3423 * during development and testing of this driver. msleep can be changed to
3424 * mdelay and the retry count can be reduced based on the controller.
3425 */
Yaniv Gardi596585a2016-03-10 17:37:08 +02003426 if (!ufshcd_is_hba_active(hba))
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303427 /* change controller state to "reset state" */
Yaniv Gardi596585a2016-03-10 17:37:08 +02003428 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303429
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003430 /* UniPro link is disabled at this point */
3431 ufshcd_set_link_off(hba);
3432
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003433 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003434
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303435 /* start controller initialization sequence */
3436 ufshcd_hba_start(hba);
3437
3438 /*
3439 * To initialize a UFS host controller HCE bit must be set to 1.
3440 * During initialization the HCE bit value changes from 1->0->1.
3441 * When the host controller completes initialization sequence
3442 * it sets the value of HCE bit to 1. The same HCE bit is read back
3443 * to check if the controller has completed initialization sequence.
3444 * So without this delay the value HCE = 1 set in the previous
3445 * instruction might be read back.
3446 * This delay can be changed based on the controller.
3447 */
3448 msleep(1);
3449
3450 /* wait for the host controller to complete initialization */
3451 retry = 10;
3452 while (ufshcd_is_hba_active(hba)) {
3453 if (retry) {
3454 retry--;
3455 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303456 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303457 "Controller enable failed\n");
3458 return -EIO;
3459 }
3460 msleep(5);
3461 }
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003462
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003463 /* enable UIC related interrupts */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003464 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003465
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003466 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003467
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303468 return 0;
3469}
3470
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03003471static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
3472{
3473 int tx_lanes, i, err = 0;
3474
3475 if (!peer)
3476 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3477 &tx_lanes);
3478 else
3479 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3480 &tx_lanes);
3481 for (i = 0; i < tx_lanes; i++) {
3482 if (!peer)
3483 err = ufshcd_dme_set(hba,
3484 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
3485 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
3486 0);
3487 else
3488 err = ufshcd_dme_peer_set(hba,
3489 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
3490 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
3491 0);
3492 if (err) {
3493 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
3494 __func__, peer, i, err);
3495 break;
3496 }
3497 }
3498
3499 return err;
3500}
3501
3502static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
3503{
3504 return ufshcd_disable_tx_lcc(hba, true);
3505}
3506
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303507/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303508 * ufshcd_link_startup - Initialize unipro link startup
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303509 * @hba: per adapter instance
3510 *
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303511 * Returns 0 for success, non-zero in case of failure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303512 */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303513static int ufshcd_link_startup(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303514{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303515 int ret;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003516 int retries = DME_LINKSTARTUP_RETRIES;
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08003517 bool link_startup_again = false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303518
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08003519 /*
3520 * If UFS device isn't active then we will have to issue link startup
3521 * 2 times to make sure the device state moves to active.
3522 */
3523 if (!ufshcd_is_ufs_dev_active(hba))
3524 link_startup_again = true;
3525
3526link_startup:
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003527 do {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003528 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303529
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003530 ret = ufshcd_dme_link_startup(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003531
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003532 /* check if device is detected by inter-connect layer */
3533 if (!ret && !ufshcd_is_device_present(hba)) {
3534 dev_err(hba->dev, "%s: Device not present\n", __func__);
3535 ret = -ENXIO;
3536 goto out;
3537 }
3538
3539 /*
3540 * DME link lost indication is only received when link is up,
3541 * but we can't be sure if the link is up until link startup
3542 * succeeds. So reset the local Uni-Pro and try again.
3543 */
3544 if (ret && ufshcd_hba_enable(hba))
3545 goto out;
3546 } while (ret && retries--);
3547
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303548 if (ret)
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003549 		/* failed to get the link up... give up */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303550 goto out;
3551
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08003552 if (link_startup_again) {
3553 link_startup_again = false;
3554 retries = DME_LINKSTARTUP_RETRIES;
3555 goto link_startup;
3556 }
3557
subhashj@codeaurora.orgd2aebb92016-12-22 18:41:33 -08003558 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
3559 ufshcd_init_pwr_info(hba);
3560 ufshcd_print_pwr_info(hba);
3561
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03003562 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
3563 ret = ufshcd_disable_device_tx_lcc(hba);
3564 if (ret)
3565 goto out;
3566 }
3567
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003568 /* Include any host controller configuration via UIC commands */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003569 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
3570 if (ret)
3571 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003572
3573 ret = ufshcd_make_hba_operational(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303574out:
3575 if (ret)
3576 dev_err(hba->dev, "link startup failed %d\n", ret);
3577 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303578}
3579
3580/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303581 * ufshcd_verify_dev_init() - Verify device initialization
3582 * @hba: per-adapter instance
3583 *
3584 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
3585 * device Transport Protocol (UTP) layer is ready after a reset.
3586 * If the UTP layer at the device side is not initialized, it may
3587 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
3588 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
3589 */
3590static int ufshcd_verify_dev_init(struct ufs_hba *hba)
3591{
3592 int err = 0;
3593 int retries;
3594
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003595 ufshcd_hold(hba, false);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303596 mutex_lock(&hba->dev_cmd.lock);
3597 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
3598 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
3599 NOP_OUT_TIMEOUT);
3600
3601 if (!err || err == -ETIMEDOUT)
3602 break;
3603
3604 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
3605 }
3606 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003607 ufshcd_release(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303608
3609 if (err)
3610 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
3611 return err;
3612}
3613
3614/**
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003615 * ufshcd_set_queue_depth - set lun queue depth
3616 * @sdev: pointer to SCSI device
3617 *
3618 * Read bLUQueueDepth value and activate scsi tagged command
3619 * queueing. For WLUN, queue depth is set to 1. For best-effort
3620 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
3621 * value that the host can queue.
3622 */
3623static void ufshcd_set_queue_depth(struct scsi_device *sdev)
3624{
3625 int ret = 0;
3626 u8 lun_qdepth;
Dolev Raviv61e07352016-11-23 16:30:49 -08003627 int retries;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003628 struct ufs_hba *hba;
3629
3630 hba = shost_priv(sdev->host);
3631
3632 lun_qdepth = hba->nutrs;
Dolev Raviv61e07352016-11-23 16:30:49 -08003633 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3634 /* Read descriptor*/
3635 ret = ufshcd_read_unit_desc_param(hba,
3636 ufshcd_scsi_to_upiu_lun(sdev->lun),
3637 UNIT_DESC_PARAM_LU_Q_DEPTH,
3638 &lun_qdepth,
3639 sizeof(lun_qdepth));
3640 if (!ret || ret == -ENOTSUPP)
3641 break;
3642
3643 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret);
3644 }
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003645
3646 /* Some WLUN doesn't support unit descriptor */
3647 if (ret == -EOPNOTSUPP)
3648 lun_qdepth = 1;
3649 else if (!lun_qdepth)
3650 /* eventually, we can figure out the real queue depth */
3651 lun_qdepth = hba->nutrs;
3652 else
3653 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
3654
3655 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
3656 __func__, lun_qdepth);
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01003657 scsi_change_queue_depth(sdev, lun_qdepth);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003658}
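/*
 * Resulting depth for some example descriptor reads (hba->nutrs is at
 * most 32 per UFSHCI):
 *	bLUQueueDepth == 0 (best effort)   -> depth = hba->nutrs
 *	bLUQueueDepth == 8                 -> depth = min(8, hba->nutrs)
 *	unit descriptor not supported      -> depth = 1 (W-LUs)
 */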
3659
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003660/*
3661 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
3662 * @hba: per-adapter instance
3663 * @lun: UFS device lun id
3664 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
3665 *
3666 * Returns 0 in case of success, with the write protect status returned in
3667 * the @b_lu_write_protect parameter.
3668 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
3669 * Returns -EINVAL in case of invalid parameters passed to this function.
3670 */
3671static int ufshcd_get_lu_wp(struct ufs_hba *hba,
3672 u8 lun,
3673 u8 *b_lu_write_protect)
3674{
3675 int ret;
3676
3677 if (!b_lu_write_protect)
3678 ret = -EINVAL;
3679 /*
3680 * According to UFS device spec, RPMB LU can't be write
3681 * protected so skip reading bLUWriteProtect parameter for
3682 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
3683 */
3684 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
3685 ret = -ENOTSUPP;
3686 else
3687 ret = ufshcd_read_unit_desc_param(hba,
3688 lun,
3689 UNIT_DESC_PARAM_LU_WR_PROTECT,
3690 b_lu_write_protect,
3691 sizeof(*b_lu_write_protect));
3692 return ret;
3693}
3694
3695/**
3696 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
3697 * status
3698 * @hba: per-adapter instance
3699 * @sdev: pointer to SCSI device
3700 *
3701 */
3702static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
3703 struct scsi_device *sdev)
3704{
3705 if (hba->dev_info.f_power_on_wp_en &&
3706 !hba->dev_info.is_lu_power_on_wp) {
3707 u8 b_lu_write_protect;
3708
3709 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
3710 &b_lu_write_protect) &&
3711 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
3712 hba->dev_info.is_lu_power_on_wp = true;
3713 }
3714}
3715
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003716/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303717 * ufshcd_slave_alloc - handle initial SCSI device configurations
3718 * @sdev: pointer to SCSI device
3719 *
3720 * Returns 0 (success)
3721 */
3722static int ufshcd_slave_alloc(struct scsi_device *sdev)
3723{
3724 struct ufs_hba *hba;
3725
3726 hba = shost_priv(sdev->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303727
3728 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
3729 sdev->use_10_for_ms = 1;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303730
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05303731 /* allow SCSI layer to restart the device in case of errors */
3732 sdev->allow_restart = 1;
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003733
Sujit Reddy Thummab2a6c522014-07-01 12:22:38 +03003734 /* REPORT SUPPORTED OPERATION CODES is not supported */
3735 sdev->no_report_opcodes = 1;
3736
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003738 ufshcd_set_queue_depth(sdev);
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003739
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003740 ufshcd_get_lu_power_on_wp_status(hba, sdev);
3741
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003742 return 0;
3743}
3744
3745/**
3746 * ufshcd_change_queue_depth - change queue depth
3747 * @sdev: pointer to SCSI device
3748 * @depth: required depth to set
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003749 *
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01003750 * Change queue depth and make sure the max. limits are not crossed.
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003751 */
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01003752static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003753{
3754 struct ufs_hba *hba = shost_priv(sdev->host);
3755
3756 if (depth > hba->nutrs)
3757 depth = hba->nutrs;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01003758 return scsi_change_queue_depth(sdev, depth);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303759}
3760
3761/**
Akinobu Mitaeeda4742014-07-01 23:00:32 +09003762 * ufshcd_slave_configure - adjust SCSI device configurations
3763 * @sdev: pointer to SCSI device
3764 */
3765static int ufshcd_slave_configure(struct scsi_device *sdev)
3766{
3767 struct request_queue *q = sdev->request_queue;
3768
3769 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
3770 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
3771
3772 return 0;
3773}
3774
3775/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303776 * ufshcd_slave_destroy - remove SCSI device configurations
3777 * @sdev: pointer to SCSI device
3778 */
3779static void ufshcd_slave_destroy(struct scsi_device *sdev)
3780{
3781 struct ufs_hba *hba;
3782
3783 hba = shost_priv(sdev->host);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003784 /* Drop the reference as it won't be needed anymore */
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03003785 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
3786 unsigned long flags;
3787
3788 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003789 hba->sdev_ufs_device = NULL;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03003790 spin_unlock_irqrestore(hba->host->host_lock, flags);
3791 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303792}
3793
3794/**
3795 * ufshcd_task_req_compl - handle task management request completion
3796 * @hba: per adapter instance
3797 * @index: index of the completed request
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303798 * @resp: task management service response
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303799 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303800 * Returns non-zero value on error, zero on success
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303801 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303802static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303803{
3804 struct utp_task_req_desc *task_req_descp;
3805 struct utp_upiu_task_rsp *task_rsp_upiup;
3806 unsigned long flags;
3807 int ocs_value;
3808 int task_result;
3809
3810 spin_lock_irqsave(hba->host->host_lock, flags);
3811
3812 /* Clear completed tasks from outstanding_tasks */
3813 __clear_bit(index, &hba->outstanding_tasks);
3814
3815 task_req_descp = hba->utmrdl_base_addr;
3816 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
3817
3818 if (ocs_value == OCS_SUCCESS) {
3819 task_rsp_upiup = (struct utp_upiu_task_rsp *)
3820 task_req_descp[index].task_rsp_upiu;
Kiwoong Kim8794ee02016-09-09 08:22:22 +09003821 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
3822 task_result = task_result & MASK_TM_SERVICE_RESP;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303823 if (resp)
3824 *resp = (u8)task_result;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303825 } else {
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303826 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
3827 __func__, ocs_value);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303828 }
3829 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303830
3831 return ocs_value;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303832}
3833
3834/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303835 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
3836 * @lrbp: pointer to local reference block of completed command
3837 * @scsi_status: SCSI command status
3838 *
3839 * Returns a value based on the SCSI command status
3840 */
3841static inline int
3842ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
3843{
3844 int result = 0;
3845
3846 switch (scsi_status) {
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05303847 case SAM_STAT_CHECK_CONDITION:
3848 ufshcd_copy_sense_data(lrbp);
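		/* fall through - sense data copied; status reported with DID_OK */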
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303849 case SAM_STAT_GOOD:
3850 result |= DID_OK << 16 |
3851 COMMAND_COMPLETE << 8 |
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05303852 scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303853 break;
3854 case SAM_STAT_TASK_SET_FULL:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05303855 case SAM_STAT_BUSY:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303856 case SAM_STAT_TASK_ABORTED:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05303857 ufshcd_copy_sense_data(lrbp);
3858 result |= scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303859 break;
3860 default:
3861 result |= DID_ERROR << 16;
3862 break;
3863 } /* end of switch */
3864
3865 return result;
3866}
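/*
 * The result word follows the SCSI midlayer convention: host byte in
 * bits 16..23, message byte in bits 8..15, status byte in bits 0..7.
 * E.g. a CHECK CONDITION (0x02) completed by the host is reported as
 * DID_OK << 16 | COMMAND_COMPLETE << 8 | 0x02 == 0x00000002, since
 * DID_OK and COMMAND_COMPLETE are both zero.
 */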
3867
3868/**
3869 * ufshcd_transfer_rsp_status - Get overall status of the response
3870 * @hba: per adapter instance
3871 * @lrbp: pointer to local reference block of completed command
3872 *
3873 * Returns result of the command to notify SCSI midlayer
3874 */
3875static inline int
3876ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3877{
3878 int result = 0;
3879 int scsi_status;
3880 int ocs;
3881
3882 /* overall command status of utrd */
3883 ocs = ufshcd_get_tr_ocs(lrbp);
3884
3885 switch (ocs) {
3886 case OCS_SUCCESS:
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303887 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303888
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303889 switch (result) {
3890 case UPIU_TRANSACTION_RESPONSE:
3891 /*
3892 * get the response UPIU result to extract
3893 * the SCSI command status
3894 */
3895 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
3896
3897 /*
3898 * get the result based on SCSI status response
3899 * to notify the SCSI midlayer of the command status
3900 */
3901 scsi_status = result & MASK_SCSI_STATUS;
3902 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303903
Yaniv Gardif05ac2e2016-02-01 15:02:42 +02003904 /*
3905 * Currently we are only supporting BKOPs exception
3906 * events hence we can ignore BKOPs exception event
3907 * during power management callbacks. BKOPs exception
3908 * event is not expected to be raised in runtime suspend
3909 * callback as it allows the urgent bkops.
3910 * During system suspend, we are anyway forcefully
3911 * disabling the bkops and if urgent bkops is needed
3912 * it will be enabled on system resume. Long term
3913 * solution could be to abort the system suspend if
3914 * UFS device needs urgent BKOPs.
3915 */
3916 if (!hba->pm_op_in_progress &&
3917 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303918 schedule_work(&hba->eeh_work);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303919 break;
3920 case UPIU_TRANSACTION_REJECT_UPIU:
3921 /* TODO: handle Reject UPIU Response */
3922 result = DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303923 dev_err(hba->dev,
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303924 "Reject UPIU not fully implemented\n");
3925 break;
3926 default:
3927 		dev_err(hba->dev,
3928 			"Unexpected request response code = %x\n",
3929 			result);
3930 		result = DID_ERROR << 16;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303931 break;
3932 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303933 break;
3934 case OCS_ABORTED:
3935 result |= DID_ABORT << 16;
3936 break;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05303937 case OCS_INVALID_COMMAND_STATUS:
3938 result |= DID_REQUEUE << 16;
3939 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303940 case OCS_INVALID_CMD_TABLE_ATTR:
3941 case OCS_INVALID_PRDT_ATTR:
3942 case OCS_MISMATCH_DATA_BUF_SIZE:
3943 case OCS_MISMATCH_RESP_UPIU_SIZE:
3944 case OCS_PEER_COMM_FAILURE:
3945 case OCS_FATAL_ERROR:
3946 default:
3947 result |= DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303948 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303949 "OCS error from controller = %x\n", ocs);
3950 break;
3951 } /* end of switch */
3952
Dolev Raviv66cc8202016-12-22 18:39:42 -08003953 if (host_byte(result) != DID_OK)
3954 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303955 return result;
3956}
3957
3958/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303959 * ufshcd_uic_cmd_compl - handle completion of uic command
3960 * @hba: per adapter instance
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303961 * @intr_status: interrupt status generated by the controller
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303962 */
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303963static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303964{
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303965 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303966 hba->active_uic_cmd->argument2 |=
3967 ufshcd_get_uic_cmd_result(hba);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303968 hba->active_uic_cmd->argument3 =
3969 ufshcd_get_dme_attr_val(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303970 complete(&hba->active_uic_cmd->done);
3971 }
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303972
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003973 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
3974 complete(hba->uic_async_done);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303975}
3976
3977/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003978 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303979 * @hba: per adapter instance
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003980 * @completed_reqs: requests to complete
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303981 */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003982static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
3983 unsigned long completed_reqs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303984{
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303985 struct ufshcd_lrb *lrbp;
3986 struct scsi_cmnd *cmd;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303987 int result;
3988 int index;
Dolev Ravive9d501b2014-07-01 12:22:37 +03003989
Dolev Ravive9d501b2014-07-01 12:22:37 +03003990 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
3991 lrbp = &hba->lrb[index];
3992 cmd = lrbp->cmd;
3993 if (cmd) {
Lee Susman1a07f2d2016-12-22 18:42:03 -08003994 ufshcd_add_command_trace(hba, index, "complete");
Dolev Ravive9d501b2014-07-01 12:22:37 +03003995 result = ufshcd_transfer_rsp_status(hba, lrbp);
3996 scsi_dma_unmap(cmd);
3997 cmd->result = result;
3998 /* Mark completed command as NULL in LRB */
3999 lrbp->cmd = NULL;
4000 clear_bit_unlock(index, &hba->lrb_in_use);
4001 /* Do not touch lrbp after scsi done */
4002 cmd->scsi_done(cmd);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004003 __ufshcd_release(hba);
Joao Pinto300bb132016-05-11 12:21:27 +01004004 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4005 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
Lee Susman1a07f2d2016-12-22 18:42:03 -08004006 if (hba->dev_cmd.complete) {
4007 ufshcd_add_command_trace(hba, index,
4008 "dev_complete");
Dolev Ravive9d501b2014-07-01 12:22:37 +03004009 complete(hba->dev_cmd.complete);
Lee Susman1a07f2d2016-12-22 18:42:03 -08004010 }
Dolev Ravive9d501b2014-07-01 12:22:37 +03004011 }
4012 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304013
4014 /* clear corresponding bits of completed commands */
4015 hba->outstanding_reqs ^= completed_reqs;
4016
Sahitya Tummala856b3482014-09-25 15:32:34 +03004017 ufshcd_clk_scaling_update_busy(hba);
4018
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304019 /* we might have free'd some tags above */
4020 wake_up(&hba->dev_cmd.tag_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304021}
4022
4023/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004024 * ufshcd_transfer_req_compl - handle SCSI and query command completion
4025 * @hba: per adapter instance
4026 */
4027static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
4028{
4029 unsigned long completed_reqs;
4030 u32 tr_doorbell;
4031
4032 /* Resetting interrupt aggregation counters first and reading the
4033 * DOOR_BELL afterward allows us to handle all the completed requests.
4034 * In order to prevent starvation of other interrupts, the DB is read once
4035 * after reset. The downside of this solution is the possibility of a
4036 * false interrupt if the device completes another request after resetting
4037 * aggregation and before reading the DB.
4038 */
4039 if (ufshcd_is_intr_aggr_allowed(hba))
4040 ufshcd_reset_intr_aggr(hba);
4041
4042 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4043 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4044
4045 __ufshcd_transfer_req_compl(hba, completed_reqs);
4046}
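/*
 * The XOR above selects requests that were outstanding but whose
 * doorbell bits have since cleared. E.g. with outstanding_reqs == 0xb
 * (tags 0, 1, 3) and tr_doorbell == 0x2 (tag 1 still in flight),
 * completed_reqs == 0x9, i.e. tags 0 and 3 have completed.
 */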
4047
4048/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304049 * ufshcd_disable_ee - disable exception event
4050 * @hba: per-adapter instance
4051 * @mask: exception event to disable
4052 *
4053 * Disables exception event in the device so that the EVENT_ALERT
4054 * bit is not set.
4055 *
4056 * Returns zero on success, non-zero error value on failure.
4057 */
4058static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4059{
4060 int err = 0;
4061 u32 val;
4062
4063 if (!(hba->ee_ctrl_mask & mask))
4064 goto out;
4065
4066 val = hba->ee_ctrl_mask & ~mask;
4067 val &= 0xFFFF; /* 2 bytes */
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02004068 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304069 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4070 if (!err)
4071 hba->ee_ctrl_mask &= ~mask;
4072out:
4073 return err;
4074}
4075
4076/**
4077 * ufshcd_enable_ee - enable exception event
4078 * @hba: per-adapter instance
4079 * @mask: exception event to enable
4080 *
4081 * Enable corresponding exception event in the device to allow
4082 * device to alert host in critical scenarios.
4083 *
4084 * Returns zero on success, non-zero error value on failure.
4085 */
4086static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4087{
4088 int err = 0;
4089 u32 val;
4090
4091 if (hba->ee_ctrl_mask & mask)
4092 goto out;
4093
4094 val = hba->ee_ctrl_mask | mask;
4095 val &= 0xFFFF; /* 2 bytes */
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02004096 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304097 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4098 if (!err)
4099 hba->ee_ctrl_mask |= mask;
4100out:
4101 return err;
4102}
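/*
 * Callers pass one of the MASK_EE_* bits, e.g. the urgent-BKOPS handling
 * below uses ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS) so that the
 * device raises EVENT_ALERT when background operations become urgent.
 */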
4103
4104/**
4105 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
4106 * @hba: per-adapter instance
4107 *
4108 * Allow device to manage background operations on its own. Enabling
4109 * this might lead to inconsistent latencies during normal data transfers
4110 * as the device is allowed to manage its own way of handling background
4111 * operations.
4112 *
4113 * Returns zero on success, non-zero on failure.
4114 */
4115static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4116{
4117 int err = 0;
4118
4119 if (hba->auto_bkops_enabled)
4120 goto out;
4121
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004122 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304123 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4124 if (err) {
4125 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
4126 __func__, err);
4127 goto out;
4128 }
4129
4130 hba->auto_bkops_enabled = true;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08004131 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304132
4133 /* No need of URGENT_BKOPS exception from the device */
4134 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4135 if (err)
4136 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
4137 __func__, err);
4138out:
4139 return err;
4140}
4141
4142/**
4143 * ufshcd_disable_auto_bkops - block device in doing background operations
4144 * @hba: per-adapter instance
4145 *
4146 * Disabling background operations improves command response latency but
4147 * has the drawback of the device moving into a critical state where it is
4148 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
4149 * host is idle so that BKOPS are managed effectively without any negative
4150 * impacts.
4151 *
4152 * Returns zero on success, non-zero on failure.
4153 */
4154static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
4155{
4156 int err = 0;
4157
4158 if (!hba->auto_bkops_enabled)
4159 goto out;
4160
4161 /*
4162 * If host assisted BKOPs is to be enabled, make sure
4163 * urgent bkops exception is allowed.
4164 */
4165 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
4166 if (err) {
4167 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
4168 __func__, err);
4169 goto out;
4170 }
4171
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004172 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304173 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4174 if (err) {
4175 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
4176 __func__, err);
4177 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4178 goto out;
4179 }
4180
4181 hba->auto_bkops_enabled = false;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08004182 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304183out:
4184 return err;
4185}
4186
4187/**
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08004188 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304189 * @hba: per adapter instance
4190 *
4191 * After a device reset the device may toggle the BKOPS_EN flag
4192 * to its default value. The s/w tracking variables should be updated
4193 * as well. This function changes the auto-bkops state based on
4194 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304195 */
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08004196static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304197{
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08004198 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
4199 hba->auto_bkops_enabled = false;
4200 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
4201 ufshcd_enable_auto_bkops(hba);
4202 } else {
4203 hba->auto_bkops_enabled = true;
4204 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
4205 ufshcd_disable_auto_bkops(hba);
4206 }
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304207}
4208
4209static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
4210{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02004211 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304212 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
4213}
4214
4215/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004216 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
4217 * @hba: per-adapter instance
4218 * @status: bkops_status value
4219 *
4220 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
4221 * flag in the device to permit background operations if the device
4222 * bkops_status is greater than or equal to "status" argument passed to
4223 * this function, disable otherwise.
4224 *
4225 * Returns 0 for success, non-zero in case of failure.
4226 *
4227 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
4228 * to know whether auto bkops is enabled or disabled after this function
4229 * returns control to it.
4230 */
4231static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
4232 enum bkops_status status)
4233{
4234 int err;
4235 u32 curr_status = 0;
4236
4237 err = ufshcd_get_bkops_status(hba, &curr_status);
4238 if (err) {
4239 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
4240 __func__, err);
4241 goto out;
4242 } else if (curr_status > BKOPS_STATUS_MAX) {
4243 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
4244 __func__, curr_status);
4245 err = -EINVAL;
4246 goto out;
4247 }
4248
4249 if (curr_status >= status)
4250 err = ufshcd_enable_auto_bkops(hba);
4251 else
4252 err = ufshcd_disable_auto_bkops(hba);
4253out:
4254 return err;
4255}
4256
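/*
 * Editor's illustration (not part of the driver): how the threshold
 * comparison in ufshcd_bkops_ctrl() maps bBackgroundOpsStatus values to
 * an enable/disable decision. The level encoding below (0 = no ops
 * needed .. 3 = critical) is an assumption made for this sketch.
 */
enum example_bkops_level {
	EX_BKOPS_NO_OP		= 0,	/* device needs no background ops */
	EX_BKOPS_NON_CRITICAL	= 1,
	EX_BKOPS_PERF_IMPACT	= 2,	/* performance is being impacted */
	EX_BKOPS_CRITICAL	= 3,
};

/* Mirrors the "curr_status >= status" comparison above */
static bool __maybe_unused example_should_enable_bkops(u32 curr, u32 threshold)
{
	/* e.g. threshold = EX_BKOPS_PERF_IMPACT: level 1 -> false, 3 -> true */
	return curr >= threshold;
}
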
4257/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304258 * ufshcd_urgent_bkops - handle urgent bkops exception event
4259 * @hba: per-adapter instance
4260 *
4261 * Enable fBackgroundOpsEn flag in the device to permit background
4262 * operations.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004263 *
4264 * Returns 0 if BKOPs is enabled, 1 if bkops is not enabled,
4265 * and a negative error value for any other failure.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304266 */
4267static int ufshcd_urgent_bkops(struct ufs_hba *hba)
4268{
Yaniv Gardiafdfff52016-03-10 17:37:15 +02004269 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304270}
4271
4272static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
4273{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02004274 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304275 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
4276}
4277
Yaniv Gardiafdfff52016-03-10 17:37:15 +02004278static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
4279{
4280 int err;
4281 u32 curr_status = 0;
4282
4283 if (hba->is_urgent_bkops_lvl_checked)
4284 goto enable_auto_bkops;
4285
4286 err = ufshcd_get_bkops_status(hba, &curr_status);
4287 if (err) {
4288 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
4289 __func__, err);
4290 goto out;
4291 }
4292
4293 /*
4294 * We are seeing that some devices are raising the urgent bkops
4295 * exception events even when the BKOPS status doesn't indicate performance
4296 * impacted or critical. Handle such devices by determining their urgent
4297 * bkops status at runtime.
4298 */
4299 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
4300 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
4301 __func__, curr_status);
4302 /* update the current status as the urgent bkops level */
4303 hba->urgent_bkops_lvl = curr_status;
4304 hba->is_urgent_bkops_lvl_checked = true;
4305 }
4306
4307enable_auto_bkops:
4308 err = ufshcd_enable_auto_bkops(hba);
4309out:
4310 if (err < 0)
4311 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
4312 __func__, err);
4313}
4314
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304315/**
4316 * ufshcd_exception_event_handler - handle exceptions raised by device
4317 * @work: pointer to work data
4318 *
4319 * Read bExceptionEventStatus attribute from the device and handle the
4320 * exception event accordingly.
4321 */
4322static void ufshcd_exception_event_handler(struct work_struct *work)
4323{
4324 struct ufs_hba *hba;
4325 int err;
4326 u32 status = 0;
4327 hba = container_of(work, struct ufs_hba, eeh_work);
4328
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05304329 pm_runtime_get_sync(hba->dev);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304330 err = ufshcd_get_ee_status(hba, &status);
4331 if (err) {
4332 dev_err(hba->dev, "%s: failed to get exception status %d\n",
4333 __func__, err);
4334 goto out;
4335 }
4336
4337 status &= hba->ee_ctrl_mask;
Yaniv Gardiafdfff52016-03-10 17:37:15 +02004338
4339 if (status & MASK_EE_URGENT_BKOPS)
4340 ufshcd_bkops_exception_event_handler(hba);
4341
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304342out:
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05304343 pm_runtime_put_sync(hba->dev);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304344 return;
4345}
4346
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004347/* Complete requests that have door-bell cleared */
4348static void ufshcd_complete_requests(struct ufs_hba *hba)
4349{
4350 ufshcd_transfer_req_compl(hba);
4351 ufshcd_tmc_handler(hba);
4352}
4353
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304354/**
Yaniv Gardi583fa622016-03-10 17:37:13 +02004355 * ufshcd_quirk_dl_nac_errors - check whether error handling is required
4356 * to recover from DL NAC errors.
4357 * @hba: per-adapter instance
4358 *
4359 * Returns true if error handling is required, false otherwise
4360 */
4361static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
4362{
4363 unsigned long flags;
4364 bool err_handling = true;
4365
4366 spin_lock_irqsave(hba->host->host_lock, flags);
4367 /*
4368 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
4369 * device fatal error and/or DL NAC & REPLAY timeout errors.
4370 */
4371 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
4372 goto out;
4373
4374 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
4375 ((hba->saved_err & UIC_ERROR) &&
4376 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
4377 goto out;
4378
4379 if ((hba->saved_err & UIC_ERROR) &&
4380 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
4381 int err;
4382 /*
4383 * Wait for 50ms to see if any other errors show up.
4384 */
4385 spin_unlock_irqrestore(hba->host->host_lock, flags);
4386 msleep(50);
4387 spin_lock_irqsave(hba->host->host_lock, flags);
4388
4389 /*
4390 * Now check whether we have received any severe errors other than
4391 * the DL NAC error.
4392 */
4393 if ((hba->saved_err & INT_FATAL_ERRORS) ||
4394 ((hba->saved_err & UIC_ERROR) &&
4395 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
4396 goto out;
4397
4398 /*
4399 * As DL NAC is the only error received so far, send out NOP
4400 * command to confirm if link is still active or not.
4401 * - If we don't get any response then do error recovery.
4402 * - If we get response then clear the DL NAC error bit.
4403 */
4404
4405 spin_unlock_irqrestore(hba->host->host_lock, flags);
4406 err = ufshcd_verify_dev_init(hba);
4407 spin_lock_irqsave(hba->host->host_lock, flags);
4408
4409 if (err)
4410 goto out;
4411
4412 /* Link seems to be alive hence ignore the DL NAC errors */
4413 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
4414 hba->saved_err &= ~UIC_ERROR;
4415 /* clear NAC error */
4416 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
4417 if (!hba->saved_uic_err) {
4418 err_handling = false;
4419 goto out;
4420 }
4421 }
4422out:
4423 spin_unlock_irqrestore(hba->host->host_lock, flags);
4424 return err_handling;
4425}
4426
4427/**
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304428 * ufshcd_err_handler - handle UFS errors that require s/w attention
4429 * @work: pointer to work structure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304430 */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304431static void ufshcd_err_handler(struct work_struct *work)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304432{
4433 struct ufs_hba *hba;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304434 unsigned long flags;
4435 u32 err_xfer = 0;
4436 u32 err_tm = 0;
4437 int err = 0;
4438 int tag;
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004439 bool needs_reset = false;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304440
4441 hba = container_of(work, struct ufs_hba, eh_work);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304442
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05304443 pm_runtime_get_sync(hba->dev);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004444 ufshcd_hold(hba, false);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304445
4446 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004447 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304448 goto out;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304449
4450 hba->ufshcd_state = UFSHCD_STATE_RESET;
4451 ufshcd_set_eh_in_progress(hba);
4452
4453 /* Complete requests that have door-bell cleared by h/w */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004454 ufshcd_complete_requests(hba);
Yaniv Gardi583fa622016-03-10 17:37:13 +02004455
4456 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
4457 bool ret;
4458
4459 spin_unlock_irqrestore(hba->host->host_lock, flags);
4460 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
4461 ret = ufshcd_quirk_dl_nac_errors(hba);
4462 spin_lock_irqsave(hba->host->host_lock, flags);
4463 if (!ret)
4464 goto skip_err_handling;
4465 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004466 if ((hba->saved_err & INT_FATAL_ERRORS) ||
4467 ((hba->saved_err & UIC_ERROR) &&
4468 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
4469 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
4470 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
4471 needs_reset = true;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304472
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004473 /*
4474 * if host reset is required then skip clearing the pending
4475 * transfers forcefully because they will automatically get
4476 * cleared after link startup.
4477 */
4478 if (needs_reset)
4479 goto skip_pending_xfer_clear;
4480
4481 /* release lock as clear command might sleep */
4482 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304483 /* Clear pending transfer requests */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004484 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
4485 if (ufshcd_clear_cmd(hba, tag)) {
4486 err_xfer = true;
4487 goto lock_skip_pending_xfer_clear;
4488 }
4489 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304490
4491 /* Clear pending task management requests */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004492 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
4493 if (ufshcd_clear_tm_cmd(hba, tag)) {
4494 err_tm = true;
4495 goto lock_skip_pending_xfer_clear;
4496 }
4497 }
4498
4499lock_skip_pending_xfer_clear:
4500 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304501
4502 /* Complete the requests that are cleared by s/w */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004503 ufshcd_complete_requests(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304504
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004505 if (err_xfer || err_tm)
4506 needs_reset = true;
4507
4508skip_pending_xfer_clear:
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304509 /* Fatal errors need reset */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004510 if (needs_reset) {
4511 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
4512
4513 /*
4514 * ufshcd_reset_and_restore() does the link reinitialization
4515 * which will need at least one empty doorbell slot to send the
4516 * device management commands (NOP and query commands).
4517 * If there is no slot empty at this moment then free up last
4518 * slot forcefully.
4519 */
4520 if (hba->outstanding_reqs == max_doorbells)
4521 __ufshcd_transfer_req_compl(hba,
4522 (1UL << (hba->nutrs - 1)));
4523
4524 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304525 err = ufshcd_reset_and_restore(hba);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004526 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304527 if (err) {
4528 dev_err(hba->dev, "%s: reset and restore failed\n",
4529 __func__);
4530 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4531 }
4532 /*
4533 * Inform scsi mid-layer that we did reset and allow it to handle
4534 * Unit Attention properly.
4535 */
4536 scsi_report_bus_reset(hba->host, 0);
4537 hba->saved_err = 0;
4538 hba->saved_uic_err = 0;
4539 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004540
Yaniv Gardi583fa622016-03-10 17:37:13 +02004541skip_err_handling:
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004542 if (!needs_reset) {
4543 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4544 if (hba->saved_err || hba->saved_uic_err)
4545 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
4546 __func__, hba->saved_err, hba->saved_uic_err);
4547 }
4548
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304549 ufshcd_clear_eh_in_progress(hba);
4550
4551out:
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004552 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304553 scsi_unblock_requests(hba->host);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004554 ufshcd_release(hba);
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05304555 pm_runtime_put_sync(hba->dev);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304556}
4557
4558/**
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304559 * ufshcd_update_uic_error - check and set fatal UIC error flags.
4560 * @hba: per-adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304561 */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304562static void ufshcd_update_uic_error(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304563{
4564 u32 reg;
4565
Dolev Ravivfb7b45f2016-11-23 16:32:32 -08004566 /* PHY layer lane error */
4567 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
4568 /* Ignore LINERESET indication, as this is not an error */
4569 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
4570 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK))
4571 /*
4572 * To know whether this error is fatal or not, the DB timeout
4573 * must also be checked; but that error is handled separately.
4574 */
4575 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
4576
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304577 /* PA_INIT_ERROR is fatal and needs UIC reset */
4578 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
4579 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
4580 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
Yaniv Gardi583fa622016-03-10 17:37:13 +02004581 else if (hba->dev_quirks &
4582 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
4583 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
4584 hba->uic_error |=
4585 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
4586 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
4587 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
4588 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304589
4590	/* UIC NL/TL/DME errors need software retry */
4591 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
4592 if (reg)
4593 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
4594
4595 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
4596 if (reg)
4597 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
4598
4599 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
4600 if (reg)
4601 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
4602
4603 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
4604 __func__, hba->uic_error);
4605}
4606
4607/**
4608 * ufshcd_check_errors - Check for errors that need s/w attention
4609 * @hba: per-adapter instance
4610 */
4611static void ufshcd_check_errors(struct ufs_hba *hba)
4612{
4613 bool queue_eh_work = false;
4614
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304615 if (hba->errors & INT_FATAL_ERRORS)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304616 queue_eh_work = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304617
4618 if (hba->errors & UIC_ERROR) {
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304619 hba->uic_error = 0;
4620 ufshcd_update_uic_error(hba);
4621 if (hba->uic_error)
4622 queue_eh_work = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304623 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304624
4625 if (queue_eh_work) {
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004626 /*
4627 * update the transfer error masks to sticky bits, let's do this
4628 * irrespective of current ufshcd_state.
4629 */
4630 hba->saved_err |= hba->errors;
4631 hba->saved_uic_err |= hba->uic_error;
4632
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304633 /* handle fatal errors only when link is functional */
4634 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
4635 /* block commands from scsi mid-layer */
4636 scsi_block_requests(hba->host);
4637
Zang Leigang141f8162016-11-16 11:29:37 +08004638 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
Dolev Raviv66cc8202016-12-22 18:39:42 -08004639
4640 /* dump controller state before resetting */
4641 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
4642 bool pr_prdt = !!(hba->saved_err &
4643 SYSTEM_BUS_FATAL_ERROR);
4644
4645 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
4646 __func__, hba->saved_err,
4647 hba->saved_uic_err);
4648
4649 ufshcd_print_host_regs(hba);
4650 ufshcd_print_pwr_info(hba);
4651 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
4652 ufshcd_print_trs(hba, hba->outstanding_reqs,
4653 pr_prdt);
4654 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304655 schedule_work(&hba->eh_work);
4656 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304657 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304658 /*
4659 * if (!queue_eh_work) -
4660	 * Other errors are either non-fatal, where the host recovers
4661	 * by itself without s/w intervention, or errors that will be
4662 * handled by the SCSI core layer.
4663 */
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304664}
4665
4666/**
4667 * ufshcd_tmc_handler - handle task management function completion
4668 * @hba: per adapter instance
4669 */
4670static void ufshcd_tmc_handler(struct ufs_hba *hba)
4671{
4672 u32 tm_doorbell;
4673
Seungwon Jeonb873a2752013-06-26 22:39:26 +05304674 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304675 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304676 wake_up(&hba->tm_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304677}
4678
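/*
 * Editor's illustration (not part of the driver): why the XOR in
 * ufshcd_tmc_handler() yields the completed slots. A bit is set in
 * outstanding_tasks while a TM request is in flight and is cleared in
 * the doorbell register once the controller finishes it, so
 * (doorbell ^ outstanding) has exactly the completed slots set.
 */
static unsigned long __maybe_unused
example_completed_tm_slots(u32 tm_doorbell, unsigned long outstanding)
{
	/* e.g. outstanding = 0b0110, doorbell = 0b0100 -> 0b0010: slot 1 done */
	return tm_doorbell ^ outstanding;
}
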
4679/**
4680 * ufshcd_sl_intr - Interrupt service routine
4681 * @hba: per adapter instance
4682 * @intr_status: contains interrupts generated by the controller
4683 */
4684static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
4685{
4686 hba->errors = UFSHCD_ERROR_MASK & intr_status;
4687 if (hba->errors)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304688 ufshcd_check_errors(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304689
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304690 if (intr_status & UFSHCD_UIC_MASK)
4691 ufshcd_uic_cmd_compl(hba, intr_status);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304692
4693 if (intr_status & UTP_TASK_REQ_COMPL)
4694 ufshcd_tmc_handler(hba);
4695
4696 if (intr_status & UTP_TRANSFER_REQ_COMPL)
4697 ufshcd_transfer_req_compl(hba);
4698}
4699
4700/**
4701 * ufshcd_intr - Main interrupt service routine
4702 * @irq: irq number
4703 * @__hba: pointer to adapter instance
4704 *
4705 * Returns IRQ_HANDLED - If interrupt is valid
4706 * IRQ_NONE - If invalid interrupt
4707 */
4708static irqreturn_t ufshcd_intr(int irq, void *__hba)
4709{
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004710 u32 intr_status, enabled_intr_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304711 irqreturn_t retval = IRQ_NONE;
4712 struct ufs_hba *hba = __hba;
4713
4714 spin_lock(hba->host->host_lock);
Seungwon Jeonb873a2752013-06-26 22:39:26 +05304715 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004716 enabled_intr_status =
4717 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304718
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004719 if (intr_status)
Seungwon Jeon261ea452013-06-26 22:39:28 +05304720 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004721
4722 if (enabled_intr_status) {
4723 ufshcd_sl_intr(hba, enabled_intr_status);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304724 retval = IRQ_HANDLED;
4725 }
4726 spin_unlock(hba->host->host_lock);
4727 return retval;
4728}
4729
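/*
 * Editor's illustration (not part of the driver): the ISR above only
 * services interrupt bits that are both raised and enabled, which is
 * what lets it return IRQ_NONE for spurious events on a shared line.
 */
static bool __maybe_unused example_irq_is_ours(u32 status, u32 enable)
{
	/* e.g. status = 0x00030001, enable = 0x00030000 -> service 0x00030000 */
	return (status & enable) != 0;
}
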
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304730static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
4731{
4732 int err = 0;
4733 u32 mask = 1 << tag;
4734 unsigned long flags;
4735
4736 if (!test_bit(tag, &hba->outstanding_tasks))
4737 goto out;
4738
4739 spin_lock_irqsave(hba->host->host_lock, flags);
4740 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
4741 spin_unlock_irqrestore(hba->host->host_lock, flags);
4742
4743 /* poll for max. 1 sec to clear door bell register by h/w */
4744 err = ufshcd_wait_for_register(hba,
4745 REG_UTP_TASK_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02004746 mask, 0, 1000, 1000, true);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304747out:
4748 return err;
4749}
4750
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304751/**
4752 * ufshcd_issue_tm_cmd - issues task management commands to controller
4753 * @hba: per adapter instance
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304754 * @lun_id: LUN ID to which TM command is sent
4755 * @task_id: task ID to which the TM command is applicable
4756 * @tm_function: task management function opcode
4757 * @tm_response: task management service response return value
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304758 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304759 * Returns non-zero value on error, zero on success.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304760 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304761static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
4762 u8 tm_function, u8 *tm_response)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304763{
4764 struct utp_task_req_desc *task_req_descp;
4765 struct utp_upiu_task_req *task_req_upiup;
4766 struct Scsi_Host *host;
4767 unsigned long flags;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304768 int free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304769 int err;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304770 int task_tag;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304771
4772 host = hba->host;
4773
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304774 /*
4775 * Get free slot, sleep if slots are unavailable.
4776 * Even though we use wait_event() which sleeps indefinitely,
4777 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
4778 */
4779 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004780 ufshcd_hold(hba, false);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304781
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304782 spin_lock_irqsave(host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304783 task_req_descp = hba->utmrdl_base_addr;
4784 task_req_descp += free_slot;
4785
4786 /* Configure task request descriptor */
4787 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
4788 task_req_descp->header.dword_2 =
4789 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
4790
4791 /* Configure task request UPIU */
4792 task_req_upiup =
4793 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304794 task_tag = hba->nutrs + free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304795 task_req_upiup->header.dword_0 =
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304796 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304797 lun_id, task_tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304798 task_req_upiup->header.dword_1 =
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304799 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004800 /*
4801 * The host shall provide the same value for LUN field in the basic
4802 * header and for Input Parameter.
4803 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304804 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
4805 task_req_upiup->input_param2 = cpu_to_be32(task_id);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304806
Kiwoong Kimd2877be2016-11-10 21:16:15 +09004807 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
4808
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304809 /* send command to the controller */
4810 __set_bit(free_slot, &hba->outstanding_tasks);
Yaniv Gardi897efe62016-02-01 15:02:48 +02004811
4812 /* Make sure descriptors are ready before ringing the task doorbell */
4813 wmb();
4814
Seungwon Jeonb873a2752013-06-26 22:39:26 +05304815 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
Gilad Bronerad1a1b92016-10-17 17:09:36 -07004816 /* Make sure that doorbell is committed immediately */
4817 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304818
4819 spin_unlock_irqrestore(host->host_lock, flags);
4820
4821 /* wait until the task management command is completed */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304822 err = wait_event_timeout(hba->tm_wq,
4823 test_bit(free_slot, &hba->tm_condition),
4824 msecs_to_jiffies(TM_CMD_TIMEOUT));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304825 if (!err) {
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304826 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
4827 __func__, tm_function);
4828 if (ufshcd_clear_tm_cmd(hba, free_slot))
4829			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
4830 __func__, free_slot);
4831 err = -ETIMEDOUT;
4832 } else {
4833 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304834 }
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304835
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304836 clear_bit(free_slot, &hba->tm_condition);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304837 ufshcd_put_tm_slot(hba, free_slot);
4838 wake_up(&hba->tm_tag_wq);
4839
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004840 ufshcd_release(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304841 return err;
4842}
4843
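/*
 * Editor's illustration (not part of the driver): task management
 * requests are tagged above the transfer request tag range, so a single
 * tag space covers both rings. The sample sizes are assumptions.
 */
static int __maybe_unused example_tm_slot_to_tag(int nutrs, int free_slot)
{
	/* e.g. nutrs = 32: TM slots 0..7 become tags 32..39 */
	return nutrs + free_slot;	/* mirrors task_tag above */
}
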
4844/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304845 * ufshcd_eh_device_reset_handler - device reset handler registered to
4846 * scsi layer.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304847 * @cmd: SCSI command pointer
4848 *
4849 * Returns SUCCESS/FAILED
4850 */
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304851static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304852{
4853 struct Scsi_Host *host;
4854 struct ufs_hba *hba;
4855 unsigned int tag;
4856 u32 pos;
4857 int err;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304858 u8 resp = 0xF;
4859 struct ufshcd_lrb *lrbp;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304860 unsigned long flags;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304861
4862 host = cmd->device->host;
4863 hba = shost_priv(host);
4864 tag = cmd->request->tag;
4865
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304866 lrbp = &hba->lrb[tag];
4867 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
4868 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304869 if (!err)
4870 err = resp;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304871 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304872 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304873
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304874 /* clear the commands that were pending for corresponding LUN */
4875 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
4876 if (hba->lrb[pos].lun == lrbp->lun) {
4877 err = ufshcd_clear_cmd(hba, pos);
4878 if (err)
4879 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304880 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304881 }
4882 spin_lock_irqsave(host->host_lock, flags);
4883 ufshcd_transfer_req_compl(hba);
4884 spin_unlock_irqrestore(host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304885out:
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304886 if (!err) {
4887 err = SUCCESS;
4888 } else {
4889 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
4890 err = FAILED;
4891 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304892 return err;
4893}
4894
4895/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304896 * ufshcd_abort - abort a specific command
4897 * @cmd: SCSI command pointer
4898 *
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304899 * Abort the pending command in the device by sending UFS_ABORT_TASK task
4900 * management command, and in the host controller by clearing the door-bell
4901 * register. There can be a race between the controller sending the command
4902 * to the device and the abort being issued. To avoid that, first issue
4903 * UFS_QUERY_TASK to check if the command was really issued, then try to abort it.
4904 *
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304905 * Returns SUCCESS/FAILED
4906 */
4907static int ufshcd_abort(struct scsi_cmnd *cmd)
4908{
4909 struct Scsi_Host *host;
4910 struct ufs_hba *hba;
4911 unsigned long flags;
4912 unsigned int tag;
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304913 int err = 0;
4914 int poll_cnt;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304915 u8 resp = 0xF;
4916 struct ufshcd_lrb *lrbp;
Dolev Ravive9d501b2014-07-01 12:22:37 +03004917 u32 reg;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304918
4919 host = cmd->device->host;
4920 hba = shost_priv(host);
4921 tag = cmd->request->tag;
Dolev Ravive7d38252016-12-22 18:40:07 -08004922 lrbp = &hba->lrb[tag];
Yaniv Gardi14497322016-02-01 15:02:39 +02004923 if (!ufshcd_valid_tag(hba, tag)) {
4924 dev_err(hba->dev,
4925 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
4926 __func__, tag, cmd, cmd->request);
4927 BUG();
4928 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304929
Dolev Ravive7d38252016-12-22 18:40:07 -08004930 /*
4931	 * A task abort to the device W-LUN is illegal. Such a command would
4932	 * fail due to the spec violation, and the next SCSI error handling
4933	 * step would be an LU reset which, again, is a spec violation.
4934	 * To avoid these unnecessary/illegal steps we skip to the last error
4935 * handling stage: reset and restore.
4936 */
4937 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
4938 return ufshcd_eh_host_reset_handler(cmd);
4939
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004940 ufshcd_hold(hba, false);
Dolev Ravive9d501b2014-07-01 12:22:37 +03004941 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Yaniv Gardi14497322016-02-01 15:02:39 +02004942 /* If command is already aborted/completed, return SUCCESS */
4943 if (!(test_bit(tag, &hba->outstanding_reqs))) {
4944 dev_err(hba->dev,
4945 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
4946 __func__, tag, hba->outstanding_reqs, reg);
4947 goto out;
4948 }
4949
Dolev Ravive9d501b2014-07-01 12:22:37 +03004950 if (!(reg & (1 << tag))) {
4951 dev_err(hba->dev,
4952 "%s: cmd was completed, but without a notifying intr, tag = %d",
4953 __func__, tag);
4954 }
4955
Dolev Raviv66cc8202016-12-22 18:39:42 -08004956 /* Print Transfer Request of aborted task */
4957 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
4958 scsi_print_command(hba->lrb[tag].cmd);
4959 ufshcd_print_host_regs(hba);
4960 ufshcd_print_pwr_info(hba);
4961 ufshcd_print_trs(hba, 1 << tag, true);
4962
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304963 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
4964 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
4965 UFS_QUERY_TASK, &resp);
4966 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
4967 /* cmd pending in the device */
4968 break;
4969 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304970 /*
4971 * cmd not pending in the device, check if it is
4972 * in transition.
4973 */
4974 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4975 if (reg & (1 << tag)) {
4976 /* sleep for max. 200us to stabilize */
4977 usleep_range(100, 200);
4978 continue;
4979 }
4980 /* command completed already */
4981 goto out;
4982 } else {
4983 if (!err)
4984 err = resp; /* service response error */
4985 goto out;
4986 }
4987 }
4988
4989 if (!poll_cnt) {
4990 err = -EBUSY;
4991 goto out;
4992 }
4993
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304994 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
4995 UFS_ABORT_TASK, &resp);
4996 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304997 if (!err)
4998 err = resp; /* service response error */
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304999 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305000 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305001
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305002 err = ufshcd_clear_cmd(hba, tag);
5003 if (err)
5004 goto out;
5005
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305006 scsi_dma_unmap(cmd);
5007
5008 spin_lock_irqsave(host->host_lock, flags);
Yaniv Gardia48353f2016-02-01 15:02:40 +02005009 ufshcd_outstanding_req_clear(hba, tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305010 hba->lrb[tag].cmd = NULL;
5011 spin_unlock_irqrestore(host->host_lock, flags);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305012
5013 clear_bit_unlock(tag, &hba->lrb_in_use);
5014 wake_up(&hba->dev_cmd.tag_wq);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005015
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305016out:
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05305017 if (!err) {
5018 err = SUCCESS;
5019 } else {
5020 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5021 err = FAILED;
5022 }
5023
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005024 /*
5025 * This ufshcd_release() corresponds to the original scsi cmd that got
5026 * aborted here (as we won't get any IRQ for it).
5027 */
5028 ufshcd_release(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305029 return err;
5030}
5031
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305032/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305033 * ufshcd_host_reset_and_restore - reset and restore host controller
5034 * @hba: per-adapter instance
5035 *
5036 * Note that host controller reset may issue DME_RESET to
5037 * local and remote (device) Uni-Pro stack and the attributes
5038 * are reset to default state.
5039 *
5040 * Returns zero on success, non-zero on failure
5041 */
5042static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
5043{
5044 int err;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305045 unsigned long flags;
5046
5047 /* Reset the host controller */
5048 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardi596585a2016-03-10 17:37:08 +02005049 ufshcd_hba_stop(hba, false);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305050 spin_unlock_irqrestore(hba->host->host_lock, flags);
5051
5052 err = ufshcd_hba_enable(hba);
5053 if (err)
5054 goto out;
5055
5056 /* Establish the link again and restore the device */
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005057 err = ufshcd_probe_hba(hba);
5058
5059 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305060 err = -EIO;
5061out:
5062 if (err)
5063 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
5064
5065 return err;
5066}
5067
5068/**
5069 * ufshcd_reset_and_restore - reset and re-initialize host/device
5070 * @hba: per-adapter instance
5071 *
5072 * Reset and recover device, host and re-establish link. This
5073 * is helpful to recover the communication in fatal error conditions.
5074 *
5075 * Returns zero on success, non-zero on failure
5076 */
5077static int ufshcd_reset_and_restore(struct ufs_hba *hba)
5078{
5079 int err = 0;
5080 unsigned long flags;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005081 int retries = MAX_HOST_RESET_RETRIES;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305082
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005083 do {
5084 err = ufshcd_host_reset_and_restore(hba);
5085 } while (err && --retries);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305086
5087 /*
5088 * After reset the door-bell might be cleared, complete
5089 * outstanding requests in s/w here.
5090 */
5091 spin_lock_irqsave(hba->host->host_lock, flags);
5092 ufshcd_transfer_req_compl(hba);
5093 ufshcd_tmc_handler(hba);
5094 spin_unlock_irqrestore(hba->host->host_lock, flags);
5095
5096 return err;
5097}
5098
5099/**
5100 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
5101 * @cmd: SCSI command pointer
5102 *
5103 * Returns SUCCESS/FAILED
5104 */
5105static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
5106{
5107 int err;
5108 unsigned long flags;
5109 struct ufs_hba *hba;
5110
5111 hba = shost_priv(cmd->device->host);
5112
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005113 ufshcd_hold(hba, false);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305114 /*
5115 * Check if there is any race with fatal error handling.
5116 * If so, wait for it to complete. Even though fatal error
5117 * handling does reset and restore in some cases, don't assume
5118 * anything out of it. We are just avoiding race here.
5119 */
5120 do {
5121 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305122 if (!(work_pending(&hba->eh_work) ||
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305123 hba->ufshcd_state == UFSHCD_STATE_RESET))
5124 break;
5125 spin_unlock_irqrestore(hba->host->host_lock, flags);
5126 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305127 flush_work(&hba->eh_work);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305128 } while (1);
5129
5130 hba->ufshcd_state = UFSHCD_STATE_RESET;
5131 ufshcd_set_eh_in_progress(hba);
5132 spin_unlock_irqrestore(hba->host->host_lock, flags);
5133
5134 err = ufshcd_reset_and_restore(hba);
5135
5136 spin_lock_irqsave(hba->host->host_lock, flags);
5137 if (!err) {
5138 err = SUCCESS;
5139 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5140 } else {
5141 err = FAILED;
5142 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5143 }
5144 ufshcd_clear_eh_in_progress(hba);
5145 spin_unlock_irqrestore(hba->host->host_lock, flags);
5146
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005147 ufshcd_release(hba);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305148 return err;
5149}
5150
5151/**
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03005152 * ufshcd_get_max_icc_level - calculate the ICC level
5153 * @sup_curr_uA: max. current supported by the regulator
5154 * @start_scan: row at the desc table to start scan from
5155 * @buff: power descriptor buffer
5156 *
5157 * Returns calculated max ICC level for specific regulator
5158 */
5159static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
5160{
5161 int i;
5162 int curr_uA;
5163 u16 data;
5164 u16 unit;
5165
5166 for (i = start_scan; i >= 0; i--) {
5167 data = be16_to_cpu(*((u16 *)(buff + 2*i)));
5168 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
5169 ATTR_ICC_LVL_UNIT_OFFSET;
5170 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
5171 switch (unit) {
5172 case UFSHCD_NANO_AMP:
5173 curr_uA = curr_uA / 1000;
5174 break;
5175 case UFSHCD_MILI_AMP:
5176 curr_uA = curr_uA * 1000;
5177 break;
5178 case UFSHCD_AMP:
5179 curr_uA = curr_uA * 1000 * 1000;
5180 break;
5181 case UFSHCD_MICRO_AMP:
5182 default:
5183 break;
5184 }
5185 if (sup_curr_uA >= curr_uA)
5186 break;
5187 }
5188 if (i < 0) {
5189 i = 0;
5190 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
5191 }
5192
5193 return (u32)i;
5194}
5195
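/*
 * Editor's illustration (not part of the driver): decoding one 16-bit
 * entry of the power descriptor's active ICC level table into
 * micro-amps, as the loop above does. The field layout (2-bit unit in
 * bits 15:14, value in the low 10 bits) and the unit encoding are
 * assumptions spelled out for this sketch.
 */
static int __maybe_unused example_icc_entry_to_uA(u16 data)
{
	u16 unit = (data >> 14) & 0x3;	/* assumed ATTR_ICC_LVL_UNIT field */
	int curr = data & 0x3FF;	/* assumed ATTR_ICC_LVL_VALUE field */

	switch (unit) {
	case 0: return curr / 1000;		/* nano-amp -> uA */
	case 1: return curr;			/* already micro-amp */
	case 2: return curr * 1000;		/* milli-amp -> uA */
	default: return curr * 1000 * 1000;	/* amp -> uA */
	}
}
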
5196/**
5197 * ufshcd_find_max_sup_active_icc_level - calculate the max supported active ICC level
5198 * @hba: per-adapter instance
5199 * @desc_buf: power descriptor buffer to extract ICC levels from.
5200 * @len: length of desc_buf
5201 * In case regulators are not initialized we'll return 0.
5202 *
5203 * Returns calculated ICC level
5204 */
5205static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
5206 u8 *desc_buf, int len)
5207{
5208 u32 icc_level = 0;
5209
5210 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
5211 !hba->vreg_info.vccq2) {
5212 dev_err(hba->dev,
5213 "%s: Regulator capability was not set, actvIccLevel=%d",
5214 __func__, icc_level);
5215 goto out;
5216 }
5217
5218 if (hba->vreg_info.vcc)
5219 icc_level = ufshcd_get_max_icc_level(
5220 hba->vreg_info.vcc->max_uA,
5221 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
5222 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
5223
5224 if (hba->vreg_info.vccq)
5225 icc_level = ufshcd_get_max_icc_level(
5226 hba->vreg_info.vccq->max_uA,
5227 icc_level,
5228 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
5229
5230 if (hba->vreg_info.vccq2)
5231 icc_level = ufshcd_get_max_icc_level(
5232 hba->vreg_info.vccq2->max_uA,
5233 icc_level,
5234 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
5235out:
5236 return icc_level;
5237}
5238
Dolev Raviv61e07352016-11-23 16:30:49 -08005239static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
5240{
5241 int ret = 0;
5242 int retries;
5243
5244 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
5245 /* write attribute */
5246 ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5247 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
5248 if (!ret)
5249 break;
5250
5251 dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret);
5252 }
5253
5254 return ret;
5255}
5256
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03005257static void ufshcd_init_icc_levels(struct ufs_hba *hba)
5258{
5259 int ret;
5260 int buff_len = QUERY_DESC_POWER_MAX_SIZE;
5261 u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
5262
5263 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
5264 if (ret) {
5265 dev_err(hba->dev,
5266 "%s: Failed reading power descriptor.len = %d ret = %d",
5267 __func__, buff_len, ret);
5268 return;
5269 }
5270
5271 hba->init_prefetch_data.icc_level =
5272 ufshcd_find_max_sup_active_icc_level(hba,
5273 desc_buf, buff_len);
5274 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
5275 __func__, hba->init_prefetch_data.icc_level);
5276
Dolev Raviv61e07352016-11-23 16:30:49 -08005277 ret = ufshcd_set_icc_levels_attr(hba,
5278 hba->init_prefetch_data.icc_level);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03005279
5280 if (ret)
5281 dev_err(hba->dev,
5282 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
5283			__func__, hba->init_prefetch_data.icc_level, ret);
5284
5285}
5286
5287/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03005288 * ufshcd_scsi_add_wlus - Adds required W-LUs
5289 * @hba: per-adapter instance
5290 *
5291 * UFS device specification requires the UFS devices to support 4 well known
5292 * logical units:
5293 * "REPORT_LUNS" (address: 01h)
5294 * "UFS Device" (address: 50h)
5295 * "RPMB" (address: 44h)
5296 * "BOOT" (address: 30h)
5297 * UFS device's power management needs to be controlled by "POWER CONDITION"
5298 * field of SSU (START STOP UNIT) command. But this "power condition" field
5299 * will take effect only when it's sent to "UFS device" well known logical unit,
5300 * hence we require the scsi_device instance to represent this logical unit in
5301 * order for the UFS host driver to send the SSU command for power management.
5302 *
5303 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
5304 * Block) LU so a user space process can control this LU. User space may also
5305 * want to have access to BOOT LU.
5306 *
5307 * This function adds scsi device instances for all well known LUs
5308 * (except "REPORT LUNS" LU).
5309 *
5310 * Returns zero on success (all required W-LUs are added successfully),
5311 * non-zero error value on failure (if failed to add any of the required W-LU).
5312 */
5313static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
5314{
5315 int ret = 0;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005316 struct scsi_device *sdev_rpmb;
5317 struct scsi_device *sdev_boot;
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03005318
5319 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
5320 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
5321 if (IS_ERR(hba->sdev_ufs_device)) {
5322 ret = PTR_ERR(hba->sdev_ufs_device);
5323 hba->sdev_ufs_device = NULL;
5324 goto out;
5325 }
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005326 scsi_device_put(hba->sdev_ufs_device);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03005327
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005328 sdev_boot = __scsi_add_device(hba->host, 0, 0,
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03005329 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005330 if (IS_ERR(sdev_boot)) {
5331 ret = PTR_ERR(sdev_boot);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03005332 goto remove_sdev_ufs_device;
5333 }
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005334 scsi_device_put(sdev_boot);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03005335
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005336 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03005337 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005338 if (IS_ERR(sdev_rpmb)) {
5339 ret = PTR_ERR(sdev_rpmb);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03005340 goto remove_sdev_boot;
5341 }
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005342 scsi_device_put(sdev_rpmb);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03005343 goto out;
5344
5345remove_sdev_boot:
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005346 scsi_remove_device(sdev_boot);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03005347remove_sdev_ufs_device:
5348 scsi_remove_device(hba->sdev_ufs_device);
5349out:
5350 return ret;
5351}
5352
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02005353static int ufs_get_device_info(struct ufs_hba *hba,
5354 struct ufs_device_info *card_data)
5355{
5356 int err;
5357 u8 model_index;
5358 u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
5359 u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
5360
5361 err = ufshcd_read_device_desc(hba, desc_buf,
5362 QUERY_DESC_DEVICE_MAX_SIZE);
5363 if (err) {
5364 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
5365 __func__, err);
5366 goto out;
5367 }
5368
5369 /*
5370	 * The vendor id (wManufacturerID) is stored in big endian format
5371	 * in the raw device descriptor
5372 */
5373 card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
5374 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
5375
5376 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
5377
5378 err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
5379 QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
5380 if (err) {
5381 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
5382 __func__, err);
5383 goto out;
5384 }
5385
5386 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
5387 strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
5388 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
5389 MAX_MODEL_LEN));
5390
5391 /* Null terminate the model string */
5392 card_data->model[MAX_MODEL_LEN] = '\0';
5393
5394out:
5395 return err;
5396}
5397
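/*
 * Editor's illustration (not part of the driver): multi-byte fields in
 * the raw device descriptor are big endian, hence the manual byte
 * assembly of wManufacturerID above.
 */
static u16 __maybe_unused example_desc_be16(const u8 *desc_buf, int off)
{
	/* e.g. bytes {0x01, 0xCE} -> 0x01CE */
	return (desc_buf[off] << 8) | desc_buf[off + 1];
}
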
5398void ufs_advertise_fixup_device(struct ufs_hba *hba)
5399{
5400 int err;
5401 struct ufs_dev_fix *f;
5402 struct ufs_device_info card_data;
5403
5404 card_data.wmanufacturerid = 0;
5405
5406 err = ufs_get_device_info(hba, &card_data);
5407 if (err) {
5408 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
5409 __func__, err);
5410 return;
5411 }
5412
5413 for (f = ufs_fixups; f->quirk; f++) {
5414 if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
5415 (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
5416 (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
5417 !strcmp(f->card.model, UFS_ANY_MODEL)))
5418 hba->dev_quirks |= f->quirk;
5419 }
5420}
5421
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03005422/**
Yaniv Gardi37113102016-03-10 17:37:16 +02005423 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
5424 * @hba: per-adapter instance
5425 *
5426 * PA_TActivate parameter can be tuned manually if UniPro version is less than
5427 * 1.61. PA_TActivate needs to be greater than or equal to peer M-PHY's
5428 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
5429 * the hibern8 exit latency.
5430 *
5431 * Returns zero on success, non-zero error value on failure.
5432 */
5433static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
5434{
5435 int ret = 0;
5436 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
5437
5438 ret = ufshcd_dme_peer_get(hba,
5439 UIC_ARG_MIB_SEL(
5440 RX_MIN_ACTIVATETIME_CAPABILITY,
5441 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
5442 &peer_rx_min_activatetime);
5443 if (ret)
5444 goto out;
5445
5446 /* make sure proper unit conversion is applied */
5447 tuned_pa_tactivate =
5448 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
5449 / PA_TACTIVATE_TIME_UNIT_US);
5450 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
5451 tuned_pa_tactivate);
5452
5453out:
5454 return ret;
5455}
5456
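/*
 * Editor's illustration (not part of the driver): the unit conversion
 * in ufshcd_tune_pa_tactivate(). The capability is read in
 * RX_MIN_ACTIVATETIME units while PA_TActivate is programmed in its
 * own, smaller time unit; the per-unit microsecond sizes are passed in
 * here rather than assumed.
 */
static u32 __maybe_unused
example_tuned_pa_tactivate(u32 peer_min_activatetime, u32 rx_unit_us,
			   u32 pa_unit_us)
{
	/* e.g. 5 units x 100us, with a 10us PA unit -> PA_TACTIVATE = 50 */
	return (peer_min_activatetime * rx_unit_us) / pa_unit_us;
}
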
5457/**
5458 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
5459 * @hba: per-adapter instance
5460 *
5461 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
5462 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
5463 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
5464 * This optimal value can help reduce the hibern8 exit latency.
5465 *
5466 * Returns zero on success, non-zero error value on failure.
5467 */
5468static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
5469{
5470 int ret = 0;
5471 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
5472 u32 max_hibern8_time, tuned_pa_hibern8time;
5473
5474 ret = ufshcd_dme_get(hba,
5475 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
5476 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
5477 &local_tx_hibern8_time_cap);
5478 if (ret)
5479 goto out;
5480
5481 ret = ufshcd_dme_peer_get(hba,
5482 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
5483 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
5484 &peer_rx_hibern8_time_cap);
5485 if (ret)
5486 goto out;
5487
5488 max_hibern8_time = max(local_tx_hibern8_time_cap,
5489 peer_rx_hibern8_time_cap);
5490 /* make sure proper unit conversion is applied */
5491 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
5492 / PA_HIBERN8_TIME_UNIT_US);
5493 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
5494 tuned_pa_hibern8time);
5495out:
5496 return ret;
5497}
5498
subhashj@codeaurora.orgc6a6db42016-11-23 16:32:08 -08005499/**
5500 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
5501 * less than device PA_TACTIVATE time.
5502 * @hba: per-adapter instance
5503 *
5504 * Some UFS devices require host PA_TACTIVATE to be lower than device
5505 * PA_TACTIVATE; we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
5506 * for such devices.
5507 *
5508 * Returns zero on success, non-zero error value on failure.
5509 */
5510static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
5511{
5512 int ret = 0;
5513 u32 granularity, peer_granularity;
5514 u32 pa_tactivate, peer_pa_tactivate;
5515 u32 pa_tactivate_us, peer_pa_tactivate_us;
5516 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
5517
5518 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
5519 &granularity);
5520 if (ret)
5521 goto out;
5522
5523 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
5524 &peer_granularity);
5525 if (ret)
5526 goto out;
5527
5528 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
5529 (granularity > PA_GRANULARITY_MAX_VAL)) {
5530 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
5531 __func__, granularity);
5532 return -EINVAL;
5533 }
5534
5535 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
5536 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
5537 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
5538 __func__, peer_granularity);
5539 return -EINVAL;
5540 }
5541
5542 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
5543 if (ret)
5544 goto out;
5545
5546 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
5547 &peer_pa_tactivate);
5548 if (ret)
5549 goto out;
5550
5551 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
5552 peer_pa_tactivate_us = peer_pa_tactivate *
5553 gran_to_us_table[peer_granularity - 1];
5554
5555 if (pa_tactivate_us > peer_pa_tactivate_us) {
5556 u32 new_peer_pa_tactivate;
5557
5558 new_peer_pa_tactivate = pa_tactivate_us /
5559 gran_to_us_table[peer_granularity - 1];
5560 new_peer_pa_tactivate++;
5561 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
5562 new_peer_pa_tactivate);
5563 }
5564
5565out:
5566 return ret;
5567}
5568
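/*
 * Editor's illustration (not part of the driver): the granularity
 * arithmetic in ufshcd_quirk_tune_host_pa_tactivate(). PA_TACTIVATE is
 * expressed in PA_GRANULARITY units (a 1-based index into
 * {1, 4, 8, 16, 32, 100} microseconds), so both sides are converted to
 * microseconds before comparing, and the peer value is rounded up by
 * one unit when it has to be raised.
 */
static u32 __maybe_unused
example_bump_peer_tactivate(u32 host_tact, u32 host_gran, u32 peer_gran)
{
	static const u8 gran_to_us[] = {1, 4, 8, 16, 32, 100};
	u32 host_us = host_tact * gran_to_us[host_gran - 1];

	/*
	 * e.g. host_tact = 3 at granularity 4 (16us) -> 48us; with peer
	 * granularity 2 (4us) the new peer PA_TACTIVATE is 48/4 + 1 = 13.
	 */
	return host_us / gran_to_us[peer_gran - 1] + 1;
}
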
Yaniv Gardi37113102016-03-10 17:37:16 +02005569static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
5570{
5571 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
5572 ufshcd_tune_pa_tactivate(hba);
5573 ufshcd_tune_pa_hibern8time(hba);
5574 }
5575
5576 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
5577 /* set 1ms timeout for PA_TACTIVATE */
5578 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
subhashj@codeaurora.orgc6a6db42016-11-23 16:32:08 -08005579
5580 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
5581 ufshcd_quirk_tune_host_pa_tactivate(hba);
Subhash Jadavani56d4a182016-12-05 19:25:32 -08005582
5583 ufshcd_vops_apply_dev_quirks(hba);
Yaniv Gardi37113102016-03-10 17:37:16 +02005584}
5585
5586/**
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005587 * ufshcd_probe_hba - probe hba to detect device and initialize
5588 * @hba: per-adapter instance
5589 *
5590 * Execute link-startup and verify device initialization
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305591 */
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005592static int ufshcd_probe_hba(struct ufs_hba *hba)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305593{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305594 int ret;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08005595 ktime_t start = ktime_get();
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305596
5597 ret = ufshcd_link_startup(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305598 if (ret)
5599 goto out;
5600
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005601 /* set the default level for urgent bkops */
5602 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5603 hba->is_urgent_bkops_lvl_checked = false;
5604
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005605 /* UniPro link is active now */
5606 ufshcd_set_link_active(hba);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05305607
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305608 ret = ufshcd_verify_dev_init(hba);
5609 if (ret)
5610 goto out;
5611
Dolev Raviv68078d52013-07-30 00:35:58 +05305612 ret = ufshcd_complete_dev_init(hba);
5613 if (ret)
5614 goto out;
5615
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02005616 ufs_advertise_fixup_device(hba);
Yaniv Gardi37113102016-03-10 17:37:16 +02005617 ufshcd_tune_unipro_params(hba);
Yaniv Gardi60f01872016-03-10 17:37:11 +02005618
5619 ret = ufshcd_set_vccq_rail_unused(hba,
5620 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
5621 if (ret)
5622 goto out;
5623
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005624 /* UFS device is also active now */
5625 ufshcd_set_ufs_dev_active(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305626 ufshcd_force_reset_auto_bkops(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005627 hba->wlun_dev_clr_ua = true;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305628
Dolev Raviv7eb584d2014-09-25 15:32:31 +03005629 if (ufshcd_get_max_pwr_mode(hba)) {
5630 dev_err(hba->dev,
5631 "%s: Failed getting max supported power mode\n",
5632 __func__);
5633 } else {
5634 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
Dov Levenglick8643ae62016-10-17 17:10:14 -07005635 if (ret) {
Dolev Raviv7eb584d2014-09-25 15:32:31 +03005636 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
5637 __func__, ret);
Dov Levenglick8643ae62016-10-17 17:10:14 -07005638 goto out;
5639 }
Dolev Raviv7eb584d2014-09-25 15:32:31 +03005640 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005641
Yaniv Gardi53c12d02016-02-01 15:02:45 +02005642 /* set the state as operational after switching to desired gear */
5643 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005644 /*
5645 * If we are in error handling context or in power management callbacks
5646 * context, no need to scan the host
5647 */
5648 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
5649 bool flag;
5650
5651 /* clear any previous UFS device information */
5652 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02005653 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
5654 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005655 hba->dev_info.f_power_on_wp_en = flag;
5656
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03005657 if (!hba->is_init_prefetch)
5658 ufshcd_init_icc_levels(hba);
5659
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03005660 /* Add required well known logical units to scsi mid layer */
5661 if (ufshcd_scsi_add_wlus(hba))
5662 goto out;
5663
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305664 scsi_scan_host(hba->host);
5665 pm_runtime_put_sync(hba->dev);
5666 }
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03005667
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08005668 /* Resume devfreq after UFS device is detected */
5669 if (ufshcd_is_clkscaling_supported(hba)) {
5670 ufshcd_resume_clkscaling(hba);
5671 hba->clk_scaling.is_allowed = true;
5672 }
5673
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03005674 if (!hba->is_init_prefetch)
5675 hba->is_init_prefetch = true;
5676
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305677out:
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005678 /*
5679 * If we failed to initialize the device or the device is not
5680 * present, turn off the power/clocks etc.
5681 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005682 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
5683 pm_runtime_put_sync(hba->dev);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005684 ufshcd_hba_exit(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005685 }
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005686
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08005687 trace_ufshcd_init(dev_name(hba->dev), ret,
5688 ktime_to_us(ktime_sub(ktime_get(), start)),
5689 hba->uic_link_state, hba->curr_dev_pwr_mode);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005690 return ret;
5691}
5692
5693/**
5694 * ufshcd_async_scan - asynchronous execution for probing hba
5695 * @data: data pointer to pass to this function
5696 * @cookie: cookie data
5697 */
5698static void ufshcd_async_scan(void *data, async_cookie_t cookie)
5699{
5700 struct ufs_hba *hba = (struct ufs_hba *)data;
5701
5702 ufshcd_probe_hba(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305703}
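/*
 * A brief sketch for context (assumption: mirrors how this file wires
 * things up): the probe runs asynchronously so a slow link-startup does
 * not stall boot, and ufshcd_init() queues it roughly as:
 *
 *	async_schedule(ufshcd_async_scan, hba);
 *
 * A caller that had to wait for outstanding async probes could use
 * async_synchronize_full().
 */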
5704
Yaniv Gardif550c652016-03-10 17:37:07 +02005705static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
5706{
5707 unsigned long flags;
5708 struct Scsi_Host *host;
5709 struct ufs_hba *hba;
5710 int index;
5711 bool found = false;
5712
5713 if (!scmd || !scmd->device || !scmd->device->host)
5714 return BLK_EH_NOT_HANDLED;
5715
5716 host = scmd->device->host;
5717 hba = shost_priv(host);
5718 if (!hba)
5719 return BLK_EH_NOT_HANDLED;
5720
5721 spin_lock_irqsave(host->host_lock, flags);
5722
5723 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
5724 if (hba->lrb[index].cmd == scmd) {
5725 found = true;
5726 break;
5727 }
5728 }
5729
5730 spin_unlock_irqrestore(host->host_lock, flags);
5731
5732 /*
5733 * Bypass SCSI error handling and reset the block layer timer if this
5734	 * SCSI command was not actually dispatched to the UFS driver; otherwise
5735 * let SCSI layer handle the error as usual.
5736 */
5737 return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
5738}
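/*
 * For reference, the block layer contract behind the return values used
 * above: BLK_EH_RESET_TIMER re-arms the request timer and lets the
 * command run longer, while BLK_EH_NOT_HANDLED hands the command to the
 * SCSI error handler, which in turn drives the eh_* callbacks wired into
 * the host template below.
 */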
5739
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305740static struct scsi_host_template ufshcd_driver_template = {
5741 .module = THIS_MODULE,
5742 .name = UFSHCD,
5743 .proc_name = UFSHCD,
5744 .queuecommand = ufshcd_queuecommand,
5745 .slave_alloc = ufshcd_slave_alloc,
Akinobu Mitaeeda4742014-07-01 23:00:32 +09005746 .slave_configure = ufshcd_slave_configure,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305747 .slave_destroy = ufshcd_slave_destroy,
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03005748 .change_queue_depth = ufshcd_change_queue_depth,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305749 .eh_abort_handler = ufshcd_abort,
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305750 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
5751 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
Yaniv Gardif550c652016-03-10 17:37:07 +02005752 .eh_timed_out = ufshcd_eh_timed_out,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305753 .this_id = -1,
5754 .sg_tablesize = SG_ALL,
5755 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
5756 .can_queue = UFSHCD_CAN_QUEUE,
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005757 .max_host_blocked = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01005758 .track_queue_depth = 1,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305759};
5760
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005761static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
5762 int ua)
5763{
Bjorn Andersson7b16a072015-02-11 19:35:28 -08005764 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005765
Bjorn Andersson7b16a072015-02-11 19:35:28 -08005766 if (!vreg)
5767 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005768
Bjorn Andersson7b16a072015-02-11 19:35:28 -08005769 ret = regulator_set_load(vreg->reg, ua);
5770 if (ret < 0) {
5771 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
5772 __func__, vreg->name, ua, ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005773 }
5774
5775 return ret;
5776}
5777
5778static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
5779 struct ufs_vreg *vreg)
5780{
Yaniv Gardi60f01872016-03-10 17:37:11 +02005781 if (!vreg)
5782 return 0;
5783 else if (vreg->unused)
5784 return 0;
5785 else
5786 return ufshcd_config_vreg_load(hba->dev, vreg,
5787 UFS_VREG_LPM_LOAD_UA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005788}
5789
5790static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
5791 struct ufs_vreg *vreg)
5792{
Yaniv Gardi60f01872016-03-10 17:37:11 +02005793 if (!vreg)
5794 return 0;
5795 else if (vreg->unused)
5796 return 0;
5797 else
5798 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005799}
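/*
 * Example (illustrative values): UFS_VREG_LPM_LOAD_UA is a small
 * retention-level load hint, so for a VCCQ rail rated at, say,
 * max_uA = 200000 the two helpers bracket a suspend/resume cycle:
 *
 *	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);  <-- tiny load
 *	...
 *	ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);  <-- full load
 *
 * which lets the regulator framework drop the rail into a more efficient
 * operating mode while it is lightly loaded.
 */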
5800
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005801static int ufshcd_config_vreg(struct device *dev,
5802 struct ufs_vreg *vreg, bool on)
5803{
5804 int ret = 0;
5805 struct regulator *reg = vreg->reg;
5806 const char *name = vreg->name;
5807 int min_uV, uA_load;
5808
5809 BUG_ON(!vreg);
5810
5811 if (regulator_count_voltages(reg) > 0) {
5812 min_uV = on ? vreg->min_uV : 0;
5813 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
5814 if (ret) {
5815 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
5816 __func__, name, ret);
5817 goto out;
5818 }
5819
5820 uA_load = on ? vreg->max_uA : 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005821 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
5822 if (ret)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005823 goto out;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005824 }
5825out:
5826 return ret;
5827}
5828
5829static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
5830{
5831 int ret = 0;
5832
Yaniv Gardi60f01872016-03-10 17:37:11 +02005833 if (!vreg)
5834 goto out;
5835 else if (vreg->enabled || vreg->unused)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005836 goto out;
5837
5838 ret = ufshcd_config_vreg(dev, vreg, true);
5839 if (!ret)
5840 ret = regulator_enable(vreg->reg);
5841
5842 if (!ret)
5843 vreg->enabled = true;
5844 else
5845 dev_err(dev, "%s: %s enable failed, err=%d\n",
5846 __func__, vreg->name, ret);
5847out:
5848 return ret;
5849}
5850
5851static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
5852{
5853 int ret = 0;
5854
Yaniv Gardi60f01872016-03-10 17:37:11 +02005855 if (!vreg)
5856 goto out;
5857 else if (!vreg->enabled || vreg->unused)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005858 goto out;
5859
5860 ret = regulator_disable(vreg->reg);
5861
5862 if (!ret) {
5863 /* ignore errors on applying disable config */
5864 ufshcd_config_vreg(dev, vreg, false);
5865 vreg->enabled = false;
5866 } else {
5867 dev_err(dev, "%s: %s disable failed, err=%d\n",
5868 __func__, vreg->name, ret);
5869 }
5870out:
5871 return ret;
5872}
5873
5874static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
5875{
5876 int ret = 0;
5877 struct device *dev = hba->dev;
5878 struct ufs_vreg_info *info = &hba->vreg_info;
5879
5880 if (!info)
5881 goto out;
5882
5883 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
5884 if (ret)
5885 goto out;
5886
5887 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
5888 if (ret)
5889 goto out;
5890
5891 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
5892 if (ret)
5893 goto out;
5894
5895out:
5896 if (ret) {
5897 ufshcd_toggle_vreg(dev, info->vccq2, false);
5898 ufshcd_toggle_vreg(dev, info->vccq, false);
5899 ufshcd_toggle_vreg(dev, info->vcc, false);
5900 }
5901 return ret;
5902}
5903
Raviv Shvili6a771a62014-09-25 15:32:24 +03005904static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
5905{
5906 struct ufs_vreg_info *info = &hba->vreg_info;
5907
5908 if (info)
5909 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
5910
5911 return 0;
5912}
5913
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005914static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
5915{
5916 int ret = 0;
5917
5918 if (!vreg)
5919 goto out;
5920
5921 vreg->reg = devm_regulator_get(dev, vreg->name);
5922 if (IS_ERR(vreg->reg)) {
5923 ret = PTR_ERR(vreg->reg);
5924 dev_err(dev, "%s: %s get failed, err=%d\n",
5925 __func__, vreg->name, ret);
5926 }
5927out:
5928 return ret;
5929}
5930
5931static int ufshcd_init_vreg(struct ufs_hba *hba)
5932{
5933 int ret = 0;
5934 struct device *dev = hba->dev;
5935 struct ufs_vreg_info *info = &hba->vreg_info;
5936
5937 if (!info)
5938 goto out;
5939
5940 ret = ufshcd_get_vreg(dev, info->vcc);
5941 if (ret)
5942 goto out;
5943
5944 ret = ufshcd_get_vreg(dev, info->vccq);
5945 if (ret)
5946 goto out;
5947
5948 ret = ufshcd_get_vreg(dev, info->vccq2);
5949out:
5950 return ret;
5951}
5952
Raviv Shvili6a771a62014-09-25 15:32:24 +03005953static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
5954{
5955 struct ufs_vreg_info *info = &hba->vreg_info;
5956
5957 if (info)
5958 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
5959
5960 return 0;
5961}
5962
Yaniv Gardi60f01872016-03-10 17:37:11 +02005963static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
5964{
5965 int ret = 0;
5966 struct ufs_vreg_info *info = &hba->vreg_info;
5967
5968 if (!info)
5969 goto out;
5970 else if (!info->vccq)
5971 goto out;
5972
5973 if (unused) {
5974 /* shut off the rail here */
5975 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
5976 /*
5977 * Mark this rail as no longer used, so it doesn't get enabled
5978 * later by mistake
5979 */
5980 if (!ret)
5981 info->vccq->unused = true;
5982 } else {
5983 /*
5984		 * The rail should already be enabled here, hence just make sure
5985		 * that the unused flag is cleared.
5986 */
5987 info->vccq->unused = false;
5988 }
5989out:
5990 return ret;
5991}
5992
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005993static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
5994 bool skip_ref_clk)
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005995{
5996 int ret = 0;
5997 struct ufs_clk_info *clki;
5998 struct list_head *head = &hba->clk_list_head;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005999 unsigned long flags;
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08006000 ktime_t start = ktime_get();
6001 bool clk_state_changed = false;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03006002
6003 if (!head || list_empty(head))
6004 goto out;
6005
Subhash Jadavani1e879e82016-10-06 21:48:22 -07006006 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
6007 if (ret)
6008 return ret;
6009
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03006010 list_for_each_entry(clki, head, list) {
6011 if (!IS_ERR_OR_NULL(clki->clk)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006012 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
6013 continue;
6014
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08006015 clk_state_changed = on ^ clki->enabled;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03006016 if (on && !clki->enabled) {
6017 ret = clk_prepare_enable(clki->clk);
6018 if (ret) {
6019 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
6020 __func__, clki->name, ret);
6021 goto out;
6022 }
6023 } else if (!on && clki->enabled) {
6024 clk_disable_unprepare(clki->clk);
6025 }
6026 clki->enabled = on;
6027 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
6028 clki->name, on ? "en" : "dis");
6029 }
6030 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006031
Subhash Jadavani1e879e82016-10-06 21:48:22 -07006032 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
6033 if (ret)
6034 return ret;
6035
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03006036out:
6037 if (ret) {
6038 list_for_each_entry(clki, head, list) {
6039 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
6040 clk_disable_unprepare(clki->clk);
6041 }
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006042 } else if (!ret && on) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006043 spin_lock_irqsave(hba->host->host_lock, flags);
6044 hba->clk_gating.state = CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006045 trace_ufshcd_clk_gating(dev_name(hba->dev),
6046 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006047 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03006048 }
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006049
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08006050 if (clk_state_changed)
6051 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
6052 (on ? "on" : "off"),
6053 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03006054 return ret;
6055}
6056
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006057static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
6058{
6059 return __ufshcd_setup_clocks(hba, on, false);
6060}
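/*
 * Usage sketch from the suspend path later in this file: when the
 * UniPro link stays active, the device reference clock must keep
 * running, so only the remaining clocks are gated via:
 *
 *	__ufshcd_setup_clocks(hba, false, true);  <-- skips "ref_clk"
 */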
6061
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03006062static int ufshcd_init_clocks(struct ufs_hba *hba)
6063{
6064 int ret = 0;
6065 struct ufs_clk_info *clki;
6066 struct device *dev = hba->dev;
6067 struct list_head *head = &hba->clk_list_head;
6068
6069 if (!head || list_empty(head))
6070 goto out;
6071
6072 list_for_each_entry(clki, head, list) {
6073 if (!clki->name)
6074 continue;
6075
6076 clki->clk = devm_clk_get(dev, clki->name);
6077 if (IS_ERR(clki->clk)) {
6078 ret = PTR_ERR(clki->clk);
6079 dev_err(dev, "%s: %s clk get failed, %d\n",
6080 __func__, clki->name, ret);
6081 goto out;
6082 }
6083
6084 if (clki->max_freq) {
6085 ret = clk_set_rate(clki->clk, clki->max_freq);
6086 if (ret) {
6087 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
6088 __func__, clki->name,
6089 clki->max_freq, ret);
6090 goto out;
6091 }
Sahitya Tummala856b3482014-09-25 15:32:34 +03006092 clki->curr_freq = clki->max_freq;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03006093 }
6094 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
6095 clki->name, clk_get_rate(clki->clk));
6096 }
6097out:
6098 return ret;
6099}
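/*
 * Note: the clk_list_head entries bound above are populated by the bus
 * glue before ufshcd_init() runs (assumption: the platform glue parses
 * them from DT properties such as "clock-names" and "freq-table-hz"),
 * so this function only looks up and rate-configures what was declared
 * there.
 */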
6100
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006101static int ufshcd_variant_hba_init(struct ufs_hba *hba)
6102{
6103 int err = 0;
6104
6105 if (!hba->vops)
6106 goto out;
6107
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006108 err = ufshcd_vops_init(hba);
6109 if (err)
6110 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006111
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006112 err = ufshcd_vops_setup_regulators(hba, true);
6113 if (err)
6114 goto out_exit;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006115
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006116 goto out;
6117
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006118out_exit:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006119 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006120out:
6121 if (err)
6122 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006123 __func__, ufshcd_get_var_name(hba), err);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006124 return err;
6125}
6126
6127static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
6128{
6129 if (!hba->vops)
6130 return;
6131
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006132 ufshcd_vops_setup_regulators(hba, false);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006133
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006134 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006135}
6136
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006137static int ufshcd_hba_init(struct ufs_hba *hba)
6138{
6139 int err;
6140
Raviv Shvili6a771a62014-09-25 15:32:24 +03006141 /*
6142 * Handle host controller power separately from the UFS device power
6143	 * rails as it helps control the UFS host controller power
6144	 * collapse easily, which is different from UFS device power collapse.
6145	 * Also, enable the host controller power before we go ahead with the
6146	 * rest of the initialization here.
6147 */
6148 err = ufshcd_init_hba_vreg(hba);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006149 if (err)
6150 goto out;
6151
Raviv Shvili6a771a62014-09-25 15:32:24 +03006152 err = ufshcd_setup_hba_vreg(hba, true);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006153 if (err)
6154 goto out;
6155
Raviv Shvili6a771a62014-09-25 15:32:24 +03006156 err = ufshcd_init_clocks(hba);
6157 if (err)
6158 goto out_disable_hba_vreg;
6159
6160 err = ufshcd_setup_clocks(hba, true);
6161 if (err)
6162 goto out_disable_hba_vreg;
6163
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03006164 err = ufshcd_init_vreg(hba);
6165 if (err)
6166 goto out_disable_clks;
6167
6168 err = ufshcd_setup_vreg(hba, true);
6169 if (err)
6170 goto out_disable_clks;
6171
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006172 err = ufshcd_variant_hba_init(hba);
6173 if (err)
6174 goto out_disable_vreg;
6175
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006176 hba->is_powered = true;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006177 goto out;
6178
6179out_disable_vreg:
6180 ufshcd_setup_vreg(hba, false);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03006181out_disable_clks:
6182 ufshcd_setup_clocks(hba, false);
Raviv Shvili6a771a62014-09-25 15:32:24 +03006183out_disable_hba_vreg:
6184 ufshcd_setup_hba_vreg(hba, false);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006185out:
6186 return err;
6187}
6188
6189static void ufshcd_hba_exit(struct ufs_hba *hba)
6190{
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006191 if (hba->is_powered) {
6192 ufshcd_variant_hba_exit(hba);
6193 ufshcd_setup_vreg(hba, false);
Gilad Bronera5082532016-10-17 17:10:00 -07006194 ufshcd_suspend_clkscaling(hba);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006195 ufshcd_setup_clocks(hba, false);
6196 ufshcd_setup_hba_vreg(hba, false);
6197 hba->is_powered = false;
6198 }
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006199}
6200
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006201static int
6202ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306203{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006204 unsigned char cmd[6] = {REQUEST_SENSE,
6205 0,
6206 0,
6207 0,
Gilad Bronerdcea0bf2016-10-17 17:09:48 -07006208 UFSHCD_REQ_SENSE_SIZE,
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006209 0};
6210 char *buffer;
6211 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306212
Gilad Bronerdcea0bf2016-10-17 17:09:48 -07006213 buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006214 if (!buffer) {
6215 ret = -ENOMEM;
6216 goto out;
6217 }
6218
6219 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
Gilad Bronerdcea0bf2016-10-17 17:09:48 -07006220 UFSHCD_REQ_SENSE_SIZE, NULL,
Christoph Hellwige8064022016-10-20 15:12:13 +02006221 msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006222 if (ret)
6223 pr_err("%s: failed with err %d\n", __func__, ret);
6224
6225 kfree(buffer);
6226out:
6227 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306228}
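/*
 * CDB layout used above, for reference: byte 0 carries the REQUEST SENSE
 * opcode (0x03) and byte 4 the allocation length, here
 * UFSHCD_REQ_SENSE_SIZE (18) bytes of fixed-format sense data. The
 * command exists only to clear the device W-LUN's initial unit attention
 * condition; the returned sense data itself is thrown away.
 */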
6229
6230/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006231 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
6232 * power mode
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306233 * @hba: per adapter instance
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006234 * @pwr_mode: device power mode to set
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306235 *
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006236 * Returns 0 if requested power mode is set successfully
6237 * Returns non-zero if failed to set the requested power mode
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306238 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006239static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
6240 enum ufs_dev_pwr_mode pwr_mode)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306241{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006242 unsigned char cmd[6] = { START_STOP };
6243 struct scsi_sense_hdr sshdr;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006244 struct scsi_device *sdp;
6245 unsigned long flags;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006246 int ret;
6247
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006248 spin_lock_irqsave(hba->host->host_lock, flags);
6249 sdp = hba->sdev_ufs_device;
6250 if (sdp) {
6251 ret = scsi_device_get(sdp);
6252 if (!ret && !scsi_device_online(sdp)) {
6253 ret = -ENODEV;
6254 scsi_device_put(sdp);
6255 }
6256 } else {
6257 ret = -ENODEV;
6258 }
6259 spin_unlock_irqrestore(hba->host->host_lock, flags);
6260
6261 if (ret)
6262 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006263
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306264 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006265 * If scsi commands fail, the scsi mid-layer schedules scsi error-
6266 * handling, which would wait for host to be resumed. Since we know
6267 * we are functional while we are here, skip host resume in error
6268 * handling context.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306269 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006270 hba->host->eh_noresume = 1;
6271 if (hba->wlun_dev_clr_ua) {
6272 ret = ufshcd_send_request_sense(hba, sdp);
6273 if (ret)
6274 goto out;
6275 /* Unit attention condition is cleared now */
6276 hba->wlun_dev_clr_ua = false;
6277 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306278
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006279 cmd[4] = pwr_mode << 4;
6280
6281 /*
6282 * Current function would be generally called from the power management
Christoph Hellwige8064022016-10-20 15:12:13 +02006283	 * callbacks, hence set the RQF_PM flag so that it doesn't resume the
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006284	 * already suspended children.
6285 */
6286 ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
Christoph Hellwige8064022016-10-20 15:12:13 +02006287 START_STOP_TIMEOUT, 0, NULL, 0, RQF_PM);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006288 if (ret) {
6289 sdev_printk(KERN_WARNING, sdp,
Hannes Reineckeef613292014-10-24 14:27:00 +02006290 "START_STOP failed for power mode: %d, result %x\n",
6291 pwr_mode, ret);
Hannes Reinecke21045512015-01-08 07:43:46 +01006292 if (driver_byte(ret) & DRIVER_SENSE)
6293 scsi_print_sense_hdr(sdp, NULL, &sshdr);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006294 }
6295
6296 if (!ret)
6297 hba->curr_dev_pwr_mode = pwr_mode;
6298out:
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006299 scsi_device_put(sdp);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006300 hba->host->eh_noresume = 0;
6301 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306302}
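/*
 * Worked example for the START STOP UNIT CDB built above: the power
 * condition sits in bits 7:4 of byte 4, so (assuming the enum values
 * from ufs.h, e.g. UFS_SLEEP_PWR_MODE == 2) requesting sleep yields
 * cmd[4] = 0x20 and UFS_ACTIVE_PWR_MODE == 1 yields cmd[4] = 0x10.
 */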
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306303
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006304static int ufshcd_link_state_transition(struct ufs_hba *hba,
6305 enum uic_link_state req_link_state,
6306 int check_for_bkops)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306307{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006308 int ret = 0;
6309
6310 if (req_link_state == hba->uic_link_state)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306311 return 0;
6312
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006313 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
6314 ret = ufshcd_uic_hibern8_enter(hba);
6315 if (!ret)
6316 ufshcd_set_link_hibern8(hba);
6317 else
6318 goto out;
6319 }
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306320 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006321 * If autobkops is enabled, link can't be turned off because
6322 * turning off the link would also turn off the device.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306323 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006324 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
6325 (!check_for_bkops || (check_for_bkops &&
6326 !hba->auto_bkops_enabled))) {
6327 /*
Yaniv Gardif3099fb2016-03-10 17:37:17 +02006328		 * Let's make sure that the link is in low power mode; we are doing
6329		 * this currently by putting the link in Hibern8. Another way to
6330		 * put the link in low power mode is to send the DME end point reset
6331		 * to the device and then send the DME reset command to the local
6332		 * UniPro. But putting the link in Hibern8 is much faster.
6333 */
6334 ret = ufshcd_uic_hibern8_enter(hba);
6335 if (ret)
6336 goto out;
6337 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006338 * Change controller state to "reset state" which
6339 * should also put the link in off/reset state
6340 */
Yaniv Gardi596585a2016-03-10 17:37:08 +02006341 ufshcd_hba_stop(hba, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006342 /*
6343 * TODO: Check if we need any delay to make sure that
6344 * controller is reset
6345 */
6346 ufshcd_set_link_off(hba);
6347 }
6348
6349out:
6350 return ret;
6351}
6352
6353static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
6354{
6355 /*
Yaniv Gardib799fdf2016-03-10 17:37:18 +02006356 * It seems some UFS devices may keep drawing more than sleep current
6357	 * (at least for 500us) from UFS rails (especially from VCCQ rail).
6358 * To avoid this situation, add 2ms delay before putting these UFS
6359 * rails in LPM mode.
6360 */
6361 if (!ufshcd_is_link_active(hba) &&
6362 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
6363 usleep_range(2000, 2100);
6364
6365 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006366	 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
6367	 * save some power.
6368	 *
6369	 * If the UFS device and link are in OFF state, all power supplies (VCC,
6370 * VCCQ, VCCQ2) can be turned off if power on write protect is not
6371 * required. If UFS link is inactive (Hibern8 or OFF state) and device
6372 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
6373 *
6374 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
6375 * in low power state which would save some power.
6376 */
6377 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
6378 !hba->dev_info.is_lu_power_on_wp) {
6379 ufshcd_setup_vreg(hba, false);
6380 } else if (!ufshcd_is_ufs_dev_active(hba)) {
6381 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
6382 if (!ufshcd_is_link_active(hba)) {
6383 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
6384 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
6385 }
6386 }
6387}
6388
6389static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
6390{
6391 int ret = 0;
6392
6393 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
6394 !hba->dev_info.is_lu_power_on_wp) {
6395 ret = ufshcd_setup_vreg(hba, true);
6396 } else if (!ufshcd_is_ufs_dev_active(hba)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006397 if (!ret && !ufshcd_is_link_active(hba)) {
6398 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
6399 if (ret)
6400 goto vcc_disable;
6401 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
6402 if (ret)
6403 goto vccq_lpm;
6404 }
Subhash Jadavani69d72ac2016-10-27 17:26:24 -07006405 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006406 }
6407 goto out;
6408
6409vccq_lpm:
6410 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
6411vcc_disable:
6412 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
6413out:
6414 return ret;
6415}
6416
6417static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
6418{
6419 if (ufshcd_is_link_off(hba))
6420 ufshcd_setup_hba_vreg(hba, false);
6421}
6422
6423static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
6424{
6425 if (ufshcd_is_link_off(hba))
6426 ufshcd_setup_hba_vreg(hba, true);
6427}
6428
6429/**
6430 * ufshcd_suspend - helper function for suspend operations
6431 * @hba: per adapter instance
6432 * @pm_op: desired low power operation type
6433 *
6434 * This function will try to put the UFS device and link into low power
6435 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
6436 * (System PM level).
6437 *
6438 * If this function is called during shutdown, it will make sure that
6439 * both UFS device and UFS link is powered off.
6440 *
6441 * NOTE: UFS device & link must be active before we enter in this function.
6442 *
6443 * Returns 0 for success and non-zero for failure
6444 */
6445static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
6446{
6447 int ret = 0;
6448 enum ufs_pm_level pm_lvl;
6449 enum ufs_dev_pwr_mode req_dev_pwr_mode;
6450 enum uic_link_state req_link_state;
6451
6452 hba->pm_op_in_progress = 1;
6453 if (!ufshcd_is_shutdown_pm(pm_op)) {
6454 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
6455 hba->rpm_lvl : hba->spm_lvl;
6456 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
6457 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
6458 } else {
6459 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
6460 req_link_state = UIC_LINK_OFF_STATE;
6461 }
6462
6463 /*
6464 * If we can't transition into any of the low power modes
6465 * just gate the clocks.
6466 */
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006467 ufshcd_hold(hba, false);
6468 hba->clk_gating.is_suspended = true;
6469
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07006470 ufshcd_suspend_clkscaling(hba);
6471
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006472 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
6473 req_link_state == UIC_LINK_ACTIVE_STATE) {
6474 goto disable_clks;
6475 }
6476
6477 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
6478 (req_link_state == hba->uic_link_state))
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07006479 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006480
6481 /* UFS device & link must be active before we enter in this function */
6482 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
6483 ret = -EINVAL;
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07006484 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006485 }
6486
6487 if (ufshcd_is_runtime_pm(pm_op)) {
Subhash Jadavani374a2462014-09-25 15:32:35 +03006488 if (ufshcd_can_autobkops_during_suspend(hba)) {
6489 /*
6490 * The device is idle with no requests in the queue,
6491 * allow background operations if bkops status shows
6492 * that performance might be impacted.
6493 */
6494 ret = ufshcd_urgent_bkops(hba);
6495 if (ret)
6496 goto enable_gating;
6497 } else {
6498 /* make sure that auto bkops is disabled */
6499 ufshcd_disable_auto_bkops(hba);
6500 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006501 }
6502
6503 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
6504 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
6505 !ufshcd_is_runtime_pm(pm_op))) {
6506 /* ensure that bkops is disabled */
6507 ufshcd_disable_auto_bkops(hba);
6508 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
6509 if (ret)
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006510 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006511 }
6512
6513 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
6514 if (ret)
6515 goto set_dev_active;
6516
6517 ufshcd_vreg_set_lpm(hba);
6518
6519disable_clks:
6520 /*
6521 * Call vendor specific suspend callback. As these callbacks may access
6522	 * vendor specific host controller register space, call them while the
6523	 * host clocks are still ON.
6524 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006525 ret = ufshcd_vops_suspend(hba, pm_op);
6526 if (ret)
6527 goto set_link_active;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006528
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006529 if (!ufshcd_is_link_active(hba))
6530 ufshcd_setup_clocks(hba, false);
6531 else
6532 /* If link is active, device ref_clk can't be switched off */
6533 __ufshcd_setup_clocks(hba, false, true);
6534
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006535 hba->clk_gating.state = CLKS_OFF;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006536 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006537 /*
6538	 * Disable the host irq as there won't be any host controller
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006539	 * transaction expected till resume.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006540 */
6541 ufshcd_disable_irq(hba);
6542 /* Put the host controller in low power mode if possible */
6543 ufshcd_hba_vreg_set_lpm(hba);
6544 goto out;
6545
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006546set_link_active:
Gilad Bronera5082532016-10-17 17:10:00 -07006547 ufshcd_resume_clkscaling(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006548 ufshcd_vreg_set_hpm(hba);
6549 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
6550 ufshcd_set_link_active(hba);
6551 else if (ufshcd_is_link_off(hba))
6552 ufshcd_host_reset_and_restore(hba);
6553set_dev_active:
6554 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
6555 ufshcd_disable_auto_bkops(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006556enable_gating:
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07006557 ufshcd_resume_clkscaling(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006558 hba->clk_gating.is_suspended = false;
6559 ufshcd_release(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006560out:
6561 hba->pm_op_in_progress = 0;
6562 return ret;
6563}
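/*
 * Example of how a PM level maps onto the two knobs above (assumption:
 * follows the ufs_pm_lvl_states[] table defined earlier in this file):
 * a level pairing UFS_SLEEP_PWR_MODE with UIC_LINK_HIBERN8_STATE makes
 * this function send START STOP UNIT for sleep and then enter Hibern8,
 * while UFS_POWERDOWN_PWR_MODE plus UIC_LINK_OFF_STATE powers device and
 * link fully off, as on the shutdown path.
 */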
6564
6565/**
6566 * ufshcd_resume - helper function for resume operations
6567 * @hba: per adapter instance
6568 * @pm_op: runtime PM or system PM
6569 *
6570 * This function basically brings the UFS device, UniPro link and controller
6571 * to active state.
6572 *
6573 * Returns 0 for success and non-zero for failure
6574 */
6575static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
6576{
6577 int ret;
6578 enum uic_link_state old_link_state;
6579
6580 hba->pm_op_in_progress = 1;
6581 old_link_state = hba->uic_link_state;
6582
6583 ufshcd_hba_vreg_set_hpm(hba);
6584 /* Make sure clocks are enabled before accessing controller */
6585 ret = ufshcd_setup_clocks(hba, true);
6586 if (ret)
6587 goto out;
6588
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006589 /* enable the host irq as host controller would be active soon */
6590 ret = ufshcd_enable_irq(hba);
6591 if (ret)
6592 goto disable_irq_and_vops_clks;
6593
6594 ret = ufshcd_vreg_set_hpm(hba);
6595 if (ret)
6596 goto disable_irq_and_vops_clks;
6597
6598 /*
6599 * Call vendor specific resume callback. As these callbacks may access
6600	 * vendor specific host controller register space, call them when the
6601 * host clocks are ON.
6602 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006603 ret = ufshcd_vops_resume(hba, pm_op);
6604 if (ret)
6605 goto disable_vreg;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006606
6607 if (ufshcd_is_link_hibern8(hba)) {
6608 ret = ufshcd_uic_hibern8_exit(hba);
6609 if (!ret)
6610 ufshcd_set_link_active(hba);
6611 else
6612 goto vendor_suspend;
6613 } else if (ufshcd_is_link_off(hba)) {
6614 ret = ufshcd_host_reset_and_restore(hba);
6615 /*
6616 * ufshcd_host_reset_and_restore() should have already
6617 * set the link state as active
6618 */
6619 if (ret || !ufshcd_is_link_active(hba))
6620 goto vendor_suspend;
6621 }
6622
6623 if (!ufshcd_is_ufs_dev_active(hba)) {
6624 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
6625 if (ret)
6626 goto set_old_link_state;
6627 }
6628
subhashj@codeaurora.org4e768e72016-12-22 18:41:22 -08006629 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
6630 ufshcd_enable_auto_bkops(hba);
6631 else
6632 /*
6633 * If BKOPs operations are urgently needed at this moment then
6634 * keep auto-bkops enabled or else disable it.
6635 */
6636 ufshcd_urgent_bkops(hba);
6637
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006638 hba->clk_gating.is_suspended = false;
6639
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08006640 if (hba->clk_scaling.is_allowed)
6641 ufshcd_resume_clkscaling(hba);
Sahitya Tummala856b3482014-09-25 15:32:34 +03006642
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006643 /* Schedule clock gating in case of no access to UFS device yet */
6644 ufshcd_release(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006645 goto out;
6646
6647set_old_link_state:
6648 ufshcd_link_state_transition(hba, old_link_state, 0);
6649vendor_suspend:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006650 ufshcd_vops_suspend(hba, pm_op);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006651disable_vreg:
6652 ufshcd_vreg_set_lpm(hba);
6653disable_irq_and_vops_clks:
6654 ufshcd_disable_irq(hba);
Gilad Bronera5082532016-10-17 17:10:00 -07006655 ufshcd_suspend_clkscaling(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006656 ufshcd_setup_clocks(hba, false);
6657out:
6658 hba->pm_op_in_progress = 0;
6659 return ret;
6660}
6661
6662/**
6663 * ufshcd_system_suspend - system suspend routine
6664 * @hba: per adapter instance
6666 *
6667 * Check the description of ufshcd_suspend() function for more details.
6668 *
6669 * Returns 0 for success and non-zero for failure
6670 */
6671int ufshcd_system_suspend(struct ufs_hba *hba)
6672{
6673 int ret = 0;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006674 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006675
6676 if (!hba || !hba->is_powered)
Dolev Raviv233b5942014-10-23 13:25:14 +03006677 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006678
subhashj@codeaurora.org0b257732016-11-23 16:33:08 -08006679 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
6680 hba->curr_dev_pwr_mode) &&
6681 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
6682 hba->uic_link_state))
6683 goto out;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006684
subhashj@codeaurora.org0b257732016-11-23 16:33:08 -08006685 if (pm_runtime_suspended(hba->dev)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006686 /*
6687 * UFS device and/or UFS link low power states during runtime
6688		 * suspend seem to be different from what is expected during
6689		 * system suspend. Hence runtime resume the device & link and
6690		 * let the system suspend low power states to take effect.
6691		 * TODO: If resume takes longer time, we might optimize
6692 * it in future by not resuming everything if possible.
6693 */
6694 ret = ufshcd_runtime_resume(hba);
6695 if (ret)
6696 goto out;
6697 }
6698
6699 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
6700out:
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006701 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
6702 ktime_to_us(ktime_sub(ktime_get(), start)),
6703 hba->uic_link_state, hba->curr_dev_pwr_mode);
Dolev Ravive7850602014-09-25 15:32:36 +03006704 if (!ret)
6705 hba->is_sys_suspended = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006706 return ret;
6707}
6708EXPORT_SYMBOL(ufshcd_system_suspend);
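/*
 * Usage sketch (hypothetical glue driver; the real platform/PCI glue is
 * structured the same way): bus glue forwards its dev_pm_ops callbacks
 * to these helpers, e.g.
 *
 *	static int my_ufs_suspend(struct device *dev)
 *	{
 *		return ufshcd_system_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static const struct dev_pm_ops my_ufs_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(my_ufs_suspend, my_ufs_resume)
 *	};
 *
 * where my_ufs_resume would call ufshcd_system_resume() likewise.
 */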
6709
6710/**
6711 * ufshcd_system_resume - system resume routine
6712 * @hba: per adapter instance
6713 *
6714 * Returns 0 for success and non-zero for failure
6715 */
6716
6717int ufshcd_system_resume(struct ufs_hba *hba)
6718{
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006719 int ret = 0;
6720 ktime_t start = ktime_get();
6721
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07006722 if (!hba)
6723 return -EINVAL;
6724
6725 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006726 /*
6727 * Let the runtime resume take care of resuming
6728 * if runtime suspended.
6729 */
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006730 goto out;
6731 else
6732 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
6733out:
6734 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
6735 ktime_to_us(ktime_sub(ktime_get(), start)),
6736 hba->uic_link_state, hba->curr_dev_pwr_mode);
6737 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006738}
6739EXPORT_SYMBOL(ufshcd_system_resume);
6740
6741/**
6742 * ufshcd_runtime_suspend - runtime suspend routine
6743 * @hba: per adapter instance
6744 *
6745 * Check the description of ufshcd_suspend() function for more details.
6746 *
6747 * Returns 0 for success and non-zero for failure
6748 */
6749int ufshcd_runtime_suspend(struct ufs_hba *hba)
6750{
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006751 int ret = 0;
6752 ktime_t start = ktime_get();
6753
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07006754 if (!hba)
6755 return -EINVAL;
6756
6757 if (!hba->is_powered)
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006758 goto out;
6759 else
6760 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
6761out:
6762 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
6763 ktime_to_us(ktime_sub(ktime_get(), start)),
6764 hba->uic_link_state, hba->curr_dev_pwr_mode);
6765 return ret;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306766}
6767EXPORT_SYMBOL(ufshcd_runtime_suspend);
6768
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006769/**
6770 * ufshcd_runtime_resume - runtime resume routine
6771 * @hba: per adapter instance
6772 *
6773 * This function basically brings the UFS device, UniPro link and controller
6774 * to active state. Following operations are done in this function:
6775 *
6776 * 1. Turn on all the controller related clocks
6777 * 2. Bring the UniPro link out of Hibernate state
6778 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
6779 * to active state.
6780 * 4. If auto-bkops is enabled on the device, disable it.
6781 *
6782	 * So the following would be the possible power state after this function
6783	 * returns successfully:
6784 * S1: UFS device in Active state with VCC rail ON
6785 * UniPro link in Active state
6786 * All the UFS/UniPro controller clocks are ON
6787 *
6788 * Returns 0 for success and non-zero for failure
6789 */
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306790int ufshcd_runtime_resume(struct ufs_hba *hba)
6791{
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006792 int ret = 0;
6793 ktime_t start = ktime_get();
6794
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07006795 if (!hba)
6796 return -EINVAL;
6797
6798 if (!hba->is_powered)
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006799 goto out;
6800 else
6801 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
6802out:
6803 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
6804 ktime_to_us(ktime_sub(ktime_get(), start)),
6805 hba->uic_link_state, hba->curr_dev_pwr_mode);
6806 return ret;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306807}
6808EXPORT_SYMBOL(ufshcd_runtime_resume);
6809
6810int ufshcd_runtime_idle(struct ufs_hba *hba)
6811{
6812 return 0;
6813}
6814EXPORT_SYMBOL(ufshcd_runtime_idle);
6815
subhashj@codeaurora.org09690d52016-12-22 18:41:00 -08006816static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
6817 struct device_attribute *attr,
6818 const char *buf, size_t count,
6819 bool rpm)
6820{
6821 struct ufs_hba *hba = dev_get_drvdata(dev);
6822 unsigned long flags, value;
6823
6824 if (kstrtoul(buf, 0, &value))
6825 return -EINVAL;
6826
6827 if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX))
6828 return -EINVAL;
6829
6830 spin_lock_irqsave(hba->host->host_lock, flags);
6831 if (rpm)
6832 hba->rpm_lvl = value;
6833 else
6834 hba->spm_lvl = value;
6835 spin_unlock_irqrestore(hba->host->host_lock, flags);
6836 return count;
6837}
6838
6839static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
6840 struct device_attribute *attr, char *buf)
6841{
6842 struct ufs_hba *hba = dev_get_drvdata(dev);
6843 int curr_len;
6844 u8 lvl;
6845
6846 curr_len = snprintf(buf, PAGE_SIZE,
6847 "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
6848 hba->rpm_lvl,
6849 ufschd_ufs_dev_pwr_mode_to_string(
6850 ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
6851 ufschd_uic_link_state_to_string(
6852 ufs_pm_lvl_states[hba->rpm_lvl].link_state));
6853
6854 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
6855 "\nAll available Runtime PM levels info:\n");
6856 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
6857 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
6858 "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
6859 lvl,
6860 ufschd_ufs_dev_pwr_mode_to_string(
6861 ufs_pm_lvl_states[lvl].dev_state),
6862 ufschd_uic_link_state_to_string(
6863 ufs_pm_lvl_states[lvl].link_state));
6864
6865 return curr_len;
6866}
6867
6868static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
6869 struct device_attribute *attr, const char *buf, size_t count)
6870{
6871 return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
6872}
6873
6874static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
6875{
6876 hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
6877 hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
6878 sysfs_attr_init(&hba->rpm_lvl_attr.attr);
6879 hba->rpm_lvl_attr.attr.name = "rpm_lvl";
6880 hba->rpm_lvl_attr.attr.mode = 0644;
6881 if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
6882 dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
6883}
6884
6885static ssize_t ufshcd_spm_lvl_show(struct device *dev,
6886 struct device_attribute *attr, char *buf)
6887{
6888 struct ufs_hba *hba = dev_get_drvdata(dev);
6889 int curr_len;
6890 u8 lvl;
6891
6892 curr_len = snprintf(buf, PAGE_SIZE,
6893 "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
6894 hba->spm_lvl,
6895 ufschd_ufs_dev_pwr_mode_to_string(
6896 ufs_pm_lvl_states[hba->spm_lvl].dev_state),
6897 ufschd_uic_link_state_to_string(
6898 ufs_pm_lvl_states[hba->spm_lvl].link_state));
6899
6900 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
6901 "\nAll available System PM levels info:\n");
6902 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
6903 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
6904 "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
6905 lvl,
6906 ufschd_ufs_dev_pwr_mode_to_string(
6907 ufs_pm_lvl_states[lvl].dev_state),
6908 ufschd_uic_link_state_to_string(
6909 ufs_pm_lvl_states[lvl].link_state));
6910
6911 return curr_len;
6912}
6913
6914static ssize_t ufshcd_spm_lvl_store(struct device *dev,
6915 struct device_attribute *attr, const char *buf, size_t count)
6916{
6917 return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
6918}
6919
6920static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
6921{
6922 hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
6923 hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
6924 sysfs_attr_init(&hba->spm_lvl_attr.attr);
6925 hba->spm_lvl_attr.attr.name = "spm_lvl";
6926 hba->spm_lvl_attr.attr.mode = 0644;
6927 if (device_create_file(hba->dev, &hba->spm_lvl_attr))
6928 dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
6929}
6930
6931static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
6932{
6933 ufshcd_add_rpm_lvl_sysfs_nodes(hba);
6934 ufshcd_add_spm_lvl_sysfs_nodes(hba);
6935}
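/*
 * Example (paths are illustrative; the parent device name varies by
 * platform): reading either node dumps the current and all available
 * levels, and writing an index switches the policy at runtime:
 *
 *	cat /sys/devices/.../rpm_lvl
 *	echo 4 > /sys/devices/.../spm_lvl
 */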
6936
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306937/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006938 * ufshcd_shutdown - shutdown routine
6939 * @hba: per adapter instance
6940 *
6941 * This function would power off both UFS device and UFS link.
6942 *
6943 * Returns 0 always to allow force shutdown even in case of errors.
6944 */
6945int ufshcd_shutdown(struct ufs_hba *hba)
6946{
6947 int ret = 0;
6948
6949 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
6950 goto out;
6951
6952 if (pm_runtime_suspended(hba->dev)) {
6953 ret = ufshcd_runtime_resume(hba);
6954 if (ret)
6955 goto out;
6956 }
6957
6958 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
6959out:
6960 if (ret)
6961 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
6962 /* allow force shutdown even in case of errors */
6963 return 0;
6964}
6965EXPORT_SYMBOL(ufshcd_shutdown);
6966
6967/**
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306968 * ufshcd_remove - de-allocate SCSI host and host memory space
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306970 * @hba - per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306971 */
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306972void ufshcd_remove(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306973{
Akinobu Mitacfdf9c92013-07-30 00:36:03 +05306974 scsi_remove_host(hba->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306975 /* disable interrupts */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05306976 ufshcd_disable_intr(hba, hba->intr_mask);
Yaniv Gardi596585a2016-03-10 17:37:08 +02006977 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306978
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006979 ufshcd_exit_clk_gating(hba);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08006980 if (ufshcd_is_clkscaling_supported(hba))
6981 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006982 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306983}
6984EXPORT_SYMBOL_GPL(ufshcd_remove);
6985
6986/**
Yaniv Gardi47555a52015-10-28 13:15:49 +02006987 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
6988 * @hba: pointer to Host Bus Adapter (HBA)
6989 */
6990void ufshcd_dealloc_host(struct ufs_hba *hba)
6991{
6992 scsi_host_put(hba->host);
6993}
6994EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
6995
6996/**
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09006997 * ufshcd_set_dma_mask - Set dma mask based on the controller
6998 * addressing capability
6999 * @hba: per adapter instance
7000 *
7001 * Returns 0 for success, non-zero for failure
7002 */
7003static int ufshcd_set_dma_mask(struct ufs_hba *hba)
7004{
7005 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
7006 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
7007 return 0;
7008 }
7009 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
7010}
7011
7012/**
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007013 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307014 * @dev: pointer to device handle
7015 * @hba_handle: driver private handle
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307016 * Returns 0 on success, non-zero value on failure
7017 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007018int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307019{
7020 struct Scsi_Host *host;
7021 struct ufs_hba *hba;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007022 int err = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307023
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307024 if (!dev) {
7025 dev_err(dev,
7026 "Invalid memory reference for dev is NULL\n");
7027 err = -ENODEV;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307028 goto out_error;
7029 }
7030
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307031 host = scsi_host_alloc(&ufshcd_driver_template,
7032 sizeof(struct ufs_hba));
7033 if (!host) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307034 dev_err(dev, "scsi_host_alloc failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307035 err = -ENOMEM;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307036 goto out_error;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307037 }
7038 hba = shost_priv(host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307039 hba->host = host;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307040 hba->dev = dev;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007041 *hba_handle = hba;
7042
7043out_error:
7044 return err;
7045}
7046EXPORT_SYMBOL(ufshcd_alloc_host);
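/*
 * Usage sketch for bus glue (hypothetical probe, error handling
 * trimmed): allocate the HBA first, fill in any bus specifics, then hand
 * the MMIO base and IRQ to ufshcd_init() further below:
 *
 *	struct ufs_hba *hba;
 *	int err;
 *
 *	err = ufshcd_alloc_host(dev, &hba);
 *	if (err)
 *		return err;
 *	err = ufshcd_init(hba, mmio_base, irq);
 */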
7047
Sahitya Tummala856b3482014-09-25 15:32:34 +03007048static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
7049{
7050 int ret = 0;
7051 struct ufs_clk_info *clki;
7052 struct list_head *head = &hba->clk_list_head;
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08007053 ktime_t start = ktime_get();
7054 bool clk_state_changed = false;
Sahitya Tummala856b3482014-09-25 15:32:34 +03007055
7056 if (!head || list_empty(head))
7057 goto out;
7058
Yaniv Gardif06fcc72015-10-28 13:15:51 +02007059 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
7060 if (ret)
7061 return ret;
7062
Sahitya Tummala856b3482014-09-25 15:32:34 +03007063 list_for_each_entry(clki, head, list) {
7064 if (!IS_ERR_OR_NULL(clki->clk)) {
7065 if (scale_up && clki->max_freq) {
7066 if (clki->curr_freq == clki->max_freq)
7067 continue;
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08007068
7069 clk_state_changed = true;
Sahitya Tummala856b3482014-09-25 15:32:34 +03007070 ret = clk_set_rate(clki->clk, clki->max_freq);
7071 if (ret) {
7072 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7073 __func__, clki->name,
7074 clki->max_freq, ret);
7075 break;
7076 }
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007077 trace_ufshcd_clk_scaling(dev_name(hba->dev),
7078 "scaled up", clki->name,
7079 clki->curr_freq,
7080 clki->max_freq);
7081
Sahitya Tummala856b3482014-09-25 15:32:34 +03007082 clki->curr_freq = clki->max_freq;
7083
7084 } else if (!scale_up && clki->min_freq) {
7085 if (clki->curr_freq == clki->min_freq)
7086 continue;
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08007087
7088 clk_state_changed = true;
Sahitya Tummala856b3482014-09-25 15:32:34 +03007089 ret = clk_set_rate(clki->clk, clki->min_freq);
7090 if (ret) {
7091 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7092 __func__, clki->name,
7093 clki->min_freq, ret);
7094 break;
7095 }
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08007096 trace_ufshcd_clk_scaling(dev_name(hba->dev),
7097 "scaled down", clki->name,
7098 clki->curr_freq,
7099 clki->min_freq);
Sahitya Tummala856b3482014-09-25 15:32:34 +03007100 clki->curr_freq = clki->min_freq;
7101 }
7102 }
7103 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
7104 clki->name, clk_get_rate(clki->clk));
7105 }
Yaniv Gardif06fcc72015-10-28 13:15:51 +02007106
7107 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
7108
Sahitya Tummala856b3482014-09-25 15:32:34 +03007109out:
subhashj@codeaurora.org911a0772016-12-22 18:41:48 -08007110 if (clk_state_changed)
7111 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
7112 (scale_up ? "up" : "down"),
7113 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
Sahitya Tummala856b3482014-09-25 15:32:34 +03007114 return ret;
7115}
7116
7117static int ufshcd_devfreq_target(struct device *dev,
7118 unsigned long *freq, u32 flags)
7119{
7120 int err = 0;
7121 struct ufs_hba *hba = dev_get_drvdata(dev);
Subhash Jadavani30fc33f2016-10-27 17:25:47 -07007122 bool release_clk_hold = false;
7123 unsigned long irq_flags;
Sahitya Tummala856b3482014-09-25 15:32:34 +03007124
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08007125 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03007126 return -EINVAL;
7127
Subhash Jadavani30fc33f2016-10-27 17:25:47 -07007128 spin_lock_irqsave(hba->host->host_lock, irq_flags);
7129 if (ufshcd_eh_in_progress(hba)) {
7130 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
7131 return 0;
7132 }
7133
7134 if (ufshcd_is_clkgating_allowed(hba) &&
7135 (hba->clk_gating.state != CLKS_ON)) {
7136 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
7137 /* hold the vote until the scaling work is completed */
7138 hba->clk_gating.active_reqs++;
7139 release_clk_hold = true;
7140 hba->clk_gating.state = CLKS_ON;
7141 } else {
7142 /*
7143 * Clock gating work seems to be running in parallel
7144 * hence skip scaling work to avoid deadlock between
7145 * current scaling work and gating work.
7146 */
7147 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
7148 return 0;
7149 }
7150 }
7151 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
7152
Sahitya Tummala856b3482014-09-25 15:32:34 +03007153 if (*freq == UINT_MAX)
7154 err = ufshcd_scale_clks(hba, true);
7155 else if (*freq == 0)
7156 err = ufshcd_scale_clks(hba, false);
7157
Subhash Jadavani30fc33f2016-10-27 17:25:47 -07007158 spin_lock_irqsave(hba->host->host_lock, irq_flags);
7159 if (release_clk_hold)
7160 __ufshcd_release(hba);
7161 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
7162
Sahitya Tummala856b3482014-09-25 15:32:34 +03007163 return err;
7164}

static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}
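
/*
 * A worked example of the window accounting above, with illustrative numbers
 * (not measured data): with polling_ms = 100, a window in which requests were
 * outstanding for 70 ms yields busy_time = 70000 and total_time = 100000
 * (both in microseconds), i.e. a 70% load. The simple_ondemand governor
 * compares this load against its up-threshold (90% by default) to decide
 * whether to call ->target() asking for the maximum frequency.
 */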

static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 100,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};

static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
{
	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
	hba->clk_scaling.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
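
/*
 * The attribute registered above is the user-space knob for clock scaling.
 * A typical interaction, with a platform-specific path shown purely as an
 * illustration:
 *
 *	# cat /sys/devices/.../clkscale_enable
 *	1
 *	# echo 0 > /sys/devices/.../clkscale_enable	(disable; clocks are
 *							 left scaled up)
 *	# echo 1 > /sys/devices/.../clkscale_enable	(re-enable scaling)
 */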

/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;

	if (!mmio_base) {
		dev_err(hba->dev, "Invalid memory reference: mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	if ((hba->ufs_version != UFSHCI_VERSION_10) &&
	    (hba->ufs_version != UFSHCI_VERSION_11) &&
	    (hba->ufs_version != UFSHCI_VERSION_20) &&
	    (hba->ufs_version != UFSHCI_VERSION_21))
		dev_err(hba->dev, "invalid UFS version 0x%x\n",
			hba->ufs_version);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	hba->max_pwr_info.is_valid = false;

	/* Initialize wait queues for task management */
	init_waitqueue_head(&hba->tm_wq);
	init_waitqueue_head(&hba->tm_tag_wq);

	/* Initialize work queues */
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize device management tag acquire wait queue */
	init_waitqueue_head(&hba->dev_cmd.tag_wq);

	ufshcd_init_clk_gating(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering the UFS controller interrupt handler, clear any pending
	 * UFS interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
	/*
	 * Make sure that UFS interrupts are disabled and any pending interrupt
	 * status is cleared before registering the UFS interrupt handler.
	 */
	mb();

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto exit_gating;
	} else {
		hba->is_irq_enabled = true;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto exit_gating;
	}

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		ufshcd_print_host_regs(hba);
		goto out_remove_scsi_host;
	}

	if (ufshcd_is_clkscaling_supported(hba)) {
		hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile,
						       "simple_ondemand", NULL);
		if (IS_ERR(hba->devfreq)) {
			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
				PTR_ERR(hba->devfreq));
			err = PTR_ERR(hba->devfreq);
			goto out_remove_scsi_host;
		}
		/* Suspend devfreq until the UFS device is detected */
		ufshcd_suspend_clkscaling(hba);
		ufshcd_clkscaling_init_sysfs(hba);
	}

	/*
	 * Set the default power management level for runtime and system PM.
	 * Default power saving mode is to keep UFS link in Hibern8 state
	 * and UFS device in sleep state.
	 */
	hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);
	hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);

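	/*
	 * Background on the defaults chosen above (a sketch of the level
	 * semantics, not new behaviour): each PM level encodes a pair of
	 * (UFS device power mode, UniPro link state), and the helper looks
	 * up a level matching the requested pair. With these defaults, both
	 * runtime and system suspend put the device in Sleep and keep the
	 * link in Hibern8; the sysfs nodes registered later in this function
	 * let user space pick a different power/latency trade-off.
	 */
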
	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);

	/*
	 * Assume that the boot stage (before the kernel) has not left the
	 * device in a sleep or power-down state. This assumption lets us
	 * avoid doing link startup twice during ufshcd_probe_hba().
	 */
	ufshcd_set_ufs_dev_active(hba);

	async_schedule(ufshcd_async_scan, hba);
	ufshcd_add_sysfs_nodes(hba);

	return 0;

out_remove_scsi_host:
	scsi_remove_host(hba->host);
exit_gating:
	ufshcd_exit_clk_gating(hba);
out_disable:
	hba->is_irq_enabled = false;
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);

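/*
 * Typical use of ufshcd_init() from bus glue, sketched after the pattern of
 * the platform/PCI glue drivers; resource lookup and error handling are
 * abbreviated, and names such as mem_res are placeholders:
 *
 *	struct ufs_hba *hba;
 *	void __iomem *mmio_base;
 *	int err;
 *
 *	mmio_base = devm_ioremap_resource(dev, mem_res);
 *	if (IS_ERR(mmio_base))
 *		return PTR_ERR(mmio_base);
 *
 *	err = ufshcd_alloc_host(dev, &hba);
 *	if (err)
 *		return err;
 *
 *	err = ufshcd_init(hba, mmio_base, irq);
 *	if (err)
 *		dev_err(dev, "Host initialization failed\n");
 */
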
MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);