/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_REQ_SENSE_SIZE	18

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

#define ufshcd_toggle_vreg(_dev, _vreg, _on)			\
	({							\
		int _ret;					\
		if (_on)					\
			_ret = ufshcd_enable_vreg(_dev, _vreg);	\
		else						\
			_ret = ufshcd_disable_vreg(_dev, _vreg);\
		_ret;						\
	})

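/*
 * Illustrative usage sketch, not part of the original code: a caller
 * holding a regulator handle such as hba->vreg_info.vcc (assumed here
 * purely for illustration) can toggle it and collect the result:
 *
 *	ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 *	if (ret)
 *		dev_err(hba->dev, "failed to enable vcc\n");
 */
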
#define ufshcd_hex_dump(prefix_str, buf, len) \
print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)

static u32 ufs_query_desc_max_size[] = {
	QUERY_DESC_DEVICE_MAX_SIZE,
	QUERY_DESC_CONFIGURAION_MAX_SIZE,
	QUERY_DESC_UNIT_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_INTERCONNECT_MAX_SIZE,
	QUERY_DESC_STRING_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_GEOMETRY_MAX_SIZE,
	QUERY_DESC_POWER_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
};

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

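/*
 * Worked example from the table above: for a pm level of 3,
 * ufs_get_pm_lvl_to_dev_pwr_mode() returns UFS_SLEEP_PWR_MODE and
 * ufs_get_pm_lvl_to_link_pwr_state() returns UIC_LINK_HIBERN8_STATE,
 * i.e. the device sleeps while the link rests in hibern8.
 */
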
static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_NO_FASTAUTO),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),

	END_FIX
};

static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}

/* replace a non-printable or non-ASCII character with a space */
static inline void ufshcd_remove_non_printable(char *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}

static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
	/*
	 * hex_dump reads its data without the readl macro. This might
	 * cause inconsistency issues on some platforms, as the printed
	 * values may be stale cached data rather than the most recent
	 * register contents. To know that you are looking at an un-cached
	 * mapping, verify that the IORESOURCE_MEM flag is set when
	 * xxx_get_resource() is invoked during the platform/pci probe
	 * function.
	 */
	ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
		hba->ufs_version, hba->capabilities);
	dev_err(hba->dev,
		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
}

static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - Transfer Request Descriptor\n",
				tag);
		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU\n", tag);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU\n", tag);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));
		if (pr_prdt) {
			int prdt_length = le16_to_cpu(
				lrbp->utr_descriptor_ptr->prd_table_length);

			dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries\n", tag,
				prdt_length);
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
					sizeof(struct ufshcd_sg_entry) *
					prdt_length);
		}
	}
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	struct utp_task_req_desc *tmrdp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		tmrdp = &hba->utmrdl_base_addr[tag];
		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
				sizeof(struct request_desc_header));
		dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
				tag);
		ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
				tag);
		ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
				sizeof(struct utp_task_req_desc));
	}
}

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

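/*
 * Illustrative usage sketch (hypothetical values): poll a doorbell bit
 * until the controller clears it, reading every 100 us for up to
 * 1000 ms and sleeping between reads since we run in process context.
 * Because @val is masked internally, passing 0 waits for the bit to
 * become 0:
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1 << tag, 0, 100, 1000, true);
 */
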
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
	}

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device is connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if a device is present, 0 if no device is detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is not available, else returns true with
 * the tag value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 * 4-7		reserved
	 * Shifting out the Device Present bit and XOR-ing with 0x07
	 * yields 0 only when all three ready bits are set, e.g.
	 * reg = 0x0F gives ((0x0F & 0xFF) >> 1) ^ 0x07 = 0x07 ^ 0x07 = 0.
	 */
	return ((reg & 0xFF) >> 1) ^ 0x07;
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non-zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function reads UIC command argument3
 * Returns the value of UIC command argument3
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

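/*
 * Illustrative sketch (hypothetical values): raise one aggregated
 * completion interrupt after hba->nutrs - 1 requests complete, or when
 * the default 40us-unit timeout expires, whichever comes first:
 *
 *	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 */
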
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * When the run-stop registers are set to 1, it indicates to the
 * host controller that it can process the requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver 1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if the local unipro
	 * version doesn't support ver 1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	devfreq_suspend_device(hba->devfreq);
	hba->clk_scaling.window_start_t = 0;
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	devfreq_resume_device(hba->devfreq);
}

static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
}

static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_scaling.is_allowed)
		goto out;

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_scale_clks(hba, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
					__func__, err);
	}
	hba->clk_scaling.is_allowed = value;

	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
out:
	return count;
}

static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	if (hba->clk_scaling.is_allowed)
		ufshcd_resume_clkscaling(hba);
	scsi_unblock_requests(hba->host);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_work(&hba->clk_gating.ungate_work);
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		scsi_block_requests(hba->host);
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		schedule_work(&hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);

static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
	if (hba->clk_gating.is_suspended ||
		(hba->clk_gating.state == REQ_CLKS_ON)) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	ufshcd_suspend_clkscaling(hba);

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON, which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and ultimately avoid running
	 * the cancel work multiple times when new requests arrive before
	 * the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}

/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba))
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	schedule_delayed_work(&hba->clk_gating.gate_work,
			msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);

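/*
 * Typical pairing, as a sketch (ufshcd_send_uic_cmd() below does
 * exactly this): bracket host register or command access with a
 * hold/release pair so the clocks stay ungated for its duration:
 *
 *	ufshcd_hold(hba, false);
 *	... access host registers / issue commands ...
 *	ufshcd_release(hba);
 */
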
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
}

static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;
	u32 value;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_gating.is_enabled)
		goto out;

	if (value) {
		ufshcd_release(hba);
	} else {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.active_reqs++;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	hba->clk_gating.is_enabled = value;
out:
	return count;
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	hba->clk_gating.is_enabled = true;

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
	hba->clk_gating.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}

/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
}
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();
}

/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp: pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;

	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		int len_to_copy;

		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);

		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
	}
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				"%s: Response size is bigger than buffer",
				__func__);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}

/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Identical to ufshcd_send_uic_cmd() except for the locking: it must be
 * called with the mutex held and host_lock locked.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}

/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16((u16)(sg_segments *
					sizeof(struct ufshcd_sg_entry)));
		else
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			prd_table[i].size =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
			prd_table[i].reserved = 0;
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}

1385/**
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05301386 * ufshcd_enable_intr - enable interrupts
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301387 * @hba: per adapter instance
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05301388 * @intrs: interrupt bits
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301389 */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05301390static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301391{
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05301392 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
1393
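	/*
	 * UFSHCI 1.0: the INTERRUPT_ENABLE bits outside the RW mask seem to
	 * be write-one-to-set, so keep the RW bits as-is and write 1s only
	 * for the requested interrupts that are not enabled yet.
	 */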
1394 if (hba->ufs_version == UFSHCI_VERSION_10) {
1395 u32 rw;
1396 rw = set & INTERRUPT_MASK_RW_VER_10;
1397 set = rw | ((set ^ intrs) & intrs);
1398 } else {
1399 set |= intrs;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301400 }
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05301401
1402 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
1403}
1404
1405/**
1406 * ufshcd_disable_intr - disable interrupts
1407 * @hba: per adapter instance
1408 * @intrs: interrupt bits
1409 */
1410static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
1411{
1412 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
1413
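	/*
	 * UFSHCI 1.0: clear the requested bits inside the RW mask; the bits
	 * outside it seem to be write-one-to-clear, so write back a 1 only
	 * where an enabled interrupt is being disabled.
	 */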
1414 if (hba->ufs_version == UFSHCI_VERSION_10) {
1415 u32 rw;
1416 rw = (set & INTERRUPT_MASK_RW_VER_10) &
1417 ~(intrs & INTERRUPT_MASK_RW_VER_10);
1418 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
1419
1420 } else {
1421 set &= ~intrs;
1422 }
1423
1424 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301425}
1426
1427/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301428 * ufshcd_prepare_req_desc_hdr() - Fills the request descriptor header
 1429 * according to the request
1430 * @lrbp: pointer to local reference block
1431 * @upiu_flags: flags required in the header
 1432 * @cmd_dir: request's data direction
1433 */
1434static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
Joao Pinto300bb132016-05-11 12:21:27 +01001435 u32 *upiu_flags, enum dma_data_direction cmd_dir)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301436{
1437 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
1438 u32 data_direction;
1439 u32 dword_0;
1440
1441 if (cmd_dir == DMA_FROM_DEVICE) {
1442 data_direction = UTP_DEVICE_TO_HOST;
1443 *upiu_flags = UPIU_CMD_FLAGS_READ;
1444 } else if (cmd_dir == DMA_TO_DEVICE) {
1445 data_direction = UTP_HOST_TO_DEVICE;
1446 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
1447 } else {
1448 data_direction = UTP_NO_DATA_TRANSFER;
1449 *upiu_flags = UPIU_CMD_FLAGS_NONE;
1450 }
1451
1452 dword_0 = data_direction | (lrbp->command_type
1453 << UPIU_COMMAND_TYPE_OFFSET);
1454 if (lrbp->intr_cmd)
1455 dword_0 |= UTP_REQ_DESC_INT_CMD;
1456
1457 /* Transfer request descriptor header fields */
1458 req_desc->header.dword_0 = cpu_to_le32(dword_0);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02001459 /* dword_1 is reserved, hence it is set to 0 */
1460 req_desc->header.dword_1 = 0;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301461 /*
 1462 * Assign an invalid value for the command status; the controller
 1463 * updates OCS with the actual status on command completion.
1465 */
1466 req_desc->header.dword_2 =
1467 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02001468 /* dword_3 is reserved, hence it is set to 0 */
1469 req_desc->header.dword_3 = 0;
Yaniv Gardi51047262016-02-01 15:02:38 +02001470
1471 req_desc->prd_table_length = 0;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301472}
1473
1474/**
1475 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
1476 * for scsi commands
1477 * @lrbp - local reference block pointer
1478 * @upiu_flags - flags
1479 */
1480static
1481void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
1482{
1483 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02001484 unsigned short cdb_len;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301485
1486 /* command descriptor fields */
1487 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
1488 UPIU_TRANSACTION_COMMAND, upiu_flags,
1489 lrbp->lun, lrbp->task_tag);
1490 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
1491 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
1492
1493 /* Total EHS length and Data segment length will be zero */
1494 ucd_req_ptr->header.dword_2 = 0;
1495
1496 ucd_req_ptr->sc.exp_data_transfer_len =
1497 cpu_to_be32(lrbp->cmd->sdb.length);
1498
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02001499 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
1500 memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
1501 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
1502
1503 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301504}
1505
Dolev Raviv68078d52013-07-30 00:35:58 +05301506/**
1507 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
 1508 * for query requests
1509 * @hba: UFS hba
1510 * @lrbp: local reference block pointer
1511 * @upiu_flags: flags
1512 */
1513static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
1514 struct ufshcd_lrb *lrbp, u32 upiu_flags)
1515{
1516 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
1517 struct ufs_query *query = &hba->dev_cmd.query;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05301518 u16 len = be16_to_cpu(query->request.upiu_req.length);
Dolev Raviv68078d52013-07-30 00:35:58 +05301519 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
1520
1521 /* Query request header */
1522 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
1523 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
1524 lrbp->lun, lrbp->task_tag);
1525 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
1526 0, query->request.query_func, 0, 0);
1527
Zang Leigang68612852016-08-25 17:39:19 +08001528 /* Data segment length is only needed for WRITE_DESC */
1529 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
1530 ucd_req_ptr->header.dword_2 =
1531 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
1532 else
1533 ucd_req_ptr->header.dword_2 = 0;
Dolev Raviv68078d52013-07-30 00:35:58 +05301534
1535 /* Copy the Query Request buffer as is */
1536 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
1537 QUERY_OSF_SIZE);
Dolev Raviv68078d52013-07-30 00:35:58 +05301538
1539 /* Copy the Descriptor */
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001540 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
1541 memcpy(descp, query->descriptor, len);
1542
Yaniv Gardi51047262016-02-01 15:02:38 +02001543 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Dolev Raviv68078d52013-07-30 00:35:58 +05301544}
1545
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301546static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
1547{
1548 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
1549
1550 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
1551
1552 /* command descriptor fields */
1553 ucd_req_ptr->header.dword_0 =
1554 UPIU_HEADER_DWORD(
1555 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
Yaniv Gardi51047262016-02-01 15:02:38 +02001556 /* clear rest of the fields of basic header */
1557 ucd_req_ptr->header.dword_1 = 0;
1558 ucd_req_ptr->header.dword_2 = 0;
1559
1560 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301561}
1562
1563/**
Joao Pinto300bb132016-05-11 12:21:27 +01001564 * ufshcd_comp_devman_upiu - fills the UFS Protocol Information Unit (UPIU)
 1565 * for device management purposes
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301566 * @hba - per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301567 * @lrbp - pointer to local reference block
1568 */
Joao Pinto300bb132016-05-11 12:21:27 +01001569static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301570{
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301571 u32 upiu_flags;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301572 int ret = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301573
Joao Pinto300bb132016-05-11 12:21:27 +01001574 if (hba->ufs_version == UFSHCI_VERSION_20)
1575 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
1576 else
1577 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
1578
1579 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
1580 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
1581 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
1582 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
1583 ufshcd_prepare_utp_nop_upiu(lrbp);
1584 else
1585 ret = -EINVAL;
1586
1587 return ret;
1588}
1589
1590/**
 1591 * ufshcd_comp_scsi_upiu - fills the UFS Protocol Information Unit (UPIU)
 1592 * for SCSI commands
1593 * @hba - per adapter instance
 1594 * @lrbp - pointer to local reference block
1595 */
1596static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1597{
1598 u32 upiu_flags;
1599 int ret = 0;
1600
1601 if (hba->ufs_version == UFSHCI_VERSION_20)
1602 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
1603 else
1604 lrbp->command_type = UTP_CMD_TYPE_SCSI;
1605
1606 if (likely(lrbp->cmd)) {
1607 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
1608 lrbp->cmd->sc_data_direction);
1609 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
1610 } else {
1611 ret = -EINVAL;
1612 }
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301613
1614 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301615}
1616
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03001617/**
1618 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
1619 * @scsi_lun: scsi LUN id
1620 *
1621 * Returns UPIU LUN id
1622 */
1623static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
1624{
1625 if (scsi_is_wlun(scsi_lun))
1626 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
1627 | UFS_UPIU_WLUN_ID;
1628 else
1629 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
1630}
1631
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301632/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03001633 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 1634 * @upiu_wlun_id: UPIU W-LUN id
1635 *
1636 * Returns SCSI W-LUN id
1637 */
1638static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
1639{
1640 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
1641}
1642
1643/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301644 * ufshcd_queuecommand - main entry point for SCSI requests
 1645 * @host: SCSI host pointer
 1646 * @cmd: command from SCSI Midlayer
1647 *
1648 * Returns 0 for success, non-zero in case of failure
1649 */
1650static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1651{
1652 struct ufshcd_lrb *lrbp;
1653 struct ufs_hba *hba;
1654 unsigned long flags;
1655 int tag;
1656 int err = 0;
1657
1658 hba = shost_priv(host);
1659
1660 tag = cmd->request->tag;
Yaniv Gardi14497322016-02-01 15:02:39 +02001661 if (!ufshcd_valid_tag(hba, tag)) {
1662 dev_err(hba->dev,
1663 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
1664 __func__, tag, cmd, cmd->request);
1665 BUG();
1666 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301667
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301668 spin_lock_irqsave(hba->host->host_lock, flags);
1669 switch (hba->ufshcd_state) {
1670 case UFSHCD_STATE_OPERATIONAL:
1671 break;
Zang Leigang141f8162016-11-16 11:29:37 +08001672 case UFSHCD_STATE_EH_SCHEDULED:
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301673 case UFSHCD_STATE_RESET:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301674 err = SCSI_MLQUEUE_HOST_BUSY;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301675 goto out_unlock;
1676 case UFSHCD_STATE_ERROR:
1677 set_host_byte(cmd, DID_ERROR);
1678 cmd->scsi_done(cmd);
1679 goto out_unlock;
1680 default:
1681 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
1682 __func__, hba->ufshcd_state);
1683 set_host_byte(cmd, DID_BAD_TARGET);
1684 cmd->scsi_done(cmd);
1685 goto out_unlock;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301686 }
Yaniv Gardi53c12d02016-02-01 15:02:45 +02001687
1688 /* if error handling is in progress, don't issue commands */
1689 if (ufshcd_eh_in_progress(hba)) {
1690 set_host_byte(cmd, DID_ERROR);
1691 cmd->scsi_done(cmd);
1692 goto out_unlock;
1693 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301694 spin_unlock_irqrestore(hba->host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301695
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301696 /* acquire the tag to make sure device cmds don't use it */
1697 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
1698 /*
1699 * Dev manage command in progress, requeue the command.
1700 * Requeuing the command helps in cases where the request *may*
 1701 * find a different tag instead of waiting for the dev manage command
1702 * completion.
1703 */
1704 err = SCSI_MLQUEUE_HOST_BUSY;
1705 goto out;
1706 }
1707
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001708 err = ufshcd_hold(hba, true);
1709 if (err) {
1710 err = SCSI_MLQUEUE_HOST_BUSY;
1711 clear_bit_unlock(tag, &hba->lrb_in_use);
1712 goto out;
1713 }
1714 WARN_ON(hba->clk_gating.state != CLKS_ON);
1715
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301716 lrbp = &hba->lrb[tag];
1717
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301718 WARN_ON(lrbp->cmd);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301719 lrbp->cmd = cmd;
Gilad Bronerdcea0bf2016-10-17 17:09:48 -07001720 lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301721 lrbp->sense_buffer = cmd->sense_buffer;
1722 lrbp->task_tag = tag;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03001723 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
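	/* Request a per-command completion interrupt when aggregation is off */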
Yaniv Gardib8521902015-05-17 18:54:57 +03001724 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301725
Joao Pinto300bb132016-05-11 12:21:27 +01001726 ufshcd_comp_scsi_upiu(hba, lrbp);
1727
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09001728 err = ufshcd_map_sg(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301729 if (err) {
1730 lrbp->cmd = NULL;
1731 clear_bit_unlock(tag, &hba->lrb_in_use);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301732 goto out;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301733 }
Gilad Bronerad1a1b92016-10-17 17:09:36 -07001734 /* Make sure descriptors are ready before ringing the doorbell */
1735 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301736
1737 /* issue command to the controller */
1738 spin_lock_irqsave(hba->host->host_lock, flags);
Kiwoong Kim0e675ef2016-11-10 21:14:36 +09001739 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301740 ufshcd_send_command(hba, tag);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301741out_unlock:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301742 spin_unlock_irqrestore(hba->host->host_lock, flags);
1743out:
1744 return err;
1745}
1746
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301747static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
1748 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
1749{
1750 lrbp->cmd = NULL;
1751 lrbp->sense_bufflen = 0;
1752 lrbp->sense_buffer = NULL;
1753 lrbp->task_tag = tag;
1754 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301755 lrbp->intr_cmd = true; /* No interrupt aggregation */
1756 hba->dev_cmd.type = cmd_type;
1757
Joao Pinto300bb132016-05-11 12:21:27 +01001758 return ufshcd_comp_devman_upiu(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301759}
1760
1761static int
1762ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
1763{
1764 int err = 0;
1765 unsigned long flags;
1766 u32 mask = 1 << tag;
1767
1768 /* clear outstanding transaction before retry */
1769 spin_lock_irqsave(hba->host->host_lock, flags);
1770 ufshcd_utrl_clear(hba, tag);
1771 spin_unlock_irqrestore(hba->host->host_lock, flags);
1772
1773 /*
 1774 * wait for h/w to clear the corresponding bit in the doorbell.
1775 * max. wait is 1 sec.
1776 */
1777 err = ufshcd_wait_for_register(hba,
1778 REG_UTP_TRANSFER_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02001779 mask, ~mask, 1000, 1000, true);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301780
1781 return err;
1782}
1783
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001784static int
1785ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1786{
1787 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1788
1789 /* Get the UPIU response */
1790 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
1791 UPIU_RSP_CODE_OFFSET;
1792 return query_res->response;
1793}
1794
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301795/**
1796 * ufshcd_dev_cmd_completion() - handles device management command responses
1797 * @hba: per adapter instance
1798 * @lrbp: pointer to local reference block
1799 */
1800static int
1801ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1802{
1803 int resp;
1804 int err = 0;
1805
1806 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
1807
1808 switch (resp) {
1809 case UPIU_TRANSACTION_NOP_IN:
1810 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
1811 err = -EINVAL;
1812 dev_err(hba->dev, "%s: unexpected response %x\n",
1813 __func__, resp);
1814 }
1815 break;
Dolev Raviv68078d52013-07-30 00:35:58 +05301816 case UPIU_TRANSACTION_QUERY_RSP:
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001817 err = ufshcd_check_query_response(hba, lrbp);
1818 if (!err)
1819 err = ufshcd_copy_query_response(hba, lrbp);
Dolev Raviv68078d52013-07-30 00:35:58 +05301820 break;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301821 case UPIU_TRANSACTION_REJECT_UPIU:
1822 /* TODO: handle Reject UPIU Response */
1823 err = -EPERM;
1824 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
1825 __func__);
1826 break;
1827 default:
1828 err = -EINVAL;
1829 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
1830 __func__, resp);
1831 break;
1832 }
1833
1834 return err;
1835}
1836
1837static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
1838 struct ufshcd_lrb *lrbp, int max_timeout)
1839{
1840 int err = 0;
1841 unsigned long time_left;
1842 unsigned long flags;
1843
1844 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
1845 msecs_to_jiffies(max_timeout));
1846
Gilad Bronerad1a1b92016-10-17 17:09:36 -07001847 /* Make sure descriptors are ready before ringing the doorbell */
1848 wmb();
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301849 spin_lock_irqsave(hba->host->host_lock, flags);
1850 hba->dev_cmd.complete = NULL;
1851 if (likely(time_left)) {
1852 err = ufshcd_get_tr_ocs(lrbp);
1853 if (!err)
1854 err = ufshcd_dev_cmd_completion(hba, lrbp);
1855 }
1856 spin_unlock_irqrestore(hba->host->host_lock, flags);
1857
1858 if (!time_left) {
1859 err = -ETIMEDOUT;
Yaniv Gardia48353f2016-02-01 15:02:40 +02001860 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
1861 __func__, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301862 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
Yaniv Gardia48353f2016-02-01 15:02:40 +02001863 /* successfully cleared the command, retry if needed */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301864 err = -EAGAIN;
Yaniv Gardia48353f2016-02-01 15:02:40 +02001865 /*
1866 * in case of an error, after clearing the doorbell,
1867 * we also need to clear the outstanding_request
1868 * field in hba
1869 */
1870 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301871 }
1872
1873 return err;
1874}
1875
1876/**
1877 * ufshcd_get_dev_cmd_tag - Get device management command tag
1878 * @hba: per-adapter instance
 1879 * @tag_out: pointer to variable with available slot value
1880 *
1881 * Get a free slot and lock it until device management command
1882 * completes.
1883 *
1884 * Returns false if free slot is unavailable for locking, else
 1885 * return true with the tag value in @tag_out.
1886 */
1887static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
1888{
1889 int tag;
1890 bool ret = false;
1891 unsigned long tmp;
1892
1893 if (!tag_out)
1894 goto out;
1895
1896 do {
1897 tmp = ~hba->lrb_in_use;
1898 tag = find_last_bit(&tmp, hba->nutrs);
1899 if (tag >= hba->nutrs)
1900 goto out;
1901 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
1902
1903 *tag_out = tag;
1904 ret = true;
1905out:
1906 return ret;
1907}
1908
1909static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
1910{
1911 clear_bit_unlock(tag, &hba->lrb_in_use);
1912}
1913
1914/**
1915 * ufshcd_exec_dev_cmd - API for sending device management requests
1916 * @hba - UFS hba
1917 * @cmd_type - specifies the type (NOP, Query...)
 1918 * @timeout - timeout in milliseconds
1919 *
Dolev Raviv68078d52013-07-30 00:35:58 +05301920 * NOTE: Since there is only one available tag for device management commands,
1921 * it is expected you hold the hba->dev_cmd.lock mutex.
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301922 */
1923static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
1924 enum dev_cmd_type cmd_type, int timeout)
1925{
1926 struct ufshcd_lrb *lrbp;
1927 int err;
1928 int tag;
1929 struct completion wait;
1930 unsigned long flags;
1931
1932 /*
1933 * Get free slot, sleep if slots are unavailable.
1934 * Even though we use wait_event() which sleeps indefinitely,
1935 * the maximum wait time is bounded by SCSI request timeout.
1936 */
1937 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
1938
1939 init_completion(&wait);
1940 lrbp = &hba->lrb[tag];
1941 WARN_ON(lrbp->cmd);
1942 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
1943 if (unlikely(err))
1944 goto out_put_tag;
1945
1946 hba->dev_cmd.complete = &wait;
1947
Yaniv Gardie3dfdc52016-02-01 15:02:49 +02001948 /* Make sure descriptors are ready before ringing the doorbell */
1949 wmb();
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301950 spin_lock_irqsave(hba->host->host_lock, flags);
Kiwoong Kim0e675ef2016-11-10 21:14:36 +09001951 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301952 ufshcd_send_command(hba, tag);
1953 spin_unlock_irqrestore(hba->host->host_lock, flags);
1954
1955 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
1956
1957out_put_tag:
1958 ufshcd_put_dev_cmd_tag(hba, tag);
1959 wake_up(&hba->dev_cmd.tag_wq);
1960 return err;
1961}
1962
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301963/**
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001964 * ufshcd_init_query() - init the query response and request parameters
1965 * @hba: per-adapter instance
1966 * @request: address of the request pointer to be initialized
1967 * @response: address of the response pointer to be initialized
1968 * @opcode: operation to perform
1969 * @idn: flag idn to access
1970 * @index: LU number to access
1971 * @selector: query/flag/descriptor further identification
1972 */
1973static inline void ufshcd_init_query(struct ufs_hba *hba,
1974 struct ufs_query_req **request, struct ufs_query_res **response,
1975 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
1976{
1977 *request = &hba->dev_cmd.query.request;
1978 *response = &hba->dev_cmd.query.response;
1979 memset(*request, 0, sizeof(struct ufs_query_req));
1980 memset(*response, 0, sizeof(struct ufs_query_res));
1981 (*request)->upiu_req.opcode = opcode;
1982 (*request)->upiu_req.idn = idn;
1983 (*request)->upiu_req.index = index;
1984 (*request)->upiu_req.selector = selector;
1985}
1986
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02001987static int ufshcd_query_flag_retry(struct ufs_hba *hba,
1988 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
1989{
1990 int ret;
1991 int retries;
1992
1993 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
1994 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
1995 if (ret)
1996 dev_dbg(hba->dev,
1997 "%s: failed with error %d, retries %d\n",
1998 __func__, ret, retries);
1999 else
2000 break;
2001 }
2002
2003 if (ret)
2004 dev_err(hba->dev,
2005 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
2006 __func__, opcode, idn, ret, retries);
2007 return ret;
2008}
2009
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002010/**
Dolev Raviv68078d52013-07-30 00:35:58 +05302011 * ufshcd_query_flag() - API function for sending flag query requests
 2012 * @hba: per-adapter instance
 2013 * @opcode: flag query to perform
 2014 * @idn: flag idn to access
 2015 * @flag_res: the flag value after the query request completes
2016 *
2017 * Returns 0 for success, non-zero in case of failure
2018 */
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02002019int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
Dolev Raviv68078d52013-07-30 00:35:58 +05302020 enum flag_idn idn, bool *flag_res)
2021{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002022 struct ufs_query_req *request = NULL;
2023 struct ufs_query_res *response = NULL;
2024 int err, index = 0, selector = 0;
Yaniv Gardie5ad4062016-02-01 15:02:41 +02002025 int timeout = QUERY_REQ_TIMEOUT;
Dolev Raviv68078d52013-07-30 00:35:58 +05302026
2027 BUG_ON(!hba);
2028
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002029 ufshcd_hold(hba, false);
Dolev Raviv68078d52013-07-30 00:35:58 +05302030 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002031 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2032 selector);
Dolev Raviv68078d52013-07-30 00:35:58 +05302033
2034 switch (opcode) {
2035 case UPIU_QUERY_OPCODE_SET_FLAG:
2036 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2037 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2038 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2039 break;
2040 case UPIU_QUERY_OPCODE_READ_FLAG:
2041 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2042 if (!flag_res) {
2043 /* No dummy reads */
2044 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2045 __func__);
2046 err = -EINVAL;
2047 goto out_unlock;
2048 }
2049 break;
2050 default:
2051 dev_err(hba->dev,
2052 "%s: Expected query flag opcode but got = %d\n",
2053 __func__, opcode);
2054 err = -EINVAL;
2055 goto out_unlock;
2056 }
Dolev Raviv68078d52013-07-30 00:35:58 +05302057
Yaniv Gardie5ad4062016-02-01 15:02:41 +02002058 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
Dolev Raviv68078d52013-07-30 00:35:58 +05302059
2060 if (err) {
2061 dev_err(hba->dev,
2062 "%s: Sending flag query for idn %d failed, err = %d\n",
2063 __func__, idn, err);
2064 goto out_unlock;
2065 }
2066
2067 if (flag_res)
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302068 *flag_res = (be32_to_cpu(response->upiu_res.value) &
Dolev Raviv68078d52013-07-30 00:35:58 +05302069 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2070
2071out_unlock:
2072 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002073 ufshcd_release(hba);
Dolev Raviv68078d52013-07-30 00:35:58 +05302074 return err;
2075}
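
/*
 * Example usage (illustrative sketch, not part of the original flow):
 * polling the fDeviceInit flag after device initialization has been
 * requested:
 *
 *	bool flag_res = true;
 *	int err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *			QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */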
2076
2077/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302078 * ufshcd_query_attr - API function for sending attribute requests
 2079 * @hba: per-adapter instance
 2080 * @opcode: attribute opcode
 2081 * @idn: attribute idn to access
 2082 * @index: index field
 2083 * @selector: selector field
 2084 * @attr_val: the attribute value after the query request completes
2085 *
2086 * Returns 0 for success, non-zero in case of failure
 2087 */
Sujit Reddy Thummabdbe5d22014-05-26 10:59:11 +05302088static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302089 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2090{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002091 struct ufs_query_req *request = NULL;
2092 struct ufs_query_res *response = NULL;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302093 int err;
2094
2095 BUG_ON(!hba);
2096
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002097 ufshcd_hold(hba, false);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302098 if (!attr_val) {
2099 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2100 __func__, opcode);
2101 err = -EINVAL;
2102 goto out;
2103 }
2104
2105 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002106 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2107 selector);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302108
2109 switch (opcode) {
2110 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2111 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302112 request->upiu_req.value = cpu_to_be32(*attr_val);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302113 break;
2114 case UPIU_QUERY_OPCODE_READ_ATTR:
2115 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2116 break;
2117 default:
2118 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2119 __func__, opcode);
2120 err = -EINVAL;
2121 goto out_unlock;
2122 }
2123
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002124 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302125
2126 if (err) {
Yaniv Gardi4b761b52016-11-23 16:31:18 -08002127 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2128 __func__, opcode, idn, index, err);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302129 goto out_unlock;
2130 }
2131
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302132 *attr_val = be32_to_cpu(response->upiu_res.value);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302133
2134out_unlock:
2135 mutex_unlock(&hba->dev_cmd.lock);
2136out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002137 ufshcd_release(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302138 return err;
2139}
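
/*
 * Example usage (illustrative sketch): reading the active ICC level
 * attribute; QUERY_ATTR_IDN_ACTIVE_ICC_LVL is assumed from ufs.h:
 *
 *	u32 icc_level;
 *	int err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *			QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
 */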
2140
2141/**
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02002142 * ufshcd_query_attr_retry() - API function for sending query
2143 * attribute with retries
2144 * @hba: per-adapter instance
2145 * @opcode: attribute opcode
2146 * @idn: attribute idn to access
2147 * @index: index field
2148 * @selector: selector field
2149 * @attr_val: the attribute value after the query request
2150 * completes
2151 *
2152 * Returns 0 for success, non-zero in case of failure
 2153 */
2154static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2155 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2156 u32 *attr_val)
2157{
2158 int ret = 0;
2159 u32 retries;
2160
2161 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2162 ret = ufshcd_query_attr(hba, opcode, idn, index,
2163 selector, attr_val);
2164 if (ret)
2165 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2166 __func__, ret, retries);
2167 else
2168 break;
2169 }
2170
2171 if (ret)
2172 dev_err(hba->dev,
2173 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
2174 __func__, idn, ret, QUERY_REQ_RETRIES);
2175 return ret;
2176}
2177
Yaniv Gardia70e91b2016-03-10 17:37:14 +02002178static int __ufshcd_query_descriptor(struct ufs_hba *hba,
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002179 enum query_opcode opcode, enum desc_idn idn, u8 index,
2180 u8 selector, u8 *desc_buf, int *buf_len)
2181{
2182 struct ufs_query_req *request = NULL;
2183 struct ufs_query_res *response = NULL;
2184 int err;
2185
2186 BUG_ON(!hba);
2187
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002188 ufshcd_hold(hba, false);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002189 if (!desc_buf) {
2190 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2191 __func__, opcode);
2192 err = -EINVAL;
2193 goto out;
2194 }
2195
2196 if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2197 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2198 __func__, *buf_len);
2199 err = -EINVAL;
2200 goto out;
2201 }
2202
2203 mutex_lock(&hba->dev_cmd.lock);
2204 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2205 selector);
2206 hba->dev_cmd.query.descriptor = desc_buf;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03002207 request->upiu_req.length = cpu_to_be16(*buf_len);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002208
2209 switch (opcode) {
2210 case UPIU_QUERY_OPCODE_WRITE_DESC:
2211 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2212 break;
2213 case UPIU_QUERY_OPCODE_READ_DESC:
2214 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2215 break;
2216 default:
2217 dev_err(hba->dev,
2218 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
2219 __func__, opcode);
2220 err = -EINVAL;
2221 goto out_unlock;
2222 }
2223
2224 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2225
2226 if (err) {
Yaniv Gardi4b761b52016-11-23 16:31:18 -08002227 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2228 __func__, opcode, idn, index, err);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002229 goto out_unlock;
2230 }
2231
2232 hba->dev_cmd.query.descriptor = NULL;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03002233 *buf_len = be16_to_cpu(response->upiu_res.length);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002234
2235out_unlock:
2236 mutex_unlock(&hba->dev_cmd.lock);
2237out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002238 ufshcd_release(hba);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002239 return err;
2240}
2241
2242/**
Yaniv Gardia70e91b2016-03-10 17:37:14 +02002243 * ufshcd_query_descriptor_retry - API function for sending descriptor
2244 * requests
 2245 * @hba: per-adapter instance
 2246 * @opcode: descriptor query opcode to perform
 2247 * @idn: descriptor idn to access
 2248 * @index: index field
 2249 * @selector: selector field
 2250 * @desc_buf: the buffer that contains the descriptor
 2251 * @buf_len: length parameter passed to the device
2252 *
2253 * Returns 0 for success, non-zero in case of failure.
2254 * The buf_len parameter will contain, on return, the length parameter
2255 * received on the response.
2256 */
2257int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2258 enum query_opcode opcode, enum desc_idn idn, u8 index,
2259 u8 selector, u8 *desc_buf, int *buf_len)
2260{
2261 int err;
2262 int retries;
2263
2264 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2265 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
2266 selector, desc_buf, buf_len);
2267 if (!err || err == -EINVAL)
2268 break;
2269 }
2270
2271 return err;
2272}
2273EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
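
/*
 * Example usage (illustrative sketch): reading the full device descriptor;
 * QUERY_DESC_DEVICE_MAX_SIZE is assumed from ufs.h:
 *
 *	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
 *	int buf_len = QUERY_DESC_DEVICE_MAX_SIZE;
 *	int err = ufshcd_query_descriptor_retry(hba,
 *			UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE,
 *			0, 0, desc_buf, &buf_len);
 */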
2274
2275/**
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002276 * ufshcd_read_desc_param - read the specified descriptor parameter
2277 * @hba: Pointer to adapter instance
2278 * @desc_id: descriptor idn value
2279 * @desc_index: descriptor index
2280 * @param_offset: offset of the parameter to read
2281 * @param_read_buf: pointer to buffer where parameter would be read
2282 * @param_size: sizeof(param_read_buf)
2283 *
2284 * Return 0 in case of success, non-zero otherwise
2285 */
2286static int ufshcd_read_desc_param(struct ufs_hba *hba,
2287 enum desc_idn desc_id,
2288 int desc_index,
2289 u32 param_offset,
2290 u8 *param_read_buf,
2291 u32 param_size)
2292{
2293 int ret;
2294 u8 *desc_buf;
2295 u32 buff_len;
2296 bool is_kmalloc = true;
2297
2298 /* safety checks */
2299 if (desc_id >= QUERY_DESC_IDN_MAX)
2300 return -EINVAL;
2301
2302 buff_len = ufs_query_desc_max_size[desc_id];
2303 if ((param_offset + param_size) > buff_len)
2304 return -EINVAL;
2305
2306 if (!param_offset && (param_size == buff_len)) {
2307 /* memory space already available to hold full descriptor */
2308 desc_buf = param_read_buf;
2309 is_kmalloc = false;
2310 } else {
2311 /* allocate memory to hold full descriptor */
2312 desc_buf = kmalloc(buff_len, GFP_KERNEL);
2313 if (!desc_buf)
2314 return -ENOMEM;
2315 }
2316
Yaniv Gardia70e91b2016-03-10 17:37:14 +02002317 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2318 desc_id, desc_index, 0, desc_buf,
2319 &buff_len);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002320
subhashj@codeaurora.orgbde44bb2016-11-23 16:31:41 -08002321 if (ret) {
2322 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
2323 __func__, desc_id, desc_index, param_offset, ret);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002324
2325 goto out;
2326 }
2327
subhashj@codeaurora.orgbde44bb2016-11-23 16:31:41 -08002328 /* Sanity check */
2329 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
2330 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
2331 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
2332 ret = -EINVAL;
2333 goto out;
2334 }
2335
2336 /*
2337 * While reading variable size descriptors (like string descriptor),
2338 * some UFS devices may report the "LENGTH" (field in "Transaction
2339 * Specific fields" of Query Response UPIU) same as what was requested
2340 * in Query Request UPIU instead of reporting the actual size of the
2341 * variable size descriptor.
 2342 * It's safe to ignore the "LENGTH" field for variable size descriptors,
 2343 * as we can always derive the length of the descriptor from the
 2344 * descriptor header fields. Hence the length match check is imposed only
 2345 * for fixed size descriptors (for which we always request the correct
 2346 * size as part of the Query Request UPIU).
2347 */
2348 if ((desc_id != QUERY_DESC_IDN_STRING) &&
2349 (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
2350 dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
2351 __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
2352 ret = -EINVAL;
2353 goto out;
2354 }
2355
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002356 if (is_kmalloc)
2357 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
2358out:
2359 if (is_kmalloc)
2360 kfree(desc_buf);
2361 return ret;
2362}
2363
2364static inline int ufshcd_read_desc(struct ufs_hba *hba,
2365 enum desc_idn desc_id,
2366 int desc_index,
2367 u8 *buf,
2368 u32 size)
2369{
2370 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
2371}
2372
2373static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
2374 u8 *buf,
2375 u32 size)
2376{
Dolev Raviv61e07352016-11-23 16:30:49 -08002377 int err = 0;
2378 int retries;
2379
2380 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 2381 /* Read descriptor */
2382 err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
2383 if (!err)
2384 break;
2385 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
2386 }
2387
2388 return err;
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002389}
2390
Yaniv Gardib573d482016-03-10 17:37:09 +02002391int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
2392{
2393 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
2394}
2395EXPORT_SYMBOL(ufshcd_read_device_desc);
2396
2397/**
2398 * ufshcd_read_string_desc - read string descriptor
2399 * @hba: pointer to adapter instance
2400 * @desc_index: descriptor index
2401 * @buf: pointer to buffer where descriptor would be read
2402 * @size: size of buf
2403 * @ascii: if true convert from unicode to ascii characters
2404 *
2405 * Return 0 in case of success, non-zero otherwise
2406 */
2407int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
2408 u32 size, bool ascii)
2409{
2410 int err = 0;
2411
2412 err = ufshcd_read_desc(hba,
2413 QUERY_DESC_IDN_STRING, desc_index, buf, size);
2414
2415 if (err) {
2416 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
2417 __func__, QUERY_REQ_RETRIES, err);
2418 goto out;
2419 }
2420
2421 if (ascii) {
2422 int desc_len;
2423 int ascii_len;
2424 int i;
2425 char *buff_ascii;
2426
2427 desc_len = buf[0];
2428 /* remove header and divide by 2 to move from UTF16 to UTF8 */
2429 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
2430 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
2431 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
2432 __func__);
2433 err = -ENOMEM;
2434 goto out;
2435 }
2436
2437 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
2438 if (!buff_ascii) {
2439 err = -ENOMEM;
Tiezhu Yangfcbefc32016-06-25 12:35:22 +08002440 goto out;
Yaniv Gardib573d482016-03-10 17:37:09 +02002441 }
2442
2443 /*
2444 * the descriptor contains string in UTF16 format
2445 * we need to convert to utf-8 so it can be displayed
2446 */
2447 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
2448 desc_len - QUERY_DESC_HDR_SIZE,
2449 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
2450
2451 /* replace non-printable or non-ASCII characters with spaces */
2452 for (i = 0; i < ascii_len; i++)
2453 ufshcd_remove_non_printable(&buff_ascii[i]);
2454
2455 memset(buf + QUERY_DESC_HDR_SIZE, 0,
2456 size - QUERY_DESC_HDR_SIZE);
2457 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
2458 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
Yaniv Gardib573d482016-03-10 17:37:09 +02002459 kfree(buff_ascii);
2460 }
2461out:
2462 return err;
2463}
2464EXPORT_SYMBOL(ufshcd_read_string_desc);
2465
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002466/**
2467 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
2468 * @hba: Pointer to adapter instance
2469 * @lun: lun id
2470 * @param_offset: offset of the parameter to read
2471 * @param_read_buf: pointer to buffer where parameter would be read
2472 * @param_size: sizeof(param_read_buf)
2473 *
2474 * Return 0 in case of success, non-zero otherwise
2475 */
2476static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
2477 int lun,
2478 enum unit_desc_param param_offset,
2479 u8 *param_read_buf,
2480 u32 param_size)
2481{
2482 /*
2483 * Unit descriptors are only available for general purpose LUs (LUN id
2484 * from 0 to 7) and RPMB Well known LU.
2485 */
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03002486 if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002487 return -EOPNOTSUPP;
2488
2489 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
2490 param_offset, param_read_buf, param_size);
2491}
2492
2493/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302494 * ufshcd_memory_alloc - allocate memory for host memory space data structures
2495 * @hba: per adapter instance
2496 *
2497 * 1. Allocate DMA memory for Command Descriptor array
2498 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
2499 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
2500 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
2501 * (UTMRDL)
2502 * 4. Allocate memory for local reference block(lrb).
2503 *
2504 * Returns 0 for success, non-zero in case of failure
2505 */
2506static int ufshcd_memory_alloc(struct ufs_hba *hba)
2507{
2508 size_t utmrdl_size, utrdl_size, ucdl_size;
2509
2510 /* Allocate memory for UTP command descriptors */
2511 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09002512 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
2513 ucdl_size,
2514 &hba->ucdl_dma_addr,
2515 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302516
2517 /*
 2518 * UFSHCI requires UTP command descriptors to be 128 byte aligned.
 2519 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE:
 2520 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
 2521 * be aligned to 128 bytes as well
2522 */
2523 if (!hba->ucdl_base_addr ||
2524 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302525 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302526 "Command Descriptor Memory allocation failed\n");
2527 goto out;
2528 }
2529
2530 /*
2531 * Allocate memory for UTP Transfer descriptors
2532 * UFSHCI requires 1024 byte alignment of UTRD
2533 */
2534 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09002535 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
2536 utrdl_size,
2537 &hba->utrdl_dma_addr,
2538 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302539 if (!hba->utrdl_base_addr ||
2540 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302541 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302542 "Transfer Descriptor Memory allocation failed\n");
2543 goto out;
2544 }
2545
2546 /*
2547 * Allocate memory for UTP Task Management descriptors
2548 * UFSHCI requires 1024 byte alignment of UTMRD
2549 */
2550 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
Seungwon Jeon2953f852013-06-27 13:31:54 +09002551 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
2552 utmrdl_size,
2553 &hba->utmrdl_dma_addr,
2554 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302555 if (!hba->utmrdl_base_addr ||
2556 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302557 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302558 "Task Management Descriptor Memory allocation failed\n");
2559 goto out;
2560 }
2561
2562 /* Allocate memory for local reference block */
Seungwon Jeon2953f852013-06-27 13:31:54 +09002563 hba->lrb = devm_kzalloc(hba->dev,
2564 hba->nutrs * sizeof(struct ufshcd_lrb),
2565 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302566 if (!hba->lrb) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302567 dev_err(hba->dev, "LRB Memory allocation failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302568 goto out;
2569 }
2570 return 0;
2571out:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302572 return -ENOMEM;
2573}
2574
2575/**
Dolev Raviv66cc8202016-12-22 18:39:42 -08002576 * ufshcd_print_pwr_info - print the power params as saved in
 2577 * hba->pwr_info
2578 * @hba: per-adapter instance
2579 */
2580static void ufshcd_print_pwr_info(struct ufs_hba *hba)
2581{
2582 static const char * const names[] = {
2583 "INVALID MODE",
2584 "FAST MODE",
2585 "SLOW_MODE",
2586 "INVALID MODE",
2587 "FASTAUTO_MODE",
2588 "SLOWAUTO_MODE",
2589 "INVALID MODE",
2590 };
2591
 2592 dev_info(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane=[%d, %d], pwr=[%s, %s], rate = %d\n",
2593 __func__,
2594 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
2595 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
2596 names[hba->pwr_info.pwr_rx],
2597 names[hba->pwr_info.pwr_tx],
2598 hba->pwr_info.hs_rate);
2599}
2600
2601/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302602 * ufshcd_host_memory_configure - configure local reference block with
2603 * memory offsets
2604 * @hba: per adapter instance
2605 *
2606 * Configure Host memory space
2607 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
2608 * address.
2609 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
2610 * and PRDT offset.
2611 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
2612 * into local reference block.
2613 */
2614static void ufshcd_host_memory_configure(struct ufs_hba *hba)
2615{
2616 struct utp_transfer_cmd_desc *cmd_descp;
2617 struct utp_transfer_req_desc *utrdlp;
2618 dma_addr_t cmd_desc_dma_addr;
2619 dma_addr_t cmd_desc_element_addr;
2620 u16 response_offset;
2621 u16 prdt_offset;
2622 int cmd_desc_size;
2623 int i;
2624
2625 utrdlp = hba->utrdl_base_addr;
2626 cmd_descp = hba->ucdl_base_addr;
2627
2628 response_offset =
2629 offsetof(struct utp_transfer_cmd_desc, response_upiu);
2630 prdt_offset =
2631 offsetof(struct utp_transfer_cmd_desc, prd_table);
2632
2633 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
2634 cmd_desc_dma_addr = hba->ucdl_dma_addr;
2635
2636 for (i = 0; i < hba->nutrs; i++) {
2637 /* Configure UTRD with command descriptor base address */
2638 cmd_desc_element_addr =
2639 (cmd_desc_dma_addr + (cmd_desc_size * i));
2640 utrdlp[i].command_desc_base_addr_lo =
2641 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
2642 utrdlp[i].command_desc_base_addr_hi =
2643 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
2644
 2645 /*
		 * Response upiu and prdt offsets are in double words, or in
		 * bytes when UFSHCD_QUIRK_PRDT_BYTE_GRAN is set.
		 */
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002646 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
2647 utrdlp[i].response_upiu_offset =
2648 cpu_to_le16(response_offset);
2649 utrdlp[i].prd_table_offset =
2650 cpu_to_le16(prdt_offset);
2651 utrdlp[i].response_upiu_length =
2652 cpu_to_le16(ALIGNED_UPIU_SIZE);
2653 } else {
2654 utrdlp[i].response_upiu_offset =
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302655 cpu_to_le16((response_offset >> 2));
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002656 utrdlp[i].prd_table_offset =
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302657 cpu_to_le16((prdt_offset >> 2));
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002658 utrdlp[i].response_upiu_length =
Sujit Reddy Thumma3ca316c2013-06-26 22:39:30 +05302659 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
Kiwoong Kim75b1cc42016-11-22 17:06:59 +09002660 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302661
2662 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302663 hba->lrb[i].ucd_req_ptr =
2664 (struct utp_upiu_req *)(cmd_descp + i);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302665 hba->lrb[i].ucd_rsp_ptr =
2666 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2667 hba->lrb[i].ucd_prdt_ptr =
2668 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2669 }
2670}
2671
2672/**
2673 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
2674 * @hba: per adapter instance
2675 *
2676 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
2677 * in order to initialize the Unipro link startup procedure.
2678 * Once the Unipro links are up, the device connected to the controller
2679 * is detected.
2680 *
2681 * Returns 0 on success, non-zero value on failure
2682 */
2683static int ufshcd_dme_link_startup(struct ufs_hba *hba)
2684{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302685 struct uic_command uic_cmd = {0};
2686 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302687
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302688 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
2689
2690 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2691 if (ret)
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302692 dev_err(hba->dev,
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302693 "dme-link-startup: error code %d\n", ret);
2694 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302695}
2696
Yaniv Gardicad2e032015-03-31 17:37:14 +03002697static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
2698{
2699 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
2700 unsigned long min_sleep_time_us;
2701
2702 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
2703 return;
2704
2705 /*
2706 * last_dme_cmd_tstamp will be 0 only for 1st call to
2707 * this function
2708 */
2709 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
2710 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
2711 } else {
2712 unsigned long delta =
2713 (unsigned long) ktime_to_us(
2714 ktime_sub(ktime_get(),
2715 hba->last_dme_cmd_tstamp));
2716
2717 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
2718 min_sleep_time_us =
2719 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
2720 else
2721 return; /* no more delay required */
2722 }
2723
2724 /* allow sleep for extra 50us if needed */
2725 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
2726}
2727
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302728/**
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302729 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
2730 * @hba: per adapter instance
2731 * @attr_sel: uic command argument1
2732 * @attr_set: attribute set type as uic command argument2
2733 * @mib_val: setting value as uic command argument3
2734 * @peer: indicate whether peer or local
2735 *
2736 * Returns 0 on success, non-zero value on failure
2737 */
2738int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
2739 u8 attr_set, u32 mib_val, u8 peer)
2740{
2741 struct uic_command uic_cmd = {0};
2742 static const char *const action[] = {
2743 "dme-set",
2744 "dme-peer-set"
2745 };
2746 const char *set = action[!!peer];
2747 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002748 int retries = UFS_UIC_COMMAND_RETRIES;
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302749
2750 uic_cmd.command = peer ?
2751 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
2752 uic_cmd.argument1 = attr_sel;
2753 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
2754 uic_cmd.argument3 = mib_val;
2755
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002756 do {
2757 /* for peer attributes we retry upon failure */
2758 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2759 if (ret)
2760 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
2761 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
2762 } while (ret && peer && --retries);
2763
Yaniv Gardif37e9f82016-11-23 16:32:49 -08002764 if (ret)
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002765 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
Yaniv Gardif37e9f82016-11-23 16:32:49 -08002766 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
2767 UFS_UIC_COMMAND_RETRIES - retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302768
2769 return ret;
2770}
2771EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
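
/*
 * Example usage (illustrative sketch, via the ufshcd_dme_set() wrapper
 * declared in ufshcd.h): programming the number of active TX data lanes:
 *
 *	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
 *			     pwr_mode->lane_tx);
 */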
2772
2773/**
2774 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
2775 * @hba: per adapter instance
2776 * @attr_sel: uic command argument1
2777 * @mib_val: the value of the attribute as returned by the UIC command
2778 * @peer: indicate whether peer or local
2779 *
2780 * Returns 0 on success, non-zero value on failure
2781 */
2782int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
2783 u32 *mib_val, u8 peer)
2784{
2785 struct uic_command uic_cmd = {0};
2786 static const char *const action[] = {
2787 "dme-get",
2788 "dme-peer-get"
2789 };
2790 const char *get = action[!!peer];
2791 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002792 int retries = UFS_UIC_COMMAND_RETRIES;
Yaniv Gardi874237f2015-05-17 18:55:03 +03002793 struct ufs_pa_layer_attr orig_pwr_info;
2794 struct ufs_pa_layer_attr temp_pwr_info;
2795 bool pwr_mode_change = false;
2796
2797 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
2798 orig_pwr_info = hba->pwr_info;
2799 temp_pwr_info = orig_pwr_info;
2800
2801 if (orig_pwr_info.pwr_tx == FAST_MODE ||
2802 orig_pwr_info.pwr_rx == FAST_MODE) {
2803 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
2804 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
2805 pwr_mode_change = true;
2806 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
2807 orig_pwr_info.pwr_rx == SLOW_MODE) {
2808 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
2809 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
2810 pwr_mode_change = true;
2811 }
2812 if (pwr_mode_change) {
2813 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
2814 if (ret)
2815 goto out;
2816 }
2817 }
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302818
2819 uic_cmd.command = peer ?
2820 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
2821 uic_cmd.argument1 = attr_sel;
2822
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002823 do {
2824 /* for peer attributes we retry upon failure */
2825 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2826 if (ret)
2827 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
2828 get, UIC_GET_ATTR_ID(attr_sel), ret);
2829 } while (ret && peer && --retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302830
Yaniv Gardif37e9f82016-11-23 16:32:49 -08002831 if (ret)
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002832 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
Yaniv Gardif37e9f82016-11-23 16:32:49 -08002833 get, UIC_GET_ATTR_ID(attr_sel),
2834 UFS_UIC_COMMAND_RETRIES - retries);
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002835
2836 if (mib_val && !ret)
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302837 *mib_val = uic_cmd.argument3;
Yaniv Gardi874237f2015-05-17 18:55:03 +03002838
2839 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
2840 && pwr_mode_change)
2841 ufshcd_change_power_mode(hba, &orig_pwr_info);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302842out:
2843 return ret;
2844}
2845EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
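
/*
 * Example usage (illustrative sketch, via the ufshcd_dme_get() wrapper
 * declared in ufshcd.h): reading the maximum supported RX HS gear:
 *
 *	u32 max_rx_gear;
 *	int err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
 *				 &max_rx_gear);
 */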
2846
2847/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002848 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
2849 * state) and waits for it to take effect.
2850 *
2851 * @hba: per adapter instance
2852 * @cmd: UIC command to execute
2853 *
2854 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 2855 * DME_HIBERNATE_EXIT commands take some time to take their effect on both host
 2856 * and device UniPro link and hence their final completion would be indicated by
2857 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
2858 * addition to normal UIC command completion Status (UCCS). This function only
2859 * returns after the relevant status bits indicate the completion.
2860 *
2861 * Returns 0 on success, non-zero value on failure
2862 */
2863static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
2864{
2865 struct completion uic_async_done;
2866 unsigned long flags;
2867 u8 status;
2868 int ret;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002869 bool reenable_intr = false;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002870
2871 mutex_lock(&hba->uic_cmd_mutex);
2872 init_completion(&uic_async_done);
Yaniv Gardicad2e032015-03-31 17:37:14 +03002873 ufshcd_add_delay_before_dme_cmd(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002874
2875 spin_lock_irqsave(hba->host->host_lock, flags);
2876 hba->uic_async_done = &uic_async_done;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002877 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
2878 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
2879 /*
2880 * Make sure UIC command completion interrupt is disabled before
2881 * issuing UIC command.
2882 */
2883 wmb();
2884 reenable_intr = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002885 }
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002886 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
2887 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002888 if (ret) {
2889 dev_err(hba->dev,
2890 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
2891 cmd->command, cmd->argument3, ret);
2892 goto out;
2893 }
2894
2895 if (!wait_for_completion_timeout(hba->uic_async_done,
2896 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2897 dev_err(hba->dev,
2898 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
2899 cmd->command, cmd->argument3);
2900 ret = -ETIMEDOUT;
2901 goto out;
2902 }
2903
2904 status = ufshcd_get_upmcrs(hba);
2905 if (status != PWR_LOCAL) {
2906 dev_err(hba->dev,
Kiwoong Kim73615422016-09-08 16:50:02 +09002907 "pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002908 cmd->command, status);
2909 ret = (status != PWR_OK) ? status : -1;
2910 }
2911out:
2912 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002913 hba->active_uic_cmd = NULL;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002914 hba->uic_async_done = NULL;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002915 if (reenable_intr)
2916 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002917 spin_unlock_irqrestore(hba->host->host_lock, flags);
2918 mutex_unlock(&hba->uic_cmd_mutex);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002919
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002920 return ret;
2921}
2922
2923/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302924 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
2925 * using DME_SET primitives.
2926 * @hba: per adapter instance
2927 * @mode: power mode value
2928 *
2929 * Returns 0 on success, non-zero value on failure
2930 */
Sujit Reddy Thummabdbe5d22014-05-26 10:59:11 +05302931static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302932{
2933 struct uic_command uic_cmd = {0};
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002934 int ret;
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302935
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03002936 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
2937 ret = ufshcd_dme_set(hba,
2938 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
2939 if (ret) {
2940 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
2941 __func__, ret);
2942 goto out;
2943 }
2944 }
2945
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302946 uic_cmd.command = UIC_CMD_DME_SET;
2947 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
2948 uic_cmd.argument3 = mode;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002949 ufshcd_hold(hba, false);
2950 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2951 ufshcd_release(hba);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302952
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03002953out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002954 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002955}
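
/*
 * Note on the @mode encoding above (worked example): callers pack the RX
 * power mode into the upper nibble and the TX power mode into the lower
 * nibble, see ufshcd_change_power_mode() below:
 *
 *	mode = pwr_mode->pwr_rx << 4 | pwr_mode->pwr_tx;
 *
 * so e.g. FAST_MODE (0x1) in both directions yields mode = 0x11.
 */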
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302956
Yaniv Gardi53c12d02016-02-01 15:02:45 +02002957static int ufshcd_link_recovery(struct ufs_hba *hba)
2958{
2959 int ret;
2960 unsigned long flags;
2961
2962 spin_lock_irqsave(hba->host->host_lock, flags);
2963 hba->ufshcd_state = UFSHCD_STATE_RESET;
2964 ufshcd_set_eh_in_progress(hba);
2965 spin_unlock_irqrestore(hba->host->host_lock, flags);
2966
2967 ret = ufshcd_host_reset_and_restore(hba);
2968
2969 spin_lock_irqsave(hba->host->host_lock, flags);
2970 if (ret)
2971 hba->ufshcd_state = UFSHCD_STATE_ERROR;
2972 ufshcd_clear_eh_in_progress(hba);
2973 spin_unlock_irqrestore(hba->host->host_lock, flags);
2974
2975 if (ret)
2976 dev_err(hba->dev, "%s: link recovery failed, err %d",
2977 __func__, ret);
2978
2979 return ret;
2980}
2981
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02002982static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002983{
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02002984 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002985 struct uic_command uic_cmd = {0};
2986
Kiwoong Kimee32c902016-11-10 21:17:43 +09002987 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
2988
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002989 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02002990 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002991
Yaniv Gardi53c12d02016-02-01 15:02:45 +02002992 if (ret) {
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02002993 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
2994 __func__, ret);
2995
Yaniv Gardi53c12d02016-02-01 15:02:45 +02002996 /*
2997		 * If link recovery fails then return an error so that the
2998		 * caller doesn't retry the hibern8 enter again.
2999 */
3000 if (ufshcd_link_recovery(hba))
3001 ret = -ENOLINK;
Kiwoong Kimee32c902016-11-10 21:17:43 +09003002 } else
3003 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3004 POST_CHANGE);
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003005
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02003006 return ret;
3007}
3008
3009static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3010{
3011 int ret = 0, retries;
3012
3013 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3014 ret = __ufshcd_uic_hibern8_enter(hba);
3015 if (!ret || ret == -ENOLINK)
3016 goto out;
3017 }
3018out:
3019 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003020}
3021
3022static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3023{
3024 struct uic_command uic_cmd = {0};
3025 int ret;
3026
Kiwoong Kimee32c902016-11-10 21:17:43 +09003027 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3028
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003029 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3030 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303031 if (ret) {
Yaniv Gardi53c12d02016-02-01 15:02:45 +02003032 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3033 __func__, ret);
3034 ret = ufshcd_link_recovery(hba);
Kiwoong Kimee32c902016-11-10 21:17:43 +09003035 } else
3036 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3037 POST_CHANGE);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303038
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303039 return ret;
3040}
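
/*
 * Illustrative pairing of the two hibern8 helpers above (sketch, not driver
 * code): the power management paths enter hibern8 on suspend and exit it on
 * resume, e.g.
 *
 *	ret = ufshcd_uic_hibern8_enter(hba);	(link -> hibern8)
 *	...
 *	ret = ufshcd_uic_hibern8_exit(hba);	(link -> active)
 */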
3041
Yaniv Gardi50646362014-10-23 13:25:13 +03003042/**
3043 * ufshcd_init_pwr_info - setting the POR (power on reset)
3044 * values in hba power info
3045 * @hba: per-adapter instance
3046 */
3047static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3048{
3049 hba->pwr_info.gear_rx = UFS_PWM_G1;
3050 hba->pwr_info.gear_tx = UFS_PWM_G1;
3051 hba->pwr_info.lane_rx = 1;
3052 hba->pwr_info.lane_tx = 1;
3053 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3054 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3055 hba->pwr_info.hs_rate = 0;
3056}
3057
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303058/**
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003059 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
3060 * @hba: per-adapter instance
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303061 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003062static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303063{
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003064 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3065
3066 if (hba->max_pwr_info.is_valid)
3067 return 0;
3068
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08003069 pwr_info->pwr_tx = FAST_MODE;
3070 pwr_info->pwr_rx = FAST_MODE;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003071 pwr_info->hs_rate = PA_HS_MODE_B;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303072
3073 /* Get the connected lane count */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003074 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3075 &pwr_info->lane_rx);
3076 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3077 &pwr_info->lane_tx);
3078
3079 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3080 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
3081 __func__,
3082 pwr_info->lane_rx,
3083 pwr_info->lane_tx);
3084 return -EINVAL;
3085 }
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303086
3087 /*
3088 * First, get the maximum gears of HS speed.
3089	 * If the value is zero, it means there is no HSGEAR capability.
3090 * Then, get the maximum gears of PWM speed.
3091 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003092 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
3093 if (!pwr_info->gear_rx) {
3094 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3095 &pwr_info->gear_rx);
3096 if (!pwr_info->gear_rx) {
3097 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
3098 __func__, pwr_info->gear_rx);
3099 return -EINVAL;
3100 }
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08003101 pwr_info->pwr_rx = SLOW_MODE;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303102 }
3103
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003104 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
3105 &pwr_info->gear_tx);
3106 if (!pwr_info->gear_tx) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303107 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003108 &pwr_info->gear_tx);
3109 if (!pwr_info->gear_tx) {
3110 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
3111 __func__, pwr_info->gear_tx);
3112 return -EINVAL;
3113 }
subhashj@codeaurora.org2349b532016-11-23 16:33:19 -08003114 pwr_info->pwr_tx = SLOW_MODE;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003115 }
3116
3117 hba->max_pwr_info.is_valid = true;
3118 return 0;
3119}
3120
3121static int ufshcd_change_power_mode(struct ufs_hba *hba,
3122 struct ufs_pa_layer_attr *pwr_mode)
3123{
3124 int ret;
3125
3126 /* if already configured to the requested pwr_mode */
3127 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
3128 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
3129 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
3130 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
3131 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
3132 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
3133 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
3134 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
3135 return 0;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303136 }
3137
3138 /*
3139	 * Configure the following attributes for the power mode change:
3140 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
3141 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
3142 * - PA_HSSERIES
3143 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003144 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
3145 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
3146 pwr_mode->lane_rx);
3147 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
3148 pwr_mode->pwr_rx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303149 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003150 else
3151 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303152
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003153 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
3154 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
3155 pwr_mode->lane_tx);
3156 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
3157 pwr_mode->pwr_tx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303158 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003159 else
3160 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303161
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003162 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
3163 pwr_mode->pwr_tx == FASTAUTO_MODE ||
3164 pwr_mode->pwr_rx == FAST_MODE ||
3165 pwr_mode->pwr_tx == FAST_MODE)
3166 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
3167 pwr_mode->hs_rate);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303168
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003169 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
3170 | pwr_mode->pwr_tx);
3171
3172 if (ret) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303173 dev_err(hba->dev,
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003174 "%s: power mode change failed %d\n", __func__, ret);
3175 } else {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003176 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
3177 pwr_mode);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003178
3179 memcpy(&hba->pwr_info, pwr_mode,
3180 sizeof(struct ufs_pa_layer_attr));
3181 }
3182
Dolev Raviv66cc8202016-12-22 18:39:42 -08003183 ufshcd_print_pwr_info(hba);
3184
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003185 return ret;
3186}
3187
3188/**
3189 * ufshcd_config_pwr_mode - configure a new power mode
3190 * @hba: per-adapter instance
3191 * @desired_pwr_mode: desired power configuration
3192 */
3193static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
3194 struct ufs_pa_layer_attr *desired_pwr_mode)
3195{
3196 struct ufs_pa_layer_attr final_params = { 0 };
3197 int ret;
3198
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003199 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
3200 desired_pwr_mode, &final_params);
3201
3202 if (ret)
Dolev Raviv7eb584d2014-09-25 15:32:31 +03003203 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
3204
3205 ret = ufshcd_change_power_mode(hba, &final_params);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05303206
3207 return ret;
3208}
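
/*
 * Illustrative probe-time usage of the two helpers above (sketch; the actual
 * call site lives later in this file):
 *
 *	if (ufshcd_get_max_pwr_mode(hba))
 *		dev_err(hba->dev, "%s: Failed getting max supported power mode\n",
 *			__func__);
 *	else
 *		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */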
3209
3210/**
Dolev Raviv68078d52013-07-30 00:35:58 +05303211 * ufshcd_complete_dev_init() - checks device readiness
3212 * @hba: per-adapter instance
3213 *
3214 * Set the fDeviceInit flag and poll until the device clears it.
3215 */
3216static int ufshcd_complete_dev_init(struct ufs_hba *hba)
3217{
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02003218 int i;
3219 int err;
Dolev Raviv68078d52013-07-30 00:35:58 +05303220 bool flag_res = 1;
3221
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02003222 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
3223 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
Dolev Raviv68078d52013-07-30 00:35:58 +05303224 if (err) {
3225 dev_err(hba->dev,
3226 "%s setting fDeviceInit flag failed with error %d\n",
3227 __func__, err);
3228 goto out;
3229 }
3230
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02003231 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
3232 for (i = 0; i < 1000 && !err && flag_res; i++)
3233 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
3234 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
3235
Dolev Raviv68078d52013-07-30 00:35:58 +05303236 if (err)
3237 dev_err(hba->dev,
3238 "%s reading fDeviceInit flag failed with error %d\n",
3239 __func__, err);
3240 else if (flag_res)
3241 dev_err(hba->dev,
3242 "%s fDeviceInit was not cleared by the device\n",
3243 __func__);
3244
3245out:
3246 return err;
3247}
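
/*
 * Illustrative sketch of the flag query pattern used above (not driver
 * code): reading fDeviceInit once through the retrying query helper:
 *
 *	bool flag_res = false;
 *	int err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *					  QUERY_FLAG_IDN_FDEVICEINIT,
 *					  &flag_res);
 *
 * A zero return with flag_res still true means the device has not yet
 * finished its initialization.
 */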
3248
3249/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303250 * ufshcd_make_hba_operational - Make UFS controller operational
3251 * @hba: per adapter instance
3252 *
3253 * To bring UFS host controller to operational state,
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003254 * 1. Enable required interrupts
3255 * 2. Configure interrupt aggregation
Yaniv Gardi897efe62016-02-01 15:02:48 +02003256 * 3. Program UTRL and UTMRL base address
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003257 * 4. Configure run-stop-registers
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303258 *
3259 * Returns 0 on success, non-zero value on failure
3260 */
3261static int ufshcd_make_hba_operational(struct ufs_hba *hba)
3262{
3263 int err = 0;
3264 u32 reg;
3265
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303266 /* Enable required interrupts */
3267 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
3268
3269 /* Configure interrupt aggregation */
Yaniv Gardib8521902015-05-17 18:54:57 +03003270 if (ufshcd_is_intr_aggr_allowed(hba))
3271 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
3272 else
3273 ufshcd_disable_intr_aggr(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303274
3275 /* Configure UTRL and UTMRL base address registers */
3276 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
3277 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
3278 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
3279 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
3280 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
3281 REG_UTP_TASK_REQ_LIST_BASE_L);
3282 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
3283 REG_UTP_TASK_REQ_LIST_BASE_H);
3284
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303285 /*
Yaniv Gardi897efe62016-02-01 15:02:48 +02003286 * Make sure base address and interrupt setup are updated before
3287 * enabling the run/stop registers below.
3288 */
3289 wmb();
3290
3291 /*
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303292 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303293 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003294 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303295 if (!(ufshcd_get_lists_status(reg))) {
3296 ufshcd_enable_run_stop_reg(hba);
3297 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303298 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303299			"Host controller not ready to process requests\n");
3300 err = -EIO;
3301 goto out;
3302 }
3303
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303304out:
3305 return err;
3306}
3307
3308/**
Yaniv Gardi596585a2016-03-10 17:37:08 +02003309 * ufshcd_hba_stop - Send controller to reset state
3310 * @hba: per adapter instance
3311 * @can_sleep: perform sleep or just spin
3312 */
3313static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
3314{
3315 int err;
3316
3317 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
3318 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
3319 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
3320 10, 1, can_sleep);
3321 if (err)
3322 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
3323}
3324
3325/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303326 * ufshcd_hba_enable - initialize the controller
3327 * @hba: per adapter instance
3328 *
3329 * The controller resets itself and controller firmware initialization
3330 * sequence kicks off. When the controller is ready it will set
3331 * the Host Controller Enable bit to 1.
3332 *
3333 * Returns 0 on success, non-zero value on failure
3334 */
3335static int ufshcd_hba_enable(struct ufs_hba *hba)
3336{
3337 int retry;
3338
3339 /*
3340 * msleep of 1 and 5 used in this function might result in msleep(20),
3341 * but it was necessary to send the UFS FPGA to reset mode during
3342 * development and testing of this driver. msleep can be changed to
3343 * mdelay and retry count can be reduced based on the controller.
3344 */
Yaniv Gardi596585a2016-03-10 17:37:08 +02003345 if (!ufshcd_is_hba_active(hba))
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303346 /* change controller state to "reset state" */
Yaniv Gardi596585a2016-03-10 17:37:08 +02003347 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303348
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003349 /* UniPro link is disabled at this point */
3350 ufshcd_set_link_off(hba);
3351
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003352 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003353
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303354 /* start controller initialization sequence */
3355 ufshcd_hba_start(hba);
3356
3357 /*
3358	 * To initialize a UFS host controller, the HCE bit must be set to 1.
3359	 * During initialization the HCE bit value changes from 1->0->1.
3360	 * When the host controller completes the initialization sequence
3361	 * it sets the HCE bit to 1. The same HCE bit is read back
3362	 * to check if the controller has completed the initialization sequence.
3363	 * So without this delay, the HCE = 1 value set by the previous
3364	 * instruction might be read back.
3365 * This delay can be changed based on the controller.
3366 */
3367 msleep(1);
3368
3369 /* wait for the host controller to complete initialization */
3370 retry = 10;
3371 while (ufshcd_is_hba_active(hba)) {
3372 if (retry) {
3373 retry--;
3374 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303375 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303376 "Controller enable failed\n");
3377 return -EIO;
3378 }
3379 msleep(5);
3380 }
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003381
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003382 /* enable UIC related interrupts */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003383 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003384
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003385 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003386
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303387 return 0;
3388}
3389
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03003390static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
3391{
3392 int tx_lanes, i, err = 0;
3393
3394 if (!peer)
3395 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3396 &tx_lanes);
3397 else
3398 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3399 &tx_lanes);
3400 for (i = 0; i < tx_lanes; i++) {
3401 if (!peer)
3402 err = ufshcd_dme_set(hba,
3403 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
3404 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
3405 0);
3406 else
3407 err = ufshcd_dme_peer_set(hba,
3408 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
3409 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
3410 0);
3411 if (err) {
3412 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
3413 __func__, peer, i, err);
3414 break;
3415 }
3416 }
3417
3418 return err;
3419}
3420
3421static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
3422{
3423 return ufshcd_disable_tx_lcc(hba, true);
3424}
3425
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303426/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303427 * ufshcd_link_startup - Initialize unipro link startup
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303428 * @hba: per adapter instance
3429 *
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303430 * Returns 0 for success, non-zero in case of failure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303431 */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303432static int ufshcd_link_startup(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303433{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303434 int ret;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003435 int retries = DME_LINKSTARTUP_RETRIES;
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08003436 bool link_startup_again = false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303437
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08003438 /*
3439 * If UFS device isn't active then we will have to issue link startup
3440 * 2 times to make sure the device state move to active.
3441 */
3442 if (!ufshcd_is_ufs_dev_active(hba))
3443 link_startup_again = true;
3444
3445link_startup:
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003446 do {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003447 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303448
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003449 ret = ufshcd_dme_link_startup(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003450
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003451 /* check if device is detected by inter-connect layer */
3452 if (!ret && !ufshcd_is_device_present(hba)) {
3453 dev_err(hba->dev, "%s: Device not present\n", __func__);
3454 ret = -ENXIO;
3455 goto out;
3456 }
3457
3458 /*
3459 * DME link lost indication is only received when link is up,
3460 * but we can't be sure if the link is up until link startup
3461 * succeeds. So reset the local Uni-Pro and try again.
3462 */
3463 if (ret && ufshcd_hba_enable(hba))
3464 goto out;
3465 } while (ret && retries--);
3466
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303467 if (ret)
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003468		/* failed to get the link up... give up */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303469 goto out;
3470
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08003471 if (link_startup_again) {
3472 link_startup_again = false;
3473 retries = DME_LINKSTARTUP_RETRIES;
3474 goto link_startup;
3475 }
3476
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03003477 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
3478 ret = ufshcd_disable_device_tx_lcc(hba);
3479 if (ret)
3480 goto out;
3481 }
3482
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003483 /* Include any host controller configuration via UIC commands */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003484 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
3485 if (ret)
3486 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003487
3488 ret = ufshcd_make_hba_operational(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303489out:
3490 if (ret)
3491 dev_err(hba->dev, "link startup failed %d\n", ret);
3492 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303493}
3494
3495/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303496 * ufshcd_verify_dev_init() - Verify device initialization
3497 * @hba: per-adapter instance
3498 *
3499 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
3500 * device Transport Protocol (UTP) layer is ready after a reset.
3501 * If the UTP layer at the device side is not initialized, it may
3502 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
3503 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
3504 */
3505static int ufshcd_verify_dev_init(struct ufs_hba *hba)
3506{
3507 int err = 0;
3508 int retries;
3509
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003510 ufshcd_hold(hba, false);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303511 mutex_lock(&hba->dev_cmd.lock);
3512 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
3513 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
3514 NOP_OUT_TIMEOUT);
3515
3516 if (!err || err == -ETIMEDOUT)
3517 break;
3518
3519 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
3520 }
3521 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003522 ufshcd_release(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303523
3524 if (err)
3525 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
3526 return err;
3527}
3528
3529/**
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003530 * ufshcd_set_queue_depth - set lun queue depth
3531 * @sdev: pointer to SCSI device
3532 *
3533 * Read bLUQueueDepth value and activate SCSI tagged command
3534 * queueing. For WLUN, queue depth is set to 1. For best-effort
3535 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
3536 * value that host can queue.
3537 */
3538static void ufshcd_set_queue_depth(struct scsi_device *sdev)
3539{
3540 int ret = 0;
3541 u8 lun_qdepth;
Dolev Raviv61e07352016-11-23 16:30:49 -08003542 int retries;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003543 struct ufs_hba *hba;
3544
3545 hba = shost_priv(sdev->host);
3546
3547 lun_qdepth = hba->nutrs;
Dolev Raviv61e07352016-11-23 16:30:49 -08003548 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3549 /* Read descriptor*/
3550 ret = ufshcd_read_unit_desc_param(hba,
3551 ufshcd_scsi_to_upiu_lun(sdev->lun),
3552 UNIT_DESC_PARAM_LU_Q_DEPTH,
3553 &lun_qdepth,
3554 sizeof(lun_qdepth));
3555		if (!ret || ret == -EOPNOTSUPP)
3556 break;
3557
3558 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret);
3559 }
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003560
3561	/* Some WLUNs don't support unit descriptors */
3562 if (ret == -EOPNOTSUPP)
3563 lun_qdepth = 1;
3564 else if (!lun_qdepth)
3565 /* eventually, we can figure out the real queue depth */
3566 lun_qdepth = hba->nutrs;
3567 else
3568 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
3569
3570 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
3571 __func__, lun_qdepth);
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01003572 scsi_change_queue_depth(sdev, lun_qdepth);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003573}
3574
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003575/**
3576 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
3577 * @hba: per-adapter instance
3578 * @lun: UFS device lun id
3579 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
3580 *
3581 * Returns 0 in case of success and the b_lu_write_protect status is returned
3582 * in the @b_lu_write_protect parameter.
3583 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
3584 * Returns -EINVAL in case of invalid parameters passed to this function.
3585 */
3586static int ufshcd_get_lu_wp(struct ufs_hba *hba,
3587 u8 lun,
3588 u8 *b_lu_write_protect)
3589{
3590 int ret;
3591
3592 if (!b_lu_write_protect)
3593 ret = -EINVAL;
3594 /*
3595 * According to UFS device spec, RPMB LU can't be write
3596 * protected so skip reading bLUWriteProtect parameter for
3597 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
3598 */
3599 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
3600 ret = -ENOTSUPP;
3601 else
3602 ret = ufshcd_read_unit_desc_param(hba,
3603 lun,
3604 UNIT_DESC_PARAM_LU_WR_PROTECT,
3605 b_lu_write_protect,
3606 sizeof(*b_lu_write_protect));
3607 return ret;
3608}
3609
3610/**
3611 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
3612 * status
3613 * @hba: per-adapter instance
3614 * @sdev: pointer to SCSI device
3615 *
3616 */
3617static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
3618 struct scsi_device *sdev)
3619{
3620 if (hba->dev_info.f_power_on_wp_en &&
3621 !hba->dev_info.is_lu_power_on_wp) {
3622 u8 b_lu_write_protect;
3623
3624 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
3625 &b_lu_write_protect) &&
3626 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
3627 hba->dev_info.is_lu_power_on_wp = true;
3628 }
3629}
3630
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003631/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303632 * ufshcd_slave_alloc - handle initial SCSI device configurations
3633 * @sdev: pointer to SCSI device
3634 *
3635 * Returns success
3636 */
3637static int ufshcd_slave_alloc(struct scsi_device *sdev)
3638{
3639 struct ufs_hba *hba;
3640
3641 hba = shost_priv(sdev->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303642
3643 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
3644 sdev->use_10_for_ms = 1;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303645
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05303646 /* allow SCSI layer to restart the device in case of errors */
3647 sdev->allow_restart = 1;
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003648
Sujit Reddy Thummab2a6c522014-07-01 12:22:38 +03003649 /* REPORT SUPPORTED OPERATION CODES is not supported */
3650 sdev->no_report_opcodes = 1;
3651
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003653 ufshcd_set_queue_depth(sdev);
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003654
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003655 ufshcd_get_lu_power_on_wp_status(hba, sdev);
3656
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003657 return 0;
3658}
3659
3660/**
3661 * ufshcd_change_queue_depth - change queue depth
3662 * @sdev: pointer to SCSI device
3663 * @depth: required depth to set
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003664 *
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01003665 * Change queue depth and make sure the max. limits are not crossed.
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003666 */
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01003667static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003668{
3669 struct ufs_hba *hba = shost_priv(sdev->host);
3670
3671 if (depth > hba->nutrs)
3672 depth = hba->nutrs;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01003673 return scsi_change_queue_depth(sdev, depth);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303674}
3675
3676/**
Akinobu Mitaeeda4742014-07-01 23:00:32 +09003677 * ufshcd_slave_configure - adjust SCSI device configurations
3678 * @sdev: pointer to SCSI device
3679 */
3680static int ufshcd_slave_configure(struct scsi_device *sdev)
3681{
3682 struct request_queue *q = sdev->request_queue;
3683
3684 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
3685 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
3686
3687 return 0;
3688}
3689
3690/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303691 * ufshcd_slave_destroy - remove SCSI device configurations
3692 * @sdev: pointer to SCSI device
3693 */
3694static void ufshcd_slave_destroy(struct scsi_device *sdev)
3695{
3696 struct ufs_hba *hba;
3697
3698 hba = shost_priv(sdev->host);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003699 /* Drop the reference as it won't be needed anymore */
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03003700 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
3701 unsigned long flags;
3702
3703 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003704 hba->sdev_ufs_device = NULL;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03003705 spin_unlock_irqrestore(hba->host->host_lock, flags);
3706 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303707}
3708
3709/**
3710 * ufshcd_task_req_compl - handle task management request completion
3711 * @hba: per adapter instance
3712 * @index: index of the completed request
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303713 * @resp: task management service response
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303714 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303715 * Returns non-zero value on error, zero on success
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303716 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303717static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303718{
3719 struct utp_task_req_desc *task_req_descp;
3720 struct utp_upiu_task_rsp *task_rsp_upiup;
3721 unsigned long flags;
3722 int ocs_value;
3723 int task_result;
3724
3725 spin_lock_irqsave(hba->host->host_lock, flags);
3726
3727 /* Clear completed tasks from outstanding_tasks */
3728 __clear_bit(index, &hba->outstanding_tasks);
3729
3730 task_req_descp = hba->utmrdl_base_addr;
3731 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
3732
3733 if (ocs_value == OCS_SUCCESS) {
3734 task_rsp_upiup = (struct utp_upiu_task_rsp *)
3735 task_req_descp[index].task_rsp_upiu;
Kiwoong Kim8794ee02016-09-09 08:22:22 +09003736 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
3737 task_result = task_result & MASK_TM_SERVICE_RESP;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303738 if (resp)
3739 *resp = (u8)task_result;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303740 } else {
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303741 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
3742 __func__, ocs_value);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303743 }
3744 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303745
3746 return ocs_value;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303747}
3748
3749/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303750 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
3751 * @lrbp: pointer to local reference block of completed command
3752 * @scsi_status: SCSI command status
3753 *
3754 * Returns value based on SCSI command status
3755 */
3756static inline int
3757ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
3758{
3759 int result = 0;
3760
3761 switch (scsi_status) {
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05303762 case SAM_STAT_CHECK_CONDITION:
3763 ufshcd_copy_sense_data(lrbp);
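		/* fallthrough: sense data is copied, then the result word is built as for SAM_STAT_GOOD */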
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303764 case SAM_STAT_GOOD:
3765 result |= DID_OK << 16 |
3766 COMMAND_COMPLETE << 8 |
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05303767 scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303768 break;
3769 case SAM_STAT_TASK_SET_FULL:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05303770 case SAM_STAT_BUSY:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303771 case SAM_STAT_TASK_ABORTED:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05303772 ufshcd_copy_sense_data(lrbp);
3773 result |= scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303774 break;
3775 default:
3776 result |= DID_ERROR << 16;
3777 break;
3778 } /* end of switch */
3779
3780 return result;
3781}
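
/*
 * Worked example of the result encoding above (illustrative): the SCSI
 * midlayer packs the host byte at bits 16..23, the message byte at bits
 * 8..15 and the SCSI status byte at bits 0..7, so:
 *
 *	DID_ERROR << 16				-> 0x00070000
 *	DID_OK << 16 | COMMAND_COMPLETE << 8 |
 *		SAM_STAT_CHECK_CONDITION	-> 0x00000002
 */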
3782
3783/**
3784 * ufshcd_transfer_rsp_status - Get overall status of the response
3785 * @hba: per adapter instance
3786 * @lrbp: pointer to local reference block of completed command
3787 *
3788 * Returns result of the command to notify SCSI midlayer
3789 */
3790static inline int
3791ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3792{
3793 int result = 0;
3794 int scsi_status;
3795 int ocs;
3796
3797 /* overall command status of utrd */
3798 ocs = ufshcd_get_tr_ocs(lrbp);
3799
3800 switch (ocs) {
3801 case OCS_SUCCESS:
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303802 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303803
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303804 switch (result) {
3805 case UPIU_TRANSACTION_RESPONSE:
3806 /*
3807 * get the response UPIU result to extract
3808 * the SCSI command status
3809 */
3810 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
3811
3812 /*
3813 * get the result based on SCSI status response
3814 * to notify the SCSI midlayer of the command status
3815 */
3816 scsi_status = result & MASK_SCSI_STATUS;
3817 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303818
Yaniv Gardif05ac2e2016-02-01 15:02:42 +02003819 /*
3820 * Currently we are only supporting BKOPs exception
3821 * events hence we can ignore BKOPs exception event
3822 * during power management callbacks. BKOPs exception
3823 * event is not expected to be raised in runtime suspend
3824 * callback as it allows the urgent bkops.
3825 * During system suspend, we are anyway forcefully
3826 * disabling the bkops and if urgent bkops is needed
3827 * it will be enabled on system resume. Long term
3828 * solution could be to abort the system suspend if
3829 * UFS device needs urgent BKOPs.
3830 */
3831 if (!hba->pm_op_in_progress &&
3832 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303833 schedule_work(&hba->eeh_work);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303834 break;
3835 case UPIU_TRANSACTION_REJECT_UPIU:
3836 /* TODO: handle Reject UPIU Response */
3837 result = DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303838 dev_err(hba->dev,
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303839 "Reject UPIU not fully implemented\n");
3840 break;
3841 default:
3842 result = DID_ERROR << 16;
3843 dev_err(hba->dev,
3844 "Unexpected request response code = %x\n",
3845 result);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303846 break;
3847 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303848 break;
3849 case OCS_ABORTED:
3850 result |= DID_ABORT << 16;
3851 break;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05303852 case OCS_INVALID_COMMAND_STATUS:
3853 result |= DID_REQUEUE << 16;
3854 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303855 case OCS_INVALID_CMD_TABLE_ATTR:
3856 case OCS_INVALID_PRDT_ATTR:
3857 case OCS_MISMATCH_DATA_BUF_SIZE:
3858 case OCS_MISMATCH_RESP_UPIU_SIZE:
3859 case OCS_PEER_COMM_FAILURE:
3860 case OCS_FATAL_ERROR:
3861 default:
3862 result |= DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303863 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303864 "OCS error from controller = %x\n", ocs);
3865 break;
3866 } /* end of switch */
3867
Dolev Raviv66cc8202016-12-22 18:39:42 -08003868 if (host_byte(result) != DID_OK)
3869 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303870 return result;
3871}
3872
3873/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303874 * ufshcd_uic_cmd_compl - handle completion of uic command
3875 * @hba: per adapter instance
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303876 * @intr_status: interrupt status generated by the controller
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303877 */
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303878static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303879{
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303880 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303881 hba->active_uic_cmd->argument2 |=
3882 ufshcd_get_uic_cmd_result(hba);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303883 hba->active_uic_cmd->argument3 =
3884 ufshcd_get_dme_attr_val(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303885 complete(&hba->active_uic_cmd->done);
3886 }
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303887
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003888 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
3889 complete(hba->uic_async_done);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303890}
3891
3892/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003893 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303894 * @hba: per adapter instance
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003895 * @completed_reqs: requests to complete
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303896 */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003897static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
3898 unsigned long completed_reqs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303899{
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303900 struct ufshcd_lrb *lrbp;
3901 struct scsi_cmnd *cmd;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303902 int result;
3903 int index;
Dolev Ravive9d501b2014-07-01 12:22:37 +03003904
Dolev Ravive9d501b2014-07-01 12:22:37 +03003905 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
3906 lrbp = &hba->lrb[index];
3907 cmd = lrbp->cmd;
3908 if (cmd) {
3909 result = ufshcd_transfer_rsp_status(hba, lrbp);
3910 scsi_dma_unmap(cmd);
3911 cmd->result = result;
3912 /* Mark completed command as NULL in LRB */
3913 lrbp->cmd = NULL;
3914 clear_bit_unlock(index, &hba->lrb_in_use);
3915 /* Do not touch lrbp after scsi done */
3916 cmd->scsi_done(cmd);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003917 __ufshcd_release(hba);
Joao Pinto300bb132016-05-11 12:21:27 +01003918 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
3919 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
Dolev Ravive9d501b2014-07-01 12:22:37 +03003920 if (hba->dev_cmd.complete)
3921 complete(hba->dev_cmd.complete);
3922 }
3923 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303924
3925 /* clear corresponding bits of completed commands */
3926 hba->outstanding_reqs ^= completed_reqs;
3927
Sahitya Tummala856b3482014-09-25 15:32:34 +03003928 ufshcd_clk_scaling_update_busy(hba);
3929
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303930 /* we might have free'd some tags above */
3931 wake_up(&hba->dev_cmd.tag_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303932}
3933
3934/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003935 * ufshcd_transfer_req_compl - handle SCSI and query command completion
3936 * @hba: per adapter instance
3937 */
3938static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
3939{
3940 unsigned long completed_reqs;
3941 u32 tr_doorbell;
3942
3943 /* Resetting interrupt aggregation counters first and reading the
3944 * DOOR_BELL afterward allows us to handle all the completed requests.
3945	 * In order to prevent starvation of other interrupts the DB is read once
3946	 * after reset. The downside of this solution is the possibility of a
3947	 * false interrupt if the device completes another request after resetting
3948 * aggregation and before reading the DB.
3949 */
3950 if (ufshcd_is_intr_aggr_allowed(hba))
3951 ufshcd_reset_intr_aggr(hba);
3952
3953 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3954 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
3955
3956 __ufshcd_transfer_req_compl(hba, completed_reqs);
3957}
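
/*
 * Worked example of the doorbell arithmetic above (illustrative): with
 * hba->outstanding_reqs = 0b1011 (tags 0, 1 and 3 issued) and a doorbell
 * read-back of 0b0010 (only tag 1 still pending),
 *
 *	completed_reqs = 0b0010 ^ 0b1011 = 0b1001
 *
 * i.e. tags 0 and 3 have completed. __ufshcd_transfer_req_compl() then
 * clears exactly those bits from hba->outstanding_reqs with the same XOR.
 */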
3958
3959/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303960 * ufshcd_disable_ee - disable exception event
3961 * @hba: per-adapter instance
3962 * @mask: exception event to disable
3963 *
3964 * Disables exception event in the device so that the EVENT_ALERT
3965 * bit is not set.
3966 *
3967 * Returns zero on success, non-zero error value on failure.
3968 */
3969static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
3970{
3971 int err = 0;
3972 u32 val;
3973
3974 if (!(hba->ee_ctrl_mask & mask))
3975 goto out;
3976
3977 val = hba->ee_ctrl_mask & ~mask;
3978 val &= 0xFFFF; /* 2 bytes */
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02003979 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303980 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
3981 if (!err)
3982 hba->ee_ctrl_mask &= ~mask;
3983out:
3984 return err;
3985}
3986
3987/**
3988 * ufshcd_enable_ee - enable exception event
3989 * @hba: per-adapter instance
3990 * @mask: exception event to enable
3991 *
3992 * Enable corresponding exception event in the device to allow
3993 * device to alert host in critical scenarios.
3994 *
3995 * Returns zero on success, non-zero error value on failure.
3996 */
3997static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
3998{
3999 int err = 0;
4000 u32 val;
4001
4002 if (hba->ee_ctrl_mask & mask)
4003 goto out;
4004
4005 val = hba->ee_ctrl_mask | mask;
4006 val &= 0xFFFF; /* 2 bytes */
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02004007 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304008 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4009 if (!err)
4010 hba->ee_ctrl_mask |= mask;
4011out:
4012 return err;
4013}
4014
4015/**
4016 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
4017 * @hba: per-adapter instance
4018 *
4019 * Allow device to manage background operations on its own. Enabling
4020 * this might lead to inconsistent latencies during normal data transfers
4021 * as the device is allowed to manage its own way of handling background
4022 * operations.
4023 *
4024 * Returns zero on success, non-zero on failure.
4025 */
4026static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4027{
4028 int err = 0;
4029
4030 if (hba->auto_bkops_enabled)
4031 goto out;
4032
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004033 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304034 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4035 if (err) {
4036 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
4037 __func__, err);
4038 goto out;
4039 }
4040
4041 hba->auto_bkops_enabled = true;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08004042 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304043
4044 /* No need of URGENT_BKOPS exception from the device */
4045 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4046 if (err)
4047 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
4048 __func__, err);
4049out:
4050 return err;
4051}
4052
4053/**
4054 * ufshcd_disable_auto_bkops - block device in doing background operations
4055 * @hba: per-adapter instance
4056 *
4057 * Disabling background operations improves command response latency but
4058 * has the drawback of the device moving into a critical state where it is
4059 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
4060 * host is idle so that BKOPS are managed effectively without any negative
4061 * impacts.
4062 *
4063 * Returns zero on success, non-zero on failure.
4064 */
4065static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
4066{
4067 int err = 0;
4068
4069 if (!hba->auto_bkops_enabled)
4070 goto out;
4071
4072 /*
4073 * If host assisted BKOPs is to be enabled, make sure
4074 * urgent bkops exception is allowed.
4075 */
4076 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
4077 if (err) {
4078 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
4079 __func__, err);
4080 goto out;
4081 }
4082
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004083 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304084 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4085 if (err) {
4086 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
4087 __func__, err);
4088 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4089 goto out;
4090 }
4091
4092 hba->auto_bkops_enabled = false;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08004093 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304094out:
4095 return err;
4096}
4097
4098/**
4099 * ufshcd_force_reset_auto_bkops - force enable of auto bkops
4100 * @hba: per adapter instance
4101 *
4102 * After a device reset the device may toggle the BKOPS_EN flag
4103 * to default value. The s/w tracking variables should be updated
4104 * as well. Do this by forcing enable of auto bkops.
4105 */
4106static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
4107{
4108 hba->auto_bkops_enabled = false;
4109 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
4110 ufshcd_enable_auto_bkops(hba);
4111}
4112
4113static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
4114{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02004115 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304116 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
4117}
4118
4119/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004120 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
4121 * @hba: per-adapter instance
4122 * @status: bkops_status value
4123 *
4124 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
4125 * flag in the device to permit background operations if the device
4126 * bkops_status is greater than or equal to "status" argument passed to
4127 * this function, disable otherwise.
4128 *
4129 * Returns 0 for success, non-zero in case of failure.
4130 *
4131 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
4132 * to know whether auto bkops is enabled or disabled after this function
4133 * returns control to it.
4134 */
4135static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
4136 enum bkops_status status)
4137{
4138 int err;
4139 u32 curr_status = 0;
4140
4141 err = ufshcd_get_bkops_status(hba, &curr_status);
4142 if (err) {
4143 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
4144 __func__, err);
4145 goto out;
4146 } else if (curr_status > BKOPS_STATUS_MAX) {
4147 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
4148 __func__, curr_status);
4149 err = -EINVAL;
4150 goto out;
4151 }
4152
4153 if (curr_status >= status)
4154 err = ufshcd_enable_auto_bkops(hba);
4155 else
4156 err = ufshcd_disable_auto_bkops(hba);
4157out:
4158 return err;
4159}
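
/*
 * Example usage (illustrative sketch): a power management path that wants
 * auto-bkops kept enabled only when the device reports at least non-critical
 * urgency could call:
 *
 *	ret = ufshcd_bkops_ctrl(hba, BKOPS_STATUS_NON_CRITICAL);
 *
 * leaving hba->auto_bkops_enabled updated accordingly on return.
 */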
4160
4161/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304162 * ufshcd_urgent_bkops - handle urgent bkops exception event
4163 * @hba: per-adapter instance
4164 *
4165 * Enable fBackgroundOpsEn flag in the device to permit background
4166 * operations.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004167 *
4168 * If BKOPS is enabled, this function returns 0; 1 if bkops is not enabled;
4169 * and a negative error value for any other failure.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304170 */
4171static int ufshcd_urgent_bkops(struct ufs_hba *hba)
4172{
Yaniv Gardiafdfff52016-03-10 17:37:15 +02004173 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304174}
4175
4176static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
4177{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02004178 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304179 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
4180}
4181
Yaniv Gardiafdfff52016-03-10 17:37:15 +02004182static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
4183{
4184 int err;
4185 u32 curr_status = 0;
4186
4187 if (hba->is_urgent_bkops_lvl_checked)
4188 goto enable_auto_bkops;
4189
4190 err = ufshcd_get_bkops_status(hba, &curr_status);
4191 if (err) {
4192 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
4193 __func__, err);
4194 goto out;
4195 }
4196
4197 /*
4198 * We are seeing that some devices are raising the urgent bkops
4199	 * exception events even when the BKOPS status doesn't indicate performance
4200	 * impacted or critical. Handle such devices by determining their urgent
4201 * bkops status at runtime.
4202 */
4203 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
4204 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
4205 __func__, curr_status);
4206 /* update the current status as the urgent bkops level */
4207 hba->urgent_bkops_lvl = curr_status;
4208 hba->is_urgent_bkops_lvl_checked = true;
4209 }
4210
4211enable_auto_bkops:
4212 err = ufshcd_enable_auto_bkops(hba);
4213out:
4214 if (err < 0)
4215 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
4216 __func__, err);
4217}
4218
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304219/**
4220 * ufshcd_exception_event_handler - handle exceptions raised by device
4221 * @work: pointer to work data
4222 *
4223 * Read bExceptionEventStatus attribute from the device and handle the
4224 * exception event accordingly.
4225 */
4226static void ufshcd_exception_event_handler(struct work_struct *work)
4227{
4228 struct ufs_hba *hba;
4229 int err;
4230 u32 status = 0;
4231 hba = container_of(work, struct ufs_hba, eeh_work);
4232
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05304233 pm_runtime_get_sync(hba->dev);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304234 err = ufshcd_get_ee_status(hba, &status);
4235 if (err) {
4236 dev_err(hba->dev, "%s: failed to get exception status %d\n",
4237 __func__, err);
4238 goto out;
4239 }
4240
4241 status &= hba->ee_ctrl_mask;
Yaniv Gardiafdfff52016-03-10 17:37:15 +02004242
4243 if (status & MASK_EE_URGENT_BKOPS)
4244 ufshcd_bkops_exception_event_handler(hba);
4245
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304246out:
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05304247 pm_runtime_put_sync(hba->dev);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304248 return;
4249}

/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
}

/**
 * ufshcd_quirk_dl_nac_errors - Checks whether error handling is required
 * to recover from the DL NAC errors or not.
 * @hba: per-adapter instance
 *
 * Returns true if error handling is required, false otherwise.
 */
static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
{
	unsigned long flags;
	bool err_handling = true;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
	 * device fatal errors and/or DL NAC & REPLAY timeout errors.
	 */
	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
		goto out;

	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
	    ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
		goto out;

	if ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
		int err;

		/* wait for 50ms to see if we can get any other errors */
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		msleep(50);
		spin_lock_irqsave(hba->host->host_lock, flags);

		/*
		 * now check if we have got any other severe errors than
		 * the DL NAC error.
		 */
		if ((hba->saved_err & INT_FATAL_ERRORS) ||
		    ((hba->saved_err & UIC_ERROR) &&
		    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
			goto out;

		/*
		 * As DL NAC is the only error received so far, send out a NOP
		 * command to confirm whether the link is still active:
		 * - If we don't get any response then do error recovery.
		 * - If we get a response then clear the DL NAC error bit.
		 */

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_verify_dev_init(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);

		if (err)
			goto out;

		/* Link seems to be alive hence ignore the DL NAC errors */
		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
			hba->saved_err &= ~UIC_ERROR;
		/* clear NAC error */
		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		if (!hba->saved_uic_err) {
			err_handling = false;
			goto out;
		}
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return err_handling;
}

/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
static void ufshcd_err_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	unsigned long flags;
	u32 err_xfer = 0;
	u32 err_tm = 0;
	int err = 0;
	int tag;
	bool needs_reset = false;

	hba = container_of(work, struct ufs_hba, eh_work);

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		goto out;

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);

	/* Complete requests that have door-bell cleared by h/w */
	ufshcd_complete_requests(hba);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		bool ret;

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
		ret = ufshcd_quirk_dl_nac_errors(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!ret)
			goto skip_err_handling;
	}
	if ((hba->saved_err & INT_FATAL_ERRORS) ||
	    ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
				   UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
				   UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
		needs_reset = true;

	/*
	 * if host reset is required then skip clearing the pending
	 * transfers forcefully because they will get cleared automatically
	 * after link startup.
	 */
	if (needs_reset)
		goto skip_pending_xfer_clear;

	/* release lock as clear command might sleep */
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	/* Clear pending transfer requests */
	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
		if (ufshcd_clear_cmd(hba, tag)) {
			err_xfer = true;
			goto lock_skip_pending_xfer_clear;
		}
	}

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
		if (ufshcd_clear_tm_cmd(hba, tag)) {
			err_tm = true;
			goto lock_skip_pending_xfer_clear;
		}
	}

lock_skip_pending_xfer_clear:
	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Complete the requests that are cleared by s/w */
	ufshcd_complete_requests(hba);

	if (err_xfer || err_tm)
		needs_reset = true;

skip_pending_xfer_clear:
	/* Fatal errors need reset */
	if (needs_reset) {
		unsigned long max_doorbells = (1UL << hba->nutrs) - 1;

		/*
		 * ufshcd_reset_and_restore() does the link reinitialization
		 * which will need at least one empty doorbell slot to send the
		 * device management commands (NOP and query commands).
		 * If there is no slot empty at this moment then free up the
		 * last slot forcefully.
		 */
		if (hba->outstanding_reqs == max_doorbells)
			__ufshcd_transfer_req_compl(hba,
						    (1UL << (hba->nutrs - 1)));

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_reset_and_restore(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (err) {
			dev_err(hba->dev, "%s: reset and restore failed\n",
					__func__);
			hba->ufshcd_state = UFSHCD_STATE_ERROR;
		}
		/*
		 * Inform scsi mid-layer that we did reset and allow to handle
		 * Unit Attention properly.
		 */
		scsi_report_bus_reset(hba->host, 0);
		hba->saved_err = 0;
		hba->saved_uic_err = 0;
	}

skip_err_handling:
	if (!needs_reset) {
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		if (hba->saved_err || hba->saved_uic_err)
			dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
			    __func__, hba->saved_err, hba->saved_uic_err);
	}

	ufshcd_clear_eh_in_progress(hba);

out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	scsi_unblock_requests(hba->host);
	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
}
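
/*
 * A brief summary of the recovery flow above (descriptive only):
 *
 *  1. Complete whatever the hardware already finished.
 *  2. Optionally apply the DL NAC quirk and bail out if it resolved the
 *     error.
 *  3. For fatal errors, skip straight to a full reset; otherwise try to
 *     clear pending transfer and task management requests one by one.
 *  4. If anything could not be cleared, fall back to
 *     ufshcd_reset_and_restore(), keeping one doorbell slot free for the
 *     device management commands it needs.
 */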

/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 */
static void ufshcd_update_uic_error(struct ufs_hba *hba)
{
	u32 reg;

	/* PHY layer lane error */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	/* Ignore LINERESET indication, as this is not an error */
	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
	    (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK))
		/*
		 * To know whether this error is fatal or not, DB timeout
		 * must be checked but this error is handled separately.
		 */
		dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);

	/* PA_INIT_ERROR is fatal and needs UIC reset */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
	else if (hba->dev_quirks &
		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
			hba->uic_error |=
				UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
			hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
	}

	/* UIC NL/TL/DME errors need software retry */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_NL_ERROR;

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_TL_ERROR;

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_DME_ERROR;

	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
			__func__, hba->uic_error);
}

/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 */
static void ufshcd_check_errors(struct ufs_hba *hba)
{
	bool queue_eh_work = false;

	if (hba->errors & INT_FATAL_ERRORS)
		queue_eh_work = true;

	if (hba->errors & UIC_ERROR) {
		hba->uic_error = 0;
		ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (queue_eh_work) {
		/*
		 * update the transfer error masks to sticky bits, let's do this
		 * irrespective of current ufshcd_state.
		 */
		hba->saved_err |= hba->errors;
		hba->saved_uic_err |= hba->uic_error;

		/* handle fatal errors only when link is functional */
		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
			/* block commands from scsi mid-layer */
			scsi_block_requests(hba->host);

			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;

			/* dump controller state before resetting */
			if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
				bool pr_prdt = !!(hba->saved_err &
						SYSTEM_BUS_FATAL_ERROR);

				dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
					__func__, hba->saved_err,
					hba->saved_uic_err);

				ufshcd_print_host_regs(hba);
				ufshcd_print_pwr_info(hba);
				ufshcd_print_tmrs(hba, hba->outstanding_tasks);
				ufshcd_print_trs(hba, hba->outstanding_reqs,
						 pr_prdt);
			}
			schedule_work(&hba->eh_work);
		}
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
}

/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 */
static void ufshcd_tmc_handler(struct ufs_hba *hba)
{
	u32 tm_doorbell;

	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
	wake_up(&hba->tm_wq);
}
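
/*
 * A small worked example of the XOR above (illustrative values): if
 * outstanding_tasks is 0b0110 (slots 1 and 2 were issued) and the task
 * doorbell register now reads 0b0100 (slot 2 still pending), then
 * tm_condition becomes 0b0010 -- exactly the set of task management slots
 * that completed and whose waiters should be woken.
 */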

/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 */
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	hba->errors = UFSHCD_ERROR_MASK & intr_status;
	if (hba->errors)
		ufshcd_check_errors(hba);

	if (intr_status & UFSHCD_UIC_MASK)
		ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		ufshcd_transfer_req_compl(hba);
}

/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED - If interrupt is valid
 *         IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status, enabled_intr_status;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;

	spin_lock(hba->host->host_lock);
	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	enabled_intr_status =
		intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (intr_status)
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

	if (enabled_intr_status) {
		ufshcd_sl_intr(hba, enabled_intr_status);
		retval = IRQ_HANDLED;
	}
	spin_unlock(hba->host->host_lock);
	return retval;
}
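
/*
 * Illustrative sketch of how the handler above is hooked up; the exact call
 * lives in this driver's init path and is reproduced here from memory, so
 * treat it as an approximation rather than a verbatim quote:
 *
 *	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED,
 *			       UFSHCD, hba);
 *
 * IRQF_SHARED matters because the interrupt line may be shared; the handler
 * therefore returns IRQ_NONE when none of the enabled status bits are set.
 */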

static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	u32 mask = 1 << tag;
	unsigned long flags;

	if (!test_bit(tag, &hba->outstanding_tasks))
		goto out;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000, true);
out:
	return err;
}

/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
		u8 tm_function, u8 *tm_response)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_req *task_req_upiup;
	struct Scsi_Host *host;
	unsigned long flags;
	int free_slot;
	int err;
	int task_tag;

	host = hba->host;

	/*
	 * Get a free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
	 */
	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
	ufshcd_hold(hba, false);

	spin_lock_irqsave(host->host_lock, flags);
	task_req_descp = hba->utmrdl_base_addr;
	task_req_descp += free_slot;

	/* Configure task request descriptor */
	task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
	task_req_descp->header.dword_2 =
			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

	/* Configure task request UPIU */
	task_req_upiup =
		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
	task_tag = hba->nutrs + free_slot;
	task_req_upiup->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
					      lun_id, task_tag);
	task_req_upiup->header.dword_1 =
		UPIU_HEADER_DWORD(0, tm_function, 0, 0);
	/*
	 * The host shall provide the same value for LUN field in the basic
	 * header and for Input Parameter.
	 */
	task_req_upiup->input_param1 = cpu_to_be32(lun_id);
	task_req_upiup->input_param2 = cpu_to_be32(task_id);

	ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);

	/* send command to the controller */
	__set_bit(free_slot, &hba->outstanding_tasks);

	/* Make sure descriptors are ready before ringing the task doorbell */
	wmb();

	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();

	spin_unlock_irqrestore(host->host_lock, flags);

	/* wait until the task management command is completed */
	err = wait_event_timeout(hba->tm_wq,
			test_bit(free_slot, &hba->tm_condition),
			msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!err) {
		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
				__func__, tm_function);
		if (ufshcd_clear_tm_cmd(hba, free_slot))
			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
					__func__, free_slot);
		err = -ETIMEDOUT;
	} else {
		err = ufshcd_task_req_compl(hba, free_slot, tm_response);
	}

	clear_bit(free_slot, &hba->tm_condition);
	ufshcd_put_tm_slot(hba, free_slot);
	wake_up(&hba->tm_tag_wq);

	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_eh_device_reset_handler - device reset handler registered to
 *                                    scsi layer.
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned int tag;
	u32 pos;
	int err;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	unsigned long flags;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	lrbp = &hba->lrb[tag];
	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;
		goto out;
	}

	/* clear the commands that were pending for corresponding LUN */
	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
		if (hba->lrb[pos].lun == lrbp->lun) {
			err = ufshcd_clear_cmd(hba, pos);
			if (err)
				break;
		}
	}
	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	spin_unlock_irqrestore(host->host_lock, flags);
out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}
	return err;
}

/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Abort the pending command in device by sending UFS_ABORT_TASK task management
 * command, and in host controller by clearing the door-bell register. There can
 * be a race between the controller sending the command to the device while the
 * abort is issued. To avoid that, first issue UFS_QUERY_TASK to check if the
 * command is really issued and then try to abort it.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned long flags;
	unsigned int tag;
	int err = 0;
	int poll_cnt;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	u32 reg;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;
	lrbp = &hba->lrb[tag];
	if (!ufshcd_valid_tag(hba, tag)) {
		dev_err(hba->dev,
			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
			__func__, tag, cmd, cmd->request);
		BUG();
	}

	/*
	 * Task abort to the device W-LUN is illegal. When this command
	 * would fail, due to spec violation, the scsi err handling next step
	 * would be to send a LU reset which, again, is a spec violation.
	 * To avoid these unnecessary/illegal steps we skip to the last error
	 * handling stage: reset and restore.
	 */
	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
		return ufshcd_eh_host_reset_handler(cmd);

	ufshcd_hold(hba, false);
	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* If command is already aborted/completed, return SUCCESS */
	if (!(test_bit(tag, &hba->outstanding_reqs))) {
		dev_err(hba->dev,
			"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
			__func__, tag, hba->outstanding_reqs, reg);
		goto out;
	}

	if (!(reg & (1 << tag))) {
		dev_err(hba->dev,
		"%s: cmd was completed, but without a notifying intr, tag = %d",
		__func__, tag);
	}

	/* Print Transfer Request of aborted task */
	dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
	scsi_print_command(hba->lrb[tag].cmd);
	ufshcd_print_host_regs(hba);
	ufshcd_print_pwr_info(hba);
	ufshcd_print_trs(hba, 1 << tag, true);

	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			if (reg & (1 << tag)) {
				/* sleep for max. 200us to stabilize */
				usleep_range(100, 200);
				continue;
			}
			/* command completed already */
			goto out;
		} else {
			if (!err)
				err = resp; /* service response error */
			goto out;
		}
	}

	if (!poll_cnt) {
		err = -EBUSY;
		goto out;
	}

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp; /* service response error */
		goto out;
	}

	err = ufshcd_clear_cmd(hba, tag);
	if (err)
		goto out;

	scsi_dma_unmap(cmd);

	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_outstanding_req_clear(hba, tag);
	hba->lrb[tag].cmd = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);

	clear_bit_unlock(tag, &hba->lrb_in_use);
	wake_up(&hba->dev_cmd.tag_wq);

out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}

	/*
	 * This ufshcd_release() corresponds to the original scsi cmd that got
	 * aborted here (as we won't get any IRQ for it).
	 */
	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to default state.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
	int err;
	unsigned long flags;

	/* Reset the host controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_hba_stop(hba, false);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_hba_enable(hba);
	if (err)
		goto out;

	/* Establish the link again and restore the device */
	err = ufshcd_probe_hba(hba);

	if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
		err = -EIO;
out:
	if (err)
		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);

	return err;
}

/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
	int err = 0;
	unsigned long flags;
	int retries = MAX_HOST_RESET_RETRIES;

	do {
		err = ufshcd_host_reset_and_restore(hba);
	} while (err && --retries);

	/*
	 * After reset the door-bell might be cleared, complete
	 * outstanding requests in s/w here.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}

/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int err;
	unsigned long flags;
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	ufshcd_hold(hba, false);
	/*
	 * Check if there is any race with fatal error handling.
	 * If so, wait for it to complete. Even though fatal error
	 * handling does reset and restore in some cases, don't assume
	 * anything out of it. We are just avoiding race here.
	 */
	do {
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!(work_pending(&hba->eh_work) ||
				hba->ufshcd_state == UFSHCD_STATE_RESET))
			break;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
		flush_work(&hba->eh_work);
	} while (1);

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!err) {
		err = SUCCESS;
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	} else {
		err = FAILED;
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	}
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row at the desc table to start scan from
 * @buff: power descriptor buffer
 *
 * Returns calculated max ICC level for specific regulator
 */
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
{
	int i;
	int curr_uA;
	u16 data;
	u16 unit;

	for (i = start_scan; i >= 0; i--) {
		data = be16_to_cpu(*((u16 *)(buff + 2*i)));
		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
						ATTR_ICC_LVL_UNIT_OFFSET;
		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
		switch (unit) {
		case UFSHCD_NANO_AMP:
			curr_uA = curr_uA / 1000;
			break;
		case UFSHCD_MILI_AMP:
			curr_uA = curr_uA * 1000;
			break;
		case UFSHCD_AMP:
			curr_uA = curr_uA * 1000 * 1000;
			break;
		case UFSHCD_MICRO_AMP:
		default:
			break;
		}
		if (sup_curr_uA >= curr_uA)
			break;
	}
	if (i < 0) {
		i = 0;
		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
	}

	return (u32)i;
}
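
/*
 * A worked example of the decoding above (illustrative numbers): an entry
 * whose unit field decodes to UFSHCD_MILI_AMP and whose value field is 500
 * is normalized to 500 * 1000 = 500000 uA by the switch statement. The scan
 * walks from the highest ICC level downwards and returns the first level
 * whose normalized current fits within the regulator's sup_curr_uA budget.
 */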

/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
 * ICC level
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from.
 * @len: length of desc_buf
 *
 * Returns calculated ICC level. In case regulators are not initialized
 * we return 0.
 */
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
							u8 *desc_buf, int len)
{
	u32 icc_level = 0;

	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
						!hba->vreg_info.vccq2) {
		dev_err(hba->dev,
			"%s: Regulator capability was not set, actvIccLevel=%d",
							__func__, icc_level);
		goto out;
	}

	if (hba->vreg_info.vcc)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vcc->max_uA,
				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);

	if (hba->vreg_info.vccq)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);

	if (hba->vreg_info.vccq2)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq2->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
out:
	return icc_level;
}

static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
{
	int ret = 0;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		/* write attribute */
		ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
		if (!ret)
			break;

		dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret);
	}

	return ret;
}

static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
	int ret;
	int buff_len = QUERY_DESC_POWER_MAX_SIZE;
	u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];

	ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
	if (ret) {
		dev_err(hba->dev,
			"%s: Failed reading power descriptor. len = %d ret = %d",
			__func__, buff_len, ret);
		return;
	}

	hba->init_prefetch_data.icc_level =
			ufshcd_find_max_sup_active_icc_level(hba,
			desc_buf, buff_len);
	dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
			__func__, hba->init_prefetch_data.icc_level);

	ret = ufshcd_set_icc_levels_attr(hba,
				 hba->init_prefetch_data.icc_level);

	if (ret)
		dev_err(hba->dev,
			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
			__func__, hba->init_prefetch_data.icc_level, ret);
}

/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * UFS device specification requires the UFS devices to support 4 well known
 * logical units:
 *	"REPORT_LUNS" (address: 01h)
 *	"UFS Device" (address: 50h)
 *	"RPMB" (address: 44h)
 *	"BOOT" (address: 30h)
 * UFS device's power management needs to be controlled by the "POWER
 * CONDITION" field of the SSU (START STOP UNIT) command. But this "power
 * condition" field will take effect only when it is sent to the "UFS device"
 * well known logical unit, hence we require the scsi_device instance to
 * represent this logical unit in order for the UFS host driver to send the
 * SSU command for power management.
 *
 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
 * Block) LU so that user space processes can control this LU. User space may
 * also want to have access to the BOOT LU.
 *
 * This function adds scsi device instances for each of the well known LUs
 * (except "REPORT LUNS" LU).
 *
 * Returns zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if failed to add any of the required W-LU).
 */
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
	int ret = 0;
	struct scsi_device *sdev_rpmb;
	struct scsi_device *sdev_boot;

	hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
	if (IS_ERR(hba->sdev_ufs_device)) {
		ret = PTR_ERR(hba->sdev_ufs_device);
		hba->sdev_ufs_device = NULL;
		goto out;
	}
	scsi_device_put(hba->sdev_ufs_device);

	sdev_boot = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
	if (IS_ERR(sdev_boot)) {
		ret = PTR_ERR(sdev_boot);
		goto remove_sdev_ufs_device;
	}
	scsi_device_put(sdev_boot);

	sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
	if (IS_ERR(sdev_rpmb)) {
		ret = PTR_ERR(sdev_rpmb);
		goto remove_sdev_boot;
	}
	scsi_device_put(sdev_rpmb);
	goto out;

remove_sdev_boot:
	scsi_remove_device(sdev_boot);
remove_sdev_ufs_device:
	scsi_remove_device(hba->sdev_ufs_device);
out:
	return ret;
}
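
/*
 * Illustrative sketch of the W-LUN address translation used above. The
 * helper is assumed to OR the UPIU W-LUN flag and the SCSI well-known-LUN
 * base into the UFS address, roughly:
 *
 *	static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
 *	{
 *		return (upiu_wlun_id | UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
 *	}
 *
 * so e.g. the "UFS Device" W-LUN (50h) lands in the SCSI well-known LUN
 * range rather than colliding with ordinary LU numbers.
 */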

static int ufs_get_device_info(struct ufs_hba *hba,
				struct ufs_device_info *card_data)
{
	int err;
	u8 model_index;
	u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];

	err = ufshcd_read_device_desc(hba, desc_buf,
					QUERY_DESC_DEVICE_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
				QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
	strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
		min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
		      MAX_MODEL_LEN));

	/* Null terminate the model string */
	card_data->model[MAX_MODEL_LEN] = '\0';

out:
	return err;
}

void ufs_advertise_fixup_device(struct ufs_hba *hba)
{
	int err;
	struct ufs_dev_fix *f;
	struct ufs_device_info card_data;

	card_data.wmanufacturerid = 0;

	err = ufs_get_device_info(hba, &card_data);
	if (err) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, err);
		return;
	}

	for (f = ufs_fixups; f->quirk; f++) {
		if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
		    (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
		    (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
		     !strcmp(f->card.model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;
	}
}
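
/*
 * Illustrative sketch of the ufs_fixups table consumed above; the vendor and
 * quirk names below are assumptions for illustration, not a quote of the
 * real table:
 *
 *	static struct ufs_dev_fix ufs_fixups[] = {
 *		UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
 *			UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
 *		UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
 *			UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
 *		END_FIX
 *	};
 *
 * Matching is by manufacturer ID plus a model-string prefix, with
 * UFS_ANY_VENDOR/UFS_ANY_MODEL acting as wildcards.
 */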

/**
 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 * @hba: per-adapter instance
 *
 * PA_TActivate parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_TActivate needs to be greater than or equal to peer M-PHY's
 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
 * the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(
					RX_MIN_ACTIVATETIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_min_activatetime);
	if (ret)
		goto out;

	/* make sure proper unit conversion is applied */
	tuned_pa_tactivate =
		((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
		 / PA_TACTIVATE_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
			     tuned_pa_tactivate);

out:
	return ret;
}
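
/*
 * A worked example of the unit conversion above (assuming the conventional
 * unit sizes of 100us per RX_MIN_ACTIVATETIME step and 10us per PA_TActivate
 * step): a peer capability of 3 means 3 * 100us = 300us, which divides down
 * to a PA_TACTIVATE setting of 30. If the unit macros differ, the arithmetic
 * scales accordingly.
 */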

/**
 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 * @hba: per-adapter instance
 *
 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less
 * than 1.61. PA_Hibern8Time needs to be the maximum of local M-PHY's
 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
 * This optimal value can help reduce the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
{
	int ret = 0;
	u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
	u32 max_hibern8_time, tuned_pa_hibern8time;

	ret = ufshcd_dme_get(hba,
			     UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
			     &local_tx_hibern8_time_cap);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_hibern8_time_cap);
	if (ret)
		goto out;

	max_hibern8_time = max(local_tx_hibern8_time_cap,
			       peer_rx_hibern8_time_cap);
	/* make sure proper unit conversion is applied */
	tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
				/ PA_HIBERN8_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
			     tuned_pa_hibern8time);
out:
	return ret;
}

/**
 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
 * less than device PA_TACTIVATE time.
 * @hba: per-adapter instance
 *
 * Some UFS devices require host PA_TACTIVATE to be lower than device
 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
 * for such devices.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 granularity, peer_granularity;
	u32 pa_tactivate, peer_pa_tactivate;
	u32 pa_tactivate_us, peer_pa_tactivate_us;
	u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &peer_granularity);
	if (ret)
		goto out;

	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
	    (granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
			__func__, granularity);
		return -EINVAL;
	}

	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
			__func__, peer_granularity);
		return -EINVAL;
	}

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
				  &peer_pa_tactivate);
	if (ret)
		goto out;

	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
	peer_pa_tactivate_us = peer_pa_tactivate *
			     gran_to_us_table[peer_granularity - 1];

	if (pa_tactivate_us > peer_pa_tactivate_us) {
		u32 new_peer_pa_tactivate;

		new_peer_pa_tactivate = pa_tactivate_us /
			gran_to_us_table[peer_granularity - 1];
		new_peer_pa_tactivate++;
		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					  new_peer_pa_tactivate);
	}

out:
	return ret;
}
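
/*
 * A worked example of the granularity math above (illustrative values):
 * with host PA_GRANULARITY = 2 (4us steps per gran_to_us_table) and
 * PA_TACTIVATE = 16, the host activate time is 16 * 4 = 64us. If the peer
 * uses PA_GRANULARITY = 1 (1us steps) and PA_TACTIVATE = 32, i.e. 32us,
 * then 64us > 32us and the peer is bumped to 64 / 1 + 1 = 65 so that its
 * PA_TACTIVATE strictly exceeds the host's, as the quirk requires.
 */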

static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
		ufshcd_tune_pa_tactivate(hba);
		ufshcd_tune_pa_hibern8time(hba);
	}

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
		/* set 1ms timeout for PA_TACTIVATE */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
		ufshcd_quirk_tune_host_pa_tactivate(hba);

	ufshcd_vops_apply_dev_quirks(hba);
}

/**
 * ufshcd_probe_hba - probe hba to detect device and initialize
 * @hba: per-adapter instance
 *
 * Execute link-startup and verify device initialization
 */
static int ufshcd_probe_hba(struct ufs_hba *hba)
{
	int ret;
	ktime_t start = ktime_get();

	ret = ufshcd_link_startup(hba);
	if (ret)
		goto out;

	ufshcd_init_pwr_info(hba);
	ufshcd_print_pwr_info(hba);

	/* set the default level for urgent bkops */
	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
	hba->is_urgent_bkops_lvl_checked = false;

	/* UniPro link is active now */
	ufshcd_set_link_active(hba);

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		goto out;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		goto out;

	ufs_advertise_fixup_device(hba);
	ufshcd_tune_unipro_params(hba);

	ret = ufshcd_set_vccq_rail_unused(hba,
		(hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
	if (ret)
		goto out;

	/* UFS device is also active now */
	ufshcd_set_ufs_dev_active(hba);
	ufshcd_force_reset_auto_bkops(hba);
	hba->wlun_dev_clr_ua = true;

	if (ufshcd_get_max_pwr_mode(hba)) {
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
	} else {
		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
					__func__, ret);
			goto out;
		}
	}

	/* set the state as operational after switching to desired gear */
	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	/*
	 * If we are in error handling context or in power management callbacks
	 * context, no need to scan the host
	 */
	if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
		bool flag;

		/* clear any previous UFS device information */
		memset(&hba->dev_info, 0, sizeof(hba->dev_info));
		if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
			hba->dev_info.f_power_on_wp_en = flag;

		if (!hba->is_init_prefetch)
			ufshcd_init_icc_levels(hba);

		/* Add required well known logical units to scsi mid layer */
		if (ufshcd_scsi_add_wlus(hba))
			goto out;

		scsi_scan_host(hba->host);
		pm_runtime_put_sync(hba->dev);
	}

	/* Resume devfreq after UFS device is detected */
	if (ufshcd_is_clkscaling_supported(hba)) {
		ufshcd_resume_clkscaling(hba);
		hba->clk_scaling.is_allowed = true;
	}

	if (!hba->is_init_prefetch)
		hba->is_init_prefetch = true;

out:
	/*
	 * If we failed to initialize the device or the device is not
	 * present, turn off the power/clocks etc.
	 */
	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
		pm_runtime_put_sync(hba->dev);
		ufshcd_hba_exit(hba);
	}

	trace_ufshcd_init(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->uic_link_state, hba->curr_dev_pwr_mode);
	return ret;
}

/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;

	ufshcd_probe_hba(hba);
}

static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int index;
	bool found = false;

	if (!scmd || !scmd->device || !scmd->device->host)
		return BLK_EH_NOT_HANDLED;

	host = scmd->device->host;
	hba = shost_priv(host);
	if (!hba)
		return BLK_EH_NOT_HANDLED;

	spin_lock_irqsave(host->host_lock, flags);

	for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
		if (hba->lrb[index].cmd == scmd) {
			found = true;
			break;
		}
	}

	spin_unlock_irqrestore(host->host_lock, flags);

	/*
	 * Bypass SCSI error handling and reset the block layer timer if this
	 * SCSI command was not actually dispatched to UFS driver, otherwise
	 * let SCSI layer handle the error as usual.
	 */
	return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
}

static struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.queuecommand		= ufshcd_queuecommand,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_configure	= ufshcd_slave_configure,
	.slave_destroy		= ufshcd_slave_destroy,
	.change_queue_depth	= ufshcd_change_queue_depth,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
	.eh_host_reset_handler	= ufshcd_eh_host_reset_handler,
	.eh_timed_out		= ufshcd_eh_timed_out,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
	.max_host_blocked	= 1,
	.track_queue_depth	= 1,
};
5667
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005668static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
5669 int ua)
5670{
Bjorn Andersson7b16a072015-02-11 19:35:28 -08005671 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005672
Bjorn Andersson7b16a072015-02-11 19:35:28 -08005673 if (!vreg)
5674 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005675
Bjorn Andersson7b16a072015-02-11 19:35:28 -08005676 ret = regulator_set_load(vreg->reg, ua);
5677 if (ret < 0) {
5678 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
5679 __func__, vreg->name, ua, ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005680 }
5681
5682 return ret;
5683}
5684
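/*
 * Illustration only: how a caller might use regulator_set_load() to
 * move a rail between an active-load and a low-power-load request.
 * The microamp figures below are hypothetical; see the LPM/HPM
 * wrappers that follow for the driver's real values.
 */
#if 0	/* sketch assumes <linux/regulator/consumer.h> */
static int ufs_sketch_set_rail_load(struct regulator *reg, bool active)
{
	/* tell the regulator framework the expected current draw in uA */
	return regulator_set_load(reg, active ? 100000 : 1000);
}
#endif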
5685static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
5686 struct ufs_vreg *vreg)
5687{
Yaniv Gardi60f01872016-03-10 17:37:11 +02005688 if (!vreg)
5689 return 0;
5690 else if (vreg->unused)
5691 return 0;
5692 else
5693 return ufshcd_config_vreg_load(hba->dev, vreg,
5694 UFS_VREG_LPM_LOAD_UA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005695}
5696
5697static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
5698 struct ufs_vreg *vreg)
5699{
Yaniv Gardi60f01872016-03-10 17:37:11 +02005700 if (!vreg)
5701 return 0;
5702 else if (vreg->unused)
5703 return 0;
5704 else
5705 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005706}
5707
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005708static int ufshcd_config_vreg(struct device *dev,
5709 struct ufs_vreg *vreg, bool on)
5710{
5711 int ret = 0;
5712 struct regulator *reg = vreg->reg;
5713 const char *name = vreg->name;
5714 int min_uV, uA_load;
5715
5716 BUG_ON(!vreg);
5717
5718 if (regulator_count_voltages(reg) > 0) {
5719 min_uV = on ? vreg->min_uV : 0;
5720 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
5721 if (ret) {
5722 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
5723 __func__, name, ret);
5724 goto out;
5725 }
5726
5727 uA_load = on ? vreg->max_uA : 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005728 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
5729 if (ret)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005730 goto out;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005731 }
5732out:
5733 return ret;
5734}
5735
5736static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
5737{
5738 int ret = 0;
5739
Yaniv Gardi60f01872016-03-10 17:37:11 +02005740 if (!vreg)
5741 goto out;
5742 else if (vreg->enabled || vreg->unused)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005743 goto out;
5744
5745 ret = ufshcd_config_vreg(dev, vreg, true);
5746 if (!ret)
5747 ret = regulator_enable(vreg->reg);
5748
5749 if (!ret)
5750 vreg->enabled = true;
5751 else
5752 dev_err(dev, "%s: %s enable failed, err=%d\n",
5753 __func__, vreg->name, ret);
5754out:
5755 return ret;
5756}
5757
5758static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
5759{
5760 int ret = 0;
5761
Yaniv Gardi60f01872016-03-10 17:37:11 +02005762 if (!vreg)
5763 goto out;
5764 else if (!vreg->enabled || vreg->unused)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005765 goto out;
5766
5767 ret = regulator_disable(vreg->reg);
5768
5769 if (!ret) {
5770 /* ignore errors on applying disable config */
5771 ufshcd_config_vreg(dev, vreg, false);
5772 vreg->enabled = false;
5773 } else {
5774 dev_err(dev, "%s: %s disable failed, err=%d\n",
5775 __func__, vreg->name, ret);
5776 }
5777out:
5778 return ret;
5779}
5780
5781static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
5782{
5783 int ret = 0;
5784 struct device *dev = hba->dev;
5785 struct ufs_vreg_info *info = &hba->vreg_info;
5786
5787 if (!info)
5788 goto out;
5789
5790 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
5791 if (ret)
5792 goto out;
5793
5794 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
5795 if (ret)
5796 goto out;
5797
5798 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
5799 if (ret)
5800 goto out;
5801
5802out:
5803 if (ret) {
5804 ufshcd_toggle_vreg(dev, info->vccq2, false);
5805 ufshcd_toggle_vreg(dev, info->vccq, false);
5806 ufshcd_toggle_vreg(dev, info->vcc, false);
5807 }
5808 return ret;
5809}
5810
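/*
 * Illustration only: the enable-in-order, roll-back-on-failure shape
 * that ufshcd_setup_vreg() uses above, reduced to two rails. The
 * helper name is hypothetical and reuses ufshcd_toggle_vreg() from
 * this file.
 */
#if 0	/* illustrative sketch */
static int ufs_sketch_power_two_rails(struct device *dev,
				      struct ufs_vreg *a,
				      struct ufs_vreg *b)
{
	int ret;

	ret = ufshcd_toggle_vreg(dev, a, true);
	if (ret)
		return ret;

	ret = ufshcd_toggle_vreg(dev, b, true);
	if (ret)
		ufshcd_toggle_vreg(dev, a, false);	/* undo rail a */
	return ret;
}
#endif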
Raviv Shvili6a771a62014-09-25 15:32:24 +03005811static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
5812{
5813 struct ufs_vreg_info *info = &hba->vreg_info;
5814
5815 if (info)
5816 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
5817
5818 return 0;
5819}
5820
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005821static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
5822{
5823 int ret = 0;
5824
5825 if (!vreg)
5826 goto out;
5827
5828 vreg->reg = devm_regulator_get(dev, vreg->name);
5829 if (IS_ERR(vreg->reg)) {
5830 ret = PTR_ERR(vreg->reg);
5831 dev_err(dev, "%s: %s get failed, err=%d\n",
5832 __func__, vreg->name, ret);
5833 }
5834out:
5835 return ret;
5836}
5837
5838static int ufshcd_init_vreg(struct ufs_hba *hba)
5839{
5840 int ret = 0;
5841 struct device *dev = hba->dev;
5842 struct ufs_vreg_info *info = &hba->vreg_info;
5843
5844 if (!info)
5845 goto out;
5846
5847 ret = ufshcd_get_vreg(dev, info->vcc);
5848 if (ret)
5849 goto out;
5850
5851 ret = ufshcd_get_vreg(dev, info->vccq);
5852 if (ret)
5853 goto out;
5854
5855 ret = ufshcd_get_vreg(dev, info->vccq2);
5856out:
5857 return ret;
5858}
5859
Raviv Shvili6a771a62014-09-25 15:32:24 +03005860static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
5861{
5862 struct ufs_vreg_info *info = &hba->vreg_info;
5863
5864 if (info)
5865 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
5866
5867 return 0;
5868}
5869
Yaniv Gardi60f01872016-03-10 17:37:11 +02005870static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
5871{
5872 int ret = 0;
5873 struct ufs_vreg_info *info = &hba->vreg_info;
5874
5875 if (!info)
5876 goto out;
5877 else if (!info->vccq)
5878 goto out;
5879
5880 if (unused) {
5881 /* shut off the rail here */
5882 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
5883 /*
5884 * Mark this rail as no longer used, so it doesn't get enabled
5885 * later by mistake
5886 */
5887 if (!ret)
5888 info->vccq->unused = true;
5889 } else {
5890 /*
5891			 * The rail should already have been enabled; just make
5892			 * sure that the unused flag is cleared.
5893 */
5894 info->vccq->unused = false;
5895 }
5896out:
5897 return ret;
5898}
5899
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005900static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
5901 bool skip_ref_clk)
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005902{
5903 int ret = 0;
5904 struct ufs_clk_info *clki;
5905 struct list_head *head = &hba->clk_list_head;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005906 unsigned long flags;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005907
5908 if (!head || list_empty(head))
5909 goto out;
5910
Subhash Jadavani1e879e82016-10-06 21:48:22 -07005911 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
5912 if (ret)
5913 return ret;
5914
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005915 list_for_each_entry(clki, head, list) {
5916 if (!IS_ERR_OR_NULL(clki->clk)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005917 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
5918 continue;
5919
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005920 if (on && !clki->enabled) {
5921 ret = clk_prepare_enable(clki->clk);
5922 if (ret) {
5923 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
5924 __func__, clki->name, ret);
5925 goto out;
5926 }
5927 } else if (!on && clki->enabled) {
5928 clk_disable_unprepare(clki->clk);
5929 }
5930 clki->enabled = on;
5931 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
5932 clki->name, on ? "en" : "dis");
5933 }
5934 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005935
Subhash Jadavani1e879e82016-10-06 21:48:22 -07005936 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
5937 if (ret)
5938 return ret;
5939
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005940out:
5941 if (ret) {
5942 list_for_each_entry(clki, head, list) {
5943 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
5944 clk_disable_unprepare(clki->clk);
5945 }
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08005946 } else if (!ret && on) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005947 spin_lock_irqsave(hba->host->host_lock, flags);
5948 hba->clk_gating.state = CLKS_ON;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08005949 trace_ufshcd_clk_gating(dev_name(hba->dev),
5950 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005951 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005952 }
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08005953
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005954 return ret;
5955}
5956
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005957static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
5958{
5959 return __ufshcd_setup_clocks(hba, on, false);
5960}
5961
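/*
 * Illustration only: the clk_prepare_enable()/clk_disable_unprepare()
 * pairing that __ufshcd_setup_clocks() applies to each clock in the
 * list; the helper name is hypothetical.
 */
#if 0	/* sketch assumes <linux/clk.h> */
static int ufs_sketch_toggle_clk(struct clk *clk, bool on)
{
	if (on)
		return clk_prepare_enable(clk);	/* prepare + enable */
	clk_disable_unprepare(clk);		/* reverse both steps */
	return 0;
}
#endif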
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005962static int ufshcd_init_clocks(struct ufs_hba *hba)
5963{
5964 int ret = 0;
5965 struct ufs_clk_info *clki;
5966 struct device *dev = hba->dev;
5967 struct list_head *head = &hba->clk_list_head;
5968
5969 if (!head || list_empty(head))
5970 goto out;
5971
5972 list_for_each_entry(clki, head, list) {
5973 if (!clki->name)
5974 continue;
5975
5976 clki->clk = devm_clk_get(dev, clki->name);
5977 if (IS_ERR(clki->clk)) {
5978 ret = PTR_ERR(clki->clk);
5979 dev_err(dev, "%s: %s clk get failed, %d\n",
5980 __func__, clki->name, ret);
5981 goto out;
5982 }
5983
5984 if (clki->max_freq) {
5985 ret = clk_set_rate(clki->clk, clki->max_freq);
5986 if (ret) {
5987 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
5988 __func__, clki->name,
5989 clki->max_freq, ret);
5990 goto out;
5991 }
Sahitya Tummala856b3482014-09-25 15:32:34 +03005992 clki->curr_freq = clki->max_freq;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005993 }
5994 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
5995 clki->name, clk_get_rate(clki->clk));
5996 }
5997out:
5998 return ret;
5999}
6000
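/*
 * Illustration only: the lookup-then-set-rate step that
 * ufshcd_init_clocks() performs per clock; the "core_clk" name and
 * the helper are hypothetical.
 */
#if 0	/* sketch assumes <linux/clk.h> and <linux/err.h> */
static struct clk *ufs_sketch_get_clk(struct device *dev,
				      unsigned long max_freq)
{
	struct clk *clk = devm_clk_get(dev, "core_clk");

	if (!IS_ERR(clk) && max_freq)
		clk_set_rate(clk, max_freq);	/* best effort in this sketch */
	return clk;
}
#endif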
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006001static int ufshcd_variant_hba_init(struct ufs_hba *hba)
6002{
6003 int err = 0;
6004
6005 if (!hba->vops)
6006 goto out;
6007
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006008 err = ufshcd_vops_init(hba);
6009 if (err)
6010 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006011
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006012 err = ufshcd_vops_setup_regulators(hba, true);
6013 if (err)
6014 goto out_exit;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006015
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006016 goto out;
6017
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006018out_exit:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006019 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006020out:
6021 if (err)
6022 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006023 __func__, ufshcd_get_var_name(hba), err);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006024 return err;
6025}
6026
6027static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
6028{
6029 if (!hba->vops)
6030 return;
6031
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006032 ufshcd_vops_setup_regulators(hba, false);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006033
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006034 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006035}
6036
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006037static int ufshcd_hba_init(struct ufs_hba *hba)
6038{
6039 int err;
6040
Raviv Shvili6a771a62014-09-25 15:32:24 +03006041 /*
6042 * Handle host controller power separately from the UFS device power
6043	 * rails as it helps to control the UFS host controller power
6044	 * collapse easily, which is different from UFS device power collapse.
6045	 * Also, enable the host controller power before we go ahead with the
6046	 * rest of the initialization here.
6047 */
6048 err = ufshcd_init_hba_vreg(hba);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006049 if (err)
6050 goto out;
6051
Raviv Shvili6a771a62014-09-25 15:32:24 +03006052 err = ufshcd_setup_hba_vreg(hba, true);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006053 if (err)
6054 goto out;
6055
Raviv Shvili6a771a62014-09-25 15:32:24 +03006056 err = ufshcd_init_clocks(hba);
6057 if (err)
6058 goto out_disable_hba_vreg;
6059
6060 err = ufshcd_setup_clocks(hba, true);
6061 if (err)
6062 goto out_disable_hba_vreg;
6063
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03006064 err = ufshcd_init_vreg(hba);
6065 if (err)
6066 goto out_disable_clks;
6067
6068 err = ufshcd_setup_vreg(hba, true);
6069 if (err)
6070 goto out_disable_clks;
6071
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006072 err = ufshcd_variant_hba_init(hba);
6073 if (err)
6074 goto out_disable_vreg;
6075
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006076 hba->is_powered = true;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006077 goto out;
6078
6079out_disable_vreg:
6080 ufshcd_setup_vreg(hba, false);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03006081out_disable_clks:
6082 ufshcd_setup_clocks(hba, false);
Raviv Shvili6a771a62014-09-25 15:32:24 +03006083out_disable_hba_vreg:
6084 ufshcd_setup_hba_vreg(hba, false);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006085out:
6086 return err;
6087}
6088
6089static void ufshcd_hba_exit(struct ufs_hba *hba)
6090{
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006091 if (hba->is_powered) {
6092 ufshcd_variant_hba_exit(hba);
6093 ufshcd_setup_vreg(hba, false);
Gilad Bronera5082532016-10-17 17:10:00 -07006094 ufshcd_suspend_clkscaling(hba);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006095 ufshcd_setup_clocks(hba, false);
6096 ufshcd_setup_hba_vreg(hba, false);
6097 hba->is_powered = false;
6098 }
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006099}
6100
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006101static int
6102ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306103{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006104 unsigned char cmd[6] = {REQUEST_SENSE,
6105 0,
6106 0,
6107 0,
Gilad Bronerdcea0bf2016-10-17 17:09:48 -07006108 UFSHCD_REQ_SENSE_SIZE,
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006109 0};
6110 char *buffer;
6111 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306112
Gilad Bronerdcea0bf2016-10-17 17:09:48 -07006113 buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006114 if (!buffer) {
6115 ret = -ENOMEM;
6116 goto out;
6117 }
6118
6119 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
Gilad Bronerdcea0bf2016-10-17 17:09:48 -07006120 UFSHCD_REQ_SENSE_SIZE, NULL,
Christoph Hellwige8064022016-10-20 15:12:13 +02006121 msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006122 if (ret)
6123 pr_err("%s: failed with err %d\n", __func__, ret);
6124
6125 kfree(buffer);
6126out:
6127 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306128}
6129
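/*
 * Illustration only: the 6-byte REQUEST SENSE CDB built above; byte 4
 * carries the allocation length (UFSHCD_REQ_SENSE_SIZE here) and the
 * remaining bytes stay zero. The helper name is hypothetical.
 */
#if 0	/* sketch assumes <scsi/scsi_proto.h> */
static void ufs_sketch_build_request_sense(unsigned char cdb[6],
					   u8 alloc_len)
{
	memset(cdb, 0, 6);
	cdb[0] = REQUEST_SENSE;	/* opcode 0x03 */
	cdb[4] = alloc_len;	/* sense bytes the buffer can accept */
}
#endif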
6130/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006131 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
6132 * power mode
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306133 * @hba: per adapter instance
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006134 * @pwr_mode: device power mode to set
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306135 *
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006136 * Returns 0 if requested power mode is set successfully
6137 * Returns non-zero if failed to set the requested power mode
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306138 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006139static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
6140 enum ufs_dev_pwr_mode pwr_mode)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306141{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006142 unsigned char cmd[6] = { START_STOP };
6143 struct scsi_sense_hdr sshdr;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006144 struct scsi_device *sdp;
6145 unsigned long flags;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006146 int ret;
6147
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006148 spin_lock_irqsave(hba->host->host_lock, flags);
6149 sdp = hba->sdev_ufs_device;
6150 if (sdp) {
6151 ret = scsi_device_get(sdp);
6152 if (!ret && !scsi_device_online(sdp)) {
6153 ret = -ENODEV;
6154 scsi_device_put(sdp);
6155 }
6156 } else {
6157 ret = -ENODEV;
6158 }
6159 spin_unlock_irqrestore(hba->host->host_lock, flags);
6160
6161 if (ret)
6162 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006163
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306164 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006165 * If scsi commands fail, the scsi mid-layer schedules scsi error-
6166 * handling, which would wait for host to be resumed. Since we know
6167 * we are functional while we are here, skip host resume in error
6168 * handling context.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306169 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006170 hba->host->eh_noresume = 1;
6171 if (hba->wlun_dev_clr_ua) {
6172 ret = ufshcd_send_request_sense(hba, sdp);
6173 if (ret)
6174 goto out;
6175 /* Unit attention condition is cleared now */
6176 hba->wlun_dev_clr_ua = false;
6177 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306178
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006179 cmd[4] = pwr_mode << 4;
6180
6181 /*
6182	 * This function is generally called from the power management
Christoph Hellwige8064022016-10-20 15:12:13 +02006183	 * callbacks, hence set the RQF_PM flag so that it doesn't resume the
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006184	 * already suspended children.
6185 */
6186 ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
Christoph Hellwige8064022016-10-20 15:12:13 +02006187 START_STOP_TIMEOUT, 0, NULL, 0, RQF_PM);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006188 if (ret) {
6189 sdev_printk(KERN_WARNING, sdp,
Hannes Reineckeef613292014-10-24 14:27:00 +02006190 "START_STOP failed for power mode: %d, result %x\n",
6191 pwr_mode, ret);
Hannes Reinecke21045512015-01-08 07:43:46 +01006192 if (driver_byte(ret) & DRIVER_SENSE)
6193 scsi_print_sense_hdr(sdp, NULL, &sshdr);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006194 }
6195
6196 if (!ret)
6197 hba->curr_dev_pwr_mode = pwr_mode;
6198out:
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006199 scsi_device_put(sdp);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006200 hba->host->eh_noresume = 0;
6201 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306202}
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306203
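/*
 * Illustration only: in the START STOP UNIT CDB built above, the UFS
 * device power mode travels in the POWER CONDITION field, the upper
 * nibble of CDB byte 4, hence the "pwr_mode << 4". The helper name is
 * hypothetical.
 */
#if 0	/* sketch assumes <scsi/scsi_proto.h> */
static void ufs_sketch_build_start_stop(unsigned char cdb[6],
					enum ufs_dev_pwr_mode pwr_mode)
{
	memset(cdb, 0, 6);
	cdb[0] = START_STOP;		/* opcode 0x1b */
	cdb[4] = pwr_mode << 4;		/* POWER CONDITION nibble */
}
#endif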
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006204static int ufshcd_link_state_transition(struct ufs_hba *hba,
6205 enum uic_link_state req_link_state,
6206 int check_for_bkops)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306207{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006208 int ret = 0;
6209
6210 if (req_link_state == hba->uic_link_state)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306211 return 0;
6212
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006213 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
6214 ret = ufshcd_uic_hibern8_enter(hba);
6215 if (!ret)
6216 ufshcd_set_link_hibern8(hba);
6217 else
6218 goto out;
6219 }
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306220 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006221 * If autobkops is enabled, link can't be turned off because
6222 * turning off the link would also turn off the device.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306223 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006224 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
6225 (!check_for_bkops || (check_for_bkops &&
6226 !hba->auto_bkops_enabled))) {
6227 /*
Yaniv Gardif3099fb2016-03-10 17:37:17 +02006228		 * Let's make sure that the link is in low power mode; we do
6229		 * this currently by putting the link in Hibern8. Another way to
6230		 * put the link in low power mode is to send a DME endpoint
6231		 * reset to the device and then a DME reset command to the local
6232		 * UniPro. But putting the link in Hibern8 is much faster.
6233 */
6234 ret = ufshcd_uic_hibern8_enter(hba);
6235 if (ret)
6236 goto out;
6237 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006238 * Change controller state to "reset state" which
6239 * should also put the link in off/reset state
6240 */
Yaniv Gardi596585a2016-03-10 17:37:08 +02006241 ufshcd_hba_stop(hba, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006242 /*
6243 * TODO: Check if we need any delay to make sure that
6244 * controller is reset
6245 */
6246 ufshcd_set_link_off(hba);
6247 }
6248
6249out:
6250 return ret;
6251}
6252
6253static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
6254{
6255 /*
Yaniv Gardib799fdf2016-03-10 17:37:18 +02006256 * It seems some UFS devices may keep drawing more than sleep current
6257	 * (at least for 500us) from UFS rails (especially from the VCCQ rail).
6258 * To avoid this situation, add 2ms delay before putting these UFS
6259 * rails in LPM mode.
6260 */
6261 if (!ufshcd_is_link_active(hba) &&
6262 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
6263 usleep_range(2000, 2100);
6264
6265 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006266	 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to save
6267 * power.
6268 *
6269 * If UFS device and link is in OFF state, all power supplies (VCC,
6270 * VCCQ, VCCQ2) can be turned off if power on write protect is not
6271 * required. If UFS link is inactive (Hibern8 or OFF state) and device
6272 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
6273 *
6274 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
6275 * in low power state which would save some power.
6276 */
6277 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
6278 !hba->dev_info.is_lu_power_on_wp) {
6279 ufshcd_setup_vreg(hba, false);
6280 } else if (!ufshcd_is_ufs_dev_active(hba)) {
6281 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
6282 if (!ufshcd_is_link_active(hba)) {
6283 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
6284 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
6285 }
6286 }
6287}
6288
6289static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
6290{
6291 int ret = 0;
6292
6293 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
6294 !hba->dev_info.is_lu_power_on_wp) {
6295 ret = ufshcd_setup_vreg(hba, true);
6296 } else if (!ufshcd_is_ufs_dev_active(hba)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006297 if (!ret && !ufshcd_is_link_active(hba)) {
6298 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
6299 if (ret)
6300 goto vcc_disable;
6301 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
6302 if (ret)
6303 goto vccq_lpm;
6304 }
Subhash Jadavani69d72ac2016-10-27 17:26:24 -07006305 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006306 }
6307 goto out;
6308
6309vccq_lpm:
6310 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
6311vcc_disable:
6312 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
6313out:
6314 return ret;
6315}
6316
6317static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
6318{
6319 if (ufshcd_is_link_off(hba))
6320 ufshcd_setup_hba_vreg(hba, false);
6321}
6322
6323static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
6324{
6325 if (ufshcd_is_link_off(hba))
6326 ufshcd_setup_hba_vreg(hba, true);
6327}
6328
6329/**
6330 * ufshcd_suspend - helper function for suspend operations
6331 * @hba: per adapter instance
6332 * @pm_op: desired low power operation type
6333 *
6334 * This function will try to put the UFS device and link into low power
6335 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
6336 * (System PM level).
6337 *
6338 * If this function is called during shutdown, it will make sure that
6339 * both UFS device and UFS link is powered off.
6340 *
6341 * NOTE: UFS device & link must be active before we enter in this function.
6342 *
6343 * Returns 0 for success and non-zero for failure
6344 */
6345static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
6346{
6347 int ret = 0;
6348 enum ufs_pm_level pm_lvl;
6349 enum ufs_dev_pwr_mode req_dev_pwr_mode;
6350 enum uic_link_state req_link_state;
6351
6352 hba->pm_op_in_progress = 1;
6353 if (!ufshcd_is_shutdown_pm(pm_op)) {
6354 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
6355 hba->rpm_lvl : hba->spm_lvl;
6356 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
6357 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
6358 } else {
6359 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
6360 req_link_state = UIC_LINK_OFF_STATE;
6361 }
6362
6363 /*
6364 * If we can't transition into any of the low power modes
6365 * just gate the clocks.
6366 */
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006367 ufshcd_hold(hba, false);
6368 hba->clk_gating.is_suspended = true;
6369
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07006370 ufshcd_suspend_clkscaling(hba);
6371
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006372 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
6373 req_link_state == UIC_LINK_ACTIVE_STATE) {
6374 goto disable_clks;
6375 }
6376
6377 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
6378 (req_link_state == hba->uic_link_state))
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07006379 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006380
6381 /* UFS device & link must be active before we enter in this function */
6382 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
6383 ret = -EINVAL;
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07006384 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006385 }
6386
6387 if (ufshcd_is_runtime_pm(pm_op)) {
Subhash Jadavani374a2462014-09-25 15:32:35 +03006388 if (ufshcd_can_autobkops_during_suspend(hba)) {
6389 /*
6390 * The device is idle with no requests in the queue,
6391 * allow background operations if bkops status shows
6392 * that performance might be impacted.
6393 */
6394 ret = ufshcd_urgent_bkops(hba);
6395 if (ret)
6396 goto enable_gating;
6397 } else {
6398 /* make sure that auto bkops is disabled */
6399 ufshcd_disable_auto_bkops(hba);
6400 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006401 }
6402
6403 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
6404 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
6405 !ufshcd_is_runtime_pm(pm_op))) {
6406 /* ensure that bkops is disabled */
6407 ufshcd_disable_auto_bkops(hba);
6408 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
6409 if (ret)
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006410 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006411 }
6412
6413 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
6414 if (ret)
6415 goto set_dev_active;
6416
6417 ufshcd_vreg_set_lpm(hba);
6418
6419disable_clks:
6420 /*
6421 * Call vendor specific suspend callback. As these callbacks may access
6422 * vendor specific host controller register space call them before the
6423 * host clocks are ON.
6424 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006425 ret = ufshcd_vops_suspend(hba, pm_op);
6426 if (ret)
6427 goto set_link_active;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006428
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006429 if (!ufshcd_is_link_active(hba))
6430 ufshcd_setup_clocks(hba, false);
6431 else
6432 /* If link is active, device ref_clk can't be switched off */
6433 __ufshcd_setup_clocks(hba, false, true);
6434
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006435 hba->clk_gating.state = CLKS_OFF;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006436 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006437 /*
6438	 * Disable the host irq as there won't be any host controller
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006439	 * transactions expected till resume.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006440 */
6441 ufshcd_disable_irq(hba);
6442 /* Put the host controller in low power mode if possible */
6443 ufshcd_hba_vreg_set_lpm(hba);
6444 goto out;
6445
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006446set_link_active:
Gilad Bronera5082532016-10-17 17:10:00 -07006447 ufshcd_resume_clkscaling(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006448 ufshcd_vreg_set_hpm(hba);
6449 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
6450 ufshcd_set_link_active(hba);
6451 else if (ufshcd_is_link_off(hba))
6452 ufshcd_host_reset_and_restore(hba);
6453set_dev_active:
6454 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
6455 ufshcd_disable_auto_bkops(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006456enable_gating:
Subhash Jadavanid6fcf812016-10-27 17:26:09 -07006457 ufshcd_resume_clkscaling(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006458 hba->clk_gating.is_suspended = false;
6459 ufshcd_release(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006460out:
6461 hba->pm_op_in_progress = 0;
6462 return ret;
6463}
6464
6465/**
6466 * ufshcd_resume - helper function for resume operations
6467 * @hba: per adapter instance
6468 * @pm_op: runtime PM or system PM
6469 *
6470 * This function basically brings the UFS device, UniPro link and controller
6471 * to active state.
6472 *
6473 * Returns 0 for success and non-zero for failure
6474 */
6475static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
6476{
6477 int ret;
6478 enum uic_link_state old_link_state;
6479
6480 hba->pm_op_in_progress = 1;
6481 old_link_state = hba->uic_link_state;
6482
6483 ufshcd_hba_vreg_set_hpm(hba);
6484 /* Make sure clocks are enabled before accessing controller */
6485 ret = ufshcd_setup_clocks(hba, true);
6486 if (ret)
6487 goto out;
6488
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006489 /* enable the host irq as host controller would be active soon */
6490 ret = ufshcd_enable_irq(hba);
6491 if (ret)
6492 goto disable_irq_and_vops_clks;
6493
6494 ret = ufshcd_vreg_set_hpm(hba);
6495 if (ret)
6496 goto disable_irq_and_vops_clks;
6497
6498 /*
6499 * Call vendor specific resume callback. As these callbacks may access
6500 * vendor specific host controller register space call them when the
6501 * host clocks are ON.
6502 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006503 ret = ufshcd_vops_resume(hba, pm_op);
6504 if (ret)
6505 goto disable_vreg;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006506
6507 if (ufshcd_is_link_hibern8(hba)) {
6508 ret = ufshcd_uic_hibern8_exit(hba);
6509 if (!ret)
6510 ufshcd_set_link_active(hba);
6511 else
6512 goto vendor_suspend;
6513 } else if (ufshcd_is_link_off(hba)) {
6514 ret = ufshcd_host_reset_and_restore(hba);
6515 /*
6516 * ufshcd_host_reset_and_restore() should have already
6517 * set the link state as active
6518 */
6519 if (ret || !ufshcd_is_link_active(hba))
6520 goto vendor_suspend;
6521 }
6522
6523 if (!ufshcd_is_ufs_dev_active(hba)) {
6524 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
6525 if (ret)
6526 goto set_old_link_state;
6527 }
6528
Subhash Jadavani374a2462014-09-25 15:32:35 +03006529 /*
6530 * If BKOPs operations are urgently needed at this moment then
6531 * keep auto-bkops enabled or else disable it.
6532 */
6533 ufshcd_urgent_bkops(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006534 hba->clk_gating.is_suspended = false;
6535
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08006536 if (hba->clk_scaling.is_allowed)
6537 ufshcd_resume_clkscaling(hba);
Sahitya Tummala856b3482014-09-25 15:32:34 +03006538
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006539 /* Schedule clock gating in case of no access to UFS device yet */
6540 ufshcd_release(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006541 goto out;
6542
6543set_old_link_state:
6544 ufshcd_link_state_transition(hba, old_link_state, 0);
6545vendor_suspend:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006546 ufshcd_vops_suspend(hba, pm_op);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006547disable_vreg:
6548 ufshcd_vreg_set_lpm(hba);
6549disable_irq_and_vops_clks:
6550 ufshcd_disable_irq(hba);
Gilad Bronera5082532016-10-17 17:10:00 -07006551 ufshcd_suspend_clkscaling(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006552 ufshcd_setup_clocks(hba, false);
6553out:
6554 hba->pm_op_in_progress = 0;
6555 return ret;
6556}
6557
6558/**
6559 * ufshcd_system_suspend - system suspend routine
6560 * @hba: per adapter instance
6562 *
6563 * Check the description of ufshcd_suspend() function for more details.
6564 *
6565 * Returns 0 for success and non-zero for failure
6566 */
6567int ufshcd_system_suspend(struct ufs_hba *hba)
6568{
6569 int ret = 0;
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006570 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006571
6572 if (!hba || !hba->is_powered)
Dolev Raviv233b5942014-10-23 13:25:14 +03006573 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006574
subhashj@codeaurora.org0b257732016-11-23 16:33:08 -08006575 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
6576 hba->curr_dev_pwr_mode) &&
6577 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
6578 hba->uic_link_state))
6579 goto out;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006580
subhashj@codeaurora.org0b257732016-11-23 16:33:08 -08006581 if (pm_runtime_suspended(hba->dev)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006582 /*
6583 * UFS device and/or UFS link low power states during runtime
6584		 * suspend seem to be different from what is expected during
6585		 * system suspend. Hence runtime resume the device & link and
6586		 * let the system suspend low power states take effect.
6587		 * TODO: If resume takes a long time, we might optimize it in
6588		 * the future by not resuming everything if possible.
6589 */
6590 ret = ufshcd_runtime_resume(hba);
6591 if (ret)
6592 goto out;
6593 }
6594
6595 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
6596out:
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006597 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
6598 ktime_to_us(ktime_sub(ktime_get(), start)),
6599 hba->uic_link_state, hba->curr_dev_pwr_mode);
Dolev Ravive7850602014-09-25 15:32:36 +03006600 if (!ret)
6601 hba->is_sys_suspended = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006602 return ret;
6603}
6604EXPORT_SYMBOL(ufshcd_system_suspend);
6605
6606/**
6607 * ufshcd_system_resume - system resume routine
6608 * @hba: per adapter instance
6609 *
6610 * Returns 0 for success and non-zero for failure
6611 */
6613int ufshcd_system_resume(struct ufs_hba *hba)
6614{
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006615 int ret = 0;
6616 ktime_t start = ktime_get();
6617
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07006618 if (!hba)
6619 return -EINVAL;
6620
6621 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006622 /*
6623 * Let the runtime resume take care of resuming
6624 * if runtime suspended.
6625 */
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006626 goto out;
6627 else
6628 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
6629out:
6630 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
6631 ktime_to_us(ktime_sub(ktime_get(), start)),
6632 hba->uic_link_state, hba->curr_dev_pwr_mode);
6633 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006634}
6635EXPORT_SYMBOL(ufshcd_system_resume);
6636
6637/**
6638 * ufshcd_runtime_suspend - runtime suspend routine
6639 * @hba: per adapter instance
6640 *
6641 * Check the description of ufshcd_suspend() function for more details.
6642 *
6643 * Returns 0 for success and non-zero for failure
6644 */
6645int ufshcd_runtime_suspend(struct ufs_hba *hba)
6646{
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006647 int ret = 0;
6648 ktime_t start = ktime_get();
6649
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07006650 if (!hba)
6651 return -EINVAL;
6652
6653 if (!hba->is_powered)
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006654 goto out;
6655 else
6656 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
6657out:
6658 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
6659 ktime_to_us(ktime_sub(ktime_get(), start)),
6660 hba->uic_link_state, hba->curr_dev_pwr_mode);
6661 return ret;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306662}
6663EXPORT_SYMBOL(ufshcd_runtime_suspend);
6664
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006665/**
6666 * ufshcd_runtime_resume - runtime resume routine
6667 * @hba: per adapter instance
6668 *
6669 * This function basically brings the UFS device, UniPro link and controller
6670 * to active state. Following operations are done in this function:
6671 *
6672 * 1. Turn on all the controller related clocks
6673 * 2. Bring the UniPro link out of Hibernate state
6674 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
6675 * to active state.
6676 * 4. If auto-bkops is enabled on the device, disable it.
6677 *
6678 * So the following would be the possible power state after this function
6679 * returns successfully:
6680 * S1: UFS device in Active state with VCC rail ON
6681 * UniPro link in Active state
6682 * All the UFS/UniPro controller clocks are ON
6683 *
6684 * Returns 0 for success and non-zero for failure
6685 */
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306686int ufshcd_runtime_resume(struct ufs_hba *hba)
6687{
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006688 int ret = 0;
6689 ktime_t start = ktime_get();
6690
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07006691 if (!hba)
6692 return -EINVAL;
6693
6694 if (!hba->is_powered)
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006695 goto out;
6696 else
6697 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
6698out:
6699 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
6700 ktime_to_us(ktime_sub(ktime_get(), start)),
6701 hba->uic_link_state, hba->curr_dev_pwr_mode);
6702 return ret;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306703}
6704EXPORT_SYMBOL(ufshcd_runtime_resume);
6705
6706int ufshcd_runtime_idle(struct ufs_hba *hba)
6707{
6708 return 0;
6709}
6710EXPORT_SYMBOL(ufshcd_runtime_idle);
6711
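/*
 * Illustration only: how bus glue (platform/PCI) might wire the
 * exported helpers above into dev_pm_ops; all names below are
 * hypothetical and assume the hba is stored as drvdata.
 */
#if 0	/* sketch assumes <linux/pm.h> and <linux/pm_runtime.h> */
static int ufs_sketch_rt_suspend(struct device *dev)
{
	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}

static int ufs_sketch_rt_resume(struct device *dev)
{
	return ufshcd_runtime_resume(dev_get_drvdata(dev));
}

static const struct dev_pm_ops ufs_sketch_pm_ops = {
	SET_RUNTIME_PM_OPS(ufs_sketch_rt_suspend, ufs_sketch_rt_resume, NULL)
};
#endif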
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306712/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006713 * ufshcd_shutdown - shutdown routine
6714 * @hba: per adapter instance
6715 *
6716 * This function would power off both UFS device and UFS link.
6717 *
6718 * Returns 0 always to allow force shutdown even in case of errors.
6719 */
6720int ufshcd_shutdown(struct ufs_hba *hba)
6721{
6722 int ret = 0;
6723
6724 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
6725 goto out;
6726
6727 if (pm_runtime_suspended(hba->dev)) {
6728 ret = ufshcd_runtime_resume(hba);
6729 if (ret)
6730 goto out;
6731 }
6732
6733 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
6734out:
6735 if (ret)
6736 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
6737 /* allow force shutdown even in case of errors */
6738 return 0;
6739}
6740EXPORT_SYMBOL(ufshcd_shutdown);
6741
6742/**
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306743 * ufshcd_remove - de-allocate SCSI host and host memory space
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306744 * data structures
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306745 * @hba - per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306746 */
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306747void ufshcd_remove(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306748{
Akinobu Mitacfdf9c92013-07-30 00:36:03 +05306749 scsi_remove_host(hba->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306750 /* disable interrupts */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05306751 ufshcd_disable_intr(hba, hba->intr_mask);
Yaniv Gardi596585a2016-03-10 17:37:08 +02006752 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306753
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006754 ufshcd_exit_clk_gating(hba);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08006755 if (ufshcd_is_clkscaling_supported(hba))
6756 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006757 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306758}
6759EXPORT_SYMBOL_GPL(ufshcd_remove);
6760
6761/**
Yaniv Gardi47555a52015-10-28 13:15:49 +02006762 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
6763 * @hba: pointer to Host Bus Adapter (HBA)
6764 */
6765void ufshcd_dealloc_host(struct ufs_hba *hba)
6766{
6767 scsi_host_put(hba->host);
6768}
6769EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
6770
6771/**
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09006772 * ufshcd_set_dma_mask - Set dma mask based on the controller
6773 * addressing capability
6774 * @hba: per adapter instance
6775 *
6776 * Returns 0 for success, non-zero for failure
6777 */
6778static int ufshcd_set_dma_mask(struct ufs_hba *hba)
6779{
6780 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
6781 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
6782 return 0;
6783 }
6784 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
6785}
6786
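/*
 * Illustration only: the 64-bit-first, 32-bit-fallback policy of
 * ufshcd_set_dma_mask() above, seen from a hypothetical caller that
 * already knows the controller's addressing capability.
 */
#if 0	/* sketch assumes <linux/dma-mapping.h> */
static int ufs_sketch_dma_mask(struct device *dev, bool cap_64bit)
{
	if (cap_64bit && !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;	/* 64-bit DMA accepted */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
#endif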
6787/**
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006788 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306789 * @dev: pointer to device handle
6790 * @hba_handle: driver private handle
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306791 * Returns 0 on success, non-zero value on failure
6792 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006793int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306794{
6795 struct Scsi_Host *host;
6796 struct ufs_hba *hba;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006797 int err = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306798
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306799 if (!dev) {
6800 dev_err(dev,
6801 "Invalid memory reference for dev is NULL\n");
6802 err = -ENODEV;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306803 goto out_error;
6804 }
6805
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306806 host = scsi_host_alloc(&ufshcd_driver_template,
6807 sizeof(struct ufs_hba));
6808 if (!host) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306809 dev_err(dev, "scsi_host_alloc failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306810 err = -ENOMEM;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306811 goto out_error;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306812 }
6813 hba = shost_priv(host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306814 hba->host = host;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306815 hba->dev = dev;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006816 *hba_handle = hba;
6817
6818out_error:
6819 return err;
6820}
6821EXPORT_SYMBOL(ufshcd_alloc_host);
6822
Sahitya Tummala856b3482014-09-25 15:32:34 +03006823static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
6824{
6825 int ret = 0;
6826 struct ufs_clk_info *clki;
6827 struct list_head *head = &hba->clk_list_head;
6828
6829 if (!head || list_empty(head))
6830 goto out;
6831
Yaniv Gardif06fcc72015-10-28 13:15:51 +02006832 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
6833 if (ret)
6834 return ret;
6835
Sahitya Tummala856b3482014-09-25 15:32:34 +03006836 list_for_each_entry(clki, head, list) {
6837 if (!IS_ERR_OR_NULL(clki->clk)) {
6838 if (scale_up && clki->max_freq) {
6839 if (clki->curr_freq == clki->max_freq)
6840 continue;
6841 ret = clk_set_rate(clki->clk, clki->max_freq);
6842 if (ret) {
6843 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
6844 __func__, clki->name,
6845 clki->max_freq, ret);
6846 break;
6847 }
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006848 trace_ufshcd_clk_scaling(dev_name(hba->dev),
6849 "scaled up", clki->name,
6850 clki->curr_freq,
6851 clki->max_freq);
6852
Sahitya Tummala856b3482014-09-25 15:32:34 +03006853 clki->curr_freq = clki->max_freq;
6854
6855 } else if (!scale_up && clki->min_freq) {
6856 if (clki->curr_freq == clki->min_freq)
6857 continue;
6858 ret = clk_set_rate(clki->clk, clki->min_freq);
6859 if (ret) {
6860 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
6861 __func__, clki->name,
6862 clki->min_freq, ret);
6863 break;
6864 }
subhashj@codeaurora.org7ff5ab42016-12-22 18:39:51 -08006865 trace_ufshcd_clk_scaling(dev_name(hba->dev),
6866 "scaled down", clki->name,
6867 clki->curr_freq,
6868 clki->min_freq);
Sahitya Tummala856b3482014-09-25 15:32:34 +03006869 clki->curr_freq = clki->min_freq;
6870 }
6871 }
6872 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
6873 clki->name, clk_get_rate(clki->clk));
6874 }
Yaniv Gardif06fcc72015-10-28 13:15:51 +02006875
6876 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
6877
Sahitya Tummala856b3482014-09-25 15:32:34 +03006878out:
6879 return ret;
6880}
6881
6882static int ufshcd_devfreq_target(struct device *dev,
6883 unsigned long *freq, u32 flags)
6884{
6885 int err = 0;
6886 struct ufs_hba *hba = dev_get_drvdata(dev);
Subhash Jadavani30fc33f2016-10-27 17:25:47 -07006887 bool release_clk_hold = false;
6888 unsigned long irq_flags;
Sahitya Tummala856b3482014-09-25 15:32:34 +03006889
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08006890 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03006891 return -EINVAL;
6892
Subhash Jadavani30fc33f2016-10-27 17:25:47 -07006893 spin_lock_irqsave(hba->host->host_lock, irq_flags);
6894 if (ufshcd_eh_in_progress(hba)) {
6895 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6896 return 0;
6897 }
6898
6899 if (ufshcd_is_clkgating_allowed(hba) &&
6900 (hba->clk_gating.state != CLKS_ON)) {
6901 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
6902 /* hold the vote until the scaling work is completed */
6903 hba->clk_gating.active_reqs++;
6904 release_clk_hold = true;
6905 hba->clk_gating.state = CLKS_ON;
6906 } else {
6907 /*
6908 * Clock gating work seems to be running in parallel
6909 * hence skip scaling work to avoid deadlock between
6910 * current scaling work and gating work.
6911 */
6912 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6913 return 0;
6914 }
6915 }
6916 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6917
Sahitya Tummala856b3482014-09-25 15:32:34 +03006918 if (*freq == UINT_MAX)
6919 err = ufshcd_scale_clks(hba, true);
6920 else if (*freq == 0)
6921 err = ufshcd_scale_clks(hba, false);
6922
Subhash Jadavani30fc33f2016-10-27 17:25:47 -07006923 spin_lock_irqsave(hba->host->host_lock, irq_flags);
6924 if (release_clk_hold)
6925 __ufshcd_release(hba);
6926 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6927
Sahitya Tummala856b3482014-09-25 15:32:34 +03006928 return err;
6929}
6930
6931static int ufshcd_devfreq_get_dev_status(struct device *dev,
6932 struct devfreq_dev_status *stat)
6933{
6934 struct ufs_hba *hba = dev_get_drvdata(dev);
6935 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
6936 unsigned long flags;
6937
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08006938 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03006939 return -EINVAL;
6940
6941 memset(stat, 0, sizeof(*stat));
6942
6943 spin_lock_irqsave(hba->host->host_lock, flags);
6944 if (!scaling->window_start_t)
6945 goto start_window;
6946
6947 if (scaling->is_busy_started)
6948 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
6949 scaling->busy_start_t));
6950
6951 stat->total_time = jiffies_to_usecs((long)jiffies -
6952 (long)scaling->window_start_t);
6953 stat->busy_time = scaling->tot_busy_t;
6954start_window:
6955 scaling->window_start_t = jiffies;
6956 scaling->tot_busy_t = 0;
6957
6958 if (hba->outstanding_reqs) {
6959 scaling->busy_start_t = ktime_get();
6960 scaling->is_busy_started = true;
6961 } else {
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01006962 scaling->busy_start_t = 0;
Sahitya Tummala856b3482014-09-25 15:32:34 +03006963 scaling->is_busy_started = false;
6964 }
6965 spin_unlock_irqrestore(hba->host->host_lock, flags);
6966 return 0;
6967}
6968
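/*
 * Illustration only: the load figure a devfreq governor such as
 * simple_ondemand derives from the busy_time/total_time pair filled
 * in above; the helper is hypothetical.
 */
#if 0	/* sketch assumes <linux/devfreq.h> */
static unsigned int ufs_sketch_load_pct(struct devfreq_dev_status *stat)
{
	if (!stat->total_time)
		return 0;
	return (unsigned int)(stat->busy_time * 100 / stat->total_time);
}
#endif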
6969static struct devfreq_dev_profile ufs_devfreq_profile = {
6970 .polling_ms = 100,
6971 .target = ufshcd_devfreq_target,
6972 .get_dev_status = ufshcd_devfreq_get_dev_status,
6973};
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08006974static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
6975{
6976 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
6977 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
6978 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
6979 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
6980 hba->clk_scaling.enable_attr.attr.mode = 0644;
6981 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
6982 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
6983}
Sahitya Tummala856b3482014-09-25 15:32:34 +03006984
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006985/**
6986 * ufshcd_init - Driver initialization routine
6987 * @hba: per-adapter instance
6988 * @mmio_base: base register address
6989 * @irq: Interrupt line of device
6990 * Returns 0 on success, non-zero value on failure
6991 */
6992int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
6993{
6994 int err;
6995 struct Scsi_Host *host = hba->host;
6996 struct device *dev = hba->dev;
6997
6998 if (!mmio_base) {
6999 dev_err(hba->dev,
7000 "Invalid memory reference for mmio_base is NULL\n");
7001 err = -ENODEV;
7002 goto out_error;
7003 }
7004
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307005 hba->mmio_base = mmio_base;
7006 hba->irq = irq;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307007
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007008 err = ufshcd_hba_init(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007009 if (err)
7010 goto out_error;
7011
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307012 /* Read capabilities registers */
7013 ufshcd_hba_capabilities(hba);
7014
7015 /* Get UFS version supported by the controller */
7016 hba->ufs_version = ufshcd_get_ufs_version(hba);
7017
Yaniv Gardic01848c2016-12-05 19:25:02 -08007018 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
7019 (hba->ufs_version != UFSHCI_VERSION_11) &&
7020 (hba->ufs_version != UFSHCI_VERSION_20) &&
7021 (hba->ufs_version != UFSHCI_VERSION_21))
7022 dev_err(hba->dev, "invalid UFS version 0x%x\n",
7023 hba->ufs_version);
7024
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05307025 /* Get Interrupt bit mask per version */
7026 hba->intr_mask = ufshcd_get_intr_mask(hba);
7027
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09007028 err = ufshcd_set_dma_mask(hba);
7029 if (err) {
7030 dev_err(hba->dev, "set dma mask failed\n");
7031 goto out_disable;
7032 }
7033
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307034 /* Allocate memory for host memory space */
7035 err = ufshcd_memory_alloc(hba);
7036 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307037 dev_err(hba->dev, "Memory allocation failed\n");
7038 goto out_disable;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307039 }
7040
7041 /* Configure LRB */
7042 ufshcd_host_memory_configure(hba);
7043
7044 host->can_queue = hba->nutrs;
7045 host->cmd_per_lun = hba->nutrs;
7046 host->max_id = UFSHCD_MAX_ID;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03007047 host->max_lun = UFS_MAX_LUNS;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307048 host->max_channel = UFSHCD_MAX_CHANNEL;
7049 host->unique_id = host->host_no;
7050 host->max_cmd_len = MAX_CDB_SIZE;
7051
Dolev Raviv7eb584d2014-09-25 15:32:31 +03007052 hba->max_pwr_info.is_valid = false;
7053
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307054	/* Initialize wait queue for task management */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05307055 init_waitqueue_head(&hba->tm_wq);
7056 init_waitqueue_head(&hba->tm_tag_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307057
7058 /* Initialize work queues */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05307059 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05307060 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307061
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307062 /* Initialize UIC command mutex */
7063 mutex_init(&hba->uic_cmd_mutex);
7064
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05307065 /* Initialize mutex for device management commands */
7066 mutex_init(&hba->dev_cmd.lock);
7067
7068 /* Initialize device management tag acquire wait queue */
7069 init_waitqueue_head(&hba->dev_cmd.tag_wq);
7070
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007071 ufshcd_init_clk_gating(hba);
Yaniv Gardi199ef132016-03-10 17:37:06 +02007072
7073 /*
7074 * In order to avoid any spurious interrupt immediately after
7075 * registering UFS controller interrupt handler, clear any pending UFS
7076 * interrupt status and disable all the UFS interrupts.
7077 */
7078 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
7079 REG_INTERRUPT_STATUS);
7080 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
7081 /*
7082 * Make sure that UFS interrupts are disabled and any pending interrupt
7083 * status is cleared before registering UFS interrupt handler.
7084 */
7085 mb();
7086
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307087 /* IRQ registration */
Seungwon Jeon2953f852013-06-27 13:31:54 +09007088 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307089 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307090 dev_err(hba->dev, "request irq failed\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007091 goto exit_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007092 } else {
7093 hba->is_irq_enabled = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307094 }
7095
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307096 err = scsi_add_host(host, hba->dev);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307097 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307098 dev_err(hba->dev, "scsi_add_host failed\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007099 goto exit_gating;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307100 }
7101
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307102 /* Host controller enable */
7103 err = ufshcd_hba_enable(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307104 if (err) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307105 dev_err(hba->dev, "Host controller enable failed\n");
Dolev Raviv66cc8202016-12-22 18:39:42 -08007106 ufshcd_print_host_regs(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307107 goto out_remove_scsi_host;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307108 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307109
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08007110 if (ufshcd_is_clkscaling_supported(hba)) {
Chanwoo Choi4861ee12016-11-08 18:13:28 +09007111 hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile,
Sahitya Tummala856b3482014-09-25 15:32:34 +03007112 "simple_ondemand", NULL);
7113 if (IS_ERR(hba->devfreq)) {
7114 dev_err(hba->dev, "Unable to register with devfreq %ld\n",
7115 PTR_ERR(hba->devfreq));
Wei Yongjun73811c92016-09-28 14:49:42 +00007116 err = PTR_ERR(hba->devfreq);
Sahitya Tummala856b3482014-09-25 15:32:34 +03007117 goto out_remove_scsi_host;
7118 }
7119 /* Suspend devfreq until the UFS device is detected */
Gilad Bronera5082532016-10-17 17:10:00 -07007120 ufshcd_suspend_clkscaling(hba);
Sahitya Tummalafcb0c4b2016-12-22 18:40:50 -08007121 ufshcd_clkscaling_init_sysfs(hba);
Sahitya Tummala856b3482014-09-25 15:32:34 +03007122 }
7123
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05307124 /* Hold auto suspend until async scan completes */
7125 pm_runtime_get_sync(dev);
7126
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007127 /*
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08007128	 * We are assuming that the device wasn't put in sleep/power-down
7129	 * state during the boot stage before the kernel started.
7130 * This assumption helps avoid doing link startup twice during
7131 * ufshcd_probe_hba().
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007132 */
subhashj@codeaurora.org7caf4892016-11-23 16:32:20 -08007133 ufshcd_set_ufs_dev_active(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007134
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307135 async_schedule(ufshcd_async_scan, hba);
7136
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307137 return 0;
7138
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307139out_remove_scsi_host:
7140 scsi_remove_host(hba->host);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007141exit_gating:
7142 ufshcd_exit_clk_gating(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307143out_disable:
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007144 hba->is_irq_enabled = false;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007145 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307146out_error:
7147 return err;
7148}
7149EXPORT_SYMBOL_GPL(ufshcd_init);
7150
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307151MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
7152MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
Vinayak Holikattie0eca632013-02-25 21:44:33 +05307153MODULE_DESCRIPTION("Generic UFS host controller driver Core");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307154MODULE_LICENSE("GPL");
7155MODULE_VERSION(UFSHCD_DRIVER_VERSION);