/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */
/*
 * Query request timeout for fDeviceInit flag
 * fDeviceInit query response time for some devices is so large that the
 * default QUERY_REQ_TIMEOUT may not be enough for such devices.
 */
#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({                                                              \
		int _ret;                                               \
		if (_on)                                                \
			_ret = ufshcd_enable_vreg(_dev, _vreg);         \
		else                                                    \
			_ret = ufshcd_disable_vreg(_dev, _vreg);        \
		_ret;                                                   \
	})

static u32 ufs_query_desc_max_size[] = {
	QUERY_DESC_DEVICE_MAX_SIZE,
	QUERY_DESC_CONFIGURAION_MAX_SIZE,
	QUERY_DESC_UNIT_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_INTERCONNECT_MAX_SIZE,
	QUERY_DESC_STRING_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_GEOMETRY_MAX_SIZE,
	QUERY_DESC_POWER_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
};

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}
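
/*
 * Example (illustrative only, assuming the ufs_pm_level enum counts up
 * from UFS_PM_LVL_0 as in ufshcd.h): a power management level of
 * UFS_PM_LVL_3 selects entry 3 of the table above, i.e. the device is
 * put to sleep while the link stays in hibern8:
 *
 *	enum ufs_pm_level lvl = UFS_PM_LVL_3;
 *
 *	WARN_ON(ufs_get_pm_lvl_to_dev_pwr_mode(lvl) != UFS_SLEEP_PWR_MODE);
 *	WARN_ON(ufs_get_pm_lvl_to_link_pwr_state(lvl) !=
 *		UIC_LINK_HIBERN8_STATE);
 */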

static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}

/* replace a non-printable or non-ASCII character with a space */
static inline void ufshcd_remove_non_printable(char *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}
248
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530249/*
250 * ufshcd_wait_for_register - wait for register value to change
251 * @hba - per-adapter interface
252 * @reg - mmio register offset
253 * @mask - mask to apply to read register value
254 * @val - wait condition
255 * @interval_us - polling interval in microsecs
256 * @timeout_ms - timeout in millisecs
Yaniv Gardi596585a2016-03-10 17:37:08 +0200257 * @can_sleep - perform sleep or just spin
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530258 *
259 * Returns -ETIMEDOUT on error, zero on success
260 */
Yaniv Gardi596585a2016-03-10 17:37:08 +0200261int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
262 u32 val, unsigned long interval_us,
263 unsigned long timeout_ms, bool can_sleep)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530264{
265 int err = 0;
266 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
267
268 /* ignore bits that we don't intend to wait on */
269 val = val & mask;
270
271 while ((ufshcd_readl(hba, reg) & mask) != val) {
Yaniv Gardi596585a2016-03-10 17:37:08 +0200272 if (can_sleep)
273 usleep_range(interval_us, interval_us + 50);
274 else
275 udelay(interval_us);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530276 if (time_after(jiffies, timeout)) {
277 if ((ufshcd_readl(hba, reg) & mask) != val)
278 err = -ETIMEDOUT;
279 break;
280 }
281 }
282
283 return err;
284}
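
/*
 * Example (illustrative sketch, not called from here): a caller in process
 * context that wants to wait for a transfer request doorbell bit to be
 * cleared by the controller could poll it like this, sleeping between
 * reads:
 *
 *	u32 mask = 1U << tag;
 *
 *	err = ufshcd_wait_for_register(hba,
 *			REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *			mask, ~mask, 1000, 1000, true);
 *
 * Since ~mask & mask == 0, this waits for the bit to drop to zero; the
 * interval/timeout values here are arbitrary.
 */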

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == UFSHCI_VERSION_10)
		return INTERRUPT_MASK_ALL_VER_10;
	else
		return INTERRUPT_MASK_ALL_VER_11;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device is connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is not available, else returns true with
 * the tag value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}
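
/*
 * Example (illustrative sketch): a task management path typically brackets
 * its work with these helpers, waiting until a slot frees up and waking
 * other waiters when done:
 *
 *	int free_slot;
 *
 *	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
 *	... build and issue the TM request in slot "free_slot" ...
 *	ufshcd_put_tm_slot(hba, free_slot);
 *	wake_up(&hba->tm_tag_wq);
 *
 * This mirrors the usage in ufshcd_issue_tm_cmd() later in this driver.
 */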

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 * 4-7		reserved
	 */
	return ((reg & 0xFF) >> 1) ^ 0x07;
}
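
/*
 * Worked example (illustrative): if HCS reads as 0x0F (device present and
 * all three of UTRLRDY, UTMRLRDY, UCRDY set), then
 *
 *	((0x0F & 0xFF) >> 1) ^ 0x07  ==  0x07 ^ 0x07  ==  0
 *
 * i.e. success. Any ready bit that is still 0 leaves a 1 behind after the
 * XOR, so the function returns non-zero until all three are ready.
 */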

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
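
/*
 * Example (illustrative): the aggregation timeout is programmed in units
 * of 40us, so a call such as
 *
 *	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 *
 * arms the counter at one below the queue depth and the timer at
 * 0x02 * 40us = 80us; whichever threshold trips first raises the
 * aggregated completion interrupt.
 */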

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * Setting the run-stop registers to 1 indicates to the host
 * controller that it can process requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}

static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);
	scsi_unblock_requests(hba->host);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		scsi_block_requests(hba->host);
		hba->clk_gating.state = REQ_CLKS_ON;
		schedule_work(&hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
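
/*
 * Example (illustrative sketch): any code path that is about to touch the
 * host registers brackets the access with a hold/release pair so the
 * gating work cannot switch the clocks off underneath it:
 *
 *	ufshcd_hold(hba, false);	// sync: clocks are on when this returns
 *	... access controller registers / issue a command ...
 *	ufshcd_release(hba);		// may schedule gate_work after delay_ms
 *
 * Passing async=true instead returns -EAGAIN rather than sleeping, which
 * is how ufshcd_queuecommand() below avoids blocking the SCSI dispatch
 * path while clocks are being ungated.
 */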

static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.is_suspended) {
		hba->clk_gating.state = CLKS_ON;
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and ultimately prevent the
	 * cancel work from running multiple times when new requests
	 * arrive before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF)
		hba->clk_gating.state = CLKS_OFF;

rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}

/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba))
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	schedule_delayed_work(&hba->clk_gating.gate_work,
			msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);

static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}

/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
}
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();
}

/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp - pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;
	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		int len_to_copy;

		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);

		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len_to_copy, SCSI_SENSE_BUFFERSIZE));
	}
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrbp - pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				"%s: Response size is bigger than buffer",
				__func__);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}

/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Identical to ufshcd_send_uic_cmd() except for the mutex. Must be called
 * with mutex held and host_lock locked.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}
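
/*
 * Example (illustrative sketch): a DME_GET of a UniPro attribute can be
 * built on top of ufshcd_send_uic_cmd(); UIC_ARG_MIB() packs the attribute
 * ID into argument1 and the read value comes back in argument3. This is
 * essentially what ufshcd_dme_get_attr() does later in this driver; the
 * choice of PA_GRANULARITY here is arbitrary:
 *
 *	struct uic_command uic_cmd = {0};
 *	int ret;
 *
 *	uic_cmd.command = UIC_CMD_DME_GET;
 *	uic_cmd.argument1 = UIC_ARG_MIB(PA_GRANULARITY);
 *	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 *	if (!ret)
 *		... attribute value is in uic_cmd.argument3 ...
 */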

/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp - pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		lrbp->utr_descriptor_ptr->prd_table_length =
					cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			prd_table[i].size =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
			prd_table[i].reserved = 0;
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}
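
/*
 * Note on the encoding above (worked example): the PRDT "size" field is
 * the data byte count minus one, as required by UFSHCI. So a 4 KiB
 * segment at DMA address 0x1_2345_6000 is written out as:
 *
 *	prd_table[i].size       = cpu_to_le32(4096 - 1);    // 0x0FFF
 *	prd_table[i].base_addr  = cpu_to_le32(0x23456000);  // low 32 bits
 *	prd_table[i].upper_addr = cpu_to_le32(0x1);         // high 32 bits
 */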

/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);

	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
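
/*
 * Example (illustrative): the driver typically enables its full working
 * set of interrupt sources in one call using the mask defined at the top
 * of this file, and masks everything while going down:
 *
 *	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 *	...
 *	ufshcd_disable_intr(hba, hba->intr_mask);
 *
 * where hba->intr_mask is captured from ufshcd_get_intr_mask() at init.
 */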

/**
 * ufshcd_prepare_req_desc_hdr() - Fills the request's header
 * descriptor according to the request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: request's data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
			u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type
				<< UPIU_COMMAND_TYPE_OFFSET);
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	/* dword_1 is reserved, hence it is set to 0 */
	req_desc->header.dword_1 = 0;
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* dword_3 is reserved, hence it is set to 0 */
	req_desc->header.dword_3 = 0;

	req_desc->prd_table_length = 0;
}
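
/*
 * Worked example (illustrative): for a SCSI READ (DMA_FROM_DEVICE) issued
 * as a non-interrupt command with command_type UTP_CMD_TYPE_SCSI, dword_0
 * is simply the OR of the direction and the shifted command type:
 *
 *	dword_0 = UTP_DEVICE_TO_HOST |
 *		  (UTP_CMD_TYPE_SCSI << UPIU_COMMAND_TYPE_OFFSET);
 *
 * and *upiu_flags comes back as UPIU_CMD_FLAGS_READ for use in the UPIU
 * header built by ufshcd_prepare_utp_scsi_cmd_upiu() below.
 */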

/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp - local reference block pointer
 * @upiu_flags - flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	unsigned short cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len =
		cpu_to_be32(lrbp->cmd->sdb.length);

	cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);
	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length is only needed for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(descp, query->descriptor, len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
 *			     for Device Management Purposes
 * @hba - per adapter instance
 * @lrbp - pointer to local reference block
 */
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if (hba->ufs_version == UFSHCI_VERSION_20)
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
		ufshcd_prepare_utp_nop_upiu(lrbp);
	else
		ret = -EINVAL;

	return ret;
}

/**
 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
 *			   for SCSI Purposes
 * @hba - per adapter instance
 * @lrbp - pointer to local reference block
 */
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if (hba->ufs_version == UFSHCI_VERSION_20)
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_SCSI;

	if (likely(lrbp->cmd)) {
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
						lrbp->cmd->sc_data_direction);
		ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

/*
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Returns UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}

/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Returns SCSI W-LUN id
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
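
/*
 * Worked example (illustrative, assuming UFS_UPIU_WLUN_ID is bit 7 and
 * SCSI_W_LUN_BASE is 0xc100 as defined in the headers): the UPIU
 * well-known LUN 0xd0 maps to SCSI W-LUN (0xd0 & ~0x80) | 0xc100 = 0xc150,
 * and feeding 0xc150 back through ufshcd_scsi_to_upiu_lun() recovers
 * (0xc150 & 0x7f) | 0x80 = 0xd0, so the two helpers round-trip.
 */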
1389
1390/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301391 * ufshcd_queuecommand - main entry point for SCSI requests
1392 * @cmd: command from SCSI Midlayer
1393 * @done: call back function
1394 *
1395 * Returns 0 for success, non-zero in case of failure
1396 */
1397static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1398{
1399 struct ufshcd_lrb *lrbp;
1400 struct ufs_hba *hba;
1401 unsigned long flags;
1402 int tag;
1403 int err = 0;
1404
1405 hba = shost_priv(host);
1406
1407 tag = cmd->request->tag;
Yaniv Gardi14497322016-02-01 15:02:39 +02001408 if (!ufshcd_valid_tag(hba, tag)) {
1409 dev_err(hba->dev,
1410 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
1411 __func__, tag, cmd, cmd->request);
1412 BUG();
1413 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301414
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301415 spin_lock_irqsave(hba->host->host_lock, flags);
1416 switch (hba->ufshcd_state) {
1417 case UFSHCD_STATE_OPERATIONAL:
1418 break;
1419 case UFSHCD_STATE_RESET:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301420 err = SCSI_MLQUEUE_HOST_BUSY;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301421 goto out_unlock;
1422 case UFSHCD_STATE_ERROR:
1423 set_host_byte(cmd, DID_ERROR);
1424 cmd->scsi_done(cmd);
1425 goto out_unlock;
1426 default:
1427 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
1428 __func__, hba->ufshcd_state);
1429 set_host_byte(cmd, DID_BAD_TARGET);
1430 cmd->scsi_done(cmd);
1431 goto out_unlock;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301432 }
Yaniv Gardi53c12d02016-02-01 15:02:45 +02001433
1434 /* if error handling is in progress, don't issue commands */
1435 if (ufshcd_eh_in_progress(hba)) {
1436 set_host_byte(cmd, DID_ERROR);
1437 cmd->scsi_done(cmd);
1438 goto out_unlock;
1439 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301440 spin_unlock_irqrestore(hba->host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301441
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301442 /* acquire the tag to make sure device cmds don't use it */
1443 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
1444 /*
1445 * A device management command is in progress, so requeue this
1446 * request. Requeuing helps because the request *may* get a
1447 * different tag instead of waiting for the device management
1448 * command to complete.
1449 */
1450 err = SCSI_MLQUEUE_HOST_BUSY;
1451 goto out;
1452 }
1453
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001454 err = ufshcd_hold(hba, true);
1455 if (err) {
1456 err = SCSI_MLQUEUE_HOST_BUSY;
1457 clear_bit_unlock(tag, &hba->lrb_in_use);
1458 goto out;
1459 }
1460 WARN_ON(hba->clk_gating.state != CLKS_ON);
1461
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301462 lrbp = &hba->lrb[tag];
1463
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301464 WARN_ON(lrbp->cmd);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301465 lrbp->cmd = cmd;
1466 lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
1467 lrbp->sense_buffer = cmd->sense_buffer;
1468 lrbp->task_tag = tag;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03001469 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
Yaniv Gardib8521902015-05-17 18:54:57 +03001470	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301471
Joao Pinto300bb132016-05-11 12:21:27 +01001472 ufshcd_comp_scsi_upiu(hba, lrbp);
1473
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301474 err = ufshcd_map_sg(lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301475 if (err) {
1476 lrbp->cmd = NULL;
1477 clear_bit_unlock(tag, &hba->lrb_in_use);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301478 goto out;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301479 }
Gilad Bronerad1a1b92016-10-17 17:09:36 -07001480 /* Make sure descriptors are ready before ringing the doorbell */
1481 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301482
1483 /* issue command to the controller */
1484 spin_lock_irqsave(hba->host->host_lock, flags);
1485 ufshcd_send_command(hba, tag);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301486out_unlock:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301487 spin_unlock_irqrestore(hba->host->host_lock, flags);
1488out:
1489 return err;
1490}
1491
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301492static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
1493 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
1494{
1495 lrbp->cmd = NULL;
1496 lrbp->sense_bufflen = 0;
1497 lrbp->sense_buffer = NULL;
1498 lrbp->task_tag = tag;
1499 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301500 lrbp->intr_cmd = true; /* No interrupt aggregation */
1501 hba->dev_cmd.type = cmd_type;
1502
Joao Pinto300bb132016-05-11 12:21:27 +01001503 return ufshcd_comp_devman_upiu(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301504}
1505
1506static int
1507ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
1508{
1509 int err = 0;
1510 unsigned long flags;
1511 u32 mask = 1 << tag;
1512
1513 /* clear outstanding transaction before retry */
1514 spin_lock_irqsave(hba->host->host_lock, flags);
1515 ufshcd_utrl_clear(hba, tag);
1516 spin_unlock_irqrestore(hba->host->host_lock, flags);
1517
1518 /*
1519 * Wait for the h/w to clear the corresponding bit in the doorbell.
1520 * Max. wait is 1 sec.
1521 */
1522 err = ufshcd_wait_for_register(hba,
1523 REG_UTP_TRANSFER_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02001524 mask, ~mask, 1000, 1000, true);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301525
1526 return err;
1527}
1528
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001529static int
1530ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1531{
1532 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1533
1534 /* Get the UPIU response */
1535 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
1536 UPIU_RSP_CODE_OFFSET;
1537 return query_res->response;
1538}
1539
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301540/**
1541 * ufshcd_dev_cmd_completion() - handles device management command responses
1542 * @hba: per adapter instance
1543 * @lrbp: pointer to local reference block
1544 */
1545static int
1546ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1547{
1548 int resp;
1549 int err = 0;
1550
1551 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
1552
1553 switch (resp) {
1554 case UPIU_TRANSACTION_NOP_IN:
1555 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
1556 err = -EINVAL;
1557 dev_err(hba->dev, "%s: unexpected response %x\n",
1558 __func__, resp);
1559 }
1560 break;
Dolev Raviv68078d52013-07-30 00:35:58 +05301561 case UPIU_TRANSACTION_QUERY_RSP:
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001562 err = ufshcd_check_query_response(hba, lrbp);
1563 if (!err)
1564 err = ufshcd_copy_query_response(hba, lrbp);
Dolev Raviv68078d52013-07-30 00:35:58 +05301565 break;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301566 case UPIU_TRANSACTION_REJECT_UPIU:
1567 /* TODO: handle Reject UPIU Response */
1568 err = -EPERM;
1569 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
1570 __func__);
1571 break;
1572 default:
1573 err = -EINVAL;
1574 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
1575 __func__, resp);
1576 break;
1577 }
1578
1579 return err;
1580}
1581
1582static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
1583 struct ufshcd_lrb *lrbp, int max_timeout)
1584{
1585 int err = 0;
1586 unsigned long time_left;
1587 unsigned long flags;
1588
1589 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
1590 msecs_to_jiffies(max_timeout));
1591
Gilad Bronerad1a1b92016-10-17 17:09:36 -07001592 /* Make sure descriptors are ready before ringing the doorbell */
1593 wmb();
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301594 spin_lock_irqsave(hba->host->host_lock, flags);
1595 hba->dev_cmd.complete = NULL;
1596 if (likely(time_left)) {
1597 err = ufshcd_get_tr_ocs(lrbp);
1598 if (!err)
1599 err = ufshcd_dev_cmd_completion(hba, lrbp);
1600 }
1601 spin_unlock_irqrestore(hba->host->host_lock, flags);
1602
1603 if (!time_left) {
1604 err = -ETIMEDOUT;
Yaniv Gardia48353f2016-02-01 15:02:40 +02001605		dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
1606 __func__, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301607 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
Yaniv Gardia48353f2016-02-01 15:02:40 +02001608 /* successfully cleared the command, retry if needed */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301609 err = -EAGAIN;
Yaniv Gardia48353f2016-02-01 15:02:40 +02001610 /*
1611 * in case of an error, after clearing the doorbell,
1612 * we also need to clear the outstanding_request
1613 * field in hba
1614 */
1615 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301616 }
1617
1618 return err;
1619}
1620
1621/**
1622 * ufshcd_get_dev_cmd_tag - Get device management command tag
1623 * @hba: per-adapter instance
1624 * @tag_out: pointer to variable with available slot value
1625 *
1626 * Get a free slot and lock it until device management command
1627 * completes.
1628 *
1629 * Returns false if a free slot is unavailable for locking, else
1630 * returns true with the tag value in @tag_out.
1631 */
1632static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
1633{
1634 int tag;
1635 bool ret = false;
1636 unsigned long tmp;
1637
1638 if (!tag_out)
1639 goto out;
1640
1641 do {
1642 tmp = ~hba->lrb_in_use;
1643 tag = find_last_bit(&tmp, hba->nutrs);
1644 if (tag >= hba->nutrs)
1645 goto out;
1646 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
1647
1648 *tag_out = tag;
1649 ret = true;
1650out:
1651 return ret;
1652}
1653
1654static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
1655{
1656 clear_bit_unlock(tag, &hba->lrb_in_use);
1657}
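
/*
 * Note on the tag scheme above: ufshcd_get_dev_cmd_tag() scans with
 * find_last_bit() so a device management command grabs the highest free
 * slot first. Since the block layer hands out SCSI request tags from the
 * low end, this presumably keeps the two users from colliding on the
 * same slots in the common case.
 */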
1658
1659/**
1660 * ufshcd_exec_dev_cmd - API for sending device management requests
1661 * @hba: UFS hba
1662 * @cmd_type: specifies the type (NOP, Query...)
1663 * @timeout: timeout in milliseconds
1664 *
Dolev Raviv68078d52013-07-30 00:35:58 +05301665 * NOTE: Since there is only one available tag for device management commands,
1666 * it is expected that the caller holds the hba->dev_cmd.lock mutex.
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301667 */
1668static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
1669 enum dev_cmd_type cmd_type, int timeout)
1670{
1671 struct ufshcd_lrb *lrbp;
1672 int err;
1673 int tag;
1674 struct completion wait;
1675 unsigned long flags;
1676
1677 /*
1678 * Get free slot, sleep if slots are unavailable.
1679 * Even though we use wait_event() which sleeps indefinitely,
1680 * the maximum wait time is bounded by SCSI request timeout.
1681 */
1682 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
1683
1684 init_completion(&wait);
1685 lrbp = &hba->lrb[tag];
1686 WARN_ON(lrbp->cmd);
1687 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
1688 if (unlikely(err))
1689 goto out_put_tag;
1690
1691 hba->dev_cmd.complete = &wait;
1692
Yaniv Gardie3dfdc52016-02-01 15:02:49 +02001693 /* Make sure descriptors are ready before ringing the doorbell */
1694 wmb();
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301695 spin_lock_irqsave(hba->host->host_lock, flags);
1696 ufshcd_send_command(hba, tag);
1697 spin_unlock_irqrestore(hba->host->host_lock, flags);
1698
1699 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
1700
1701out_put_tag:
1702 ufshcd_put_dev_cmd_tag(hba, tag);
1703 wake_up(&hba->dev_cmd.tag_wq);
1704 return err;
1705}
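
/*
 * Illustrative usage (a sketch, not copied from an actual caller): send a
 * NOP OUT to check that the device answers device management commands,
 * holding the dev_cmd.lock mutex as required by the NOTE above. The
 * NOP_OUT_TIMEOUT constant is assumed to be the one defined near the top
 * of this file.
 *
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 */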
1706
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301707/**
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001708 * ufshcd_init_query() - init the query response and request parameters
1709 * @hba: per-adapter instance
1710 * @request: address of the request pointer to be initialized
1711 * @response: address of the response pointer to be initialized
1712 * @opcode: operation to perform
1713 * @idn: idn of the flag/attribute/descriptor to access
1714 * @index: LU number to access
1715 * @selector: query/flag/descriptor further identification
1716 */
1717static inline void ufshcd_init_query(struct ufs_hba *hba,
1718 struct ufs_query_req **request, struct ufs_query_res **response,
1719 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
1720{
1721 *request = &hba->dev_cmd.query.request;
1722 *response = &hba->dev_cmd.query.response;
1723 memset(*request, 0, sizeof(struct ufs_query_req));
1724 memset(*response, 0, sizeof(struct ufs_query_res));
1725 (*request)->upiu_req.opcode = opcode;
1726 (*request)->upiu_req.idn = idn;
1727 (*request)->upiu_req.index = index;
1728 (*request)->upiu_req.selector = selector;
1729}
1730
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02001731static int ufshcd_query_flag_retry(struct ufs_hba *hba,
1732 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
1733{
1734 int ret;
1735 int retries;
1736
1737 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
1738 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
1739 if (ret)
1740 dev_dbg(hba->dev,
1741 "%s: failed with error %d, retries %d\n",
1742 __func__, ret, retries);
1743 else
1744 break;
1745 }
1746
1747 if (ret)
1748 dev_err(hba->dev,
1749 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
1750 __func__, opcode, idn, ret, retries);
1751 return ret;
1752}
1753
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001754/**
Dolev Raviv68078d52013-07-30 00:35:58 +05301755 * ufshcd_query_flag() - API function for sending flag query requests
1756 * @hba: per-adapter instance
1757 * @opcode: flag query to perform
1758 * @idn: flag idn to access
1759 * @flag_res: the flag value after the query request completes
1760 *
1761 * Returns 0 for success, non-zero in case of failure
1762 */
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02001763int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
Dolev Raviv68078d52013-07-30 00:35:58 +05301764 enum flag_idn idn, bool *flag_res)
1765{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001766 struct ufs_query_req *request = NULL;
1767 struct ufs_query_res *response = NULL;
1768 int err, index = 0, selector = 0;
Yaniv Gardie5ad4062016-02-01 15:02:41 +02001769 int timeout = QUERY_REQ_TIMEOUT;
Dolev Raviv68078d52013-07-30 00:35:58 +05301770
1771 BUG_ON(!hba);
1772
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001773 ufshcd_hold(hba, false);
Dolev Raviv68078d52013-07-30 00:35:58 +05301774 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001775 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1776 selector);
Dolev Raviv68078d52013-07-30 00:35:58 +05301777
1778 switch (opcode) {
1779 case UPIU_QUERY_OPCODE_SET_FLAG:
1780 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
1781 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
1782 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1783 break;
1784 case UPIU_QUERY_OPCODE_READ_FLAG:
1785 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1786 if (!flag_res) {
1787 /* No dummy reads */
1788 dev_err(hba->dev, "%s: Invalid argument for read request\n",
1789 __func__);
1790 err = -EINVAL;
1791 goto out_unlock;
1792 }
1793 break;
1794 default:
1795 dev_err(hba->dev,
1796 "%s: Expected query flag opcode but got = %d\n",
1797 __func__, opcode);
1798 err = -EINVAL;
1799 goto out_unlock;
1800 }
Dolev Raviv68078d52013-07-30 00:35:58 +05301801
Yaniv Gardie5ad4062016-02-01 15:02:41 +02001802 if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
1803 timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
1804
1805 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
Dolev Raviv68078d52013-07-30 00:35:58 +05301806
1807 if (err) {
1808 dev_err(hba->dev,
1809 "%s: Sending flag query for idn %d failed, err = %d\n",
1810 __func__, idn, err);
1811 goto out_unlock;
1812 }
1813
1814 if (flag_res)
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05301815 *flag_res = (be32_to_cpu(response->upiu_res.value) &
Dolev Raviv68078d52013-07-30 00:35:58 +05301816 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
1817
1818out_unlock:
1819 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001820 ufshcd_release(hba);
Dolev Raviv68078d52013-07-30 00:35:58 +05301821 return err;
1822}
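
/*
 * Illustrative usage (a sketch): read the fDeviceInit flag, as
 * ufshcd_complete_dev_init() does (via the retry wrapper) further down
 * in this file.
 *
 *	bool flag_res;
 *	int err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				    QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */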
1823
1824/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301825 * ufshcd_query_attr - API function for sending attribute requests
1826 * @hba: per-adapter instance
1827 * @opcode: attribute opcode
1828 * @idn: attribute idn to access
1829 * @index: index field
1830 * @selector: selector field
1831 * @attr_val: the attribute value after the query request completes
1832 *
1833 * Returns 0 for success, non-zero in case of failure
1834 */
Sujit Reddy Thummabdbe5d22014-05-26 10:59:11 +05301835static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301836 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
1837{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001838 struct ufs_query_req *request = NULL;
1839 struct ufs_query_res *response = NULL;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301840 int err;
1841
1842 BUG_ON(!hba);
1843
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001844 ufshcd_hold(hba, false);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301845 if (!attr_val) {
1846 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
1847 __func__, opcode);
1848 err = -EINVAL;
1849 goto out;
1850 }
1851
1852 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001853 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1854 selector);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301855
1856 switch (opcode) {
1857 case UPIU_QUERY_OPCODE_WRITE_ATTR:
1858 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05301859 request->upiu_req.value = cpu_to_be32(*attr_val);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301860 break;
1861 case UPIU_QUERY_OPCODE_READ_ATTR:
1862 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1863 break;
1864 default:
1865 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
1866 __func__, opcode);
1867 err = -EINVAL;
1868 goto out_unlock;
1869 }
1870
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001871 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301872
1873 if (err) {
1874 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1875 __func__, opcode, idn, err);
1876 goto out_unlock;
1877 }
1878
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05301879 *attr_val = be32_to_cpu(response->upiu_res.value);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301880
1881out_unlock:
1882 mutex_unlock(&hba->dev_cmd.lock);
1883out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001884 ufshcd_release(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301885 return err;
1886}
1887
1888/**
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02001889 * ufshcd_query_attr_retry() - API function for sending query
1890 * attribute with retries
1891 * @hba: per-adapter instance
1892 * @opcode: attribute opcode
1893 * @idn: attribute idn to access
1894 * @index: index field
1895 * @selector: selector field
1896 * @attr_val: the attribute value after the query request
1897 * completes
1898 *
1899 * Returns 0 for success, non-zero in case of failure
1900 */
1901static int ufshcd_query_attr_retry(struct ufs_hba *hba,
1902 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
1903 u32 *attr_val)
1904{
1905 int ret = 0;
1906 u32 retries;
1907
1908 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
1909 ret = ufshcd_query_attr(hba, opcode, idn, index,
1910 selector, attr_val);
1911 if (ret)
1912 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
1913 __func__, ret, retries);
1914 else
1915 break;
1916 }
1917
1918 if (ret)
1919 dev_err(hba->dev,
1920 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
1921 __func__, idn, ret, QUERY_REQ_RETRIES);
1922 return ret;
1923}
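
/*
 * Illustrative usage (a sketch, assuming the QUERY_ATTR_IDN_BKOPS_STATUS
 * idn from ufs.h): read the background operations status attribute with
 * retries.
 *
 *	u32 bkops_status;
 *	int err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, &bkops_status);
 */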
1924
Yaniv Gardia70e91b2016-03-10 17:37:14 +02001925static int __ufshcd_query_descriptor(struct ufs_hba *hba,
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001926 enum query_opcode opcode, enum desc_idn idn, u8 index,
1927 u8 selector, u8 *desc_buf, int *buf_len)
1928{
1929 struct ufs_query_req *request = NULL;
1930 struct ufs_query_res *response = NULL;
1931 int err;
1932
1933 BUG_ON(!hba);
1934
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001935 ufshcd_hold(hba, false);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001936 if (!desc_buf) {
1937 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
1938 __func__, opcode);
1939 err = -EINVAL;
1940 goto out;
1941 }
1942
1943 if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
1944 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
1945 __func__, *buf_len);
1946 err = -EINVAL;
1947 goto out;
1948 }
1949
1950 mutex_lock(&hba->dev_cmd.lock);
1951 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1952 selector);
1953 hba->dev_cmd.query.descriptor = desc_buf;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03001954 request->upiu_req.length = cpu_to_be16(*buf_len);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001955
1956 switch (opcode) {
1957 case UPIU_QUERY_OPCODE_WRITE_DESC:
1958 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1959 break;
1960 case UPIU_QUERY_OPCODE_READ_DESC:
1961 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1962 break;
1963 default:
1964 dev_err(hba->dev,
1965 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
1966 __func__, opcode);
1967 err = -EINVAL;
1968 goto out_unlock;
1969 }
1970
1971 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1972
1973 if (err) {
1974 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1975 __func__, opcode, idn, err);
1976 goto out_unlock;
1977 }
1978
1979 hba->dev_cmd.query.descriptor = NULL;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03001980 *buf_len = be16_to_cpu(response->upiu_res.length);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001981
1982out_unlock:
1983 mutex_unlock(&hba->dev_cmd.lock);
1984out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001985 ufshcd_release(hba);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001986 return err;
1987}
1988
1989/**
Yaniv Gardia70e91b2016-03-10 17:37:14 +02001990 * ufshcd_query_descriptor_retry - API function for sending descriptor
1991 * requests
1992 * @hba: per-adapter instance
1993 * @opcode: descriptor query opcode
1994 * @idn: descriptor idn to access
1995 * @index: index field
1996 * @selector: selector field
1997 * @desc_buf: the buffer that contains the descriptor
1998 * @buf_len: length parameter passed to the device
1999 *
2000 * Returns 0 for success, non-zero in case of failure.
2001 * The buf_len parameter will contain, on return, the length parameter
2002 * received on the response.
2003 */
2004int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2005 enum query_opcode opcode, enum desc_idn idn, u8 index,
2006 u8 selector, u8 *desc_buf, int *buf_len)
2007{
2008 int err;
2009 int retries;
2010
2011 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2012 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
2013 selector, desc_buf, buf_len);
2014 if (!err || err == -EINVAL)
2015 break;
2016 }
2017
2018 return err;
2019}
2020EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
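
/*
 * Illustrative usage (a sketch, assuming the QUERY_DESC_DEVICE_MAX_SIZE
 * constant from ufs.h): read the device descriptor; on return buff_len
 * holds the length the device actually reported.
 *
 *	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
 *	int buff_len = QUERY_DESC_DEVICE_MAX_SIZE;
 *	int err = ufshcd_query_descriptor_retry(hba,
 *			UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE,
 *			0, 0, desc_buf, &buff_len);
 */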
2021
2022/**
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002023 * ufshcd_read_desc_param - read the specified descriptor parameter
2024 * @hba: Pointer to adapter instance
2025 * @desc_id: descriptor idn value
2026 * @desc_index: descriptor index
2027 * @param_offset: offset of the parameter to read
2028 * @param_read_buf: pointer to buffer where parameter would be read
2029 * @param_size: sizeof(param_read_buf)
2030 *
2031 * Return 0 in case of success, non-zero otherwise
2032 */
2033static int ufshcd_read_desc_param(struct ufs_hba *hba,
2034 enum desc_idn desc_id,
2035 int desc_index,
2036 u32 param_offset,
2037 u8 *param_read_buf,
2038 u32 param_size)
2039{
2040 int ret;
2041 u8 *desc_buf;
2042 u32 buff_len;
2043 bool is_kmalloc = true;
2044
2045 /* safety checks */
2046 if (desc_id >= QUERY_DESC_IDN_MAX)
2047 return -EINVAL;
2048
2049 buff_len = ufs_query_desc_max_size[desc_id];
2050 if ((param_offset + param_size) > buff_len)
2051 return -EINVAL;
2052
2053 if (!param_offset && (param_size == buff_len)) {
2054 /* memory space already available to hold full descriptor */
2055 desc_buf = param_read_buf;
2056 is_kmalloc = false;
2057 } else {
2058 /* allocate memory to hold full descriptor */
2059 desc_buf = kmalloc(buff_len, GFP_KERNEL);
2060 if (!desc_buf)
2061 return -ENOMEM;
2062 }
2063
Yaniv Gardia70e91b2016-03-10 17:37:14 +02002064 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2065 desc_id, desc_index, 0, desc_buf,
2066 &buff_len);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002067
2068 if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
2069 (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
2070 ufs_query_desc_max_size[desc_id])
2071 || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
2072 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
2073 __func__, desc_id, param_offset, buff_len, ret);
2074 if (!ret)
2075 ret = -EINVAL;
2076
2077 goto out;
2078 }
2079
2080 if (is_kmalloc)
2081 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
2082out:
2083 if (is_kmalloc)
2084 kfree(desc_buf);
2085 return ret;
2086}
2087
2088static inline int ufshcd_read_desc(struct ufs_hba *hba,
2089 enum desc_idn desc_id,
2090 int desc_index,
2091 u8 *buf,
2092 u32 size)
2093{
2094 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
2095}
2096
2097static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
2098 u8 *buf,
2099 u32 size)
2100{
2101 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
2102}
2103
Yaniv Gardib573d482016-03-10 17:37:09 +02002104int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
2105{
2106 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
2107}
2108EXPORT_SYMBOL(ufshcd_read_device_desc);
2109
2110/**
2111 * ufshcd_read_string_desc - read string descriptor
2112 * @hba: pointer to adapter instance
2113 * @desc_index: descriptor index
2114 * @buf: pointer to buffer where descriptor would be read
2115 * @size: size of buf
2116 * @ascii: if true convert from unicode to ascii characters
2117 *
2118 * Return 0 in case of success, non-zero otherwise
2119 */
2120int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
2121 u32 size, bool ascii)
2122{
2123 int err = 0;
2124
2125 err = ufshcd_read_desc(hba,
2126 QUERY_DESC_IDN_STRING, desc_index, buf, size);
2127
2128 if (err) {
2129 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
2130 __func__, QUERY_REQ_RETRIES, err);
2131 goto out;
2132 }
2133
2134 if (ascii) {
2135 int desc_len;
2136 int ascii_len;
2137 int i;
2138 char *buff_ascii;
2139
2140 desc_len = buf[0];
2141 /* remove header and divide by 2 to move from UTF16 to UTF8 */
2142 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
2143 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
2144 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
2145 __func__);
2146 err = -ENOMEM;
2147 goto out;
2148 }
2149
2150 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
2151 if (!buff_ascii) {
2152 err = -ENOMEM;
Tiezhu Yangfcbefc32016-06-25 12:35:22 +08002153 goto out;
Yaniv Gardib573d482016-03-10 17:37:09 +02002154 }
2155
2156 /*
2157 * The descriptor contains the string in UTF-16 format;
2158 * convert it to UTF-8 so it can be displayed.
2159 */
2160 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
2161 desc_len - QUERY_DESC_HDR_SIZE,
2162 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
2163
2164 /* replace non-printable or non-ASCII characters with spaces */
2165 for (i = 0; i < ascii_len; i++)
2166 ufshcd_remove_non_printable(&buff_ascii[i]);
2167
2168 memset(buf + QUERY_DESC_HDR_SIZE, 0,
2169 size - QUERY_DESC_HDR_SIZE);
2170 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
2171 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
Yaniv Gardib573d482016-03-10 17:37:09 +02002172 kfree(buff_ascii);
2173 }
2174out:
2175 return err;
2176}
2177EXPORT_SYMBOL(ufshcd_read_string_desc);
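
/*
 * Illustrative usage (a sketch; QUERY_DESC_STRING_MAX_SIZE is assumed to
 * come from ufs.h and "model_index" stands for a hypothetical string
 * descriptor index previously read out of the device descriptor):
 *
 *	u8 str_buf[QUERY_DESC_STRING_MAX_SIZE];
 *	int err = ufshcd_read_string_desc(hba, model_index, str_buf,
 *					  QUERY_DESC_STRING_MAX_SIZE, true);
 */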
2178
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002179/**
2180 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
2181 * @hba: Pointer to adapter instance
2182 * @lun: lun id
2183 * @param_offset: offset of the parameter to read
2184 * @param_read_buf: pointer to buffer where parameter would be read
2185 * @param_size: sizeof(param_read_buf)
2186 *
2187 * Return 0 in case of success, non-zero otherwise
2188 */
2189static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
2190 int lun,
2191 enum unit_desc_param param_offset,
2192 u8 *param_read_buf,
2193 u32 param_size)
2194{
2195 /*
2196 * Unit descriptors are only available for general purpose LUs (LUN id
2197 * from 0 to 7) and RPMB Well known LU.
2198 */
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03002199 if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002200 return -EOPNOTSUPP;
2201
2202 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
2203 param_offset, param_read_buf, param_size);
2204}
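
/*
 * Illustrative usage (a sketch, assuming the UNIT_DESC_PARAM_LU_WR_PROTECT
 * offset from ufs.h): read the bLUWriteProtect byte of a unit descriptor.
 *
 *	u8 lu_wp;
 *	int err = ufshcd_read_unit_desc_param(hba, lun,
 *			UNIT_DESC_PARAM_LU_WR_PROTECT, &lu_wp, sizeof(lu_wp));
 */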
2205
2206/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302207 * ufshcd_memory_alloc - allocate memory for host memory space data structures
2208 * @hba: per adapter instance
2209 *
2210 * 1. Allocate DMA memory for Command Descriptor array
2211 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
2212 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
2213 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
2214 * (UTMRDL)
2215 * 4. Allocate memory for the local reference block (lrb).
2216 *
2217 * Returns 0 for success, non-zero in case of failure
2218 */
2219static int ufshcd_memory_alloc(struct ufs_hba *hba)
2220{
2221 size_t utmrdl_size, utrdl_size, ucdl_size;
2222
2223 /* Allocate memory for UTP command descriptors */
2224 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09002225 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
2226 ucdl_size,
2227 &hba->ucdl_dma_addr,
2228 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302229
2230 /*
2231 * UFSHCI requires UTP command descriptors to be 128 byte aligned.
2232 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE; if it is,
2233 * then it will be aligned to 128 bytes as well.
2235 */
2236 if (!hba->ucdl_base_addr ||
2237 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302238 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302239 "Command Descriptor Memory allocation failed\n");
2240 goto out;
2241 }
2242
2243 /*
2244 * Allocate memory for UTP Transfer descriptors
2245 * UFSHCI requires 1024 byte alignment of UTRD
2246 */
2247 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09002248 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
2249 utrdl_size,
2250 &hba->utrdl_dma_addr,
2251 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302252 if (!hba->utrdl_base_addr ||
2253 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302254 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302255 "Transfer Descriptor Memory allocation failed\n");
2256 goto out;
2257 }
2258
2259 /*
2260 * Allocate memory for UTP Task Management descriptors
2261 * UFSHCI requires 1024 byte alignment of UTMRD
2262 */
2263 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
Seungwon Jeon2953f852013-06-27 13:31:54 +09002264 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
2265 utmrdl_size,
2266 &hba->utmrdl_dma_addr,
2267 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302268 if (!hba->utmrdl_base_addr ||
2269 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302270 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302271 "Task Management Descriptor Memory allocation failed\n");
2272 goto out;
2273 }
2274
2275 /* Allocate memory for local reference block */
Seungwon Jeon2953f852013-06-27 13:31:54 +09002276 hba->lrb = devm_kzalloc(hba->dev,
2277 hba->nutrs * sizeof(struct ufshcd_lrb),
2278 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302279 if (!hba->lrb) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302280 dev_err(hba->dev, "LRB Memory allocation failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302281 goto out;
2282 }
2283 return 0;
2284out:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302285 return -ENOMEM;
2286}
2287
2288/**
2289 * ufshcd_host_memory_configure - configure local reference block with
2290 * memory offsets
2291 * @hba: per adapter instance
2292 *
2293 * Configure Host memory space
2294 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
2295 * address.
2296 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
2297 * and PRDT offset.
2298 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
2299 * into local reference block.
2300 */
2301static void ufshcd_host_memory_configure(struct ufs_hba *hba)
2302{
2303 struct utp_transfer_cmd_desc *cmd_descp;
2304 struct utp_transfer_req_desc *utrdlp;
2305 dma_addr_t cmd_desc_dma_addr;
2306 dma_addr_t cmd_desc_element_addr;
2307 u16 response_offset;
2308 u16 prdt_offset;
2309 int cmd_desc_size;
2310 int i;
2311
2312 utrdlp = hba->utrdl_base_addr;
2313 cmd_descp = hba->ucdl_base_addr;
2314
2315 response_offset =
2316 offsetof(struct utp_transfer_cmd_desc, response_upiu);
2317 prdt_offset =
2318 offsetof(struct utp_transfer_cmd_desc, prd_table);
2319
2320 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
2321 cmd_desc_dma_addr = hba->ucdl_dma_addr;
2322
2323 for (i = 0; i < hba->nutrs; i++) {
2324 /* Configure UTRD with command descriptor base address */
2325 cmd_desc_element_addr =
2326 (cmd_desc_dma_addr + (cmd_desc_size * i));
2327 utrdlp[i].command_desc_base_addr_lo =
2328 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
2329 utrdlp[i].command_desc_base_addr_hi =
2330 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
2331
2332 /* Response upiu and prdt offset should be in double words */
2333 utrdlp[i].response_upiu_offset =
2334 cpu_to_le16((response_offset >> 2));
2335 utrdlp[i].prd_table_offset =
2336 cpu_to_le16((prdt_offset >> 2));
2337 utrdlp[i].response_upiu_length =
Sujit Reddy Thumma3ca316c2013-06-26 22:39:30 +05302338 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302339
2340 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302341 hba->lrb[i].ucd_req_ptr =
2342 (struct utp_upiu_req *)(cmd_descp + i);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302343 hba->lrb[i].ucd_rsp_ptr =
2344 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2345 hba->lrb[i].ucd_prdt_ptr =
2346 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2347 }
2348}
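
/*
 * Worked example of the offset arithmetic above (a sketch, assuming
 * ALIGNED_UPIU_SIZE == 512): response_upiu follows the 512 byte command
 * UPIU, so response_offset == 512 and prdt_offset == 1024; stored as
 * double words those become 512 >> 2 == 128 and 1024 >> 2 == 256.
 */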
2349
2350/**
2351 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
2352 * @hba: per adapter instance
2353 *
2354 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
2355 * in order to initialize the Unipro link startup procedure.
2356 * Once the Unipro links are up, the device connected to the controller
2357 * is detected.
2358 *
2359 * Returns 0 on success, non-zero value on failure
2360 */
2361static int ufshcd_dme_link_startup(struct ufs_hba *hba)
2362{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302363 struct uic_command uic_cmd = {0};
2364 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302365
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302366 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
2367
2368 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2369 if (ret)
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302370 dev_err(hba->dev,
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302371 "dme-link-startup: error code %d\n", ret);
2372 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302373}
2374
Yaniv Gardicad2e032015-03-31 17:37:14 +03002375static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
2376{
2377 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
2378 unsigned long min_sleep_time_us;
2379
2380 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
2381 return;
2382
2383 /*
2384 * last_dme_cmd_tstamp will be 0 only for 1st call to
2385 * this function
2386 */
2387 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
2388 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
2389 } else {
2390 unsigned long delta =
2391 (unsigned long) ktime_to_us(
2392 ktime_sub(ktime_get(),
2393 hba->last_dme_cmd_tstamp));
2394
2395 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
2396 min_sleep_time_us =
2397 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
2398 else
2399 return; /* no more delay required */
2400 }
2401
2402 /* allow sleep for extra 50us if needed */
2403 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
2404}
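
/*
 * Worked example of the delay above: with the quirk set and the last DME
 * command completed 300us ago, delta == 300, so the function sleeps for
 * MIN_DELAY_BEFORE_DME_CMDS_US - 300 == 700us (up to 750us given the 50us
 * slack passed to usleep_range()).
 */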
2405
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302406/**
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302407 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
2408 * @hba: per adapter instance
2409 * @attr_sel: uic command argument1
2410 * @attr_set: attribute set type as uic command argument2
2411 * @mib_val: setting value as uic command argument3
2412 * @peer: indicate whether peer or local
2413 *
2414 * Returns 0 on success, non-zero value on failure
2415 */
2416int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
2417 u8 attr_set, u32 mib_val, u8 peer)
2418{
2419 struct uic_command uic_cmd = {0};
2420 static const char *const action[] = {
2421 "dme-set",
2422 "dme-peer-set"
2423 };
2424 const char *set = action[!!peer];
2425 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002426 int retries = UFS_UIC_COMMAND_RETRIES;
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302427
2428 uic_cmd.command = peer ?
2429 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
2430 uic_cmd.argument1 = attr_sel;
2431 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
2432 uic_cmd.argument3 = mib_val;
2433
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002434 do {
2435 /* for peer attributes we retry upon failure */
2436 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2437 if (ret)
2438 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
2439 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
2440 } while (ret && peer && --retries);
2441
2442 if (!retries)
2443 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
2444 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
2445 retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302446
2447 return ret;
2448}
2449EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
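
/*
 * Illustrative usage (a sketch, assuming the ufshcd_dme_set() and
 * ufshcd_dme_peer_set() convenience wrappers from ufshcd.h, which expand
 * into calls of this function):
 *
 *	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
 */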
2450
2451/**
2452 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
2453 * @hba: per adapter instance
2454 * @attr_sel: uic command argument1
2455 * @mib_val: the value of the attribute as returned by the UIC command
2456 * @peer: indicate whether peer or local
2457 *
2458 * Returns 0 on success, non-zero value on failure
2459 */
2460int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
2461 u32 *mib_val, u8 peer)
2462{
2463 struct uic_command uic_cmd = {0};
2464 static const char *const action[] = {
2465 "dme-get",
2466 "dme-peer-get"
2467 };
2468 const char *get = action[!!peer];
2469 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002470 int retries = UFS_UIC_COMMAND_RETRIES;
Yaniv Gardi874237f2015-05-17 18:55:03 +03002471 struct ufs_pa_layer_attr orig_pwr_info;
2472 struct ufs_pa_layer_attr temp_pwr_info;
2473 bool pwr_mode_change = false;
2474
2475 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
2476 orig_pwr_info = hba->pwr_info;
2477 temp_pwr_info = orig_pwr_info;
2478
2479 if (orig_pwr_info.pwr_tx == FAST_MODE ||
2480 orig_pwr_info.pwr_rx == FAST_MODE) {
2481 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
2482 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
2483 pwr_mode_change = true;
2484 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
2485 orig_pwr_info.pwr_rx == SLOW_MODE) {
2486 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
2487 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
2488 pwr_mode_change = true;
2489 }
2490 if (pwr_mode_change) {
2491 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
2492 if (ret)
2493 goto out;
2494 }
2495 }
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302496
2497 uic_cmd.command = peer ?
2498 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
2499 uic_cmd.argument1 = attr_sel;
2500
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002501 do {
2502 /* for peer attributes we retry upon failure */
2503 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2504 if (ret)
2505 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
2506 get, UIC_GET_ATTR_ID(attr_sel), ret);
2507 } while (ret && peer && --retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302508
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002509 if (!retries)
2510 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
2511 get, UIC_GET_ATTR_ID(attr_sel), retries);
2512
2513 if (mib_val && !ret)
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302514 *mib_val = uic_cmd.argument3;
Yaniv Gardi874237f2015-05-17 18:55:03 +03002515
2516 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
2517 && pwr_mode_change)
2518 ufshcd_change_power_mode(hba, &orig_pwr_info);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302519out:
2520 return ret;
2521}
2522EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
2523
2524/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002525 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
2526 * state) and waits for them to take effect.
2527 *
2528 * @hba: per adapter instance
2529 * @cmd: UIC command to execute
2530 *
2531 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
2532 * DME_HIBERNATE_EXIT commands take some time to take its effect on both host
2533 * and device UniPro link and hence it's final completion would be indicated by
2534 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
2535 * addition to normal UIC command completion Status (UCCS). This function only
2536 * returns after the relevant status bits indicate the completion.
2537 *
2538 * Returns 0 on success, non-zero value on failure
2539 */
2540static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
2541{
2542 struct completion uic_async_done;
2543 unsigned long flags;
2544 u8 status;
2545 int ret;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002546 bool reenable_intr = false;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002547
2548 mutex_lock(&hba->uic_cmd_mutex);
2549 init_completion(&uic_async_done);
Yaniv Gardicad2e032015-03-31 17:37:14 +03002550 ufshcd_add_delay_before_dme_cmd(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002551
2552 spin_lock_irqsave(hba->host->host_lock, flags);
2553 hba->uic_async_done = &uic_async_done;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002554 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
2555 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
2556 /*
2557 * Make sure UIC command completion interrupt is disabled before
2558 * issuing UIC command.
2559 */
2560 wmb();
2561 reenable_intr = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002562 }
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002563 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
2564 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002565 if (ret) {
2566 dev_err(hba->dev,
2567 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
2568 cmd->command, cmd->argument3, ret);
2569 goto out;
2570 }
2571
2572 if (!wait_for_completion_timeout(hba->uic_async_done,
2573 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2574 dev_err(hba->dev,
2575 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
2576 cmd->command, cmd->argument3);
2577 ret = -ETIMEDOUT;
2578 goto out;
2579 }
2580
2581 status = ufshcd_get_upmcrs(hba);
2582 if (status != PWR_LOCAL) {
2583 dev_err(hba->dev,
Kiwoong Kim73615422016-09-08 16:50:02 +09002584			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002585 cmd->command, status);
2586 ret = (status != PWR_OK) ? status : -1;
2587 }
2588out:
2589 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002590 hba->active_uic_cmd = NULL;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002591 hba->uic_async_done = NULL;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002592 if (reenable_intr)
2593 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002594 spin_unlock_irqrestore(hba->host->host_lock, flags);
2595 mutex_unlock(&hba->uic_cmd_mutex);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002596
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002597 return ret;
2598}
2599
2600/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302601 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
2602 * using DME_SET primitives.
2603 * @hba: per adapter instance
2604 * @mode: power mode value
2605 *
2606 * Returns 0 on success, non-zero value on failure
2607 */
Sujit Reddy Thummabdbe5d22014-05-26 10:59:11 +05302608static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302609{
2610 struct uic_command uic_cmd = {0};
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002611 int ret;
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302612
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03002613 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
2614 ret = ufshcd_dme_set(hba,
2615 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
2616 if (ret) {
2617 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
2618 __func__, ret);
2619 goto out;
2620 }
2621 }
2622
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302623 uic_cmd.command = UIC_CMD_DME_SET;
2624 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
2625 uic_cmd.argument3 = mode;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002626 ufshcd_hold(hba, false);
2627 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2628 ufshcd_release(hba);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302629
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03002630out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002631 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002632}
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302633
Yaniv Gardi53c12d02016-02-01 15:02:45 +02002634static int ufshcd_link_recovery(struct ufs_hba *hba)
2635{
2636 int ret;
2637 unsigned long flags;
2638
2639 spin_lock_irqsave(hba->host->host_lock, flags);
2640 hba->ufshcd_state = UFSHCD_STATE_RESET;
2641 ufshcd_set_eh_in_progress(hba);
2642 spin_unlock_irqrestore(hba->host->host_lock, flags);
2643
2644 ret = ufshcd_host_reset_and_restore(hba);
2645
2646 spin_lock_irqsave(hba->host->host_lock, flags);
2647 if (ret)
2648 hba->ufshcd_state = UFSHCD_STATE_ERROR;
2649 ufshcd_clear_eh_in_progress(hba);
2650 spin_unlock_irqrestore(hba->host->host_lock, flags);
2651
2652 if (ret)
2653 dev_err(hba->dev, "%s: link recovery failed, err %d",
2654 __func__, ret);
2655
2656 return ret;
2657}
2658
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02002659static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002660{
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02002661 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002662 struct uic_command uic_cmd = {0};
2663
2664 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02002665 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002666
Yaniv Gardi53c12d02016-02-01 15:02:45 +02002667 if (ret) {
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02002668 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
2669 __func__, ret);
2670
Yaniv Gardi53c12d02016-02-01 15:02:45 +02002671 /*
2672 * If link recovery fails then return an error so that the caller
2673 * doesn't retry the hibern8 enter again.
2674 */
2675 if (ufshcd_link_recovery(hba))
2676 ret = -ENOLINK;
2677 }
2678
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02002679 return ret;
2680}
2681
2682static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2683{
2684 int ret = 0, retries;
2685
2686 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
2687 ret = __ufshcd_uic_hibern8_enter(hba);
2688 if (!ret || ret == -ENOLINK)
2689 goto out;
2690 }
2691out:
2692 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002693}
2694
2695static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2696{
2697 struct uic_command uic_cmd = {0};
2698 int ret;
2699
2700 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
2701 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302702 if (ret) {
Yaniv Gardi53c12d02016-02-01 15:02:45 +02002703 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
2704 __func__, ret);
2705 ret = ufshcd_link_recovery(hba);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302706 }
2707
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302708 return ret;
2709}
2710
Yaniv Gardi50646362014-10-23 13:25:13 +03002711/**
2712 * ufshcd_init_pwr_info - setting the POR (power on reset)
2713 * values in hba power info
2714 * @hba: per-adapter instance
2715 */
2716static void ufshcd_init_pwr_info(struct ufs_hba *hba)
2717{
2718 hba->pwr_info.gear_rx = UFS_PWM_G1;
2719 hba->pwr_info.gear_tx = UFS_PWM_G1;
2720 hba->pwr_info.lane_rx = 1;
2721 hba->pwr_info.lane_tx = 1;
2722 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
2723 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
2724 hba->pwr_info.hs_rate = 0;
2725}
2726
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302727/**
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002728 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
2729 * @hba: per-adapter instance
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302730 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002731static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302732{
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002733 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
2734
2735 if (hba->max_pwr_info.is_valid)
2736 return 0;
2737
2738 pwr_info->pwr_tx = FASTAUTO_MODE;
2739 pwr_info->pwr_rx = FASTAUTO_MODE;
2740 pwr_info->hs_rate = PA_HS_MODE_B;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302741
2742 /* Get the connected lane count */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002743 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
2744 &pwr_info->lane_rx);
2745 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
2746 &pwr_info->lane_tx);
2747
2748 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
2749 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
2750 __func__,
2751 pwr_info->lane_rx,
2752 pwr_info->lane_tx);
2753 return -EINVAL;
2754 }
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302755
2756 /*
2757 * First, get the maximum gears of HS speed.
2758 * If a zero value, it means there is no HSGEAR capability.
2759 * Then, get the maximum gears of PWM speed.
2760 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002761 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
2762 if (!pwr_info->gear_rx) {
2763 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2764 &pwr_info->gear_rx);
2765 if (!pwr_info->gear_rx) {
2766 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
2767 __func__, pwr_info->gear_rx);
2768 return -EINVAL;
2769 }
2770 pwr_info->pwr_rx = SLOWAUTO_MODE;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302771 }
2772
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002773 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
2774 &pwr_info->gear_tx);
2775 if (!pwr_info->gear_tx) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302776 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002777 &pwr_info->gear_tx);
2778 if (!pwr_info->gear_tx) {
2779 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
2780 __func__, pwr_info->gear_tx);
2781 return -EINVAL;
2782 }
2783 pwr_info->pwr_tx = SLOWAUTO_MODE;
2784 }
2785
2786 hba->max_pwr_info.is_valid = true;
2787 return 0;
2788}
2789
2790static int ufshcd_change_power_mode(struct ufs_hba *hba,
2791 struct ufs_pa_layer_attr *pwr_mode)
2792{
2793 int ret;
2794
2795 /* if already configured to the requested pwr_mode */
2796 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
2797 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
2798 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
2799 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
2800 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
2801 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
2802 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
2803 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
2804 return 0;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302805 }
2806
2807 /*
2808 * Configure the following attributes for the power mode change:
2809 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
2810 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
2811 * - PA_HSSERIES
2812 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002813 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
2814 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
2815 pwr_mode->lane_rx);
2816 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2817 pwr_mode->pwr_rx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302818 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002819 else
2820 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302821
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002822 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
2823 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
2824 pwr_mode->lane_tx);
2825 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
2826 pwr_mode->pwr_tx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302827 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002828 else
2829 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302830
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002831 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2832 pwr_mode->pwr_tx == FASTAUTO_MODE ||
2833 pwr_mode->pwr_rx == FAST_MODE ||
2834 pwr_mode->pwr_tx == FAST_MODE)
2835 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
2836 pwr_mode->hs_rate);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302837
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002838 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
2839 | pwr_mode->pwr_tx);
2840
2841 if (ret) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302842 dev_err(hba->dev,
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002843 "%s: power mode change failed %d\n", __func__, ret);
2844 } else {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02002845 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
2846 pwr_mode);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002847
2848 memcpy(&hba->pwr_info, pwr_mode,
2849 sizeof(struct ufs_pa_layer_attr));
2850 }
2851
2852 return ret;
2853}
2854
2855/**
2856 * ufshcd_config_pwr_mode - configure a new power mode
2857 * @hba: per-adapter instance
2858 * @desired_pwr_mode: desired power configuration
2859 */
2860static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
2861 struct ufs_pa_layer_attr *desired_pwr_mode)
2862{
2863 struct ufs_pa_layer_attr final_params = { 0 };
2864 int ret;
2865
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02002866 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
2867 desired_pwr_mode, &final_params);
2868
2869 if (ret)
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002870 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
2871
2872 ret = ufshcd_change_power_mode(hba, &final_params);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302873
2874 return ret;
2875}
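
/*
 * Illustrative usage (a sketch): once ufshcd_get_max_pwr_mode() has filled
 * hba->max_pwr_info, switch the link to the fastest negotiated settings:
 *
 *	if (!ufshcd_get_max_pwr_mode(hba))
 *		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */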
2876
2877/**
Dolev Raviv68078d52013-07-30 00:35:58 +05302878 * ufshcd_complete_dev_init() - checks device readiness
2879 * @hba: per-adapter instance
2880 *
2881 * Set fDeviceInit flag and poll until device toggles it.
2882 */
2883static int ufshcd_complete_dev_init(struct ufs_hba *hba)
2884{
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02002885 int i;
2886 int err;
Dolev Raviv68078d52013-07-30 00:35:58 +05302887 bool flag_res = 1;
2888
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02002889 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2890 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
Dolev Raviv68078d52013-07-30 00:35:58 +05302891 if (err) {
2892 dev_err(hba->dev,
2893 "%s setting fDeviceInit flag failed with error %d\n",
2894 __func__, err);
2895 goto out;
2896 }
2897
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02002898 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
2899 for (i = 0; i < 1000 && !err && flag_res; i++)
2900 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2901 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
2902
Dolev Raviv68078d52013-07-30 00:35:58 +05302903 if (err)
2904 dev_err(hba->dev,
2905 "%s reading fDeviceInit flag failed with error %d\n",
2906 __func__, err);
2907 else if (flag_res)
2908 dev_err(hba->dev,
2909 "%s fDeviceInit was not cleared by the device\n",
2910 __func__);
2911
2912out:
2913 return err;
2914}
2915
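/*
 * Illustrative sketch, not part of the upstream file: reading a device
 * flag back follows the same retrying query pattern used above, e.g. a
 * hypothetical check of the current fBackgroundOpsEn state (the driver
 * normally tracks this in hba->auto_bkops_enabled instead):
 *
 *	bool bkops_en = false;
 *	int err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *					  QUERY_FLAG_IDN_BKOPS_EN, &bkops_en);
 */
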
2916/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302917 * ufshcd_make_hba_operational - Make UFS controller operational
2918 * @hba: per adapter instance
2919 *
2920 * To bring UFS host controller to operational state,
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03002921 * 1. Enable required interrupts
2922 * 2. Configure interrupt aggregation
Yaniv Gardi897efe62016-02-01 15:02:48 +02002923 * 3. Program UTRL and UTMRL base address
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03002924 * 4. Configure run-stop-registers
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302925 *
2926 * Returns 0 on success, non-zero value on failure
2927 */
2928static int ufshcd_make_hba_operational(struct ufs_hba *hba)
2929{
2930 int err = 0;
2931 u32 reg;
2932
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302933 /* Enable required interrupts */
2934 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
2935
2936 /* Configure interrupt aggregation */
Yaniv Gardib8521902015-05-17 18:54:57 +03002937 if (ufshcd_is_intr_aggr_allowed(hba))
2938 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
2939 else
2940 ufshcd_disable_intr_aggr(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302941
2942 /* Configure UTRL and UTMRL base address registers */
2943 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
2944 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
2945 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
2946 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
2947 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
2948 REG_UTP_TASK_REQ_LIST_BASE_L);
2949 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
2950 REG_UTP_TASK_REQ_LIST_BASE_H);
2951
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302952 /*
Yaniv Gardi897efe62016-02-01 15:02:48 +02002953 * Make sure base address and interrupt setup are updated before
2954 * enabling the run/stop registers below.
2955 */
2956 wmb();
2957
2958 /*
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302959 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302960 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03002961 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302962 if (!(ufshcd_get_lists_status(reg))) {
2963 ufshcd_enable_run_stop_reg(hba);
2964 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302965 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302966 "Host controller not ready to process requests");
2967 err = -EIO;
2968 goto out;
2969 }
2970
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302971out:
2972 return err;
2973}
2974
2975/**
Yaniv Gardi596585a2016-03-10 17:37:08 +02002976 * ufshcd_hba_stop - Send controller to reset state
2977 * @hba: per adapter instance
2978 * @can_sleep: perform sleep or just spin
2979 */
2980static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
2981{
2982 int err;
2983
2984 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
2985 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
2986 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
2987 10, 1, can_sleep);
2988 if (err)
2989 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
2990}
2991
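/*
 * Illustrative note, not part of the upstream file: with the arguments
 * used above, ufshcd_wait_for_register() polls REG_CONTROLLER_ENABLE
 * every 10 us for at most 1 ms until the CONTROLLER_ENABLE bit reads
 * back as CONTROLLER_DISABLE, sleeping between reads when @can_sleep is
 * true and busy-waiting otherwise.
 */
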
2992/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302993 * ufshcd_hba_enable - initialize the controller
2994 * @hba: per adapter instance
2995 *
2996 * The controller resets itself and controller firmware initialization
2997 * sequence kicks off. When controller is ready it will set
2998 * the Host Controller Enable bit to 1.
2999 *
3000 * Returns 0 on success, non-zero value on failure
3001 */
3002static int ufshcd_hba_enable(struct ufs_hba *hba)
3003{
3004 int retry;
3005
3006 /*
3007 * msleep of 1 and 5 used in this function might result in msleep(20),
3008 * but it was necessary to send the UFS FPGA to reset mode during
3009 * development and testing of this driver. msleep can be changed to
3010 * mdelay and retry count can be reduced based on the controller.
3011 */
Yaniv Gardi596585a2016-03-10 17:37:08 +02003012 if (!ufshcd_is_hba_active(hba))
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303013 /* change controller state to "reset state" */
Yaniv Gardi596585a2016-03-10 17:37:08 +02003014 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303015
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003016 /* UniPro link is disabled at this point */
3017 ufshcd_set_link_off(hba);
3018
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003019 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003020
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303021 /* start controller initialization sequence */
3022 ufshcd_hba_start(hba);
3023
3024 /*
3025	 * To initialize a UFS host controller, the HCE bit must be set to 1.
3026 * During initialization the HCE bit value changes from 1->0->1.
3027 * When the host controller completes initialization sequence
3028 * it sets the value of HCE bit to 1. The same HCE bit is read back
3029 * to check if the controller has completed initialization sequence.
3030	 * So without this delay, the value HCE = 1 set in the previous
3031	 * instruction might be read back.
3032 * This delay can be changed based on the controller.
3033 */
3034 msleep(1);
3035
3036 /* wait for the host controller to complete initialization */
3037 retry = 10;
3038 while (ufshcd_is_hba_active(hba)) {
3039 if (retry) {
3040 retry--;
3041 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303042 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303043 "Controller enable failed\n");
3044 return -EIO;
3045 }
3046 msleep(5);
3047 }
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003048
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003049 /* enable UIC related interrupts */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003050 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003051
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003052 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003053
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303054 return 0;
3055}
3056
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03003057static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
3058{
3059 int tx_lanes, i, err = 0;
3060
3061 if (!peer)
3062 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3063 &tx_lanes);
3064 else
3065 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3066 &tx_lanes);
3067 for (i = 0; i < tx_lanes; i++) {
3068 if (!peer)
3069 err = ufshcd_dme_set(hba,
3070 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
3071 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
3072 0);
3073 else
3074 err = ufshcd_dme_peer_set(hba,
3075 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
3076 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
3077 0);
3078 if (err) {
3079			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d\n",
3080 __func__, peer, i, err);
3081 break;
3082 }
3083 }
3084
3085 return err;
3086}
3087
3088static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
3089{
3090 return ufshcd_disable_tx_lcc(hba, true);
3091}
3092
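/*
 * Illustrative sketch, not part of the upstream file: a symmetric
 * (hypothetical) wrapper would disable LCC on the local host M-PHY by
 * passing peer = false, mirroring ufshcd_disable_device_tx_lcc():
 */
static inline int ufshcd_example_disable_host_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, false);
}
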
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303093/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303094 * ufshcd_link_startup - Initialize unipro link startup
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303095 * @hba: per adapter instance
3096 *
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303097 * Returns 0 for success, non-zero in case of failure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303098 */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303099static int ufshcd_link_startup(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303100{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303101 int ret;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003102 int retries = DME_LINKSTARTUP_RETRIES;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303103
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003104 do {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003105 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303106
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003107 ret = ufshcd_dme_link_startup(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003108
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003109 /* check if device is detected by inter-connect layer */
3110 if (!ret && !ufshcd_is_device_present(hba)) {
3111 dev_err(hba->dev, "%s: Device not present\n", __func__);
3112 ret = -ENXIO;
3113 goto out;
3114 }
3115
3116 /*
3117 * DME link lost indication is only received when link is up,
3118 * but we can't be sure if the link is up until link startup
3119 * succeeds. So reset the local Uni-Pro and try again.
3120 */
3121 if (ret && ufshcd_hba_enable(hba))
3122 goto out;
3123 } while (ret && retries--);
3124
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303125 if (ret)
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03003126		/* failed to get the link up... give up */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303127 goto out;
3128
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03003129 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
3130 ret = ufshcd_disable_device_tx_lcc(hba);
3131 if (ret)
3132 goto out;
3133 }
3134
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003135 /* Include any host controller configuration via UIC commands */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02003136 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
3137 if (ret)
3138 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03003139
3140 ret = ufshcd_make_hba_operational(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303141out:
3142 if (ret)
3143 dev_err(hba->dev, "link startup failed %d\n", ret);
3144 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303145}
3146
3147/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303148 * ufshcd_verify_dev_init() - Verify device initialization
3149 * @hba: per-adapter instance
3150 *
3151 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
3152 * device Transport Protocol (UTP) layer is ready after a reset.
3153 * If the UTP layer at the device side is not initialized, it may
3154 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
3155 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
3156 */
3157static int ufshcd_verify_dev_init(struct ufs_hba *hba)
3158{
3159 int err = 0;
3160 int retries;
3161
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003162 ufshcd_hold(hba, false);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303163 mutex_lock(&hba->dev_cmd.lock);
3164 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
3165 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
3166 NOP_OUT_TIMEOUT);
3167
3168 if (!err || err == -ETIMEDOUT)
3169 break;
3170
3171 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
3172 }
3173 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003174 ufshcd_release(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303175
3176 if (err)
3177 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
3178 return err;
3179}
3180
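/*
 * Illustrative note, not part of the upstream file: the NOP OUT UPIU
 * travels through the same request slots as SCSI commands but via the
 * device-management path (DEV_CMD_TYPE_NOP), which is why dev_cmd.lock
 * serialises it against query requests and ufshcd_hold() keeps the
 * clocks running for the duration of the handshake.
 */
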
3181/**
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003182 * ufshcd_set_queue_depth - set lun queue depth
3183 * @sdev: pointer to SCSI device
3184 *
3185 * Read bLUQueueDepth value and activate scsi tagged command
3186 * queueing. For WLUN, queue depth is set to 1. For best-effort
3187 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
3188 * value that the host can queue.
3189 */
3190static void ufshcd_set_queue_depth(struct scsi_device *sdev)
3191{
3192 int ret = 0;
3193 u8 lun_qdepth;
3194 struct ufs_hba *hba;
3195
3196 hba = shost_priv(sdev->host);
3197
3198 lun_qdepth = hba->nutrs;
3199 ret = ufshcd_read_unit_desc_param(hba,
3200 ufshcd_scsi_to_upiu_lun(sdev->lun),
3201 UNIT_DESC_PARAM_LU_Q_DEPTH,
3202 &lun_qdepth,
3203 sizeof(lun_qdepth));
3204
3205 /* Some WLUN doesn't support unit descriptor */
3206 if (ret == -EOPNOTSUPP)
3207 lun_qdepth = 1;
3208 else if (!lun_qdepth)
3209 /* eventually, we can figure out the real queue depth */
3210 lun_qdepth = hba->nutrs;
3211 else
3212 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
3213
3214 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
3215 __func__, lun_qdepth);
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01003216 scsi_change_queue_depth(sdev, lun_qdepth);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003217}
3218
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003219/**
3220 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
3221 * @hba: per-adapter instance
3222 * @lun: UFS device lun id
3223 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
3224 *
3225 * Returns 0 in case of success and the b_lu_write_protect status is
3226 * returned in the @b_lu_write_protect parameter.
3227 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
3228 * Returns -EINVAL in case of invalid parameters passed to this function.
3229 */
3230static int ufshcd_get_lu_wp(struct ufs_hba *hba,
3231 u8 lun,
3232 u8 *b_lu_write_protect)
3233{
3234 int ret;
3235
3236 if (!b_lu_write_protect)
3237 ret = -EINVAL;
3238 /*
3239 * According to UFS device spec, RPMB LU can't be write
3240 * protected so skip reading bLUWriteProtect parameter for
3241 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
3242 */
3243 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
3244 ret = -ENOTSUPP;
3245 else
3246 ret = ufshcd_read_unit_desc_param(hba,
3247 lun,
3248 UNIT_DESC_PARAM_LU_WR_PROTECT,
3249 b_lu_write_protect,
3250 sizeof(*b_lu_write_protect));
3251 return ret;
3252}
3253
3254/**
3255 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
3256 * status
3257 * @hba: per-adapter instance
3258 * @sdev: pointer to SCSI device
3259 *
3260 */
3261static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
3262 struct scsi_device *sdev)
3263{
3264 if (hba->dev_info.f_power_on_wp_en &&
3265 !hba->dev_info.is_lu_power_on_wp) {
3266 u8 b_lu_write_protect;
3267
3268 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
3269 &b_lu_write_protect) &&
3270 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
3271 hba->dev_info.is_lu_power_on_wp = true;
3272 }
3273}
3274
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003275/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303276 * ufshcd_slave_alloc - handle initial SCSI device configurations
3277 * @sdev: pointer to SCSI device
3278 *
3279 * Returns success
3280 */
3281static int ufshcd_slave_alloc(struct scsi_device *sdev)
3282{
3283 struct ufs_hba *hba;
3284
3285 hba = shost_priv(sdev->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303286
3287 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
3288 sdev->use_10_for_ms = 1;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303289
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05303290 /* allow SCSI layer to restart the device in case of errors */
3291 sdev->allow_restart = 1;
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003292
Sujit Reddy Thummab2a6c522014-07-01 12:22:38 +03003293 /* REPORT SUPPORTED OPERATION CODES is not supported */
3294 sdev->no_report_opcodes = 1;
3295
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003296
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003297 ufshcd_set_queue_depth(sdev);
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003298
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003299 ufshcd_get_lu_power_on_wp_status(hba, sdev);
3300
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003301 return 0;
3302}
3303
3304/**
3305 * ufshcd_change_queue_depth - change queue depth
3306 * @sdev: pointer to SCSI device
3307 * @depth: required depth to set
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003308 *
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01003309 * Change queue depth and make sure the max. limits are not crossed.
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003310 */
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01003311static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03003312{
3313 struct ufs_hba *hba = shost_priv(sdev->host);
3314
3315 if (depth > hba->nutrs)
3316 depth = hba->nutrs;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01003317 return scsi_change_queue_depth(sdev, depth);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303318}
3319
3320/**
Akinobu Mitaeeda4742014-07-01 23:00:32 +09003321 * ufshcd_slave_configure - adjust SCSI device configurations
3322 * @sdev: pointer to SCSI device
3323 */
3324static int ufshcd_slave_configure(struct scsi_device *sdev)
3325{
3326 struct request_queue *q = sdev->request_queue;
3327
3328 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
3329 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
3330
3331 return 0;
3332}
3333
3334/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303335 * ufshcd_slave_destroy - remove SCSI device configurations
3336 * @sdev: pointer to SCSI device
3337 */
3338static void ufshcd_slave_destroy(struct scsi_device *sdev)
3339{
3340 struct ufs_hba *hba;
3341
3342 hba = shost_priv(sdev->host);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003343 /* Drop the reference as it won't be needed anymore */
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03003344 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
3345 unsigned long flags;
3346
3347 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003348 hba->sdev_ufs_device = NULL;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03003349 spin_unlock_irqrestore(hba->host->host_lock, flags);
3350 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303351}
3352
3353/**
3354 * ufshcd_task_req_compl - handle task management request completion
3355 * @hba: per adapter instance
3356 * @index: index of the completed request
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303357 * @resp: task management service response
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303358 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303359 * Returns non-zero value on error, zero on success
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303360 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303361static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303362{
3363 struct utp_task_req_desc *task_req_descp;
3364 struct utp_upiu_task_rsp *task_rsp_upiup;
3365 unsigned long flags;
3366 int ocs_value;
3367 int task_result;
3368
3369 spin_lock_irqsave(hba->host->host_lock, flags);
3370
3371 /* Clear completed tasks from outstanding_tasks */
3372 __clear_bit(index, &hba->outstanding_tasks);
3373
3374 task_req_descp = hba->utmrdl_base_addr;
3375 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
3376
3377 if (ocs_value == OCS_SUCCESS) {
3378 task_rsp_upiup = (struct utp_upiu_task_rsp *)
3379 task_req_descp[index].task_rsp_upiu;
Kiwoong Kim8794ee02016-09-09 08:22:22 +09003380 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
3381 task_result = task_result & MASK_TM_SERVICE_RESP;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303382 if (resp)
3383 *resp = (u8)task_result;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303384 } else {
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303385 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
3386 __func__, ocs_value);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303387 }
3388 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05303389
3390 return ocs_value;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303391}
3392
3393/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303394 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
3395 * @lrbp: pointer to local reference block of completed command
3396 * @scsi_status: SCSI command status
3397 *
3398 * Returns value base on SCSI command status
3399 */
3400static inline int
3401ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
3402{
3403 int result = 0;
3404
3405 switch (scsi_status) {
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05303406 case SAM_STAT_CHECK_CONDITION:
3407		ufshcd_copy_sense_data(lrbp);
		/* fall through: report status with sense data attached */
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303408 case SAM_STAT_GOOD:
3409 result |= DID_OK << 16 |
3410 COMMAND_COMPLETE << 8 |
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05303411 scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303412 break;
3413 case SAM_STAT_TASK_SET_FULL:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05303414 case SAM_STAT_BUSY:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303415 case SAM_STAT_TASK_ABORTED:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05303416 ufshcd_copy_sense_data(lrbp);
3417 result |= scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303418 break;
3419 default:
3420 result |= DID_ERROR << 16;
3421 break;
3422 } /* end of switch */
3423
3424 return result;
3425}
3426
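/*
 * Illustrative note, not part of the upstream file: the result word
 * composed above is the packed value the SCSI midlayer decodes as
 *
 *	(host_byte << 16) | (msg_byte << 8) | status_byte
 *
 * so a command that succeeds end to end completes with
 *
 *	DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD
 *
 * while transport-level failures are signalled through a DID_* host
 * byte with the status byte left clear.
 */
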
3427/**
3428 * ufshcd_transfer_rsp_status - Get overall status of the response
3429 * @hba: per adapter instance
3430 * @lrbp: pointer to local reference block of completed command
3431 *
3432 * Returns result of the command to notify SCSI midlayer
3433 */
3434static inline int
3435ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3436{
3437 int result = 0;
3438 int scsi_status;
3439 int ocs;
3440
3441 /* overall command status of utrd */
3442 ocs = ufshcd_get_tr_ocs(lrbp);
3443
3444 switch (ocs) {
3445 case OCS_SUCCESS:
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303446 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303447
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303448 switch (result) {
3449 case UPIU_TRANSACTION_RESPONSE:
3450 /*
3451 * get the response UPIU result to extract
3452 * the SCSI command status
3453 */
3454 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
3455
3456 /*
3457 * get the result based on SCSI status response
3458 * to notify the SCSI midlayer of the command status
3459 */
3460 scsi_status = result & MASK_SCSI_STATUS;
3461 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303462
Yaniv Gardif05ac2e2016-02-01 15:02:42 +02003463 /*
3464 * Currently we are only supporting BKOPs exception
3465 * events hence we can ignore BKOPs exception event
3466 * during power management callbacks. BKOPs exception
3467 * event is not expected to be raised in runtime suspend
3468 * callback as it allows the urgent bkops.
3469 * During system suspend, we are anyway forcefully
3470 * disabling the bkops and if urgent bkops is needed
3471 * it will be enabled on system resume. Long term
3472 * solution could be to abort the system suspend if
3473 * UFS device needs urgent BKOPs.
3474 */
3475 if (!hba->pm_op_in_progress &&
3476 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303477 schedule_work(&hba->eeh_work);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303478 break;
3479 case UPIU_TRANSACTION_REJECT_UPIU:
3480 /* TODO: handle Reject UPIU Response */
3481 result = DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303482 dev_err(hba->dev,
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303483 "Reject UPIU not fully implemented\n");
3484 break;
3485 default:
3486			dev_err(hba->dev,
3487				"Unexpected request response code = %x\n",
3488				result);
3489			result = DID_ERROR << 16;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303490 break;
3491 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303492 break;
3493 case OCS_ABORTED:
3494 result |= DID_ABORT << 16;
3495 break;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05303496 case OCS_INVALID_COMMAND_STATUS:
3497 result |= DID_REQUEUE << 16;
3498 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303499 case OCS_INVALID_CMD_TABLE_ATTR:
3500 case OCS_INVALID_PRDT_ATTR:
3501 case OCS_MISMATCH_DATA_BUF_SIZE:
3502 case OCS_MISMATCH_RESP_UPIU_SIZE:
3503 case OCS_PEER_COMM_FAILURE:
3504 case OCS_FATAL_ERROR:
3505 default:
3506 result |= DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303507 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303508 "OCS error from controller = %x\n", ocs);
3509 break;
3510 } /* end of switch */
3511
3512 return result;
3513}
3514
3515/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303516 * ufshcd_uic_cmd_compl - handle completion of uic command
3517 * @hba: per adapter instance
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303518 * @intr_status: interrupt status generated by the controller
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303519 */
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303520static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303521{
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303522 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303523 hba->active_uic_cmd->argument2 |=
3524 ufshcd_get_uic_cmd_result(hba);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303525 hba->active_uic_cmd->argument3 =
3526 ufshcd_get_dme_attr_val(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303527 complete(&hba->active_uic_cmd->done);
3528 }
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05303529
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003530 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
3531 complete(hba->uic_async_done);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303532}
3533
3534/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003535 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303536 * @hba: per adapter instance
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003537 * @completed_reqs: requests to complete
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303538 */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003539static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
3540 unsigned long completed_reqs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303541{
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303542 struct ufshcd_lrb *lrbp;
3543 struct scsi_cmnd *cmd;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303544 int result;
3545 int index;
Dolev Ravive9d501b2014-07-01 12:22:37 +03003546
Dolev Ravive9d501b2014-07-01 12:22:37 +03003547 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
3548 lrbp = &hba->lrb[index];
3549 cmd = lrbp->cmd;
3550 if (cmd) {
3551 result = ufshcd_transfer_rsp_status(hba, lrbp);
3552 scsi_dma_unmap(cmd);
3553 cmd->result = result;
3554 /* Mark completed command as NULL in LRB */
3555 lrbp->cmd = NULL;
3556 clear_bit_unlock(index, &hba->lrb_in_use);
3557 /* Do not touch lrbp after scsi done */
3558 cmd->scsi_done(cmd);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003559 __ufshcd_release(hba);
Joao Pinto300bb132016-05-11 12:21:27 +01003560 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
3561 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
Dolev Ravive9d501b2014-07-01 12:22:37 +03003562 if (hba->dev_cmd.complete)
3563 complete(hba->dev_cmd.complete);
3564 }
3565 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303566
3567 /* clear corresponding bits of completed commands */
3568 hba->outstanding_reqs ^= completed_reqs;
3569
Sahitya Tummala856b3482014-09-25 15:32:34 +03003570 ufshcd_clk_scaling_update_busy(hba);
3571
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303572 /* we might have free'd some tags above */
3573 wake_up(&hba->dev_cmd.tag_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303574}
3575
3576/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003577 * ufshcd_transfer_req_compl - handle SCSI and query command completion
3578 * @hba: per adapter instance
3579 */
3580static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
3581{
3582 unsigned long completed_reqs;
3583 u32 tr_doorbell;
3584
3585 /* Resetting interrupt aggregation counters first and reading the
3586 * DOOR_BELL afterward allows us to handle all the completed requests.
3587	 * In order to prevent starvation of other interrupts, the DB is read once
3588	 * after reset. The downside of this solution is the possibility of a
3589	 * false interrupt if the device completes another request after resetting
3590 * aggregation and before reading the DB.
3591 */
3592 if (ufshcd_is_intr_aggr_allowed(hba))
3593 ufshcd_reset_intr_aggr(hba);
3594
3595 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3596 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
3597
3598 __ufshcd_transfer_req_compl(hba, completed_reqs);
3599}
3600
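/*
 * Illustrative note, not part of the upstream file: the XOR above works
 * because hba->outstanding_reqs has a bit set for every issued tag
 * while the doorbell keeps a bit set only for tags the controller still
 * owns, e.g.
 *
 *	outstanding_reqs = 0b1011  (tags 0, 1 and 3 issued)
 *	tr_doorbell      = 0b0010  (tag 1 still executing)
 *	completed_reqs   = 0b1011 ^ 0b0010 = 0b1001  (tags 0 and 3 done)
 */
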
3601/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303602 * ufshcd_disable_ee - disable exception event
3603 * @hba: per-adapter instance
3604 * @mask: exception event to disable
3605 *
3606 * Disables exception event in the device so that the EVENT_ALERT
3607 * bit is not set.
3608 *
3609 * Returns zero on success, non-zero error value on failure.
3610 */
3611static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
3612{
3613 int err = 0;
3614 u32 val;
3615
3616 if (!(hba->ee_ctrl_mask & mask))
3617 goto out;
3618
3619 val = hba->ee_ctrl_mask & ~mask;
3620 val &= 0xFFFF; /* 2 bytes */
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02003621 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303622 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
3623 if (!err)
3624 hba->ee_ctrl_mask &= ~mask;
3625out:
3626 return err;
3627}
3628
3629/**
3630 * ufshcd_enable_ee - enable exception event
3631 * @hba: per-adapter instance
3632 * @mask: exception event to enable
3633 *
3634 * Enable corresponding exception event in the device to allow
3635 * device to alert host in critical scenarios.
3636 *
3637 * Returns zero on success, non-zero error value on failure.
3638 */
3639static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
3640{
3641 int err = 0;
3642 u32 val;
3643
3644 if (hba->ee_ctrl_mask & mask)
3645 goto out;
3646
3647 val = hba->ee_ctrl_mask | mask;
3648 val &= 0xFFFF; /* 2 bytes */
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02003649 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303650 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
3651 if (!err)
3652 hba->ee_ctrl_mask |= mask;
3653out:
3654 return err;
3655}
3656
3657/**
3658 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
3659 * @hba: per-adapter instance
3660 *
3661 * Allow device to manage background operations on its own. Enabling
3662 * this might lead to inconsistent latencies during normal data transfers
3663 * as the device is allowed to manage its own way of handling background
3664 * operations.
3665 *
3666 * Returns zero on success, non-zero on failure.
3667 */
3668static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
3669{
3670 int err = 0;
3671
3672 if (hba->auto_bkops_enabled)
3673 goto out;
3674
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02003675 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303676 QUERY_FLAG_IDN_BKOPS_EN, NULL);
3677 if (err) {
3678 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
3679 __func__, err);
3680 goto out;
3681 }
3682
3683 hba->auto_bkops_enabled = true;
3684
3685 /* No need of URGENT_BKOPS exception from the device */
3686 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
3687 if (err)
3688 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
3689 __func__, err);
3690out:
3691 return err;
3692}
3693
3694/**
3695 * ufshcd_disable_auto_bkops - block device in doing background operations
3696 * @hba: per-adapter instance
3697 *
3698 * Disabling background operations improves command response latency but
3699 * has the drawback of the device moving into a critical state where it is
3700 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
3701 * host is idle so that BKOPS are managed effectively without any negative
3702 * impacts.
3703 *
3704 * Returns zero on success, non-zero on failure.
3705 */
3706static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
3707{
3708 int err = 0;
3709
3710 if (!hba->auto_bkops_enabled)
3711 goto out;
3712
3713 /*
3714 * If host assisted BKOPs is to be enabled, make sure
3715 * urgent bkops exception is allowed.
3716 */
3717 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
3718 if (err) {
3719 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
3720 __func__, err);
3721 goto out;
3722 }
3723
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02003724 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303725 QUERY_FLAG_IDN_BKOPS_EN, NULL);
3726 if (err) {
3727 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
3728 __func__, err);
3729 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
3730 goto out;
3731 }
3732
3733 hba->auto_bkops_enabled = false;
3734out:
3735 return err;
3736}
3737
3738/**
3739 * ufshcd_force_reset_auto_bkops - force enable of auto bkops
3740 * @hba: per adapter instance
3741 *
3742 * After a device reset the device may toggle the BKOPS_EN flag
3743 * to default value. The s/w tracking variables should be updated
3744 * as well. Do this by forcing enable of auto bkops.
3745 */
3746static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
3747{
3748 hba->auto_bkops_enabled = false;
3749 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
3750 ufshcd_enable_auto_bkops(hba);
3751}
3752
3753static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
3754{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02003755 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303756 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
3757}
3758
3759/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003760 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
3761 * @hba: per-adapter instance
3762 * @status: bkops_status value
3763 *
3764 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
3765 * flag in the device to permit background operations if the device
3766 * bkops_status is greater than or equal to "status" argument passed to
3767 * this function, disable otherwise.
3768 *
3769 * Returns 0 for success, non-zero in case of failure.
3770 *
3771 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
3772 * to know whether auto bkops is enabled or disabled after this function
3773 * returns control to it.
3774 */
3775static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
3776 enum bkops_status status)
3777{
3778 int err;
3779 u32 curr_status = 0;
3780
3781 err = ufshcd_get_bkops_status(hba, &curr_status);
3782 if (err) {
3783 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
3784 __func__, err);
3785 goto out;
3786 } else if (curr_status > BKOPS_STATUS_MAX) {
3787 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
3788 __func__, curr_status);
3789 err = -EINVAL;
3790 goto out;
3791 }
3792
3793 if (curr_status >= status)
3794 err = ufshcd_enable_auto_bkops(hba);
3795 else
3796 err = ufshcd_disable_auto_bkops(hba);
3797out:
3798 return err;
3799}
3800
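/*
 * Illustrative sketch, not part of the upstream file: the runtime
 * suspend path is the main user of this threshold logic; it funnels in
 * through ufshcd_urgent_bkops() below, which by default amounts to
 *
 *	ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
 *
 * i.e. auto-BKOPS stays enabled across runtime suspend whenever the
 * device reports at least a performance-impacted status.
 */
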
3801/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303802 * ufshcd_urgent_bkops - handle urgent bkops exception event
3803 * @hba: per-adapter instance
3804 *
3805 * Enable fBackgroundOpsEn flag in the device to permit background
3806 * operations.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003807 *
3808 * If BKOPS is enabled, this function returns 0, 1 if BKOPS is not enabled,
3809 * and a negative error value for any other failure.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303810 */
3811static int ufshcd_urgent_bkops(struct ufs_hba *hba)
3812{
Yaniv Gardiafdfff52016-03-10 17:37:15 +02003813 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303814}
3815
3816static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
3817{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02003818 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303819 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
3820}
3821
Yaniv Gardiafdfff52016-03-10 17:37:15 +02003822static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
3823{
3824 int err;
3825 u32 curr_status = 0;
3826
3827 if (hba->is_urgent_bkops_lvl_checked)
3828 goto enable_auto_bkops;
3829
3830 err = ufshcd_get_bkops_status(hba, &curr_status);
3831 if (err) {
3832 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
3833 __func__, err);
3834 goto out;
3835 }
3836
3837 /*
3838 * We are seeing that some devices are raising the urgent bkops
3839	 * exception events even when the BKOPS status doesn't indicate a
3840	 * performance-impacted or critical level. Handle these devices by
3841	 * determining their urgent bkops status at runtime.
3842 */
3843 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
3844 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
3845 __func__, curr_status);
3846 /* update the current status as the urgent bkops level */
3847 hba->urgent_bkops_lvl = curr_status;
3848 hba->is_urgent_bkops_lvl_checked = true;
3849 }
3850
3851enable_auto_bkops:
3852 err = ufshcd_enable_auto_bkops(hba);
3853out:
3854 if (err < 0)
3855 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
3856 __func__, err);
3857}
3858
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303859/**
3860 * ufshcd_exception_event_handler - handle exceptions raised by device
3861 * @work: pointer to work data
3862 *
3863 * Read bExceptionEventStatus attribute from the device and handle the
3864 * exception event accordingly.
3865 */
3866static void ufshcd_exception_event_handler(struct work_struct *work)
3867{
3868 struct ufs_hba *hba;
3869 int err;
3870 u32 status = 0;
3871 hba = container_of(work, struct ufs_hba, eeh_work);
3872
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05303873 pm_runtime_get_sync(hba->dev);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303874 err = ufshcd_get_ee_status(hba, &status);
3875 if (err) {
3876 dev_err(hba->dev, "%s: failed to get exception status %d\n",
3877 __func__, err);
3878 goto out;
3879 }
3880
3881 status &= hba->ee_ctrl_mask;
Yaniv Gardiafdfff52016-03-10 17:37:15 +02003882
3883 if (status & MASK_EE_URGENT_BKOPS)
3884 ufshcd_bkops_exception_event_handler(hba);
3885
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303886out:
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05303887 pm_runtime_put_sync(hba->dev);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303888 return;
3889}
3890
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003891/* Complete requests that have door-bell cleared */
3892static void ufshcd_complete_requests(struct ufs_hba *hba)
3893{
3894 ufshcd_transfer_req_compl(hba);
3895 ufshcd_tmc_handler(hba);
3896}
3897
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303898/**
Yaniv Gardi583fa622016-03-10 17:37:13 +02003899 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
3900 * required to recover from the DL NAC errors or not.
3901 * @hba: per-adapter instance
3902 *
3903 * Returns true if error handling is required, false otherwise
3904 */
3905static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
3906{
3907 unsigned long flags;
3908 bool err_handling = true;
3909
3910 spin_lock_irqsave(hba->host->host_lock, flags);
3911 /*
3912 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
3913 * device fatal error and/or DL NAC & REPLAY timeout errors.
3914 */
3915 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
3916 goto out;
3917
3918 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
3919 ((hba->saved_err & UIC_ERROR) &&
3920 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
3921 goto out;
3922
3923 if ((hba->saved_err & UIC_ERROR) &&
3924 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
3925 int err;
3926 /*
3927 * wait for 50ms to see if we can get any other errors or not.
3928 */
3929 spin_unlock_irqrestore(hba->host->host_lock, flags);
3930 msleep(50);
3931 spin_lock_irqsave(hba->host->host_lock, flags);
3932
3933 /*
3934		 * now check if we have got any other severe error besides the
3935		 * DL NAC error.
3936 */
3937 if ((hba->saved_err & INT_FATAL_ERRORS) ||
3938 ((hba->saved_err & UIC_ERROR) &&
3939 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
3940 goto out;
3941
3942 /*
3943 * As DL NAC is the only error received so far, send out NOP
3944		 * command to confirm whether the link is still active.
3945 * - If we don't get any response then do error recovery.
3946 * - If we get response then clear the DL NAC error bit.
3947 */
3948
3949 spin_unlock_irqrestore(hba->host->host_lock, flags);
3950 err = ufshcd_verify_dev_init(hba);
3951 spin_lock_irqsave(hba->host->host_lock, flags);
3952
3953 if (err)
3954 goto out;
3955
3956 /* Link seems to be alive hence ignore the DL NAC errors */
3957 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
3958 hba->saved_err &= ~UIC_ERROR;
3959 /* clear NAC error */
3960 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
3961 if (!hba->saved_uic_err) {
3962 err_handling = false;
3963 goto out;
3964 }
3965 }
3966out:
3967 spin_unlock_irqrestore(hba->host->host_lock, flags);
3968 return err_handling;
3969}
3970
3971/**
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05303972 * ufshcd_err_handler - handle UFS errors that require s/w attention
3973 * @work: pointer to work structure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303974 */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05303975static void ufshcd_err_handler(struct work_struct *work)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303976{
3977 struct ufs_hba *hba;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05303978 unsigned long flags;
3979 u32 err_xfer = 0;
3980 u32 err_tm = 0;
3981 int err = 0;
3982 int tag;
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003983 bool needs_reset = false;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05303984
3985 hba = container_of(work, struct ufs_hba, eh_work);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303986
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05303987 pm_runtime_get_sync(hba->dev);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003988 ufshcd_hold(hba, false);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05303989
3990 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003991 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05303992 goto out;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05303993
3994 hba->ufshcd_state = UFSHCD_STATE_RESET;
3995 ufshcd_set_eh_in_progress(hba);
3996
3997 /* Complete requests that have door-bell cleared by h/w */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02003998 ufshcd_complete_requests(hba);
Yaniv Gardi583fa622016-03-10 17:37:13 +02003999
4000 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
4001 bool ret;
4002
4003 spin_unlock_irqrestore(hba->host->host_lock, flags);
4004 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
4005 ret = ufshcd_quirk_dl_nac_errors(hba);
4006 spin_lock_irqsave(hba->host->host_lock, flags);
4007 if (!ret)
4008 goto skip_err_handling;
4009 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004010 if ((hba->saved_err & INT_FATAL_ERRORS) ||
4011 ((hba->saved_err & UIC_ERROR) &&
4012 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
4013 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
4014 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
4015 needs_reset = true;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304016
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004017 /*
4018 * if host reset is required then skip clearing the pending
4019 * transfers forcefully because they will automatically get
4020 * cleared after link startup.
4021 */
4022 if (needs_reset)
4023 goto skip_pending_xfer_clear;
4024
4025 /* release lock as clear command might sleep */
4026 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304027 /* Clear pending transfer requests */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004028 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
4029 if (ufshcd_clear_cmd(hba, tag)) {
4030 err_xfer = true;
4031 goto lock_skip_pending_xfer_clear;
4032 }
4033 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304034
4035 /* Clear pending task management requests */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004036 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
4037 if (ufshcd_clear_tm_cmd(hba, tag)) {
4038 err_tm = true;
4039 goto lock_skip_pending_xfer_clear;
4040 }
4041 }
4042
4043lock_skip_pending_xfer_clear:
4044 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304045
4046 /* Complete the requests that are cleared by s/w */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004047 ufshcd_complete_requests(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304048
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004049 if (err_xfer || err_tm)
4050 needs_reset = true;
4051
4052skip_pending_xfer_clear:
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304053 /* Fatal errors need reset */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004054 if (needs_reset) {
4055 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
4056
4057 /*
4058 * ufshcd_reset_and_restore() does the link reinitialization
4059		 * which will need at least one empty doorbell slot to send the
4060 * device management commands (NOP and query commands).
4061 * If there is no slot empty at this moment then free up last
4062 * slot forcefully.
4063 */
4064 if (hba->outstanding_reqs == max_doorbells)
4065 __ufshcd_transfer_req_compl(hba,
4066 (1UL << (hba->nutrs - 1)));
4067
4068 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304069 err = ufshcd_reset_and_restore(hba);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004070 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304071 if (err) {
4072 dev_err(hba->dev, "%s: reset and restore failed\n",
4073 __func__);
4074 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4075 }
4076 /*
4077 * Inform scsi mid-layer that we did reset and allow to handle
4078 * Unit Attention properly.
4079 */
4080 scsi_report_bus_reset(hba->host, 0);
4081 hba->saved_err = 0;
4082 hba->saved_uic_err = 0;
4083 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004084
Yaniv Gardi583fa622016-03-10 17:37:13 +02004085skip_err_handling:
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004086 if (!needs_reset) {
4087 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4088 if (hba->saved_err || hba->saved_uic_err)
4089 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
4090 __func__, hba->saved_err, hba->saved_uic_err);
4091 }
4092
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304093 ufshcd_clear_eh_in_progress(hba);
4094
4095out:
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004096 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304097 scsi_unblock_requests(hba->host);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004098 ufshcd_release(hba);
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05304099 pm_runtime_put_sync(hba->dev);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304100}
4101
4102/**
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304103 * ufshcd_update_uic_error - check and set fatal UIC error flags.
4104 * @hba: per-adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304105 */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304106static void ufshcd_update_uic_error(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304107{
4108 u32 reg;
4109
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304110 /* PA_INIT_ERROR is fatal and needs UIC reset */
4111 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
4112 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
4113 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
Yaniv Gardi583fa622016-03-10 17:37:13 +02004114 else if (hba->dev_quirks &
4115 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
4116 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
4117 hba->uic_error |=
4118 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
4119 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
4120 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
4121 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304122
4123 /* UIC NL/TL/DME errors needs software retry */
4124 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
4125 if (reg)
4126 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
4127
4128 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
4129 if (reg)
4130 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
4131
4132 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
4133 if (reg)
4134 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
4135
4136 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
4137 __func__, hba->uic_error);
4138}
4139
4140/**
4141 * ufshcd_check_errors - Check for errors that need s/w attention
4142 * @hba: per-adapter instance
4143 */
4144static void ufshcd_check_errors(struct ufs_hba *hba)
4145{
4146 bool queue_eh_work = false;
4147
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304148 if (hba->errors & INT_FATAL_ERRORS)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304149 queue_eh_work = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304150
4151 if (hba->errors & UIC_ERROR) {
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304152 hba->uic_error = 0;
4153 ufshcd_update_uic_error(hba);
4154 if (hba->uic_error)
4155 queue_eh_work = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304156 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304157
4158 if (queue_eh_work) {
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02004159 /*
4160		 * update the transfer error masks to sticky bits; do this
4161		 * irrespective of the current ufshcd_state.
4162 */
4163 hba->saved_err |= hba->errors;
4164 hba->saved_uic_err |= hba->uic_error;
4165
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304166 /* handle fatal errors only when link is functional */
4167 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
4168 /* block commands from scsi mid-layer */
4169 scsi_block_requests(hba->host);
4170
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304171 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4172 schedule_work(&hba->eh_work);
4173 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304174 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304175 /*
4176 * if (!queue_eh_work) -
4177 * Other errors are either non-fatal where host recovers
4178 * itself without s/w intervention or errors that will be
4179 * handled by the SCSI core layer.
4180 */
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304181}
4182
4183/**
4184 * ufshcd_tmc_handler - handle task management function completion
4185 * @hba: per adapter instance
4186 */
4187static void ufshcd_tmc_handler(struct ufs_hba *hba)
4188{
4189 u32 tm_doorbell;
4190
Seungwon Jeonb873a2752013-06-26 22:39:26 +05304191 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304192 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304193 wake_up(&hba->tm_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304194}
4195
4196/**
4197 * ufshcd_sl_intr - Interrupt service routine
4198 * @hba: per adapter instance
4199 * @intr_status: contains interrupts generated by the controller
4200 */
4201static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
4202{
4203 hba->errors = UFSHCD_ERROR_MASK & intr_status;
4204 if (hba->errors)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304205 ufshcd_check_errors(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304206
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304207 if (intr_status & UFSHCD_UIC_MASK)
4208 ufshcd_uic_cmd_compl(hba, intr_status);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304209
4210 if (intr_status & UTP_TASK_REQ_COMPL)
4211 ufshcd_tmc_handler(hba);
4212
4213 if (intr_status & UTP_TRANSFER_REQ_COMPL)
4214 ufshcd_transfer_req_compl(hba);
4215}
4216
4217/**
4218 * ufshcd_intr - Main interrupt service routine
4219 * @irq: irq number
4220 * @__hba: pointer to adapter instance
4221 *
4222 * Returns IRQ_HANDLED - If interrupt is valid
4223 * IRQ_NONE - If invalid interrupt
4224 */
4225static irqreturn_t ufshcd_intr(int irq, void *__hba)
4226{
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004227 u32 intr_status, enabled_intr_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304228 irqreturn_t retval = IRQ_NONE;
4229 struct ufs_hba *hba = __hba;
4230
4231 spin_lock(hba->host->host_lock);
Seungwon Jeonb873a2752013-06-26 22:39:26 +05304232 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004233 enabled_intr_status =
4234 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304235
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004236 if (intr_status)
Seungwon Jeon261ea452013-06-26 22:39:28 +05304237 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004238
4239 if (enabled_intr_status) {
4240 ufshcd_sl_intr(hba, enabled_intr_status);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304241 retval = IRQ_HANDLED;
4242 }
4243 spin_unlock(hba->host->host_lock);
4244 return retval;
4245}
4246
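/*
 * Illustrative sketch, not part of this hunk: the init path later in
 * this file registers the handler as a shared interrupt, roughly
 *
 *	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED,
 *			       UFSHCD, hba);
 *
 * which is why ufshcd_intr() masks the raw status with the enabled
 * interrupts and returns IRQ_NONE when the line fired for another
 * device sharing the IRQ.
 */
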
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304247static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
4248{
4249 int err = 0;
4250 u32 mask = 1 << tag;
4251 unsigned long flags;
4252
4253 if (!test_bit(tag, &hba->outstanding_tasks))
4254 goto out;
4255
4256 spin_lock_irqsave(hba->host->host_lock, flags);
4257 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
4258 spin_unlock_irqrestore(hba->host->host_lock, flags);
4259
4260 /* poll for max. 1 sec to clear door bell register by h/w */
4261 err = ufshcd_wait_for_register(hba,
4262 REG_UTP_TASK_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02004263 mask, 0, 1000, 1000, true);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304264out:
4265 return err;
4266}
4267
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304268/**
4269 * ufshcd_issue_tm_cmd - issues task management commands to controller
4270 * @hba: per adapter instance
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304271 * @lun_id: LUN ID to which TM command is sent
4272 * @task_id: task ID to which the TM command is applicable
4273 * @tm_function: task management function opcode
4274 * @tm_response: task management service response return value
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304275 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304276 * Returns non-zero value on error, zero on success.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304277 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304278static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
4279 u8 tm_function, u8 *tm_response)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304280{
4281 struct utp_task_req_desc *task_req_descp;
4282 struct utp_upiu_task_req *task_req_upiup;
4283 struct Scsi_Host *host;
4284 unsigned long flags;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304285 int free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304286 int err;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304287 int task_tag;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304288
4289 host = hba->host;
4290
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304291 /*
4292 * Get free slot, sleep if slots are unavailable.
4293 * Even though we use wait_event() which sleeps indefinitely,
4294 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
4295 */
4296 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004297 ufshcd_hold(hba, false);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304298
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304299 spin_lock_irqsave(host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304300 task_req_descp = hba->utmrdl_base_addr;
4301 task_req_descp += free_slot;
4302
4303 /* Configure task request descriptor */
4304 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
4305 task_req_descp->header.dword_2 =
4306 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
4307
4308 /* Configure task request UPIU */
4309 task_req_upiup =
4310 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304311 task_tag = hba->nutrs + free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304312 task_req_upiup->header.dword_0 =
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304313 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304314 lun_id, task_tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304315 task_req_upiup->header.dword_1 =
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304316 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004317 /*
4318 * The host shall provide the same value for LUN field in the basic
4319 * header and for Input Parameter.
4320 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304321 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
4322 task_req_upiup->input_param2 = cpu_to_be32(task_id);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304323
4324 /* send command to the controller */
4325 __set_bit(free_slot, &hba->outstanding_tasks);
Yaniv Gardi897efe62016-02-01 15:02:48 +02004326
4327 /* Make sure descriptors are ready before ringing the task doorbell */
4328 wmb();
4329
Seungwon Jeonb873a2752013-06-26 22:39:26 +05304330 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
Gilad Bronerad1a1b92016-10-17 17:09:36 -07004331 /* Make sure that doorbell is committed immediately */
4332 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304333
4334 spin_unlock_irqrestore(host->host_lock, flags);
4335
4336 /* wait until the task management command is completed */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304337 err = wait_event_timeout(hba->tm_wq,
4338 test_bit(free_slot, &hba->tm_condition),
4339 msecs_to_jiffies(TM_CMD_TIMEOUT));
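	/* wait_event_timeout() returns 0 on timeout, so !err means the TM cmd timed out */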
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304340 if (!err) {
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304341 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
4342 __func__, tm_function);
4343 if (ufshcd_clear_tm_cmd(hba, free_slot))
4344			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
4345 __func__, free_slot);
4346 err = -ETIMEDOUT;
4347 } else {
4348 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304349 }
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304350
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304351 clear_bit(free_slot, &hba->tm_condition);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304352 ufshcd_put_tm_slot(hba, free_slot);
4353 wake_up(&hba->tm_tag_wq);
4354
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004355 ufshcd_release(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304356 return err;
4357}
4358
4359/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304360 * ufshcd_eh_device_reset_handler - device reset handler registered to
4361 * scsi layer.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304362 * @cmd: SCSI command pointer
4363 *
4364 * Returns SUCCESS/FAILED
4365 */
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304366static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304367{
4368 struct Scsi_Host *host;
4369 struct ufs_hba *hba;
4370 unsigned int tag;
4371 u32 pos;
4372 int err;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304373 u8 resp = 0xF;
4374 struct ufshcd_lrb *lrbp;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304375 unsigned long flags;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304376
4377 host = cmd->device->host;
4378 hba = shost_priv(host);
4379 tag = cmd->request->tag;
4380
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304381 lrbp = &hba->lrb[tag];
4382 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
4383 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304384 if (!err)
4385 err = resp;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304386 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304387 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304388
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304389 /* clear the commands that were pending for corresponding LUN */
4390 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
4391 if (hba->lrb[pos].lun == lrbp->lun) {
4392 err = ufshcd_clear_cmd(hba, pos);
4393 if (err)
4394 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304395 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304396 }
4397 spin_lock_irqsave(host->host_lock, flags);
4398 ufshcd_transfer_req_compl(hba);
4399 spin_unlock_irqrestore(host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304400out:
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304401 if (!err) {
4402 err = SUCCESS;
4403 } else {
4404 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
4405 err = FAILED;
4406 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304407 return err;
4408}
4409
4410/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304411 * ufshcd_abort - abort a specific command
4412 * @cmd: SCSI command pointer
4413 *
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304414 * Abort the pending command in device by sending UFS_ABORT_TASK task management
4415 * command, and in the host controller by clearing the door-bell register. There can
4416 * be a race between the controller sending the command to the device while the abort is
4417 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
4418 * really issued and then try to abort it.
4419 *
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304420 * Returns SUCCESS/FAILED
4421 */
4422static int ufshcd_abort(struct scsi_cmnd *cmd)
4423{
4424 struct Scsi_Host *host;
4425 struct ufs_hba *hba;
4426 unsigned long flags;
4427 unsigned int tag;
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304428 int err = 0;
4429 int poll_cnt;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304430 u8 resp = 0xF;
4431 struct ufshcd_lrb *lrbp;
Dolev Ravive9d501b2014-07-01 12:22:37 +03004432 u32 reg;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304433
4434 host = cmd->device->host;
4435 hba = shost_priv(host);
4436 tag = cmd->request->tag;
Yaniv Gardi14497322016-02-01 15:02:39 +02004437 if (!ufshcd_valid_tag(hba, tag)) {
4438 dev_err(hba->dev,
4439 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
4440 __func__, tag, cmd, cmd->request);
4441 BUG();
4442 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304443
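	/* block clock gating so the controller registers stay accessible */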
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004444 ufshcd_hold(hba, false);
Dolev Ravive9d501b2014-07-01 12:22:37 +03004445 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Yaniv Gardi14497322016-02-01 15:02:39 +02004446 /* If command is already aborted/completed, return SUCCESS */
4447 if (!(test_bit(tag, &hba->outstanding_reqs))) {
4448 dev_err(hba->dev,
4449 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
4450 __func__, tag, hba->outstanding_reqs, reg);
4451 goto out;
4452 }
4453
Dolev Ravive9d501b2014-07-01 12:22:37 +03004454 if (!(reg & (1 << tag))) {
4455 dev_err(hba->dev,
4456 "%s: cmd was completed, but without a notifying intr, tag = %d",
4457 __func__, tag);
4458 }
4459
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304460 lrbp = &hba->lrb[tag];
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304461 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
4462 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
4463 UFS_QUERY_TASK, &resp);
4464 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
4465 /* cmd pending in the device */
4466 break;
4467 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304468 /*
4469 * cmd not pending in the device, check if it is
4470 * in transition.
4471 */
4472 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4473 if (reg & (1 << tag)) {
4474 /* sleep for max. 200us to stabilize */
4475 usleep_range(100, 200);
4476 continue;
4477 }
4478 /* command completed already */
4479 goto out;
4480 } else {
4481 if (!err)
4482 err = resp; /* service response error */
4483 goto out;
4484 }
4485 }
4486
4487 if (!poll_cnt) {
4488 err = -EBUSY;
4489 goto out;
4490 }
4491
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304492 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
4493 UFS_ABORT_TASK, &resp);
4494 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304495 if (!err)
4496 err = resp; /* service response error */
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304497 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304498 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304499
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304500 err = ufshcd_clear_cmd(hba, tag);
4501 if (err)
4502 goto out;
4503
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304504 scsi_dma_unmap(cmd);
4505
4506 spin_lock_irqsave(host->host_lock, flags);
Yaniv Gardia48353f2016-02-01 15:02:40 +02004507 ufshcd_outstanding_req_clear(hba, tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304508 hba->lrb[tag].cmd = NULL;
4509 spin_unlock_irqrestore(host->host_lock, flags);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304510
4511 clear_bit_unlock(tag, &hba->lrb_in_use);
4512 wake_up(&hba->dev_cmd.tag_wq);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004513
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304514out:
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304515 if (!err) {
4516 err = SUCCESS;
4517 } else {
4518 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
4519 err = FAILED;
4520 }
4521
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004522 /*
4523 * This ufshcd_release() corresponds to the original scsi cmd that got
4524 * aborted here (as we won't get any IRQ for it).
4525 */
4526 ufshcd_release(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304527 return err;
4528}
4529
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304530/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304531 * ufshcd_host_reset_and_restore - reset and restore host controller
4532 * @hba: per-adapter instance
4533 *
4534 * Note that a host controller reset may issue DME_RESET to the
4535 * local and remote (device) UniPro stacks, and the attributes
4536 * are then reset to their default state.
4537 *
4538 * Returns zero on success, non-zero on failure
4539 */
4540static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
4541{
4542 int err;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304543 unsigned long flags;
4544
4545 /* Reset the host controller */
4546 spin_lock_irqsave(hba->host->host_lock, flags);
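	/*
	 * The boolean appears to select whether the stop polling may sleep;
	 * pass 'false' here since the host lock is held (compare the 'true'
	 * used on the suspend path, where no lock is held).
	 */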
Yaniv Gardi596585a2016-03-10 17:37:08 +02004547 ufshcd_hba_stop(hba, false);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304548 spin_unlock_irqrestore(hba->host->host_lock, flags);
4549
4550 err = ufshcd_hba_enable(hba);
4551 if (err)
4552 goto out;
4553
4554 /* Establish the link again and restore the device */
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004555 err = ufshcd_probe_hba(hba);
4556
4557 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304558 err = -EIO;
4559out:
4560 if (err)
4561 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
4562
4563 return err;
4564}
4565
4566/**
4567 * ufshcd_reset_and_restore - reset and re-initialize host/device
4568 * @hba: per-adapter instance
4569 *
4570 * Reset and recover device, host and re-establish link. This
4571 * is helpful to recover the communication in fatal error conditions.
4572 *
4573 * Returns zero on success, non-zero on failure
4574 */
4575static int ufshcd_reset_and_restore(struct ufs_hba *hba)
4576{
4577 int err = 0;
4578 unsigned long flags;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004579 int retries = MAX_HOST_RESET_RETRIES;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304580
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004581 do {
4582 err = ufshcd_host_reset_and_restore(hba);
4583 } while (err && --retries);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304584
4585 /*
4586 * After reset the door-bell might be cleared, complete
4587 * outstanding requests in s/w here.
4588 */
4589 spin_lock_irqsave(hba->host->host_lock, flags);
4590 ufshcd_transfer_req_compl(hba);
4591 ufshcd_tmc_handler(hba);
4592 spin_unlock_irqrestore(hba->host->host_lock, flags);
4593
4594 return err;
4595}
4596
4597/**
4598 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
4599 * @cmd: SCSI command pointer
4600 *
4601 * Returns SUCCESS/FAILED
4602 */
4603static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
4604{
4605 int err;
4606 unsigned long flags;
4607 struct ufs_hba *hba;
4608
4609 hba = shost_priv(cmd->device->host);
4610
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004611 ufshcd_hold(hba, false);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304612 /*
4613 * Check if there is any race with fatal error handling.
4614 * If so, wait for it to complete. Even though fatal error
4615 * handling does reset and restore in some cases, don't assume
4616	 * anything out of it. We are just avoiding a race here.
4617 */
4618 do {
4619 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304620 if (!(work_pending(&hba->eh_work) ||
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304621 hba->ufshcd_state == UFSHCD_STATE_RESET))
4622 break;
4623 spin_unlock_irqrestore(hba->host->host_lock, flags);
4624 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304625 flush_work(&hba->eh_work);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304626 } while (1);
4627
4628 hba->ufshcd_state = UFSHCD_STATE_RESET;
4629 ufshcd_set_eh_in_progress(hba);
4630 spin_unlock_irqrestore(hba->host->host_lock, flags);
4631
4632 err = ufshcd_reset_and_restore(hba);
4633
4634 spin_lock_irqsave(hba->host->host_lock, flags);
4635 if (!err) {
4636 err = SUCCESS;
4637 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4638 } else {
4639 err = FAILED;
4640 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4641 }
4642 ufshcd_clear_eh_in_progress(hba);
4643 spin_unlock_irqrestore(hba->host->host_lock, flags);
4644
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004645 ufshcd_release(hba);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304646 return err;
4647}
4648
4649/**
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03004650 * ufshcd_get_max_icc_level - calculate the ICC level
4651 * @sup_curr_uA: max. current supported by the regulator
4652 * @start_scan: row in the descriptor table to start the scan from
4653 * @buff: power descriptor buffer
4654 *
4655 * Returns calculated max ICC level for specific regulator
4656 */
4657static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
4658{
4659 int i;
4660 int curr_uA;
4661 u16 data;
4662 u16 unit;
4663
4664 for (i = start_scan; i >= 0; i--) {
4665 data = be16_to_cpu(*((u16 *)(buff + 2*i)));
4666 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
4667 ATTR_ICC_LVL_UNIT_OFFSET;
4668 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
4669 switch (unit) {
4670 case UFSHCD_NANO_AMP:
4671 curr_uA = curr_uA / 1000;
4672 break;
4673 case UFSHCD_MILI_AMP:
4674 curr_uA = curr_uA * 1000;
4675 break;
4676 case UFSHCD_AMP:
4677 curr_uA = curr_uA * 1000 * 1000;
4678 break;
4679 case UFSHCD_MICRO_AMP:
4680 default:
4681 break;
4682 }
4683 if (sup_curr_uA >= curr_uA)
4684 break;
4685 }
4686 if (i < 0) {
4687 i = 0;
4688 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
4689 }
4690
4691 return (u32)i;
4692}
4693
4694/**
4695 * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
4696 * ICC level. In case the regulators are not initialized we'll return 0.
4697 * @hba: per-adapter instance
4698 * @desc_buf: power descriptor buffer to extract ICC levels from.
4699 * @len: length of desc_buf
4700 *
4701 * Returns calculated ICC level
4702 */
4703static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
4704 u8 *desc_buf, int len)
4705{
4706 u32 icc_level = 0;
4707
4708 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
4709 !hba->vreg_info.vccq2) {
4710 dev_err(hba->dev,
4711 "%s: Regulator capability was not set, actvIccLevel=%d",
4712 __func__, icc_level);
4713 goto out;
4714 }
4715
4716 if (hba->vreg_info.vcc)
4717 icc_level = ufshcd_get_max_icc_level(
4718 hba->vreg_info.vcc->max_uA,
4719 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
4720 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
4721
4722 if (hba->vreg_info.vccq)
4723 icc_level = ufshcd_get_max_icc_level(
4724 hba->vreg_info.vccq->max_uA,
4725 icc_level,
4726 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
4727
4728 if (hba->vreg_info.vccq2)
4729 icc_level = ufshcd_get_max_icc_level(
4730 hba->vreg_info.vccq2->max_uA,
4731 icc_level,
4732 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
4733out:
4734 return icc_level;
4735}
4736
4737static void ufshcd_init_icc_levels(struct ufs_hba *hba)
4738{
4739 int ret;
4740 int buff_len = QUERY_DESC_POWER_MAX_SIZE;
4741 u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
4742
4743 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
4744 if (ret) {
4745 dev_err(hba->dev,
4746 "%s: Failed reading power descriptor.len = %d ret = %d",
4747 __func__, buff_len, ret);
4748 return;
4749 }
4750
4751 hba->init_prefetch_data.icc_level =
4752 ufshcd_find_max_sup_active_icc_level(hba,
4753 desc_buf, buff_len);
4754 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
4755 __func__, hba->init_prefetch_data.icc_level);
4756
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02004757 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4758 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
4759 &hba->init_prefetch_data.icc_level);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03004760
4761 if (ret)
4762 dev_err(hba->dev,
4763 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
4764			__func__, hba->init_prefetch_data.icc_level, ret);
4765
4766}
4767
4768/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004769 * ufshcd_scsi_add_wlus - Adds required W-LUs
4770 * @hba: per-adapter instance
4771 *
4772 * UFS device specification requires the UFS devices to support 4 well known
4773 * logical units:
4774 * "REPORT_LUNS" (address: 01h)
4775 * "UFS Device" (address: 50h)
4776 * "RPMB" (address: 44h)
4777 * "BOOT" (address: 30h)
4778 * UFS device's power management needs to be controlled by "POWER CONDITION"
4779 * field of SSU (START STOP UNIT) command. But this "power condition" field
4780 * will take effect only when it is sent to the "UFS device" well known logical
4781 * unit, hence we require the scsi_device instance to represent this logical unit
4782 * in order for the UFS host driver to send the SSU command for power management.
4783 *
4784 * We also require the scsi_device instance for the "RPMB" (Replay Protected
4785 * Memory Block) LU so that a user space process can control this LU. User space
4786 * may also want to have access to the BOOT LU.
4787 *
4788 * This function adds scsi device instances for each of the well known LUs
4789 * (except the "REPORT LUNS" LU).
4790 *
4791 * Returns zero on success (all required W-LUs are added successfully),
4792 * non-zero error value on failure (if failed to add any of the required W-LU).
4793 */
4794static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4795{
4796 int ret = 0;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004797 struct scsi_device *sdev_rpmb;
4798 struct scsi_device *sdev_boot;
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004799
4800 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
4801 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
4802 if (IS_ERR(hba->sdev_ufs_device)) {
4803 ret = PTR_ERR(hba->sdev_ufs_device);
4804 hba->sdev_ufs_device = NULL;
4805 goto out;
4806 }
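	/*
	 * __scsi_add_device() returns the scsi_device with a reference held;
	 * drop it here, the SCSI midlayer keeps its own reference and
	 * hba->sdev_ufs_device remains a valid cached pointer.
	 */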
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004807 scsi_device_put(hba->sdev_ufs_device);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004808
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004809 sdev_boot = __scsi_add_device(hba->host, 0, 0,
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004810 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004811 if (IS_ERR(sdev_boot)) {
4812 ret = PTR_ERR(sdev_boot);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004813 goto remove_sdev_ufs_device;
4814 }
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004815 scsi_device_put(sdev_boot);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004816
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004817 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004818 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004819 if (IS_ERR(sdev_rpmb)) {
4820 ret = PTR_ERR(sdev_rpmb);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004821 goto remove_sdev_boot;
4822 }
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004823 scsi_device_put(sdev_rpmb);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004824 goto out;
4825
4826remove_sdev_boot:
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004827 scsi_remove_device(sdev_boot);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004828remove_sdev_ufs_device:
4829 scsi_remove_device(hba->sdev_ufs_device);
4830out:
4831 return ret;
4832}
4833
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02004834static int ufs_get_device_info(struct ufs_hba *hba,
4835 struct ufs_device_info *card_data)
4836{
4837 int err;
4838 u8 model_index;
4839 u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
4840 u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
4841
4842 err = ufshcd_read_device_desc(hba, desc_buf,
4843 QUERY_DESC_DEVICE_MAX_SIZE);
4844 if (err) {
4845 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
4846 __func__, err);
4847 goto out;
4848 }
4849
4850	/*
4851	 * The vendor field (wManufacturerID) is stored in big endian format
4852	 * in the descriptor, so assemble it byte by byte here.
4853	 */
4854 card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
4855 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
4856
4857 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
4858
4859 err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
4860 QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
4861 if (err) {
4862 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
4863 __func__, err);
4864 goto out;
4865 }
4866
4867 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
4868 strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
4869 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
4870 MAX_MODEL_LEN));
4871
4872 /* Null terminate the model string */
4873 card_data->model[MAX_MODEL_LEN] = '\0';
4874
4875out:
4876 return err;
4877}
4878
4879void ufs_advertise_fixup_device(struct ufs_hba *hba)
4880{
4881 int err;
4882 struct ufs_dev_fix *f;
4883 struct ufs_device_info card_data;
4884
4885 card_data.wmanufacturerid = 0;
4886
4887 err = ufs_get_device_info(hba, &card_data);
4888 if (err) {
4889 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
4890 __func__, err);
4891 return;
4892 }
4893
4894 for (f = ufs_fixups; f->quirk; f++) {
4895 if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
4896 (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
4897 (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
4898 !strcmp(f->card.model, UFS_ANY_MODEL)))
4899 hba->dev_quirks |= f->quirk;
4900 }
4901}
4902
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004903/**
Yaniv Gardi37113102016-03-10 17:37:16 +02004904 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
4905 * @hba: per-adapter instance
4906 *
4907 * PA_TActivate parameter can be tuned manually if the UniPro version is less
4908 * than 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
4909 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
4910 * the hibern8 exit latency.
4911 *
4912 * Returns zero on success, non-zero error value on failure.
4913 */
4914static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
4915{
4916 int ret = 0;
4917 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
4918
4919 ret = ufshcd_dme_peer_get(hba,
4920 UIC_ARG_MIB_SEL(
4921 RX_MIN_ACTIVATETIME_CAPABILITY,
4922 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
4923 &peer_rx_min_activatetime);
4924 if (ret)
4925 goto out;
4926
4927 /* make sure proper unit conversion is applied */
4928 tuned_pa_tactivate =
4929 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
4930 / PA_TACTIVATE_TIME_UNIT_US);
4931 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
4932 tuned_pa_tactivate);
4933
4934out:
4935 return ret;
4936}
4937
4938/**
4939 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
4940 * @hba: per-adapter instance
4941 *
4942 * PA_Hibern8Time parameter can be tuned manually if the UniPro version is less
4943 * than 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
4944 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
4945 * This optimal value can help reduce the hibern8 exit latency.
4946 *
4947 * Returns zero on success, non-zero error value on failure.
4948 */
4949static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
4950{
4951 int ret = 0;
4952 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
4953 u32 max_hibern8_time, tuned_pa_hibern8time;
4954
4955 ret = ufshcd_dme_get(hba,
4956 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
4957 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
4958 &local_tx_hibern8_time_cap);
4959 if (ret)
4960 goto out;
4961
4962 ret = ufshcd_dme_peer_get(hba,
4963 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
4964 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
4965 &peer_rx_hibern8_time_cap);
4966 if (ret)
4967 goto out;
4968
4969 max_hibern8_time = max(local_tx_hibern8_time_cap,
4970 peer_rx_hibern8_time_cap);
4971 /* make sure proper unit conversion is applied */
4972 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
4973 / PA_HIBERN8_TIME_UNIT_US);
4974 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
4975 tuned_pa_hibern8time);
4976out:
4977 return ret;
4978}
4979
4980static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
4981{
4982 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
4983 ufshcd_tune_pa_tactivate(hba);
4984 ufshcd_tune_pa_hibern8time(hba);
4985 }
4986
4987 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
4988		/* set 1ms timeout for PA_TACTIVATE (10 x 100us PA_TACTIVATE units) */
4989 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
4990}
4991
4992/**
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004993 * ufshcd_probe_hba - probe hba to detect device and initialize
4994 * @hba: per-adapter instance
4995 *
4996 * Execute link-startup and verify device initialization
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304997 */
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004998static int ufshcd_probe_hba(struct ufs_hba *hba)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304999{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305000 int ret;
5001
5002 ret = ufshcd_link_startup(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305003 if (ret)
5004 goto out;
5005
Yaniv Gardi50646362014-10-23 13:25:13 +03005006 ufshcd_init_pwr_info(hba);
5007
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005008 /* set the default level for urgent bkops */
5009 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5010 hba->is_urgent_bkops_lvl_checked = false;
5011
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005012 /* UniPro link is active now */
5013 ufshcd_set_link_active(hba);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05305014
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305015 ret = ufshcd_verify_dev_init(hba);
5016 if (ret)
5017 goto out;
5018
Dolev Raviv68078d52013-07-30 00:35:58 +05305019 ret = ufshcd_complete_dev_init(hba);
5020 if (ret)
5021 goto out;
5022
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02005023 ufs_advertise_fixup_device(hba);
Yaniv Gardi37113102016-03-10 17:37:16 +02005024 ufshcd_tune_unipro_params(hba);
Yaniv Gardi60f01872016-03-10 17:37:11 +02005025
5026 ret = ufshcd_set_vccq_rail_unused(hba,
5027 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
5028 if (ret)
5029 goto out;
5030
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005031 /* UFS device is also active now */
5032 ufshcd_set_ufs_dev_active(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305033 ufshcd_force_reset_auto_bkops(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005034 hba->wlun_dev_clr_ua = true;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305035
Dolev Raviv7eb584d2014-09-25 15:32:31 +03005036 if (ufshcd_get_max_pwr_mode(hba)) {
5037 dev_err(hba->dev,
5038 "%s: Failed getting max supported power mode\n",
5039 __func__);
5040 } else {
5041 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
5042 if (ret)
5043 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
5044 __func__, ret);
5045 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005046
Yaniv Gardi53c12d02016-02-01 15:02:45 +02005047 /* set the state as operational after switching to desired gear */
5048 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005049 /*
5050 * If we are in error handling context or in power management callbacks
5051 * context, no need to scan the host
5052 */
5053 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
5054 bool flag;
5055
5056 /* clear any previous UFS device information */
5057 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02005058 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
5059 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005060 hba->dev_info.f_power_on_wp_en = flag;
5061
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03005062 if (!hba->is_init_prefetch)
5063 ufshcd_init_icc_levels(hba);
5064
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03005065 /* Add required well known logical units to scsi mid layer */
5066 if (ufshcd_scsi_add_wlus(hba))
5067 goto out;
5068
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305069 scsi_scan_host(hba->host);
5070 pm_runtime_put_sync(hba->dev);
5071 }
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03005072
5073 if (!hba->is_init_prefetch)
5074 hba->is_init_prefetch = true;
5075
Sahitya Tummala856b3482014-09-25 15:32:34 +03005076 /* Resume devfreq after UFS device is detected */
5077 if (ufshcd_is_clkscaling_enabled(hba))
5078 devfreq_resume_device(hba->devfreq);
5079
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305080out:
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005081 /*
5082 * If we failed to initialize the device or the device is not
5083 * present, turn off the power/clocks etc.
5084 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005085 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
5086 pm_runtime_put_sync(hba->dev);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005087 ufshcd_hba_exit(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005088 }
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005089
5090 return ret;
5091}
5092
5093/**
5094 * ufshcd_async_scan - asynchronous execution for probing hba
5095 * @data: data pointer to pass to this function
5096 * @cookie: cookie data
5097 */
5098static void ufshcd_async_scan(void *data, async_cookie_t cookie)
5099{
5100 struct ufs_hba *hba = (struct ufs_hba *)data;
5101
5102 ufshcd_probe_hba(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305103}
5104
Yaniv Gardif550c652016-03-10 17:37:07 +02005105static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
5106{
5107 unsigned long flags;
5108 struct Scsi_Host *host;
5109 struct ufs_hba *hba;
5110 int index;
5111 bool found = false;
5112
5113 if (!scmd || !scmd->device || !scmd->device->host)
5114 return BLK_EH_NOT_HANDLED;
5115
5116 host = scmd->device->host;
5117 hba = shost_priv(host);
5118 if (!hba)
5119 return BLK_EH_NOT_HANDLED;
5120
5121 spin_lock_irqsave(host->host_lock, flags);
5122
5123 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
5124 if (hba->lrb[index].cmd == scmd) {
5125 found = true;
5126 break;
5127 }
5128 }
5129
5130 spin_unlock_irqrestore(host->host_lock, flags);
5131
5132 /*
5133 * Bypass SCSI error handling and reset the block layer timer if this
5134 * SCSI command was not actually dispatched to UFS driver, otherwise
5135 * let SCSI layer handle the error as usual.
5136 */
5137 return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
5138}
5139
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305140static struct scsi_host_template ufshcd_driver_template = {
5141 .module = THIS_MODULE,
5142 .name = UFSHCD,
5143 .proc_name = UFSHCD,
5144 .queuecommand = ufshcd_queuecommand,
5145 .slave_alloc = ufshcd_slave_alloc,
Akinobu Mitaeeda4742014-07-01 23:00:32 +09005146 .slave_configure = ufshcd_slave_configure,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305147 .slave_destroy = ufshcd_slave_destroy,
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03005148 .change_queue_depth = ufshcd_change_queue_depth,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305149 .eh_abort_handler = ufshcd_abort,
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305150 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
5151 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
Yaniv Gardif550c652016-03-10 17:37:07 +02005152 .eh_timed_out = ufshcd_eh_timed_out,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305153 .this_id = -1,
5154 .sg_tablesize = SG_ALL,
5155 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
5156 .can_queue = UFSHCD_CAN_QUEUE,
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005157 .max_host_blocked = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01005158 .track_queue_depth = 1,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305159};
5160
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005161static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
5162 int ua)
5163{
Bjorn Andersson7b16a072015-02-11 19:35:28 -08005164 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005165
Bjorn Andersson7b16a072015-02-11 19:35:28 -08005166 if (!vreg)
5167 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005168
Bjorn Andersson7b16a072015-02-11 19:35:28 -08005169 ret = regulator_set_load(vreg->reg, ua);
5170 if (ret < 0) {
5171 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
5172 __func__, vreg->name, ua, ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005173 }
5174
5175 return ret;
5176}
5177
5178static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
5179 struct ufs_vreg *vreg)
5180{
Yaniv Gardi60f01872016-03-10 17:37:11 +02005181 if (!vreg)
5182 return 0;
5183 else if (vreg->unused)
5184 return 0;
5185 else
5186 return ufshcd_config_vreg_load(hba->dev, vreg,
5187 UFS_VREG_LPM_LOAD_UA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005188}
5189
5190static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
5191 struct ufs_vreg *vreg)
5192{
Yaniv Gardi60f01872016-03-10 17:37:11 +02005193 if (!vreg)
5194 return 0;
5195 else if (vreg->unused)
5196 return 0;
5197 else
5198 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005199}
5200
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005201static int ufshcd_config_vreg(struct device *dev,
5202 struct ufs_vreg *vreg, bool on)
5203{
5204 int ret = 0;
5205 struct regulator *reg = vreg->reg;
5206 const char *name = vreg->name;
5207 int min_uV, uA_load;
5208
5209 BUG_ON(!vreg);
5210
5211 if (regulator_count_voltages(reg) > 0) {
5212 min_uV = on ? vreg->min_uV : 0;
5213 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
5214 if (ret) {
5215 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
5216 __func__, name, ret);
5217 goto out;
5218 }
5219
5220 uA_load = on ? vreg->max_uA : 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005221 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
5222 if (ret)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005223 goto out;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005224 }
5225out:
5226 return ret;
5227}
5228
5229static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
5230{
5231 int ret = 0;
5232
Yaniv Gardi60f01872016-03-10 17:37:11 +02005233 if (!vreg)
5234 goto out;
5235 else if (vreg->enabled || vreg->unused)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005236 goto out;
5237
5238 ret = ufshcd_config_vreg(dev, vreg, true);
5239 if (!ret)
5240 ret = regulator_enable(vreg->reg);
5241
5242 if (!ret)
5243 vreg->enabled = true;
5244 else
5245 dev_err(dev, "%s: %s enable failed, err=%d\n",
5246 __func__, vreg->name, ret);
5247out:
5248 return ret;
5249}
5250
5251static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
5252{
5253 int ret = 0;
5254
Yaniv Gardi60f01872016-03-10 17:37:11 +02005255 if (!vreg)
5256 goto out;
5257 else if (!vreg->enabled || vreg->unused)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005258 goto out;
5259
5260 ret = regulator_disable(vreg->reg);
5261
5262 if (!ret) {
5263 /* ignore errors on applying disable config */
5264 ufshcd_config_vreg(dev, vreg, false);
5265 vreg->enabled = false;
5266 } else {
5267 dev_err(dev, "%s: %s disable failed, err=%d\n",
5268 __func__, vreg->name, ret);
5269 }
5270out:
5271 return ret;
5272}
5273
5274static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
5275{
5276 int ret = 0;
5277 struct device *dev = hba->dev;
5278 struct ufs_vreg_info *info = &hba->vreg_info;
5279
5280 if (!info)
5281 goto out;
5282
5283 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
5284 if (ret)
5285 goto out;
5286
5287 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
5288 if (ret)
5289 goto out;
5290
5291 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
5292 if (ret)
5293 goto out;
5294
5295out:
5296 if (ret) {
5297 ufshcd_toggle_vreg(dev, info->vccq2, false);
5298 ufshcd_toggle_vreg(dev, info->vccq, false);
5299 ufshcd_toggle_vreg(dev, info->vcc, false);
5300 }
5301 return ret;
5302}
5303
Raviv Shvili6a771a62014-09-25 15:32:24 +03005304static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
5305{
5306 struct ufs_vreg_info *info = &hba->vreg_info;
5307
5308 if (info)
5309 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
5310
5311 return 0;
5312}
5313
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005314static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
5315{
5316 int ret = 0;
5317
5318 if (!vreg)
5319 goto out;
5320
5321 vreg->reg = devm_regulator_get(dev, vreg->name);
5322 if (IS_ERR(vreg->reg)) {
5323 ret = PTR_ERR(vreg->reg);
5324 dev_err(dev, "%s: %s get failed, err=%d\n",
5325 __func__, vreg->name, ret);
5326 }
5327out:
5328 return ret;
5329}
5330
5331static int ufshcd_init_vreg(struct ufs_hba *hba)
5332{
5333 int ret = 0;
5334 struct device *dev = hba->dev;
5335 struct ufs_vreg_info *info = &hba->vreg_info;
5336
5337 if (!info)
5338 goto out;
5339
5340 ret = ufshcd_get_vreg(dev, info->vcc);
5341 if (ret)
5342 goto out;
5343
5344 ret = ufshcd_get_vreg(dev, info->vccq);
5345 if (ret)
5346 goto out;
5347
5348 ret = ufshcd_get_vreg(dev, info->vccq2);
5349out:
5350 return ret;
5351}
5352
Raviv Shvili6a771a62014-09-25 15:32:24 +03005353static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
5354{
5355 struct ufs_vreg_info *info = &hba->vreg_info;
5356
5357 if (info)
5358 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
5359
5360 return 0;
5361}
5362
Yaniv Gardi60f01872016-03-10 17:37:11 +02005363static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
5364{
5365 int ret = 0;
5366 struct ufs_vreg_info *info = &hba->vreg_info;
5367
5368 if (!info)
5369 goto out;
5370 else if (!info->vccq)
5371 goto out;
5372
5373 if (unused) {
5374 /* shut off the rail here */
5375 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
5376 /*
5377 * Mark this rail as no longer used, so it doesn't get enabled
5378 * later by mistake
5379 */
5380 if (!ret)
5381 info->vccq->unused = true;
5382 } else {
5383 /*
5384 * rail should have been already enabled hence just make sure
5385 * that unused flag is cleared.
5386 */
5387 info->vccq->unused = false;
5388 }
5389out:
5390 return ret;
5391}
5392
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005393static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
5394 bool skip_ref_clk)
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005395{
5396 int ret = 0;
5397 struct ufs_clk_info *clki;
5398 struct list_head *head = &hba->clk_list_head;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005399 unsigned long flags;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005400
5401 if (!head || list_empty(head))
5402 goto out;
5403
Subhash Jadavani1e879e82016-10-06 21:48:22 -07005404 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
5405 if (ret)
5406 return ret;
5407
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005408 list_for_each_entry(clki, head, list) {
5409 if (!IS_ERR_OR_NULL(clki->clk)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005410 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
5411 continue;
5412
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005413 if (on && !clki->enabled) {
5414 ret = clk_prepare_enable(clki->clk);
5415 if (ret) {
5416 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
5417 __func__, clki->name, ret);
5418 goto out;
5419 }
5420 } else if (!on && clki->enabled) {
5421 clk_disable_unprepare(clki->clk);
5422 }
5423 clki->enabled = on;
5424 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
5425 clki->name, on ? "en" : "dis");
5426 }
5427 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005428
Subhash Jadavani1e879e82016-10-06 21:48:22 -07005429 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
5430 if (ret)
5431 return ret;
5432
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005433out:
5434 if (ret) {
5435 list_for_each_entry(clki, head, list) {
5436 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
5437 clk_disable_unprepare(clki->clk);
5438 }
Dolev Raviveda910e2014-10-23 13:25:16 +03005439 } else if (on) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005440 spin_lock_irqsave(hba->host->host_lock, flags);
5441 hba->clk_gating.state = CLKS_ON;
5442 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005443 }
5444 return ret;
5445}
5446
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005447static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
5448{
5449 return __ufshcd_setup_clocks(hba, on, false);
5450}
5451
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005452static int ufshcd_init_clocks(struct ufs_hba *hba)
5453{
5454 int ret = 0;
5455 struct ufs_clk_info *clki;
5456 struct device *dev = hba->dev;
5457 struct list_head *head = &hba->clk_list_head;
5458
5459 if (!head || list_empty(head))
5460 goto out;
5461
5462 list_for_each_entry(clki, head, list) {
5463 if (!clki->name)
5464 continue;
5465
5466 clki->clk = devm_clk_get(dev, clki->name);
5467 if (IS_ERR(clki->clk)) {
5468 ret = PTR_ERR(clki->clk);
5469 dev_err(dev, "%s: %s clk get failed, %d\n",
5470 __func__, clki->name, ret);
5471 goto out;
5472 }
5473
5474 if (clki->max_freq) {
5475 ret = clk_set_rate(clki->clk, clki->max_freq);
5476 if (ret) {
5477 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
5478 __func__, clki->name,
5479 clki->max_freq, ret);
5480 goto out;
5481 }
Sahitya Tummala856b3482014-09-25 15:32:34 +03005482 clki->curr_freq = clki->max_freq;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005483 }
5484 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
5485 clki->name, clk_get_rate(clki->clk));
5486 }
5487out:
5488 return ret;
5489}
5490
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005491static int ufshcd_variant_hba_init(struct ufs_hba *hba)
5492{
5493 int err = 0;
5494
5495 if (!hba->vops)
5496 goto out;
5497
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005498 err = ufshcd_vops_init(hba);
5499 if (err)
5500 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005501
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005502 err = ufshcd_vops_setup_regulators(hba, true);
5503 if (err)
5504 goto out_exit;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005505
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005506 goto out;
5507
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005508out_exit:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005509 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005510out:
5511 if (err)
5512 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005513 __func__, ufshcd_get_var_name(hba), err);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005514 return err;
5515}
5516
5517static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
5518{
5519 if (!hba->vops)
5520 return;
5521
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005522 ufshcd_vops_setup_regulators(hba, false);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005523
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005524 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005525}
5526
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005527static int ufshcd_hba_init(struct ufs_hba *hba)
5528{
5529 int err;
5530
Raviv Shvili6a771a62014-09-25 15:32:24 +03005531 /*
5532 * Handle host controller power separately from the UFS device power
5533	 * rails, as this makes it easier to control the UFS host controller power
5534	 * collapse, which is different from the UFS device power collapse.
5535	 * Also, enable the host controller power before we go ahead with the rest
5536	 * of the initialization here.
5537 */
5538 err = ufshcd_init_hba_vreg(hba);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005539 if (err)
5540 goto out;
5541
Raviv Shvili6a771a62014-09-25 15:32:24 +03005542 err = ufshcd_setup_hba_vreg(hba, true);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005543 if (err)
5544 goto out;
5545
Raviv Shvili6a771a62014-09-25 15:32:24 +03005546 err = ufshcd_init_clocks(hba);
5547 if (err)
5548 goto out_disable_hba_vreg;
5549
5550 err = ufshcd_setup_clocks(hba, true);
5551 if (err)
5552 goto out_disable_hba_vreg;
5553
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005554 err = ufshcd_init_vreg(hba);
5555 if (err)
5556 goto out_disable_clks;
5557
5558 err = ufshcd_setup_vreg(hba, true);
5559 if (err)
5560 goto out_disable_clks;
5561
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005562 err = ufshcd_variant_hba_init(hba);
5563 if (err)
5564 goto out_disable_vreg;
5565
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005566 hba->is_powered = true;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005567 goto out;
5568
5569out_disable_vreg:
5570 ufshcd_setup_vreg(hba, false);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005571out_disable_clks:
5572 ufshcd_setup_clocks(hba, false);
Raviv Shvili6a771a62014-09-25 15:32:24 +03005573out_disable_hba_vreg:
5574 ufshcd_setup_hba_vreg(hba, false);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005575out:
5576 return err;
5577}
5578
5579static void ufshcd_hba_exit(struct ufs_hba *hba)
5580{
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005581 if (hba->is_powered) {
5582 ufshcd_variant_hba_exit(hba);
5583 ufshcd_setup_vreg(hba, false);
5584 ufshcd_setup_clocks(hba, false);
5585 ufshcd_setup_hba_vreg(hba, false);
5586 hba->is_powered = false;
5587 }
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005588}
5589
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005590static int
5591ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305592{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005593 unsigned char cmd[6] = {REQUEST_SENSE,
5594 0,
5595 0,
5596 0,
5597 SCSI_SENSE_BUFFERSIZE,
5598 0};
5599 char *buffer;
5600 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305601
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005602 buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
5603 if (!buffer) {
5604 ret = -ENOMEM;
5605 goto out;
5606 }
5607
5608 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
5609 SCSI_SENSE_BUFFERSIZE, NULL,
5610 msecs_to_jiffies(1000), 3, NULL, REQ_PM);
5611 if (ret)
5612 pr_err("%s: failed with err %d\n", __func__, ret);
5613
5614 kfree(buffer);
5615out:
5616 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305617}
5618
5619/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005620 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
5621 * power mode
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05305622 * @hba: per adapter instance
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005623 * @pwr_mode: device power mode to set
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305624 *
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005625 * Returns 0 if requested power mode is set successfully
5626 * Returns non-zero if failed to set the requested power mode
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305627 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005628static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
5629 enum ufs_dev_pwr_mode pwr_mode)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305630{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005631 unsigned char cmd[6] = { START_STOP };
5632 struct scsi_sense_hdr sshdr;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005633 struct scsi_device *sdp;
5634 unsigned long flags;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005635 int ret;
5636
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005637 spin_lock_irqsave(hba->host->host_lock, flags);
5638 sdp = hba->sdev_ufs_device;
5639 if (sdp) {
5640 ret = scsi_device_get(sdp);
5641 if (!ret && !scsi_device_online(sdp)) {
5642 ret = -ENODEV;
5643 scsi_device_put(sdp);
5644 }
5645 } else {
5646 ret = -ENODEV;
5647 }
5648 spin_unlock_irqrestore(hba->host->host_lock, flags);
5649
5650 if (ret)
5651 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005652
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305653 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005654 * If scsi commands fail, the scsi mid-layer schedules scsi error-
5655 * handling, which would wait for host to be resumed. Since we know
5656 * we are functional while we are here, skip host resume in error
5657 * handling context.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305658 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005659 hba->host->eh_noresume = 1;
5660 if (hba->wlun_dev_clr_ua) {
5661 ret = ufshcd_send_request_sense(hba, sdp);
5662 if (ret)
5663 goto out;
5664 /* Unit attention condition is cleared now */
5665 hba->wlun_dev_clr_ua = false;
5666 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305667
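	/*
	 * The POWER CONDITION field occupies bits 7:4 of byte 4 of the
	 * START STOP UNIT CDB, hence the shift by 4 below.
	 */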
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005668 cmd[4] = pwr_mode << 4;
5669
5670 /*
5671	 * This function is generally called from the power management
5672	 * callbacks, hence set the REQ_PM flag so that it doesn't resume the
5673	 * already suspended children.
5674 */
5675 ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
5676 START_STOP_TIMEOUT, 0, NULL, REQ_PM);
5677 if (ret) {
5678 sdev_printk(KERN_WARNING, sdp,
Hannes Reineckeef613292014-10-24 14:27:00 +02005679 "START_STOP failed for power mode: %d, result %x\n",
5680 pwr_mode, ret);
Hannes Reinecke21045512015-01-08 07:43:46 +01005681 if (driver_byte(ret) & DRIVER_SENSE)
5682 scsi_print_sense_hdr(sdp, NULL, &sshdr);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005683 }
5684
5685 if (!ret)
5686 hba->curr_dev_pwr_mode = pwr_mode;
5687out:
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005688 scsi_device_put(sdp);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005689 hba->host->eh_noresume = 0;
5690 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305691}
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05305692
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005693static int ufshcd_link_state_transition(struct ufs_hba *hba,
5694 enum uic_link_state req_link_state,
5695 int check_for_bkops)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305696{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005697 int ret = 0;
5698
5699 if (req_link_state == hba->uic_link_state)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305700 return 0;
5701
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005702 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
5703 ret = ufshcd_uic_hibern8_enter(hba);
5704 if (!ret)
5705 ufshcd_set_link_hibern8(hba);
5706 else
5707 goto out;
5708 }
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305709 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005710 * If autobkops is enabled, link can't be turned off because
5711 * turning off the link would also turn off the device.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305712 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005713 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
5714 (!check_for_bkops || (check_for_bkops &&
5715 !hba->auto_bkops_enabled))) {
5716 /*
Yaniv Gardif3099fb2016-03-10 17:37:17 +02005717		 * Let's make sure that the link is in low power mode; we currently
5718		 * do this by putting the link in Hibern8. Another way to put the
5719		 * link in low power mode is to send a DME end point reset to the
5720		 * device and then send the DME reset command to the local
5721		 * UniPro, but putting the link in Hibern8 is much faster.
5722 */
5723 ret = ufshcd_uic_hibern8_enter(hba);
5724 if (ret)
5725 goto out;
5726 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005727 * Change controller state to "reset state" which
5728 * should also put the link in off/reset state
5729 */
Yaniv Gardi596585a2016-03-10 17:37:08 +02005730 ufshcd_hba_stop(hba, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005731 /*
5732 * TODO: Check if we need any delay to make sure that
5733 * controller is reset
5734 */
5735 ufshcd_set_link_off(hba);
5736 }
5737
5738out:
5739 return ret;
5740}
5741
5742static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
5743{
5744 /*
Yaniv Gardib799fdf2016-03-10 17:37:18 +02005745 * It seems some UFS devices may keep drawing more than sleep current
5746	 * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
5747 * To avoid this situation, add 2ms delay before putting these UFS
5748 * rails in LPM mode.
5749 */
5750 if (!ufshcd_is_link_active(hba) &&
5751 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
5752 usleep_range(2000, 2100);
5753
5754 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005755	 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
5756	 * save some power.
5757 *
5758 * If UFS device and link is in OFF state, all power supplies (VCC,
5759 * VCCQ, VCCQ2) can be turned off if power on write protect is not
5760 * required. If UFS link is inactive (Hibern8 or OFF state) and device
5761 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
5762 *
5763 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
5764 * in low power state which would save some power.
5765 */
5766 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
5767 !hba->dev_info.is_lu_power_on_wp) {
5768 ufshcd_setup_vreg(hba, false);
5769 } else if (!ufshcd_is_ufs_dev_active(hba)) {
5770 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
5771 if (!ufshcd_is_link_active(hba)) {
5772 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
5773 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
5774 }
5775 }
5776}
5777
5778static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
5779{
5780 int ret = 0;
5781
5782 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
5783 !hba->dev_info.is_lu_power_on_wp) {
5784 ret = ufshcd_setup_vreg(hba, true);
5785 } else if (!ufshcd_is_ufs_dev_active(hba)) {
5786 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
5787 if (!ret && !ufshcd_is_link_active(hba)) {
5788 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
5789 if (ret)
5790 goto vcc_disable;
5791 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
5792 if (ret)
5793 goto vccq_lpm;
5794 }
5795 }
5796 goto out;
5797
5798vccq_lpm:
5799 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
5800vcc_disable:
5801 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
5802out:
5803 return ret;
5804}
5805
5806static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
5807{
5808 if (ufshcd_is_link_off(hba))
5809 ufshcd_setup_hba_vreg(hba, false);
5810}
5811
5812static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
5813{
5814 if (ufshcd_is_link_off(hba))
5815 ufshcd_setup_hba_vreg(hba, true);
5816}
5817
5818/**
5819 * ufshcd_suspend - helper function for suspend operations
5820 * @hba: per adapter instance
5821 * @pm_op: desired low power operation type
5822 *
5823 * This function will try to put the UFS device and link into low power
5824 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
5825 * (System PM level).
5826 *
5827 * If this function is called during shutdown, it will make sure that
5828 * both UFS device and UFS link is powered off.
5829 *
5830 * NOTE: UFS device & link must be active before we enter in this function.
5831 *
5832 * Returns 0 for success and non-zero for failure
5833 */
5834static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
5835{
5836 int ret = 0;
5837 enum ufs_pm_level pm_lvl;
5838 enum ufs_dev_pwr_mode req_dev_pwr_mode;
5839 enum uic_link_state req_link_state;
5840
5841 hba->pm_op_in_progress = 1;
5842 if (!ufshcd_is_shutdown_pm(pm_op)) {
5843 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
5844 hba->rpm_lvl : hba->spm_lvl;
5845 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
5846 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
5847 } else {
5848 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
5849 req_link_state = UIC_LINK_OFF_STATE;
5850 }
5851
5852 /*
5853	 * If we can't transition into any of the low power modes,
5854	 * just gate the clocks.
5855 */
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005856 ufshcd_hold(hba, false);
5857 hba->clk_gating.is_suspended = true;
5858
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005859 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
5860 req_link_state == UIC_LINK_ACTIVE_STATE) {
5861 goto disable_clks;
5862 }
5863
5864 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
5865 (req_link_state == hba->uic_link_state))
5866 goto out;
5867
5868	/* UFS device & link must be active before we enter this function */
5869 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
5870 ret = -EINVAL;
5871 goto out;
5872 }
5873
5874 if (ufshcd_is_runtime_pm(pm_op)) {
Subhash Jadavani374a2462014-09-25 15:32:35 +03005875 if (ufshcd_can_autobkops_during_suspend(hba)) {
5876 /*
5877			 * The device is idle with no requests in the queue;
5878			 * allow background operations if the bkops status shows
5879			 * that performance might be impacted.
5880 */
5881 ret = ufshcd_urgent_bkops(hba);
5882 if (ret)
5883 goto enable_gating;
5884 } else {
5885 /* make sure that auto bkops is disabled */
5886 ufshcd_disable_auto_bkops(hba);
5887 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005888 }
5889
5890 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
5891 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
5892 !ufshcd_is_runtime_pm(pm_op))) {
5893 /* ensure that bkops is disabled */
5894 ufshcd_disable_auto_bkops(hba);
5895 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
5896 if (ret)
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005897 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005898 }
5899
5900 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
5901 if (ret)
5902 goto set_dev_active;
5903
5904 ufshcd_vreg_set_lpm(hba);
5905
5906disable_clks:
5907 /*
Sahitya Tummala856b3482014-09-25 15:32:34 +03005908	 * The clock scaling needs access to controller registers. Hence, wait
5909 * for pending clock scaling work to be done before clocks are
5910 * turned off.
5911 */
5912 if (ufshcd_is_clkscaling_enabled(hba)) {
5913 devfreq_suspend_device(hba->devfreq);
5914 hba->clk_scaling.window_start_t = 0;
5915 }
5916 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005917 * Call vendor specific suspend callback. As these callbacks may access
5918	 * vendor specific host controller register space, call them while the
5919	 * host clocks are still ON.
5920 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005921 ret = ufshcd_vops_suspend(hba, pm_op);
5922 if (ret)
5923 goto set_link_active;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005924
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005925 if (!ufshcd_is_link_active(hba))
5926 ufshcd_setup_clocks(hba, false);
5927 else
5928 /* If link is active, device ref_clk can't be switched off */
5929 __ufshcd_setup_clocks(hba, false, true);
5930
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005931 hba->clk_gating.state = CLKS_OFF;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005932 /*
5933	 * Disable the host irq as there won't be any host controller
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005934	 * transaction expected till resume.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005935 */
5936 ufshcd_disable_irq(hba);
5937 /* Put the host controller in low power mode if possible */
5938 ufshcd_hba_vreg_set_lpm(hba);
5939 goto out;
5940
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005941set_link_active:
5942 ufshcd_vreg_set_hpm(hba);
5943 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
5944 ufshcd_set_link_active(hba);
5945 else if (ufshcd_is_link_off(hba))
5946 ufshcd_host_reset_and_restore(hba);
5947set_dev_active:
5948 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
5949 ufshcd_disable_auto_bkops(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005950enable_gating:
5951 hba->clk_gating.is_suspended = false;
5952 ufshcd_release(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005953out:
5954 hba->pm_op_in_progress = 0;
5955 return ret;
5956}
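
/*
 * Illustrative example of the pm level mapping used above, assuming the
 * ufs_pm_lvl_states table defined earlier in this driver: a level such as
 * UFS_PM_LVL_3 maps to UFS_SLEEP_PWR_MODE for the device and
 * UIC_LINK_HIBERN8_STATE for the link, so ufshcd_suspend() first puts the
 * device to sleep (via ufshcd_set_dev_pwr_mode()) and then puts the link
 * into Hibern8 before gating the clocks.
 */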
5957
5958/**
5959 * ufshcd_resume - helper function for resume operations
5960 * @hba: per adapter instance
5961 * @pm_op: runtime PM or system PM
5962 *
5963 * This function basically brings the UFS device, UniPro link and controller
5964 * to active state.
5965 *
5966 * Returns 0 for success and non-zero for failure
5967 */
5968static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
5969{
5970 int ret;
5971 enum uic_link_state old_link_state;
5972
5973 hba->pm_op_in_progress = 1;
5974 old_link_state = hba->uic_link_state;
5975
5976 ufshcd_hba_vreg_set_hpm(hba);
5977 /* Make sure clocks are enabled before accessing controller */
5978 ret = ufshcd_setup_clocks(hba, true);
5979 if (ret)
5980 goto out;
5981
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005982 /* enable the host irq as host controller would be active soon */
5983 ret = ufshcd_enable_irq(hba);
5984 if (ret)
5985 goto disable_irq_and_vops_clks;
5986
5987 ret = ufshcd_vreg_set_hpm(hba);
5988 if (ret)
5989 goto disable_irq_and_vops_clks;
5990
5991 /*
5992 * Call vendor specific resume callback. As these callbacks may access
5993	 * vendor specific host controller register space, call them when the
5994 * host clocks are ON.
5995 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005996 ret = ufshcd_vops_resume(hba, pm_op);
5997 if (ret)
5998 goto disable_vreg;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005999
6000 if (ufshcd_is_link_hibern8(hba)) {
6001 ret = ufshcd_uic_hibern8_exit(hba);
6002 if (!ret)
6003 ufshcd_set_link_active(hba);
6004 else
6005 goto vendor_suspend;
6006 } else if (ufshcd_is_link_off(hba)) {
6007 ret = ufshcd_host_reset_and_restore(hba);
6008 /*
6009 * ufshcd_host_reset_and_restore() should have already
6010 * set the link state as active
6011 */
6012 if (ret || !ufshcd_is_link_active(hba))
6013 goto vendor_suspend;
6014 }
6015
6016 if (!ufshcd_is_ufs_dev_active(hba)) {
6017 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
6018 if (ret)
6019 goto set_old_link_state;
6020 }
6021
Subhash Jadavani374a2462014-09-25 15:32:35 +03006022 /*
6023	 * If BKOPS operations are urgently needed at this moment, keep
6024	 * auto-bkops enabled; otherwise disable it.
6025 */
6026 ufshcd_urgent_bkops(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006027 hba->clk_gating.is_suspended = false;
6028
Sahitya Tummala856b3482014-09-25 15:32:34 +03006029 if (ufshcd_is_clkscaling_enabled(hba))
6030 devfreq_resume_device(hba->devfreq);
6031
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006032 /* Schedule clock gating in case of no access to UFS device yet */
6033 ufshcd_release(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006034 goto out;
6035
6036set_old_link_state:
6037 ufshcd_link_state_transition(hba, old_link_state, 0);
6038vendor_suspend:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006039 ufshcd_vops_suspend(hba, pm_op);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006040disable_vreg:
6041 ufshcd_vreg_set_lpm(hba);
6042disable_irq_and_vops_clks:
6043 ufshcd_disable_irq(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006044 ufshcd_setup_clocks(hba, false);
6045out:
6046 hba->pm_op_in_progress = 0;
6047 return ret;
6048}
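
/*
 * Note that ufshcd_resume() brings up resources in the reverse order of
 * ufshcd_suspend() (HBA rail, clocks, irq, device rails, vendor ops, link,
 * device power mode), and each error label above unwinds only the steps
 * that had already completed.
 */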
6049
6050/**
6051 * ufshcd_system_suspend - system suspend routine
6052 * @hba: per adapter instance
6054 *
6055 * Check the description of the ufshcd_suspend() function for more details.
6056 *
6057 * Returns 0 for success and non-zero for failure
6058 */
6059int ufshcd_system_suspend(struct ufs_hba *hba)
6060{
6061 int ret = 0;
6062
6063 if (!hba || !hba->is_powered)
Dolev Raviv233b5942014-10-23 13:25:14 +03006064 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006065
6066 if (pm_runtime_suspended(hba->dev)) {
6067 if (hba->rpm_lvl == hba->spm_lvl)
6068 /*
6069		 * There is a possibility that the device may still be
6070		 * in active state during the runtime suspend.
6071 */
6072 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
6073 hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
6074 goto out;
6075
6076 /*
6077		 * The UFS device and/or UFS link low power states during runtime
6078		 * suspend seem to be different from what is expected during
6079		 * system suspend. Hence runtime resume the device & link and
6080		 * let the system suspend low power states take effect.
6081		 * TODO: If resume takes a long time, we might optimize it in
6082		 * the future by not resuming everything if possible.
6083 */
6084 ret = ufshcd_runtime_resume(hba);
6085 if (ret)
6086 goto out;
6087 }
6088
6089 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
6090out:
Dolev Ravive7850602014-09-25 15:32:36 +03006091 if (!ret)
6092 hba->is_sys_suspended = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006093 return ret;
6094}
6095EXPORT_SYMBOL(ufshcd_system_suspend);
6096
6097/**
6098 * ufshcd_system_resume - system resume routine
6099 * @hba: per adapter instance
6100 *
6101 * Returns 0 for success and non-zero for failure
6102 */
6104int ufshcd_system_resume(struct ufs_hba *hba)
6105{
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07006106 if (!hba)
6107 return -EINVAL;
6108
6109 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006110 /*
6111 * Let the runtime resume take care of resuming
6112 * if runtime suspended.
6113 */
6114 return 0;
6115
6116 return ufshcd_resume(hba, UFS_SYSTEM_PM);
6117}
6118EXPORT_SYMBOL(ufshcd_system_resume);
6119
6120/**
6121 * ufshcd_runtime_suspend - runtime suspend routine
6122 * @hba: per adapter instance
6123 *
6124 * Check the description of the ufshcd_suspend() function for more details.
6125 *
6126 * Returns 0 for success and non-zero for failure
6127 */
6128int ufshcd_runtime_suspend(struct ufs_hba *hba)
6129{
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07006130 if (!hba)
6131 return -EINVAL;
6132
6133 if (!hba->is_powered)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006134 return 0;
6135
6136 return ufshcd_suspend(hba, UFS_RUNTIME_PM);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306137}
6138EXPORT_SYMBOL(ufshcd_runtime_suspend);
6139
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006140/**
6141 * ufshcd_runtime_resume - runtime resume routine
6142 * @hba: per adapter instance
6143 *
6144 * This function basically brings the UFS device, UniPro link and controller
6145 * to active state. The following operations are done in this function:
6146 *
6147 * 1. Turn on all the controller related clocks
6148 * 2. Bring the UniPro link out of Hibernate state
6149 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
6150 * to active state.
6151 * 4. If auto-bkops is enabled on the device, disable it.
6152 *
6153 * So the following would be the possible power state after this
6154 * function returns successfully:
6155 * S1: UFS device in Active state with VCC rail ON
6156 * UniPro link in Active state
6157 * All the UFS/UniPro controller clocks are ON
6158 *
6159 * Returns 0 for success and non-zero for failure
6160 */
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306161int ufshcd_runtime_resume(struct ufs_hba *hba)
6162{
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07006163 if (!hba)
6164 return -EINVAL;
6165
6166 if (!hba->is_powered)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306167 return 0;
Yaniv Gardie3ce73d2016-10-17 17:09:24 -07006168
6169 return ufshcd_resume(hba, UFS_RUNTIME_PM);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306170}
6171EXPORT_SYMBOL(ufshcd_runtime_resume);
6172
6173int ufshcd_runtime_idle(struct ufs_hba *hba)
6174{
6175 return 0;
6176}
6177EXPORT_SYMBOL(ufshcd_runtime_idle);
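
/*
 * The PM helpers exported above are meant to be wired into a bus glue
 * driver's dev_pm_ops. A minimal sketch, assuming a glue driver that
 * stored the hba pointer with dev_set_drvdata(); the ufs_glue_* names
 * are hypothetical:
 *
 *	static int ufs_glue_suspend(struct device *dev)
 *	{
 *		return ufshcd_system_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int ufs_glue_resume(struct device *dev)
 *	{
 *		return ufshcd_system_resume(dev_get_drvdata(dev));
 *	}
 *
 *	static int ufs_glue_runtime_suspend(struct device *dev)
 *	{
 *		return ufshcd_runtime_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int ufs_glue_runtime_resume(struct device *dev)
 *	{
 *		return ufshcd_runtime_resume(dev_get_drvdata(dev));
 *	}
 *
 *	static const struct dev_pm_ops ufs_glue_pm_ops = {
 *		.suspend	 = ufs_glue_suspend,
 *		.resume		 = ufs_glue_resume,
 *		.runtime_suspend = ufs_glue_runtime_suspend,
 *		.runtime_resume	 = ufs_glue_runtime_resume,
 *	};
 */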
6178
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306179/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006180 * ufshcd_shutdown - shutdown routine
6181 * @hba: per adapter instance
6182 *
6183 * This function would power off both the UFS device and the UFS link.
6184 *
6185 * Returns 0 always to allow force shutdown even in case of errors.
6186 */
6187int ufshcd_shutdown(struct ufs_hba *hba)
6188{
6189 int ret = 0;
6190
6191 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
6192 goto out;
6193
6194 if (pm_runtime_suspended(hba->dev)) {
6195 ret = ufshcd_runtime_resume(hba);
6196 if (ret)
6197 goto out;
6198 }
6199
6200 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
6201out:
6202 if (ret)
6203 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
6204 /* allow force shutdown even in case of errors */
6205 return 0;
6206}
6207EXPORT_SYMBOL(ufshcd_shutdown);
6208
6209/**
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306210 * ufshcd_remove - de-allocate the SCSI host and host memory space
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306211 * data structures
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306212 * @hba: per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306213 */
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306214void ufshcd_remove(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306215{
Akinobu Mitacfdf9c92013-07-30 00:36:03 +05306216 scsi_remove_host(hba->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306217 /* disable interrupts */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05306218 ufshcd_disable_intr(hba, hba->intr_mask);
Yaniv Gardi596585a2016-03-10 17:37:08 +02006219 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306220
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306221 scsi_host_put(hba->host);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006222
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006223 ufshcd_exit_clk_gating(hba);
Sahitya Tummala856b3482014-09-25 15:32:34 +03006224 if (ufshcd_is_clkscaling_enabled(hba))
6225 devfreq_remove_device(hba->devfreq);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006226 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306227}
6228EXPORT_SYMBOL_GPL(ufshcd_remove);
6229
6230/**
Yaniv Gardi47555a52015-10-28 13:15:49 +02006231 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
6232 * @hba: pointer to Host Bus Adapter (HBA)
6233 */
6234void ufshcd_dealloc_host(struct ufs_hba *hba)
6235{
6236 scsi_host_put(hba->host);
6237}
6238EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
6239
6240/**
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09006241 * ufshcd_set_dma_mask - Set dma mask based on the controller
6242 * addressing capability
6243 * @hba: per adapter instance
6244 *
6245 * Returns 0 for success, non-zero for failure
6246 */
6247static int ufshcd_set_dma_mask(struct ufs_hba *hba)
6248{
6249 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
6250 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
6251 return 0;
6252 }
6253 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
6254}
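
/*
 * In other words, a controller advertising MASK_64_ADDRESSING_SUPPORT in
 * its capabilities register is tried with a 64-bit DMA mask first, and
 * everything else (or a 64-bit failure) falls back to 32-bit addressing
 * for the host memory space buffers.
 */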
6255
6256/**
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006257 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306258 * @dev: pointer to device handle
6259 * @hba_handle: driver private handle
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306260 * Returns 0 on success, non-zero value on failure
6261 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006262int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306263{
6264 struct Scsi_Host *host;
6265 struct ufs_hba *hba;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006266 int err = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306267
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306268 if (!dev) {
6269 dev_err(dev,
6270 "Invalid memory reference for dev is NULL\n");
6271 err = -ENODEV;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306272 goto out_error;
6273 }
6274
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306275 host = scsi_host_alloc(&ufshcd_driver_template,
6276 sizeof(struct ufs_hba));
6277 if (!host) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306278 dev_err(dev, "scsi_host_alloc failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306279 err = -ENOMEM;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306280 goto out_error;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306281 }
6282 hba = shost_priv(host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306283 hba->host = host;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306284 hba->dev = dev;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006285 *hba_handle = hba;
6286
6287out_error:
6288 return err;
6289}
6290EXPORT_SYMBOL(ufshcd_alloc_host);
6291
Sahitya Tummala856b3482014-09-25 15:32:34 +03006292static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
6293{
6294 int ret = 0;
6295 struct ufs_clk_info *clki;
6296 struct list_head *head = &hba->clk_list_head;
6297
6298 if (!head || list_empty(head))
6299 goto out;
6300
Yaniv Gardif06fcc72015-10-28 13:15:51 +02006301 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
6302 if (ret)
6303 return ret;
6304
Sahitya Tummala856b3482014-09-25 15:32:34 +03006305 list_for_each_entry(clki, head, list) {
6306 if (!IS_ERR_OR_NULL(clki->clk)) {
6307 if (scale_up && clki->max_freq) {
6308 if (clki->curr_freq == clki->max_freq)
6309 continue;
6310 ret = clk_set_rate(clki->clk, clki->max_freq);
6311 if (ret) {
6312 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
6313 __func__, clki->name,
6314 clki->max_freq, ret);
6315 break;
6316 }
6317 clki->curr_freq = clki->max_freq;
6318
6319 } else if (!scale_up && clki->min_freq) {
6320 if (clki->curr_freq == clki->min_freq)
6321 continue;
6322 ret = clk_set_rate(clki->clk, clki->min_freq);
6323 if (ret) {
6324 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
6325 __func__, clki->name,
6326 clki->min_freq, ret);
6327 break;
6328 }
6329 clki->curr_freq = clki->min_freq;
6330 }
6331 }
6332 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
6333 clki->name, clk_get_rate(clki->clk));
6334 }
Yaniv Gardif06fcc72015-10-28 13:15:51 +02006335
6336 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
6337
Sahitya Tummala856b3482014-09-25 15:32:34 +03006338out:
6339 return ret;
6340}
6341
6342static int ufshcd_devfreq_target(struct device *dev,
6343 unsigned long *freq, u32 flags)
6344{
6345 int err = 0;
6346 struct ufs_hba *hba = dev_get_drvdata(dev);
6347
6348 if (!ufshcd_is_clkscaling_enabled(hba))
6349 return -EINVAL;
6350
6351 if (*freq == UINT_MAX)
6352 err = ufshcd_scale_clks(hba, true);
6353 else if (*freq == 0)
6354 err = ufshcd_scale_clks(hba, false);
6355
6356 return err;
6357}
6358
6359static int ufshcd_devfreq_get_dev_status(struct device *dev,
6360 struct devfreq_dev_status *stat)
6361{
6362 struct ufs_hba *hba = dev_get_drvdata(dev);
6363 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
6364 unsigned long flags;
6365
6366 if (!ufshcd_is_clkscaling_enabled(hba))
6367 return -EINVAL;
6368
6369 memset(stat, 0, sizeof(*stat));
6370
6371 spin_lock_irqsave(hba->host->host_lock, flags);
6372 if (!scaling->window_start_t)
6373 goto start_window;
6374
6375 if (scaling->is_busy_started)
6376 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
6377 scaling->busy_start_t));
6378
6379 stat->total_time = jiffies_to_usecs((long)jiffies -
6380 (long)scaling->window_start_t);
6381 stat->busy_time = scaling->tot_busy_t;
6382start_window:
6383 scaling->window_start_t = jiffies;
6384 scaling->tot_busy_t = 0;
6385
6386 if (hba->outstanding_reqs) {
6387 scaling->busy_start_t = ktime_get();
6388 scaling->is_busy_started = true;
6389 } else {
6390 scaling->busy_start_t = ktime_set(0, 0);
6391 scaling->is_busy_started = false;
6392 }
6393 spin_unlock_irqrestore(hba->host->host_lock, flags);
6394 return 0;
6395}
6396
6397static struct devfreq_dev_profile ufs_devfreq_profile = {
6398 .polling_ms = 100,
6399 .target = ufshcd_devfreq_target,
6400 .get_dev_status = ufshcd_devfreq_get_dev_status,
6401};
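
/*
 * With the "simple_ondemand" governor polling every 100ms, the load is
 * derived from the busy_time/total_time ratio reported by
 * ufshcd_devfreq_get_dev_status(). Since no frequency table is registered
 * and current_frequency is never populated, the requested frequency is
 * expected to collapse to the extremes checked in ufshcd_devfreq_target():
 * UINT_MAX scales the clocks up to max_freq and 0 scales them down to
 * min_freq.
 */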
6402
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006403/**
6404 * ufshcd_init - Driver initialization routine
6405 * @hba: per-adapter instance
6406 * @mmio_base: base register address
6407 * @irq: Interrupt line of device
6408 * Returns 0 on success, non-zero value on failure
6409 */
6410int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
6411{
6412 int err;
6413 struct Scsi_Host *host = hba->host;
6414 struct device *dev = hba->dev;
6415
6416 if (!mmio_base) {
6417 dev_err(hba->dev,
6418 "Invalid memory reference for mmio_base is NULL\n");
6419 err = -ENODEV;
6420 goto out_error;
6421 }
6422
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306423 hba->mmio_base = mmio_base;
6424 hba->irq = irq;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306425
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006426 err = ufshcd_hba_init(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006427 if (err)
6428 goto out_error;
6429
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306430 /* Read capabilities registers */
6431 ufshcd_hba_capabilities(hba);
6432
6433 /* Get UFS version supported by the controller */
6434 hba->ufs_version = ufshcd_get_ufs_version(hba);
6435
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05306436 /* Get Interrupt bit mask per version */
6437 hba->intr_mask = ufshcd_get_intr_mask(hba);
6438
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09006439 err = ufshcd_set_dma_mask(hba);
6440 if (err) {
6441 dev_err(hba->dev, "set dma mask failed\n");
6442 goto out_disable;
6443 }
6444
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306445 /* Allocate memory for host memory space */
6446 err = ufshcd_memory_alloc(hba);
6447 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306448 dev_err(hba->dev, "Memory allocation failed\n");
6449 goto out_disable;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306450 }
6451
6452 /* Configure LRB */
6453 ufshcd_host_memory_configure(hba);
6454
6455 host->can_queue = hba->nutrs;
6456 host->cmd_per_lun = hba->nutrs;
6457 host->max_id = UFSHCD_MAX_ID;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03006458 host->max_lun = UFS_MAX_LUNS;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306459 host->max_channel = UFSHCD_MAX_CHANNEL;
6460 host->unique_id = host->host_no;
6461 host->max_cmd_len = MAX_CDB_SIZE;
6462
Dolev Raviv7eb584d2014-09-25 15:32:31 +03006463 hba->max_pwr_info.is_valid = false;
6464
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306465	/* Initialize wait queue for task management */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306466 init_waitqueue_head(&hba->tm_wq);
6467 init_waitqueue_head(&hba->tm_tag_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306468
6469 /* Initialize work queues */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306470 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306471 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306472
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306473 /* Initialize UIC command mutex */
6474 mutex_init(&hba->uic_cmd_mutex);
6475
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05306476 /* Initialize mutex for device management commands */
6477 mutex_init(&hba->dev_cmd.lock);
6478
6479 /* Initialize device management tag acquire wait queue */
6480 init_waitqueue_head(&hba->dev_cmd.tag_wq);
6481
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006482 ufshcd_init_clk_gating(hba);
Yaniv Gardi199ef132016-03-10 17:37:06 +02006483
6484 /*
6485 * In order to avoid any spurious interrupt immediately after
6486	 * registering the UFS controller interrupt handler, clear any pending UFS
6487 * interrupt status and disable all the UFS interrupts.
6488 */
6489 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
6490 REG_INTERRUPT_STATUS);
6491 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
6492 /*
6493 * Make sure that UFS interrupts are disabled and any pending interrupt
6494	 * status is cleared before registering the UFS interrupt handler.
6495 */
6496 mb();
6497
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306498 /* IRQ registration */
Seungwon Jeon2953f852013-06-27 13:31:54 +09006499 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306500 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306501 dev_err(hba->dev, "request irq failed\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006502 goto exit_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006503 } else {
6504 hba->is_irq_enabled = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306505 }
6506
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306507 err = scsi_add_host(host, hba->dev);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306508 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306509 dev_err(hba->dev, "scsi_add_host failed\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006510 goto exit_gating;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306511 }
6512
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306513 /* Host controller enable */
6514 err = ufshcd_hba_enable(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306515 if (err) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306516 dev_err(hba->dev, "Host controller enable failed\n");
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306517 goto out_remove_scsi_host;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306518 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306519
Sahitya Tummala856b3482014-09-25 15:32:34 +03006520 if (ufshcd_is_clkscaling_enabled(hba)) {
6521 hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
6522 "simple_ondemand", NULL);
6523 if (IS_ERR(hba->devfreq)) {
6524 dev_err(hba->dev, "Unable to register with devfreq %ld\n",
6525 PTR_ERR(hba->devfreq));
Wei Yongjun73811c92016-09-28 14:49:42 +00006526 err = PTR_ERR(hba->devfreq);
Sahitya Tummala856b3482014-09-25 15:32:34 +03006527 goto out_remove_scsi_host;
6528 }
6529 /* Suspend devfreq until the UFS device is detected */
6530 devfreq_suspend_device(hba->devfreq);
6531 hba->clk_scaling.window_start_t = 0;
6532 }
6533
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05306534 /* Hold auto suspend until async scan completes */
6535 pm_runtime_get_sync(dev);
6536
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006537 /*
6538 * The device-initialize-sequence hasn't been invoked yet.
6539 * Set the device to power-off state
6540 */
6541 ufshcd_set_ufs_dev_poweroff(hba);
6542
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306543 async_schedule(ufshcd_async_scan, hba);
6544
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306545 return 0;
6546
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306547out_remove_scsi_host:
6548 scsi_remove_host(hba->host);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006549exit_gating:
6550 ufshcd_exit_clk_gating(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306551out_disable:
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006552 hba->is_irq_enabled = false;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306553 scsi_host_put(host);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006554 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306555out_error:
6556 return err;
6557}
6558EXPORT_SYMBOL_GPL(ufshcd_init);
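
/*
 * Typical probe sequence in a bus glue driver, sketched under the
 * assumption that mmio_base and irq were already obtained from the bus;
 * the ufs_glue_probe name is hypothetical:
 *
 *	static int ufs_glue_probe(struct platform_device *pdev)
 *	{
 *		struct ufs_hba *hba;
 *		int err;
 *
 *		err = ufshcd_alloc_host(&pdev->dev, &hba);
 *		if (err)
 *			return err;
 *
 *		err = ufshcd_init(hba, mmio_base, irq);
 *		if (err) {
 *			ufshcd_dealloc_host(hba);
 *			return err;
 *		}
 *
 *		platform_set_drvdata(pdev, hba);
 *		return 0;
 *	}
 */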
6559
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306560MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
6561MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
Vinayak Holikattie0eca632013-02-25 21:44:33 +05306562MODULE_DESCRIPTION("Generic UFS host controller driver Core");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306563MODULE_LICENSE("GPL");
6564MODULE_VERSION(UFSHCD_DRIVER_VERSION);