blob: f3768ec25c2f5fb972303ef4d751e605974bc6ee [file] [log] [blame]
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301/*
Vinayak Holikattie0eca632013-02-25 21:44:33 +05302 * Universal Flash Storage Host controller driver Core
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303 *
4 * This code is based on drivers/scsi/ufs/ufshcd.c
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05305 * Copyright (C) 2011-2013 Samsung India Software Operations
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306 *
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307 * Authors:
8 * Santosh Yaraganavi <santosh.sy@samsung.com>
9 * Vinayak Holikatti <h.vinayak@samsung.com>
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053015 * See the COPYING file in the top-level directory or visit
16 * <http://www.gnu.org/licenses/gpl-2.0.html>
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053017 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053023 * This program is provided "AS IS" and "WITH ALL FAULTS" and
24 * without warranty of any kind. You are solely responsible for
25 * determining the appropriateness of using and distributing
26 * the program and assume all risks associated with your exercise
27 * of rights with respect to the program, including but not limited
28 * to infringement of third party rights, the risks and costs of
29 * program errors, damage to or loss of data, programs or equipment,
30 * and unavailability or interruption of operations. Under no
31 * circumstances will the contributor of this Program be liable for
32 * any damages of any kind arising from your use or distribution of
33 * this program.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053034 */
35
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +053036#include <linux/async.h>
37
Vinayak Holikattie0eca632013-02-25 21:44:33 +053038#include "ufshcd.h"
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +053039#include "unipro.h"
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053040
/* Interrupt sources the driver services by default */
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UIC_POWER_MODE |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02
66
/* Fixed topology/queue limits reported to the SCSI midlayer */
enum {
	UFSHCD_MAX_CHANNEL = 0,
	UFSHCD_MAX_ID = 1,
	UFSHCD_MAX_LUNS = 8,
	UFSHCD_CMD_PER_LUN = 32,
	UFSHCD_CAN_QUEUE = 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};
101
/* Helpers to set/test/clear the error-handling-in-progress flag */
#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

/* Forward declarations for routines defined later in this file */
static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +0530113
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530114/*
115 * ufshcd_wait_for_register - wait for register value to change
116 * @hba - per-adapter interface
117 * @reg - mmio register offset
118 * @mask - mask to apply to read register value
119 * @val - wait condition
120 * @interval_us - polling interval in microsecs
121 * @timeout_ms - timeout in millisecs
122 *
123 * Returns -ETIMEDOUT on error, zero on success
124 */
125static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
126 u32 val, unsigned long interval_us, unsigned long timeout_ms)
127{
128 int err = 0;
129 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
130
131 /* ignore bits that we don't intend to wait on */
132 val = val & mask;
133
134 while ((ufshcd_readl(hba, reg) & mask) != val) {
135 /* wakeup within 50us of expiry */
136 usleep_range(interval_us, interval_us + 50);
137
138 if (time_after(jiffies, timeout)) {
139 if ((ufshcd_readl(hba, reg) & mask) != val)
140 err = -ETIMEDOUT;
141 break;
142 }
143 }
144
145 return err;
146}
147
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530148/**
Seungwon Jeon2fbd0092013-06-26 22:39:27 +0530149 * ufshcd_get_intr_mask - Get the interrupt bit mask
150 * @hba - Pointer to adapter instance
151 *
152 * Returns interrupt bit mask per version
153 */
154static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
155{
156 if (hba->ufs_version == UFSHCI_VERSION_10)
157 return INTERRUPT_MASK_ALL_VER_10;
158 else
159 return INTERRUPT_MASK_ALL_VER_11;
160}
161
162/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530163 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
164 * @hba - Pointer to adapter instance
165 *
166 * Returns UFSHCI version supported by the controller
167 */
168static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
169{
Seungwon Jeonb873a2752013-06-26 22:39:26 +0530170 return ufshcd_readl(hba, REG_UFS_VERSION);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530171}
172
173/**
174 * ufshcd_is_device_present - Check if any device connected to
175 * the host controller
176 * @reg_hcs - host controller status register value
177 *
Venkatraman S73ec5132012-07-10 19:39:23 +0530178 * Returns 1 if device present, 0 if no device detected
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530179 */
180static inline int ufshcd_is_device_present(u32 reg_hcs)
181{
Venkatraman S73ec5132012-07-10 19:39:23 +0530182 return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530183}
184
185/**
186 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
187 * @lrb: pointer to local command reference block
188 *
189 * This function is used to get the OCS field from UTRD
190 * Returns the OCS field in the UTRD
191 */
192static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
193{
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +0530194 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530195}
196
197/**
198 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
199 * @task_req_descp: pointer to utp_task_req_desc structure
200 *
201 * This function is used to get the OCS field from UTMRD
202 * Returns the OCS field in the UTMRD
203 */
204static inline int
205ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
206{
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +0530207 return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530208}
209
210/**
211 * ufshcd_get_tm_free_slot - get a free slot for task management request
212 * @hba: per adapter instance
Sujit Reddy Thummae2933132014-05-26 10:59:12 +0530213 * @free_slot: pointer to variable with available slot value
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530214 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +0530215 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
216 * Returns 0 if free slot is not available, else return 1 with tag value
217 * in @free_slot.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530218 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +0530219static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530220{
Sujit Reddy Thummae2933132014-05-26 10:59:12 +0530221 int tag;
222 bool ret = false;
223
224 if (!free_slot)
225 goto out;
226
227 do {
228 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
229 if (tag >= hba->nutmrs)
230 goto out;
231 } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
232
233 *free_slot = tag;
234 ret = true;
235out:
236 return ret;
237}
238
/**
 * ufshcd_put_tm_slot - release a task management request slot
 * @hba: per adapter instance
 * @slot: slot number obtained from ufshcd_get_tm_free_slot()
 */
static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}
243
244/**
245 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
246 * @hba: per adapter instance
247 * @pos: position of the bit to be cleared
248 */
249static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
250{
Seungwon Jeonb873a2752013-06-26 22:39:26 +0530251 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530252}
253
254/**
255 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
256 * @reg: Register value of host controller status
257 *
258 * Returns integer, 0 on Success and positive value if failed
259 */
260static inline int ufshcd_get_lists_status(u32 reg)
261{
262 /*
263 * The mask 0xFF is for the following HCS register bits
264 * Bit Description
265 * 0 Device Present
266 * 1 UTRLRDY
267 * 2 UTMRLRDY
268 * 3 UCRDY
269 * 4 HEI
270 * 5 DEI
271 * 6-7 reserved
272 */
273 return (((reg) & (0xFF)) >> 1) ^ (0x07);
274}
275
276/**
277 * ufshcd_get_uic_cmd_result - Get the UIC command result
278 * @hba: Pointer to adapter instance
279 *
280 * This function gets the result of UIC command completion
281 * Returns 0 on success, non zero value on error
282 */
283static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
284{
Seungwon Jeonb873a2752013-06-26 22:39:26 +0530285 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530286 MASK_UIC_COMMAND_RESULT;
287}
288
/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * Returns the current contents of the UIC command argument3 register
 * (not an error code — the original comment claiming "0 on success"
 * was misleading).
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
300
301/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530302 * ufshcd_get_req_rsp - returns the TR response transaction type
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530303 * @ucd_rsp_ptr: pointer to response UPIU
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530304 */
305static inline int
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530306ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530307{
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530308 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530309}
310
311/**
312 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
313 * @ucd_rsp_ptr: pointer to response UPIU
314 *
315 * This function gets the response status and scsi_status from response UPIU
316 * Returns the response result code.
317 */
318static inline int
319ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
320{
321 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
322}
323
Seungwon Jeon1c2623c2013-08-31 21:40:19 +0530324/*
325 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
326 * from response UPIU
327 * @ucd_rsp_ptr: pointer to response UPIU
328 *
329 * Return the data segment length.
330 */
331static inline unsigned int
332ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
333{
334 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
335 MASK_RSP_UPIU_DATA_SEG_LEN;
336}
337
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530338/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +0530339 * ufshcd_is_exception_event - Check if the device raised an exception event
340 * @ucd_rsp_ptr: pointer to response UPIU
341 *
342 * The function checks if the device raised an exception event indicated in
343 * the Device Information field of response UPIU.
344 *
345 * Returns true if exception is raised, false otherwise.
346 */
347static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
348{
349 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
350 MASK_RSP_EXCEPTION_EVENT ? true : false;
351}
352
353/**
Seungwon Jeon7d568652013-08-31 21:40:20 +0530354 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530355 * @hba: per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530356 */
357static inline void
Seungwon Jeon7d568652013-08-31 21:40:20 +0530358ufshcd_reset_intr_aggr(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530359{
Seungwon Jeon7d568652013-08-31 21:40:20 +0530360 ufshcd_writel(hba, INT_AGGR_ENABLE |
361 INT_AGGR_COUNTER_AND_TIMER_RESET,
362 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
363}
364
365/**
366 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
367 * @hba: per adapter instance
368 * @cnt: Interrupt aggregation counter threshold
369 * @tmout: Interrupt aggregation timeout value
370 */
371static inline void
372ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
373{
374 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
375 INT_AGGR_COUNTER_THLD_VAL(cnt) |
376 INT_AGGR_TIMEOUT_VAL(tmout),
377 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530378}
379
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 * When run-stop registers are set to 1, it indicates the
 * host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	/* task management list run-stop first, then the transfer list */
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
393
/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	/* writing the enable bit kicks off the controller's init sequence */
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}
402
403/**
404 * ufshcd_is_hba_active - Get controller state
405 * @hba: per adapter instance
406 *
407 * Returns zero if controller is active, 1 otherwise
408 */
409static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
410{
Seungwon Jeonb873a2752013-06-26 22:39:26 +0530411 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530412}
413
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	/* mark the request outstanding before ringing the doorbell */
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}
425
426/**
427 * ufshcd_copy_sense_data - Copy sense data in case of check condition
428 * @lrb - pointer to local reference block
429 */
430static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
431{
432 int len;
Seungwon Jeon1c2623c2013-08-31 21:40:19 +0530433 if (lrbp->sense_buffer &&
434 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530435 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530436 memcpy(lrbp->sense_buffer,
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530437 lrbp->ucd_rsp_ptr->sr.sense_data,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530438 min_t(int, len, SCSI_SENSE_BUFFERSIZE));
439 }
440}
441
442/**
Dolev Raviv68078d52013-07-30 00:35:58 +0530443 * ufshcd_copy_query_response() - Copy the Query Response and the data
444 * descriptor
445 * @hba: per adapter instance
446 * @lrb - pointer to local reference block
447 */
448static
Dolev Ravivc6d4a832014-06-29 09:40:18 +0300449int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Dolev Raviv68078d52013-07-30 00:35:58 +0530450{
451 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
452
Dolev Raviv68078d52013-07-30 00:35:58 +0530453 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
Dolev Raviv68078d52013-07-30 00:35:58 +0530454
Dolev Raviv68078d52013-07-30 00:35:58 +0530455 /* Get the descriptor */
456 if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
Dolev Ravivd44a5f92014-06-29 09:40:17 +0300457 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
Dolev Raviv68078d52013-07-30 00:35:58 +0530458 GENERAL_UPIU_REQUEST_SIZE;
Dolev Ravivc6d4a832014-06-29 09:40:18 +0300459 u16 resp_len;
460 u16 buf_len;
Dolev Raviv68078d52013-07-30 00:35:58 +0530461
462 /* data segment length */
Dolev Ravivc6d4a832014-06-29 09:40:18 +0300463 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
Dolev Raviv68078d52013-07-30 00:35:58 +0530464 MASK_QUERY_DATA_SEG_LEN;
Dolev Ravivc6d4a832014-06-29 09:40:18 +0300465 buf_len = hba->dev_cmd.query.request.upiu_req.length;
466 if (likely(buf_len >= resp_len)) {
467 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
468 } else {
469 dev_warn(hba->dev,
470 "%s: Response size is bigger than buffer",
471 __func__);
472 return -EINVAL;
473 }
Dolev Raviv68078d52013-07-30 00:35:58 +0530474 }
Dolev Ravivc6d4a832014-06-29 09:40:18 +0300475
476 return 0;
Dolev Raviv68078d52013-07-30 00:35:58 +0530477}
478
479/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530480 * ufshcd_hba_capabilities - Read controller capabilities
481 * @hba: per adapter instance
482 */
483static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
484{
Seungwon Jeonb873a2752013-06-26 22:39:26 +0530485 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530486
487 /* nutrs and nutmrs are 0 based values */
488 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
489 hba->nutmrs =
490 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
491}
492
493/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +0530494 * ufshcd_ready_for_uic_cmd - Check if controller is ready
495 * to accept UIC commands
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530496 * @hba: per adapter instance
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +0530497 * Return true on success, else false
498 */
499static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
500{
501 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
502 return true;
503 else
504 return false;
505}
506
507/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +0530508 * ufshcd_get_upmcrs - Get the power mode change request status
509 * @hba: Pointer to adapter instance
510 *
511 * This function gets the UPMCRS field of HCS register
512 * Returns value of UPMCRS field
513 */
514static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
515{
516 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
517}
518
/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	/* only one UIC command may be outstanding at any time */
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args - the arguments must be set before the command itself */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd - this triggers execution of the command */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}
542
543/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +0530544 * ufshcd_wait_for_uic_cmd - Wait complectioin of UIC command
545 * @hba: per adapter instance
546 * @uic_command: UIC command
547 *
548 * Must be called with mutex held.
549 * Returns 0 only if success.
550 */
551static int
552ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
553{
554 int ret;
555 unsigned long flags;
556
557 if (wait_for_completion_timeout(&uic_cmd->done,
558 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
559 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
560 else
561 ret = -ETIMEDOUT;
562
563 spin_lock_irqsave(hba->host->host_lock, flags);
564 hba->active_uic_cmd = NULL;
565 spin_unlock_irqrestore(hba->host->host_lock, flags);
566
567 return ret;
568}
569
570/**
571 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
572 * @hba: per adapter instance
573 * @uic_cmd: UIC command
574 *
575 * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called
576 * with mutex held.
577 * Returns 0 only if success.
578 */
579static int
580__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
581{
582 int ret;
583 unsigned long flags;
584
585 if (!ufshcd_ready_for_uic_cmd(hba)) {
586 dev_err(hba->dev,
587 "Controller not ready to accept UIC commands\n");
588 return -EIO;
589 }
590
591 init_completion(&uic_cmd->done);
592
593 spin_lock_irqsave(hba->host->host_lock, flags);
594 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
595 spin_unlock_irqrestore(hba->host->host_lock, flags);
596
597 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
598
599 return ret;
600}
601
602/**
603 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
604 * @hba: per adapter instance
605 * @uic_cmd: UIC command
606 *
607 * Returns 0 only if success.
608 */
609static int
610ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
611{
612 int ret;
613
614 mutex_lock(&hba->uic_cmd_mutex);
615 ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
616 mutex_unlock(&hba->uic_cmd_mutex);
617
618 return ret;
619}
620
621/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530622 * ufshcd_map_sg - Map scatter-gather list to prdt
623 * @lrbp - pointer to local reference block
624 *
625 * Returns 0 in case of success, non-zero value in case of failure
626 */
627static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
628{
629 struct ufshcd_sg_entry *prd_table;
630 struct scatterlist *sg;
631 struct scsi_cmnd *cmd;
632 int sg_segments;
633 int i;
634
635 cmd = lrbp->cmd;
636 sg_segments = scsi_dma_map(cmd);
637 if (sg_segments < 0)
638 return sg_segments;
639
640 if (sg_segments) {
641 lrbp->utr_descriptor_ptr->prd_table_length =
642 cpu_to_le16((u16) (sg_segments));
643
644 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
645
646 scsi_for_each_sg(cmd, sg, sg_segments, i) {
647 prd_table[i].size =
648 cpu_to_le32(((u32) sg_dma_len(sg))-1);
649 prd_table[i].base_addr =
650 cpu_to_le32(lower_32_bits(sg->dma_address));
651 prd_table[i].upper_addr =
652 cpu_to_le32(upper_32_bits(sg->dma_address));
653 }
654 } else {
655 lrbp->utr_descriptor_ptr->prd_table_length = 0;
656 }
657
658 return 0;
659}
660
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		/*
		 * NOTE(review): on v1.0 hosts only the current
		 * INTERRUPT_MASK_RW_VER_10 bits are carried over;
		 * (set ^ intrs) & intrs equals intrs & ~set, i.e. the
		 * requested bits not already enabled. Presumably the
		 * RW_VER_10 bits are the only read/write bits on v1.0
		 * controllers — confirm against the UFSHCI 1.0 spec.
		 */
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
680
/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		/*
		 * NOTE(review): v1.0 hosts treat the
		 * INTERRUPT_MASK_RW_VER_10 bits separately: those are
		 * kept unless they appear in @intrs, and the remaining
		 * (non-RW) bits are rebuilt from set & intrs. Presumably
		 * mirrors the v1.0 register semantics — confirm against
		 * the UFSHCI 1.0 spec.
		 */
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);

	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
702
703/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530704 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
705 * descriptor according to request
706 * @lrbp: pointer to local reference block
707 * @upiu_flags: flags required in the header
708 * @cmd_dir: requests data direction
709 */
710static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
711 u32 *upiu_flags, enum dma_data_direction cmd_dir)
712{
713 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
714 u32 data_direction;
715 u32 dword_0;
716
717 if (cmd_dir == DMA_FROM_DEVICE) {
718 data_direction = UTP_DEVICE_TO_HOST;
719 *upiu_flags = UPIU_CMD_FLAGS_READ;
720 } else if (cmd_dir == DMA_TO_DEVICE) {
721 data_direction = UTP_HOST_TO_DEVICE;
722 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
723 } else {
724 data_direction = UTP_NO_DATA_TRANSFER;
725 *upiu_flags = UPIU_CMD_FLAGS_NONE;
726 }
727
728 dword_0 = data_direction | (lrbp->command_type
729 << UPIU_COMMAND_TYPE_OFFSET);
730 if (lrbp->intr_cmd)
731 dword_0 |= UTP_REQ_DESC_INT_CMD;
732
733 /* Transfer request descriptor header fields */
734 req_desc->header.dword_0 = cpu_to_le32(dword_0);
735
736 /*
737 * assigning invalid value for command status. Controller
738 * updates OCS on command completion, with the command
739 * status
740 */
741 req_desc->header.dword_2 =
742 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
743}
744
745/**
746 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
747 * for scsi commands
748 * @lrbp - local reference block pointer
749 * @upiu_flags - flags
750 */
751static
752void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
753{
754 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
755
756 /* command descriptor fields */
757 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
758 UPIU_TRANSACTION_COMMAND, upiu_flags,
759 lrbp->lun, lrbp->task_tag);
760 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
761 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
762
763 /* Total EHS length and Data segment length will be zero */
764 ucd_req_ptr->header.dword_2 = 0;
765
766 ucd_req_ptr->sc.exp_data_transfer_len =
767 cpu_to_be32(lrbp->cmd->sdb.length);
768
769 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
770 (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
771}
772
Dolev Raviv68078d52013-07-30 00:35:58 +0530773/**
774 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
775 * for query requsts
776 * @hba: UFS hba
777 * @lrbp: local reference block pointer
778 * @upiu_flags: flags
779 */
780static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
781 struct ufshcd_lrb *lrbp, u32 upiu_flags)
782{
783 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
784 struct ufs_query *query = &hba->dev_cmd.query;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +0530785 u16 len = be16_to_cpu(query->request.upiu_req.length);
Dolev Raviv68078d52013-07-30 00:35:58 +0530786 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
787
788 /* Query request header */
789 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
790 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
791 lrbp->lun, lrbp->task_tag);
792 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
793 0, query->request.query_func, 0, 0);
794
795 /* Data segment length */
796 ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
797 0, 0, len >> 8, (u8)len);
798
799 /* Copy the Query Request buffer as is */
800 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
801 QUERY_OSF_SIZE);
Dolev Raviv68078d52013-07-30 00:35:58 +0530802
803 /* Copy the Descriptor */
Dolev Ravivc6d4a832014-06-29 09:40:18 +0300804 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
805 memcpy(descp, query->descriptor, len);
806
Dolev Raviv68078d52013-07-30 00:35:58 +0530807}
808
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530809static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
810{
811 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
812
813 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
814
815 /* command descriptor fields */
816 ucd_req_ptr->header.dword_0 =
817 UPIU_HEADER_DWORD(
818 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
819}
820
821/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530822 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530823 * @hba - per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530824 * @lrb - pointer to local reference block
825 */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530826static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530827{
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530828 u32 upiu_flags;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530829 int ret = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530830
831 switch (lrbp->command_type) {
832 case UTP_CMD_TYPE_SCSI:
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530833 if (likely(lrbp->cmd)) {
834 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
835 lrbp->cmd->sc_data_direction);
836 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530837 } else {
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530838 ret = -EINVAL;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530839 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530840 break;
841 case UTP_CMD_TYPE_DEV_MANAGE:
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530842 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
Dolev Raviv68078d52013-07-30 00:35:58 +0530843 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
844 ufshcd_prepare_utp_query_req_upiu(
845 hba, lrbp, upiu_flags);
846 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530847 ufshcd_prepare_utp_nop_upiu(lrbp);
848 else
849 ret = -EINVAL;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530850 break;
851 case UTP_CMD_TYPE_UFS:
852 /* For UFS native command implementation */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530853 ret = -ENOTSUPP;
854 dev_err(hba->dev, "%s: UFS native command are not supported\n",
855 __func__);
856 break;
857 default:
858 ret = -ENOTSUPP;
859 dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
860 __func__, lrbp->command_type);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530861 break;
862 } /* end of switch */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530863
864 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530865}
866
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host to which this command was queued
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	/* the block-layer tag doubles as the index into hba->lrb[] */
	tag = cmd->request->tag;

	/* Reject or requeue depending on the adapter state, under host_lock */
	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_RESET:
		/* reset in progress: ask the midlayer to retry later */
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out_unlock;
	case UFSHCD_STATE_ERROR:
		/* unrecoverable: fail the command back immediately */
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	default:
		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
				__func__, hba->ufshcd_state);
		set_host_byte(cmd, DID_BAD_TARGET);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* acquire the tag to make sure device cmds don't use it */
	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
		/*
		 * Dev manage command in progress, requeue the command.
		 * Requeuing the command helps in cases where the request *may*
		 * find different tag instead of waiting for dev manage command
		 * completion.
		 */
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	lrbp = &hba->lrb[tag];

	/* slot was just claimed via lrb_in_use, so it must be free */
	WARN_ON(lrbp->cmd);
	lrbp->cmd = cmd;
	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = cmd->device->lun;
	lrbp->intr_cmd = false;
	lrbp->command_type = UTP_CMD_TYPE_SCSI;

	/* form UPIU before issuing the command */
	ufshcd_compose_upiu(hba, lrbp);
	err = ufshcd_map_sg(lrbp);
	if (err) {
		/* undo the claim so the tag can be reused */
		lrbp->cmd = NULL;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return err;
}
946
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530947static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
948 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
949{
950 lrbp->cmd = NULL;
951 lrbp->sense_bufflen = 0;
952 lrbp->sense_buffer = NULL;
953 lrbp->task_tag = tag;
954 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
955 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
956 lrbp->intr_cmd = true; /* No interrupt aggregation */
957 hba->dev_cmd.type = cmd_type;
958
959 return ufshcd_compose_upiu(hba, lrbp);
960}
961
962static int
963ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
964{
965 int err = 0;
966 unsigned long flags;
967 u32 mask = 1 << tag;
968
969 /* clear outstanding transaction before retry */
970 spin_lock_irqsave(hba->host->host_lock, flags);
971 ufshcd_utrl_clear(hba, tag);
972 spin_unlock_irqrestore(hba->host->host_lock, flags);
973
974 /*
975 * wait for for h/w to clear corresponding bit in door-bell.
976 * max. wait is 1 sec.
977 */
978 err = ufshcd_wait_for_register(hba,
979 REG_UTP_TRANSFER_REQ_DOOR_BELL,
980 mask, ~mask, 1000, 1000);
981
982 return err;
983}
984
Dolev Ravivc6d4a832014-06-29 09:40:18 +0300985static int
986ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
987{
988 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
989
990 /* Get the UPIU response */
991 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
992 UPIU_RSP_CODE_OFFSET;
993 return query_res->response;
994}
995
/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Validates the response UPIU transaction type against the issued device
 * command type and, for query responses, copies the result back.
 *
 * Returns 0 on success, negative error code on an unexpected, rejected,
 * or invalid response.
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int resp;
	int err = 0;

	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		/* A NOP IN is only valid as the reply to a NOP OUT */
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		/* check the device's verdict first, then copy the payload */
		err = ufshcd_check_query_response(hba, lrbp);
		if (!err)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}
1037
/**
 * ufshcd_wait_for_dev_cmd - wait for a device management command to finish
 * @hba: per adapter instance
 * @lrbp: local reference block of the issued command
 * @max_timeout: maximum wait time in milliseconds
 *
 * Returns 0 on success; a non-zero OCS/completion error on device failure;
 * -ETIMEDOUT if the command timed out and could not be cleared; -EAGAIN if
 * it timed out but was cleared, so the caller may retry.
 */
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	int err = 0;
	unsigned long time_left;
	unsigned long flags;

	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
			msecs_to_jiffies(max_timeout));

	/* drop the completion pointer under the lock, then check status */
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->dev_cmd.complete = NULL;
	if (likely(time_left)) {
		err = ufshcd_get_tr_ocs(lrbp);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!time_left) {
		err = -ETIMEDOUT;
		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
	}

	return err;
}
1066
1067/**
1068 * ufshcd_get_dev_cmd_tag - Get device management command tag
1069 * @hba: per-adapter instance
1070 * @tag: pointer to variable with available slot value
1071 *
1072 * Get a free slot and lock it until device management command
1073 * completes.
1074 *
1075 * Returns false if free slot is unavailable for locking, else
1076 * return true with tag value in @tag.
1077 */
1078static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
1079{
1080 int tag;
1081 bool ret = false;
1082 unsigned long tmp;
1083
1084 if (!tag_out)
1085 goto out;
1086
1087 do {
1088 tmp = ~hba->lrb_in_use;
1089 tag = find_last_bit(&tmp, hba->nutrs);
1090 if (tag >= hba->nutrs)
1091 goto out;
1092 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
1093
1094 *tag_out = tag;
1095 ret = true;
1096out:
1097 return ret;
1098}
1099
1100static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
1101{
1102 clear_bit_unlock(tag, &hba->lrb_in_use);
1103}
1104
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba - UFS hba
 * @cmd_type - specifies the type (NOP, Query...)
 * @timeout - timeout in milliseconds (converted via msecs_to_jiffies by
 *	ufshcd_wait_for_dev_cmd)
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 *
 * Returns 0 on success, non-zero error code otherwise.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	struct ufshcd_lrb *lrbp;
	int err;
	int tag;
	struct completion wait;
	unsigned long flags;

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by SCSI request timeout.
	 */
	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));

	init_completion(&wait);
	lrbp = &hba->lrb[tag];
	/* the tag was just claimed, so the slot must not hold a SCSI cmd */
	WARN_ON(lrbp->cmd);
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out_put_tag;

	/* completion signalled by the transfer completion path */
	hba->dev_cmd.complete = &wait;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

out_put_tag:
	ufshcd_put_dev_cmd_tag(hba, tag);
	/* wake anyone sleeping in wait_event() above for this tag */
	wake_up(&hba->dev_cmd.tag_wq);
	return err;
}
1150
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301151/**
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001152 * ufshcd_init_query() - init the query response and request parameters
1153 * @hba: per-adapter instance
1154 * @request: address of the request pointer to be initialized
1155 * @response: address of the response pointer to be initialized
1156 * @opcode: operation to perform
1157 * @idn: flag idn to access
1158 * @index: LU number to access
1159 * @selector: query/flag/descriptor further identification
1160 */
1161static inline void ufshcd_init_query(struct ufs_hba *hba,
1162 struct ufs_query_req **request, struct ufs_query_res **response,
1163 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
1164{
1165 *request = &hba->dev_cmd.query.request;
1166 *response = &hba->dev_cmd.query.response;
1167 memset(*request, 0, sizeof(struct ufs_query_req));
1168 memset(*response, 0, sizeof(struct ufs_query_res));
1169 (*request)->upiu_req.opcode = opcode;
1170 (*request)->upiu_req.idn = idn;
1171 (*request)->upiu_req.index = index;
1172 (*request)->upiu_req.selector = selector;
1173}
1174
1175/**
Dolev Raviv68078d52013-07-30 00:35:58 +05301176 * ufshcd_query_flag() - API function for sending flag query requests
1177 * hba: per-adapter instance
1178 * query_opcode: flag query to perform
1179 * idn: flag idn to access
1180 * flag_res: the flag value after the query request completes
1181 *
1182 * Returns 0 for success, non-zero in case of failure
1183 */
1184static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
1185 enum flag_idn idn, bool *flag_res)
1186{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001187 struct ufs_query_req *request = NULL;
1188 struct ufs_query_res *response = NULL;
1189 int err, index = 0, selector = 0;
Dolev Raviv68078d52013-07-30 00:35:58 +05301190
1191 BUG_ON(!hba);
1192
1193 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001194 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1195 selector);
Dolev Raviv68078d52013-07-30 00:35:58 +05301196
1197 switch (opcode) {
1198 case UPIU_QUERY_OPCODE_SET_FLAG:
1199 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
1200 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
1201 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1202 break;
1203 case UPIU_QUERY_OPCODE_READ_FLAG:
1204 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1205 if (!flag_res) {
1206 /* No dummy reads */
1207 dev_err(hba->dev, "%s: Invalid argument for read request\n",
1208 __func__);
1209 err = -EINVAL;
1210 goto out_unlock;
1211 }
1212 break;
1213 default:
1214 dev_err(hba->dev,
1215 "%s: Expected query flag opcode but got = %d\n",
1216 __func__, opcode);
1217 err = -EINVAL;
1218 goto out_unlock;
1219 }
Dolev Raviv68078d52013-07-30 00:35:58 +05301220
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001221 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
Dolev Raviv68078d52013-07-30 00:35:58 +05301222
1223 if (err) {
1224 dev_err(hba->dev,
1225 "%s: Sending flag query for idn %d failed, err = %d\n",
1226 __func__, idn, err);
1227 goto out_unlock;
1228 }
1229
1230 if (flag_res)
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05301231 *flag_res = (be32_to_cpu(response->upiu_res.value) &
Dolev Raviv68078d52013-07-30 00:35:58 +05301232 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
1233
1234out_unlock:
1235 mutex_unlock(&hba->dev_cmd.lock);
1236 return err;
1237}
1238
1239/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301240 * ufshcd_query_attr - API function for sending attribute requests
1241 * hba: per-adapter instance
1242 * opcode: attribute opcode
1243 * idn: attribute idn to access
1244 * index: index field
1245 * selector: selector field
1246 * attr_val: the attribute value after the query request completes
1247 *
1248 * Returns 0 for success, non-zero in case of failure
1249*/
Sujit Reddy Thummabdbe5d22014-05-26 10:59:11 +05301250static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301251 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
1252{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001253 struct ufs_query_req *request = NULL;
1254 struct ufs_query_res *response = NULL;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301255 int err;
1256
1257 BUG_ON(!hba);
1258
1259 if (!attr_val) {
1260 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
1261 __func__, opcode);
1262 err = -EINVAL;
1263 goto out;
1264 }
1265
1266 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001267 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1268 selector);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301269
1270 switch (opcode) {
1271 case UPIU_QUERY_OPCODE_WRITE_ATTR:
1272 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05301273 request->upiu_req.value = cpu_to_be32(*attr_val);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301274 break;
1275 case UPIU_QUERY_OPCODE_READ_ATTR:
1276 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1277 break;
1278 default:
1279 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
1280 __func__, opcode);
1281 err = -EINVAL;
1282 goto out_unlock;
1283 }
1284
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001285 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301286
1287 if (err) {
1288 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1289 __func__, opcode, idn, err);
1290 goto out_unlock;
1291 }
1292
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05301293 *attr_val = be32_to_cpu(response->upiu_res.value);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301294
1295out_unlock:
1296 mutex_unlock(&hba->dev_cmd.lock);
1297out:
1298 return err;
1299}
1300
1301/**
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001302 * ufshcd_query_descriptor - API function for sending descriptor requests
1303 * hba: per-adapter instance
1304 * opcode: attribute opcode
1305 * idn: attribute idn to access
1306 * index: index field
1307 * selector: selector field
1308 * desc_buf: the buffer that contains the descriptor
1309 * buf_len: length parameter passed to the device
1310 *
1311 * Returns 0 for success, non-zero in case of failure.
1312 * The buf_len parameter will contain, on return, the length parameter
1313 * received on the response.
1314 */
1315int ufshcd_query_descriptor(struct ufs_hba *hba,
1316 enum query_opcode opcode, enum desc_idn idn, u8 index,
1317 u8 selector, u8 *desc_buf, int *buf_len)
1318{
1319 struct ufs_query_req *request = NULL;
1320 struct ufs_query_res *response = NULL;
1321 int err;
1322
1323 BUG_ON(!hba);
1324
1325 if (!desc_buf) {
1326 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
1327 __func__, opcode);
1328 err = -EINVAL;
1329 goto out;
1330 }
1331
1332 if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
1333 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
1334 __func__, *buf_len);
1335 err = -EINVAL;
1336 goto out;
1337 }
1338
1339 mutex_lock(&hba->dev_cmd.lock);
1340 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1341 selector);
1342 hba->dev_cmd.query.descriptor = desc_buf;
1343 request->upiu_req.length = *buf_len;
1344
1345 switch (opcode) {
1346 case UPIU_QUERY_OPCODE_WRITE_DESC:
1347 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1348 break;
1349 case UPIU_QUERY_OPCODE_READ_DESC:
1350 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1351 break;
1352 default:
1353 dev_err(hba->dev,
1354 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
1355 __func__, opcode);
1356 err = -EINVAL;
1357 goto out_unlock;
1358 }
1359
1360 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1361
1362 if (err) {
1363 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1364 __func__, opcode, idn, err);
1365 goto out_unlock;
1366 }
1367
1368 hba->dev_cmd.query.descriptor = NULL;
1369 *buf_len = response->upiu_res.length;
1370
1371out_unlock:
1372 mutex_unlock(&hba->dev_cmd.lock);
1373out:
1374 return err;
1375}
1376
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * All allocations are device-managed (dmam_alloc_coherent/devm_kzalloc),
 * so there is no explicit free path: memory is released automatically
 * when the device is detached.
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
	 * be aligned to 128 bytes as well
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

	/* Allocate memory for local reference block */
	hba->lrb = devm_kzalloc(hba->dev,
				hba->nutrs * sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}
1458
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 * address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 * and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 * into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_cmd_desc *cmd_descp;
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;
	cmd_descp = hba->ucdl_base_addr;

	/* byte offsets of the response UPIU and PRDT within one descriptor */
	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		utrdlp[i].response_upiu_offset =
				cpu_to_le16((response_offset >> 2));
		utrdlp[i].prd_table_offset =
				cpu_to_le16((prdt_offset >> 2));
		utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

		/* cache the CPU-visible pointers for use at I/O time */
		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
		hba->lrb[i].ucd_req_ptr =
			(struct utp_upiu_req *)(cmd_descp + i);
		hba->lrb[i].ucd_rsp_ptr =
			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
		hba->lrb[i].ucd_prdt_ptr =
			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
	}
}
1520
1521/**
1522 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
1523 * @hba: per adapter instance
1524 *
1525 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
1526 * in order to initialize the Unipro link startup procedure.
1527 * Once the Unipro links are up, the device connected to the controller
1528 * is detected.
1529 *
1530 * Returns 0 on success, non-zero value on failure
1531 */
1532static int ufshcd_dme_link_startup(struct ufs_hba *hba)
1533{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301534 struct uic_command uic_cmd = {0};
1535 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301536
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301537 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
1538
1539 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
1540 if (ret)
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05301541 dev_err(hba->dev,
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301542 "dme-link-startup: error code %d\n", ret);
1543 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301544}
1545
1546/**
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05301547 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
1548 * @hba: per adapter instance
1549 * @attr_sel: uic command argument1
1550 * @attr_set: attribute set type as uic command argument2
1551 * @mib_val: setting value as uic command argument3
1552 * @peer: indicate whether peer or local
1553 *
1554 * Returns 0 on success, non-zero value on failure
1555 */
1556int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
1557 u8 attr_set, u32 mib_val, u8 peer)
1558{
1559 struct uic_command uic_cmd = {0};
1560 static const char *const action[] = {
1561 "dme-set",
1562 "dme-peer-set"
1563 };
1564 const char *set = action[!!peer];
1565 int ret;
1566
1567 uic_cmd.command = peer ?
1568 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
1569 uic_cmd.argument1 = attr_sel;
1570 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
1571 uic_cmd.argument3 = mib_val;
1572
1573 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
1574 if (ret)
1575 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
1576 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
1577
1578 return ret;
1579}
1580EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
1581
1582/**
1583 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
1584 * @hba: per adapter instance
1585 * @attr_sel: uic command argument1
1586 * @mib_val: the value of the attribute as returned by the UIC command
1587 * @peer: indicate whether peer or local
1588 *
1589 * Returns 0 on success, non-zero value on failure
1590 */
1591int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
1592 u32 *mib_val, u8 peer)
1593{
1594 struct uic_command uic_cmd = {0};
1595 static const char *const action[] = {
1596 "dme-get",
1597 "dme-peer-get"
1598 };
1599 const char *get = action[!!peer];
1600 int ret;
1601
1602 uic_cmd.command = peer ?
1603 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
1604 uic_cmd.argument1 = attr_sel;
1605
1606 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
1607 if (ret) {
1608 dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
1609 get, UIC_GET_ATTR_ID(attr_sel), ret);
1610 goto out;
1611 }
1612
1613 if (mib_val)
1614 *mib_val = uic_cmd.argument3;
1615out:
1616 return ret;
1617}
1618EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
1619
/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value (written to the PA_PWRMODE attribute)
 *
 * Issues DME_SET of PA_PWRMODE and waits (up to UIC_CMD_TIMEOUT ms) for the
 * power-mode-change completion signalled through hba->pwr_done, then checks
 * the UPMCRS status register.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	struct completion pwr_done;
	unsigned long flags;
	u8 status;
	int ret;

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	init_completion(&pwr_done);

	/* serialize against other UIC commands */
	mutex_lock(&hba->uic_cmd_mutex);

	/* publish the completion pointer under host_lock for the IRQ path */
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->pwr_done = &pwr_done;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret) {
		dev_err(hba->dev,
			"pwr mode change with mode 0x%x uic error %d\n",
			mode, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->pwr_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
			"pwr mode change with mode 0x%x completion timeout\n",
			mode);
		ret = -ETIMEDOUT;
		goto out;
	}

	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr mode change failed, host umpcrs:0x%x\n",
			status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	/* always retract the completion pointer, even on error */
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->pwr_done = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);
	return ret;
}
1677
/**
 * ufshcd_config_max_pwr_mode - Set & Change power mode with
 * maximum capability attribute information.
 * @hba: per adapter instance
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
{
	enum {RX = 0, TX = 1};
	/* defaults assume a single lane, gear 1, fast-auto on both sides */
	u32 lanes[] = {1, 1};
	u32 gear[] = {1, 1};
	u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
	int ret;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
	if (!gear[RX]) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
		pwr[RX] = SLOWAUTO_MODE;
	}

	/* the peer's RX gear capability is read into the local TX slot */
	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
	if (!gear[TX]) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				    &gear[TX]);
		pwr[TX] = SLOWAUTO_MODE;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
	if (pwr[RX] == FASTAUTO_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
	if (pwr[TX] == FASTAUTO_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);

	if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);

	/* RX mode occupies the high nibble, TX the low nibble */
	ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
	if (ret)
		dev_err(hba->dev,
			"pwr_mode: power mode change failed %d\n", ret);

	return ret;
}
1741
1742/**
Dolev Raviv68078d52013-07-30 00:35:58 +05301743 * ufshcd_complete_dev_init() - checks device readiness
1744 * hba: per-adapter instance
1745 *
1746 * Set fDeviceInit flag and poll until device toggles it.
1747 */
1748static int ufshcd_complete_dev_init(struct ufs_hba *hba)
1749{
1750 int i, retries, err = 0;
1751 bool flag_res = 1;
1752
1753 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
1754 /* Set the fDeviceInit flag */
1755 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
1756 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
1757 if (!err || err == -ETIMEDOUT)
1758 break;
1759 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
1760 }
1761 if (err) {
1762 dev_err(hba->dev,
1763 "%s setting fDeviceInit flag failed with error %d\n",
1764 __func__, err);
1765 goto out;
1766 }
1767
1768 /* poll for max. 100 iterations for fDeviceInit flag to clear */
1769 for (i = 0; i < 100 && !err && flag_res; i++) {
1770 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
1771 err = ufshcd_query_flag(hba,
1772 UPIU_QUERY_OPCODE_READ_FLAG,
1773 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
1774 if (!err || err == -ETIMEDOUT)
1775 break;
1776 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
1777 err);
1778 }
1779 }
1780 if (err)
1781 dev_err(hba->dev,
1782 "%s reading fDeviceInit flag failed with error %d\n",
1783 __func__, err);
1784 else if (flag_res)
1785 dev_err(hba->dev,
1786 "%s fDeviceInit was not cleared by the device\n",
1787 __func__);
1788
1789out:
1790 return err;
1791}
1792
1793/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301794 * ufshcd_make_hba_operational - Make UFS controller operational
1795 * @hba: per adapter instance
1796 *
1797 * To bring UFS host controller to operational state,
1798 * 1. Check if device is present
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301799 * 2. Enable required interrupts
1800 * 3. Configure interrupt aggregation
1801 * 4. Program UTRL and UTMRL base addres
1802 * 5. Configure run-stop-registers
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301803 *
1804 * Returns 0 on success, non-zero value on failure
1805 */
1806static int ufshcd_make_hba_operational(struct ufs_hba *hba)
1807{
1808 int err = 0;
1809 u32 reg;
1810
1811 /* check if device present */
Seungwon Jeonb873a2752013-06-26 22:39:26 +05301812 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
Venkatraman S73ec5132012-07-10 19:39:23 +05301813 if (!ufshcd_is_device_present(reg)) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05301814 dev_err(hba->dev, "cc: Device not present\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301815 err = -ENXIO;
1816 goto out;
1817 }
1818
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301819 /* Enable required interrupts */
1820 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
1821
1822 /* Configure interrupt aggregation */
Seungwon Jeon7d568652013-08-31 21:40:20 +05301823 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301824
1825 /* Configure UTRL and UTMRL base address registers */
1826 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
1827 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
1828 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
1829 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
1830 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
1831 REG_UTP_TASK_REQ_LIST_BASE_L);
1832 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
1833 REG_UTP_TASK_REQ_LIST_BASE_H);
1834
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301835 /*
1836 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
1837 * DEI, HEI bits must be 0
1838 */
1839 if (!(ufshcd_get_lists_status(reg))) {
1840 ufshcd_enable_run_stop_reg(hba);
1841 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05301842 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301843 "Host controller not ready to process requests");
1844 err = -EIO;
1845 goto out;
1846 }
1847
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301848out:
1849 return err;
1850}
1851
1852/**
1853 * ufshcd_hba_enable - initialize the controller
1854 * @hba: per adapter instance
1855 *
1856 * The controller resets itself and controller firmware initialization
1857 * sequence kicks off. When controller is ready it will set
1858 * the Host Controller Enable bit to 1.
1859 *
1860 * Returns 0 on success, non-zero value on failure
1861 */
1862static int ufshcd_hba_enable(struct ufs_hba *hba)
1863{
1864 int retry;
1865
1866 /*
1867 * msleep of 1 and 5 used in this function might result in msleep(20),
1868 * but it was necessary to send the UFS FPGA to reset mode during
1869 * development and testing of this driver. msleep can be changed to
1870 * mdelay and retry count can be reduced based on the controller.
1871 */
1872 if (!ufshcd_is_hba_active(hba)) {
1873
1874 /* change controller state to "reset state" */
1875 ufshcd_hba_stop(hba);
1876
1877 /*
1878 * This delay is based on the testing done with UFS host
1879 * controller FPGA. The delay can be changed based on the
1880 * host controller used.
1881 */
1882 msleep(5);
1883 }
1884
1885 /* start controller initialization sequence */
1886 ufshcd_hba_start(hba);
1887
1888 /*
1889 * To initialize a UFS host controller HCE bit must be set to 1.
1890 * During initialization the HCE bit value changes from 1->0->1.
1891 * When the host controller completes initialization sequence
1892 * it sets the value of HCE bit to 1. The same HCE bit is read back
1893 * to check if the controller has completed initialization sequence.
1894 * So without this delay the value HCE = 1, set in the previous
1895 * instruction might be read back.
1896 * This delay can be changed based on the controller.
1897 */
1898 msleep(1);
1899
1900 /* wait for the host controller to complete initialization */
1901 retry = 10;
1902 while (ufshcd_is_hba_active(hba)) {
1903 if (retry) {
1904 retry--;
1905 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05301906 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301907 "Controller enable failed\n");
1908 return -EIO;
1909 }
1910 msleep(5);
1911 }
1912 return 0;
1913}
1914
1915/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301916 * ufshcd_link_startup - Initialize unipro link startup
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301917 * @hba: per adapter instance
1918 *
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301919 * Returns 0 for success, non-zero in case of failure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301920 */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301921static int ufshcd_link_startup(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301922{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301923 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301924
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301925 /* enable UIC related interrupts */
1926 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301927
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05301928 ret = ufshcd_dme_link_startup(hba);
1929 if (ret)
1930 goto out;
1931
1932 ret = ufshcd_make_hba_operational(hba);
1933
1934out:
1935 if (ret)
1936 dev_err(hba->dev, "link startup failed %d\n", ret);
1937 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301938}
1939
1940/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301941 * ufshcd_verify_dev_init() - Verify device initialization
1942 * @hba: per-adapter instance
1943 *
1944 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
1945 * device Transport Protocol (UTP) layer is ready after a reset.
1946 * If the UTP layer at the device side is not initialized, it may
1947 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
1948 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
1949 */
1950static int ufshcd_verify_dev_init(struct ufs_hba *hba)
1951{
1952 int err = 0;
1953 int retries;
1954
1955 mutex_lock(&hba->dev_cmd.lock);
1956 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
1957 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
1958 NOP_OUT_TIMEOUT);
1959
1960 if (!err || err == -ETIMEDOUT)
1961 break;
1962
1963 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
1964 }
1965 mutex_unlock(&hba->dev_cmd.lock);
1966
1967 if (err)
1968 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
1969 return err;
1970}
1971
1972/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301973 * ufshcd_slave_alloc - handle initial SCSI device configurations
1974 * @sdev: pointer to SCSI device
1975 *
1976 * Returns success
1977 */
1978static int ufshcd_slave_alloc(struct scsi_device *sdev)
1979{
1980 struct ufs_hba *hba;
1981
1982 hba = shost_priv(sdev->host);
1983 sdev->tagged_supported = 1;
1984
1985 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
1986 sdev->use_10_for_ms = 1;
1987 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
1988
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05301989 /* allow SCSI layer to restart the device in case of errors */
1990 sdev->allow_restart = 1;
1991
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301992 /*
1993 * Inform SCSI Midlayer that the LUN queue depth is same as the
1994 * controller queue depth. If a LUN queue depth is less than the
1995 * controller queue depth and if the LUN reports
1996 * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
1997 * with scsi_adjust_queue_depth.
1998 */
1999 scsi_activate_tcq(sdev, hba->nutrs);
2000 return 0;
2001}
2002
2003/**
2004 * ufshcd_slave_destroy - remove SCSI device configurations
2005 * @sdev: pointer to SCSI device
2006 */
2007static void ufshcd_slave_destroy(struct scsi_device *sdev)
2008{
2009 struct ufs_hba *hba;
2010
2011 hba = shost_priv(sdev->host);
2012 scsi_deactivate_tcq(sdev, hba->nutrs);
2013}
2014
/**
 * ufshcd_task_req_compl - handle task management request completion
 * @hba: per adapter instance
 * @index: index of the completed request
 * @resp: task management service response
 *
 * Returns non-zero value on error, zero on success
 */
static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_rsp *task_rsp_upiup;
	unsigned long flags;
	int ocs_value;
	int task_result;

	/* host lock guards the outstanding_tasks bitmap update below */
	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Clear completed tasks from outstanding_tasks */
	__clear_bit(index, &hba->outstanding_tasks);

	task_req_descp = hba->utmrdl_base_addr;
	/* overall command status (OCS) of the task management descriptor */
	ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);

	if (ocs_value == OCS_SUCCESS) {
		task_rsp_upiup = (struct utp_upiu_task_rsp *)
				task_req_descp[index].task_rsp_upiu;
		/* service response lives in dword_1 of the response UPIU */
		task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
		task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
		/* @resp is optional; skip when caller passed NULL */
		if (resp)
			*resp = (u8)task_result;
	} else {
		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
				__func__, ocs_value);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* OCS_SUCCESS is the zero/success value; anything else is an error */
	return ocs_value;
}
2054
2055/**
2056 * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
2057 * SAM_STAT_TASK_SET_FULL SCSI command status.
2058 * @cmd: pointer to SCSI command
2059 */
2060static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
2061{
2062 struct ufs_hba *hba;
2063 int i;
2064 int lun_qdepth = 0;
2065
2066 hba = shost_priv(cmd->device->host);
2067
2068 /*
2069 * LUN queue depth can be obtained by counting outstanding commands
2070 * on the LUN.
2071 */
2072 for (i = 0; i < hba->nutrs; i++) {
2073 if (test_bit(i, &hba->outstanding_reqs)) {
2074
2075 /*
2076 * Check if the outstanding command belongs
2077 * to the LUN which reported SAM_STAT_TASK_SET_FULL.
2078 */
2079 if (cmd->device->lun == hba->lrb[i].lun)
2080 lun_qdepth++;
2081 }
2082 }
2083
2084 /*
2085 * LUN queue depth will be total outstanding commands, except the
2086 * command for which the LUN reported SAM_STAT_TASK_SET_FULL.
2087 */
2088 scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
2089}
2090
/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value base on SCSI command status
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
		/* fallthrough - result is composed the same way as GOOD */
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 |
			  COMMAND_COMPLETE << 8 |
			  scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
		/*
		 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
		 * depth needs to be adjusted to the exact number of
		 * outstanding commands the LUN can handle at any given time.
		 */
		ufshcd_adjust_lun_qdepth(lrbp->cmd);
		/* fallthrough - sense data and status flow as BUSY/ABORTED */
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		/* unknown SCSI status: report a host-side error */
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}
2130
2131/**
2132 * ufshcd_transfer_rsp_status - Get overall status of the response
2133 * @hba: per adapter instance
2134 * @lrb: pointer to local reference block of completed command
2135 *
2136 * Returns result of the command to notify SCSI midlayer
2137 */
2138static inline int
2139ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2140{
2141 int result = 0;
2142 int scsi_status;
2143 int ocs;
2144
2145 /* overall command status of utrd */
2146 ocs = ufshcd_get_tr_ocs(lrbp);
2147
2148 switch (ocs) {
2149 case OCS_SUCCESS:
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302150 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302151
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302152 switch (result) {
2153 case UPIU_TRANSACTION_RESPONSE:
2154 /*
2155 * get the response UPIU result to extract
2156 * the SCSI command status
2157 */
2158 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
2159
2160 /*
2161 * get the result based on SCSI status response
2162 * to notify the SCSI midlayer of the command status
2163 */
2164 scsi_status = result & MASK_SCSI_STATUS;
2165 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302166
2167 if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
2168 schedule_work(&hba->eeh_work);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302169 break;
2170 case UPIU_TRANSACTION_REJECT_UPIU:
2171 /* TODO: handle Reject UPIU Response */
2172 result = DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302173 dev_err(hba->dev,
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302174 "Reject UPIU not fully implemented\n");
2175 break;
2176 default:
2177 result = DID_ERROR << 16;
2178 dev_err(hba->dev,
2179 "Unexpected request response code = %x\n",
2180 result);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302181 break;
2182 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302183 break;
2184 case OCS_ABORTED:
2185 result |= DID_ABORT << 16;
2186 break;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05302187 case OCS_INVALID_COMMAND_STATUS:
2188 result |= DID_REQUEUE << 16;
2189 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302190 case OCS_INVALID_CMD_TABLE_ATTR:
2191 case OCS_INVALID_PRDT_ATTR:
2192 case OCS_MISMATCH_DATA_BUF_SIZE:
2193 case OCS_MISMATCH_RESP_UPIU_SIZE:
2194 case OCS_PEER_COMM_FAILURE:
2195 case OCS_FATAL_ERROR:
2196 default:
2197 result |= DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302198 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302199 "OCS error from controller = %x\n", ocs);
2200 break;
2201 } /* end of switch */
2202
2203 return result;
2204}
2205
2206/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302207 * ufshcd_uic_cmd_compl - handle completion of uic command
2208 * @hba: per adapter instance
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302209 * @intr_status: interrupt status generated by the controller
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302210 */
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302211static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302212{
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302213 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302214 hba->active_uic_cmd->argument2 |=
2215 ufshcd_get_uic_cmd_result(hba);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302216 hba->active_uic_cmd->argument3 =
2217 ufshcd_get_dme_attr_val(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302218 complete(&hba->active_uic_cmd->done);
2219 }
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302220
2221 if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
2222 complete(hba->pwr_done);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302223}
2224
2225/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302226 * ufshcd_transfer_req_compl - handle SCSI and query command completion
2227 * @hba: per adapter instance
2228 */
2229static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
2230{
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302231 struct ufshcd_lrb *lrbp;
2232 struct scsi_cmnd *cmd;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302233 unsigned long completed_reqs;
2234 u32 tr_doorbell;
2235 int result;
2236 int index;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302237 bool int_aggr_reset = false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302238
Seungwon Jeonb873a2752013-06-26 22:39:26 +05302239 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302240 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
2241
2242 for (index = 0; index < hba->nutrs; index++) {
2243 if (test_bit(index, &completed_reqs)) {
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302244 lrbp = &hba->lrb[index];
2245 cmd = lrbp->cmd;
2246 /*
2247 * Don't skip resetting interrupt aggregation counters
2248 * if a regular command is present.
2249 */
2250 int_aggr_reset |= !lrbp->intr_cmd;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302251
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302252 if (cmd) {
2253 result = ufshcd_transfer_rsp_status(hba, lrbp);
2254 scsi_dma_unmap(cmd);
2255 cmd->result = result;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302256 /* Mark completed command as NULL in LRB */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302257 lrbp->cmd = NULL;
2258 clear_bit_unlock(index, &hba->lrb_in_use);
2259 /* Do not touch lrbp after scsi done */
2260 cmd->scsi_done(cmd);
2261 } else if (lrbp->command_type ==
2262 UTP_CMD_TYPE_DEV_MANAGE) {
2263 if (hba->dev_cmd.complete)
2264 complete(hba->dev_cmd.complete);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302265 }
2266 } /* end of if */
2267 } /* end of for */
2268
2269 /* clear corresponding bits of completed commands */
2270 hba->outstanding_reqs ^= completed_reqs;
2271
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302272 /* we might have free'd some tags above */
2273 wake_up(&hba->dev_cmd.tag_wq);
2274
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302275 /* Reset interrupt aggregation counters */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302276 if (int_aggr_reset)
Seungwon Jeon7d568652013-08-31 21:40:20 +05302277 ufshcd_reset_intr_aggr(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302278}
2279
2280/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302281 * ufshcd_disable_ee - disable exception event
2282 * @hba: per-adapter instance
2283 * @mask: exception event to disable
2284 *
2285 * Disables exception event in the device so that the EVENT_ALERT
2286 * bit is not set.
2287 *
2288 * Returns zero on success, non-zero error value on failure.
2289 */
2290static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
2291{
2292 int err = 0;
2293 u32 val;
2294
2295 if (!(hba->ee_ctrl_mask & mask))
2296 goto out;
2297
2298 val = hba->ee_ctrl_mask & ~mask;
2299 val &= 0xFFFF; /* 2 bytes */
2300 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
2301 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
2302 if (!err)
2303 hba->ee_ctrl_mask &= ~mask;
2304out:
2305 return err;
2306}
2307
2308/**
2309 * ufshcd_enable_ee - enable exception event
2310 * @hba: per-adapter instance
2311 * @mask: exception event to enable
2312 *
2313 * Enable corresponding exception event in the device to allow
2314 * device to alert host in critical scenarios.
2315 *
2316 * Returns zero on success, non-zero error value on failure.
2317 */
2318static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
2319{
2320 int err = 0;
2321 u32 val;
2322
2323 if (hba->ee_ctrl_mask & mask)
2324 goto out;
2325
2326 val = hba->ee_ctrl_mask | mask;
2327 val &= 0xFFFF; /* 2 bytes */
2328 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
2329 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
2330 if (!err)
2331 hba->ee_ctrl_mask |= mask;
2332out:
2333 return err;
2334}
2335
2336/**
2337 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
2338 * @hba: per-adapter instance
2339 *
2340 * Allow device to manage background operations on its own. Enabling
2341 * this might lead to inconsistent latencies during normal data transfers
2342 * as the device is allowed to manage its own way of handling background
2343 * operations.
2344 *
2345 * Returns zero on success, non-zero on failure.
2346 */
2347static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
2348{
2349 int err = 0;
2350
2351 if (hba->auto_bkops_enabled)
2352 goto out;
2353
2354 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2355 QUERY_FLAG_IDN_BKOPS_EN, NULL);
2356 if (err) {
2357 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
2358 __func__, err);
2359 goto out;
2360 }
2361
2362 hba->auto_bkops_enabled = true;
2363
2364 /* No need of URGENT_BKOPS exception from the device */
2365 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
2366 if (err)
2367 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
2368 __func__, err);
2369out:
2370 return err;
2371}
2372
2373/**
2374 * ufshcd_disable_auto_bkops - block device in doing background operations
2375 * @hba: per-adapter instance
2376 *
2377 * Disabling background operations improves command response latency but
2378 * has drawback of device moving into critical state where the device is
2379 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
2380 * host is idle so that BKOPS are managed effectively without any negative
2381 * impacts.
2382 *
2383 * Returns zero on success, non-zero on failure.
2384 */
2385static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
2386{
2387 int err = 0;
2388
2389 if (!hba->auto_bkops_enabled)
2390 goto out;
2391
2392 /*
2393 * If host assisted BKOPs is to be enabled, make sure
2394 * urgent bkops exception is allowed.
2395 */
2396 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
2397 if (err) {
2398 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
2399 __func__, err);
2400 goto out;
2401 }
2402
2403 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
2404 QUERY_FLAG_IDN_BKOPS_EN, NULL);
2405 if (err) {
2406 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
2407 __func__, err);
2408 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
2409 goto out;
2410 }
2411
2412 hba->auto_bkops_enabled = false;
2413out:
2414 return err;
2415}
2416
/**
 * ufshcd_force_reset_auto_bkops - force enable of auto bkops
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to default value. The s/w tracking variables should be updated
 * as well. Do this by forcing enable of auto bkops.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	/* clear the flag so ufshcd_enable_auto_bkops() below is not a no-op */
	hba->auto_bkops_enabled = false;
	/* expect the urgent-bkops exception until bkops is enabled again */
	hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
	ufshcd_enable_auto_bkops(hba);
}
2431
/*
 * Read the bkops status attribute (QUERY_ATTR_IDN_BKOPS_STATUS) from the
 * device into @status. Returns the result of the query request.
 */
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}
2437
2438/**
2439 * ufshcd_urgent_bkops - handle urgent bkops exception event
2440 * @hba: per-adapter instance
2441 *
2442 * Enable fBackgroundOpsEn flag in the device to permit background
2443 * operations.
2444 */
2445static int ufshcd_urgent_bkops(struct ufs_hba *hba)
2446{
2447 int err;
2448 u32 status = 0;
2449
2450 err = ufshcd_get_bkops_status(hba, &status);
2451 if (err) {
2452 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
2453 __func__, err);
2454 goto out;
2455 }
2456
2457 status = status & 0xF;
2458
2459 /* handle only if status indicates performance impact or critical */
2460 if (status >= BKOPS_STATUS_PERF_IMPACT)
2461 err = ufshcd_enable_auto_bkops(hba);
2462out:
2463 return err;
2464}
2465
/*
 * Read the exception event status attribute (QUERY_ATTR_IDN_EE_STATUS)
 * from the device into @status. Returns the result of the query request.
 */
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}
2471
2472/**
2473 * ufshcd_exception_event_handler - handle exceptions raised by device
2474 * @work: pointer to work data
2475 *
2476 * Read bExceptionEventStatus attribute from the device and handle the
2477 * exception event accordingly.
2478 */
2479static void ufshcd_exception_event_handler(struct work_struct *work)
2480{
2481 struct ufs_hba *hba;
2482 int err;
2483 u32 status = 0;
2484 hba = container_of(work, struct ufs_hba, eeh_work);
2485
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05302486 pm_runtime_get_sync(hba->dev);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302487 err = ufshcd_get_ee_status(hba, &status);
2488 if (err) {
2489 dev_err(hba->dev, "%s: failed to get exception status %d\n",
2490 __func__, err);
2491 goto out;
2492 }
2493
2494 status &= hba->ee_ctrl_mask;
2495 if (status & MASK_EE_URGENT_BKOPS) {
2496 err = ufshcd_urgent_bkops(hba);
2497 if (err)
2498 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
2499 __func__, err);
2500 }
2501out:
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05302502 pm_runtime_put_sync(hba->dev);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05302503 return;
2504}
2505
/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
static void ufshcd_err_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	unsigned long flags;
	u32 err_xfer = 0;	/* tags whose transfer reqs we failed to clear */
	u32 err_tm = 0;		/* tags whose TM reqs we failed to clear */
	int err = 0;
	int tag;

	hba = container_of(work, struct ufs_hba, eh_work);

	/* keep the device powered while recovery is in progress */
	pm_runtime_get_sync(hba->dev);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
		/* a reset/recovery is already under way; nothing to do */
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto out;
	}

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);

	/* Complete requests that have door-bell cleared by h/w */
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* Clear pending transfer requests */
	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
		if (ufshcd_clear_cmd(hba, tag))
			err_xfer |= 1 << tag;

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
		if (ufshcd_clear_tm_cmd(hba, tag))
			err_tm |= 1 << tag;

	/* Complete the requests that are cleared by s/w */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* Fatal errors need reset */
	if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
			((hba->saved_err & UIC_ERROR) &&
			(hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
		err = ufshcd_reset_and_restore(hba);
		if (err) {
			dev_err(hba->dev, "%s: reset and restore failed\n",
					__func__);
			hba->ufshcd_state = UFSHCD_STATE_ERROR;
		}
		/*
		 * Inform scsi mid-layer that we did reset and allow to handle
		 * Unit Attention properly.
		 */
		scsi_report_bus_reset(hba->host, 0);
		/* sticky error masks were consumed by the reset above */
		hba->saved_err = 0;
		hba->saved_uic_err = 0;
	}
	ufshcd_clear_eh_in_progress(hba);

out:
	/* resume command submission blocked by ufshcd_check_errors() */
	scsi_unblock_requests(hba->host);
	pm_runtime_put_sync(hba->dev);
}
2577
/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 *
 * Reads the per-layer UIC error-code registers and ORs the matching
 * UFSHCD_UIC_* flags into hba->uic_error. Does not clear previously
 * set flags; the caller resets hba->uic_error first (see
 * ufshcd_check_errors()).
 */
static void ufshcd_update_uic_error(struct ufs_hba *hba)
{
	u32 reg;

	/* PA_INIT_ERROR is fatal and needs UIC reset */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;

	/* UIC NL/TL/DME errors needs software retry */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_NL_ERROR;

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_TL_ERROR;

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_DME_ERROR;

	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
			__func__, hba->uic_error);
}
2607
/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 *
 * Inspects hba->errors (latched by ufshcd_sl_intr()) and, for fatal or
 * UIC errors, blocks further SCSI requests and schedules the error
 * handler work. Called from interrupt context.
 * NOTE(review): appears to rely on the caller holding the host lock
 * (ufshcd_intr() takes it) — confirm before restructuring.
 */
static void ufshcd_check_errors(struct ufs_hba *hba)
{
	bool queue_eh_work = false;

	if (hba->errors & INT_FATAL_ERRORS)
		queue_eh_work = true;

	if (hba->errors & UIC_ERROR) {
		/* rebuild the UIC error flags from the h/w registers */
		hba->uic_error = 0;
		ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (queue_eh_work) {
		/* handle fatal errors only when link is functional */
		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
			/* block commands from scsi mid-layer */
			scsi_block_requests(hba->host);

			/* transfer error masks to sticky bits */
			hba->saved_err |= hba->errors;
			hba->saved_uic_err |= hba->uic_error;

			hba->ufshcd_state = UFSHCD_STATE_ERROR;
			schedule_work(&hba->eh_work);
		}
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
}
2647
/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 *
 * XOR-ing the doorbell with the outstanding-task mask yields a bit set
 * for every task management request that was issued but whose doorbell
 * bit the controller has since cleared, i.e. the completed ones. The
 * waiter in ufshcd_issue_tm_cmd() tests its slot bit in tm_condition.
 */
static void ufshcd_tmc_handler(struct ufs_hba *hba)
{
	u32 tm_doorbell;

	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
	wake_up(&hba->tm_wq);
}
2660
/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 *
 * Dispatches the already-read (and cleared) interrupt status to the
 * individual handlers. Errors are latched into hba->errors and handled
 * first, before any completion processing.
 */
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	hba->errors = UFSHCD_ERROR_MASK & intr_status;
	if (hba->errors)
		ufshcd_check_errors(hba);

	if (intr_status & UFSHCD_UIC_MASK)
		ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		ufshcd_transfer_req_compl(hba);
}
2681
2682/**
2683 * ufshcd_intr - Main interrupt service routine
2684 * @irq: irq number
2685 * @__hba: pointer to adapter instance
2686 *
2687 * Returns IRQ_HANDLED - If interrupt is valid
2688 * IRQ_NONE - If invalid interrupt
2689 */
2690static irqreturn_t ufshcd_intr(int irq, void *__hba)
2691{
2692 u32 intr_status;
2693 irqreturn_t retval = IRQ_NONE;
2694 struct ufs_hba *hba = __hba;
2695
2696 spin_lock(hba->host->host_lock);
Seungwon Jeonb873a2752013-06-26 22:39:26 +05302697 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302698
2699 if (intr_status) {
Seungwon Jeon261ea452013-06-26 22:39:28 +05302700 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302701 ufshcd_sl_intr(hba, intr_status);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302702 retval = IRQ_HANDLED;
2703 }
2704 spin_unlock(hba->host->host_lock);
2705 return retval;
2706}
2707
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05302708static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
2709{
2710 int err = 0;
2711 u32 mask = 1 << tag;
2712 unsigned long flags;
2713
2714 if (!test_bit(tag, &hba->outstanding_tasks))
2715 goto out;
2716
2717 spin_lock_irqsave(hba->host->host_lock, flags);
2718 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
2719 spin_unlock_irqrestore(hba->host->host_lock, flags);
2720
2721 /* poll for max. 1 sec to clear door bell register by h/w */
2722 err = ufshcd_wait_for_register(hba,
2723 REG_UTP_TASK_REQ_DOOR_BELL,
2724 mask, 0, 1000, 1000);
2725out:
2726 return err;
2727}
2728
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302729/**
2730 * ufshcd_issue_tm_cmd - issues task management commands to controller
2731 * @hba: per adapter instance
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05302732 * @lun_id: LUN ID to which TM command is sent
2733 * @task_id: task ID to which the TM command is applicable
2734 * @tm_function: task management function opcode
2735 * @tm_response: task management service response return value
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302736 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05302737 * Returns non-zero value on error, zero on success.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302738 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05302739static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
2740 u8 tm_function, u8 *tm_response)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302741{
2742 struct utp_task_req_desc *task_req_descp;
2743 struct utp_upiu_task_req *task_req_upiup;
2744 struct Scsi_Host *host;
2745 unsigned long flags;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05302746 int free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302747 int err;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05302748 int task_tag;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302749
2750 host = hba->host;
2751
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05302752 /*
2753 * Get free slot, sleep if slots are unavailable.
2754 * Even though we use wait_event() which sleeps indefinitely,
2755 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
2756 */
2757 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
2758
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302759 spin_lock_irqsave(host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302760 task_req_descp = hba->utmrdl_base_addr;
2761 task_req_descp += free_slot;
2762
2763 /* Configure task request descriptor */
2764 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
2765 task_req_descp->header.dword_2 =
2766 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2767
2768 /* Configure task request UPIU */
2769 task_req_upiup =
2770 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05302771 task_tag = hba->nutrs + free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302772 task_req_upiup->header.dword_0 =
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302773 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05302774 lun_id, task_tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302775 task_req_upiup->header.dword_1 =
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302776 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302777
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05302778 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
2779 task_req_upiup->input_param2 = cpu_to_be32(task_id);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302780
2781 /* send command to the controller */
2782 __set_bit(free_slot, &hba->outstanding_tasks);
Seungwon Jeonb873a2752013-06-26 22:39:26 +05302783 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302784
2785 spin_unlock_irqrestore(host->host_lock, flags);
2786
2787 /* wait until the task management command is completed */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05302788 err = wait_event_timeout(hba->tm_wq,
2789 test_bit(free_slot, &hba->tm_condition),
2790 msecs_to_jiffies(TM_CMD_TIMEOUT));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302791 if (!err) {
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05302792 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
2793 __func__, tm_function);
2794 if (ufshcd_clear_tm_cmd(hba, free_slot))
2795 dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
2796 __func__, free_slot);
2797 err = -ETIMEDOUT;
2798 } else {
2799 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302800 }
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05302801
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302802 clear_bit(free_slot, &hba->tm_condition);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05302803 ufshcd_put_tm_slot(hba, free_slot);
2804 wake_up(&hba->tm_tag_wq);
2805
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302806 return err;
2807}
2808
/**
 * ufshcd_eh_device_reset_handler - device reset handler registered to
 *                                    scsi layer.
 * @cmd: SCSI command pointer
 *
 * Issues a UFS_LOGICAL_RESET TM command to the command's LUN, then
 * clears and completes every request still outstanding on that LUN.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned int tag;
	u32 pos;
	int err;
	/* 0xF is not a valid TM response, so a missing reply reads as failure */
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	unsigned long flags;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	lrbp = &hba->lrb[tag];
	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;
		goto out;
	}

	/* clear the commands that were pending for corresponding LUN */
	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
		if (hba->lrb[pos].lun == lrbp->lun) {
			err = ufshcd_clear_cmd(hba, pos);
			if (err)
				break;
		}
	}
	/* complete the cleared requests in s/w */
	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	spin_unlock_irqrestore(host->host_lock, flags);
out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}
	return err;
}
2859
/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Abort the pending command in device by sending UFS_ABORT_TASK task management
 * command, and in host controller by clearing the door-bell register. There can
 * be race between controller sending the command to the device while abort is
 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
 * really issued and then try to abort it.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned long flags;
	unsigned int tag;
	int err = 0;
	int poll_cnt;
	/* 0xF is not a valid TM response, so a missing reply reads as failure */
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	/* If command is already aborted/completed, return SUCCESS */
	if (!(test_bit(tag, &hba->outstanding_reqs)))
		goto out;

	lrbp = &hba->lrb[tag];
	/* poll with UFS_QUERY_TASK until the command settles in the device */
	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			u32 reg;

			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			if (reg & (1 << tag)) {
				/* sleep for max. 200us to stabilize */
				usleep_range(100, 200);
				continue;
			}
			/* command completed already */
			goto out;
		} else {
			if (!err)
				err = resp; /* service response error */
			goto out;
		}
	}

	if (!poll_cnt) {
		err = -EBUSY;
		goto out;
	}

	/* the command is confirmed pending in the device: abort it there */
	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp; /* service response error */
		goto out;
	}

	/* clear the host-side doorbell slot as well */
	err = ufshcd_clear_cmd(hba, tag);
	if (err)
		goto out;

	scsi_dma_unmap(cmd);

	spin_lock_irqsave(host->host_lock, flags);
	__clear_bit(tag, &hba->outstanding_reqs);
	hba->lrb[tag].cmd = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);

	/* release the tag for any waiter (e.g. device management commands) */
	clear_bit_unlock(tag, &hba->lrb_in_use);
	wake_up(&hba->dev_cmd.tag_wq);
out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}

	return err;
}
2956
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302957/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302958 * ufshcd_host_reset_and_restore - reset and restore host controller
2959 * @hba: per-adapter instance
2960 *
2961 * Note that host controller reset may issue DME_RESET to
2962 * local and remote (device) Uni-Pro stack and the attributes
2963 * are reset to default state.
2964 *
2965 * Returns zero on success, non-zero on failure
2966 */
2967static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
2968{
2969 int err;
2970 async_cookie_t cookie;
2971 unsigned long flags;
2972
2973 /* Reset the host controller */
2974 spin_lock_irqsave(hba->host->host_lock, flags);
2975 ufshcd_hba_stop(hba);
2976 spin_unlock_irqrestore(hba->host->host_lock, flags);
2977
2978 err = ufshcd_hba_enable(hba);
2979 if (err)
2980 goto out;
2981
2982 /* Establish the link again and restore the device */
2983 cookie = async_schedule(ufshcd_async_scan, hba);
2984 /* wait for async scan to be completed */
2985 async_synchronize_cookie(++cookie);
2986 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
2987 err = -EIO;
2988out:
2989 if (err)
2990 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
2991
2992 return err;
2993}
2994
/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
	int err = 0;
	unsigned long flags;

	err = ufshcd_host_reset_and_restore(hba);

	/*
	 * After reset the door-bell might be cleared, complete
	 * outstanding requests in s/w here.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
3022
/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd - SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int err;
	unsigned long flags;
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	/*
	 * Check if there is any race with fatal error handling.
	 * If so, wait for it to complete. Even though fatal error
	 * handling does reset and restore in some cases, don't assume
	 * anything out of it. We are just avoiding race here.
	 */
	do {
		spin_lock_irqsave(hba->host->host_lock, flags);
		/* NB: the break below leaves the loop with the lock HELD */
		if (!(work_pending(&hba->eh_work) ||
				hba->ufshcd_state == UFSHCD_STATE_RESET))
			break;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
		flush_work(&hba->eh_work);
	} while (1);

	/* still holding the host lock from the break above */
	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!err) {
		err = SUCCESS;
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	} else {
		err = FAILED;
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	}
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
3072
/**
 * ufshcd_async_scan - asynchronous execution for link startup
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 *
 * Brings up the link, initializes the device, and (outside of error
 * handling) scans the SCSI host. On any failure the function simply
 * returns, leaving hba->ufshcd_state unchanged.
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;
	int ret;

	ret = ufshcd_link_startup(hba);
	if (ret)
		goto out;

	ufshcd_config_max_pwr_mode(hba);

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		goto out;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		goto out;

	ufshcd_force_reset_auto_bkops(hba);
	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;

	/* If we are in error handling context no need to scan the host */
	if (!ufshcd_eh_in_progress(hba)) {
		scsi_scan_host(hba->host);
		/* drops the reference taken by pm_runtime_get_sync() in ufshcd_init() */
		pm_runtime_put_sync(hba->dev);
	}
out:
	return;
}
3108
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303109static struct scsi_host_template ufshcd_driver_template = {
3110 .module = THIS_MODULE,
3111 .name = UFSHCD,
3112 .proc_name = UFSHCD,
3113 .queuecommand = ufshcd_queuecommand,
3114 .slave_alloc = ufshcd_slave_alloc,
3115 .slave_destroy = ufshcd_slave_destroy,
3116 .eh_abort_handler = ufshcd_abort,
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05303117 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
3118 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303119 .this_id = -1,
3120 .sg_tablesize = SG_ALL,
3121 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
3122 .can_queue = UFSHCD_CAN_QUEUE,
3123};
3124
/**
 * ufshcd_suspend - suspend power management function
 * @hba: per adapter instance
 * @state: power state
 *
 * Stub: system suspend is not implemented yet.
 *
 * Returns -ENOSYS
 */
int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
{
	/*
	 * TODO:
	 * 1. Block SCSI requests from SCSI midlayer
	 * 2. Change the internal driver state to non operational
	 * 3. Set UTRLRSR and UTMRLRSR bits to zero
	 * 4. Wait until outstanding commands are completed
	 * 5. Set HCE to zero to send the UFS host controller to reset state
	 */

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(ufshcd_suspend);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303146
/**
 * ufshcd_resume - resume power management function
 * @hba: per adapter instance
 *
 * Stub: system resume is not implemented yet.
 *
 * Returns -ENOSYS
 */
int ufshcd_resume(struct ufs_hba *hba)
{
	/*
	 * TODO:
	 * 1. Set HCE to 1, to start the UFS host controller
	 * initialization process
	 * 2. Set UTRLRSR and UTMRLRSR bits to 1
	 * 3. Change the internal driver state to operational
	 * 4. Unblock SCSI requests from SCSI midlayer
	 */

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(ufshcd_resume);
3167
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303168int ufshcd_runtime_suspend(struct ufs_hba *hba)
3169{
3170 if (!hba)
3171 return 0;
3172
3173 /*
3174 * The device is idle with no requests in the queue,
3175 * allow background operations.
3176 */
3177 return ufshcd_enable_auto_bkops(hba);
3178}
3179EXPORT_SYMBOL(ufshcd_runtime_suspend);
3180
/**
 * ufshcd_runtime_resume - runtime PM resume helper
 * @hba: per adapter instance (may be NULL)
 *
 * The host is active again, so take back control of background
 * operations from the device.
 */
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
	return hba ? ufshcd_disable_auto_bkops(hba) : 0;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
3189
/* Runtime PM idle callback: no idle-time work to do, report success. */
int ufshcd_runtime_idle(struct ufs_hba *hba)
{
	return 0;
}
EXPORT_SYMBOL(ufshcd_runtime_idle);
3195
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303196/**
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303197 * ufshcd_remove - de-allocate SCSI host and host memory space
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303198 * data structure memory
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303199 * @hba - per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303200 */
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303201void ufshcd_remove(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303202{
Akinobu Mitacfdf9c92013-07-30 00:36:03 +05303203 scsi_remove_host(hba->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303204 /* disable interrupts */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05303205 ufshcd_disable_intr(hba, hba->intr_mask);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303206 ufshcd_hba_stop(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303207
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303208 scsi_host_put(hba->host);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303209}
3210EXPORT_SYMBOL_GPL(ufshcd_remove);
3211
/**
 * ufshcd_init - Driver initialization routine
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
		 void __iomem *mmio_base, unsigned int irq)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err;

	if (!dev) {
		dev_err(dev,
		"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	if (!mmio_base) {
		dev_err(dev,
		"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	hba->mmio_base = mmio_base;
	hba->irq = irq;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	/* queue depth follows the controller's supported request slots */
	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFSHCD_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	/* Initialize wait queue for task management */
	init_waitqueue_head(&hba->tm_wq);
	init_waitqueue_head(&hba->tm_tag_wq);

	/* Initialize work queues */
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize device management tag acquire wait queue */
	init_waitqueue_head(&hba->dev_cmd.tag_wq);

	/* IRQ registration (devres-managed: released automatically on detach) */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto out_disable;
	}

	/* Enable SCSI tag mapping */
	err = scsi_init_shared_tag_map(host, host->can_queue);
	if (err) {
		dev_err(hba->dev, "init shared queue failed\n");
		goto out_disable;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto out_disable;
	}

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		goto out_remove_scsi_host;
	}

	*hba_handle = hba;

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);

	/* link startup + device init continue asynchronously */
	async_schedule(ufshcd_async_scan, hba);

	return 0;

out_remove_scsi_host:
	scsi_remove_host(hba->host);
out_disable:
	scsi_host_put(host);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
3342
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303343MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
3344MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
Vinayak Holikattie0eca632013-02-25 21:44:33 +05303345MODULE_DESCRIPTION("Generic UFS host controller driver Core");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303346MODULE_LICENSE("GPL");
3347MODULE_VERSION(UFSHCD_DRIVER_VERSION);