/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
 * driver source file
 *
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/pinctrl/consumer.h>
#include <linux/msm-bus.h>
#include <linux/pm_runtime.h>
#include <trace/events/mmc.h>

#include "sdhci-msm.h"
#include "cmdq_hci.h"

#define QOS_REMOVE_DELAY_MS		10
#define CORE_POWER			0x0
#define CORE_SW_RST			(1 << 7)

#define SDHCI_VER_100			0x2B

#define CORE_VERSION_STEP_MASK		0x0000FFFF
#define CORE_VERSION_MINOR_MASK		0x0FFF0000
#define CORE_VERSION_MINOR_SHIFT	16
#define CORE_VERSION_MAJOR_MASK		0xF0000000
#define CORE_VERSION_MAJOR_SHIFT	28
#define CORE_VERSION_TARGET_MASK	0x000000FF
#define SDHCI_MSM_VER_420		0x49

#define SWITCHABLE_SIGNALLING_VOL	(1 << 29)

#define CORE_VERSION_MAJOR_MASK		0xF0000000
#define CORE_VERSION_MAJOR_SHIFT	28

#define CORE_HC_MODE			0x78
#define HC_MODE_EN			0x1
#define FF_CLK_SW_RST_DIS		(1 << 13)

#define CORE_PWRCTL_BUS_OFF		0x01
#define CORE_PWRCTL_BUS_ON		(1 << 1)
#define CORE_PWRCTL_IO_LOW		(1 << 2)
#define CORE_PWRCTL_IO_HIGH		(1 << 3)

#define CORE_PWRCTL_BUS_SUCCESS		0x01
#define CORE_PWRCTL_BUS_FAIL		(1 << 1)
#define CORE_PWRCTL_IO_SUCCESS		(1 << 2)
#define CORE_PWRCTL_IO_FAIL		(1 << 3)

#define INT_MASK			0xF
#define MAX_PHASES			16

#define CORE_CMD_DAT_TRACK_SEL		(1 << 0)
#define CORE_DLL_EN			(1 << 16)
#define CORE_CDR_EN			(1 << 17)
#define CORE_CK_OUT_EN			(1 << 18)
#define CORE_CDR_EXT_EN			(1 << 19)
#define CORE_DLL_PDN			(1 << 29)
#define CORE_DLL_RST			(1 << 30)

#define CORE_DLL_LOCK			(1 << 7)
#define CORE_DDR_DLL_LOCK		(1 << 11)

#define CORE_CLK_PWRSAVE		(1 << 1)
#define CORE_HC_MCLK_SEL_DFLT		(2 << 8)
#define CORE_HC_MCLK_SEL_HS400		(3 << 8)
#define CORE_HC_MCLK_SEL_MASK		(3 << 8)
#define CORE_HC_AUTO_CMD21_EN		(1 << 6)
#define CORE_IO_PAD_PWR_SWITCH_EN	(1 << 15)
#define CORE_IO_PAD_PWR_SWITCH		(1 << 16)
#define CORE_HC_SELECT_IN_EN		(1 << 18)
#define CORE_HC_SELECT_IN_HS400		(6 << 19)
#define CORE_HC_SELECT_IN_MASK		(7 << 19)
#define CORE_VENDOR_SPEC_POR_VAL	0xA1C

#define HC_SW_RST_WAIT_IDLE_DIS		(1 << 20)
#define HC_SW_RST_REQ			(1 << 21)
#define CORE_ONE_MID_EN			(1 << 25)

#define CORE_8_BIT_SUPPORT		(1 << 18)
#define CORE_3_3V_SUPPORT		(1 << 24)
#define CORE_3_0V_SUPPORT		(1 << 25)
#define CORE_1_8V_SUPPORT		(1 << 26)
#define CORE_SYS_BUS_SUPPORT_64_BIT	BIT(28)

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB		(1 << 16)
#define CORE_HW_AUTOCAL_ENA		(1 << 17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			(1 << 16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		(1 << 0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CQ_CMD_DBG_RAM			0x110
#define CQ_CMD_DBG_RAM_WA		0x150
#define CQ_CMD_DBG_RAM_OL		0x154

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	(1 << 0)
#define CORE_CDC_SWITCH_RC_EN		(1 << 1)

#define CORE_CDC_T4_DLY_SEL		(1 << 0)
#define CORE_CMDIN_RCLK_EN		(1 << 1)
#define CORE_START_CDC_TRAFFIC		(1 << 6)

#define CORE_PWRSAVE_DLL		(1 << 3)
#define CORE_CMDEN_HS400_INPUT_MASK_CNT	(1 << 13)

#define CORE_DDR_CAL_EN			(1 << 0)
#define CORE_FLL_CYCLE_CNT		(1 << 18)
#define CORE_DLL_CLOCK_DISABLE		(1 << 21)

#define DDR_CONFIG_POR_VAL		0x80040853
#define DDR_CONFIG_PRG_RCLK_DLY_MASK	0x1FF
#define DDR_CONFIG_PRG_RCLK_DLY		115
#define DDR_CONFIG_2_POR_VAL		0x80040873

/* 512 descriptors */
#define SDHCI_MSM_MAX_SEGMENTS		(1 << 9)
#define SDHCI_MSM_MMC_CLK_GATE_DELAY	200 /* msecs */

#define CORE_FREQ_100MHZ		(100 * 1000 * 1000)
#define TCXO_FREQ			19200000

#define INVALID_TUNING_PHASE		-1
#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)

#define NUM_TUNING_PHASES		16
#define MAX_DRV_TYPES_SUPPORTED_HS200	4
#define MSM_AUTOSUSPEND_DELAY_MS	100

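/*
 * Register offsets for the vendor-specific MSM core registers. Two layouts
 * exist: one for controllers where the legacy MCI register block has been
 * removed (the registers live directly in the SDHC register space) and one
 * for controllers that still access them through the separate core_mem
 * region.
 */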
struct sdhci_msm_offset {
	u32 CORE_MCI_DATA_CNT;
	u32 CORE_MCI_STATUS;
	u32 CORE_MCI_FIFO_CNT;
	u32 CORE_MCI_VERSION;
	u32 CORE_GENERICS;
	u32 CORE_TESTBUS_CONFIG;
	u32 CORE_TESTBUS_SEL2_BIT;
	u32 CORE_TESTBUS_ENA;
	u32 CORE_TESTBUS_SEL2;
	u32 CORE_PWRCTL_STATUS;
	u32 CORE_PWRCTL_MASK;
	u32 CORE_PWRCTL_CLEAR;
	u32 CORE_PWRCTL_CTL;
	u32 CORE_SDCC_DEBUG_REG;
	u32 CORE_DLL_CONFIG;
	u32 CORE_DLL_STATUS;
	u32 CORE_VENDOR_SPEC;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
	u32 CORE_VENDOR_SPEC_FUNC2;
	u32 CORE_VENDOR_SPEC_CAPABILITIES0;
	u32 CORE_DDR_200_CFG;
	u32 CORE_VENDOR_SPEC3;
	u32 CORE_DLL_CONFIG_2;
	u32 CORE_DDR_CONFIG;
	u32 CORE_DDR_CONFIG_2;
};

struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
	.CORE_MCI_DATA_CNT = 0x35C,
	.CORE_MCI_STATUS = 0x324,
	.CORE_MCI_FIFO_CNT = 0x308,
	.CORE_MCI_VERSION = 0x318,
	.CORE_GENERICS = 0x320,
	.CORE_TESTBUS_CONFIG = 0x32C,
	.CORE_TESTBUS_SEL2_BIT = 3,
	.CORE_TESTBUS_ENA = (1 << 31),
	.CORE_TESTBUS_SEL2 = (1 << 3),
	.CORE_PWRCTL_STATUS = 0x240,
	.CORE_PWRCTL_MASK = 0x244,
	.CORE_PWRCTL_CLEAR = 0x248,
	.CORE_PWRCTL_CTL = 0x24C,
	.CORE_SDCC_DEBUG_REG = 0x358,
	.CORE_DLL_CONFIG = 0x200,
	.CORE_DLL_STATUS = 0x208,
	.CORE_VENDOR_SPEC = 0x20C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
	.CORE_VENDOR_SPEC_FUNC2 = 0x210,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
	.CORE_DDR_200_CFG = 0x224,
	.CORE_VENDOR_SPEC3 = 0x250,
	.CORE_DLL_CONFIG_2 = 0x254,
	.CORE_DDR_CONFIG = 0x258,
	.CORE_DDR_CONFIG_2 = 0x25C,
};

struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
	.CORE_MCI_DATA_CNT = 0x30,
	.CORE_MCI_STATUS = 0x34,
	.CORE_MCI_FIFO_CNT = 0x44,
	.CORE_MCI_VERSION = 0x050,
	.CORE_GENERICS = 0x70,
	.CORE_TESTBUS_CONFIG = 0x0CC,
	.CORE_TESTBUS_SEL2_BIT = 4,
	.CORE_TESTBUS_ENA = (1 << 3),
	.CORE_TESTBUS_SEL2 = (1 << 4),
	.CORE_PWRCTL_STATUS = 0xDC,
	.CORE_PWRCTL_MASK = 0xE0,
	.CORE_PWRCTL_CLEAR = 0xE4,
	.CORE_PWRCTL_CTL = 0xE8,
	.CORE_SDCC_DEBUG_REG = 0x124,
	.CORE_DLL_CONFIG = 0x100,
	.CORE_DLL_STATUS = 0x108,
	.CORE_VENDOR_SPEC = 0x10C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
	.CORE_VENDOR_SPEC_FUNC2 = 0x110,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
	.CORE_DDR_200_CFG = 0x184,
	.CORE_VENDOR_SPEC3 = 0x1B0,
	.CORE_DLL_CONFIG_2 = 0x1B4,
	.CORE_DDR_CONFIG = 0x1B8,
	.CORE_DDR_CONFIG_2 = 0x1BC,
};

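/*
 * Relaxed register accessors that pick the correct base address
 * (host->ioaddr vs. msm_host->core_mem) based on msm_host->mci_removed.
 */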
u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	return readb_relaxed(base_addr + offset);
}

u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	return readl_relaxed(base_addr + offset);
}

void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	writeb_relaxed(val, base_addr + offset);
}

void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	writel_relaxed(val, base_addr + offset);
}

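/*
 * Standard tuning block patterns returned by the card in response to
 * CMD19/CMD21: the 64-byte pattern is used by default and the 128-byte
 * pattern for 8-bit HS200 transfers (see sdhci_msm_execute_tuning()).
 */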
static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};

/* global to hold each slot instance for debug */
static struct sdhci_msm_host *sdhci_slot[2];

static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);

static bool nocmdq;
module_param(nocmdq, bool, S_IRUGO|S_IWUSR);

enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set to whatever is passed in voltage_level (the third argument
	 * of sdhci_msm_set_vdd_io_vol()).
	 */
	VDD_IO_SET_LEVEL,
};

/* MSM platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
						u8 poll)
{
	int rc = 0;
	u32 wait_cnt = 50;
	u8 ck_out_en = 0;
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* poll for CK_OUT_EN bit. max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), __func__, poll);
			rc = -ETIMEDOUT;
			goto out;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
	}
out:
	return rc;
}

/*
 * Enable CDR to track changes of DAT lines and adjust sampling
 * point according to voltage/temperature variations
 */
static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
{
	int rc = 0;
	u32 config;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err;

	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err;
	goto out;
err:
	pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
out:
	return rc;
}

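/* sysfs hooks to enable/disable the controller's AUTO_CMD21 feature at runtime */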
static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
				*attr, const char *buf, size_t count)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u32 tmp;
	unsigned long flags;

	if (!kstrtou32(buf, 0, &tmp)) {
		spin_lock_irqsave(&host->lock, flags);
		msm_host->en_auto_cmd21 = !!tmp;
		spin_unlock_irqrestore(&host->lock, flags);
	}
	return count;
}

static ssize_t show_auto_cmd21(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
}

/* MSM auto-tuning handler */
static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
					    bool enable,
					    u32 type)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 val = 0;

	if (!msm_host->en_auto_cmd21)
		return 0;

	if (type == MMC_SEND_TUNING_BLOCK_HS200)
		val = CORE_HC_AUTO_CMD21_EN;
	else
		return 0;

	if (enable) {
		rc = msm_enable_cdr_cm_sdc4_dll(host);
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) | val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	} else {
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) & ~val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	}
	return rc;
}

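/*
 * Program one of the 16 DLL output phases into the CDR_SELEXT field of
 * DLL_CONFIG (bits 23:20), using the grey-coded phase table below.
 */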
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
					0x8};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~(0xF << 20))
			| (grey_coded_phase_table[phase] << 20)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	goto out;

err_out:
	pr_err("%s: %s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), __func__, phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as the sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 point of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in the first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if a cycle exists between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form a cycle then merge them as a single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in the row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in the row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there is more than one phase window, the total
			 * number of phases in both windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}

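/*
 * Map the current host clock rate to the 3-bit MCLK_FREQ field
 * (bits 26:24) of DLL_CONFIG.
 */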
static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 200000000)
		mclk_freq = 7;

	writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~(7 << 24)) | (mclk_freq << 24)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
}

/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	struct mmc_host *mmc = host->mmc;
	int rc = 0;
	unsigned long flags;
	u32 wait_cnt;
	bool prev_pwrsave, curr_pwrsave;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);
	prev_pwrsave = !!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
	curr_pwrsave = prev_pwrsave;
	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock. So let's disable the PWRSAVE
	 * here and re-enable it once tuning is completed.
	 */
	if (prev_pwrsave) {
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC)
			& ~CORE_CLK_PWRSAVE), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
		curr_pwrsave = false;
	}

	if (msm_host->use_updated_dll_reset) {
		/* Disable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~CORE_CK_OUT_EN), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			| CORE_DLL_CLOCK_DISABLE), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2);
	}

	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	if (msm_host->use_updated_dll_reset) {
		u32 mclk_freq = 0;

		if ((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& CORE_FLL_CYCLE_CNT))
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
		else
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);

		writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& ~(0xFF << 10)) | (mclk_freq << 10)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_updated_dll_reset) {
		msm_cm_dll_set_freq(host);
		/* Enable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2);
	}

	/* Set DLL_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG)
		| CORE_CK_OUT_EN), host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG);

	wait_cnt = 50;
	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
		/* max. wait of 50us for LOCK bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: DLL failed to LOCK\n",
				mmc_hostname(mmc), __func__);
			rc = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

out:
	/* Restore the correct PWRSAVE state */
	if (prev_pwrsave ^ curr_pwrsave) {
		u32 reg = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);

		if (prev_pwrsave)
			reg |= CORE_CLK_PWRSAVE;
		else
			reg &= ~CORE_CLK_PWRSAVE;

		writel_relaxed(reg, host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		& ~CORE_CDC_T4_DLY_SEL),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
		& ~CORE_CDC_SWITCH_BYPASS_OFF),
		host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
		| CORE_CDC_SWITCH_RC_EN),
		host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		& ~CORE_START_CDC_TRAFFIC),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x3AC
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		| CORE_SW_TRIG_FULL_CALIB),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		& ~CORE_SW_TRIG_FULL_CALIB),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		| CORE_HW_AUTOCAL_ENA),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
		CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
		host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
		& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		| CORE_START_CDC_TRAFFIC),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

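/*
 * DDR (HS400) calibration of the CM_DLL_SDC4 delay line; used on
 * controllers that do not use the CDCLP533 block and on the
 * enhanced-strobe path.
 */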
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogram the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->rclk_delay_fix) {
		writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_2);
	} else {
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG);
	}

	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			| CORE_CMDIN_RCLK_EN), host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG_2)
		| CORE_DDR_CAL_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr +
		msm_host_offset->CORE_DLL_STATUS,
		dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * When MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee the above requirement, so PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3)
			| CORE_PWRSAVE_DLL), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3);
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
		pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
			mmc_hostname(mmc));
		return -EINVAL;
	}

	if (msm_host->calibration_done ||
		!(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		return 0;
	}

	/*
	 * Reset the tuning block.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	if (!ret)
		msm_host->calibration_done = true;
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

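/*
 * HS400 switch-over: re-initialise the tuning DLL, restore the phase saved
 * during HS200 tuning and run the appropriate DLL calibration
 * (CDCLP533 or CM_DLL_SDC4).
 */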
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG)
		| CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

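/*
 * Change the card's driver strength via CMD6 (MMC_SWITCH) by rewriting
 * EXT_CSD_HS_TIMING with the HS200 timing value (0x2) and the requested
 * drive strength in the upper nibble.
 */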
static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
		u8 drv_type)
{
	struct mmc_command cmd = {0};
	struct mmc_request mrq = {NULL};
	struct mmc_host *mmc = host->mmc;
	u8 val = ((drv_type << 4) | 2);

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		(EXT_CSD_HS_TIMING << 16) |
		(val << 8) |
		EXT_CSD_CMD_SET_NORMAL;
	cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
	/* 1 sec */
	cmd.busy_timeout = 1000 * 1000;

	memset(cmd.resp, 0, sizeof(cmd.resp));
	cmd.retries = 3;

	mrq.cmd = &cmd;
	cmd.data = NULL;

	mmc_wait_for_req(mmc, &mrq);
	pr_debug("%s: %s: set card drive type to %d\n",
			mmc_hostname(mmc), __func__,
			drv_type);
}

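/*
 * Platform tuning entry point: try each of the 16 DLL output phases with
 * the tuning command, then pick a phase from the largest window of passing
 * phases via msm_find_most_appropriate_phase().
 */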
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;

	/*
	 * Tuning is required only for SDR104, HS200 and HS400 modes, and
	 * only if the clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	/*
	 * Don't allow re-tuning for CRC errors observed for any commands
	 * that are sent during the tuning sequence itself.
	 */
	if (msm_host->tuning_in_progress)
		return 0;
	msm_host->tuning_in_progress = true;
	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode */
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		if (card && (cmd.error || data.error)) {
			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
				    (R1_CURRENT_STATE(sts_cmd.resp[0])
				    != R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * wait for at least 146 MCLK cycles for
					 * the card to move to TRANS state. As
					 * the MCLK would be min 200MHz for
					 * tuning, we need max 0.73us delay. To
					 * be on the safer side a 1ms delay is
					 * given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			};
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then it's a problem. So change the card's
		 * drive type to a different value, if supported, and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
			tuned_phase_cnt);

		/* set drive type to another value. default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			pr_debug("%s: trying different drive strength (%d)\n",
				mmc_hostname(mmc), drv_type);
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
						tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
			mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	msm_host->tuning_in_progress = false;
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}

static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
{
	struct sdhci_msm_gpio_data *curr;
	int i, ret = 0;

	curr = pdata->pin_data->gpio_data;
	for (i = 0; i < curr->size; i++) {
		if (!gpio_is_valid(curr->gpio[i].no)) {
			ret = -EINVAL;
			pr_err("%s: Invalid gpio = %d\n", __func__,
					curr->gpio[i].no);
			goto free_gpios;
		}
		if (enable) {
			ret = gpio_request(curr->gpio[i].no,
					curr->gpio[i].name);
			if (ret) {
				pr_err("%s: gpio_request(%d, %s) failed %d\n",
					__func__, curr->gpio[i].no,
					curr->gpio[i].name, ret);
				goto free_gpios;
			}
			curr->gpio[i].is_enabled = true;
		} else {
			gpio_free(curr->gpio[i].no);
			curr->gpio[i].is_enabled = false;
		}
	}
	return ret;

free_gpios:
	for (i--; i >= 0; i--) {
		gpio_free(curr->gpio[i].no);
		curr->gpio[i].is_enabled = false;
	}
	return ret;
}

Pratibhasagar V9acf2642013-11-21 21:07:21 +05301379static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1380 bool enable)
1381{
1382 int ret = 0;
1383
1384 if (enable)
1385 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1386 pdata->pctrl_data->pins_active);
1387 else
1388 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1389 pdata->pctrl_data->pins_sleep);
1390
1391 if (ret < 0)
1392 pr_err("%s state for pinctrl failed with %d\n",
1393 enable ? "Enabling" : "Disabling", ret);
1394
1395 return ret;
1396}
1397
Asutosh Das0ef24812012-12-18 16:14:02 +05301398static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1399{
1400 int ret = 0;
1401
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301402 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301403 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301404 } else if (pdata->pctrl_data) {
1405 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1406 goto out;
1407 } else if (!pdata->pin_data) {
1408 return 0;
1409 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301410
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301411 if (pdata->pin_data->is_gpio)
1412 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301413out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301414 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301415 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301416
1417 return ret;
1418}
1419
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301420static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1421 u32 **out, int *len, u32 size)
1422{
1423 int ret = 0;
1424 struct device_node *np = dev->of_node;
1425 size_t sz;
1426 u32 *arr = NULL;
1427
1428 if (!of_get_property(np, prop_name, len)) {
1429 ret = -EINVAL;
1430 goto out;
1431 }
1432 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001433 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301434 dev_err(dev, "%s invalid size\n", prop_name);
1435 ret = -EINVAL;
1436 goto out;
1437 }
1438
1439 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1440 if (!arr) {
1441 dev_err(dev, "%s failed allocating memory\n", prop_name);
1442 ret = -ENOMEM;
1443 goto out;
1444 }
1445
1446 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1447 if (ret < 0) {
1448 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1449 goto out;
1450 }
1451 *out = arr;
1452out:
1453 if (ret)
1454 *len = 0;
1455 return ret;
1456}
1457
Asutosh Das0ef24812012-12-18 16:14:02 +05301458#define MAX_PROP_SIZE 32
1459static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1460 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1461{
1462 int len, ret = 0;
1463 const __be32 *prop;
1464 char prop_name[MAX_PROP_SIZE];
1465 struct sdhci_msm_reg_data *vreg;
1466 struct device_node *np = dev->of_node;
1467
1468 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1469 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301470 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301471 return ret;
1472 }
1473
1474 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1475 if (!vreg) {
1476 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1477 ret = -ENOMEM;
1478 return ret;
1479 }
1480
1481 vreg->name = vreg_name;
1482
1483 snprintf(prop_name, MAX_PROP_SIZE,
1484 "qcom,%s-always-on", vreg_name);
1485 if (of_get_property(np, prop_name, NULL))
1486 vreg->is_always_on = true;
1487
1488 snprintf(prop_name, MAX_PROP_SIZE,
1489 "qcom,%s-lpm-sup", vreg_name);
1490 if (of_get_property(np, prop_name, NULL))
1491 vreg->lpm_sup = true;
1492
1493 snprintf(prop_name, MAX_PROP_SIZE,
1494 "qcom,%s-voltage-level", vreg_name);
1495 prop = of_get_property(np, prop_name, &len);
1496 if (!prop || (len != (2 * sizeof(__be32)))) {
1497 dev_warn(dev, "%s %s property\n",
1498 prop ? "invalid format" : "no", prop_name);
1499 } else {
1500 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1501 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1502 }
1503
1504 snprintf(prop_name, MAX_PROP_SIZE,
1505 "qcom,%s-current-level", vreg_name);
1506 prop = of_get_property(np, prop_name, &len);
1507 if (!prop || (len != (2 * sizeof(__be32)))) {
1508 dev_warn(dev, "%s %s property\n",
1509 prop ? "invalid format" : "no", prop_name);
1510 } else {
1511 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1512 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1513 }
1514
1515 *vreg_data = vreg;
1516 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1517 vreg->name, vreg->is_always_on ? "always_on," : "",
1518 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1519 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1520
1521 return ret;
1522}
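/*
 * Illustrative device tree fragment for the properties parsed above,
 * assuming a supply named "vdd" (placeholder values, not taken from any
 * particular board file):
 *
 *	vdd-supply = <&pm8941_l21>;
 *	qcom,vdd-always-on;
 *	qcom,vdd-lpm-sup;
 *	qcom,vdd-voltage-level = <2950000 2950000>;
 *	qcom,vdd-current-level = <9000 800000>;
 */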
1523
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301524static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1525 struct sdhci_msm_pltfm_data *pdata)
1526{
1527 struct sdhci_pinctrl_data *pctrl_data;
1528 struct pinctrl *pctrl;
1529 int ret = 0;
1530
1531 /* Try to obtain pinctrl handle */
1532 pctrl = devm_pinctrl_get(dev);
1533 if (IS_ERR(pctrl)) {
1534 ret = PTR_ERR(pctrl);
1535 goto out;
1536 }
1537 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1538 if (!pctrl_data) {
1539 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1540 ret = -ENOMEM;
1541 goto out;
1542 }
1543 pctrl_data->pctrl = pctrl;
1544 /* Look-up and keep the states handy to be used later */
1545 pctrl_data->pins_active = pinctrl_lookup_state(
1546 pctrl_data->pctrl, "active");
1547 if (IS_ERR(pctrl_data->pins_active)) {
1548 ret = PTR_ERR(pctrl_data->pins_active);
1549 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1550 goto out;
1551 }
1552 pctrl_data->pins_sleep = pinctrl_lookup_state(
1553 pctrl_data->pctrl, "sleep");
1554 if (IS_ERR(pctrl_data->pins_sleep)) {
1555 ret = PTR_ERR(pctrl_data->pins_sleep);
1556 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1557 goto out;
1558 }
1559 pdata->pctrl_data = pctrl_data;
1560out:
1561 return ret;
1562}
1563
Asutosh Das0ef24812012-12-18 16:14:02 +05301564#define GPIO_NAME_MAX_LEN 32
1565static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1566 struct sdhci_msm_pltfm_data *pdata)
1567{
1568 int ret = 0, cnt, i;
1569 struct sdhci_msm_pin_data *pin_data;
1570 struct device_node *np = dev->of_node;
1571
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301572 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1573 if (!ret) {
1574 goto out;
1575 } else if (ret == -EPROBE_DEFER) {
1576 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1577 goto out;
1578 } else {
1579 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1580 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301581 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301582 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301583 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1584 if (!pin_data) {
1585 dev_err(dev, "No memory for pin_data\n");
1586 ret = -ENOMEM;
1587 goto out;
1588 }
1589
1590 cnt = of_gpio_count(np);
1591 if (cnt > 0) {
1592 pin_data->gpio_data = devm_kzalloc(dev,
1593 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1594 if (!pin_data->gpio_data) {
1595 dev_err(dev, "No memory for gpio_data\n");
1596 ret = -ENOMEM;
1597 goto out;
1598 }
1599 pin_data->gpio_data->size = cnt;
1600 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1601 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1602
1603 if (!pin_data->gpio_data->gpio) {
1604 dev_err(dev, "No memory for gpio\n");
1605 ret = -ENOMEM;
1606 goto out;
1607 }
1608
1609 for (i = 0; i < cnt; i++) {
1610 const char *name = NULL;
1611 char result[GPIO_NAME_MAX_LEN];
1612 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1613 of_property_read_string_index(np,
1614 "qcom,gpio-names", i, &name);
1615
1616 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1617 dev_name(dev), name ? name : "?");
1618 pin_data->gpio_data->gpio[i].name = result;
1619 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1620 pin_data->gpio_data->gpio[i].name,
1621 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301622 }
1623 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301624 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301625out:
1626 if (ret)
1627 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1628 return ret;
1629}
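/*
 * Illustrative device tree fragment for the GPIO-lib fallback parsed
 * above, used only when pinctrl parsing fails (placeholder values):
 *
 *	gpios = <&msmgpio 40 0>, <&msmgpio 39 0>;
 *	qcom,gpio-names = "clk", "cmd";
 */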
1630
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001631#ifdef CONFIG_SMP
1632static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
1633{
1634 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
1635}
1636#else
1637static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
1638#endif
1639
Gilad Bronerc788a672015-09-08 15:39:11 +03001640static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1641 struct sdhci_msm_pltfm_data *pdata)
1642{
1643 struct device_node *np = dev->of_node;
1644 const char *str;
1645 u32 cpu;
1646 int ret = 0;
1647 int i;
1648
1649 pdata->pm_qos_data.irq_valid = false;
1650 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1651 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1652 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001653 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001654 }
1655
1656 /* must specify cpu for "affine_cores" type */
1657 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1658 pdata->pm_qos_data.irq_cpu = -1;
1659 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1660 if (ret) {
1661 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1662 ret);
1663 goto out;
1664 }
1665 if (cpu < 0 || cpu >= num_possible_cpus()) {
1666 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1667 __func__, cpu, num_possible_cpus());
1668 ret = -EINVAL;
1669 goto out;
1670 }
1671 pdata->pm_qos_data.irq_cpu = cpu;
1672 }
1673
1674 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1675 SDHCI_POWER_POLICY_NUM) {
1676 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1677 __func__, SDHCI_POWER_POLICY_NUM);
1678 ret = -EINVAL;
1679 goto out;
1680 }
1681
1682 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1683 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1684 &pdata->pm_qos_data.irq_latency.latency[i]);
1685
1686 pdata->pm_qos_data.irq_valid = true;
1687out:
1688 return ret;
1689}
1690
1691static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1692 struct sdhci_msm_pltfm_data *pdata)
1693{
1694 struct device_node *np = dev->of_node;
1695 u32 mask;
1696 int nr_groups;
1697 int ret;
1698 int i;
1699
1700 /* Read cpu group mapping */
1701 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1702 if (nr_groups <= 0) {
1703 ret = -EINVAL;
1704 goto out;
1705 }
1706 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1707 pdata->pm_qos_data.cpu_group_map.mask =
1708 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1709 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1710 ret = -ENOMEM;
1711 goto out;
1712 }
1713
1714 for (i = 0; i < nr_groups; i++) {
1715 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1716 i, &mask);
1717
1718 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1719 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1720 cpu_possible_mask)) {
1721 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1722 __func__, mask, i);
1723 ret = -EINVAL;
1724 goto free_res;
1725 }
1726 }
1727 return 0;
1728
1729free_res:
1730 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1731out:
1732 return ret;
1733}
1734
1735static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1736 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1737{
1738 struct device_node *np = dev->of_node;
1739 struct sdhci_msm_pm_qos_latency *values;
1740 int ret;
1741 int i;
1742 int group;
1743 int cfg;
1744
1745 ret = of_property_count_u32_elems(np, name);
1746 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1747 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1748 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1749 ret);
1750 return -EINVAL;
1751 } else if (ret < 0) {
1752 return ret;
1753 }
1754
1755 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1756 GFP_KERNEL);
1757 if (!values)
1758 return -ENOMEM;
1759
1760 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1761 group = i / SDHCI_POWER_POLICY_NUM;
1762 cfg = i % SDHCI_POWER_POLICY_NUM;
1763 of_property_read_u32_index(np, name, i,
1764 &(values[group].latency[cfg]));
1765 }
1766
1767 *latency = values;
1768 return 0;
1769}
1770
1771static void sdhci_msm_pm_qos_parse(struct device *dev,
1772 struct sdhci_msm_pltfm_data *pdata)
1773{
1774 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1775 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1776 __func__);
1777
1778 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1779 pdata->pm_qos_data.cmdq_valid =
1780 !sdhci_msm_pm_qos_parse_latency(dev,
1781 "qcom,pm-qos-cmdq-latency-us",
1782 pdata->pm_qos_data.cpu_group_map.nr_groups,
1783 &pdata->pm_qos_data.cmdq_latency);
1784 pdata->pm_qos_data.legacy_valid =
1785 !sdhci_msm_pm_qos_parse_latency(dev,
1786 "qcom,pm-qos-legacy-latency-us",
1787 pdata->pm_qos_data.cpu_group_map.nr_groups,
1788 &pdata->pm_qos_data.latency);
1789 if (!pdata->pm_qos_data.cmdq_valid &&
1790 !pdata->pm_qos_data.legacy_valid) {
1791 /* clean-up previously allocated arrays */
1792 kfree(pdata->pm_qos_data.latency);
1793 kfree(pdata->pm_qos_data.cmdq_latency);
1794 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1795 __func__);
1796 }
1797 } else {
1798 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1799 __func__);
1800 }
1801}
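/*
 * Illustrative device tree fragment for the PM QoS properties parsed
 * above, assuming SDHCI_POWER_POLICY_NUM == 2 and two cpu groups
 * (placeholder values, not from any shipping board file):
 *
 *	qcom,pm-qos-irq-type = "affine_cores";
 *	qcom,pm-qos-irq-cpu = <0>;
 *	qcom,pm-qos-irq-latency = <70 70>;
 *	qcom,pm-qos-cpu-groups = <0x03 0x0c>;
 *	qcom,pm-qos-legacy-latency-us = <70 70>, <70 70>;
 *	qcom,pm-qos-cmdq-latency-us = <70 70>, <70 70>;
 */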
1802
Asutosh Das0ef24812012-12-18 16:14:02 +05301803/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001804static
1805struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1806 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301807{
1808 struct sdhci_msm_pltfm_data *pdata = NULL;
1809 struct device_node *np = dev->of_node;
1810 u32 bus_width = 0;
1811 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301812 int clk_table_len;
1813 u32 *clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301814 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05301815
1816 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1817 if (!pdata) {
1818 dev_err(dev, "failed to allocate memory for platform data\n");
1819 goto out;
1820 }
1821
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301822 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
1823	if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
1824 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301825
Asutosh Das0ef24812012-12-18 16:14:02 +05301826 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1827 if (bus_width == 8)
1828 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1829 else if (bus_width == 4)
1830 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1831 else {
1832 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1833 pdata->mmc_bus_width = 0;
1834 }
1835
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001836 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
1837 &msm_host->mmc->clk_scaling.freq_table,
1838 &msm_host->mmc->clk_scaling.freq_table_sz, 0))
1839 pr_debug("%s: no clock scaling frequencies were supplied\n",
1840 dev_name(dev));
1841 else if (!msm_host->mmc->clk_scaling.freq_table ||
1842 !msm_host->mmc->clk_scaling.freq_table_sz)
1843 dev_err(dev, "bad dts clock scaling frequencies\n");
1844
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301845 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1846 &clk_table, &clk_table_len, 0)) {
1847 dev_err(dev, "failed parsing supported clock rates\n");
1848 goto out;
1849 }
1850 if (!clk_table || !clk_table_len) {
1851 dev_err(dev, "Invalid clock table\n");
1852 goto out;
1853 }
1854 pdata->sup_clk_table = clk_table;
1855 pdata->sup_clk_cnt = clk_table_len;
1856
Asutosh Das0ef24812012-12-18 16:14:02 +05301857 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1858 sdhci_msm_slot_reg_data),
1859 GFP_KERNEL);
1860 if (!pdata->vreg_data) {
1861 dev_err(dev, "failed to allocate memory for vreg data\n");
1862 goto out;
1863 }
1864
1865 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1866 "vdd")) {
1867 dev_err(dev, "failed parsing vdd data\n");
1868 goto out;
1869 }
1870 if (sdhci_msm_dt_parse_vreg_info(dev,
1871 &pdata->vreg_data->vdd_io_data,
1872 "vdd-io")) {
1873 dev_err(dev, "failed parsing vdd-io data\n");
1874 goto out;
1875 }
1876
1877 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
1878 dev_err(dev, "failed parsing gpio data\n");
1879 goto out;
1880 }
1881
Asutosh Das0ef24812012-12-18 16:14:02 +05301882 len = of_property_count_strings(np, "qcom,bus-speed-mode");
1883
1884 for (i = 0; i < len; i++) {
1885 const char *name = NULL;
1886
1887 of_property_read_string_index(np,
1888 "qcom,bus-speed-mode", i, &name);
1889 if (!name)
1890 continue;
1891
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001892 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
1893 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
1894 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
1895 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
1896 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05301897 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1898 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
1899 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1900 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
1901 pdata->caps |= MMC_CAP_1_8V_DDR
1902 | MMC_CAP_UHS_DDR50;
1903 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
1904 pdata->caps |= MMC_CAP_1_2V_DDR
1905 | MMC_CAP_UHS_DDR50;
1906 }
1907
1908 if (of_get_property(np, "qcom,nonremovable", NULL))
1909 pdata->nonremovable = true;
1910
Guoping Yuf7c91332014-08-20 16:56:18 +08001911 if (of_get_property(np, "qcom,nonhotplug", NULL))
1912 pdata->nonhotplug = true;
1913
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08001914 pdata->largeaddressbus =
1915 of_property_read_bool(np, "qcom,large-address-bus");
1916
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001917 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
1918 msm_host->mmc->wakeup_on_idle = true;
1919
Gilad Bronerc788a672015-09-08 15:39:11 +03001920 sdhci_msm_pm_qos_parse(dev, pdata);
1921
Pavan Anamula5a256df2015-10-16 14:38:28 +05301922 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
1923 pdata->core_3_0v_support = true;
1924
Asutosh Das0ef24812012-12-18 16:14:02 +05301925 return pdata;
1926out:
1927 return NULL;
1928}
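/*
 * Illustrative fragment of the remaining platform properties consumed
 * above (placeholder values only):
 *
 *	cd-gpios = <&msmgpio 62 GPIO_ACTIVE_LOW>;
 *	qcom,bus-width = <8>;
 *	qcom,clk-rates = <400000 20000000 25000000 50000000 200000000>;
 *	qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
 *	qcom,nonremovable;
 *	qcom,large-address-bus;
 */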
1929
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301930/* Returns required bandwidth in Bytes per Sec */
1931static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1932 struct mmc_ios *ios)
1933{
Sahitya Tummala2886c922013-04-03 18:03:31 +05301934 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1935 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1936
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301937 unsigned int bw;
1938
Sahitya Tummala2886c922013-04-03 18:03:31 +05301939 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301940 /*
1941	 * For DDR mode, the SDCC controller clock runs at
1942	 * double the rate of the actual clock that goes to the card.
1943 */
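	/*
	 * With an 8-bit bus one byte moves per controller clock, so the
	 * clock rate approximates the bandwidth in bytes per second; a
	 * 4-bit bus moves half of that and a 1-bit bus one eighth, which
	 * is what the divisions below account for.
	 */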
1944 if (ios->bus_width == MMC_BUS_WIDTH_4)
1945 bw /= 2;
1946 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1947 bw /= 8;
1948
1949 return bw;
1950}
1951
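/*
 * Picks the index of the first bw_vecs[] entry that can satisfy the
 * requested bandwidth. For example (illustrative values), with
 * bw_vecs = <0 400000 25000000 50000000>, a 20 MB/s request maps to
 * index 2, and anything above the last entry clamps to the final index.
 */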
1952static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1953 unsigned int bw)
1954{
1955 unsigned int *table = host->pdata->voting_data->bw_vecs;
1956 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1957 int i;
1958
1959 if (host->msm_bus_vote.is_max_bw_needed && bw)
1960 return host->msm_bus_vote.max_bw_vote;
1961
1962 for (i = 0; i < size; i++) {
1963 if (bw <= table[i])
1964 break;
1965 }
1966
1967 if (i && (i == size))
1968 i--;
1969
1970 return i;
1971}
1972
1973/*
1974 * This function must be called with host lock acquired.
1975 * Caller of this function should also ensure that msm bus client
1976 * handle is not null.
1977 */
1978static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
1979 int vote,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301980 unsigned long *flags)
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301981{
1982 struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
1983 int rc = 0;
1984
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301985 BUG_ON(!flags);
1986
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301987 if (vote != msm_host->msm_bus_vote.curr_vote) {
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301988 spin_unlock_irqrestore(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301989 rc = msm_bus_scale_client_update_request(
1990 msm_host->msm_bus_vote.client_handle, vote);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301991 spin_lock_irqsave(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301992 if (rc) {
1993 pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
1994 mmc_hostname(host->mmc),
1995 msm_host->msm_bus_vote.client_handle, vote, rc);
1996 goto out;
1997 }
1998 msm_host->msm_bus_vote.curr_vote = vote;
1999 }
2000out:
2001 return rc;
2002}
2003
2004/*
2005 * Internal work item that drops the msm bus bandwidth vote to 0.
2006 */
2007static void sdhci_msm_bus_work(struct work_struct *work)
2008{
2009 struct sdhci_msm_host *msm_host;
2010 struct sdhci_host *host;
2011 unsigned long flags;
2012
2013 msm_host = container_of(work, struct sdhci_msm_host,
2014 msm_bus_vote.vote_work.work);
2015 host = platform_get_drvdata(msm_host->pdev);
2016
2017 if (!msm_host->msm_bus_vote.client_handle)
2018 return;
2019
2020 spin_lock_irqsave(&host->lock, flags);
2021 /* don't vote for 0 bandwidth if any request is in progress */
2022 if (!host->mrq) {
2023 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302024 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302025 } else
2026		pr_warning("%s: %s: Transfer in progress, skipping bus voting to 0 bandwidth\n",
2027 mmc_hostname(host->mmc), __func__);
2028 spin_unlock_irqrestore(&host->lock, flags);
2029}
2030
2031/*
2032 * This function cancels any scheduled delayed work and sets the bus
2033 * vote based on bw (bandwidth) argument.
2034 */
2035static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
2036 unsigned int bw)
2037{
2038 int vote;
2039 unsigned long flags;
2040 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2041 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2042
2043 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
2044 spin_lock_irqsave(&host->lock, flags);
2045 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302046 sdhci_msm_bus_set_vote(msm_host, vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302047 spin_unlock_irqrestore(&host->lock, flags);
2048}
2049
2050#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
2051
2052/* This function queues a work item which will set the bandwidth requirement to 0 */
2053static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
2054{
2055 unsigned long flags;
2056 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2057 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2058
2059 spin_lock_irqsave(&host->lock, flags);
2060 if (msm_host->msm_bus_vote.min_bw_vote !=
2061 msm_host->msm_bus_vote.curr_vote)
2062 queue_delayed_work(system_wq,
2063 &msm_host->msm_bus_vote.vote_work,
2064 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
2065 spin_unlock_irqrestore(&host->lock, flags);
2066}
2067
2068static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
2069 struct platform_device *pdev)
2070{
2071 int rc = 0;
2072 struct msm_bus_scale_pdata *bus_pdata;
2073
2074 struct sdhci_msm_bus_voting_data *data;
2075 struct device *dev = &pdev->dev;
2076
2077 data = devm_kzalloc(dev,
2078 sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
2079 if (!data) {
2080 dev_err(&pdev->dev,
2081 "%s: failed to allocate memory\n", __func__);
2082 rc = -ENOMEM;
2083 goto out;
2084 }
2085 data->bus_pdata = msm_bus_cl_get_pdata(pdev);
2086 if (data->bus_pdata) {
2087 rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
2088 &data->bw_vecs, &data->bw_vecs_size, 0);
2089 if (rc) {
2090 dev_err(&pdev->dev,
2091 "%s: Failed to get bus-bw-vectors-bps\n",
2092 __func__);
2093 goto out;
2094 }
2095 host->pdata->voting_data = data;
2096 }
2097 if (host->pdata->voting_data &&
2098 host->pdata->voting_data->bus_pdata &&
2099 host->pdata->voting_data->bw_vecs &&
2100 host->pdata->voting_data->bw_vecs_size) {
2101
2102 bus_pdata = host->pdata->voting_data->bus_pdata;
2103 host->msm_bus_vote.client_handle =
2104 msm_bus_scale_register_client(bus_pdata);
2105 if (!host->msm_bus_vote.client_handle) {
2106			dev_err(&pdev->dev, "msm_bus_scale_register_client() failed\n");
2107 rc = -EFAULT;
2108 goto out;
2109 }
2110 /* cache the vote index for minimum and maximum bandwidth */
2111 host->msm_bus_vote.min_bw_vote =
2112 sdhci_msm_bus_get_vote_for_bw(host, 0);
2113 host->msm_bus_vote.max_bw_vote =
2114 sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
2115 } else {
2116 devm_kfree(dev, data);
2117 }
2118
2119out:
2120 return rc;
2121}
2122
2123static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
2124{
2125 if (host->msm_bus_vote.client_handle)
2126 msm_bus_scale_unregister_client(
2127 host->msm_bus_vote.client_handle);
2128}
2129
2130static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
2131{
2132 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2133 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2134 struct mmc_ios *ios = &host->mmc->ios;
2135 unsigned int bw;
2136
2137 if (!msm_host->msm_bus_vote.client_handle)
2138 return;
2139
2140 bw = sdhci_get_bw_required(host, ios);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302141 if (enable) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302142 sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302143 } else {
2144 /*
2145 * If clock gating is enabled, then remove the vote
2146 * immediately because clocks will be disabled only
2147 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
2148 * additional delay is required to remove the bus vote.
2149 */
2150#ifdef CONFIG_MMC_CLKGATE
2151 if (host->mmc->clkgate_delay)
2152 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2153 else
2154#endif
2155 sdhci_msm_bus_queue_work(host);
2156 }
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302157}
2158
Asutosh Das0ef24812012-12-18 16:14:02 +05302159/* Regulator utility functions */
2160static int sdhci_msm_vreg_init_reg(struct device *dev,
2161 struct sdhci_msm_reg_data *vreg)
2162{
2163 int ret = 0;
2164
2165	/* check if the regulator is already initialized */
2166 if (vreg->reg)
2167 goto out;
2168
2169 /* Get the regulator handle */
2170 vreg->reg = devm_regulator_get(dev, vreg->name);
2171 if (IS_ERR(vreg->reg)) {
2172 ret = PTR_ERR(vreg->reg);
2173 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
2174 __func__, vreg->name, ret);
2175 goto out;
2176 }
2177
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302178 if (regulator_count_voltages(vreg->reg) > 0) {
2179 vreg->set_voltage_sup = true;
2180 /* sanity check */
2181 if (!vreg->high_vol_level || !vreg->hpm_uA) {
2182 pr_err("%s: %s invalid constraints specified\n",
2183 __func__, vreg->name);
2184 ret = -EINVAL;
2185 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302186 }
2187
2188out:
2189 return ret;
2190}
2191
2192static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2193{
2194 if (vreg->reg)
2195 devm_regulator_put(vreg->reg);
2196}
2197
2198static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2199 *vreg, int uA_load)
2200{
2201 int ret = 0;
2202
2203 /*
2204 * regulators that do not support regulator_set_voltage also
2205 * do not support regulator_set_optimum_mode
2206 */
2207 if (vreg->set_voltage_sup) {
2208 ret = regulator_set_load(vreg->reg, uA_load);
2209 if (ret < 0)
2210 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2211 __func__, vreg->name, uA_load, ret);
2212 else
2213 /*
2214			 * regulator_set_load() can return a non-zero
2215			 * value even in the success case.
2216 */
2217 ret = 0;
2218 }
2219 return ret;
2220}
2221
2222static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2223 int min_uV, int max_uV)
2224{
2225 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302226 if (vreg->set_voltage_sup) {
2227 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2228 if (ret) {
2229			pr_err("%s: regulator_set_voltage(%s) failed. min_uV=%d, max_uV=%d, ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302230 __func__, vreg->name, min_uV, max_uV, ret);
2231 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302232 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302233
2234 return ret;
2235}
2236
2237static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2238{
2239 int ret = 0;
2240
2241 /* Put regulator in HPM (high power mode) */
2242 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2243 if (ret < 0)
2244 return ret;
2245
2246 if (!vreg->is_enabled) {
2247 /* Set voltage level */
2248 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2249 vreg->high_vol_level);
2250 if (ret)
2251 return ret;
2252 }
2253 ret = regulator_enable(vreg->reg);
2254 if (ret) {
2255 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2256 __func__, vreg->name, ret);
2257 return ret;
2258 }
2259 vreg->is_enabled = true;
2260 return ret;
2261}
2262
2263static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
2264{
2265 int ret = 0;
2266
2267 /* Never disable regulator marked as always_on */
2268 if (vreg->is_enabled && !vreg->is_always_on) {
2269 ret = regulator_disable(vreg->reg);
2270 if (ret) {
2271 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
2272 __func__, vreg->name, ret);
2273 goto out;
2274 }
2275 vreg->is_enabled = false;
2276
2277 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
2278 if (ret < 0)
2279 goto out;
2280
2281 /* Set min. voltage level to 0 */
2282 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
2283 if (ret)
2284 goto out;
2285 } else if (vreg->is_enabled && vreg->is_always_on) {
2286 if (vreg->lpm_sup) {
2287 /* Put always_on regulator in LPM (low power mode) */
2288 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
2289 vreg->lpm_uA);
2290 if (ret < 0)
2291 goto out;
2292 }
2293 }
2294out:
2295 return ret;
2296}
2297
2298static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2299 bool enable, bool is_init)
2300{
2301 int ret = 0, i;
2302 struct sdhci_msm_slot_reg_data *curr_slot;
2303 struct sdhci_msm_reg_data *vreg_table[2];
2304
2305 curr_slot = pdata->vreg_data;
2306 if (!curr_slot) {
2307		pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
2308 __func__);
2309 goto out;
2310 }
2311
2312 vreg_table[0] = curr_slot->vdd_data;
2313 vreg_table[1] = curr_slot->vdd_io_data;
2314
2315 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2316 if (vreg_table[i]) {
2317 if (enable)
2318 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2319 else
2320 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2321 if (ret)
2322 goto out;
2323 }
2324 }
2325out:
2326 return ret;
2327}
2328
2329/*
2330 * Reset vreg by ensuring it is off during probe. The enable call is
2331 * needed only to balance the subsequent disable.
2332 */
2333static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
2334{
2335 int ret;
2336
2337 ret = sdhci_msm_setup_vreg(pdata, 1, true);
2338 if (ret)
2339 return ret;
2340 ret = sdhci_msm_setup_vreg(pdata, 0, true);
2341 return ret;
2342}
2343
2344/* This init function should be called only once for each SDHC slot */
2345static int sdhci_msm_vreg_init(struct device *dev,
2346 struct sdhci_msm_pltfm_data *pdata,
2347 bool is_init)
2348{
2349 int ret = 0;
2350 struct sdhci_msm_slot_reg_data *curr_slot;
2351 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
2352
2353 curr_slot = pdata->vreg_data;
2354 if (!curr_slot)
2355 goto out;
2356
2357 curr_vdd_reg = curr_slot->vdd_data;
2358 curr_vdd_io_reg = curr_slot->vdd_io_data;
2359
2360 if (!is_init)
2361 /* Deregister all regulators from regulator framework */
2362 goto vdd_io_reg_deinit;
2363
2364 /*
2365 * Get the regulator handle from voltage regulator framework
2366 * and then try to set the voltage level for the regulator
2367 */
2368 if (curr_vdd_reg) {
2369 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
2370 if (ret)
2371 goto out;
2372 }
2373 if (curr_vdd_io_reg) {
2374 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
2375 if (ret)
2376 goto vdd_reg_deinit;
2377 }
2378 ret = sdhci_msm_vreg_reset(pdata);
2379 if (ret)
2380 dev_err(dev, "vreg reset failed (%d)\n", ret);
2381 goto out;
2382
2383vdd_io_reg_deinit:
2384 if (curr_vdd_io_reg)
2385 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
2386vdd_reg_deinit:
2387 if (curr_vdd_reg)
2388 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
2389out:
2390 return ret;
2391}
2392
2393
2394static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2395 enum vdd_io_level level,
2396 unsigned int voltage_level)
2397{
2398 int ret = 0;
2399 int set_level;
2400 struct sdhci_msm_reg_data *vdd_io_reg;
2401
2402 if (!pdata->vreg_data)
2403 return ret;
2404
2405 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2406 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2407 switch (level) {
2408 case VDD_IO_LOW:
2409 set_level = vdd_io_reg->low_vol_level;
2410 break;
2411 case VDD_IO_HIGH:
2412 set_level = vdd_io_reg->high_vol_level;
2413 break;
2414 case VDD_IO_SET_LEVEL:
2415 set_level = voltage_level;
2416 break;
2417 default:
2418 pr_err("%s: invalid argument level = %d",
2419 __func__, level);
2420 ret = -EINVAL;
2421 return ret;
2422 }
2423 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2424 set_level);
2425 }
2426 return ret;
2427}
2428
Ritesh Harjani42876f42015-11-17 17:46:51 +05302429/*
2430 * Acquire spin-lock host->lock before calling this function
2431 */
2432static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
2433 bool enable)
2434{
2435 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2436 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2437
2438 if (enable && !msm_host->is_sdiowakeup_enabled)
2439 enable_irq(msm_host->pdata->sdiowakeup_irq);
2440 else if (!enable && msm_host->is_sdiowakeup_enabled)
2441 disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
2442 else
2443 dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
2444 __func__, enable, msm_host->is_sdiowakeup_enabled);
2445 msm_host->is_sdiowakeup_enabled = enable;
2446}
2447
2448static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
2449{
2450 struct sdhci_host *host = (struct sdhci_host *)data;
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302451 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2452 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2453
Ritesh Harjani42876f42015-11-17 17:46:51 +05302454 unsigned long flags;
2455
2456 pr_debug("%s: irq (%d) received\n", __func__, irq);
2457
2458 spin_lock_irqsave(&host->lock, flags);
2459 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
2460 spin_unlock_irqrestore(&host->lock, flags);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302461 msm_host->sdio_pending_processing = true;
Ritesh Harjani42876f42015-11-17 17:46:51 +05302462
2463 return IRQ_HANDLED;
2464}
2465
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302466void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2467{
2468 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2469 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302470 const struct sdhci_msm_offset *msm_host_offset =
2471 msm_host->offset;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302472
2473 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
2474 mmc_hostname(host->mmc),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302475 sdhci_msm_readl_relaxed(host,
2476 msm_host_offset->CORE_PWRCTL_STATUS),
2477 sdhci_msm_readl_relaxed(host,
2478 msm_host_offset->CORE_PWRCTL_MASK),
2479 sdhci_msm_readl_relaxed(host,
2480 msm_host_offset->CORE_PWRCTL_CTL));
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302481}
2482
Asutosh Das0ef24812012-12-18 16:14:02 +05302483static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
2484{
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002485 struct sdhci_host *host = (struct sdhci_host *)data;
2486 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2487 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302488 const struct sdhci_msm_offset *msm_host_offset =
2489 msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05302490 u8 irq_status = 0;
2491 u8 irq_ack = 0;
2492 int ret = 0;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302493 int pwr_state = 0, io_level = 0;
2494 unsigned long flags;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302495 int retry = 10;
Asutosh Das0ef24812012-12-18 16:14:02 +05302496
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302497 irq_status = sdhci_msm_readb_relaxed(host,
2498 msm_host_offset->CORE_PWRCTL_STATUS);
2499
Asutosh Das0ef24812012-12-18 16:14:02 +05302500 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
2501 mmc_hostname(msm_host->mmc), irq, irq_status);
2502
2503 /* Clear the interrupt */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302504 sdhci_msm_writeb_relaxed(irq_status, host,
2505 msm_host_offset->CORE_PWRCTL_CLEAR);
2506
Asutosh Das0ef24812012-12-18 16:14:02 +05302507 /*
2508 * SDHC has core_mem and hc_mem device memory and these memory
2509 * addresses do not fall within 1KB region. Hence, any update to
2510 * core_mem address space would require an mb() to ensure this gets
2511 * completed before its next update to registers within hc_mem.
2512 */
2513 mb();
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302514 /*
2515 * There is a rare HW scenario where the first clear pulse could be
2516	 * lost when the actual reset and the clear/read of the status register
2517	 * happen at the same time. Hence, retry at least 10 times to make
2518 * sure status register is cleared. Otherwise, this will result in
2519 * a spurious power IRQ resulting in system instability.
2520 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302521 while (irq_status & sdhci_msm_readb_relaxed(host,
2522 msm_host_offset->CORE_PWRCTL_STATUS)) {
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302523 if (retry == 0) {
2524			pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
2525 mmc_hostname(host->mmc), irq_status);
2526 sdhci_msm_dump_pwr_ctrl_regs(host);
2527 BUG_ON(1);
2528 }
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302529 sdhci_msm_writeb_relaxed(irq_status, host,
2530 msm_host_offset->CORE_PWRCTL_CLEAR);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302531 retry--;
2532 udelay(10);
2533 }
2534 if (likely(retry < 10))
2535 pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
2536 mmc_hostname(host->mmc), irq_status, retry);
Asutosh Das0ef24812012-12-18 16:14:02 +05302537
2538 /* Handle BUS ON/OFF*/
2539 if (irq_status & CORE_PWRCTL_BUS_ON) {
2540 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302541 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302542 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302543 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2544 VDD_IO_HIGH, 0);
2545 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302546 if (ret)
2547 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2548 else
2549 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302550
2551 pwr_state = REQ_BUS_ON;
2552 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302553 }
2554 if (irq_status & CORE_PWRCTL_BUS_OFF) {
2555 ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302556 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302557 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302558 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2559 VDD_IO_LOW, 0);
2560 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302561 if (ret)
2562 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2563 else
2564 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302565
2566 pwr_state = REQ_BUS_OFF;
2567 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302568 }
2569 /* Handle IO LOW/HIGH */
2570 if (irq_status & CORE_PWRCTL_IO_LOW) {
2571 /* Switch voltage Low */
2572 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
2573 if (ret)
2574 irq_ack |= CORE_PWRCTL_IO_FAIL;
2575 else
2576 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302577
2578 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302579 }
2580 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2581 /* Switch voltage High */
2582 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2583 if (ret)
2584 irq_ack |= CORE_PWRCTL_IO_FAIL;
2585 else
2586 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302587
2588 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302589 }
2590
2591 /* ACK status to the core */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302592 sdhci_msm_writeb_relaxed(irq_ack, host,
2593 msm_host_offset->CORE_PWRCTL_CTL);
Asutosh Das0ef24812012-12-18 16:14:02 +05302594 /*
2595 * SDHC has core_mem and hc_mem device memory and these memory
2596 * addresses do not fall within 1KB region. Hence, any update to
2597 * core_mem address space would require an mb() to ensure this gets
2598 * completed before its next update to registers within hc_mem.
2599 */
2600 mb();
2601
Krishna Konda46fd1432014-10-30 21:13:27 -07002602 if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302603 writel_relaxed((readl_relaxed(host->ioaddr +
2604 msm_host_offset->CORE_VENDOR_SPEC) &
2605 ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2606 msm_host_offset->CORE_VENDOR_SPEC);
Krishna Konda46fd1432014-10-30 21:13:27 -07002607 else if ((io_level & REQ_IO_LOW) ||
2608 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302609 writel_relaxed((readl_relaxed(host->ioaddr +
2610 msm_host_offset->CORE_VENDOR_SPEC) |
2611 CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2612 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002613 mb();
2614
Asutosh Das0ef24812012-12-18 16:14:02 +05302615 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2616 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302617 spin_lock_irqsave(&host->lock, flags);
2618 if (pwr_state)
2619 msm_host->curr_pwr_state = pwr_state;
2620 if (io_level)
2621 msm_host->curr_io_level = io_level;
2622 complete(&msm_host->pwr_irq_completion);
2623 spin_unlock_irqrestore(&host->lock, flags);
2624
Asutosh Das0ef24812012-12-18 16:14:02 +05302625 return IRQ_HANDLED;
2626}
2627
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302628static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302629show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2630{
2631 struct sdhci_host *host = dev_get_drvdata(dev);
2632 int poll;
2633 unsigned long flags;
2634
2635 spin_lock_irqsave(&host->lock, flags);
2636 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2637 spin_unlock_irqrestore(&host->lock, flags);
2638
2639 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2640}
2641
2642static ssize_t
2643store_polling(struct device *dev, struct device_attribute *attr,
2644 const char *buf, size_t count)
2645{
2646 struct sdhci_host *host = dev_get_drvdata(dev);
2647 int value;
2648 unsigned long flags;
2649
2650 if (!kstrtou32(buf, 0, &value)) {
2651 spin_lock_irqsave(&host->lock, flags);
2652 if (value) {
2653 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2654 mmc_detect_change(host->mmc, 0);
2655 } else {
2656 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2657 }
2658 spin_unlock_irqrestore(&host->lock, flags);
2659 }
2660 return count;
2661}
2662
2663static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302664show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2665 char *buf)
2666{
2667 struct sdhci_host *host = dev_get_drvdata(dev);
2668 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2669 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2670
2671 return snprintf(buf, PAGE_SIZE, "%u\n",
2672 msm_host->msm_bus_vote.is_max_bw_needed);
2673}
2674
2675static ssize_t
2676store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2677 const char *buf, size_t count)
2678{
2679 struct sdhci_host *host = dev_get_drvdata(dev);
2680 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2681 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2682 uint32_t value;
2683 unsigned long flags;
2684
2685 if (!kstrtou32(buf, 0, &value)) {
2686 spin_lock_irqsave(&host->lock, flags);
2687 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2688 spin_unlock_irqrestore(&host->lock, flags);
2689 }
2690 return count;
2691}
2692
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302693static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das0ef24812012-12-18 16:14:02 +05302694{
2695 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2696 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302697 const struct sdhci_msm_offset *msm_host_offset =
2698 msm_host->offset;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302699 unsigned long flags;
2700 bool done = false;
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302701 u32 io_sig_sts;
Asutosh Das0ef24812012-12-18 16:14:02 +05302702
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302703 spin_lock_irqsave(&host->lock, flags);
2704 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2705 mmc_hostname(host->mmc), __func__, req_type,
2706 msm_host->curr_pwr_state, msm_host->curr_io_level);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302707 io_sig_sts = sdhci_msm_readl_relaxed(host,
2708 msm_host_offset->CORE_GENERICS);
2709
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302710 /*
2711 * The IRQ for request type IO High/Low will be generated when -
2712 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
2713 * 2. If 1 is true and when there is a state change in 1.8V enable
2714 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
2715 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
2716 * layer tries to set it to 3.3V before card detection happens, the
2717 * IRQ doesn't get triggered as there is no state change in this bit.
2718 * The driver already handles this case by changing the IO voltage
2719 * level to high as part of controller power up sequence. Hence, check
2720 * for host->pwr to handle a case where IO voltage high request is
2721 * issued even before controller power up.
2722 */
2723 if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
2724 if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
2725 ((req_type & REQ_IO_HIGH) && !host->pwr)) {
2726 pr_debug("%s: do not wait for power IRQ that never comes\n",
2727 mmc_hostname(host->mmc));
2728 spin_unlock_irqrestore(&host->lock, flags);
2729 return;
2730 }
2731 }
2732
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302733 if ((req_type & msm_host->curr_pwr_state) ||
2734 (req_type & msm_host->curr_io_level))
2735 done = true;
2736 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das0ef24812012-12-18 16:14:02 +05302737
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302738 /*
2739	 * This is needed here to handle a case where the IRQ gets
2740 * triggered even before this function is called so that
2741 * x->done counter of completion gets reset. Otherwise,
2742 * next call to wait_for_completion returns immediately
2743 * without actually waiting for the IRQ to be handled.
2744 */
2745 if (done)
2746 init_completion(&msm_host->pwr_irq_completion);
2747 else
2748 wait_for_completion(&msm_host->pwr_irq_completion);
2749
2750 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2751 __func__, req_type);
Asutosh Das0ef24812012-12-18 16:14:02 +05302752}
2753
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002754static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2755{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302756 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2757 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2758 const struct sdhci_msm_offset *msm_host_offset =
2759 msm_host->offset;
2760 u32 config = readl_relaxed(host->ioaddr +
2761 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302762
2763 if (enable) {
2764 config |= CORE_CDR_EN;
2765 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302766 writel_relaxed(config, host->ioaddr +
2767 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302768 } else {
2769 config &= ~CORE_CDR_EN;
2770 config |= CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302771 writel_relaxed(config, host->ioaddr +
2772 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302773 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002774}
2775
Asutosh Das648f9d12013-01-10 21:11:04 +05302776static unsigned int sdhci_msm_max_segs(void)
2777{
2778 return SDHCI_MSM_MAX_SEGMENTS;
2779}
2780
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302781static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302782{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302783 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2784 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302785
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302786 return msm_host->pdata->sup_clk_table[0];
2787}
2788
2789static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2790{
2791 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2792 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2793 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2794
2795 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2796}
2797
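/*
 * Returns the highest supported rate that does not exceed the request.
 * For example (illustrative values), with
 * qcom,clk-rates = <400000 25000000 50000000 200000000>, a 52 MHz
 * request selects 50000000, while requests below 400000 return the
 * minimum supported rate.
 */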
2798static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2799 u32 req_clk)
2800{
2801 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2802 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2803 unsigned int sel_clk = -1;
2804 unsigned char cnt;
2805
2806 if (req_clk < sdhci_msm_get_min_clock(host)) {
2807 sel_clk = sdhci_msm_get_min_clock(host);
2808 return sel_clk;
2809 }
2810
2811 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2812 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2813 break;
2814 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2815 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2816 break;
2817 } else {
2818 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2819 }
2820 }
2821 return sel_clk;
2822}
2823
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302824static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
2825{
2826 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2827 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2828 int rc = 0;
2829
2830 if (atomic_read(&msm_host->controller_clock))
2831 return 0;
2832
2833 sdhci_msm_bus_voting(host, 1);
2834
2835 if (!IS_ERR(msm_host->pclk)) {
2836 rc = clk_prepare_enable(msm_host->pclk);
2837 if (rc) {
2838 pr_err("%s: %s: failed to enable the pclk with error %d\n",
2839 mmc_hostname(host->mmc), __func__, rc);
2840 goto remove_vote;
2841 }
2842 }
2843
2844 rc = clk_prepare_enable(msm_host->clk);
2845 if (rc) {
2846 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
2847 mmc_hostname(host->mmc), __func__, rc);
2848 goto disable_pclk;
2849 }
2850
2851 atomic_set(&msm_host->controller_clock, 1);
2852 pr_debug("%s: %s: enabled controller clock\n",
2853 mmc_hostname(host->mmc), __func__);
2854 goto out;
2855
2856disable_pclk:
2857 if (!IS_ERR(msm_host->pclk))
2858 clk_disable_unprepare(msm_host->pclk);
2859remove_vote:
2860 if (msm_host->msm_bus_vote.client_handle)
2861 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2862out:
2863 return rc;
2864}
2865
Sayali Lokhandeb30295162016-11-18 16:05:50 +05302866static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
2867{
2868 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2869 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302870
Sayali Lokhandeb30295162016-11-18 16:05:50 +05302871 if (atomic_read(&msm_host->controller_clock)) {
2872 if (!IS_ERR(msm_host->clk))
2873 clk_disable_unprepare(msm_host->clk);
2874 if (!IS_ERR(msm_host->pclk))
2875 clk_disable_unprepare(msm_host->pclk);
2876 if (!IS_ERR(msm_host->ice_clk))
2877 clk_disable_unprepare(msm_host->ice_clk);
2878 sdhci_msm_bus_voting(host, 0);
2879 atomic_set(&msm_host->controller_clock, 0);
2880 pr_debug("%s: %s: disabled controller clock\n",
2881 mmc_hostname(host->mmc), __func__);
2882 }
2883}
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302884
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302885static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
2886{
2887 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2888 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2889 int rc = 0;
2890
2891 if (enable && !atomic_read(&msm_host->clks_on)) {
2892 pr_debug("%s: request to enable clocks\n",
2893 mmc_hostname(host->mmc));
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302894
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302895 /*
2896 * The bus-width or the clock rate might have changed
2897		 * after controller clocks are enabled; update the bus vote
2898		 * in such a case.
2899 */
2900 if (atomic_read(&msm_host->controller_clock))
2901 sdhci_msm_bus_voting(host, 1);
2902
2903 rc = sdhci_msm_enable_controller_clock(host);
2904 if (rc)
2905 goto remove_vote;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302906
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302907 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
2908 rc = clk_prepare_enable(msm_host->bus_clk);
2909 if (rc) {
2910 pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
2911 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302912 goto disable_controller_clk;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302913 }
2914 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002915 if (!IS_ERR(msm_host->ff_clk)) {
2916 rc = clk_prepare_enable(msm_host->ff_clk);
2917 if (rc) {
2918 pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
2919 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302920 goto disable_bus_clk;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002921 }
2922 }
2923 if (!IS_ERR(msm_host->sleep_clk)) {
2924 rc = clk_prepare_enable(msm_host->sleep_clk);
2925 if (rc) {
2926 pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
2927 mmc_hostname(host->mmc), __func__, rc);
2928 goto disable_ff_clk;
2929 }
2930 }
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302931 mb();
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302932
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302933 } else if (!enable && atomic_read(&msm_host->clks_on)) {
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302934 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
2935 mb();
Sahitya Tummaladc182982013-08-20 15:32:09 +05302936 /*
2937 * During 1.8V signal switching the clock source must
2938 * still be ON as it requires accessing SDHC
2939			 * registers (SDHCI Host Control2 register bit 3 must
2940 * be written and polled after stopping the SDCLK).
2941 */
2942 if (host->mmc->card_clock_off)
2943 return 0;
2944 pr_debug("%s: request to disable clocks\n",
2945 mmc_hostname(host->mmc));
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002946 if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
2947 clk_disable_unprepare(msm_host->sleep_clk);
2948 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
2949 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302950 clk_disable_unprepare(msm_host->clk);
2951 if (!IS_ERR(msm_host->pclk))
2952 clk_disable_unprepare(msm_host->pclk);
2953 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2954 clk_disable_unprepare(msm_host->bus_clk);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302955
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302956 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302957 sdhci_msm_bus_voting(host, 0);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302958 }
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302959 atomic_set(&msm_host->clks_on, enable);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302960 goto out;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002961disable_ff_clk:
2962 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
2963 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302964disable_bus_clk:
2965 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2966 clk_disable_unprepare(msm_host->bus_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302967disable_controller_clk:
2968 if (!IS_ERR_OR_NULL(msm_host->clk))
2969 clk_disable_unprepare(msm_host->clk);
2970 if (!IS_ERR_OR_NULL(msm_host->pclk))
2971 clk_disable_unprepare(msm_host->pclk);
2972 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302973remove_vote:
2974 if (msm_host->msm_bus_vote.client_handle)
2975 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302976out:
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302977 return rc;
2978}
2979
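/*
 * Note: sdhci_msm_set_clock() below is the ->set_clock host op. A zero
 * clock request gates all clocks; a non-zero request enables them,
 * manages the CORE_CLK_PWRSAVE bit based on whether the card allows
 * clock gating, doubles the source rate for DDR/HS400 timings, selects
 * the divided or free-running MCLK for the DLL and finally programs the
 * supported rate via clk_set_rate() before calling sdhci_set_clock().
 */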
2980static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
2981{
2982 int rc;
2983 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2984 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302985 const struct sdhci_msm_offset *msm_host_offset =
2986 msm_host->offset;
Subhash Jadavanic1e97552016-06-17 18:44:14 -07002987 struct mmc_card *card = host->mmc->card;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302988 struct mmc_ios curr_ios = host->mmc->ios;
Krishna Konda2faa7bb2014-06-04 01:25:16 -07002989 u32 sup_clock, ddr_clock, dll_lock;
Sahitya Tummala043744a2013-06-24 09:55:33 +05302990 bool curr_pwrsave;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302991
2992 if (!clock) {
Sujit Reddy Thummabf1aecc2014-01-10 10:58:54 +05302993 /*
2994 * disable pwrsave to ensure clock is not auto-gated until
2995 * the rate is >400KHz (initialization complete).
2996 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302997 writel_relaxed(readl_relaxed(host->ioaddr +
2998 msm_host_offset->CORE_VENDOR_SPEC) &
2999 ~CORE_CLK_PWRSAVE, host->ioaddr +
3000 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303001 sdhci_msm_prepare_clocks(host, false);
3002 host->clock = clock;
3003 goto out;
3004 }
3005
3006 rc = sdhci_msm_prepare_clocks(host, true);
3007 if (rc)
3008 goto out;
3009
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303010 curr_pwrsave = !!(readl_relaxed(host->ioaddr +
3011 msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
Sahitya Tummalae000b242013-08-29 16:21:08 +05303012 if ((clock > 400000) &&
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003013 !curr_pwrsave && card && mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303014 writel_relaxed(readl_relaxed(host->ioaddr +
3015 msm_host_offset->CORE_VENDOR_SPEC)
3016 | CORE_CLK_PWRSAVE, host->ioaddr +
3017 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303018 /*
3019	 * Disable pwrsave for a newly added card if it doesn't allow clock
3020 * gating.
3021 */
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003022 else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303023 writel_relaxed(readl_relaxed(host->ioaddr +
3024 msm_host_offset->CORE_VENDOR_SPEC)
3025 & ~CORE_CLK_PWRSAVE, host->ioaddr +
3026 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303027
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303028 sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003029 if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003030 (curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003031 (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303032 /*
3033		 * The SDHC requires the internal clock frequency to be double the
3034		 * actual clock that will be set for DDR mode. The controller
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003035		 * uses the faster clock (100/400 MHz) for some of its parts and
3036		 * sends the actual required clock (50/200 MHz) to the card.
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303037 */
3038 ddr_clock = clock * 2;
3039 sup_clock = sdhci_msm_get_sup_clk_rate(host,
3040 ddr_clock);
3041 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003042
3043 /*
3044 * In general all timing modes are controlled via UHS mode select in
3045	 * Host Control2 register. The eMMC-specific HS200/HS400 modes don't
3046	 * have their own settings defined here, hence we use these values.
3047 *
3048	 * HS200 - SDR104 (since the two are equivalent in functionality)
3049 * HS400 - This involves multiple configurations
3050 * Initially SDR104 - when tuning is required as HS200
3051 * Then when switching to DDR @ 400MHz (HS400) we use
3052 * the vendor specific HC_SELECT_IN to control the mode.
3053 *
3054 * In addition to controlling the modes we also need to select the
3055 * correct input clock for DLL depending on the mode.
3056 *
3057 * HS400 - divided clock (free running MCLK/2)
3058 * All other modes - default (free running MCLK)
3059 */
3060 if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
3061 /* Select the divided clock (free running MCLK/2) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303062 writel_relaxed(((readl_relaxed(host->ioaddr +
3063 msm_host_offset->CORE_VENDOR_SPEC)
3064 & ~CORE_HC_MCLK_SEL_MASK)
3065 | CORE_HC_MCLK_SEL_HS400), host->ioaddr +
3066 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003067 /*
3068 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
3069 * register
3070 */
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303071 if ((msm_host->tuning_done ||
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003072 (card && mmc_card_strobe(card) &&
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303073 msm_host->enhanced_strobe)) &&
3074 !msm_host->calibration_done) {
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003075 /*
3076 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
3077 * field in VENDOR_SPEC_FUNC
3078 */
3079 writel_relaxed((readl_relaxed(host->ioaddr + \
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303080 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003081 | CORE_HC_SELECT_IN_HS400
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303082 | CORE_HC_SELECT_IN_EN), host->ioaddr +
3083 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003084 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003085 if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
3086 /*
3087 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
3088 * CORE_DLL_STATUS to be set. This should get set
3089			 * within 15 us at 200 MHz.
3090 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303091 rc = readl_poll_timeout(host->ioaddr +
3092 msm_host_offset->CORE_DLL_STATUS,
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003093 dll_lock, (dll_lock & (CORE_DLL_LOCK |
3094 CORE_DDR_DLL_LOCK)), 10, 1000);
3095 if (rc == -ETIMEDOUT)
3096 pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
3097 mmc_hostname(host->mmc),
3098 dll_lock);
3099 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003100 } else {
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003101 if (!msm_host->use_cdclp533)
3102 /* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
3103 writel_relaxed((readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303104 msm_host_offset->CORE_VENDOR_SPEC3)
3105 & ~CORE_PWRSAVE_DLL), host->ioaddr +
3106 msm_host_offset->CORE_VENDOR_SPEC3);
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003107
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003108 /* Select the default clock (free running MCLK) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303109 writel_relaxed(((readl_relaxed(host->ioaddr +
3110 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003111 & ~CORE_HC_MCLK_SEL_MASK)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303112 | CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
3113 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003114
3115 /*
3116 * Disable HC_SELECT_IN to be able to use the UHS mode select
3117 * configuration from Host Control2 register for all other
3118 * modes.
3119 *
3120 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
3121 * in VENDOR_SPEC_FUNC
3122 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303123 writel_relaxed((readl_relaxed(host->ioaddr +
3124 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003125 & ~CORE_HC_SELECT_IN_EN
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303126 & ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
3127 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003128 }
3129 mb();
3130
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303131 if (sup_clock != msm_host->clk_rate) {
3132 pr_debug("%s: %s: setting clk rate to %u\n",
3133 mmc_hostname(host->mmc), __func__, sup_clock);
3134 rc = clk_set_rate(msm_host->clk, sup_clock);
3135 if (rc) {
3136 pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
3137 mmc_hostname(host->mmc), __func__,
3138 sup_clock, rc);
3139 goto out;
3140 }
3141 msm_host->clk_rate = sup_clock;
3142 host->clock = clock;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303143 /*
3144 * Update the bus vote in case of frequency change due to
3145 * clock scaling.
3146 */
3147 sdhci_msm_bus_voting(host, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303148 }
3149out:
3150 sdhci_set_clock(host, clock);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303151}
3152
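/*
 * Note: the ->set_uhs_signaling op below maps the MMC timing modes onto
 * the SDHCI_CTRL_UHS_* encodings (HS200/HS400 reuse SDR104) and, for
 * clocks at or below 100MHz, resets and powers down the DLL so that
 * tuning can be skipped until the rate is raised again.
 */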
Sahitya Tummala14613432013-03-21 11:13:25 +05303153static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
3154 unsigned int uhs)
3155{
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003156 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3157 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303158 const struct sdhci_msm_offset *msm_host_offset =
3159 msm_host->offset;
Sahitya Tummala14613432013-03-21 11:13:25 +05303160 u16 ctrl_2;
3161
3162 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
3163 /* Select Bus Speed Mode for host */
3164 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003165 if ((uhs == MMC_TIMING_MMC_HS400) ||
3166 (uhs == MMC_TIMING_MMC_HS200) ||
3167 (uhs == MMC_TIMING_UHS_SDR104))
Sahitya Tummala14613432013-03-21 11:13:25 +05303168 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
3169 else if (uhs == MMC_TIMING_UHS_SDR12)
3170 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
3171 else if (uhs == MMC_TIMING_UHS_SDR25)
3172 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
3173 else if (uhs == MMC_TIMING_UHS_SDR50)
3174 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003175 else if ((uhs == MMC_TIMING_UHS_DDR50) ||
3176 (uhs == MMC_TIMING_MMC_DDR52))
Sahitya Tummala14613432013-03-21 11:13:25 +05303177 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303178 /*
3179	 * When the clock frequency is less than 100MHz, the feedback clock must be
3180	 * provided and the DLL must not be used so that tuning can be skipped. To
3181	 * provide the feedback clock, the mode selection can be any value less
3182	 * than 3'b011 in bits [2:0] of the HOST CONTROL2 register.
3183 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003184 if (host->clock <= CORE_FREQ_100MHZ) {
3185 if ((uhs == MMC_TIMING_MMC_HS400) ||
3186 (uhs == MMC_TIMING_MMC_HS200) ||
3187 (uhs == MMC_TIMING_UHS_SDR104))
3188 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303189
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003190 /*
3191 * Make sure DLL is disabled when not required
3192 *
3193 * Write 1 to DLL_RST bit of DLL_CONFIG register
3194 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303195 writel_relaxed((readl_relaxed(host->ioaddr +
3196 msm_host_offset->CORE_DLL_CONFIG)
3197 | CORE_DLL_RST), host->ioaddr +
3198 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003199
3200 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303201 writel_relaxed((readl_relaxed(host->ioaddr +
3202 msm_host_offset->CORE_DLL_CONFIG)
3203 | CORE_DLL_PDN), host->ioaddr +
3204 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003205 mb();
3206
3207 /*
3208 * The DLL needs to be restored and CDCLP533 recalibrated
3209 * when the clock frequency is set back to 400MHz.
3210 */
3211 msm_host->calibration_done = false;
3212 }
3213
3214 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
3215 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala14613432013-03-21 11:13:25 +05303216 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
3217
3218}
3219
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003220#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003221#define DRV_NAME "cmdq-host"
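/*
 * Note: the helper below dumps the 16-word command-queue debug RAM.
 * Controllers older than SDHCI_MSM_VER_420 keep these registers at a
 * 0x48 offset, which is why the core minor version is checked first.
 */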
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303222static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003223{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303224 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303225 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3226 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303227 const struct sdhci_msm_offset *msm_host_offset =
3228 msm_host->offset;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303229 struct cmdq_host *cq_host = host->cq_host;
3230
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303231 u32 version = sdhci_msm_readl_relaxed(host,
3232 msm_host_offset->CORE_MCI_VERSION);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003233 u16 minor = version & CORE_VERSION_TARGET_MASK;
3234 /* registers offset changed starting from 4.2.0 */
3235 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
3236
3237 pr_err("---- Debug RAM dump ----\n");
3238 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
3239 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
3240 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
3241
3242 while (i < 16) {
3243 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
3244 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
3245 i++;
3246 }
3247 pr_err("-------------------------\n");
3248}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303249
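/*
 * Note: sdhci_msm_dump_vendor_regs() below is wired up as the
 * ->dump_vendor_regs host op. It dumps the CMDQ debug RAM when a
 * command-queue host is attached, prints the MCI/DLL/vendor-specific
 * registers, then walks the test bus (tbsel2/tbsel combinations, capped
 * at MAX_TEST_BUS entries) so the debug state can be captured from the
 * kernel log after an error.
 */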
3250void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
3251{
3252 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3253 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303254 const struct sdhci_msm_offset *msm_host_offset =
3255 msm_host->offset;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303256 int tbsel, tbsel2;
3257 int i, index = 0;
3258 u32 test_bus_val = 0;
3259 u32 debug_reg[MAX_TEST_BUS] = {0};
3260
3261 pr_info("----------- VENDOR REGISTER DUMP -----------\n");
Subhash Jadavania7a36b82015-10-16 18:33:25 -07003262 if (host->cq_host)
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303263 sdhci_msm_cmdq_dump_debug_ram(host);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003264
Sayali Lokhandebff771e2016-11-30 11:35:22 +05303265 MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
3266 sdhci_msm_readl_relaxed(host,
3267 msm_host_offset->CORE_MCI_DATA_CNT),
3268 sdhci_msm_readl_relaxed(host,
3269 msm_host_offset->CORE_MCI_FIFO_CNT));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303270 pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303271 sdhci_msm_readl_relaxed(host,
3272 msm_host_offset->CORE_MCI_DATA_CNT),
3273 sdhci_msm_readl_relaxed(host,
3274 msm_host_offset->CORE_MCI_FIFO_CNT),
3275 sdhci_msm_readl_relaxed(host,
3276 msm_host_offset->CORE_MCI_STATUS));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303277 pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303278 readl_relaxed(host->ioaddr +
3279 msm_host_offset->CORE_DLL_CONFIG),
3280 readl_relaxed(host->ioaddr +
3281 msm_host_offset->CORE_DLL_STATUS),
3282 sdhci_msm_readl_relaxed(host,
3283 msm_host_offset->CORE_MCI_VERSION));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303284 pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303285 readl_relaxed(host->ioaddr +
3286 msm_host_offset->CORE_VENDOR_SPEC),
3287 readl_relaxed(host->ioaddr +
3288 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
3289 readl_relaxed(host->ioaddr +
3290 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303291 pr_info("Vndr func2: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303292 readl_relaxed(host->ioaddr +
3293 msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303294
3295 /*
3296	 * tbsel indicates bits [2:0] and tbsel2 indicates bits [7:4]
3297 * of CORE_TESTBUS_CONFIG register.
3298 *
3299 * To select test bus 0 to 7 use tbsel and to select any test bus
3300	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For example,
3301	 * to select test bus 14, write 0x1E to the CORE_TESTBUS_CONFIG register,
3302 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
3303 */
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003304 for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303305 for (tbsel = 0; tbsel < 8; tbsel++) {
3306 if (index >= MAX_TEST_BUS)
3307 break;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303308 test_bus_val =
3309 (tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
3310 tbsel | msm_host_offset->CORE_TESTBUS_ENA;
3311 sdhci_msm_writel_relaxed(test_bus_val, host,
3312 msm_host_offset->CORE_TESTBUS_CONFIG);
3313 debug_reg[index++] = sdhci_msm_readl_relaxed(host,
3314 msm_host_offset->CORE_SDCC_DEBUG_REG);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303315 }
3316 }
3317 for (i = 0; i < MAX_TEST_BUS; i = i + 4)
3318 pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
3319 i, i + 3, debug_reg[i], debug_reg[i+1],
3320 debug_reg[i+2], debug_reg[i+3]);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003321}
3322
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303323/*
3324 * sdhci_msm_enhanced_strobe_mask :-
3325 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3326 * SW should write 3 to
3327 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3328 * The default reset value of this register is 2.
3329 */
3330static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3331{
3332 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3333 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303334 const struct sdhci_msm_offset *msm_host_offset =
3335 msm_host->offset;
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303336
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303337 if (!msm_host->enhanced_strobe ||
3338 !mmc_card_strobe(msm_host->mmc->card)) {
3339 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303340 mmc_hostname(host->mmc));
3341 return;
3342 }
3343
3344 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303345 writel_relaxed((readl_relaxed(host->ioaddr +
3346 msm_host_offset->CORE_VENDOR_SPEC3)
3347 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3348 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303349 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303350 writel_relaxed((readl_relaxed(host->ioaddr +
3351 msm_host_offset->CORE_VENDOR_SPEC3)
3352 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3353 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303354 }
3355}
3356
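/*
 * Note: the ->clear_set_dumpregs op below simply enables or disables the
 * vendor test bus by toggling CORE_TESTBUS_ENA in CORE_TESTBUS_CONFIG.
 */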
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003357static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3358{
3359 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3360 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303361 const struct sdhci_msm_offset *msm_host_offset =
3362 msm_host->offset;
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003363
3364 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303365 sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
3366 host, msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003367 } else {
3368 u32 value;
3369
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303370 value = sdhci_msm_readl_relaxed(host,
3371 msm_host_offset->CORE_TESTBUS_CONFIG);
3372 value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
3373 sdhci_msm_writel_relaxed(value, host,
3374 msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003375 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303376}
3377
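/*
 * Note: sdhci_msm_reset_workaround() below backs the
 * SDHCI_QUIRK2_USE_RESET_WORKAROUND path. On enable it requests a soft
 * reset via HC_SW_RST_REQ and polls for up to ~100ms (10000 * 10us); if
 * the request does not clear, it sets HC_SW_RST_WAIT_IDLE_DIS so the
 * reset no longer waits for pending AXI transfers. On disable it clears
 * the wait-idle-disable bit again.
 */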
Pavan Anamula691dd592015-08-25 16:11:20 +05303378void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
3379{
3380 u32 vendor_func2;
3381 unsigned long timeout;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303382 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3383 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3384 const struct sdhci_msm_offset *msm_host_offset =
3385 msm_host->offset;
Pavan Anamula691dd592015-08-25 16:11:20 +05303386
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303387 vendor_func2 = readl_relaxed(host->ioaddr +
3388 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303389
3390 if (enable) {
3391 writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303392 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303393 timeout = 10000;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303394 while (readl_relaxed(host->ioaddr +
3395 msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
Pavan Anamula691dd592015-08-25 16:11:20 +05303396 if (timeout == 0) {
3397 pr_info("%s: Applying wait idle disable workaround\n",
3398 mmc_hostname(host->mmc));
3399 /*
3400 * Apply the reset workaround to not wait for
3401 * pending data transfers on AXI before
3402 * resetting the controller. This could be
3403 * risky if the transfers were stuck on the
3404 * AXI bus.
3405 */
3406 vendor_func2 = readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303407 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303408 writel_relaxed(vendor_func2 |
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303409 HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
3410 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303411 host->reset_wa_t = ktime_get();
3412 return;
3413 }
3414 timeout--;
3415 udelay(10);
3416 }
3417 pr_info("%s: waiting for SW_RST_REQ is successful\n",
3418 mmc_hostname(host->mmc));
3419 } else {
3420 writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303421 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303422 }
3423}
3424
Gilad Broner44445992015-09-29 16:05:39 +03003425static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3426{
3427 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05303428 container_of(work, struct sdhci_msm_pm_qos_irq,
3429 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003430
3431 if (atomic_read(&pm_qos_irq->counter))
3432 return;
3433
3434 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3435 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3436}
3437
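/*
 * Note: the IRQ PM QoS vote/unvote pair below is reference counted.
 * Voting bumps the counter and applies the latency for the current
 * power policy; unvoting drops the counter and either relaxes the
 * request back to PM_QOS_DEFAULT_VALUE immediately or defers that
 * through unvote_work by QOS_REMOVE_DELAY_MS when called asynchronously.
 */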
3438void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3439{
3440 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3441 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3442 struct sdhci_msm_pm_qos_latency *latency =
3443 &msm_host->pdata->pm_qos_data.irq_latency;
3444 int counter;
3445
3446 if (!msm_host->pm_qos_irq.enabled)
3447 return;
3448
3449 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3450 /* Make sure to update the voting in case power policy has changed */
3451 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3452 && counter > 1)
3453 return;
3454
Asutosh Das36c2e922015-12-01 12:19:58 +05303455 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003456 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3457 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3458 msm_host->pm_qos_irq.latency);
3459}
3460
3461void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
3462{
3463 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3464 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3465 int counter;
3466
3467 if (!msm_host->pm_qos_irq.enabled)
3468 return;
3469
Subhash Jadavani4d813902015-10-15 12:16:43 -07003470 if (atomic_read(&msm_host->pm_qos_irq.counter)) {
3471 counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
3472 } else {
3473 WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
3474 return;
Gilad Broner44445992015-09-29 16:05:39 +03003475 }
Subhash Jadavani4d813902015-10-15 12:16:43 -07003476
Gilad Broner44445992015-09-29 16:05:39 +03003477 if (counter)
3478 return;
3479
3480 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303481 schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
3482 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner44445992015-09-29 16:05:39 +03003483 return;
3484 }
3485
3486 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3487 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3488 msm_host->pm_qos_irq.latency);
3489}
3490
Gilad Broner68c54562015-09-20 11:59:46 +03003491static ssize_t
3492sdhci_msm_pm_qos_irq_show(struct device *dev,
3493 struct device_attribute *attr, char *buf)
3494{
3495 struct sdhci_host *host = dev_get_drvdata(dev);
3496 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3497 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3498 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3499
3500 return snprintf(buf, PAGE_SIZE,
3501 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3502 irq->enabled, atomic_read(&irq->counter), irq->latency);
3503}
3504
3505static ssize_t
3506sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3507 struct device_attribute *attr, char *buf)
3508{
3509 struct sdhci_host *host = dev_get_drvdata(dev);
3510 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3511 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3512
3513 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3514}
3515
3516static ssize_t
3517sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3518 struct device_attribute *attr, const char *buf, size_t count)
3519{
3520 struct sdhci_host *host = dev_get_drvdata(dev);
3521 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3522 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3523 uint32_t value;
3524 bool enable;
3525 int ret;
3526
3527 ret = kstrtou32(buf, 0, &value);
3528 if (ret)
3529 goto out;
3530 enable = !!value;
3531
3532 if (enable == msm_host->pm_qos_irq.enabled)
3533 goto out;
3534
3535 msm_host->pm_qos_irq.enabled = enable;
3536 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303537 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003538 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3539 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3540 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3541 msm_host->pm_qos_irq.latency);
3542 }
3543
3544out:
3545 return count;
3546}
3547
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003548#ifdef CONFIG_SMP
3549static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3550 struct sdhci_host *host)
3551{
3552 msm_host->pm_qos_irq.req.irq = host->irq;
3553}
3554#else
3555static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3556 struct sdhci_host *host) { }
3557#endif
3558
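/*
 * Note: sdhci_msm_pm_qos_irq_init() below runs once (it may be called
 * per partition and is guarded by pm_qos_irq.enabled). It seeds the
 * request with the performance-mode latency, pins it to the host IRQ or
 * to the CPU configured in platform data, and exposes the
 * pm_qos_irq_enable / pm_qos_irq_status sysfs attributes.
 */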
Gilad Broner44445992015-09-29 16:05:39 +03003559void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
3560{
3561 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3562 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3563 struct sdhci_msm_pm_qos_latency *irq_latency;
Gilad Broner68c54562015-09-20 11:59:46 +03003564 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003565
3566 if (!msm_host->pdata->pm_qos_data.irq_valid)
3567 return;
3568
3569 /* Initialize only once as this gets called per partition */
3570 if (msm_host->pm_qos_irq.enabled)
3571 return;
3572
3573 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3574 msm_host->pm_qos_irq.req.type =
3575 msm_host->pdata->pm_qos_data.irq_req_type;
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003576 if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
3577 (msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
3578 set_affine_irq(msm_host, host);
Gilad Broner44445992015-09-29 16:05:39 +03003579 else
3580 cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
3581 cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
3582
Asutosh Das36c2e922015-12-01 12:19:58 +05303583 INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003584 sdhci_msm_pm_qos_irq_unvote_work);
3585 /* For initialization phase, set the performance latency */
3586 irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
3587 msm_host->pm_qos_irq.latency =
3588 irq_latency->latency[SDHCI_PERFORMANCE_MODE];
3589 pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
3590 msm_host->pm_qos_irq.latency);
3591 msm_host->pm_qos_irq.enabled = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003592
3593 /* sysfs */
3594 msm_host->pm_qos_irq.enable_attr.show =
3595 sdhci_msm_pm_qos_irq_enable_show;
3596 msm_host->pm_qos_irq.enable_attr.store =
3597 sdhci_msm_pm_qos_irq_enable_store;
3598 sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
3599 msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
3600 msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
3601 ret = device_create_file(&msm_host->pdev->dev,
3602 &msm_host->pm_qos_irq.enable_attr);
3603 if (ret)
3604 pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
3605 __func__, ret);
3606
3607 msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
3608 msm_host->pm_qos_irq.status_attr.store = NULL;
3609 sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
3610 msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
3611 msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
3612 ret = device_create_file(&msm_host->pdev->dev,
3613 &msm_host->pm_qos_irq.status_attr);
3614 if (ret)
3615 pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
3616 __func__, ret);
3617}
3618
3619static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3620 struct device_attribute *attr, char *buf)
3621{
3622 struct sdhci_host *host = dev_get_drvdata(dev);
3623 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3624 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3625 struct sdhci_msm_pm_qos_group *group;
3626 int i;
3627 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3628 int offset = 0;
3629
3630 for (i = 0; i < nr_groups; i++) {
3631 group = &msm_host->pm_qos[i];
3632 offset += snprintf(&buf[offset], PAGE_SIZE,
3633 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
3634 i, group->req.cpus_affine.bits[0],
3635 msm_host->pm_qos_group_enable,
3636 atomic_read(&group->counter),
3637 group->latency);
3638 }
3639
3640 return offset;
3641}
3642
3643static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
3644 struct device_attribute *attr, char *buf)
3645{
3646 struct sdhci_host *host = dev_get_drvdata(dev);
3647 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3648 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3649
3650 return snprintf(buf, PAGE_SIZE, "%s\n",
3651 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
3652}
3653
3654static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
3655 struct device_attribute *attr, const char *buf, size_t count)
3656{
3657 struct sdhci_host *host = dev_get_drvdata(dev);
3658 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3659 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3660 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3661 uint32_t value;
3662 bool enable;
3663 int ret;
3664 int i;
3665
3666 ret = kstrtou32(buf, 0, &value);
3667 if (ret)
3668 goto out;
3669 enable = !!value;
3670
3671 if (enable == msm_host->pm_qos_group_enable)
3672 goto out;
3673
3674 msm_host->pm_qos_group_enable = enable;
3675 if (!enable) {
3676 for (i = 0; i < nr_groups; i++) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303677 cancel_delayed_work_sync(
3678 &msm_host->pm_qos[i].unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003679 atomic_set(&msm_host->pm_qos[i].counter, 0);
3680 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
3681 pm_qos_update_request(&msm_host->pm_qos[i].req,
3682 msm_host->pm_qos[i].latency);
3683 }
3684 }
3685
3686out:
3687 return count;
Gilad Broner44445992015-09-29 16:05:39 +03003688}
3689
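/*
 * Note: sdhci_msm_get_cpu_group() below maps a CPU number to the index
 * of the PM QoS group whose cpumask contains it, or -EINVAL when no
 * group matches; the vote/unvote helpers that follow key off this index.
 */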
3690static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3691{
3692 int i;
3693 struct sdhci_msm_cpu_group_map *map =
3694 &msm_host->pdata->pm_qos_data.cpu_group_map;
3695
3696 if (cpu < 0)
3697 goto not_found;
3698
3699 for (i = 0; i < map->nr_groups; i++)
3700 if (cpumask_test_cpu(cpu, &map->mask[i]))
3701 return i;
3702
3703not_found:
3704 return -EINVAL;
3705}
3706
3707void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
3708 struct sdhci_msm_pm_qos_latency *latency, int cpu)
3709{
3710 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3711 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3712 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3713 struct sdhci_msm_pm_qos_group *pm_qos_group;
3714 int counter;
3715
3716 if (!msm_host->pm_qos_group_enable || group < 0)
3717 return;
3718
3719 pm_qos_group = &msm_host->pm_qos[group];
3720 counter = atomic_inc_return(&pm_qos_group->counter);
3721
3722 /* Make sure to update the voting in case power policy has changed */
3723 if (pm_qos_group->latency == latency->latency[host->power_policy]
3724 && counter > 1)
3725 return;
3726
Asutosh Das36c2e922015-12-01 12:19:58 +05303727 cancel_delayed_work_sync(&pm_qos_group->unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003728
3729 pm_qos_group->latency = latency->latency[host->power_policy];
3730 pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
3731}
3732
3733static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3734{
3735 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05303736 container_of(work, struct sdhci_msm_pm_qos_group,
3737 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003738
3739 if (atomic_read(&group->counter))
3740 return;
3741
3742 group->latency = PM_QOS_DEFAULT_VALUE;
3743 pm_qos_update_request(&group->req, group->latency);
3744}
3745
Gilad Broner07d92eb2015-09-29 16:57:21 +03003746bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03003747{
3748 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3749 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3750 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3751
3752 if (!msm_host->pm_qos_group_enable || group < 0 ||
3753 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03003754 return false;
Gilad Broner44445992015-09-29 16:05:39 +03003755
3756 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303757 schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
3758 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner07d92eb2015-09-29 16:57:21 +03003759 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003760 }
3761
3762 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
3763 pm_qos_update_request(&msm_host->pm_qos[group].req,
3764 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03003765 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003766}
3767
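/*
 * Note: the init helper below allocates one PM QoS request per CPU
 * group described in platform data, seeds each request with its
 * performance-mode latency and registers the pm_qos_cpu_groups_status /
 * pm_qos_cpu_groups_enable sysfs attributes.
 */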
3768void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
3769 struct sdhci_msm_pm_qos_latency *latency)
3770{
3771 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3772 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3773 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3774 struct sdhci_msm_pm_qos_group *group;
3775 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03003776 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003777
3778 if (msm_host->pm_qos_group_enable)
3779 return;
3780
3781 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
3782 GFP_KERNEL);
3783 if (!msm_host->pm_qos)
3784 return;
3785
3786 for (i = 0; i < nr_groups; i++) {
3787 group = &msm_host->pm_qos[i];
Asutosh Das36c2e922015-12-01 12:19:58 +05303788 INIT_DELAYED_WORK(&group->unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003789 sdhci_msm_pm_qos_cpu_unvote_work);
3790 atomic_set(&group->counter, 0);
3791 group->req.type = PM_QOS_REQ_AFFINE_CORES;
3792 cpumask_copy(&group->req.cpus_affine,
3793 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
3794 /* For initialization phase, set the performance mode latency */
3795 group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
3796 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
3797 group->latency);
3798 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
3799 __func__, i,
3800 group->req.cpus_affine.bits[0],
3801 group->latency,
3802 &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
3803 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03003804 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03003805 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003806
3807 /* sysfs */
3808 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
3809 msm_host->pm_qos_group_status_attr.store = NULL;
3810 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
3811 msm_host->pm_qos_group_status_attr.attr.name =
3812 "pm_qos_cpu_groups_status";
3813 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
3814 ret = device_create_file(&msm_host->pdev->dev,
3815 &msm_host->pm_qos_group_status_attr);
3816 if (ret)
3817 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
3818 __func__, ret);
3819 msm_host->pm_qos_group_enable_attr.show =
3820 sdhci_msm_pm_qos_group_enable_show;
3821 msm_host->pm_qos_group_enable_attr.store =
3822 sdhci_msm_pm_qos_group_enable_store;
3823 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
3824 msm_host->pm_qos_group_enable_attr.attr.name =
3825 "pm_qos_cpu_groups_enable";
3826 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
3827 ret = device_create_file(&msm_host->pdev->dev,
3828 &msm_host->pm_qos_group_enable_attr);
3829 if (ret)
3830 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
3831 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03003832}
3833
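/*
 * Note: the ->pre_req/->post_req ops below vote for IRQ PM QoS and for
 * the QoS group of the CPU issuing the request, moving the CPU vote
 * when the issuing CPU migrates to a different group, and drop both
 * votes once the request completes.
 */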
Gilad Broner07d92eb2015-09-29 16:57:21 +03003834static void sdhci_msm_pre_req(struct sdhci_host *host,
3835 struct mmc_request *mmc_req)
3836{
3837 int cpu;
3838 int group;
3839 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3840 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3841 int prev_group = sdhci_msm_get_cpu_group(msm_host,
3842 msm_host->pm_qos_prev_cpu);
3843
3844 sdhci_msm_pm_qos_irq_vote(host);
3845
3846 cpu = get_cpu();
3847 put_cpu();
3848 group = sdhci_msm_get_cpu_group(msm_host, cpu);
3849 if (group < 0)
3850 return;
3851
3852 if (group != prev_group && prev_group >= 0) {
3853 sdhci_msm_pm_qos_cpu_unvote(host,
3854 msm_host->pm_qos_prev_cpu, false);
3855 prev_group = -1; /* make sure to vote for new group */
3856 }
3857
3858 if (prev_group < 0) {
3859 sdhci_msm_pm_qos_cpu_vote(host,
3860 msm_host->pdata->pm_qos_data.latency, cpu);
3861 msm_host->pm_qos_prev_cpu = cpu;
3862 }
3863}
3864
3865static void sdhci_msm_post_req(struct sdhci_host *host,
3866 struct mmc_request *mmc_req)
3867{
3868 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3869 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3870
3871 sdhci_msm_pm_qos_irq_unvote(host, false);
3872
3873 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
3874 msm_host->pm_qos_prev_cpu = -1;
3875}
3876
3877static void sdhci_msm_init(struct sdhci_host *host)
3878{
3879 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3880 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3881
3882 sdhci_msm_pm_qos_irq_init(host);
3883
3884 if (msm_host->pdata->pm_qos_data.legacy_valid)
3885 sdhci_msm_pm_qos_cpu_init(host,
3886 msm_host->pdata->pm_qos_data.latency);
3887}
3888
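/*
 * Note: sdhci_msm_get_current_limit() below reports the HPM current
 * budget (in uA) of the VDD supply from platform data, or 0 when no
 * such regulator data is available.
 */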
Sahitya Tummala9150a942014-10-31 15:33:04 +05303889static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
3890{
3891 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3892 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3893 struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
3894 u32 max_curr = 0;
3895
3896 if (curr_slot && curr_slot->vdd_data)
3897 max_curr = curr_slot->vdd_data->hpm_uA;
3898
3899 return max_curr;
3900}
3901
Asutosh Das0ef24812012-12-18 16:14:02 +05303902static struct sdhci_ops sdhci_msm_ops = {
Sahitya Tummala14613432013-03-21 11:13:25 +05303903 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
Asutosh Das0ef24812012-12-18 16:14:02 +05303904 .check_power_status = sdhci_msm_check_power_status,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003905 .platform_execute_tuning = sdhci_msm_execute_tuning,
Ritesh Harjaniea709662015-05-27 15:40:24 +05303906 .enhanced_strobe = sdhci_msm_enhanced_strobe,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003907 .toggle_cdr = sdhci_msm_toggle_cdr,
Asutosh Das648f9d12013-01-10 21:11:04 +05303908 .get_max_segments = sdhci_msm_max_segs,
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303909 .set_clock = sdhci_msm_set_clock,
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303910 .get_min_clock = sdhci_msm_get_min_clock,
3911 .get_max_clock = sdhci_msm_get_max_clock,
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303912 .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
Asutosh Dase5e9ca62013-07-30 19:08:36 +05303913 .config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303914 .enable_controller_clock = sdhci_msm_enable_controller_clock,
Venkat Gopalakrishnanb8cb7072015-01-09 11:04:34 -08003915 .set_bus_width = sdhci_set_bus_width,
Venkat Gopalakrishnan411df072015-01-09 11:09:44 -08003916 .reset = sdhci_reset,
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003917 .clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303918 .enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
Pavan Anamula691dd592015-08-25 16:11:20 +05303919 .reset_workaround = sdhci_msm_reset_workaround,
Gilad Broner07d92eb2015-09-29 16:57:21 +03003920 .init = sdhci_msm_init,
3921 .pre_req = sdhci_msm_pre_req,
3922 .post_req = sdhci_msm_post_req,
Sahitya Tummala9150a942014-10-31 15:33:04 +05303923 .get_current_limit = sdhci_msm_get_current_limit,
Asutosh Das0ef24812012-12-18 16:14:02 +05303924};
3925
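/*
 * Note: sdhci_set_default_hw_caps() below patches the advertised
 * capabilities based on the core major/minor version: it re-adds 3.0V,
 * 1.8V and 8-bit support where the controller under-reports them,
 * enables the DLL and reset workarounds for the affected minor
 * versions, and caches the final capability value in msm_host->caps_0.
 */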
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303926static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
3927 struct sdhci_host *host)
3928{
Krishna Konda46fd1432014-10-30 21:13:27 -07003929 u32 version, caps = 0;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303930 u16 minor;
3931 u8 major;
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303932 u32 val;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303933 const struct sdhci_msm_offset *msm_host_offset =
3934 msm_host->offset;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303935
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303936 version = sdhci_msm_readl_relaxed(host,
3937 msm_host_offset->CORE_MCI_VERSION);
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303938 major = (version & CORE_VERSION_MAJOR_MASK) >>
3939 CORE_VERSION_MAJOR_SHIFT;
3940 minor = version & CORE_VERSION_TARGET_MASK;
3941
Krishna Konda46fd1432014-10-30 21:13:27 -07003942 caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
3943
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303944 /*
3945	 * Starting with the SDCC 5 controller (core major version = 1), the
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003946	 * controller won't advertise 3.0V, 1.8V and 8-bit features
3947	 * except on some targets.
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303948 */
3949 if (major >= 1 && minor != 0x11 && minor != 0x12) {
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003950 struct sdhci_msm_reg_data *vdd_io_reg;
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003951 /*
3952 * Enable 1.8V support capability on controllers that
3953 * support dual voltage
3954 */
3955 vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
Krishna Konda46fd1432014-10-30 21:13:27 -07003956 if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
3957 caps |= CORE_3_0V_SUPPORT;
3958 if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003959 caps |= CORE_1_8V_SUPPORT;
Pratibhasagar Vada47992013-12-09 20:42:32 +05303960 if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
3961 caps |= CORE_8_BIT_SUPPORT;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303962 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003963
3964 /*
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303965 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
3966	 * on 8992 (minor 0x3e) as a workaround for the data stuck issue on reset.
3967 */
3968 if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
Pavan Anamula691dd592015-08-25 16:11:20 +05303969 host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303970 val = readl_relaxed(host->ioaddr +
3971 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303972 writel_relaxed((val | CORE_ONE_MID_EN),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303973 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303974 }
3975 /*
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003976 * SDCC 5 controller with major version 1, minor version 0x34 and later
3977 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
3978 */
3979 if ((major == 1) && (minor < 0x34))
3980 msm_host->use_cdclp533 = true;
Gilad Broner2a10ca02014-10-02 17:20:35 +03003981
3982 /*
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08003983 * SDCC 5 controller with major version 1, minor version 0x42 and later
3984 * will require additional steps when resetting DLL.
Ritesh Harjaniea709662015-05-27 15:40:24 +05303985 * It also supports HS400 enhanced strobe mode.
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08003986 */
Ritesh Harjaniea709662015-05-27 15:40:24 +05303987 if ((major == 1) && (minor >= 0x42)) {
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08003988 msm_host->use_updated_dll_reset = true;
Ritesh Harjaniea709662015-05-27 15:40:24 +05303989 msm_host->enhanced_strobe = true;
3990 }
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08003991
3992 /*
Talel Shenhar9a25b882015-06-02 13:36:35 +03003993 * SDCC 5 controller with major version 1 and minor version 0x42,
3994	 * 0x46 and 0x49 currently uses the 14lpp tech DLL, whose internal
3995	 * gating cannot guarantee the MCLK timing requirement, i.e.
Ritesh Harjani764065e2015-05-13 14:14:45 +05303996	 * when MCLK is gated OFF, it is not gated for less than 0.5us
3997	 * and MCLK must be switched on for at least 1us before DATA
3998 * starts coming.
3999 */
Talel Shenhar9a25b882015-06-02 13:36:35 +03004000 if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
4001 (minor == 0x49)))
Ritesh Harjani764065e2015-05-13 14:14:45 +05304002 msm_host->use_14lpp_dll = true;
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004003
Pavan Anamula5a256df2015-10-16 14:38:28 +05304004	/* Fake 3.0V support for SDIO devices which require such voltage */
4005 if (msm_host->pdata->core_3_0v_support) {
4006 caps |= CORE_3_0V_SUPPORT;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304007 writel_relaxed((readl_relaxed(host->ioaddr +
4008 SDHCI_CAPABILITIES) | caps), host->ioaddr +
4009 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Pavan Anamula5a256df2015-10-16 14:38:28 +05304010 }
4011
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004012 if ((major == 1) && (minor >= 0x49))
4013 msm_host->rclk_delay_fix = true;
Ritesh Harjani764065e2015-05-13 14:14:45 +05304014 /*
Gilad Broner2a10ca02014-10-02 17:20:35 +03004015	 * Mask 64-bit support for controllers with a 32-bit address bus so that
4016	 * a smaller descriptor size is used, improving memory consumption.
Gilad Broner2a10ca02014-10-02 17:20:35 +03004017 */
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08004018 if (!msm_host->pdata->largeaddressbus)
4019 caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
4020
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304021 writel_relaxed(caps, host->ioaddr +
4022 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Krishna Konda46fd1432014-10-30 21:13:27 -07004023 /* keep track of the value in SDHCI_CAPABILITIES */
4024 msm_host->caps_0 = caps;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304025}
4026
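/*
 * Note: when CONFIG_MMC_CQ_HCI is enabled, the init helper below hooks
 * up the command-queue host (unless disabled via the nocmdq command-line
 * option) and advertises MMC_CAP2_CMD_QUEUE; the stub variant keeps the
 * build working without CMDQ support.
 */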
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004027#ifdef CONFIG_MMC_CQ_HCI
4028static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4029 struct platform_device *pdev)
4030{
4031 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4032 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4033
Ritesh Harjani7270ca22017-01-03 15:46:06 +05304034 if (nocmdq) {
4035 dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
4036 return;
4037 }
4038
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004039 host->cq_host = cmdq_pltfm_init(pdev);
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004040 if (IS_ERR(host->cq_host)) {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004041 dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
4042 PTR_ERR(host->cq_host));
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004043 host->cq_host = NULL;
4044 } else {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004045 msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004046 }
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004047}
4048#else
4049static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4050 struct platform_device *pdev)
4051{
4052
4053}
4054#endif
4055
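/*
 * Note: sdhci_msm_is_bootdevice() below compares the device name
 * against the "androidboot.bootdevice=" kernel command-line argument so
 * that probing can be skipped for an eMMC controller that is not the
 * boot device; it defaults to true when the argument is absent.
 */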
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004056static bool sdhci_msm_is_bootdevice(struct device *dev)
4057{
4058 if (strnstr(saved_command_line, "androidboot.bootdevice=",
4059 strlen(saved_command_line))) {
4060 char search_string[50];
4061
4062 snprintf(search_string, ARRAY_SIZE(search_string),
4063 "androidboot.bootdevice=%s", dev_name(dev));
4064 if (strnstr(saved_command_line, search_string,
4065 strlen(saved_command_line)))
4066 return true;
4067 else
4068 return false;
4069 }
4070
4071 /*
4072	 * If the "androidboot.bootdevice=" argument is not present, then
4073	 * return true as we don't know the boot device anyway.
4074 */
4075 return true;
4076}
4077
Asutosh Das0ef24812012-12-18 16:14:02 +05304078static int sdhci_msm_probe(struct platform_device *pdev)
4079{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304080 const struct sdhci_msm_offset *msm_host_offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304081 struct sdhci_host *host;
4082 struct sdhci_pltfm_host *pltfm_host;
4083 struct sdhci_msm_host *msm_host;
4084 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004085 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004086 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004087 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05304088 struct resource *tlmm_memres = NULL;
4089 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304090 unsigned long flags;
Asutosh Das0ef24812012-12-18 16:14:02 +05304091
4092 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
4093 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
4094 GFP_KERNEL);
4095 if (!msm_host) {
4096 ret = -ENOMEM;
4097 goto out;
4098 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304099
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304100 if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
4101 msm_host->mci_removed = true;
4102 msm_host->offset = &sdhci_msm_offset_mci_removed;
4103 } else {
4104 msm_host->mci_removed = false;
4105 msm_host->offset = &sdhci_msm_offset_mci_present;
4106 }
4107 msm_host_offset = msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304108 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
4109 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
4110 if (IS_ERR(host)) {
4111 ret = PTR_ERR(host);
4112 goto out;
4113 }
4114
4115 pltfm_host = sdhci_priv(host);
4116 pltfm_host->priv = msm_host;
4117 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304118 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05304119
4120 /* Extract platform data */
4121 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004122 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
Pavan Anamulaf2dda062016-03-30 22:07:56 +05304123 if (ret <= 0) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004124 dev_err(&pdev->dev, "Failed to get slot index %d\n",
4125 ret);
4126 goto pltfm_free;
4127 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004128
4129 /* skip the probe if eMMC isn't a boot device */
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004130 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
4131 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004132 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004133 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004134
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004135 if (disable_slots & (1 << (ret - 1))) {
4136 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
4137 ret);
4138 ret = -ENODEV;
4139 goto pltfm_free;
4140 }
4141
Sayali Lokhande5f768322016-04-11 18:36:53 +05304142 if (ret <= 2)
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07004143 sdhci_slot[ret-1] = msm_host;
4144
Dov Levenglickc9033ab2015-03-10 16:00:56 +02004145 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
4146 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304147 if (!msm_host->pdata) {
4148 dev_err(&pdev->dev, "DT parsing error\n");
4149 goto pltfm_free;
4150 }
4151 } else {
4152 dev_err(&pdev->dev, "No device tree node\n");
4153 goto pltfm_free;
4154 }
4155
4156 /* Setup Clocks */
4157
4158 /* Setup SDCC bus voter clock. */
4159 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
4160 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
4161 /* Vote for max. clk rate for max. performance */
4162 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
4163 if (ret)
4164 goto pltfm_free;
4165 ret = clk_prepare_enable(msm_host->bus_clk);
4166 if (ret)
4167 goto pltfm_free;
4168 }
4169
4170 /* Setup main peripheral bus clock */
4171 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
4172 if (!IS_ERR(msm_host->pclk)) {
4173 ret = clk_prepare_enable(msm_host->pclk);
4174 if (ret)
4175 goto bus_clk_disable;
4176 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304177 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05304178
4179 /* Setup SDC MMC clock */
4180 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
4181 if (IS_ERR(msm_host->clk)) {
4182 ret = PTR_ERR(msm_host->clk);
4183 goto pclk_disable;
4184 }
4185
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304186 /* Set to the minimum supported clock frequency */
4187 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
4188 if (ret) {
4189 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304190 goto pclk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304191 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304192 ret = clk_prepare_enable(msm_host->clk);
4193 if (ret)
4194 goto pclk_disable;
4195
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304196 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304197 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304198
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004199 /* Setup CDC calibration fixed feedback clock */
4200 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
4201 if (!IS_ERR(msm_host->ff_clk)) {
4202 ret = clk_prepare_enable(msm_host->ff_clk);
4203 if (ret)
4204 goto clk_disable;
4205 }
4206
4207 /* Setup CDC calibration sleep clock */
4208 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
4209 if (!IS_ERR(msm_host->sleep_clk)) {
4210 ret = clk_prepare_enable(msm_host->sleep_clk);
4211 if (ret)
4212 goto ff_clk_disable;
4213 }
4214
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07004215 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
4216
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304217 ret = sdhci_msm_bus_register(msm_host, pdev);
4218 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004219 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304220
4221 if (msm_host->msm_bus_vote.client_handle)
4222 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
4223 sdhci_msm_bus_work);
4224 sdhci_msm_bus_voting(host, 1);
4225
Asutosh Das0ef24812012-12-18 16:14:02 +05304226 /* Setup regulators */
4227 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
4228 if (ret) {
4229 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304230 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05304231 }
4232
4233	/* Reset the core and enable SDHC mode */
4234 core_memres = platform_get_resource_byname(pdev,
4235 IORESOURCE_MEM, "core_mem");
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304236 if (!msm_host->mci_removed) {
4237 if (!core_memres) {
4238 dev_err(&pdev->dev, "Failed to get iomem resource\n");
4239 goto vreg_deinit;
4240 }
4241 msm_host->core_mem = devm_ioremap(&pdev->dev,
4242 core_memres->start, resource_size(core_memres));
Asutosh Das0ef24812012-12-18 16:14:02 +05304243
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304244 if (!msm_host->core_mem) {
4245 dev_err(&pdev->dev, "Failed to remap registers\n");
4246 ret = -ENOMEM;
4247 goto vreg_deinit;
4248 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304249 }
4250
Sahitya Tummala079ed852015-10-29 20:18:45 +05304251 tlmm_memres = platform_get_resource_byname(pdev,
4252 IORESOURCE_MEM, "tlmm_mem");
4253 if (tlmm_memres) {
4254 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
4255 resource_size(tlmm_memres));
4256
4257 if (!tlmm_mem) {
4258 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
4259 ret = -ENOMEM;
4260 goto vreg_deinit;
4261 }
4262 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
4263 dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
4264 &tlmm_memres->start, readl_relaxed(tlmm_mem));
4265 }
4266
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304267 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004268	 * Reset the vendor spec register to its power-on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304269 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004270 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304271 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304272
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304273 if (!msm_host->mci_removed) {
4274 /* Set HC_MODE_EN bit in HC_MODE register */
4275 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
Asutosh Das0ef24812012-12-18 16:14:02 +05304276
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304277 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
4278 writel_relaxed(readl_relaxed(msm_host->core_mem +
4279 CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
4280 msm_host->core_mem + CORE_HC_MODE);
4281 }
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304282 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07004283
4284 /*
4285	 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH bit can
4286 * be used as required later on.
4287 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304288 writel_relaxed((readl_relaxed(host->ioaddr +
4289 msm_host_offset->CORE_VENDOR_SPEC) |
4290 CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
4291 msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05304292 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05304293	 * CORE_SW_RST above may trigger a power irq if the previous PWRCTL
4294	 * status was either BUS_ON or IO_HIGH_V. So before we enable the power
4295	 * irq interrupt in GIC (by registering the interrupt handler), we must
4296	 * ensure that any pending power irq interrupt status is acknowledged;
4297	 * otherwise the power irq interrupt handler would fire prematurely.
4298 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304299 irq_status = sdhci_msm_readl_relaxed(host,
4300 msm_host_offset->CORE_PWRCTL_STATUS);
4301 sdhci_msm_writel_relaxed(irq_status, host,
4302 msm_host_offset->CORE_PWRCTL_CLEAR);
4303 irq_ctl = sdhci_msm_readl_relaxed(host,
4304 msm_host_offset->CORE_PWRCTL_CTL);
4305
Subhash Jadavani28137342013-05-14 17:46:43 +05304306 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
4307 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
4308 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
4309 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304310 sdhci_msm_writel_relaxed(irq_ctl, host,
4311 msm_host_offset->CORE_PWRCTL_CTL);
Krishna Konda46fd1432014-10-30 21:13:27 -07004312
Subhash Jadavani28137342013-05-14 17:46:43 +05304313 /*
4314	 * Ensure that the above writes are propagated before the interrupt is
4315	 * enabled in GIC.
4316 */
4317 mb();
4318
4319 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05304320	 * The following are deviations from the SDHC spec v3.0:
4321	 * 1. Card detection is handled using a separate GPIO.
4322	 * 2. Bus power control is handled by interacting with the PMIC.
4323 */
4324 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
4325 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304326 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03004327 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304328 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d439442013-04-12 11:49:11 +05304329 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05304330 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05304331 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Sahitya Tummala43fb3372016-04-05 14:00:48 +05304332 host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
Sahitya Tummaladb5e53d2016-04-05 15:29:35 +05304333 host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
Asutosh Das0ef24812012-12-18 16:14:02 +05304334
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05304335 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
4336 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
4337
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004338 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004339 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
4340 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
4341 SDHCI_VENDOR_VER_SHIFT));
4342 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
4343 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
4344 /*
4345 * Add 40us delay in interrupt handler when
4346	 * operating at the initialization frequency (400 kHz).
4347 */
4348 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
4349 /*
4350 * Set Software Reset for DAT line in Software
4351 * Reset Register (Bit 2).
4352 */
4353 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
4354 }
4355
Asutosh Das214b9662013-06-13 14:27:42 +05304356 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
4357
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004358 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004359 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4360 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304361 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004362 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05304363 goto vreg_deinit;
4364 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004365 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05304366 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004367 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304368 if (ret) {
4369 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004370 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05304371 goto vreg_deinit;
4372 }
4373
4374 /* Enable pwr irq interrupts */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304375 sdhci_msm_writel_relaxed(INT_MASK, host,
4376 msm_host_offset->CORE_PWRCTL_MASK);
Asutosh Das0ef24812012-12-18 16:14:02 +05304377
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304378#ifdef CONFIG_MMC_CLKGATE
4379 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
4380 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
4381#endif
4382
Asutosh Das0ef24812012-12-18 16:14:02 +05304383 /* Set host capabilities */
4384 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
4385 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004386 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05304387 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05304388 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004389 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004390 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03004391 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05304392 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07004393 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03004394 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304395 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05304396
4397 if (msm_host->pdata->nonremovable)
4398 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4399
Guoping Yuf7c91332014-08-20 16:56:18 +08004400 if (msm_host->pdata->nonhotplug)
4401 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
4402
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05304403 init_completion(&msm_host->pwr_irq_completion);
4404
Sahitya Tummala581df132013-03-12 14:57:46 +05304405 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05304406 /*
4407		 * Set up the card detect GPIO in its active configuration before
4408		 * configuring it as an IRQ. Otherwise, it can be left in an
4409		 * inconsistent state, resulting in a flood of interrupts.
4410 */
4411 sdhci_msm_setup_pins(msm_host->pdata, true);
4412
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304413 /*
4414 * This delay is needed for stabilizing the card detect GPIO
4415 * line after changing the pull configs.
4416 */
4417 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304418 ret = mmc_gpio_request_cd(msm_host->mmc,
4419 msm_host->pdata->status_gpio, 0);
4420 if (ret) {
4421 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4422 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304423 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304424 }
4425 }
4426
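	/*
	 * Prefer a 64-bit DMA mask when both the capabilities register and
	 * the platform support it; otherwise fall back to a 32-bit mask.
	 */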
Krishna Konda7feab352013-09-17 23:55:40 -07004427 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4428 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4429 host->dma_mask = DMA_BIT_MASK(64);
4430 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304431 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004432 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304433 host->dma_mask = DMA_BIT_MASK(32);
4434 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304435 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304436 } else {
4437 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4438 }
4439
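	/*
	 * Optional dedicated wakeup interrupt (typically wired to the SDIO
	 * DAT1 line) used to wake the host while the regular card interrupt
	 * path is gated.
	 */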
Ritesh Harjani42876f42015-11-17 17:46:51 +05304440 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
4441 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05304442 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304443 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
4444 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304445 msm_host->is_sdiowakeup_enabled = true;
4446 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
4447 sdhci_msm_sdiowakeup_irq,
4448 IRQF_SHARED | IRQF_TRIGGER_HIGH,
4449 "sdhci-msm sdiowakeup", host);
4450 if (ret) {
4451 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
4452 __func__, msm_host->pdata->sdiowakeup_irq, ret);
4453 msm_host->pdata->sdiowakeup_irq = -1;
4454 msm_host->is_sdiowakeup_enabled = false;
4455 goto vreg_deinit;
4456 } else {
4457 spin_lock_irqsave(&host->lock, flags);
4458 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304459 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304460 spin_unlock_irqrestore(&host->lock, flags);
4461 }
4462 }
4463
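	/* Initialize command queue engine (CMDQ) support, where the controller provides it */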
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004464 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304465 ret = sdhci_add_host(host);
4466 if (ret) {
4467 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304468 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304469 }
4470
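	/*
	 * Mark the device active and enable runtime PM with autosuspend so
	 * the controller can be powered down between requests.
	 */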
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004471 pm_runtime_set_active(&pdev->dev);
4472 pm_runtime_enable(&pdev->dev);
4473 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4474 pm_runtime_use_autosuspend(&pdev->dev);
4475
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304476 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4477 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4478 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4479 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4480 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4481 ret = device_create_file(&pdev->dev,
4482 &msm_host->msm_bus_vote.max_bus_bw);
4483 if (ret)
4484 goto remove_host;
4485
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304486 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4487 msm_host->polling.show = show_polling;
4488 msm_host->polling.store = store_polling;
4489 sysfs_attr_init(&msm_host->polling.attr);
4490 msm_host->polling.attr.name = "polling";
4491 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4492 ret = device_create_file(&pdev->dev, &msm_host->polling);
4493 if (ret)
4494 goto remove_max_bus_bw_file;
4495 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304496
4497 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
4498 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
4499 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
4500 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
4501 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
4502 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4503 if (ret) {
4504 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
4505 mmc_hostname(host->mmc), __func__, ret);
4506 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4507 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304508 /* Successful initialization */
4509 goto out;
4510
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304511remove_max_bus_bw_file:
4512 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05304513remove_host:
4514 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004515 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304516 sdhci_remove_host(host, dead);
4517vreg_deinit:
4518 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304519bus_unregister:
4520 if (msm_host->msm_bus_vote.client_handle)
4521 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4522 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004523sleep_clk_disable:
4524 if (!IS_ERR(msm_host->sleep_clk))
4525 clk_disable_unprepare(msm_host->sleep_clk);
4526ff_clk_disable:
4527 if (!IS_ERR(msm_host->ff_clk))
4528 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304529clk_disable:
4530 if (!IS_ERR(msm_host->clk))
4531 clk_disable_unprepare(msm_host->clk);
4532pclk_disable:
4533 if (!IS_ERR(msm_host->pclk))
4534 clk_disable_unprepare(msm_host->pclk);
4535bus_clk_disable:
4536 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
4537 clk_disable_unprepare(msm_host->bus_clk);
4538pltfm_free:
4539 sdhci_pltfm_free(pdev);
4540out:
4541 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
4542 return ret;
4543}
4544
4545static int sdhci_msm_remove(struct platform_device *pdev)
4546{
4547 struct sdhci_host *host = platform_get_drvdata(pdev);
4548 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4549 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4550 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
4551 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
4552 0xffffffff);
4553
4554 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304555 if (!gpio_is_valid(msm_host->pdata->status_gpio))
4556 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304557 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004558 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304559 sdhci_remove_host(host, dead);
4560 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05304561
Asutosh Das0ef24812012-12-18 16:14:02 +05304562 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304563
Pratibhasagar V9acf2642013-11-21 21:07:21 +05304564 sdhci_msm_setup_pins(pdata, true);
4565 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304566
4567 if (msm_host->msm_bus_vote.client_handle) {
4568 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4569 sdhci_msm_bus_unregister(msm_host);
4570 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304571 return 0;
4572}
4573
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004574#ifdef CONFIG_PM
Ritesh Harjani42876f42015-11-17 17:46:51 +05304575static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
4576{
4577 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4578 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4579 unsigned long flags;
4580 int ret = 0;
4581
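	/*
	 * Return 1 when the dedicated wakeup path does not apply (no SDIO
	 * card, no valid wakeup GPIO interrupt, or the card has not asked
	 * for SDIO IRQ wake); a non-zero return makes the callers fall back
	 * to toggling the standard host interrupt instead.
	 */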
4582 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
4583 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
4584 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304585 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304586 return 1;
4587 }
4588
4589 spin_lock_irqsave(&host->lock, flags);
4590 if (enable) {
4591 /* configure DAT1 gpio if applicable */
4592 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304593 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304594 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4595 if (!ret)
4596 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
4597 goto out;
4598 } else {
4599 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4600 mmc_hostname(host->mmc), enable);
4601 }
4602 } else {
4603 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
4604 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4605 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304606 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304607 } else {
4608			pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4609				mmc_hostname(host->mmc), enable);
4611 }
4612 }
4613out:
4614 if (ret)
4615 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
4616 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
4617 ret, msm_host->pdata->sdiowakeup_irq);
4618 spin_unlock_irqrestore(&host->lock, flags);
4619 return ret;
4620}
4621
4622
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004623static int sdhci_msm_runtime_suspend(struct device *dev)
4624{
4625 struct sdhci_host *host = dev_get_drvdata(dev);
4626 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4627 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004628 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004629
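	/*
	 * For SDIO cards the host interrupt is left enabled here, presumably
	 * so in-band card interrupts can still be serviced while runtime
	 * suspended; only the power irq is disabled below.
	 */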
Ritesh Harjani42876f42015-11-17 17:46:51 +05304630 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4631 goto defer_disable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304632
Ritesh Harjani42876f42015-11-17 17:46:51 +05304633 sdhci_cfg_irq(host, false, true);
4634
4635defer_disable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004636 disable_irq(msm_host->pwr_irq);
4637
4638 /*
4639	 * Remove the vote immediately only if the clocks are off, in which
4640	 * case we might have queued work to remove the vote, but it may not
4641	 * complete before runtime suspend or system suspend.
4642 */
4643 if (!atomic_read(&msm_host->clks_on)) {
4644 if (msm_host->msm_bus_vote.client_handle)
4645 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4646 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004647 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
4648 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004649
4650 return 0;
4651}
4652
4653static int sdhci_msm_runtime_resume(struct device *dev)
4654{
4655 struct sdhci_host *host = dev_get_drvdata(dev);
4656 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4657 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004658 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004659
Ritesh Harjani42876f42015-11-17 17:46:51 +05304660 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4661 goto defer_enable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304662
Ritesh Harjani42876f42015-11-17 17:46:51 +05304663 sdhci_cfg_irq(host, true, true);
4664
4665defer_enable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004666 enable_irq(msm_host->pwr_irq);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004667
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004668 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
4669 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004670 return 0;
4671}
4672
4673static int sdhci_msm_suspend(struct device *dev)
4674{
4675 struct sdhci_host *host = dev_get_drvdata(dev);
4676 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4677 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004678 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304679 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004680 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004681
4682 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4683 (msm_host->mmc->slot.cd_irq >= 0))
4684 disable_irq(msm_host->mmc->slot.cd_irq);
4685
4686 if (pm_runtime_suspended(dev)) {
4687 pr_debug("%s: %s: already runtime suspended\n",
4688 mmc_hostname(host->mmc), __func__);
4689 goto out;
4690 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004691 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004692out:
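	/*
	 * Gate the controller clock unconditionally so it is off across
	 * system suspend even if the device was already runtime suspended.
	 */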
Sayali Lokhandeb30295162016-11-18 16:05:50 +05304693 sdhci_msm_disable_controller_clock(host);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304694 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4695 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
4696 if (sdio_cfg)
4697 sdhci_cfg_irq(host, false, true);
4698 }
4699
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004700 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
4701 ktime_to_us(ktime_sub(ktime_get(), start)));
4702 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004703}
4704
4705static int sdhci_msm_resume(struct device *dev)
4706{
4707 struct sdhci_host *host = dev_get_drvdata(dev);
4708 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4709 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4710 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304711 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004712 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004713
4714 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4715 (msm_host->mmc->slot.cd_irq >= 0))
4716 enable_irq(msm_host->mmc->slot.cd_irq);
4717
4718 if (pm_runtime_suspended(dev)) {
4719 pr_debug("%s: %s: runtime suspended, defer system resume\n",
4720 mmc_hostname(host->mmc), __func__);
4721 goto out;
4722 }
4723
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004724 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004725out:
Ritesh Harjani42876f42015-11-17 17:46:51 +05304726 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4727 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
4728 if (sdio_cfg)
4729 sdhci_cfg_irq(host, true, true);
4730 }
4731
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004732 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
4733 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004734 return ret;
4735}
4736
Ritesh Harjani42876f42015-11-17 17:46:51 +05304737static int sdhci_msm_suspend_noirq(struct device *dev)
4738{
4739 struct sdhci_host *host = dev_get_drvdata(dev);
4740 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4741 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4742 int ret = 0;
4743
4744 /*
4745	 * ksdioirqd may still be running, so retry the
4746	 * suspend if the clocks are still on.
4747 */
4748 if (atomic_read(&msm_host->clks_on)) {
4749 pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
4750 mmc_hostname(host->mmc), __func__);
4751 ret = -EAGAIN;
4752 }
4753
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304754 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4755 if (msm_host->sdio_pending_processing)
4756 ret = -EBUSY;
4757
Ritesh Harjani42876f42015-11-17 17:46:51 +05304758 return ret;
4759}
4760
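/*
 * System sleep and runtime PM share the helpers above; suspend_noirq only
 * backs out of suspend if the clocks are unexpectedly still on or SDIO
 * wakeup processing is pending.
 */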
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004761static const struct dev_pm_ops sdhci_msm_pmops = {
4762 SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
4763 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
4764 NULL)
Ritesh Harjani42876f42015-11-17 17:46:51 +05304765 .suspend_noirq = sdhci_msm_suspend_noirq,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004766};
4767
4768#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
4769
4770#else
4771#define SDHCI_MSM_PMOPS NULL
4772#endif
Asutosh Das0ef24812012-12-18 16:14:02 +05304773static const struct of_device_id sdhci_msm_dt_match[] = {
4774 {.compatible = "qcom,sdhci-msm"},
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304775 {.compatible = "qcom,sdhci-msm-v5"},
Venkat Gopalakrishnan272ba402015-06-25 12:00:02 -07004776 {},
Asutosh Das0ef24812012-12-18 16:14:02 +05304777};
4778MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
4779
4780static struct platform_driver sdhci_msm_driver = {
4781 .probe = sdhci_msm_probe,
4782 .remove = sdhci_msm_remove,
4783 .driver = {
4784 .name = "sdhci_msm",
4785 .owner = THIS_MODULE,
4786 .of_match_table = sdhci_msm_dt_match,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004787 .pm = SDHCI_MSM_PMOPS,
Asutosh Das0ef24812012-12-18 16:14:02 +05304788 },
4789};
4790
4791module_platform_driver(sdhci_msm_driver);
4792
4793MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
4794MODULE_LICENSE("GPL v2");