blob: 130bc4de2db3e0e441d9e8ebf301c688249683ff [file] [log] [blame]
Asutosh Das0ef24812012-12-18 16:14:02 +05301/*
2 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
3 * driver source file
4 *
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05305 * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
Asutosh Das0ef24812012-12-18 16:14:02 +05306 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/mmc/host.h>
20#include <linux/mmc/card.h>
21#include <linux/mmc/sdio_func.h>
22#include <linux/gfp.h>
23#include <linux/of.h>
24#include <linux/of_gpio.h>
25#include <linux/regulator/consumer.h>
26#include <linux/types.h>
27#include <linux/input.h>
28#include <linux/platform_device.h>
29#include <linux/wait.h>
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070030#include <linux/io.h>
31#include <linux/delay.h>
32#include <linux/scatterlist.h>
33#include <linux/slab.h>
Sahitya Tummala581df132013-03-12 14:57:46 +053034#include <linux/mmc/slot-gpio.h>
Sahitya Tummalaeaa21862013-03-20 19:34:59 +053035#include <linux/dma-mapping.h>
Sahitya Tummala66b0fe32013-04-25 11:50:56 +053036#include <linux/iopoll.h>
Pratibhasagar V9acf2642013-11-21 21:07:21 +053037#include <linux/pinctrl/consumer.h>
38#include <linux/iopoll.h>
Sahitya Tummala8a3e8182013-03-10 14:12:52 +053039#include <linux/msm-bus.h>
Konstantin Dorfman98377d32015-02-25 10:09:41 +020040#include <linux/pm_runtime.h>
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +020041#include <trace/events/mmc.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053042
Sahitya Tummala56874732015-05-21 08:24:03 +053043#include "sdhci-msm.h"
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -070044#include "cmdq_hci.h"
Asutosh Das0ef24812012-12-18 16:14:02 +053045
Asutosh Das36c2e922015-12-01 12:19:58 +053046#define QOS_REMOVE_DELAY_MS 10
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080047#define CORE_POWER 0x0
48#define CORE_SW_RST (1 << 7)
49
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -070050#define SDHCI_VER_100 0x2B
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080051#define CORE_MCI_DATA_CNT 0x30
52#define CORE_MCI_STATUS 0x34
53#define CORE_MCI_FIFO_CNT 0x44
54
55#define CORE_VERSION_STEP_MASK 0x0000FFFF
56#define CORE_VERSION_MINOR_MASK 0x0FFF0000
57#define CORE_VERSION_MINOR_SHIFT 16
58#define CORE_VERSION_MAJOR_MASK 0xF0000000
59#define CORE_VERSION_MAJOR_SHIFT 28
60#define CORE_VERSION_TARGET_MASK 0x000000FF
Konstantin Dorfman98543bf2015-10-01 17:56:54 +030061#define SDHCI_MSM_VER_420 0x49
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080062
63#define CORE_GENERICS 0x70
64#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +053065
66#define CORE_VERSION_MAJOR_MASK 0xF0000000
67#define CORE_VERSION_MAJOR_SHIFT 28
68
Asutosh Das0ef24812012-12-18 16:14:02 +053069#define CORE_HC_MODE 0x78
70#define HC_MODE_EN 0x1
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -070071#define FF_CLK_SW_RST_DIS (1 << 13)
Asutosh Das0ef24812012-12-18 16:14:02 +053072
Sahitya Tummala67717bc2013-08-02 09:21:37 +053073#define CORE_MCI_VERSION 0x050
74#define CORE_TESTBUS_CONFIG 0x0CC
75#define CORE_TESTBUS_ENA (1 << 3)
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080076#define CORE_TESTBUS_SEL2_BIT 4
77#define CORE_TESTBUS_SEL2 (1 << CORE_TESTBUS_SEL2_BIT)
Sahitya Tummala67717bc2013-08-02 09:21:37 +053078
Asutosh Das0ef24812012-12-18 16:14:02 +053079#define CORE_PWRCTL_STATUS 0xDC
80#define CORE_PWRCTL_MASK 0xE0
81#define CORE_PWRCTL_CLEAR 0xE4
82#define CORE_PWRCTL_CTL 0xE8
83
84#define CORE_PWRCTL_BUS_OFF 0x01
85#define CORE_PWRCTL_BUS_ON (1 << 1)
86#define CORE_PWRCTL_IO_LOW (1 << 2)
87#define CORE_PWRCTL_IO_HIGH (1 << 3)
88
89#define CORE_PWRCTL_BUS_SUCCESS 0x01
90#define CORE_PWRCTL_BUS_FAIL (1 << 1)
91#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
92#define CORE_PWRCTL_IO_FAIL (1 << 3)
93
94#define INT_MASK 0xF
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070095#define MAX_PHASES 16
96
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070097#define CORE_DLL_CONFIG 0x100
98#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070099#define CORE_DLL_EN (1 << 16)
100#define CORE_CDR_EN (1 << 17)
101#define CORE_CK_OUT_EN (1 << 18)
102#define CORE_CDR_EXT_EN (1 << 19)
103#define CORE_DLL_PDN (1 << 29)
104#define CORE_DLL_RST (1 << 30)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700105
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700106#define CORE_DLL_STATUS 0x108
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700107#define CORE_DLL_LOCK (1 << 7)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700108#define CORE_DDR_DLL_LOCK (1 << 11)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700109
110#define CORE_VENDOR_SPEC 0x10C
Krishna Konda46fd1432014-10-30 21:13:27 -0700111#define CORE_CLK_PWRSAVE (1 << 1)
112#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
113#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
114#define CORE_HC_MCLK_SEL_MASK (3 << 8)
115#define CORE_HC_AUTO_CMD21_EN (1 << 6)
116#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700117#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700118#define CORE_HC_SELECT_IN_EN (1 << 18)
119#define CORE_HC_SELECT_IN_HS400 (6 << 19)
120#define CORE_HC_SELECT_IN_MASK (7 << 19)
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -0700121#define CORE_VENDOR_SPEC_POR_VAL 0xA1C
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700122
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -0800123#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 0x114
124#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 0x118
125
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530126#define CORE_VENDOR_SPEC_FUNC2 0x110
Pavan Anamula691dd592015-08-25 16:11:20 +0530127#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
128#define HC_SW_RST_REQ (1 << 21)
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530129#define CORE_ONE_MID_EN (1 << 25)
130
Krishna Konda7feab352013-09-17 23:55:40 -0700131#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11C
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +0530132#define CORE_8_BIT_SUPPORT (1 << 18)
133#define CORE_3_3V_SUPPORT (1 << 24)
134#define CORE_3_0V_SUPPORT (1 << 25)
135#define CORE_1_8V_SUPPORT (1 << 26)
Gilad Broner2a10ca02014-10-02 17:20:35 +0300136#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)
Krishna Konda7feab352013-09-17 23:55:40 -0700137
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -0800138#define CORE_SDCC_DEBUG_REG 0x124
Sahitya Tummala67717bc2013-08-02 09:21:37 +0530139
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700140#define CORE_CSR_CDC_CTLR_CFG0 0x130
141#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
142#define CORE_HW_AUTOCAL_ENA (1 << 17)
143
144#define CORE_CSR_CDC_CTLR_CFG1 0x134
145#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
146#define CORE_TIMER_ENA (1 << 16)
147
148#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
149#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
150#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
151#define CORE_CDC_OFFSET_CFG 0x14C
152#define CORE_CSR_CDC_DELAY_CFG 0x150
153#define CORE_CDC_SLAVE_DDA_CFG 0x160
154#define CORE_CSR_CDC_STATUS0 0x164
155#define CORE_CALIBRATION_DONE (1 << 0)
156
157#define CORE_CDC_ERROR_CODE_MASK 0x7000000
158
Konstantin Dorfman98543bf2015-10-01 17:56:54 +0300159#define CQ_CMD_DBG_RAM 0x110
160#define CQ_CMD_DBG_RAM_WA 0x150
161#define CQ_CMD_DBG_RAM_OL 0x154
162
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700163#define CORE_CSR_CDC_GEN_CFG 0x178
164#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
165#define CORE_CDC_SWITCH_RC_EN (1 << 1)
166
167#define CORE_DDR_200_CFG 0x184
168#define CORE_CDC_T4_DLY_SEL (1 << 0)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530169#define CORE_CMDIN_RCLK_EN (1 << 1)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700170#define CORE_START_CDC_TRAFFIC (1 << 6)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530171
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700172#define CORE_VENDOR_SPEC3 0x1B0
173#define CORE_PWRSAVE_DLL (1 << 3)
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +0530174#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700175
176#define CORE_DLL_CONFIG_2 0x1B4
177#define CORE_DDR_CAL_EN (1 << 0)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800178#define CORE_FLL_CYCLE_CNT (1 << 18)
179#define CORE_DLL_CLOCK_DISABLE (1 << 21)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700180
Pavan Anamulaf7bf5112015-08-21 18:09:42 +0530181#define CORE_DDR_CONFIG 0x1B8
182#define DDR_CONFIG_POR_VAL 0x80040853
183#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
184#define DDR_CONFIG_PRG_RCLK_DLY 115
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -0700185#define CORE_DDR_CONFIG_2 0x1BC
186#define DDR_CONFIG_2_POR_VAL 0x80040873
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700187
Venkat Gopalakrishnan450745e2014-07-24 20:39:34 -0700188/* 512 descriptors */
189#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +0530190#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
Asutosh Das648f9d12013-01-10 21:11:04 +0530191
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700192#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800193#define TCXO_FREQ 19200000
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700194
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700195#define INVALID_TUNING_PHASE -1
Ritesh Harjani42876f42015-11-17 17:46:51 +0530196#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700197
Krishna Konda96e6b112013-10-28 15:25:03 -0700198#define NUM_TUNING_PHASES 16
Talel Shenhar6f0f3412015-11-08 14:21:31 +0200199#define MAX_DRV_TYPES_SUPPORTED_HS200 4
Konstantin Dorfman98377d32015-02-25 10:09:41 +0200200#define MSM_AUTOSUSPEND_DELAY_MS 100
Krishna Konda96e6b112013-10-28 15:25:03 -0700201
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700202static const u32 tuning_block_64[] = {
203 0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
204 0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
205 0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
206 0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
207};
208
209static const u32 tuning_block_128[] = {
210 0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
211 0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
212 0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
213 0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
214 0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
215 0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
216 0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
217 0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
218};
Asutosh Das0ef24812012-12-18 16:14:02 +0530219
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -0700220/* global to hold each slot instance for debug */
221static struct sdhci_msm_host *sdhci_slot[2];
222
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -0700223static int disable_slots;
224/* root can write, others read */
225module_param(disable_slots, int, S_IRUGO|S_IWUSR);
226
Asutosh Das0ef24812012-12-18 16:14:02 +0530227enum vdd_io_level {
228 /* set vdd_io_data->low_vol_level */
229 VDD_IO_LOW,
230 /* set vdd_io_data->high_vol_level */
231 VDD_IO_HIGH,
232 /*
233 * set whatever there in voltage_level (third argument) of
234 * sdhci_msm_set_vdd_io_vol() function.
235 */
236 VDD_IO_SET_LEVEL,
237};
238
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700239/* MSM platform specific tuning */
240static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
241 u8 poll)
242{
243 int rc = 0;
244 u32 wait_cnt = 50;
245 u8 ck_out_en = 0;
246 struct mmc_host *mmc = host->mmc;
247
248 /* poll for CK_OUT_EN bit. max. poll time = 50us */
249 ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
250 CORE_CK_OUT_EN);
251
252 while (ck_out_en != poll) {
253 if (--wait_cnt == 0) {
254 pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
255 mmc_hostname(mmc), __func__, poll);
256 rc = -ETIMEDOUT;
257 goto out;
258 }
259 udelay(1);
260
261 ck_out_en = !!(readl_relaxed(host->ioaddr +
262 CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
263 }
264out:
265 return rc;
266}
267
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530268/*
269 * Enable CDR to track changes of DAT lines and adjust sampling
270 * point according to voltage/temperature variations
271 */
272static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
273{
274 int rc = 0;
275 u32 config;
276
277 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
278 config |= CORE_CDR_EN;
279 config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
280 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
281
282 rc = msm_dll_poll_ck_out_en(host, 0);
283 if (rc)
284 goto err;
285
286 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) |
287 CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
288
289 rc = msm_dll_poll_ck_out_en(host, 1);
290 if (rc)
291 goto err;
292 goto out;
293err:
294 pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
295out:
296 return rc;
297}
298
299static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
300 *attr, const char *buf, size_t count)
301{
302 struct sdhci_host *host = dev_get_drvdata(dev);
303 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
304 struct sdhci_msm_host *msm_host = pltfm_host->priv;
305 u32 tmp;
306 unsigned long flags;
307
308 if (!kstrtou32(buf, 0, &tmp)) {
309 spin_lock_irqsave(&host->lock, flags);
310 msm_host->en_auto_cmd21 = !!tmp;
311 spin_unlock_irqrestore(&host->lock, flags);
312 }
313 return count;
314}
315
316static ssize_t show_auto_cmd21(struct device *dev,
317 struct device_attribute *attr, char *buf)
318{
319 struct sdhci_host *host = dev_get_drvdata(dev);
320 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
321 struct sdhci_msm_host *msm_host = pltfm_host->priv;
322
323 return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
324}
325
326/* MSM auto-tuning handler */
327static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
328 bool enable,
329 u32 type)
330{
331 int rc = 0;
332 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
333 struct sdhci_msm_host *msm_host = pltfm_host->priv;
334 u32 val = 0;
335
336 if (!msm_host->en_auto_cmd21)
337 return 0;
338
339 if (type == MMC_SEND_TUNING_BLOCK_HS200)
340 val = CORE_HC_AUTO_CMD21_EN;
341 else
342 return 0;
343
344 if (enable) {
345 rc = msm_enable_cdr_cm_sdc4_dll(host);
346 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
347 val, host->ioaddr + CORE_VENDOR_SPEC);
348 } else {
349 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
350 ~val, host->ioaddr + CORE_VENDOR_SPEC);
351 }
352 return rc;
353}
354
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700355static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
356{
357 int rc = 0;
358 u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
359 0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
360 0x8};
361 unsigned long flags;
362 u32 config;
363 struct mmc_host *mmc = host->mmc;
364
365 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
366 spin_lock_irqsave(&host->lock, flags);
367
368 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
369 config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
370 config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
371 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
372
373 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
374 rc = msm_dll_poll_ck_out_en(host, 0);
375 if (rc)
376 goto err_out;
377
378 /*
379 * Write the selected DLL clock output phase (0 ... 15)
380 * to CDR_SELEXT bit field of DLL_CONFIG register.
381 */
382 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
383 & ~(0xF << 20))
384 | (grey_coded_phase_table[phase] << 20)),
385 host->ioaddr + CORE_DLL_CONFIG);
386
387 /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
388 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
389 | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
390
391 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
392 rc = msm_dll_poll_ck_out_en(host, 1);
393 if (rc)
394 goto err_out;
395
396 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
397 config |= CORE_CDR_EN;
398 config &= ~CORE_CDR_EXT_EN;
399 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
400 goto out;
401
402err_out:
403 pr_err("%s: %s: Failed to set DLL phase: %d\n",
404 mmc_hostname(mmc), __func__, phase);
405out:
406 spin_unlock_irqrestore(&host->lock, flags);
407 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
408 return rc;
409}
410
411/*
412 * Find out the greatest range of consecuitive selected
413 * DLL clock output phases that can be used as sampling
414 * setting for SD3.0 UHS-I card read operation (in SDR104
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700415 * timing mode) or for eMMC4.5 card read operation (in
416 * HS400/HS200 timing mode).
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700417 * Select the 3/4 of the range and configure the DLL with the
418 * selected DLL clock output phase.
419 */
420
421static int msm_find_most_appropriate_phase(struct sdhci_host *host,
422 u8 *phase_table, u8 total_phases)
423{
424 int ret;
425 u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
426 u8 phases_per_row[MAX_PHASES] = {0};
427 int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
428 int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
429 bool phase_0_found = false, phase_15_found = false;
430 struct mmc_host *mmc = host->mmc;
431
432 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
433 if (!total_phases || (total_phases > MAX_PHASES)) {
434 pr_err("%s: %s: invalid argument: total_phases=%d\n",
435 mmc_hostname(mmc), __func__, total_phases);
436 return -EINVAL;
437 }
438
439 for (cnt = 0; cnt < total_phases; cnt++) {
440 ranges[row_index][col_index] = phase_table[cnt];
441 phases_per_row[row_index] += 1;
442 col_index++;
443
444 if ((cnt + 1) == total_phases) {
445 continue;
446 /* check if next phase in phase_table is consecutive or not */
447 } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
448 row_index++;
449 col_index = 0;
450 }
451 }
452
453 if (row_index >= MAX_PHASES)
454 return -EINVAL;
455
456 /* Check if phase-0 is present in first valid window? */
457 if (!ranges[0][0]) {
458 phase_0_found = true;
459 phase_0_raw_index = 0;
460 /* Check if cycle exist between 2 valid windows */
461 for (cnt = 1; cnt <= row_index; cnt++) {
462 if (phases_per_row[cnt]) {
463 for (i = 0; i < phases_per_row[cnt]; i++) {
464 if (ranges[cnt][i] == 15) {
465 phase_15_found = true;
466 phase_15_raw_index = cnt;
467 break;
468 }
469 }
470 }
471 }
472 }
473
474 /* If 2 valid windows form cycle then merge them as single window */
475 if (phase_0_found && phase_15_found) {
476 /* number of phases in raw where phase 0 is present */
477 u8 phases_0 = phases_per_row[phase_0_raw_index];
478 /* number of phases in raw where phase 15 is present */
479 u8 phases_15 = phases_per_row[phase_15_raw_index];
480
481 if (phases_0 + phases_15 >= MAX_PHASES)
482 /*
483 * If there are more than 1 phase windows then total
484 * number of phases in both the windows should not be
485 * more than or equal to MAX_PHASES.
486 */
487 return -EINVAL;
488
489 /* Merge 2 cyclic windows */
490 i = phases_15;
491 for (cnt = 0; cnt < phases_0; cnt++) {
492 ranges[phase_15_raw_index][i] =
493 ranges[phase_0_raw_index][cnt];
494 if (++i >= MAX_PHASES)
495 break;
496 }
497
498 phases_per_row[phase_0_raw_index] = 0;
499 phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
500 }
501
502 for (cnt = 0; cnt <= row_index; cnt++) {
503 if (phases_per_row[cnt] > curr_max) {
504 curr_max = phases_per_row[cnt];
505 selected_row_index = cnt;
506 }
507 }
508
509 i = ((curr_max * 3) / 4);
510 if (i)
511 i--;
512
513 ret = (int)ranges[selected_row_index][i];
514
515 if (ret >= MAX_PHASES) {
516 ret = -EINVAL;
517 pr_err("%s: %s: invalid phase selected=%d\n",
518 mmc_hostname(mmc), __func__, ret);
519 }
520
521 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
522 return ret;
523}
524
525static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
526{
527 u32 mclk_freq = 0;
528
529 /* Program the MCLK value to MCLK_FREQ bit field */
530 if (host->clock <= 112000000)
531 mclk_freq = 0;
532 else if (host->clock <= 125000000)
533 mclk_freq = 1;
534 else if (host->clock <= 137000000)
535 mclk_freq = 2;
536 else if (host->clock <= 150000000)
537 mclk_freq = 3;
538 else if (host->clock <= 162000000)
539 mclk_freq = 4;
540 else if (host->clock <= 175000000)
541 mclk_freq = 5;
542 else if (host->clock <= 187000000)
543 mclk_freq = 6;
544 else if (host->clock <= 200000000)
545 mclk_freq = 7;
546
547 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
548 & ~(7 << 24)) | (mclk_freq << 24)),
549 host->ioaddr + CORE_DLL_CONFIG);
550}
551
552/* Initialize the DLL (Programmable Delay Line ) */
553static int msm_init_cm_dll(struct sdhci_host *host)
554{
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800555 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
556 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700557 struct mmc_host *mmc = host->mmc;
558 int rc = 0;
559 unsigned long flags;
560 u32 wait_cnt;
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530561 bool prev_pwrsave, curr_pwrsave;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700562
563 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
564 spin_lock_irqsave(&host->lock, flags);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530565 prev_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
566 CORE_CLK_PWRSAVE);
567 curr_pwrsave = prev_pwrsave;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700568 /*
569 * Make sure that clock is always enabled when DLL
570 * tuning is in progress. Keeping PWRSAVE ON may
571 * turn off the clock. So let's disable the PWRSAVE
572 * here and re-enable it once tuning is completed.
573 */
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530574 if (prev_pwrsave) {
575 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
576 & ~CORE_CLK_PWRSAVE),
577 host->ioaddr + CORE_VENDOR_SPEC);
578 curr_pwrsave = false;
579 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700580
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800581 if (msm_host->use_updated_dll_reset) {
582 /* Disable the DLL clock */
583 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
584 & ~CORE_CK_OUT_EN),
585 host->ioaddr + CORE_DLL_CONFIG);
586
587 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
588 | CORE_DLL_CLOCK_DISABLE),
589 host->ioaddr + CORE_DLL_CONFIG_2);
590 }
591
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700592 /* Write 1 to DLL_RST bit of DLL_CONFIG register */
593 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
594 | CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
595
596 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
597 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
598 | CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
599 msm_cm_dll_set_freq(host);
600
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800601 if (msm_host->use_updated_dll_reset) {
602 u32 mclk_freq = 0;
603
604 if ((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
605 & CORE_FLL_CYCLE_CNT))
606 mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
607 else
608 mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);
609
610 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
611 & ~(0xFF << 10)) | (mclk_freq << 10)),
612 host->ioaddr + CORE_DLL_CONFIG_2);
613 /* wait for 5us before enabling DLL clock */
614 udelay(5);
615 }
616
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700617 /* Write 0 to DLL_RST bit of DLL_CONFIG register */
618 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
619 & ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
620
621 /* Write 0 to DLL_PDN bit of DLL_CONFIG register */
622 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
623 & ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
624
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800625 if (msm_host->use_updated_dll_reset) {
626 msm_cm_dll_set_freq(host);
627 /* Enable the DLL clock */
628 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
629 & ~CORE_DLL_CLOCK_DISABLE),
630 host->ioaddr + CORE_DLL_CONFIG_2);
631 }
632
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700633 /* Set DLL_EN bit to 1. */
634 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
635 | CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);
636
637 /* Set CK_OUT_EN bit to 1. */
638 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
639 | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
640
641 wait_cnt = 50;
642 /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
643 while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
644 CORE_DLL_LOCK)) {
645 /* max. wait for 50us sec for LOCK bit to be set */
646 if (--wait_cnt == 0) {
647 pr_err("%s: %s: DLL failed to LOCK\n",
648 mmc_hostname(mmc), __func__);
649 rc = -ETIMEDOUT;
650 goto out;
651 }
652 /* wait for 1us before polling again */
653 udelay(1);
654 }
655
656out:
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530657 /* Restore the correct PWRSAVE state */
658 if (prev_pwrsave ^ curr_pwrsave) {
659 u32 reg = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
660
661 if (prev_pwrsave)
662 reg |= CORE_CLK_PWRSAVE;
663 else
664 reg &= ~CORE_CLK_PWRSAVE;
665
666 writel_relaxed(reg, host->ioaddr + CORE_VENDOR_SPEC);
667 }
668
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700669 spin_unlock_irqrestore(&host->lock, flags);
670 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
671 return rc;
672}
673
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700674static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
675{
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700676 u32 calib_done;
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700677 int ret = 0;
678 int cdc_err = 0;
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700679
680 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
681
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700682 /* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
683 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
684 & ~CORE_CDC_T4_DLY_SEL),
685 host->ioaddr + CORE_DDR_200_CFG);
686
687 /* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
688 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
689 & ~CORE_CDC_SWITCH_BYPASS_OFF),
690 host->ioaddr + CORE_CSR_CDC_GEN_CFG);
691
692 /* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
693 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
694 | CORE_CDC_SWITCH_RC_EN),
695 host->ioaddr + CORE_CSR_CDC_GEN_CFG);
696
697 /* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
698 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
699 & ~CORE_START_CDC_TRAFFIC),
700 host->ioaddr + CORE_DDR_200_CFG);
701
702 /*
703 * Perform CDC Register Initialization Sequence
704 *
705 * CORE_CSR_CDC_CTLR_CFG0 0x11800EC
706 * CORE_CSR_CDC_CTLR_CFG1 0x3011111
707 * CORE_CSR_CDC_CAL_TIMER_CFG0 0x1201000
708 * CORE_CSR_CDC_CAL_TIMER_CFG1 0x4
709 * CORE_CSR_CDC_REFCOUNT_CFG 0xCB732020
710 * CORE_CSR_CDC_COARSE_CAL_CFG 0xB19
711 * CORE_CSR_CDC_DELAY_CFG 0x3AC
712 * CORE_CDC_OFFSET_CFG 0x0
713 * CORE_CDC_SLAVE_DDA_CFG 0x16334
714 */
715
716 writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
717 writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
718 writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
719 writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
720 writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
721 writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
Subhash Jadavanibe406d92014-06-17 16:47:48 -0700722 writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700723 writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
724 writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);
725
726 /* CDC HW Calibration */
727
728 /* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
729 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
730 | CORE_SW_TRIG_FULL_CALIB),
731 host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
732
733 /* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
734 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
735 & ~CORE_SW_TRIG_FULL_CALIB),
736 host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
737
738 /* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
739 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
740 | CORE_HW_AUTOCAL_ENA),
741 host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
742
743 /* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
744 writel_relaxed((readl_relaxed(host->ioaddr +
745 CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
746 host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
747
748 mb();
749
750 /* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700751 ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
752 calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);
753
754 if (ret == -ETIMEDOUT) {
755 pr_err("%s: %s: CDC Calibration was not completed\n",
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700756 mmc_hostname(host->mmc), __func__);
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700757 goto out;
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700758 }
759
760 /* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
761 cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
762 & CORE_CDC_ERROR_CODE_MASK;
763 if (cdc_err) {
764 pr_err("%s: %s: CDC Error Code %d\n",
765 mmc_hostname(host->mmc), __func__, cdc_err);
766 ret = -EINVAL;
767 goto out;
768 }
769
770 /* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
771 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
772 | CORE_START_CDC_TRAFFIC),
773 host->ioaddr + CORE_DDR_200_CFG);
774out:
775 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
776 __func__, ret);
777 return ret;
778}
779
/*
 * sdhci_msm_cm_dll_sdc4_calibration() - calibrate the CM DLL (SDC4) HW.
 *
 * Programs the DDR config register(s), enables DDR calibration in
 * CORE_DLL_CONFIG_2 and polls CORE_DLL_STATUS for DDR_DLL_LOCK.  On
 * controllers without the 14lpp DLL it also enables PWRSAVE_DLL.
 * All register writes use writel_relaxed(); the final mb() orders them
 * before the function returns.
 *
 * Returns 0 on success, -ETIMEDOUT if the DLL never locks.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogramming the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->rclk_delay_fix) {
		/* Newer targets: POR value of CORE_DDR_CONFIG_2 is correct */
		writel_relaxed(DDR_CONFIG_2_POR_VAL,
			host->ioaddr + CORE_DDR_CONFIG_2);
	} else {
		/* Older targets: patch the RCLK delay field into the POR value */
		ddr_config = DDR_CONFIG_POR_VAL &
			~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr + CORE_DDR_CONFIG);
	}

	/* Route RCLK through the CMD line when enhanced strobe is in use */
	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
				| CORE_CMDIN_RCLK_EN),
				host->ioaddr + CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
			| CORE_DDR_CAL_EN),
			host->ioaddr + CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
		dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
				| CORE_PWRSAVE_DLL),
				host->ioaddr + CORE_VENDOR_SPEC3);
	/* Order the relaxed register writes above before returning */
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
841
Ritesh Harjaniea709662015-05-27 15:40:24 +0530842static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
843{
844 int ret = 0;
845 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
846 struct sdhci_msm_host *msm_host = pltfm_host->priv;
847 struct mmc_host *mmc = host->mmc;
848
849 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
850
Ritesh Harjani70e2a712015-08-25 11:34:16 +0530851 if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
852 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjaniea709662015-05-27 15:40:24 +0530853 mmc_hostname(mmc));
854 return -EINVAL;
855 }
856
857 if (msm_host->calibration_done ||
858 !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
859 return 0;
860 }
861
862 /*
863 * Reset the tuning block.
864 */
865 ret = msm_init_cm_dll(host);
866 if (ret)
867 goto out;
868
869 ret = sdhci_msm_cm_dll_sdc4_calibration(host);
870out:
871 if (!ret)
872 msm_host->calibration_done = true;
873 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
874 __func__, ret);
875 return ret;
876}
877
/*
 * sdhci_msm_hs400_dll_calibration() - re-establish DLL state for HS400.
 *
 * Retuning cannot run in HS400, so this resets the tuning block, restores
 * the tuning phase saved by sdhci_msm_execute_tuning(), and then runs the
 * appropriate DLL calibration (CDCLP533 or CM_DLL_SDC4, depending on the
 * controller variant).
 *
 * Returns 0 on success, negative errno on failure.
 */
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CMD_DAT_TRACK_SEL),
			host->ioaddr + CORE_DLL_CONFIG);

	/* Pick the calibration engine present on this controller variant */
	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
915
Krishna Konda96e6b112013-10-28 15:25:03 -0700916static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
917 u8 drv_type)
918{
919 struct mmc_command cmd = {0};
920 struct mmc_request mrq = {NULL};
921 struct mmc_host *mmc = host->mmc;
922 u8 val = ((drv_type << 4) | 2);
923
924 cmd.opcode = MMC_SWITCH;
925 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
926 (EXT_CSD_HS_TIMING << 16) |
927 (val << 8) |
928 EXT_CSD_CMD_SET_NORMAL;
929 cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
930 /* 1 sec */
931 cmd.busy_timeout = 1000 * 1000;
932
933 memset(cmd.resp, 0, sizeof(cmd.resp));
934 cmd.retries = 3;
935
936 mrq.cmd = &cmd;
937 cmd.data = NULL;
938
939 mmc_wait_for_req(mmc, &mrq);
940 pr_debug("%s: %s: set card drive type to %d\n",
941 mmc_hostname(mmc), __func__,
942 drv_type);
943}
944
/*
 * sdhci_msm_execute_tuning() - find a working CM DLL phase for high-speed
 * modes.
 *
 * Runs only for SDR104/HS200/HS400 timings above 100MHz.  Sends the
 * tuning block pattern at each of the 16 DLL phases, records the phases
 * whose read-back matches, and programs the "most appropriate" passing
 * phase (also saved for later HS400 calibration).  If *all* 16 phases
 * pass on an eMMC card, the card's drive strength is varied and tuning
 * repeated until at least one phase fails.  The whole sequence retries
 * up to 3 times before failing with -EIO.
 *
 * @host:   the SDHCI host being tuned
 * @opcode: tuning command (MMC_SEND_TUNING_BLOCK or ..._HS200)
 *
 * Returns 0 on success (or when tuning is not required), negative errno
 * otherwise.
 */
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	/*
	 * Don't allow re-tuning for CRC errors observed for any commands
	 * that are sent during tuning sequence itself.
	 */
	/*
	 * NOTE(review): tuning_in_progress is read/written without
	 * host->lock — assumed the tuning path is not entered concurrently;
	 * confirm against the callers.
	 */
	if (msm_host->tuning_in_progress)
		return 0;
	msm_host->tuning_in_progress = true;
	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode*/
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	/* 8-bit HS200 uses the larger 128-byte tuning pattern */
	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		/*
		 * On a CRC/data error, poll CMD13 until the card is back in
		 * TRAN state before trying the next phase.
		 */
		if (card && (cmd.error || data.error)) {
			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
				    (R1_CURRENT_STATE(sts_cmd.resp[0])
				    != R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * wait for at least 146 MCLK cycles for
					 * the card to move to TRANS state. As
					 * the MCLK would be min 200MHz for
					 * tuning, we need max 0.73us delay. To
					 * be on safer side 1ms delay is given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			};
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then its a problem. So change the card's
		 * drive type to a different value, if supported and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
				tuned_phase_cnt);

		/* set drive type to other value . default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			pr_debug("%s: trying different drive strength (%d)\n",
				mmc_hostname(mmc), drv_type);
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
				tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		/* Remember the phase for HS400 DLL re-calibration */
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
				mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	msm_host->tuning_in_progress = false;
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}
1159
Asutosh Das0ef24812012-12-18 16:14:02 +05301160static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1161{
1162 struct sdhci_msm_gpio_data *curr;
1163 int i, ret = 0;
1164
1165 curr = pdata->pin_data->gpio_data;
1166 for (i = 0; i < curr->size; i++) {
1167 if (!gpio_is_valid(curr->gpio[i].no)) {
1168 ret = -EINVAL;
1169 pr_err("%s: Invalid gpio = %d\n", __func__,
1170 curr->gpio[i].no);
1171 goto free_gpios;
1172 }
1173 if (enable) {
1174 ret = gpio_request(curr->gpio[i].no,
1175 curr->gpio[i].name);
1176 if (ret) {
1177 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1178 __func__, curr->gpio[i].no,
1179 curr->gpio[i].name, ret);
1180 goto free_gpios;
1181 }
1182 curr->gpio[i].is_enabled = true;
1183 } else {
1184 gpio_free(curr->gpio[i].no);
1185 curr->gpio[i].is_enabled = false;
1186 }
1187 }
1188 return ret;
1189
1190free_gpios:
1191 for (i--; i >= 0; i--) {
1192 gpio_free(curr->gpio[i].no);
1193 curr->gpio[i].is_enabled = false;
1194 }
1195 return ret;
1196}
1197
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301198static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1199 bool enable)
1200{
1201 int ret = 0;
1202
1203 if (enable)
1204 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1205 pdata->pctrl_data->pins_active);
1206 else
1207 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1208 pdata->pctrl_data->pins_sleep);
1209
1210 if (ret < 0)
1211 pr_err("%s state for pinctrl failed with %d\n",
1212 enable ? "Enabling" : "Disabling", ret);
1213
1214 return ret;
1215}
1216
Asutosh Das0ef24812012-12-18 16:14:02 +05301217static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1218{
1219 int ret = 0;
1220
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301221 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301222 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301223 } else if (pdata->pctrl_data) {
1224 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1225 goto out;
1226 } else if (!pdata->pin_data) {
1227 return 0;
1228 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301229
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301230 if (pdata->pin_data->is_gpio)
1231 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301232out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301233 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301234 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301235
1236 return ret;
1237}
1238
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301239static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1240 u32 **out, int *len, u32 size)
1241{
1242 int ret = 0;
1243 struct device_node *np = dev->of_node;
1244 size_t sz;
1245 u32 *arr = NULL;
1246
1247 if (!of_get_property(np, prop_name, len)) {
1248 ret = -EINVAL;
1249 goto out;
1250 }
1251 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001252 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301253 dev_err(dev, "%s invalid size\n", prop_name);
1254 ret = -EINVAL;
1255 goto out;
1256 }
1257
1258 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1259 if (!arr) {
1260 dev_err(dev, "%s failed allocating memory\n", prop_name);
1261 ret = -ENOMEM;
1262 goto out;
1263 }
1264
1265 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1266 if (ret < 0) {
1267 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1268 goto out;
1269 }
1270 *out = arr;
1271out:
1272 if (ret)
1273 *len = 0;
1274 return ret;
1275}
1276
Asutosh Das0ef24812012-12-18 16:14:02 +05301277#define MAX_PROP_SIZE 32
1278static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1279 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1280{
1281 int len, ret = 0;
1282 const __be32 *prop;
1283 char prop_name[MAX_PROP_SIZE];
1284 struct sdhci_msm_reg_data *vreg;
1285 struct device_node *np = dev->of_node;
1286
1287 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1288 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301289 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301290 return ret;
1291 }
1292
1293 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1294 if (!vreg) {
1295 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1296 ret = -ENOMEM;
1297 return ret;
1298 }
1299
1300 vreg->name = vreg_name;
1301
1302 snprintf(prop_name, MAX_PROP_SIZE,
1303 "qcom,%s-always-on", vreg_name);
1304 if (of_get_property(np, prop_name, NULL))
1305 vreg->is_always_on = true;
1306
1307 snprintf(prop_name, MAX_PROP_SIZE,
1308 "qcom,%s-lpm-sup", vreg_name);
1309 if (of_get_property(np, prop_name, NULL))
1310 vreg->lpm_sup = true;
1311
1312 snprintf(prop_name, MAX_PROP_SIZE,
1313 "qcom,%s-voltage-level", vreg_name);
1314 prop = of_get_property(np, prop_name, &len);
1315 if (!prop || (len != (2 * sizeof(__be32)))) {
1316 dev_warn(dev, "%s %s property\n",
1317 prop ? "invalid format" : "no", prop_name);
1318 } else {
1319 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1320 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1321 }
1322
1323 snprintf(prop_name, MAX_PROP_SIZE,
1324 "qcom,%s-current-level", vreg_name);
1325 prop = of_get_property(np, prop_name, &len);
1326 if (!prop || (len != (2 * sizeof(__be32)))) {
1327 dev_warn(dev, "%s %s property\n",
1328 prop ? "invalid format" : "no", prop_name);
1329 } else {
1330 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1331 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1332 }
1333
1334 *vreg_data = vreg;
1335 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1336 vreg->name, vreg->is_always_on ? "always_on," : "",
1337 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1338 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1339
1340 return ret;
1341}
1342
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301343static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1344 struct sdhci_msm_pltfm_data *pdata)
1345{
1346 struct sdhci_pinctrl_data *pctrl_data;
1347 struct pinctrl *pctrl;
1348 int ret = 0;
1349
1350 /* Try to obtain pinctrl handle */
1351 pctrl = devm_pinctrl_get(dev);
1352 if (IS_ERR(pctrl)) {
1353 ret = PTR_ERR(pctrl);
1354 goto out;
1355 }
1356 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1357 if (!pctrl_data) {
1358 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1359 ret = -ENOMEM;
1360 goto out;
1361 }
1362 pctrl_data->pctrl = pctrl;
1363 /* Look-up and keep the states handy to be used later */
1364 pctrl_data->pins_active = pinctrl_lookup_state(
1365 pctrl_data->pctrl, "active");
1366 if (IS_ERR(pctrl_data->pins_active)) {
1367 ret = PTR_ERR(pctrl_data->pins_active);
1368 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1369 goto out;
1370 }
1371 pctrl_data->pins_sleep = pinctrl_lookup_state(
1372 pctrl_data->pctrl, "sleep");
1373 if (IS_ERR(pctrl_data->pins_sleep)) {
1374 ret = PTR_ERR(pctrl_data->pins_sleep);
1375 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1376 goto out;
1377 }
1378 pdata->pctrl_data = pctrl_data;
1379out:
1380 return ret;
1381}
1382
Asutosh Das0ef24812012-12-18 16:14:02 +05301383#define GPIO_NAME_MAX_LEN 32
1384static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1385 struct sdhci_msm_pltfm_data *pdata)
1386{
1387 int ret = 0, cnt, i;
1388 struct sdhci_msm_pin_data *pin_data;
1389 struct device_node *np = dev->of_node;
1390
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301391 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1392 if (!ret) {
1393 goto out;
1394 } else if (ret == -EPROBE_DEFER) {
1395 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1396 goto out;
1397 } else {
1398 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1399 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301400 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301401 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301402 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1403 if (!pin_data) {
1404 dev_err(dev, "No memory for pin_data\n");
1405 ret = -ENOMEM;
1406 goto out;
1407 }
1408
1409 cnt = of_gpio_count(np);
1410 if (cnt > 0) {
1411 pin_data->gpio_data = devm_kzalloc(dev,
1412 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1413 if (!pin_data->gpio_data) {
1414 dev_err(dev, "No memory for gpio_data\n");
1415 ret = -ENOMEM;
1416 goto out;
1417 }
1418 pin_data->gpio_data->size = cnt;
1419 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1420 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1421
1422 if (!pin_data->gpio_data->gpio) {
1423 dev_err(dev, "No memory for gpio\n");
1424 ret = -ENOMEM;
1425 goto out;
1426 }
1427
1428 for (i = 0; i < cnt; i++) {
1429 const char *name = NULL;
1430 char result[GPIO_NAME_MAX_LEN];
1431 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1432 of_property_read_string_index(np,
1433 "qcom,gpio-names", i, &name);
1434
1435 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1436 dev_name(dev), name ? name : "?");
1437 pin_data->gpio_data->gpio[i].name = result;
1438 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1439 pin_data->gpio_data->gpio[i].name,
1440 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301441 }
1442 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301443 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301444out:
1445 if (ret)
1446 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1447 return ret;
1448}
1449
#ifdef CONFIG_SMP
/*
 * On SMP kernels, an "affine_irq" DT request switches the PM QoS vote to
 * follow the CPU that services the SDHC interrupt.
 */
static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
{
	pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
}
#else
/* UP build: IRQ affinity is meaningless, keep the default request type. */
static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
#endif
1458
Gilad Bronerc788a672015-09-08 15:39:11 +03001459static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1460 struct sdhci_msm_pltfm_data *pdata)
1461{
1462 struct device_node *np = dev->of_node;
1463 const char *str;
1464 u32 cpu;
1465 int ret = 0;
1466 int i;
1467
1468 pdata->pm_qos_data.irq_valid = false;
1469 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1470 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1471 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001472 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001473 }
1474
1475 /* must specify cpu for "affine_cores" type */
1476 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1477 pdata->pm_qos_data.irq_cpu = -1;
1478 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1479 if (ret) {
1480 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1481 ret);
1482 goto out;
1483 }
1484 if (cpu < 0 || cpu >= num_possible_cpus()) {
1485 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1486 __func__, cpu, num_possible_cpus());
1487 ret = -EINVAL;
1488 goto out;
1489 }
1490 pdata->pm_qos_data.irq_cpu = cpu;
1491 }
1492
1493 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1494 SDHCI_POWER_POLICY_NUM) {
1495 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1496 __func__, SDHCI_POWER_POLICY_NUM);
1497 ret = -EINVAL;
1498 goto out;
1499 }
1500
1501 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1502 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1503 &pdata->pm_qos_data.irq_latency.latency[i]);
1504
1505 pdata->pm_qos_data.irq_valid = true;
1506out:
1507 return ret;
1508}
1509
1510static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1511 struct sdhci_msm_pltfm_data *pdata)
1512{
1513 struct device_node *np = dev->of_node;
1514 u32 mask;
1515 int nr_groups;
1516 int ret;
1517 int i;
1518
1519 /* Read cpu group mapping */
1520 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1521 if (nr_groups <= 0) {
1522 ret = -EINVAL;
1523 goto out;
1524 }
1525 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1526 pdata->pm_qos_data.cpu_group_map.mask =
1527 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1528 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1529 ret = -ENOMEM;
1530 goto out;
1531 }
1532
1533 for (i = 0; i < nr_groups; i++) {
1534 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1535 i, &mask);
1536
1537 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1538 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1539 cpu_possible_mask)) {
1540 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1541 __func__, mask, i);
1542 ret = -EINVAL;
1543 goto free_res;
1544 }
1545 }
1546 return 0;
1547
1548free_res:
1549 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1550out:
1551 return ret;
1552}
1553
1554static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1555 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1556{
1557 struct device_node *np = dev->of_node;
1558 struct sdhci_msm_pm_qos_latency *values;
1559 int ret;
1560 int i;
1561 int group;
1562 int cfg;
1563
1564 ret = of_property_count_u32_elems(np, name);
1565 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1566 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1567 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1568 ret);
1569 return -EINVAL;
1570 } else if (ret < 0) {
1571 return ret;
1572 }
1573
1574 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1575 GFP_KERNEL);
1576 if (!values)
1577 return -ENOMEM;
1578
1579 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1580 group = i / SDHCI_POWER_POLICY_NUM;
1581 cfg = i % SDHCI_POWER_POLICY_NUM;
1582 of_property_read_u32_index(np, name, i,
1583 &(values[group].latency[cfg]));
1584 }
1585
1586 *latency = values;
1587 return 0;
1588}
1589
1590static void sdhci_msm_pm_qos_parse(struct device *dev,
1591 struct sdhci_msm_pltfm_data *pdata)
1592{
1593 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1594 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1595 __func__);
1596
1597 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1598 pdata->pm_qos_data.cmdq_valid =
1599 !sdhci_msm_pm_qos_parse_latency(dev,
1600 "qcom,pm-qos-cmdq-latency-us",
1601 pdata->pm_qos_data.cpu_group_map.nr_groups,
1602 &pdata->pm_qos_data.cmdq_latency);
1603 pdata->pm_qos_data.legacy_valid =
1604 !sdhci_msm_pm_qos_parse_latency(dev,
1605 "qcom,pm-qos-legacy-latency-us",
1606 pdata->pm_qos_data.cpu_group_map.nr_groups,
1607 &pdata->pm_qos_data.latency);
1608 if (!pdata->pm_qos_data.cmdq_valid &&
1609 !pdata->pm_qos_data.legacy_valid) {
1610 /* clean-up previously allocated arrays */
1611 kfree(pdata->pm_qos_data.latency);
1612 kfree(pdata->pm_qos_data.cmdq_latency);
1613 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1614 __func__);
1615 }
1616 } else {
1617 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1618 __func__);
1619 }
1620}
1621
Asutosh Das0ef24812012-12-18 16:14:02 +05301622/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001623static
1624struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1625 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301626{
1627 struct sdhci_msm_pltfm_data *pdata = NULL;
1628 struct device_node *np = dev->of_node;
1629 u32 bus_width = 0;
1630 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301631 int clk_table_len;
1632 u32 *clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301633 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05301634
1635 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1636 if (!pdata) {
1637 dev_err(dev, "failed to allocate memory for platform data\n");
1638 goto out;
1639 }
1640
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301641 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
1642 if (gpio_is_valid(pdata->status_gpio) & !(flags & OF_GPIO_ACTIVE_LOW))
1643 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301644
Asutosh Das0ef24812012-12-18 16:14:02 +05301645 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1646 if (bus_width == 8)
1647 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1648 else if (bus_width == 4)
1649 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1650 else {
1651 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1652 pdata->mmc_bus_width = 0;
1653 }
1654
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001655 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
1656 &msm_host->mmc->clk_scaling.freq_table,
1657 &msm_host->mmc->clk_scaling.freq_table_sz, 0))
1658 pr_debug("%s: no clock scaling frequencies were supplied\n",
1659 dev_name(dev));
1660 else if (!msm_host->mmc->clk_scaling.freq_table ||
1661 !msm_host->mmc->clk_scaling.freq_table_sz)
1662 dev_err(dev, "bad dts clock scaling frequencies\n");
1663
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301664 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1665 &clk_table, &clk_table_len, 0)) {
1666 dev_err(dev, "failed parsing supported clock rates\n");
1667 goto out;
1668 }
1669 if (!clk_table || !clk_table_len) {
1670 dev_err(dev, "Invalid clock table\n");
1671 goto out;
1672 }
1673 pdata->sup_clk_table = clk_table;
1674 pdata->sup_clk_cnt = clk_table_len;
1675
Asutosh Das0ef24812012-12-18 16:14:02 +05301676 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1677 sdhci_msm_slot_reg_data),
1678 GFP_KERNEL);
1679 if (!pdata->vreg_data) {
1680 dev_err(dev, "failed to allocate memory for vreg data\n");
1681 goto out;
1682 }
1683
1684 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1685 "vdd")) {
1686 dev_err(dev, "failed parsing vdd data\n");
1687 goto out;
1688 }
1689 if (sdhci_msm_dt_parse_vreg_info(dev,
1690 &pdata->vreg_data->vdd_io_data,
1691 "vdd-io")) {
1692 dev_err(dev, "failed parsing vdd-io data\n");
1693 goto out;
1694 }
1695
1696 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
1697 dev_err(dev, "failed parsing gpio data\n");
1698 goto out;
1699 }
1700
Asutosh Das0ef24812012-12-18 16:14:02 +05301701 len = of_property_count_strings(np, "qcom,bus-speed-mode");
1702
1703 for (i = 0; i < len; i++) {
1704 const char *name = NULL;
1705
1706 of_property_read_string_index(np,
1707 "qcom,bus-speed-mode", i, &name);
1708 if (!name)
1709 continue;
1710
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001711 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
1712 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
1713 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
1714 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
1715 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05301716 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1717 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
1718 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1719 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
1720 pdata->caps |= MMC_CAP_1_8V_DDR
1721 | MMC_CAP_UHS_DDR50;
1722 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
1723 pdata->caps |= MMC_CAP_1_2V_DDR
1724 | MMC_CAP_UHS_DDR50;
1725 }
1726
1727 if (of_get_property(np, "qcom,nonremovable", NULL))
1728 pdata->nonremovable = true;
1729
Guoping Yuf7c91332014-08-20 16:56:18 +08001730 if (of_get_property(np, "qcom,nonhotplug", NULL))
1731 pdata->nonhotplug = true;
1732
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08001733 pdata->largeaddressbus =
1734 of_property_read_bool(np, "qcom,large-address-bus");
1735
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001736 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
1737 msm_host->mmc->wakeup_on_idle = true;
1738
Gilad Bronerc788a672015-09-08 15:39:11 +03001739 sdhci_msm_pm_qos_parse(dev, pdata);
1740
Pavan Anamula5a256df2015-10-16 14:38:28 +05301741 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
1742 pdata->core_3_0v_support = true;
1743
Asutosh Das0ef24812012-12-18 16:14:02 +05301744 return pdata;
1745out:
1746 return NULL;
1747}
1748
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301749/* Returns required bandwidth in Bytes per Sec */
1750static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1751 struct mmc_ios *ios)
1752{
Sahitya Tummala2886c922013-04-03 18:03:31 +05301753 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1754 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1755
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301756 unsigned int bw;
1757
Sahitya Tummala2886c922013-04-03 18:03:31 +05301758 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301759 /*
1760 * For DDR mode, SDCC controller clock will be at
1761 * the double rate than the actual clock that goes to card.
1762 */
1763 if (ios->bus_width == MMC_BUS_WIDTH_4)
1764 bw /= 2;
1765 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1766 bw /= 8;
1767
1768 return bw;
1769}
1770
1771static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1772 unsigned int bw)
1773{
1774 unsigned int *table = host->pdata->voting_data->bw_vecs;
1775 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1776 int i;
1777
1778 if (host->msm_bus_vote.is_max_bw_needed && bw)
1779 return host->msm_bus_vote.max_bw_vote;
1780
1781 for (i = 0; i < size; i++) {
1782 if (bw <= table[i])
1783 break;
1784 }
1785
1786 if (i && (i == size))
1787 i--;
1788
1789 return i;
1790}
1791
/*
 * This function must be called with host lock acquired.
 * Caller of this function should also ensure that msm bus client
 * handle is not null.
 *
 * @flags must point at the IRQ flags saved by the caller's
 * spin_lock_irqsave() on host->lock: the lock is dropped across the
 * bus-scale request below and reacquired before returning, updating
 * *flags in the process.
 */
static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
					 int vote,
					 unsigned long *flags)
{
	struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
	int rc = 0;

	BUG_ON(!flags);

	/* Skip the (expensive) bus-scale call when the vote is unchanged. */
	if (vote != msm_host->msm_bus_vote.curr_vote) {
		/* Drop the caller-held lock around the bus-scale request. */
		spin_unlock_irqrestore(&host->lock, *flags);
		rc = msm_bus_scale_client_update_request(
				msm_host->msm_bus_vote.client_handle, vote);
		spin_lock_irqsave(&host->lock, *flags);
		if (rc) {
			pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				mmc_hostname(host->mmc),
				msm_host->msm_bus_vote.client_handle, vote, rc);
			goto out;
		}
		/* Cache the applied vote so identical requests short-circuit. */
		msm_host->msm_bus_vote.curr_vote = vote;
	}
out:
	return rc;
}
1822
1823/*
1824 * Internal work. Work to set 0 bandwidth for msm bus.
1825 */
1826static void sdhci_msm_bus_work(struct work_struct *work)
1827{
1828 struct sdhci_msm_host *msm_host;
1829 struct sdhci_host *host;
1830 unsigned long flags;
1831
1832 msm_host = container_of(work, struct sdhci_msm_host,
1833 msm_bus_vote.vote_work.work);
1834 host = platform_get_drvdata(msm_host->pdev);
1835
1836 if (!msm_host->msm_bus_vote.client_handle)
1837 return;
1838
1839 spin_lock_irqsave(&host->lock, flags);
1840 /* don't vote for 0 bandwidth if any request is in progress */
1841 if (!host->mrq) {
1842 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301843 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301844 } else
1845 pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
1846 mmc_hostname(host->mmc), __func__);
1847 spin_unlock_irqrestore(&host->lock, flags);
1848}
1849
/*
 * This function cancels any scheduled delayed work and sets the bus
 * vote based on bw (bandwidth) argument.
 */
static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
						unsigned int bw)
{
	int vote;
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	/* Flush/cancel the deferred zero-bandwidth vote before voting anew. */
	cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
	spin_lock_irqsave(&host->lock, flags);
	vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
	/* Callee may drop and retake host->lock via &flags. */
	sdhci_msm_bus_set_vote(msm_host, vote, &flags);
	spin_unlock_irqrestore(&host->lock, flags);
}
1868
#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */

/* This function queues a work which will set the bandwidth requirement to 0 */
static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
{
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	spin_lock_irqsave(&host->lock, flags);
	/*
	 * Only schedule the deferred drop-to-minimum vote when the current
	 * vote actually differs from the minimum; the work itself runs
	 * after MSM_MMC_BUS_VOTING_DELAY msecs (see sdhci_msm_bus_work).
	 */
	if (msm_host->msm_bus_vote.min_bw_vote !=
		msm_host->msm_bus_vote.curr_vote)
		queue_delayed_work(system_wq,
				   &msm_host->msm_bus_vote.vote_work,
				   msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
	spin_unlock_irqrestore(&host->lock, flags);
}
1886
/*
 * Parse optional bus-bandwidth voting data from DT and register a
 * bus-scale client.
 *
 * Voting is only enabled when both the msm-bus platform data and the
 * "qcom,bus-bw-vectors-bps" table are present; otherwise the scratch
 * allocation is returned to devm and voting stays disabled. Returns 0
 * on success and also when voting is simply not configured.
 */
static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
				struct platform_device *pdev)
{
	int rc = 0;
	struct msm_bus_scale_pdata *bus_pdata;

	struct sdhci_msm_bus_voting_data *data;
	struct device *dev = &pdev->dev;

	data = devm_kzalloc(dev,
		sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev,
			"%s: failed to allocate memory\n", __func__);
		rc = -ENOMEM;
		goto out;
	}
	data->bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (data->bus_pdata) {
		rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
				&data->bw_vecs, &data->bw_vecs_size, 0);
		if (rc) {
			dev_err(&pdev->dev,
				"%s: Failed to get bus-bw-vectors-bps\n",
				__func__);
			goto out;
		}
		host->pdata->voting_data = data;
	}
	if (host->pdata->voting_data &&
		host->pdata->voting_data->bus_pdata &&
		host->pdata->voting_data->bw_vecs &&
		host->pdata->voting_data->bw_vecs_size) {

		bus_pdata = host->pdata->voting_data->bus_pdata;
		host->msm_bus_vote.client_handle =
				msm_bus_scale_register_client(bus_pdata);
		if (!host->msm_bus_vote.client_handle) {
			dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
			/* NOTE(review): -EFAULT is an odd errno for a failed
			 * registration (-ENODEV/-EINVAL would be more usual);
			 * kept as-is in case callers match on it. */
			rc = -EFAULT;
			goto out;
		}
		/* cache the vote index for minimum and maximum bandwidth */
		host->msm_bus_vote.min_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, 0);
		host->msm_bus_vote.max_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
	} else {
		/* Voting not fully described in DT: release the scratch data. */
		devm_kfree(dev, data);
	}

out:
	return rc;
}
1941
1942static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
1943{
1944 if (host->msm_bus_vote.client_handle)
1945 msm_bus_scale_unregister_client(
1946 host->msm_bus_vote.client_handle);
1947}
1948
/*
 * Apply (enable != 0) or schedule removal of (enable == 0) the bus
 * bandwidth vote derived from the current ios settings.
 */
static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned int bw;

	/* No-op when bus voting was never registered. */
	if (!msm_host->msm_bus_vote.client_handle)
		return;

	bw = sdhci_get_bw_required(host, ios);
	if (enable) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
	} else {
		/*
		 * If clock gating is enabled, then remove the vote
		 * immediately because clocks will be disabled only
		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
		 * additional delay is required to remove the bus vote.
		 */
#ifdef CONFIG_MMC_CLKGATE
		if (host->mmc->clkgate_delay)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		else
#endif
			/* Otherwise defer the zero-bandwidth vote. */
			sdhci_msm_bus_queue_work(host);
	}
}
1977
Asutosh Das0ef24812012-12-18 16:14:02 +05301978/* Regulator utility functions */
1979static int sdhci_msm_vreg_init_reg(struct device *dev,
1980 struct sdhci_msm_reg_data *vreg)
1981{
1982 int ret = 0;
1983
1984 /* check if regulator is already initialized? */
1985 if (vreg->reg)
1986 goto out;
1987
1988 /* Get the regulator handle */
1989 vreg->reg = devm_regulator_get(dev, vreg->name);
1990 if (IS_ERR(vreg->reg)) {
1991 ret = PTR_ERR(vreg->reg);
1992 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
1993 __func__, vreg->name, ret);
1994 goto out;
1995 }
1996
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301997 if (regulator_count_voltages(vreg->reg) > 0) {
1998 vreg->set_voltage_sup = true;
1999 /* sanity check */
2000 if (!vreg->high_vol_level || !vreg->hpm_uA) {
2001 pr_err("%s: %s invalid constraints specified\n",
2002 __func__, vreg->name);
2003 ret = -EINVAL;
2004 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302005 }
2006
2007out:
2008 return ret;
2009}
2010
2011static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2012{
2013 if (vreg->reg)
2014 devm_regulator_put(vreg->reg);
2015}
2016
2017static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2018 *vreg, int uA_load)
2019{
2020 int ret = 0;
2021
2022 /*
2023 * regulators that do not support regulator_set_voltage also
2024 * do not support regulator_set_optimum_mode
2025 */
2026 if (vreg->set_voltage_sup) {
2027 ret = regulator_set_load(vreg->reg, uA_load);
2028 if (ret < 0)
2029 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2030 __func__, vreg->name, uA_load, ret);
2031 else
2032 /*
2033 * regulator_set_load() can return non zero
2034 * value even for success case.
2035 */
2036 ret = 0;
2037 }
2038 return ret;
2039}
2040
2041static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2042 int min_uV, int max_uV)
2043{
2044 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302045 if (vreg->set_voltage_sup) {
2046 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2047 if (ret) {
2048 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302049 __func__, vreg->name, min_uV, max_uV, ret);
2050 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302051 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302052
2053 return ret;
2054}
2055
/*
 * Power up @vreg: set HPM load, program the voltage on first enable,
 * then enable the regulator and mark it enabled.
 */
static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
{
	int ret = 0;

	/* Put regulator in HPM (high power mode) */
	ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
	if (ret < 0)
		return ret;

	if (!vreg->is_enabled) {
		/* Set voltage level */
		ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
						vreg->high_vol_level);
		if (ret)
			return ret;
	}
	ret = regulator_enable(vreg->reg);
	if (ret) {
		pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
				__func__, vreg->name, ret);
		return ret;
	}
	vreg->is_enabled = true;
	return ret;
}
2081
/*
 * Power down @vreg. Always-on regulators are never disabled; they are
 * only dropped to LPM load (when supported). Ordinary regulators are
 * disabled, their load cleared and their minimum voltage relaxed to 0.
 */
static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
{
	int ret = 0;

	/* Never disable regulator marked as always_on */
	if (vreg->is_enabled && !vreg->is_always_on) {
		ret = regulator_disable(vreg->reg);
		if (ret) {
			pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
				__func__, vreg->name, ret);
			goto out;
		}
		vreg->is_enabled = false;

		/* Drop the load request now that the supply is off. */
		ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
		if (ret < 0)
			goto out;

		/* Set min. voltage level to 0 */
		ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
		if (ret)
			goto out;
	} else if (vreg->is_enabled && vreg->is_always_on) {
		if (vreg->lpm_sup) {
			/* Put always_on regulator in LPM (low power mode) */
			ret = sdhci_msm_vreg_set_optimum_mode(vreg,
							      vreg->lpm_uA);
			if (ret < 0)
				goto out;
		}
	}
out:
	return ret;
}
2116
2117static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2118 bool enable, bool is_init)
2119{
2120 int ret = 0, i;
2121 struct sdhci_msm_slot_reg_data *curr_slot;
2122 struct sdhci_msm_reg_data *vreg_table[2];
2123
2124 curr_slot = pdata->vreg_data;
2125 if (!curr_slot) {
2126 pr_debug("%s: vreg info unavailable,assuming the slot is powered by always on domain\n",
2127 __func__);
2128 goto out;
2129 }
2130
2131 vreg_table[0] = curr_slot->vdd_data;
2132 vreg_table[1] = curr_slot->vdd_io_data;
2133
2134 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2135 if (vreg_table[i]) {
2136 if (enable)
2137 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2138 else
2139 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2140 if (ret)
2141 goto out;
2142 }
2143 }
2144out:
2145 return ret;
2146}
2147
2148/*
2149 * Reset vreg by ensuring it is off during probe. A call
2150 * to enable vreg is needed to balance disable vreg
2151 */
2152static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
2153{
2154 int ret;
2155
2156 ret = sdhci_msm_setup_vreg(pdata, 1, true);
2157 if (ret)
2158 return ret;
2159 ret = sdhci_msm_setup_vreg(pdata, 0, true);
2160 return ret;
2161}
2162
/*
 * This init function should be called only once for each SDHC slot.
 *
 * With is_init == true: acquire both regulator handles and run the
 * off/on reset sequence. With is_init == false: deinitialize both
 * handles (the goto ladder below doubles as the teardown path).
 */
static int sdhci_msm_vreg_init(struct device *dev,
				struct sdhci_msm_pltfm_data *pdata,
				bool is_init)
{
	int ret = 0;
	struct sdhci_msm_slot_reg_data *curr_slot;
	struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;

	curr_slot = pdata->vreg_data;
	if (!curr_slot)
		goto out;

	curr_vdd_reg = curr_slot->vdd_data;
	curr_vdd_io_reg = curr_slot->vdd_io_data;

	if (!is_init)
		/* Deregister all regulators from regulator framework */
		goto vdd_io_reg_deinit;

	/*
	 * Get the regulator handle from voltage regulator framework
	 * and then try to set the voltage level for the regulator
	 */
	if (curr_vdd_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
		if (ret)
			goto out;
	}
	if (curr_vdd_io_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
		if (ret)
			/* vdd was initialized; unwind it before failing. */
			goto vdd_reg_deinit;
	}
	ret = sdhci_msm_vreg_reset(pdata);
	if (ret)
		dev_err(dev, "vreg reset failed (%d)\n", ret);
	goto out;

vdd_io_reg_deinit:
	if (curr_vdd_io_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
vdd_reg_deinit:
	if (curr_vdd_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
out:
	return ret;
}
2211
2212
2213static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2214 enum vdd_io_level level,
2215 unsigned int voltage_level)
2216{
2217 int ret = 0;
2218 int set_level;
2219 struct sdhci_msm_reg_data *vdd_io_reg;
2220
2221 if (!pdata->vreg_data)
2222 return ret;
2223
2224 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2225 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2226 switch (level) {
2227 case VDD_IO_LOW:
2228 set_level = vdd_io_reg->low_vol_level;
2229 break;
2230 case VDD_IO_HIGH:
2231 set_level = vdd_io_reg->high_vol_level;
2232 break;
2233 case VDD_IO_SET_LEVEL:
2234 set_level = voltage_level;
2235 break;
2236 default:
2237 pr_err("%s: invalid argument level = %d",
2238 __func__, level);
2239 ret = -EINVAL;
2240 return ret;
2241 }
2242 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2243 set_level);
2244 }
2245 return ret;
2246}
2247
/*
 * Acquire spin-lock host->lock before calling this function
 *
 * Enables or disables the SDIO wakeup GPIO IRQ, tracking the current
 * state to avoid unbalanced enable/disable calls.
 */
static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
					      bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	if (enable && !msm_host->is_sdiowakeup_enabled)
		enable_irq(msm_host->pdata->sdiowakeup_irq);
	else if (!enable && msm_host->is_sdiowakeup_enabled)
		disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
	else
		/* Requested state already current: warn, do not touch the IRQ. */
		dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
			__func__, enable, msm_host->is_sdiowakeup_enabled);
	/* NOTE(review): the cached state is updated even on the warn path. */
	msm_host->is_sdiowakeup_enabled = enable;
}
2266
/*
 * SDIO wakeup GPIO IRQ handler: masks its own IRQ and flags that SDIO
 * wakeup processing is pending.
 */
static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	unsigned long flags;

	pr_debug("%s: irq (%d) received\n", __func__, irq);

	/* Mask the wakeup IRQ under the host lock (config helper requires it). */
	spin_lock_irqsave(&host->lock, flags);
	sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
	spin_unlock_irqrestore(&host->lock, flags);
	msm_host->sdio_pending_processing = true;

	return IRQ_HANDLED;
}
2284
/* Dump the core power-control STATUS/MASK/CTL registers for debugging. */
void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
		mmc_hostname(host->mmc),
		readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
		readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
		readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
}
2296
/*
 * Power-control IRQ handler.
 *
 * Reads and clears PWRCTL_STATUS, services the requested bus power
 * (BUS_ON/BUS_OFF) and IO voltage (IO_LOW/IO_HIGH) transitions, acks
 * the result back via PWRCTL_CTL, updates the IO pad power switch, and
 * completes pwr_irq_completion for waiters in
 * sdhci_msm_check_power_status().
 */
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 irq_status = 0;
	u8 irq_ack = 0;
	int ret = 0;
	int pwr_state = 0, io_level = 0;
	unsigned long flags;
	int retry = 10;

	irq_status = readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
	pr_debug("%s: Received IRQ(%d), status=0x%x\n",
		mmc_hostname(msm_host->mmc), irq, irq_status);

	/* Clear the interrupt */
	writeb_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
	/*
	 * SDHC has core_mem and hc_mem device memory and these memory
	 * addresses do not fall within 1KB region. Hence, any update to
	 * core_mem address space would require an mb() to ensure this gets
	 * completed before its next update to registers within hc_mem.
	 */
	mb();
	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when actual reset and clear/read of status register is
	 * happening at a time. Hence, retry for at least 10 times to make
	 * sure status register is cleared. Otherwise, this will result in
	 * a spurious power IRQ resulting in system instability.
	 */
	while (irq_status &
		readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS)) {
		if (retry == 0) {
			pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
				mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			BUG_ON(1);
		}
		writeb_relaxed(irq_status,
			(msm_host->core_mem + CORE_PWRCTL_CLEAR));
		retry--;
		udelay(10);
	}
	if (likely(retry < 10))
		pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
			mmc_hostname(host->mmc), irq_status, retry);

	/* Handle BUS ON/OFF*/
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		/* Bus on: regulators up, pins active, IO pad at high level. */
		ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
		if (!ret) {
			ret = sdhci_msm_setup_pins(msm_host->pdata, true);
			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
					VDD_IO_HIGH, 0);
		}
		if (ret)
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
		else
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;

		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		/* Bus off: regulators down, pins to sleep, IO pad low. */
		ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
		if (!ret) {
			ret = sdhci_msm_setup_pins(msm_host->pdata, false);
			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
					VDD_IO_LOW, 0);
		}
		if (ret)
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
		else
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;

		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
	}
	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW) {
		/* Switch voltage Low */
		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
		if (ret)
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		else
			irq_ack |= CORE_PWRCTL_IO_SUCCESS;

		io_level = REQ_IO_LOW;
	}
	if (irq_status & CORE_PWRCTL_IO_HIGH) {
		/* Switch voltage High */
		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
		if (ret)
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		else
			irq_ack |= CORE_PWRCTL_IO_SUCCESS;

		io_level = REQ_IO_HIGH;
	}

	/* ACK status to the core */
	writeb_relaxed(irq_ack, (msm_host->core_mem + CORE_PWRCTL_CTL));
	/*
	 * SDHC has core_mem and hc_mem device memory and these memory
	 * addresses do not fall within 1KB region. Hence, any update to
	 * core_mem address space would require an mb() to ensure this gets
	 * completed before its next update to registers within hc_mem.
	 */
	mb();

	/* Route the IO pad power switch to match the new IO level. */
	if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
				~CORE_IO_PAD_PWR_SWITCH),
				host->ioaddr + CORE_VENDOR_SPEC);
	else if ((io_level & REQ_IO_LOW) ||
			(msm_host->caps_0 & CORE_1_8V_SUPPORT))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
				CORE_IO_PAD_PWR_SWITCH),
				host->ioaddr + CORE_VENDOR_SPEC);
	mb();

	pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
	/* Publish the new state and wake any waiter under the host lock. */
	spin_lock_irqsave(&host->lock, flags);
	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;
	complete(&msm_host->pwr_irq_completion);
	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_HANDLED;
}
2432
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302433static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302434show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2435{
2436 struct sdhci_host *host = dev_get_drvdata(dev);
2437 int poll;
2438 unsigned long flags;
2439
2440 spin_lock_irqsave(&host->lock, flags);
2441 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2442 spin_unlock_irqrestore(&host->lock, flags);
2443
2444 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2445}
2446
2447static ssize_t
2448store_polling(struct device *dev, struct device_attribute *attr,
2449 const char *buf, size_t count)
2450{
2451 struct sdhci_host *host = dev_get_drvdata(dev);
2452 int value;
2453 unsigned long flags;
2454
2455 if (!kstrtou32(buf, 0, &value)) {
2456 spin_lock_irqsave(&host->lock, flags);
2457 if (value) {
2458 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2459 mmc_detect_change(host->mmc, 0);
2460 } else {
2461 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2462 }
2463 spin_unlock_irqrestore(&host->lock, flags);
2464 }
2465 return count;
2466}
2467
2468static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302469show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2470 char *buf)
2471{
2472 struct sdhci_host *host = dev_get_drvdata(dev);
2473 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2474 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2475
2476 return snprintf(buf, PAGE_SIZE, "%u\n",
2477 msm_host->msm_bus_vote.is_max_bw_needed);
2478}
2479
2480static ssize_t
2481store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2482 const char *buf, size_t count)
2483{
2484 struct sdhci_host *host = dev_get_drvdata(dev);
2485 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2486 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2487 uint32_t value;
2488 unsigned long flags;
2489
2490 if (!kstrtou32(buf, 0, &value)) {
2491 spin_lock_irqsave(&host->lock, flags);
2492 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2493 spin_unlock_irqrestore(&host->lock, flags);
2494 }
2495 return count;
2496}
2497
/*
 * Block until the power IRQ has serviced the requested power/IO state
 * (req_type is a REQ_BUS_* / REQ_IO_* bitmask), or return immediately
 * when the state is already current or no IRQ can be expected.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	unsigned long flags;
	bool done = false;
	u32 io_sig_sts;

	spin_lock_irqsave(&host->lock, flags);
	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
			mmc_hostname(host->mmc), __func__, req_type,
			msm_host->curr_pwr_state, msm_host->curr_io_level);
	io_sig_sts = readl_relaxed(msm_host->core_mem + CORE_GENERICS);
	/*
	 * The IRQ for request type IO High/Low will be generated when -
	 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
	 * 2. If 1 is true and when there is a state change in 1.8V enable
	 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
	 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
	 * layer tries to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of controller power up sequence. Hence, check
	 * for host->pwr to handle a case where IO voltage high request is
	 * issued even before controller power up.
	 */
	if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
		if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
			((req_type & REQ_IO_HIGH) && !host->pwr)) {
			pr_debug("%s: do not wait for power IRQ that never comes\n",
					mmc_hostname(host->mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return;
		}
	}

	/* Requested state already reached: no need to wait. */
	if ((req_type & msm_host->curr_pwr_state) ||
			(req_type & msm_host->curr_io_level))
		done = true;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * This is needed here to handle a case where IRQ gets
	 * triggered even before this function is called so that
	 * x->done counter of completion gets reset. Otherwise,
	 * next call to wait_for_completion returns immediately
	 * without actually waiting for the IRQ to be handled.
	 */
	if (done)
		init_completion(&msm_host->pwr_irq_completion);
	else
		wait_for_completion(&msm_host->pwr_irq_completion);

	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
			__func__, req_type);
}
2554
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002555static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2556{
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302557 u32 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
2558
2559 if (enable) {
2560 config |= CORE_CDR_EN;
2561 config &= ~CORE_CDR_EXT_EN;
2562 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
2563 } else {
2564 config &= ~CORE_CDR_EN;
2565 config |= CORE_CDR_EXT_EN;
2566 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
2567 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002568}
2569
/* Maximum number of scatter-gather segments supported by this host. */
static unsigned int sdhci_msm_max_segs(void)
{
	return SDHCI_MSM_MAX_SEGMENTS;
}
2574
/*
 * Minimum supported clock rate: the first entry of the DT-provided
 * qcom,clk-rates table (table appears to be ordered ascending — see
 * sdhci_msm_get_max_clock).
 */
static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	return msm_host->pdata->sup_clk_table[0];
}
2582
/*
 * Maximum supported clock rate: the last entry of the DT-provided
 * qcom,clk-rates table.
 */
static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int max_clk_index = msm_host->pdata->sup_clk_cnt;

	return msm_host->pdata->sup_clk_table[max_clk_index - 1];
}
2591
2592static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2593 u32 req_clk)
2594{
2595 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2596 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2597 unsigned int sel_clk = -1;
2598 unsigned char cnt;
2599
2600 if (req_clk < sdhci_msm_get_min_clock(host)) {
2601 sel_clk = sdhci_msm_get_min_clock(host);
2602 return sel_clk;
2603 }
2604
2605 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2606 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2607 break;
2608 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2609 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2610 break;
2611 } else {
2612 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2613 }
2614 }
2615 return sel_clk;
2616}
2617
/*
 * Enable the clocks required for controller register access (the bus
 * interface pclk, if present, and the core clock), after placing a bus
 * bandwidth vote. Idempotent: the controller_clock flag short-circuits
 * when the clocks are already on.
 *
 * Returns 0 on success or a clk_prepare_enable() error code; on failure
 * every step taken so far (pclk enable, bus vote) is rolled back.
 */
static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	/* Already enabled - nothing to do */
	if (atomic_read(&msm_host->controller_clock))
		return 0;

	/* Vote for bus bandwidth before turning the controller clocks on */
	sdhci_msm_bus_voting(host, 1);

	/* pclk is optional on some targets; skip if the clk lookup failed */
	if (!IS_ERR(msm_host->pclk)) {
		rc = clk_prepare_enable(msm_host->pclk);
		if (rc) {
			pr_err("%s: %s: failed to enable the pclk with error %d\n",
				mmc_hostname(host->mmc), __func__, rc);
			goto remove_vote;
		}
	}

	rc = clk_prepare_enable(msm_host->clk);
	if (rc) {
		pr_err("%s: %s: failed to enable the host-clk with error %d\n",
			mmc_hostname(host->mmc), __func__, rc);
		goto disable_pclk;
	}

	/* Mark the controller clocks on only after both enables succeeded */
	atomic_set(&msm_host->controller_clock, 1);
	pr_debug("%s: %s: enabled controller clock\n",
			mmc_hostname(host->mmc), __func__);
	goto out;

disable_pclk:
	if (!IS_ERR(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2659
2660
2661
/*
 * Enable or disable the full set of SDHC clocks (controller clocks plus
 * the optional bus, fixed-frequency and sleep clocks), updating the bus
 * bandwidth vote to match. The clks_on flag makes the call idempotent in
 * both directions.
 *
 * Returns 0 on success or a clk_prepare_enable() error code; on an enable
 * failure, all clocks enabled so far are unwound in reverse order.
 */
static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	if (enable && !atomic_read(&msm_host->clks_on)) {
		pr_debug("%s: request to enable clocks\n",
				mmc_hostname(host->mmc));

		/*
		 * The bus-width or the clock rate might have changed
		 * after controller clocks were enabled; update the bus
		 * vote in such a case.
		 */
		if (atomic_read(&msm_host->controller_clock))
			sdhci_msm_bus_voting(host, 1);

		rc = sdhci_msm_enable_controller_clock(host);
		if (rc)
			goto remove_vote;

		/* Optional AHB/bus clock */
		if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
			rc = clk_prepare_enable(msm_host->bus_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_controller_clk;
			}
		}
		/* Optional fixed-frequency clock (HS400 targets) */
		if (!IS_ERR(msm_host->ff_clk)) {
			rc = clk_prepare_enable(msm_host->ff_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_bus_clk;
			}
		}
		/* Optional sleep clock */
		if (!IS_ERR(msm_host->sleep_clk)) {
			rc = clk_prepare_enable(msm_host->sleep_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_ff_clk;
			}
		}
		/* Ensure all clock enables are complete before proceeding */
		mb();

	} else if (!enable && atomic_read(&msm_host->clks_on)) {
		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
		mb();
		/*
		 * During 1.8V signal switching the clock source must
		 * still be ON as it requires accessing SDHC
		 * registers (SDHCi host control2 register bit 3 must
		 * be written and polled after stopping the SDCLK).
		 */
		if (host->mmc->card_clock_off)
			return 0;
		pr_debug("%s: request to disable clocks\n",
				mmc_hostname(host->mmc));
		/* Disable in reverse order of the enable path */
		if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
			clk_disable_unprepare(msm_host->sleep_clk);
		if (!IS_ERR_OR_NULL(msm_host->ff_clk))
			clk_disable_unprepare(msm_host->ff_clk);
		clk_disable_unprepare(msm_host->clk);
		if (!IS_ERR(msm_host->pclk))
			clk_disable_unprepare(msm_host->pclk);
		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
			clk_disable_unprepare(msm_host->bus_clk);

		atomic_set(&msm_host->controller_clock, 0);
		sdhci_msm_bus_voting(host, 0);
	}
	atomic_set(&msm_host->clks_on, enable);
	goto out;
disable_ff_clk:
	if (!IS_ERR_OR_NULL(msm_host->ff_clk))
		clk_disable_unprepare(msm_host->ff_clk);
disable_bus_clk:
	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
disable_controller_clk:
	if (!IS_ERR_OR_NULL(msm_host->clk))
		clk_disable_unprepare(msm_host->clk);
	if (!IS_ERR_OR_NULL(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
	atomic_set(&msm_host->controller_clock, 0);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2756
/*
 * Set the card clock rate. Handles:
 *  - clock == 0: disable power-save gating and turn all clocks off;
 *  - power-save (auto clock gating) enable/disable based on whether the
 *    current card allows clock gating;
 *  - DDR timings, where the internal clock must run at twice the card rate;
 *  - HS400 mode, which needs the divided MCLK/2 source, the vendor-specific
 *    HC_SELECT_IN override and (without CDCLP533) a DLL-lock poll.
 * Finally programs the selected rate and refreshes the bus vote.
 */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int rc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_card *card = host->mmc->card;
	struct mmc_ios curr_ios = host->mmc->ios;
	u32 sup_clock, ddr_clock, dll_lock;
	bool curr_pwrsave;

	if (!clock) {
		/*
		 * disable pwrsave to ensure clock is not auto-gated until
		 * the rate is >400KHz (initialization complete).
		 */
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			~CORE_CLK_PWRSAVE, host->ioaddr + CORE_VENDOR_SPEC);
		sdhci_msm_prepare_clocks(host, false);
		host->clock = clock;
		goto out;
	}

	rc = sdhci_msm_prepare_clocks(host, true);
	if (rc)
		goto out;

	curr_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			  CORE_CLK_PWRSAVE);
	/* Enable auto clock gating once past init, if the card allows it */
	if ((clock > 400000) &&
	    !curr_pwrsave && card && mmc_host_may_gate_card(card))
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				| CORE_CLK_PWRSAVE,
				host->ioaddr + CORE_VENDOR_SPEC);
	/*
	 * Disable pwrsave for a newly added card if it doesn't allow clock
	 * gating.
	 */
	else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE,
				host->ioaddr + CORE_VENDOR_SPEC);

	sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
	if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
		(curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
		(curr_ios.timing == MMC_TIMING_MMC_HS400)) {
		/*
		 * The SDHC requires internal clock frequency to be double the
		 * actual clock that will be set for DDR mode. The controller
		 * uses the faster clock(100/400MHz) for some of its parts and
		 * send the actual required clock (50/200MHz) to the card.
		 */
		ddr_clock = clock * 2;
		sup_clock = sdhci_msm_get_sup_clk_rate(host,
				ddr_clock);
	}

	/*
	 * In general all timing modes are controlled via UHS mode select in
	 * Host Control2 register. eMMC specific HS200/HS400 doesn't have
	 * their respective modes defined here, hence we use these values.
	 *
	 * HS200 - SDR104 (Since they both are equivalent in functionality)
	 * HS400 - This involves multiple configurations
	 *		Initially SDR104 - when tuning is required as HS200
	 *		Then when switching to DDR @ 400MHz (HS400) we use
	 *		the vendor specific HC_SELECT_IN to control the mode.
	 *
	 * In addition to controlling the modes we also need to select the
	 * correct input clock for DLL depending on the mode.
	 *
	 * HS400 - divided clock (free running MCLK/2)
	 * All other modes - default (free running MCLK)
	 */
	if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
		/* Select the divided clock (free running MCLK/2) */
		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_HS400),
				host->ioaddr + CORE_VENDOR_SPEC);
		/*
		 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
		 * register
		 */
		if ((msm_host->tuning_done ||
			(card && mmc_card_strobe(card) &&
			 msm_host->enhanced_strobe)) &&
			!msm_host->calibration_done) {
			/*
			 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
			 * field in VENDOR_SPEC_FUNC
			 */
			writel_relaxed((readl_relaxed(host->ioaddr + \
					CORE_VENDOR_SPEC)
					| CORE_HC_SELECT_IN_HS400
					| CORE_HC_SELECT_IN_EN),
					host->ioaddr + CORE_VENDOR_SPEC);
		}
		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
			/*
			 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
			 * CORE_DLL_STATUS to be set. This should get set
			 * with in 15 us at 200 MHz.
			 */
			rc = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
					dll_lock, (dll_lock & (CORE_DLL_LOCK |
					CORE_DDR_DLL_LOCK)), 10, 1000);
			if (rc == -ETIMEDOUT)
				pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
						mmc_hostname(host->mmc),
						dll_lock);
		}
	} else {
		if (!msm_host->use_cdclp533)
			/* Clear CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
			writel_relaxed((readl_relaxed(host->ioaddr +
					CORE_VENDOR_SPEC3) & ~CORE_PWRSAVE_DLL),
					host->ioaddr + CORE_VENDOR_SPEC3);

		/* Select the default clock (free running MCLK) */
		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_DFLT),
				host->ioaddr + CORE_VENDOR_SPEC);

		/*
		 * Disable HC_SELECT_IN to be able to use the UHS mode select
		 * configuration from Host Control2 register for all other
		 * modes.
		 *
		 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
		 * in VENDOR_SPEC_FUNC
		 */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_SELECT_IN_EN
				& ~CORE_HC_SELECT_IN_MASK),
				host->ioaddr + CORE_VENDOR_SPEC);
	}
	/* Order all vendor-spec writes before changing the clock rate */
	mb();

	if (sup_clock != msm_host->clk_rate) {
		pr_debug("%s: %s: setting clk rate to %u\n",
				mmc_hostname(host->mmc), __func__, sup_clock);
		rc = clk_set_rate(msm_host->clk, sup_clock);
		if (rc) {
			pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
					mmc_hostname(host->mmc), __func__,
					sup_clock, rc);
			goto out;
		}
		msm_host->clk_rate = sup_clock;
		host->clock = clock;
		/*
		 * Update the bus vote in case of frequency change due to
		 * clock scaling.
		 */
		sdhci_msm_bus_voting(host, 1);
	}
out:
	sdhci_set_clock(host, clock);
}
2918
/*
 * Program the UHS mode-select field of Host Control2 for the requested
 * timing. HS200/HS400 have no field of their own and are mapped to SDR104;
 * below 100 MHz the mode field is cleared (feedback clock, tuning skipped)
 * and the DLL is reset and powered down so it is not left active.
 */
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((uhs == MMC_TIMING_MMC_HS400) ||
		(uhs == MMC_TIMING_MMC_HS200) ||
		(uhs == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (uhs == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (uhs == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (uhs == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((uhs == MMC_TIMING_UHS_DDR50) ||
		(uhs == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if ((uhs == MMC_TIMING_MMC_HS400) ||
		    (uhs == MMC_TIMING_MMC_HS200) ||
		    (uhs == MMC_TIMING_UHS_SDR104))
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;

		/*
		 * Make sure DLL is disabled when not required
		 *
		 * Write 1 to DLL_RST bit of DLL_CONFIG register
		 */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				| CORE_DLL_RST),
				host->ioaddr + CORE_DLL_CONFIG);

		/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				| CORE_DLL_PDN),
				host->ioaddr + CORE_DLL_CONFIG);
		/* Ensure DLL reset/power-down complete before continuing */
		mb();

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
		mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

}
2981
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08002982#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03002983#define DRV_NAME "cmdq-host"
Ritesh Harjani17f5d812015-12-23 13:21:02 +05302984static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03002985{
Ritesh Harjani17f5d812015-12-23 13:21:02 +05302986 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2987 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03002988 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05302989 struct cmdq_host *cq_host = host->cq_host;
2990
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03002991 u32 version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
2992 u16 minor = version & CORE_VERSION_TARGET_MASK;
2993 /* registers offset changed starting from 4.2.0 */
2994 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
2995
2996 pr_err("---- Debug RAM dump ----\n");
2997 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
2998 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
2999 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
3000
3001 while (i < 16) {
3002 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
3003 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
3004 i++;
3005 }
3006 pr_err("-------------------------\n");
3007}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303008
/*
 * Dump SDHC vendor-specific registers and walk the internal test bus for
 * post-mortem debug. Assumes the controller clocks are already enabled,
 * since every access below is a raw register read/write.
 */
void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int tbsel, tbsel2;
	int i, index = 0;
	u32 test_bus_val = 0;
	u32 debug_reg[MAX_TEST_BUS] = {0};

	pr_info("----------- VENDOR REGISTER DUMP -----------\n");
	/* CMDQ debug RAM is only meaningful when a cmdq host is attached */
	if (host->cq_host)
		sdhci_msm_cmdq_dump_debug_ram(host);

	pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
		readl_relaxed(msm_host->core_mem + CORE_MCI_DATA_CNT),
		readl_relaxed(msm_host->core_mem + CORE_MCI_FIFO_CNT),
		readl_relaxed(msm_host->core_mem + CORE_MCI_STATUS));
	pr_info("DLL cfg:  0x%08x | DLL sts:  0x%08x | SDCC ver: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_DLL_CONFIG),
		readl_relaxed(host->ioaddr + CORE_DLL_STATUS),
		readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION));
	pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC),
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
	pr_info("Vndr func2: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2));

	/*
	 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
	 * of CORE_TESTBUS_CONFIG register.
	 *
	 * To select test bus 0 to 7 use tbsel and to select any test bus
	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
	 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
	 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
	 */
	for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
		for (tbsel = 0; tbsel < 8; tbsel++) {
			if (index >= MAX_TEST_BUS)
				break;
			/* Select the test bus, then latch its debug value */
			test_bus_val = (tbsel2 << CORE_TESTBUS_SEL2_BIT) |
					tbsel | CORE_TESTBUS_ENA;
			writel_relaxed(test_bus_val,
				msm_host->core_mem + CORE_TESTBUS_CONFIG);
			debug_reg[index++] = readl_relaxed(msm_host->core_mem +
							CORE_SDCC_DEBUG_REG);
		}
	}
	for (i = 0; i < MAX_TEST_BUS; i = i + 4)
		pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, i + 3, debug_reg[i], debug_reg[i+1],
				debug_reg[i+2], debug_reg[i+3]);
}
3063
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303064/*
3065 * sdhci_msm_enhanced_strobe_mask :-
3066 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3067 * SW should write 3 to
3068 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3069 * The default reset value of this register is 2.
3070 */
3071static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3072{
3073 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3074 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3075
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303076 if (!msm_host->enhanced_strobe ||
3077 !mmc_card_strobe(msm_host->mmc->card)) {
3078 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303079 mmc_hostname(host->mmc));
3080 return;
3081 }
3082
3083 if (set) {
3084 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
3085 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3086 host->ioaddr + CORE_VENDOR_SPEC3);
3087 } else {
3088 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
3089 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3090 host->ioaddr + CORE_VENDOR_SPEC3);
3091 }
3092}
3093
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003094static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3095{
3096 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3097 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3098
3099 if (set) {
3100 writel_relaxed(CORE_TESTBUS_ENA,
3101 msm_host->core_mem + CORE_TESTBUS_CONFIG);
3102 } else {
3103 u32 value;
3104
3105 value = readl_relaxed(msm_host->core_mem + CORE_TESTBUS_CONFIG);
3106 value &= ~CORE_TESTBUS_ENA;
3107 writel_relaxed(value, msm_host->core_mem + CORE_TESTBUS_CONFIG);
3108 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303109}
3110
/*
 * Apply (or clear) the controller reset workaround.
 *
 * On enable: request a software reset (HC_SW_RST_REQ) and poll up to
 * ~100ms (10000 x 10us) for the controller to clear the bit. If it never
 * clears, set HC_SW_RST_WAIT_IDLE_DIS so the reset does not wait for
 * pending AXI transfers, and record the time for later rate-limiting.
 * On disable: clear the wait-idle-disable bit again.
 */
void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
{
	u32 vendor_func2;
	unsigned long timeout;

	vendor_func2 = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);

	if (enable) {
		writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
				CORE_VENDOR_SPEC_FUNC2);
		timeout = 10000;
		/* Poll until the controller acknowledges the reset request */
		while (readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2) &
				HC_SW_RST_REQ) {
			if (timeout == 0) {
				pr_info("%s: Applying wait idle disable workaround\n",
					mmc_hostname(host->mmc));
				/*
				 * Apply the reset workaround to not wait for
				 * pending data transfers on AXI before
				 * resetting the controller. This could be
				 * risky if the transfers were stuck on the
				 * AXI bus.
				 */
				vendor_func2 = readl_relaxed(host->ioaddr +
						CORE_VENDOR_SPEC_FUNC2);
				writel_relaxed(vendor_func2 |
					HC_SW_RST_WAIT_IDLE_DIS,
					host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
				host->reset_wa_t = ktime_get();
				return;
			}
			timeout--;
			udelay(10);
		}
		pr_info("%s: waiting for SW_RST_REQ is successful\n",
				mmc_hostname(host->mmc));
	} else {
		writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
				host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
	}
}
3152
Gilad Broner44445992015-09-29 16:05:39 +03003153static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3154{
3155 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05303156 container_of(work, struct sdhci_msm_pm_qos_irq,
3157 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003158
3159 if (atomic_read(&pm_qos_irq->counter))
3160 return;
3161
3162 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3163 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3164}
3165
3166void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3167{
3168 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3169 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3170 struct sdhci_msm_pm_qos_latency *latency =
3171 &msm_host->pdata->pm_qos_data.irq_latency;
3172 int counter;
3173
3174 if (!msm_host->pm_qos_irq.enabled)
3175 return;
3176
3177 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3178 /* Make sure to update the voting in case power policy has changed */
3179 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3180 && counter > 1)
3181 return;
3182
Asutosh Das36c2e922015-12-01 12:19:58 +05303183 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003184 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3185 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3186 msm_host->pm_qos_irq.latency);
3187}
3188
3189void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
3190{
3191 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3192 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3193 int counter;
3194
3195 if (!msm_host->pm_qos_irq.enabled)
3196 return;
3197
Subhash Jadavani4d813902015-10-15 12:16:43 -07003198 if (atomic_read(&msm_host->pm_qos_irq.counter)) {
3199 counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
3200 } else {
3201 WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
3202 return;
Gilad Broner44445992015-09-29 16:05:39 +03003203 }
Subhash Jadavani4d813902015-10-15 12:16:43 -07003204
Gilad Broner44445992015-09-29 16:05:39 +03003205 if (counter)
3206 return;
3207
3208 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303209 schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
3210 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner44445992015-09-29 16:05:39 +03003211 return;
3212 }
3213
3214 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3215 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3216 msm_host->pm_qos_irq.latency);
3217}
3218
Gilad Broner68c54562015-09-20 11:59:46 +03003219static ssize_t
3220sdhci_msm_pm_qos_irq_show(struct device *dev,
3221 struct device_attribute *attr, char *buf)
3222{
3223 struct sdhci_host *host = dev_get_drvdata(dev);
3224 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3225 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3226 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3227
3228 return snprintf(buf, PAGE_SIZE,
3229 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3230 irq->enabled, atomic_read(&irq->counter), irq->latency);
3231}
3232
3233static ssize_t
3234sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3235 struct device_attribute *attr, char *buf)
3236{
3237 struct sdhci_host *host = dev_get_drvdata(dev);
3238 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3239 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3240
3241 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3242}
3243
3244static ssize_t
3245sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3246 struct device_attribute *attr, const char *buf, size_t count)
3247{
3248 struct sdhci_host *host = dev_get_drvdata(dev);
3249 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3250 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3251 uint32_t value;
3252 bool enable;
3253 int ret;
3254
3255 ret = kstrtou32(buf, 0, &value);
3256 if (ret)
3257 goto out;
3258 enable = !!value;
3259
3260 if (enable == msm_host->pm_qos_irq.enabled)
3261 goto out;
3262
3263 msm_host->pm_qos_irq.enabled = enable;
3264 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303265 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003266 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3267 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3268 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3269 msm_host->pm_qos_irq.latency);
3270 }
3271
3272out:
3273 return count;
3274}
3275
#ifdef CONFIG_SMP
/*
 * Attach the PM QoS request to the SDHC interrupt line so the request is
 * applied to whichever CPU services the IRQ. The req.irq field only
 * exists on SMP builds, hence the stub below for UP.
 */
static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host)
{
	msm_host->pm_qos_irq.req.irq = host->irq;
}
#else
static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host) { }
#endif
3286
/*
 * One-time setup of the IRQ PM QoS machinery: configure the request type
 * and CPU affinity from platform data, register the PM QoS request with
 * the performance-mode latency (appropriate for initialization), and
 * create the sysfs enable/status attributes. Called per partition but
 * guarded so it only runs once.
 */
void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *irq_latency;
	int ret;

	if (!msm_host->pdata->pm_qos_data.irq_valid)
		return;

	/* Initialize only once as this gets called per partition */
	if (msm_host->pm_qos_irq.enabled)
		return;

	atomic_set(&msm_host->pm_qos_irq.counter, 0);
	msm_host->pm_qos_irq.req.type =
			msm_host->pdata->pm_qos_data.irq_req_type;
	/* IRQ-affine requests track the IRQ; otherwise pin to the DT CPU */
	if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
		(msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
		set_affine_irq(msm_host, host);
	else
		cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
			cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));

	INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
		sdhci_msm_pm_qos_irq_unvote_work);
	/* For initialization phase, set the performance latency */
	irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
	msm_host->pm_qos_irq.latency =
		irq_latency->latency[SDHCI_PERFORMANCE_MODE];
	pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
			msm_host->pm_qos_irq.latency);
	msm_host->pm_qos_irq.enabled = true;

	/* sysfs: writable enable knob and read-only status */
	msm_host->pm_qos_irq.enable_attr.show =
		sdhci_msm_pm_qos_irq_enable_show;
	msm_host->pm_qos_irq.enable_attr.store =
		sdhci_msm_pm_qos_irq_enable_store;
	sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
	msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
	msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(&msm_host->pdev->dev,
		&msm_host->pm_qos_irq.enable_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
			__func__, ret);

	msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
	msm_host->pm_qos_irq.status_attr.store = NULL;
	sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
	msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
	msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_irq.status_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
			__func__, ret);
}
3346
3347static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3348 struct device_attribute *attr, char *buf)
3349{
3350 struct sdhci_host *host = dev_get_drvdata(dev);
3351 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3352 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3353 struct sdhci_msm_pm_qos_group *group;
3354 int i;
3355 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3356 int offset = 0;
3357
3358 for (i = 0; i < nr_groups; i++) {
3359 group = &msm_host->pm_qos[i];
3360 offset += snprintf(&buf[offset], PAGE_SIZE,
3361 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
3362 i, group->req.cpus_affine.bits[0],
3363 msm_host->pm_qos_group_enable,
3364 atomic_read(&group->counter),
3365 group->latency);
3366 }
3367
3368 return offset;
3369}
3370
3371static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
3372 struct device_attribute *attr, char *buf)
3373{
3374 struct sdhci_host *host = dev_get_drvdata(dev);
3375 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3376 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3377
3378 return snprintf(buf, PAGE_SIZE, "%s\n",
3379 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
3380}
3381
3382static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
3383 struct device_attribute *attr, const char *buf, size_t count)
3384{
3385 struct sdhci_host *host = dev_get_drvdata(dev);
3386 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3387 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3388 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3389 uint32_t value;
3390 bool enable;
3391 int ret;
3392 int i;
3393
3394 ret = kstrtou32(buf, 0, &value);
3395 if (ret)
3396 goto out;
3397 enable = !!value;
3398
3399 if (enable == msm_host->pm_qos_group_enable)
3400 goto out;
3401
3402 msm_host->pm_qos_group_enable = enable;
3403 if (!enable) {
3404 for (i = 0; i < nr_groups; i++) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303405 cancel_delayed_work_sync(
3406 &msm_host->pm_qos[i].unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003407 atomic_set(&msm_host->pm_qos[i].counter, 0);
3408 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
3409 pm_qos_update_request(&msm_host->pm_qos[i].req,
3410 msm_host->pm_qos[i].latency);
3411 }
3412 }
3413
3414out:
3415 return count;
Gilad Broner44445992015-09-29 16:05:39 +03003416}
3417
3418static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3419{
3420 int i;
3421 struct sdhci_msm_cpu_group_map *map =
3422 &msm_host->pdata->pm_qos_data.cpu_group_map;
3423
3424 if (cpu < 0)
3425 goto not_found;
3426
3427 for (i = 0; i < map->nr_groups; i++)
3428 if (cpumask_test_cpu(cpu, &map->mask[i]))
3429 return i;
3430
3431not_found:
3432 return -EINVAL;
3433}
3434
/*
 * Vote for a CPU-group PM QoS latency on behalf of the CPU issuing the
 * request.  The group's reference counter tracks in-flight users; only
 * the first voter (or a change of latency due to the current power
 * policy) actually updates the PM QoS request.
 */
void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency, int cpu)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int group = sdhci_msm_get_cpu_group(msm_host, cpu);
	struct sdhci_msm_pm_qos_group *pm_qos_group;
	int counter;

	/* Nothing to do when group voting is disabled or cpu is unmapped */
	if (!msm_host->pm_qos_group_enable || group < 0)
		return;

	pm_qos_group = &msm_host->pm_qos[group];
	counter = atomic_inc_return(&pm_qos_group->counter);

	/* Make sure to update the voting in case power policy has changed */
	if (pm_qos_group->latency == latency->latency[host->power_policy]
		&& counter > 1)
		return;

	/* A pending delayed unvote must not undo the vote taken below */
	cancel_delayed_work_sync(&pm_qos_group->unvote_work);

	pm_qos_group->latency = latency->latency[host->power_policy];
	pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
}
3460
3461static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3462{
3463 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05303464 container_of(work, struct sdhci_msm_pm_qos_group,
3465 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003466
3467 if (atomic_read(&group->counter))
3468 return;
3469
3470 group->latency = PM_QOS_DEFAULT_VALUE;
3471 pm_qos_update_request(&group->req, group->latency);
3472}
3473
/*
 * Drop one reference on the CPU group's PM QoS vote.  When the last
 * reference is dropped the vote is released either via a delayed work
 * item (@async) or immediately.  Returns true when this call released
 * (or scheduled release of) the vote, false otherwise.
 */
bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int group = sdhci_msm_get_cpu_group(msm_host, cpu);

	/*
	 * NOTE: short-circuit evaluation is load-bearing here - the counter
	 * is only decremented when voting is enabled and @cpu maps to a
	 * group; a nonzero result means other users still hold the vote.
	 */
	if (!msm_host->pm_qos_group_enable || group < 0 ||
		atomic_dec_return(&msm_host->pm_qos[group].counter))
		return false;

	if (async) {
		schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
				      msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
		return true;
	}

	msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
	pm_qos_update_request(&msm_host->pm_qos[group].req,
				msm_host->pm_qos[group].latency);
	return true;
}
3495
3496void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
3497 struct sdhci_msm_pm_qos_latency *latency)
3498{
3499 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3500 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3501 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3502 struct sdhci_msm_pm_qos_group *group;
3503 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03003504 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003505
3506 if (msm_host->pm_qos_group_enable)
3507 return;
3508
3509 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
3510 GFP_KERNEL);
3511 if (!msm_host->pm_qos)
3512 return;
3513
3514 for (i = 0; i < nr_groups; i++) {
3515 group = &msm_host->pm_qos[i];
Asutosh Das36c2e922015-12-01 12:19:58 +05303516 INIT_DELAYED_WORK(&group->unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003517 sdhci_msm_pm_qos_cpu_unvote_work);
3518 atomic_set(&group->counter, 0);
3519 group->req.type = PM_QOS_REQ_AFFINE_CORES;
3520 cpumask_copy(&group->req.cpus_affine,
3521 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
3522 /* For initialization phase, set the performance mode latency */
3523 group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
3524 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
3525 group->latency);
3526 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
3527 __func__, i,
3528 group->req.cpus_affine.bits[0],
3529 group->latency,
3530 &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
3531 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03003532 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03003533 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003534
3535 /* sysfs */
3536 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
3537 msm_host->pm_qos_group_status_attr.store = NULL;
3538 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
3539 msm_host->pm_qos_group_status_attr.attr.name =
3540 "pm_qos_cpu_groups_status";
3541 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
3542 ret = device_create_file(&msm_host->pdev->dev,
3543 &msm_host->pm_qos_group_status_attr);
3544 if (ret)
3545 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
3546 __func__, ret);
3547 msm_host->pm_qos_group_enable_attr.show =
3548 sdhci_msm_pm_qos_group_enable_show;
3549 msm_host->pm_qos_group_enable_attr.store =
3550 sdhci_msm_pm_qos_group_enable_store;
3551 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
3552 msm_host->pm_qos_group_enable_attr.attr.name =
3553 "pm_qos_cpu_groups_enable";
3554 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
3555 ret = device_create_file(&msm_host->pdev->dev,
3556 &msm_host->pm_qos_group_enable_attr);
3557 if (ret)
3558 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
3559 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03003560}
3561
/*
 * Called before a request is issued: take the IRQ PM QoS vote and the
 * PM QoS vote for the group of the CPU issuing the request.  If the
 * issuing CPU moved to a different group since the previous request,
 * the old group's vote is dropped first.
 */
static void sdhci_msm_pre_req(struct sdhci_host *host,
		struct mmc_request *mmc_req)
{
	int cpu;
	int group;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int prev_group = sdhci_msm_get_cpu_group(msm_host,
			msm_host->pm_qos_prev_cpu);

	sdhci_msm_pm_qos_irq_vote(host);

	/* Sample the current CPU; preemption is re-enabled right away */
	cpu = get_cpu();
	put_cpu();
	group = sdhci_msm_get_cpu_group(msm_host, cpu);
	if (group < 0)
		return;

	if (group != prev_group && prev_group >= 0) {
		sdhci_msm_pm_qos_cpu_unvote(host,
				msm_host->pm_qos_prev_cpu, false);
		prev_group = -1; /* make sure to vote for new group */
	}

	if (prev_group < 0) {
		sdhci_msm_pm_qos_cpu_vote(host,
				msm_host->pdata->pm_qos_data.latency, cpu);
		msm_host->pm_qos_prev_cpu = cpu;
	}
}
3592
3593static void sdhci_msm_post_req(struct sdhci_host *host,
3594 struct mmc_request *mmc_req)
3595{
3596 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3597 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3598
3599 sdhci_msm_pm_qos_irq_unvote(host, false);
3600
3601 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
3602 msm_host->pm_qos_prev_cpu = -1;
3603}
3604
3605static void sdhci_msm_init(struct sdhci_host *host)
3606{
3607 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3608 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3609
3610 sdhci_msm_pm_qos_irq_init(host);
3611
3612 if (msm_host->pdata->pm_qos_data.legacy_valid)
3613 sdhci_msm_pm_qos_cpu_init(host,
3614 msm_host->pdata->pm_qos_data.latency);
3615}
3616
Sahitya Tummala9150a942014-10-31 15:33:04 +05303617static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
3618{
3619 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3620 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3621 struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
3622 u32 max_curr = 0;
3623
3624 if (curr_slot && curr_slot->vdd_data)
3625 max_curr = curr_slot->vdd_data->hpm_uA;
3626
3627 return max_curr;
3628}
3629
/* Vendor operation callbacks plugged into the core sdhci driver */
static struct sdhci_ops sdhci_msm_ops = {
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.check_power_status = sdhci_msm_check_power_status,
	.platform_execute_tuning = sdhci_msm_execute_tuning,
	.enhanced_strobe = sdhci_msm_enhanced_strobe,
	.toggle_cdr = sdhci_msm_toggle_cdr,
	.get_max_segments = sdhci_msm_max_segs,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
	.enable_controller_clock = sdhci_msm_enable_controller_clock,
	/* generic sdhci helpers are sufficient for bus width and reset */
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
	.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
	.reset_workaround = sdhci_msm_reset_workaround,
	/* PM QoS voting hooks (see sdhci_msm_pm_qos_* above) */
	.init = sdhci_msm_init,
	.pre_req = sdhci_msm_pre_req,
	.post_req = sdhci_msm_post_req,
	.get_current_limit = sdhci_msm_get_current_limit,
};
3653
/*
 * Apply per-controller-revision capability overrides and quirks, keyed on
 * the major/minor fields of the SDCC core version register.  The adjusted
 * capability word is written to the vendor-specific capabilities override
 * register and cached in msm_host->caps_0.
 */
static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
					struct sdhci_host *host)
{
	u32 version, caps = 0;
	u16 minor;
	u8 major;
	u32 val;

	version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
	major = (version & CORE_VERSION_MAJOR_MASK) >>
			CORE_VERSION_MAJOR_SHIFT;
	minor = version & CORE_VERSION_TARGET_MASK;

	/* Start from the capabilities the controller advertises */
	caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);

	/*
	 * Starting with SDCC 5 controller (core major version = 1)
	 * controller won't advertise 3.0v, 1.8v and 8-bit features
	 * except for some targets.
	 */
	if (major >= 1 && minor != 0x11 && minor != 0x12) {
		struct sdhci_msm_reg_data *vdd_io_reg;
		/*
		 * Enable 1.8V support capability on controllers that
		 * support dual voltage
		 */
		vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
		if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
			caps |= CORE_3_0V_SUPPORT;
		if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
			caps |= CORE_8_BIT_SUPPORT;
	}

	/*
	 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
	 * on 8992 (minor 0x3e) as a workaround to reset for data stuck issue.
	 */
	if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
		host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
		val = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
		writel_relaxed((val | CORE_ONE_MID_EN),
			host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
	}
	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if ((major == 1) && (minor < 0x34))
		msm_host->use_cdclp533 = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x42 and later
	 * will require additional steps when resetting DLL.
	 * It also supports HS400 enhanced strobe mode.
	 */
	if ((major == 1) && (minor >= 0x42)) {
		msm_host->use_updated_dll_reset = true;
		msm_host->enhanced_strobe = true;
	}

	/*
	 * SDCC 5 controller with major version 1 and minor version 0x42,
	 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
	 * gating cannot guarantee MCLK timing requirement i.e.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming.
	 */
	if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
				(minor == 0x49)))
		msm_host->use_14lpp_dll = true;

	/* Fake 3.0V support for SDIO devices which requires such voltage */
	if (msm_host->pdata->core_3_0v_support) {
		caps |= CORE_3_0V_SUPPORT;
		writel_relaxed(
			(readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES) |
			caps), host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
	}

	if ((major == 1) && (minor >= 0x49))
		msm_host->rclk_delay_fix = true;
	/*
	 * Mask 64-bit support for controller with 32-bit address bus so that
	 * smaller descriptor size will be used and improve memory consumption.
	 */
	if (!msm_host->pdata->largeaddressbus)
		caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;

	writel_relaxed(caps, host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
	/* keep track of the value in SDHCI_CAPABILITIES */
	msm_host->caps_0 = caps;
}
3749
#ifdef CONFIG_MMC_CQ_HCI
/*
 * Probe the command-queue engine; on success advertise MMC_CAP2_CMD_QUEUE,
 * on failure fall back to legacy (non-CQ) operation.
 */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm->priv;

	host->cq_host = cmdq_pltfm_init(pdev);
	if (!IS_ERR(host->cq_host)) {
		msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
		return;
	}

	dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
		PTR_ERR(host->cq_host));
	host->cq_host = NULL;
}
#else
/* Command queueing not configured - nothing to set up */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
}
#endif
3773
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003774static bool sdhci_msm_is_bootdevice(struct device *dev)
3775{
3776 if (strnstr(saved_command_line, "androidboot.bootdevice=",
3777 strlen(saved_command_line))) {
3778 char search_string[50];
3779
3780 snprintf(search_string, ARRAY_SIZE(search_string),
3781 "androidboot.bootdevice=%s", dev_name(dev));
3782 if (strnstr(saved_command_line, search_string,
3783 strlen(saved_command_line)))
3784 return true;
3785 else
3786 return false;
3787 }
3788
3789 /*
3790 * "androidboot.bootdevice=" argument is not present then
3791 * return true as we don't know the boot device anyways.
3792 */
3793 return true;
3794}
3795
Asutosh Das0ef24812012-12-18 16:14:02 +05303796static int sdhci_msm_probe(struct platform_device *pdev)
3797{
3798 struct sdhci_host *host;
3799 struct sdhci_pltfm_host *pltfm_host;
3800 struct sdhci_msm_host *msm_host;
3801 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003802 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07003803 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003804 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05303805 struct resource *tlmm_memres = NULL;
3806 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05303807 unsigned long flags;
Asutosh Das0ef24812012-12-18 16:14:02 +05303808
3809 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
3810 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
3811 GFP_KERNEL);
3812 if (!msm_host) {
3813 ret = -ENOMEM;
3814 goto out;
3815 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303816
3817 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
3818 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
3819 if (IS_ERR(host)) {
3820 ret = PTR_ERR(host);
3821 goto out;
3822 }
3823
3824 pltfm_host = sdhci_priv(host);
3825 pltfm_host->priv = msm_host;
3826 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05303827 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05303828
3829 /* Extract platform data */
3830 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003831 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
Pavan Anamulaf2dda062016-03-30 22:07:56 +05303832 if (ret <= 0) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003833 dev_err(&pdev->dev, "Failed to get slot index %d\n",
3834 ret);
3835 goto pltfm_free;
3836 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003837
3838 /* skip the probe if eMMC isn't a boot device */
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07003839 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
3840 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003841 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07003842 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003843
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003844 if (disable_slots & (1 << (ret - 1))) {
3845 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
3846 ret);
3847 ret = -ENODEV;
3848 goto pltfm_free;
3849 }
3850
Sayali Lokhande5f768322016-04-11 18:36:53 +05303851 if (ret <= 2)
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07003852 sdhci_slot[ret-1] = msm_host;
3853
Dov Levenglickc9033ab2015-03-10 16:00:56 +02003854 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
3855 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05303856 if (!msm_host->pdata) {
3857 dev_err(&pdev->dev, "DT parsing error\n");
3858 goto pltfm_free;
3859 }
3860 } else {
3861 dev_err(&pdev->dev, "No device tree node\n");
3862 goto pltfm_free;
3863 }
3864
3865 /* Setup Clocks */
3866
3867 /* Setup SDCC bus voter clock. */
3868 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
3869 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
3870 /* Vote for max. clk rate for max. performance */
3871 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
3872 if (ret)
3873 goto pltfm_free;
3874 ret = clk_prepare_enable(msm_host->bus_clk);
3875 if (ret)
3876 goto pltfm_free;
3877 }
3878
3879 /* Setup main peripheral bus clock */
3880 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
3881 if (!IS_ERR(msm_host->pclk)) {
3882 ret = clk_prepare_enable(msm_host->pclk);
3883 if (ret)
3884 goto bus_clk_disable;
3885 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303886 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05303887
3888 /* Setup SDC MMC clock */
3889 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
3890 if (IS_ERR(msm_host->clk)) {
3891 ret = PTR_ERR(msm_host->clk);
3892 goto pclk_disable;
3893 }
3894
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303895 /* Set to the minimum supported clock frequency */
3896 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
3897 if (ret) {
3898 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05303899 goto pclk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303900 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05303901 ret = clk_prepare_enable(msm_host->clk);
3902 if (ret)
3903 goto pclk_disable;
3904
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303905 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303906 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303907
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003908 /* Setup CDC calibration fixed feedback clock */
3909 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
3910 if (!IS_ERR(msm_host->ff_clk)) {
3911 ret = clk_prepare_enable(msm_host->ff_clk);
3912 if (ret)
3913 goto clk_disable;
3914 }
3915
3916 /* Setup CDC calibration sleep clock */
3917 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
3918 if (!IS_ERR(msm_host->sleep_clk)) {
3919 ret = clk_prepare_enable(msm_host->sleep_clk);
3920 if (ret)
3921 goto ff_clk_disable;
3922 }
3923
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07003924 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
3925
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303926 ret = sdhci_msm_bus_register(msm_host, pdev);
3927 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003928 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303929
3930 if (msm_host->msm_bus_vote.client_handle)
3931 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
3932 sdhci_msm_bus_work);
3933 sdhci_msm_bus_voting(host, 1);
3934
Asutosh Das0ef24812012-12-18 16:14:02 +05303935 /* Setup regulators */
3936 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
3937 if (ret) {
3938 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303939 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05303940 }
3941
3942 /* Reset the core and Enable SDHC mode */
3943 core_memres = platform_get_resource_byname(pdev,
3944 IORESOURCE_MEM, "core_mem");
Asutosh Das890bdee2014-08-08 23:01:42 +05303945 if (!core_memres) {
3946 dev_err(&pdev->dev, "Failed to get iomem resource\n");
3947 goto vreg_deinit;
3948 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303949 msm_host->core_mem = devm_ioremap(&pdev->dev, core_memres->start,
3950 resource_size(core_memres));
3951
3952 if (!msm_host->core_mem) {
3953 dev_err(&pdev->dev, "Failed to remap registers\n");
3954 ret = -ENOMEM;
3955 goto vreg_deinit;
3956 }
3957
Sahitya Tummala079ed852015-10-29 20:18:45 +05303958 tlmm_memres = platform_get_resource_byname(pdev,
3959 IORESOURCE_MEM, "tlmm_mem");
3960 if (tlmm_memres) {
3961 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
3962 resource_size(tlmm_memres));
3963
3964 if (!tlmm_mem) {
3965 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
3966 ret = -ENOMEM;
3967 goto vreg_deinit;
3968 }
3969 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
3970 dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
3971 &tlmm_memres->start, readl_relaxed(tlmm_mem));
3972 }
3973
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303974 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003975 * Reset the vendor spec register to power on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303976 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003977 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
3978 host->ioaddr + CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303979
Asutosh Das0ef24812012-12-18 16:14:02 +05303980 /* Set HC_MODE_EN bit in HC_MODE register */
3981 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
3982
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003983 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
3984 writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_HC_MODE) |
3985 FF_CLK_SW_RST_DIS, msm_host->core_mem + CORE_HC_MODE);
3986
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303987 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07003988
3989 /*
3990 * Set the PAD_PWR_SWTICH_EN bit so that the PAD_PWR_SWITCH bit can
3991 * be used as required later on.
3992 */
3993 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
3994 CORE_IO_PAD_PWR_SWITCH_EN),
3995 host->ioaddr + CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05303996 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05303997 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
3998 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
3999 * interrupt in GIC (by registering the interrupt handler), we need to
4000 * ensure that any pending power irq interrupt status is acknowledged
4001 * otherwise power irq interrupt handler would be fired prematurely.
4002 */
4003 irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
4004 writel_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
4005 irq_ctl = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL);
4006 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
4007 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
4008 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
4009 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
4010 writel_relaxed(irq_ctl, (msm_host->core_mem + CORE_PWRCTL_CTL));
Krishna Konda46fd1432014-10-30 21:13:27 -07004011
Subhash Jadavani28137342013-05-14 17:46:43 +05304012 /*
4013 * Ensure that above writes are propogated before interrupt enablement
4014 * in GIC.
4015 */
4016 mb();
4017
4018 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05304019 * Following are the deviations from SDHC spec v3.0 -
4020 * 1. Card detection is handled using separate GPIO.
4021 * 2. Bus power control is handled by interacting with PMIC.
4022 */
4023 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
4024 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304025 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03004026 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304027 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d439442013-04-12 11:49:11 +05304028 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05304029 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05304030 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Sahitya Tummala43fb3372016-04-05 14:00:48 +05304031 host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
Sahitya Tummaladb5e53d2016-04-05 15:29:35 +05304032 host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
Asutosh Das0ef24812012-12-18 16:14:02 +05304033
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05304034 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
4035 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
4036
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004037 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004038 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
4039 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
4040 SDHCI_VENDOR_VER_SHIFT));
4041 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
4042 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
4043 /*
4044 * Add 40us delay in interrupt handler when
4045 * operating at initialization frequency(400KHz).
4046 */
4047 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
4048 /*
4049 * Set Software Reset for DAT line in Software
4050 * Reset Register (Bit 2).
4051 */
4052 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
4053 }
4054
Asutosh Das214b9662013-06-13 14:27:42 +05304055 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
4056
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004057 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004058 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4059 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304060 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004061 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05304062 goto vreg_deinit;
4063 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004064 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05304065 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004066 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304067 if (ret) {
4068 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004069 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05304070 goto vreg_deinit;
4071 }
4072
4073 /* Enable pwr irq interrupts */
4074 writel_relaxed(INT_MASK, (msm_host->core_mem + CORE_PWRCTL_MASK));
4075
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304076#ifdef CONFIG_MMC_CLKGATE
4077 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
4078 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
4079#endif
4080
Asutosh Das0ef24812012-12-18 16:14:02 +05304081 /* Set host capabilities */
4082 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
4083 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004084 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05304085 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05304086 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004087 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004088 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03004089 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05304090 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07004091 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03004092 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304093 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05304094
4095 if (msm_host->pdata->nonremovable)
4096 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4097
Guoping Yuf7c91332014-08-20 16:56:18 +08004098 if (msm_host->pdata->nonhotplug)
4099 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
4100
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05304101 init_completion(&msm_host->pwr_irq_completion);
4102
Sahitya Tummala581df132013-03-12 14:57:46 +05304103 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05304104 /*
4105 * Set up the card detect GPIO in active configuration before
4106 * configuring it as an IRQ. Otherwise, it can be in some
4107 * weird/inconsistent state resulting in flood of interrupts.
4108 */
4109 sdhci_msm_setup_pins(msm_host->pdata, true);
4110
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304111 /*
4112 * This delay is needed for stabilizing the card detect GPIO
4113 * line after changing the pull configs.
4114 */
4115 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304116 ret = mmc_gpio_request_cd(msm_host->mmc,
4117 msm_host->pdata->status_gpio, 0);
4118 if (ret) {
4119 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4120 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304121 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304122 }
4123 }
4124
Krishna Konda7feab352013-09-17 23:55:40 -07004125 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4126 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4127 host->dma_mask = DMA_BIT_MASK(64);
4128 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304129 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004130 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304131 host->dma_mask = DMA_BIT_MASK(32);
4132 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304133 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304134 } else {
4135 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4136 }
4137
Ritesh Harjani42876f42015-11-17 17:46:51 +05304138 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
4139 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05304140 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304141 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
4142 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304143 msm_host->is_sdiowakeup_enabled = true;
4144 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
4145 sdhci_msm_sdiowakeup_irq,
4146 IRQF_SHARED | IRQF_TRIGGER_HIGH,
4147 "sdhci-msm sdiowakeup", host);
4148 if (ret) {
4149 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
4150 __func__, msm_host->pdata->sdiowakeup_irq, ret);
4151 msm_host->pdata->sdiowakeup_irq = -1;
4152 msm_host->is_sdiowakeup_enabled = false;
4153 goto vreg_deinit;
4154 } else {
4155 spin_lock_irqsave(&host->lock, flags);
4156 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304157 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304158 spin_unlock_irqrestore(&host->lock, flags);
4159 }
4160 }
4161
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004162 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304163 ret = sdhci_add_host(host);
4164 if (ret) {
4165 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304166 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304167 }
4168
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004169 pm_runtime_set_active(&pdev->dev);
4170 pm_runtime_enable(&pdev->dev);
4171 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4172 pm_runtime_use_autosuspend(&pdev->dev);
4173
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304174 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4175 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4176 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4177 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4178 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4179 ret = device_create_file(&pdev->dev,
4180 &msm_host->msm_bus_vote.max_bus_bw);
4181 if (ret)
4182 goto remove_host;
4183
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304184 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4185 msm_host->polling.show = show_polling;
4186 msm_host->polling.store = store_polling;
4187 sysfs_attr_init(&msm_host->polling.attr);
4188 msm_host->polling.attr.name = "polling";
4189 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4190 ret = device_create_file(&pdev->dev, &msm_host->polling);
4191 if (ret)
4192 goto remove_max_bus_bw_file;
4193 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304194
4195 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
4196 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
4197 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
4198 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
4199 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
4200 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4201 if (ret) {
4202 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
4203 mmc_hostname(host->mmc), __func__, ret);
4204 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4205 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304206 /* Successful initialization */
4207 goto out;
4208
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304209remove_max_bus_bw_file:
4210 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05304211remove_host:
4212 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004213 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304214 sdhci_remove_host(host, dead);
4215vreg_deinit:
4216 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304217bus_unregister:
4218 if (msm_host->msm_bus_vote.client_handle)
4219 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4220 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004221sleep_clk_disable:
4222 if (!IS_ERR(msm_host->sleep_clk))
4223 clk_disable_unprepare(msm_host->sleep_clk);
4224ff_clk_disable:
4225 if (!IS_ERR(msm_host->ff_clk))
4226 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304227clk_disable:
4228 if (!IS_ERR(msm_host->clk))
4229 clk_disable_unprepare(msm_host->clk);
4230pclk_disable:
4231 if (!IS_ERR(msm_host->pclk))
4232 clk_disable_unprepare(msm_host->pclk);
4233bus_clk_disable:
4234 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
4235 clk_disable_unprepare(msm_host->bus_clk);
4236pltfm_free:
4237 sdhci_pltfm_free(pdev);
4238out:
4239 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
4240 return ret;
4241}
4242
4243static int sdhci_msm_remove(struct platform_device *pdev)
4244{
4245 struct sdhci_host *host = platform_get_drvdata(pdev);
4246 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4247 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4248 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
4249 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
4250 0xffffffff);
4251
4252 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304253 if (!gpio_is_valid(msm_host->pdata->status_gpio))
4254 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304255 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004256 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304257 sdhci_remove_host(host, dead);
4258 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05304259
Asutosh Das0ef24812012-12-18 16:14:02 +05304260 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304261
Pratibhasagar V9acf2642013-11-21 21:07:21 +05304262 sdhci_msm_setup_pins(pdata, true);
4263 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304264
4265 if (msm_host->msm_bus_vote.client_handle) {
4266 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4267 sdhci_msm_bus_unregister(msm_host);
4268 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304269 return 0;
4270}
4271
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004272#ifdef CONFIG_PM
Ritesh Harjani42876f42015-11-17 17:46:51 +05304273static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
4274{
4275 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4276 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4277 unsigned long flags;
4278 int ret = 0;
4279
4280 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
4281 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
4282 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304283 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304284 return 1;
4285 }
4286
4287 spin_lock_irqsave(&host->lock, flags);
4288 if (enable) {
4289 /* configure DAT1 gpio if applicable */
4290 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304291 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304292 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4293 if (!ret)
4294 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
4295 goto out;
4296 } else {
4297 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4298 mmc_hostname(host->mmc), enable);
4299 }
4300 } else {
4301 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
4302 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4303 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304304 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304305 } else {
4306 pr_err("%s: sdiowakeup_irq(%d)invalid\n",
4307 mmc_hostname(host->mmc), enable);
4308
4309 }
4310 }
4311out:
4312 if (ret)
4313 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
4314 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
4315 ret, msm_host->pdata->sdiowakeup_irq);
4316 spin_unlock_irqrestore(&host->lock, flags);
4317 return ret;
4318}
4319
4320
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004321static int sdhci_msm_runtime_suspend(struct device *dev)
4322{
4323 struct sdhci_host *host = dev_get_drvdata(dev);
4324 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4325 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004326 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004327
Ritesh Harjani42876f42015-11-17 17:46:51 +05304328 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4329 goto defer_disable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304330
Ritesh Harjani42876f42015-11-17 17:46:51 +05304331 sdhci_cfg_irq(host, false, true);
4332
4333defer_disable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004334 disable_irq(msm_host->pwr_irq);
4335
4336 /*
4337 * Remove the vote immediately only if clocks are off in which
4338 * case we might have queued work to remove vote but it may not
4339 * be completed before runtime suspend or system suspend.
4340 */
4341 if (!atomic_read(&msm_host->clks_on)) {
4342 if (msm_host->msm_bus_vote.client_handle)
4343 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4344 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004345 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
4346 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004347
4348 return 0;
4349}
4350
4351static int sdhci_msm_runtime_resume(struct device *dev)
4352{
4353 struct sdhci_host *host = dev_get_drvdata(dev);
4354 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4355 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004356 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004357
Ritesh Harjani42876f42015-11-17 17:46:51 +05304358 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4359 goto defer_enable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304360
Ritesh Harjani42876f42015-11-17 17:46:51 +05304361 sdhci_cfg_irq(host, true, true);
4362
4363defer_enable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004364 enable_irq(msm_host->pwr_irq);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004365
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004366 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
4367 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004368 return 0;
4369}
4370
4371static int sdhci_msm_suspend(struct device *dev)
4372{
4373 struct sdhci_host *host = dev_get_drvdata(dev);
4374 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4375 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004376 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304377 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004378 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004379
4380 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4381 (msm_host->mmc->slot.cd_irq >= 0))
4382 disable_irq(msm_host->mmc->slot.cd_irq);
4383
4384 if (pm_runtime_suspended(dev)) {
4385 pr_debug("%s: %s: already runtime suspended\n",
4386 mmc_hostname(host->mmc), __func__);
4387 goto out;
4388 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004389 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004390out:
Ritesh Harjani42876f42015-11-17 17:46:51 +05304391
4392 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4393 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
4394 if (sdio_cfg)
4395 sdhci_cfg_irq(host, false, true);
4396 }
4397
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004398 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
4399 ktime_to_us(ktime_sub(ktime_get(), start)));
4400 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004401}
4402
4403static int sdhci_msm_resume(struct device *dev)
4404{
4405 struct sdhci_host *host = dev_get_drvdata(dev);
4406 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4407 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4408 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304409 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004410 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004411
4412 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4413 (msm_host->mmc->slot.cd_irq >= 0))
4414 enable_irq(msm_host->mmc->slot.cd_irq);
4415
4416 if (pm_runtime_suspended(dev)) {
4417 pr_debug("%s: %s: runtime suspended, defer system resume\n",
4418 mmc_hostname(host->mmc), __func__);
4419 goto out;
4420 }
4421
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004422 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004423out:
Ritesh Harjani42876f42015-11-17 17:46:51 +05304424 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4425 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
4426 if (sdio_cfg)
4427 sdhci_cfg_irq(host, true, true);
4428 }
4429
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004430 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
4431 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004432 return ret;
4433}
4434
Ritesh Harjani42876f42015-11-17 17:46:51 +05304435static int sdhci_msm_suspend_noirq(struct device *dev)
4436{
4437 struct sdhci_host *host = dev_get_drvdata(dev);
4438 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4439 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4440 int ret = 0;
4441
4442 /*
4443 * ksdioirqd may be running, hence retry
4444 * suspend in case the clocks are ON
4445 */
4446 if (atomic_read(&msm_host->clks_on)) {
4447 pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
4448 mmc_hostname(host->mmc), __func__);
4449 ret = -EAGAIN;
4450 }
4451
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304452 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4453 if (msm_host->sdio_pending_processing)
4454 ret = -EBUSY;
4455
Ritesh Harjani42876f42015-11-17 17:46:51 +05304456 return ret;
4457}
4458
/*
 * PM operations: system sleep uses sdhci_msm_suspend/resume, runtime PM uses
 * the runtime variants, and suspend_noirq can abort suspend while clocks are
 * still on or SDIO wakeup processing is pending.
 */
static const struct dev_pm_ops sdhci_msm_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
			   NULL)
	.suspend_noirq = sdhci_msm_suspend_noirq,
};

#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)

#else
/* CONFIG_PM disabled: register the driver without PM callbacks. */
#define SDHCI_MSM_PMOPS NULL
#endif
/* Devicetree match table: this driver binds to "qcom,sdhci-msm" nodes. */
static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm"},
	{},
};
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
4476
/*
 * Platform driver glue: probe/remove entry points, OF match table, and PM
 * ops (NULL when CONFIG_PM is disabled — see SDHCI_MSM_PMOPS above).
 */
static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		.name = "sdhci_msm",
		.owner = THIS_MODULE,
		.of_match_table = sdhci_msm_dt_match,
		.pm = SDHCI_MSM_PMOPS,
	},
};

/* Registers/unregisters the driver at module init/exit. */
module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");