blob: a155baa37e37d6fc6f3161efb69b5a7dd1410fef [file] [log] [blame]
Asutosh Das0ef24812012-12-18 16:14:02 +05301/*
2 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
3 * driver source file
4 *
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -08005 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
Asutosh Das0ef24812012-12-18 16:14:02 +05306 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/mmc/host.h>
20#include <linux/mmc/card.h>
21#include <linux/mmc/sdio_func.h>
22#include <linux/gfp.h>
23#include <linux/of.h>
24#include <linux/of_gpio.h>
25#include <linux/regulator/consumer.h>
26#include <linux/types.h>
27#include <linux/input.h>
28#include <linux/platform_device.h>
29#include <linux/wait.h>
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070030#include <linux/io.h>
31#include <linux/delay.h>
32#include <linux/scatterlist.h>
33#include <linux/slab.h>
Sahitya Tummala581df132013-03-12 14:57:46 +053034#include <linux/mmc/slot-gpio.h>
Sahitya Tummalaeaa21862013-03-20 19:34:59 +053035#include <linux/dma-mapping.h>
Sahitya Tummala66b0fe32013-04-25 11:50:56 +053036#include <linux/iopoll.h>
Pratibhasagar V9acf2642013-11-21 21:07:21 +053037#include <linux/pinctrl/consumer.h>
38#include <linux/iopoll.h>
Sahitya Tummala8a3e8182013-03-10 14:12:52 +053039#include <linux/msm-bus.h>
Konstantin Dorfman98377d32015-02-25 10:09:41 +020040#include <linux/pm_runtime.h>
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +020041#include <trace/events/mmc.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053042
Sahitya Tummala56874732015-05-21 08:24:03 +053043#include "sdhci-msm.h"
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -070044#include "cmdq_hci.h"
Asutosh Das0ef24812012-12-18 16:14:02 +053045
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080046#define CORE_POWER 0x0
47#define CORE_SW_RST (1 << 7)
48
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -070049#define SDHCI_VER_100 0x2B
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080050#define CORE_MCI_DATA_CNT 0x30
51#define CORE_MCI_STATUS 0x34
52#define CORE_MCI_FIFO_CNT 0x44
53
54#define CORE_VERSION_STEP_MASK 0x0000FFFF
55#define CORE_VERSION_MINOR_MASK 0x0FFF0000
56#define CORE_VERSION_MINOR_SHIFT 16
57#define CORE_VERSION_MAJOR_MASK 0xF0000000
58#define CORE_VERSION_MAJOR_SHIFT 28
59#define CORE_VERSION_TARGET_MASK 0x000000FF
Konstantin Dorfman98543bf2015-10-01 17:56:54 +030060#define SDHCI_MSM_VER_420 0x49
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080061
62#define CORE_GENERICS 0x70
63#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +053064
65#define CORE_VERSION_MAJOR_MASK 0xF0000000
66#define CORE_VERSION_MAJOR_SHIFT 28
67
Asutosh Das0ef24812012-12-18 16:14:02 +053068#define CORE_HC_MODE 0x78
69#define HC_MODE_EN 0x1
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -070070#define FF_CLK_SW_RST_DIS (1 << 13)
Asutosh Das0ef24812012-12-18 16:14:02 +053071
Sahitya Tummala67717bc2013-08-02 09:21:37 +053072#define CORE_MCI_VERSION 0x050
73#define CORE_TESTBUS_CONFIG 0x0CC
74#define CORE_TESTBUS_ENA (1 << 3)
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080075#define CORE_TESTBUS_SEL2_BIT 4
76#define CORE_TESTBUS_SEL2 (1 << CORE_TESTBUS_SEL2_BIT)
Sahitya Tummala67717bc2013-08-02 09:21:37 +053077
Asutosh Das0ef24812012-12-18 16:14:02 +053078#define CORE_PWRCTL_STATUS 0xDC
79#define CORE_PWRCTL_MASK 0xE0
80#define CORE_PWRCTL_CLEAR 0xE4
81#define CORE_PWRCTL_CTL 0xE8
82
83#define CORE_PWRCTL_BUS_OFF 0x01
84#define CORE_PWRCTL_BUS_ON (1 << 1)
85#define CORE_PWRCTL_IO_LOW (1 << 2)
86#define CORE_PWRCTL_IO_HIGH (1 << 3)
87
88#define CORE_PWRCTL_BUS_SUCCESS 0x01
89#define CORE_PWRCTL_BUS_FAIL (1 << 1)
90#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
91#define CORE_PWRCTL_IO_FAIL (1 << 3)
92
93#define INT_MASK 0xF
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070094#define MAX_PHASES 16
95
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070096#define CORE_DLL_CONFIG 0x100
97#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070098#define CORE_DLL_EN (1 << 16)
99#define CORE_CDR_EN (1 << 17)
100#define CORE_CK_OUT_EN (1 << 18)
101#define CORE_CDR_EXT_EN (1 << 19)
102#define CORE_DLL_PDN (1 << 29)
103#define CORE_DLL_RST (1 << 30)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700104
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700105#define CORE_DLL_STATUS 0x108
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700106#define CORE_DLL_LOCK (1 << 7)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700107#define CORE_DDR_DLL_LOCK (1 << 11)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700108
109#define CORE_VENDOR_SPEC 0x10C
Krishna Konda46fd1432014-10-30 21:13:27 -0700110#define CORE_CLK_PWRSAVE (1 << 1)
111#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
112#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
113#define CORE_HC_MCLK_SEL_MASK (3 << 8)
114#define CORE_HC_AUTO_CMD21_EN (1 << 6)
115#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700116#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700117#define CORE_HC_SELECT_IN_EN (1 << 18)
118#define CORE_HC_SELECT_IN_HS400 (6 << 19)
119#define CORE_HC_SELECT_IN_MASK (7 << 19)
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -0700120#define CORE_VENDOR_SPEC_POR_VAL 0xA1C
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700121
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -0800122#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 0x114
123#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 0x118
124
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530125#define CORE_VENDOR_SPEC_FUNC2 0x110
Pavan Anamula691dd592015-08-25 16:11:20 +0530126#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
127#define HC_SW_RST_REQ (1 << 21)
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530128#define CORE_ONE_MID_EN (1 << 25)
129
Krishna Konda7feab352013-09-17 23:55:40 -0700130#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11C
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +0530131#define CORE_8_BIT_SUPPORT (1 << 18)
132#define CORE_3_3V_SUPPORT (1 << 24)
133#define CORE_3_0V_SUPPORT (1 << 25)
134#define CORE_1_8V_SUPPORT (1 << 26)
Gilad Broner2a10ca02014-10-02 17:20:35 +0300135#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)
Krishna Konda7feab352013-09-17 23:55:40 -0700136
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -0800137#define CORE_SDCC_DEBUG_REG 0x124
Sahitya Tummala67717bc2013-08-02 09:21:37 +0530138
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700139#define CORE_CSR_CDC_CTLR_CFG0 0x130
140#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
141#define CORE_HW_AUTOCAL_ENA (1 << 17)
142
143#define CORE_CSR_CDC_CTLR_CFG1 0x134
144#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
145#define CORE_TIMER_ENA (1 << 16)
146
147#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
148#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
149#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
150#define CORE_CDC_OFFSET_CFG 0x14C
151#define CORE_CSR_CDC_DELAY_CFG 0x150
152#define CORE_CDC_SLAVE_DDA_CFG 0x160
153#define CORE_CSR_CDC_STATUS0 0x164
154#define CORE_CALIBRATION_DONE (1 << 0)
155
156#define CORE_CDC_ERROR_CODE_MASK 0x7000000
157
Konstantin Dorfman98543bf2015-10-01 17:56:54 +0300158#define CQ_CMD_DBG_RAM 0x110
159#define CQ_CMD_DBG_RAM_WA 0x150
160#define CQ_CMD_DBG_RAM_OL 0x154
161
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700162#define CORE_CSR_CDC_GEN_CFG 0x178
163#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
164#define CORE_CDC_SWITCH_RC_EN (1 << 1)
165
166#define CORE_DDR_200_CFG 0x184
167#define CORE_CDC_T4_DLY_SEL (1 << 0)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530168#define CORE_CMDIN_RCLK_EN (1 << 1)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700169#define CORE_START_CDC_TRAFFIC (1 << 6)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530170
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700171#define CORE_VENDOR_SPEC3 0x1B0
172#define CORE_PWRSAVE_DLL (1 << 3)
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +0530173#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700174
175#define CORE_DLL_CONFIG_2 0x1B4
176#define CORE_DDR_CAL_EN (1 << 0)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800177#define CORE_FLL_CYCLE_CNT (1 << 18)
178#define CORE_DLL_CLOCK_DISABLE (1 << 21)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700179
Pavan Anamulaf7bf5112015-08-21 18:09:42 +0530180#define CORE_DDR_CONFIG 0x1B8
181#define DDR_CONFIG_POR_VAL 0x80040853
182#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
183#define DDR_CONFIG_PRG_RCLK_DLY 115
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -0700184#define CORE_DDR_CONFIG_2 0x1BC
185#define DDR_CONFIG_2_POR_VAL 0x80040873
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700186
Venkat Gopalakrishnan450745e2014-07-24 20:39:34 -0700187/* 512 descriptors */
188#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +0530189#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
Asutosh Das648f9d12013-01-10 21:11:04 +0530190
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700191#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800192#define TCXO_FREQ 19200000
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700193
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700194#define INVALID_TUNING_PHASE -1
195
Krishna Konda96e6b112013-10-28 15:25:03 -0700196#define NUM_TUNING_PHASES 16
197#define MAX_DRV_TYPES_SUPPORTED_HS200 3
Konstantin Dorfman98377d32015-02-25 10:09:41 +0200198#define MSM_AUTOSUSPEND_DELAY_MS 100
Krishna Konda96e6b112013-10-28 15:25:03 -0700199
/*
 * 64-byte tuning block pattern sent/compared during CMD19/CMD21 clock
 * tuning on a 4-bit bus (per the SD/eMMC tuning procedure).
 */
static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

/*
 * 128-byte tuning block pattern used for tuning on an 8-bit bus
 * (eMMC HS200).
 */
static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};
Asutosh Das0ef24812012-12-18 16:14:02 +0530217
/* global to hold each slot instance for debug */
static struct sdhci_msm_host *sdhci_slot[2];

/*
 * Debug knob; presumably non-zero keeps the corresponding slot(s) from
 * probing — TODO(review): confirm semantics against the probe path,
 * which is outside this chunk.
 */
static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);
224
/*
 * Requested VDD-IO voltage level, consumed by sdhci_msm_set_vdd_io_vol()
 * to pick which regulator voltage to apply.
 */
enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set whatever there in voltage_level (third argument) of
	 * sdhci_msm_set_vdd_io_vol() function.
	 */
	VDD_IO_SET_LEVEL,
};
236
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700237/* MSM platform specific tuning */
238static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
239 u8 poll)
240{
241 int rc = 0;
242 u32 wait_cnt = 50;
243 u8 ck_out_en = 0;
244 struct mmc_host *mmc = host->mmc;
245
246 /* poll for CK_OUT_EN bit. max. poll time = 50us */
247 ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
248 CORE_CK_OUT_EN);
249
250 while (ck_out_en != poll) {
251 if (--wait_cnt == 0) {
252 pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
253 mmc_hostname(mmc), __func__, poll);
254 rc = -ETIMEDOUT;
255 goto out;
256 }
257 udelay(1);
258
259 ck_out_en = !!(readl_relaxed(host->ioaddr +
260 CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
261 }
262out:
263 return rc;
264}
265
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530266/*
267 * Enable CDR to track changes of DAT lines and adjust sampling
268 * point according to voltage/temperature variations
269 */
270static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
271{
272 int rc = 0;
273 u32 config;
274
275 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
276 config |= CORE_CDR_EN;
277 config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
278 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
279
280 rc = msm_dll_poll_ck_out_en(host, 0);
281 if (rc)
282 goto err;
283
284 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) |
285 CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
286
287 rc = msm_dll_poll_ck_out_en(host, 1);
288 if (rc)
289 goto err;
290 goto out;
291err:
292 pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
293out:
294 return rc;
295}
296
297static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
298 *attr, const char *buf, size_t count)
299{
300 struct sdhci_host *host = dev_get_drvdata(dev);
301 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
302 struct sdhci_msm_host *msm_host = pltfm_host->priv;
303 u32 tmp;
304 unsigned long flags;
305
306 if (!kstrtou32(buf, 0, &tmp)) {
307 spin_lock_irqsave(&host->lock, flags);
308 msm_host->en_auto_cmd21 = !!tmp;
309 spin_unlock_irqrestore(&host->lock, flags);
310 }
311 return count;
312}
313
314static ssize_t show_auto_cmd21(struct device *dev,
315 struct device_attribute *attr, char *buf)
316{
317 struct sdhci_host *host = dev_get_drvdata(dev);
318 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
319 struct sdhci_msm_host *msm_host = pltfm_host->priv;
320
321 return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
322}
323
324/* MSM auto-tuning handler */
325static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
326 bool enable,
327 u32 type)
328{
329 int rc = 0;
330 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
331 struct sdhci_msm_host *msm_host = pltfm_host->priv;
332 u32 val = 0;
333
334 if (!msm_host->en_auto_cmd21)
335 return 0;
336
337 if (type == MMC_SEND_TUNING_BLOCK_HS200)
338 val = CORE_HC_AUTO_CMD21_EN;
339 else
340 return 0;
341
342 if (enable) {
343 rc = msm_enable_cdr_cm_sdc4_dll(host);
344 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
345 val, host->ioaddr + CORE_VENDOR_SPEC);
346 } else {
347 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
348 ~val, host->ioaddr + CORE_VENDOR_SPEC);
349 }
350 return rc;
351}
352
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700353static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
354{
355 int rc = 0;
356 u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
357 0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
358 0x8};
359 unsigned long flags;
360 u32 config;
361 struct mmc_host *mmc = host->mmc;
362
363 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
364 spin_lock_irqsave(&host->lock, flags);
365
366 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
367 config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
368 config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
369 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
370
371 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
372 rc = msm_dll_poll_ck_out_en(host, 0);
373 if (rc)
374 goto err_out;
375
376 /*
377 * Write the selected DLL clock output phase (0 ... 15)
378 * to CDR_SELEXT bit field of DLL_CONFIG register.
379 */
380 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
381 & ~(0xF << 20))
382 | (grey_coded_phase_table[phase] << 20)),
383 host->ioaddr + CORE_DLL_CONFIG);
384
385 /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
386 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
387 | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
388
389 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
390 rc = msm_dll_poll_ck_out_en(host, 1);
391 if (rc)
392 goto err_out;
393
394 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
395 config |= CORE_CDR_EN;
396 config &= ~CORE_CDR_EXT_EN;
397 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
398 goto out;
399
400err_out:
401 pr_err("%s: %s: Failed to set DLL phase: %d\n",
402 mmc_hostname(mmc), __func__, phase);
403out:
404 spin_unlock_irqrestore(&host->lock, flags);
405 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
406 return rc;
407}
408
/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

/*
 * @phase_table: sorted list of phases (0..15) that passed tuning.
 * @total_phases: number of valid entries in @phase_table (1..MAX_PHASES).
 *
 * Returns the chosen phase (0..15) on success, -EINVAL on bad input
 * or if no valid phase can be derived.
 */
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	/* ranges[r][c]: c-th phase of the r-th run of consecutive phases */
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
				mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	/* Split phase_table into rows of consecutive phases */
	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window? */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in raw where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in raw where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	/* Pick the widest window */
	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	/* Use the phase at 3/4 of the selected window */
	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
				mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}
522
523static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
524{
525 u32 mclk_freq = 0;
526
527 /* Program the MCLK value to MCLK_FREQ bit field */
528 if (host->clock <= 112000000)
529 mclk_freq = 0;
530 else if (host->clock <= 125000000)
531 mclk_freq = 1;
532 else if (host->clock <= 137000000)
533 mclk_freq = 2;
534 else if (host->clock <= 150000000)
535 mclk_freq = 3;
536 else if (host->clock <= 162000000)
537 mclk_freq = 4;
538 else if (host->clock <= 175000000)
539 mclk_freq = 5;
540 else if (host->clock <= 187000000)
541 mclk_freq = 6;
542 else if (host->clock <= 200000000)
543 mclk_freq = 7;
544
545 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
546 & ~(7 << 24)) | (mclk_freq << 24)),
547 host->ioaddr + CORE_DLL_CONFIG);
548}
549
/* Initialize the DLL (Programmable Delay Line ) */
/*
 * Full DLL reset/init sequence, run under host->lock:
 *   1. temporarily clear CLK_PWRSAVE so MCLK stays on during tuning,
 *   2. (updated-DLL-reset HW only) gate the DLL clock first,
 *   3. assert DLL_RST + DLL_PDN, program MCLK_FREQ,
 *   4. (updated-DLL-reset HW only) program FLL cycle count from the
 *      TCXO-derived ratio, then wait 5us,
 *   5. deassert DLL_RST/DLL_PDN, re-enable DLL clock, set DLL_EN and
 *      CK_OUT_EN,
 *   6. poll DLL_STATUS.DLL_LOCK (<= 50us),
 *   7. restore the original PWRSAVE state.
 *
 * Returns 0 on success, -ETIMEDOUT if the DLL never locks.
 */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_host *mmc = host->mmc;
	int rc = 0;
	unsigned long flags;
	u32 wait_cnt;
	bool prev_pwrsave, curr_pwrsave;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);
	prev_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			CORE_CLK_PWRSAVE);
	curr_pwrsave = prev_pwrsave;
	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock. So let's disable the PWRSAVE
	 * here and re-enable it once tuning is completed.
	 */
	if (prev_pwrsave) {
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE),
				host->ioaddr + CORE_VENDOR_SPEC);
		curr_pwrsave = false;
	}

	if (msm_host->use_updated_dll_reset) {
		/* Disable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				& ~CORE_CK_OUT_EN),
				host->ioaddr + CORE_DLL_CONFIG);

		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				| CORE_DLL_CLOCK_DISABLE),
				host->ioaddr + CORE_DLL_CONFIG_2);
	}

	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);

	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	if (msm_host->use_updated_dll_reset) {
		u32 mclk_freq = 0;

		/*
		 * FLL cycle count: 8x or 4x the host clock / TCXO ratio,
		 * depending on the FLL_CYCLE_CNT configuration bit.
		 */
		if ((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				& CORE_FLL_CYCLE_CNT))
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
		else
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);

		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				& ~(0xFF << 10)) | (mclk_freq << 10)),
				host->ioaddr + CORE_DLL_CONFIG_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);

	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);

	if (msm_host->use_updated_dll_reset) {
		msm_cm_dll_set_freq(host);
		/* Enable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				& ~CORE_DLL_CLOCK_DISABLE),
				host->ioaddr + CORE_DLL_CONFIG_2);
	}

	/* Set DLL_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);

	wait_cnt = 50;
	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
			CORE_DLL_LOCK)) {
		/* max. wait for 50us sec for LOCK bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: DLL failed to LOCK\n",
					mmc_hostname(mmc), __func__);
			rc = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

out:
	/* Restore the correct PWRSAVE state */
	if (prev_pwrsave ^ curr_pwrsave) {
		u32 reg = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);

		if (prev_pwrsave)
			reg |= CORE_CLK_PWRSAVE;
		else
			reg &= ~CORE_CLK_PWRSAVE;

		writel_relaxed(reg, host->ioaddr + CORE_VENDOR_SPEC);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}
671
/*
 * CDCLP533 (Calibrating Delay Circuit) calibration for HS400 mode:
 * configure the CDC mux/switch bits, load the fixed CDC register
 * init values, trigger a SW full calibration followed by HW autocal,
 * poll for CALIBRATION_DONE (<= 50us), check the error code, and
 * finally start CDC traffic.
 *
 * Returns 0 on success, -ETIMEDOUT if calibration never completes,
 * -EINVAL if the CDC reports an error code.
 */
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			& ~CORE_CDC_T4_DLY_SEL),
			host->ioaddr + CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			& ~CORE_CDC_SWITCH_BYPASS_OFF),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			| CORE_CDC_SWITCH_RC_EN),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			& ~CORE_START_CDC_TRAFFIC),
			host->ioaddr + CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x4E2
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 *
	 * (DELAY_CFG comment updated to 0x4E2 to match the value actually
	 * written below; it previously said 0x3AC.)
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			& ~CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_HW_AUTOCAL_ENA),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	/* Ensure all prior register writes complete before polling */
	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		 calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			| CORE_START_CDC_TRAFFIC),
			host->ioaddr + CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
777
/*
 * CM_DLL_SDC4 (DDR DLL) calibration: reprogram the DDR_CONFIG POR
 * value (it may have been changed by bootloaders), optionally enable
 * CMDIN_RCLK for enhanced-strobe cards, kick DDR_CAL_EN, and poll for
 * DDR_DLL_LOCK (up to ~1ms). On non-14lpp DLLs, PWRSAVE_DLL is also
 * enabled (14lpp DLLs cannot meet the MCLK gating timing — see note
 * below).
 *
 * Returns 0 on success, -ETIMEDOUT if the DDR DLL never locks.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogramming the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->rclk_delay_fix) {
		writel_relaxed(DDR_CONFIG_2_POR_VAL,
				host->ioaddr + CORE_DDR_CONFIG_2);
	} else {
		/* Older HW: patch PRG_RCLK_DLY into the legacy POR value */
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr + CORE_DDR_CONFIG);
	}

	/* Route RCLK from CMD input when enhanced strobe is in use */
	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
				| CORE_CMDIN_RCLK_EN),
				host->ioaddr + CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
			| CORE_DDR_CAL_EN),
			host->ioaddr + CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
		dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
				| CORE_PWRSAVE_DLL),
				host->ioaddr + CORE_VENDOR_SPEC3);
	/* Ensure the PWRSAVE_DLL write is posted before returning */
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
839
Ritesh Harjaniea709662015-05-27 15:40:24 +0530840static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
841{
842 int ret = 0;
843 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
844 struct sdhci_msm_host *msm_host = pltfm_host->priv;
845 struct mmc_host *mmc = host->mmc;
846
847 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
848
Ritesh Harjani70e2a712015-08-25 11:34:16 +0530849 if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
850 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjaniea709662015-05-27 15:40:24 +0530851 mmc_hostname(mmc));
852 return -EINVAL;
853 }
854
855 if (msm_host->calibration_done ||
856 !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
857 return 0;
858 }
859
860 /*
861 * Reset the tuning block.
862 */
863 ret = msm_init_cm_dll(host);
864 if (ret)
865 goto out;
866
867 ret = sdhci_msm_cm_dll_sdc4_calibration(host);
868out:
869 if (!ret)
870 msm_host->calibration_done = true;
871 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
872 __func__, ret);
873 return ret;
874}
875
/*
 * sdhci_msm_hs400_dll_calibration() - re-calibrate the DLL hardware for
 * HS400 without re-running the full tuning procedure.
 *
 * Retuning in HS400 (DDR mode) will fail, so instead: reset the tuning
 * block, restore the phase saved by sdhci_msm_execute_tuning(), route the
 * DLL to track CMD/DAT, and run whichever DLL calibration this controller
 * uses (CDCLP533 or CM_DLL_SDC4).  Returns 0 on success or the first
 * failing sub-step's error code.
 */
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CMD_DAT_TRACK_SEL),
			host->ioaddr + CORE_DLL_CONFIG);

	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
913
Krishna Konda96e6b112013-10-28 15:25:03 -0700914static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
915 u8 drv_type)
916{
917 struct mmc_command cmd = {0};
918 struct mmc_request mrq = {NULL};
919 struct mmc_host *mmc = host->mmc;
920 u8 val = ((drv_type << 4) | 2);
921
922 cmd.opcode = MMC_SWITCH;
923 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
924 (EXT_CSD_HS_TIMING << 16) |
925 (val << 8) |
926 EXT_CSD_CMD_SET_NORMAL;
927 cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
928 /* 1 sec */
929 cmd.busy_timeout = 1000 * 1000;
930
931 memset(cmd.resp, 0, sizeof(cmd.resp));
932 cmd.retries = 3;
933
934 mrq.cmd = &cmd;
935 cmd.data = NULL;
936
937 mmc_wait_for_req(mmc, &mrq);
938 pr_debug("%s: %s: set card drive type to %d\n",
939 mmc_hostname(mmc), __func__,
940 drv_type);
941}
942
/*
 * sdhci_msm_execute_tuning() - find and program the best CM DLL phase for
 * high-speed modes (SDR104 / HS200 / HS400 above 100MHz).
 *
 * Sends the tuning block at each of the 16 DLL phases, records the phases
 * whose read-back matches the expected pattern, then programs the "most
 * appropriate" passing phase and saves it for later HS400 DLL calibration.
 * If *all* phases pass on an eMMC card, the card's drive strength is
 * changed and tuning repeated until at least one phase fails (see inline
 * comment).  The whole sequence retries up to 3 times before failing with
 * -EIO.  For HS400, once tuning is done only the DLL calibration is
 * (re)run instead of a full retune.
 */
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode*/
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	/* 8-bit HS200 uses the wider 128-byte tuning pattern */
	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	/* A fresh attempt (possibly with a new drive type) starts from zero */
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		/*
		 * On an error at this phase, poll the card with CMD13 until
		 * it is back in TRAN state before probing the next phase.
		 */
		if (card && (cmd.error || data.error)) {
			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
				   (R1_CURRENT_STATE(sts_cmd.resp[0])
				   != R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * wait for at least 146 MCLK cycles for
					 * the card to move to TRANS state. As
					 * the MCLK would be min 200MHz for
					 * tuning, we need max 0.73us delay. To
					 * be on safer side 1ms delay is given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			};
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then its a problem. So change the card's
		 * drive type to a different value, if supported and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
				tuned_phase_cnt);

		/* set drive type to other value . default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
				tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		/* Remembered so HS400 DLL calibration can restore it later */
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
				mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}
1147
Asutosh Das0ef24812012-12-18 16:14:02 +05301148static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1149{
1150 struct sdhci_msm_gpio_data *curr;
1151 int i, ret = 0;
1152
1153 curr = pdata->pin_data->gpio_data;
1154 for (i = 0; i < curr->size; i++) {
1155 if (!gpio_is_valid(curr->gpio[i].no)) {
1156 ret = -EINVAL;
1157 pr_err("%s: Invalid gpio = %d\n", __func__,
1158 curr->gpio[i].no);
1159 goto free_gpios;
1160 }
1161 if (enable) {
1162 ret = gpio_request(curr->gpio[i].no,
1163 curr->gpio[i].name);
1164 if (ret) {
1165 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1166 __func__, curr->gpio[i].no,
1167 curr->gpio[i].name, ret);
1168 goto free_gpios;
1169 }
1170 curr->gpio[i].is_enabled = true;
1171 } else {
1172 gpio_free(curr->gpio[i].no);
1173 curr->gpio[i].is_enabled = false;
1174 }
1175 }
1176 return ret;
1177
1178free_gpios:
1179 for (i--; i >= 0; i--) {
1180 gpio_free(curr->gpio[i].no);
1181 curr->gpio[i].is_enabled = false;
1182 }
1183 return ret;
1184}
1185
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301186static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1187 bool enable)
1188{
1189 int ret = 0;
1190
1191 if (enable)
1192 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1193 pdata->pctrl_data->pins_active);
1194 else
1195 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1196 pdata->pctrl_data->pins_sleep);
1197
1198 if (ret < 0)
1199 pr_err("%s state for pinctrl failed with %d\n",
1200 enable ? "Enabling" : "Disabling", ret);
1201
1202 return ret;
1203}
1204
Asutosh Das0ef24812012-12-18 16:14:02 +05301205static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1206{
1207 int ret = 0;
1208
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301209 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301210 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301211 } else if (pdata->pctrl_data) {
1212 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1213 goto out;
1214 } else if (!pdata->pin_data) {
1215 return 0;
1216 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301217
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301218 if (pdata->pin_data->is_gpio)
1219 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301220out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301221 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301222 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301223
1224 return ret;
1225}
1226
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301227static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1228 u32 **out, int *len, u32 size)
1229{
1230 int ret = 0;
1231 struct device_node *np = dev->of_node;
1232 size_t sz;
1233 u32 *arr = NULL;
1234
1235 if (!of_get_property(np, prop_name, len)) {
1236 ret = -EINVAL;
1237 goto out;
1238 }
1239 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001240 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301241 dev_err(dev, "%s invalid size\n", prop_name);
1242 ret = -EINVAL;
1243 goto out;
1244 }
1245
1246 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1247 if (!arr) {
1248 dev_err(dev, "%s failed allocating memory\n", prop_name);
1249 ret = -ENOMEM;
1250 goto out;
1251 }
1252
1253 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1254 if (ret < 0) {
1255 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1256 goto out;
1257 }
1258 *out = arr;
1259out:
1260 if (ret)
1261 *len = 0;
1262 return ret;
1263}
1264
#define MAX_PROP_SIZE 32
/*
 * sdhci_msm_dt_parse_vreg_info() - parse one regulator's DT properties
 * ("<name>-supply", "qcom,<name>-always-on", "qcom,<name>-lpm-sup",
 * "qcom,<name>-voltage-level", "qcom,<name>-current-level") into a
 * devm-allocated sdhci_msm_reg_data stored through @vreg_data.
 *
 * Returns 0 both on success and when the supply phandle is absent (the
 * regulator is simply treated as not present); -ENOMEM on allocation
 * failure.  Malformed voltage/current pairs only warn and leave the
 * corresponding fields zeroed.
 */
static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
		struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
{
	int len, ret = 0;
	const __be32 *prop;
	char prop_name[MAX_PROP_SIZE];
	struct sdhci_msm_reg_data *vreg;
	struct device_node *np = dev->of_node;

	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
	if (!of_parse_phandle(np, prop_name, 0)) {
		/* Not an error: this slot simply has no such supply */
		dev_info(dev, "No vreg data found for %s\n", vreg_name);
		return ret;
	}

	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
	if (!vreg) {
		dev_err(dev, "No memory for vreg: %s\n", vreg_name);
		ret = -ENOMEM;
		return ret;
	}

	vreg->name = vreg_name;

	snprintf(prop_name, MAX_PROP_SIZE,
		"qcom,%s-always-on", vreg_name);
	if (of_get_property(np, prop_name, NULL))
		vreg->is_always_on = true;

	snprintf(prop_name, MAX_PROP_SIZE,
		"qcom,%s-lpm-sup", vreg_name);
	if (of_get_property(np, prop_name, NULL))
		vreg->lpm_sup = true;

	/* Voltage range is a <low high> pair in the DT, big-endian cells */
	snprintf(prop_name, MAX_PROP_SIZE,
		"qcom,%s-voltage-level", vreg_name);
	prop = of_get_property(np, prop_name, &len);
	if (!prop || (len != (2 * sizeof(__be32)))) {
		dev_warn(dev, "%s %s property\n",
			prop ? "invalid format" : "no", prop_name);
	} else {
		vreg->low_vol_level = be32_to_cpup(&prop[0]);
		vreg->high_vol_level = be32_to_cpup(&prop[1]);
	}

	/* Load current is a <lpm hpm> pair, in uA */
	snprintf(prop_name, MAX_PROP_SIZE,
		"qcom,%s-current-level", vreg_name);
	prop = of_get_property(np, prop_name, &len);
	if (!prop || (len != (2 * sizeof(__be32)))) {
		dev_warn(dev, "%s %s property\n",
			prop ? "invalid format" : "no", prop_name);
	} else {
		vreg->lpm_uA = be32_to_cpup(&prop[0]);
		vreg->hpm_uA = be32_to_cpup(&prop[1]);
	}

	*vreg_data = vreg;
	dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
		vreg->name, vreg->is_always_on ? "always_on," : "",
		vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
		vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);

	return ret;
}
1330
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301331static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1332 struct sdhci_msm_pltfm_data *pdata)
1333{
1334 struct sdhci_pinctrl_data *pctrl_data;
1335 struct pinctrl *pctrl;
1336 int ret = 0;
1337
1338 /* Try to obtain pinctrl handle */
1339 pctrl = devm_pinctrl_get(dev);
1340 if (IS_ERR(pctrl)) {
1341 ret = PTR_ERR(pctrl);
1342 goto out;
1343 }
1344 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1345 if (!pctrl_data) {
1346 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1347 ret = -ENOMEM;
1348 goto out;
1349 }
1350 pctrl_data->pctrl = pctrl;
1351 /* Look-up and keep the states handy to be used later */
1352 pctrl_data->pins_active = pinctrl_lookup_state(
1353 pctrl_data->pctrl, "active");
1354 if (IS_ERR(pctrl_data->pins_active)) {
1355 ret = PTR_ERR(pctrl_data->pins_active);
1356 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1357 goto out;
1358 }
1359 pctrl_data->pins_sleep = pinctrl_lookup_state(
1360 pctrl_data->pctrl, "sleep");
1361 if (IS_ERR(pctrl_data->pins_sleep)) {
1362 ret = PTR_ERR(pctrl_data->pins_sleep);
1363 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1364 goto out;
1365 }
1366 pdata->pctrl_data = pctrl_data;
1367out:
1368 return ret;
1369}
1370
Asutosh Das0ef24812012-12-18 16:14:02 +05301371#define GPIO_NAME_MAX_LEN 32
1372static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1373 struct sdhci_msm_pltfm_data *pdata)
1374{
1375 int ret = 0, cnt, i;
1376 struct sdhci_msm_pin_data *pin_data;
1377 struct device_node *np = dev->of_node;
1378
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301379 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1380 if (!ret) {
1381 goto out;
1382 } else if (ret == -EPROBE_DEFER) {
1383 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1384 goto out;
1385 } else {
1386 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1387 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301388 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301389 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301390 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1391 if (!pin_data) {
1392 dev_err(dev, "No memory for pin_data\n");
1393 ret = -ENOMEM;
1394 goto out;
1395 }
1396
1397 cnt = of_gpio_count(np);
1398 if (cnt > 0) {
1399 pin_data->gpio_data = devm_kzalloc(dev,
1400 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1401 if (!pin_data->gpio_data) {
1402 dev_err(dev, "No memory for gpio_data\n");
1403 ret = -ENOMEM;
1404 goto out;
1405 }
1406 pin_data->gpio_data->size = cnt;
1407 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1408 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1409
1410 if (!pin_data->gpio_data->gpio) {
1411 dev_err(dev, "No memory for gpio\n");
1412 ret = -ENOMEM;
1413 goto out;
1414 }
1415
1416 for (i = 0; i < cnt; i++) {
1417 const char *name = NULL;
1418 char result[GPIO_NAME_MAX_LEN];
1419 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1420 of_property_read_string_index(np,
1421 "qcom,gpio-names", i, &name);
1422
1423 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1424 dev_name(dev), name ? name : "?");
1425 pin_data->gpio_data->gpio[i].name = result;
1426 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1427 pin_data->gpio_data->gpio[i].name,
1428 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301429 }
1430 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301431 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301432out:
1433 if (ret)
1434 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1435 return ret;
1436}
1437
Gilad Bronerc788a672015-09-08 15:39:11 +03001438static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1439 struct sdhci_msm_pltfm_data *pdata)
1440{
1441 struct device_node *np = dev->of_node;
1442 const char *str;
1443 u32 cpu;
1444 int ret = 0;
1445 int i;
1446
1447 pdata->pm_qos_data.irq_valid = false;
1448 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1449 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1450 !strcmp(str, "affine_irq")) {
1451 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
1452 }
1453
1454 /* must specify cpu for "affine_cores" type */
1455 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1456 pdata->pm_qos_data.irq_cpu = -1;
1457 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1458 if (ret) {
1459 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1460 ret);
1461 goto out;
1462 }
1463 if (cpu < 0 || cpu >= num_possible_cpus()) {
1464 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1465 __func__, cpu, num_possible_cpus());
1466 ret = -EINVAL;
1467 goto out;
1468 }
1469 pdata->pm_qos_data.irq_cpu = cpu;
1470 }
1471
1472 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1473 SDHCI_POWER_POLICY_NUM) {
1474 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1475 __func__, SDHCI_POWER_POLICY_NUM);
1476 ret = -EINVAL;
1477 goto out;
1478 }
1479
1480 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1481 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1482 &pdata->pm_qos_data.irq_latency.latency[i]);
1483
1484 pdata->pm_qos_data.irq_valid = true;
1485out:
1486 return ret;
1487}
1488
1489static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1490 struct sdhci_msm_pltfm_data *pdata)
1491{
1492 struct device_node *np = dev->of_node;
1493 u32 mask;
1494 int nr_groups;
1495 int ret;
1496 int i;
1497
1498 /* Read cpu group mapping */
1499 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1500 if (nr_groups <= 0) {
1501 ret = -EINVAL;
1502 goto out;
1503 }
1504 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1505 pdata->pm_qos_data.cpu_group_map.mask =
1506 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1507 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1508 ret = -ENOMEM;
1509 goto out;
1510 }
1511
1512 for (i = 0; i < nr_groups; i++) {
1513 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1514 i, &mask);
1515
1516 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1517 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1518 cpu_possible_mask)) {
1519 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1520 __func__, mask, i);
1521 ret = -EINVAL;
1522 goto free_res;
1523 }
1524 }
1525 return 0;
1526
1527free_res:
1528 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1529out:
1530 return ret;
1531}
1532
1533static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1534 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1535{
1536 struct device_node *np = dev->of_node;
1537 struct sdhci_msm_pm_qos_latency *values;
1538 int ret;
1539 int i;
1540 int group;
1541 int cfg;
1542
1543 ret = of_property_count_u32_elems(np, name);
1544 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1545 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1546 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1547 ret);
1548 return -EINVAL;
1549 } else if (ret < 0) {
1550 return ret;
1551 }
1552
1553 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1554 GFP_KERNEL);
1555 if (!values)
1556 return -ENOMEM;
1557
1558 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1559 group = i / SDHCI_POWER_POLICY_NUM;
1560 cfg = i % SDHCI_POWER_POLICY_NUM;
1561 of_property_read_u32_index(np, name, i,
1562 &(values[group].latency[cfg]));
1563 }
1564
1565 *latency = values;
1566 return 0;
1567}
1568
1569static void sdhci_msm_pm_qos_parse(struct device *dev,
1570 struct sdhci_msm_pltfm_data *pdata)
1571{
1572 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1573 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1574 __func__);
1575
1576 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1577 pdata->pm_qos_data.cmdq_valid =
1578 !sdhci_msm_pm_qos_parse_latency(dev,
1579 "qcom,pm-qos-cmdq-latency-us",
1580 pdata->pm_qos_data.cpu_group_map.nr_groups,
1581 &pdata->pm_qos_data.cmdq_latency);
1582 pdata->pm_qos_data.legacy_valid =
1583 !sdhci_msm_pm_qos_parse_latency(dev,
1584 "qcom,pm-qos-legacy-latency-us",
1585 pdata->pm_qos_data.cpu_group_map.nr_groups,
1586 &pdata->pm_qos_data.latency);
1587 if (!pdata->pm_qos_data.cmdq_valid &&
1588 !pdata->pm_qos_data.legacy_valid) {
1589 /* clean-up previously allocated arrays */
1590 kfree(pdata->pm_qos_data.latency);
1591 kfree(pdata->pm_qos_data.cmdq_latency);
1592 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1593 __func__);
1594 }
1595 } else {
1596 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1597 __func__);
1598 }
1599}
1600
Asutosh Das0ef24812012-12-18 16:14:02 +05301601/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001602static
1603struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1604 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301605{
1606 struct sdhci_msm_pltfm_data *pdata = NULL;
1607 struct device_node *np = dev->of_node;
1608 u32 bus_width = 0;
1609 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301610 int clk_table_len;
1611 u32 *clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301612 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05301613
1614 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1615 if (!pdata) {
1616 dev_err(dev, "failed to allocate memory for platform data\n");
1617 goto out;
1618 }
1619
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301620 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
1621 if (gpio_is_valid(pdata->status_gpio) & !(flags & OF_GPIO_ACTIVE_LOW))
1622 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301623
Asutosh Das0ef24812012-12-18 16:14:02 +05301624 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1625 if (bus_width == 8)
1626 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1627 else if (bus_width == 4)
1628 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1629 else {
1630 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1631 pdata->mmc_bus_width = 0;
1632 }
1633
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001634 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
1635 &msm_host->mmc->clk_scaling.freq_table,
1636 &msm_host->mmc->clk_scaling.freq_table_sz, 0))
1637 pr_debug("%s: no clock scaling frequencies were supplied\n",
1638 dev_name(dev));
1639 else if (!msm_host->mmc->clk_scaling.freq_table ||
1640 !msm_host->mmc->clk_scaling.freq_table_sz)
1641 dev_err(dev, "bad dts clock scaling frequencies\n");
1642
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301643 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1644 &clk_table, &clk_table_len, 0)) {
1645 dev_err(dev, "failed parsing supported clock rates\n");
1646 goto out;
1647 }
1648 if (!clk_table || !clk_table_len) {
1649 dev_err(dev, "Invalid clock table\n");
1650 goto out;
1651 }
1652 pdata->sup_clk_table = clk_table;
1653 pdata->sup_clk_cnt = clk_table_len;
1654
Asutosh Das0ef24812012-12-18 16:14:02 +05301655 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1656 sdhci_msm_slot_reg_data),
1657 GFP_KERNEL);
1658 if (!pdata->vreg_data) {
1659 dev_err(dev, "failed to allocate memory for vreg data\n");
1660 goto out;
1661 }
1662
1663 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1664 "vdd")) {
1665 dev_err(dev, "failed parsing vdd data\n");
1666 goto out;
1667 }
1668 if (sdhci_msm_dt_parse_vreg_info(dev,
1669 &pdata->vreg_data->vdd_io_data,
1670 "vdd-io")) {
1671 dev_err(dev, "failed parsing vdd-io data\n");
1672 goto out;
1673 }
1674
1675 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
1676 dev_err(dev, "failed parsing gpio data\n");
1677 goto out;
1678 }
1679
Asutosh Das0ef24812012-12-18 16:14:02 +05301680 len = of_property_count_strings(np, "qcom,bus-speed-mode");
1681
1682 for (i = 0; i < len; i++) {
1683 const char *name = NULL;
1684
1685 of_property_read_string_index(np,
1686 "qcom,bus-speed-mode", i, &name);
1687 if (!name)
1688 continue;
1689
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001690 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
1691 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
1692 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
1693 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
1694 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05301695 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1696 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
1697 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1698 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
1699 pdata->caps |= MMC_CAP_1_8V_DDR
1700 | MMC_CAP_UHS_DDR50;
1701 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
1702 pdata->caps |= MMC_CAP_1_2V_DDR
1703 | MMC_CAP_UHS_DDR50;
1704 }
1705
1706 if (of_get_property(np, "qcom,nonremovable", NULL))
1707 pdata->nonremovable = true;
1708
Guoping Yuf7c91332014-08-20 16:56:18 +08001709 if (of_get_property(np, "qcom,nonhotplug", NULL))
1710 pdata->nonhotplug = true;
1711
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08001712 pdata->largeaddressbus =
1713 of_property_read_bool(np, "qcom,large-address-bus");
1714
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001715 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
1716 msm_host->mmc->wakeup_on_idle = true;
1717
Gilad Bronerc788a672015-09-08 15:39:11 +03001718 sdhci_msm_pm_qos_parse(dev, pdata);
1719
Pavan Anamula5a256df2015-10-16 14:38:28 +05301720 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
1721 pdata->core_3_0v_support = true;
1722
Asutosh Das0ef24812012-12-18 16:14:02 +05301723 return pdata;
1724out:
1725 return NULL;
1726}
1727
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301728/* Returns required bandwidth in Bytes per Sec */
1729static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1730 struct mmc_ios *ios)
1731{
Sahitya Tummala2886c922013-04-03 18:03:31 +05301732 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1733 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1734
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301735 unsigned int bw;
1736
Sahitya Tummala2886c922013-04-03 18:03:31 +05301737 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301738 /*
1739 * For DDR mode, SDCC controller clock will be at
1740 * the double rate than the actual clock that goes to card.
1741 */
1742 if (ios->bus_width == MMC_BUS_WIDTH_4)
1743 bw /= 2;
1744 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1745 bw /= 8;
1746
1747 return bw;
1748}
1749
1750static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1751 unsigned int bw)
1752{
1753 unsigned int *table = host->pdata->voting_data->bw_vecs;
1754 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1755 int i;
1756
1757 if (host->msm_bus_vote.is_max_bw_needed && bw)
1758 return host->msm_bus_vote.max_bw_vote;
1759
1760 for (i = 0; i < size; i++) {
1761 if (bw <= table[i])
1762 break;
1763 }
1764
1765 if (i && (i == size))
1766 i--;
1767
1768 return i;
1769}
1770
/*
 * This function must be called with host lock acquired.
 * Caller of this function should also ensure that msm bus client
 * handle is not null.
 */
static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
					 int vote,
					 unsigned long *flags)
{
	struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
	int rc = 0;

	BUG_ON(!flags);

	/* Skip the (expensive) bus-scale request when the vote is unchanged */
	if (vote != msm_host->msm_bus_vote.curr_vote) {
		/*
		 * The host spinlock (held by the caller) is dropped across
		 * the bus-scale call and re-acquired afterwards; *flags is
		 * updated in place so the caller's irqrestore stays valid.
		 * Presumably the update request can block — TODO confirm.
		 */
		spin_unlock_irqrestore(&host->lock, *flags);
		rc = msm_bus_scale_client_update_request(
				msm_host->msm_bus_vote.client_handle, vote);
		spin_lock_irqsave(&host->lock, *flags);
		if (rc) {
			pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				mmc_hostname(host->mmc),
				msm_host->msm_bus_vote.client_handle, vote, rc);
			goto out;
		}
		/* Cache the applied vote so redundant requests are elided */
		msm_host->msm_bus_vote.curr_vote = vote;
	}
out:
	return rc;
}
1801
/*
 * Internal work. Work to set 0 bandwidth for msm bus.
 * Scheduled (delayed) by sdhci_msm_bus_queue_work(); drops the bus vote
 * to the minimum once the host has been idle for the voting delay.
 */
static void sdhci_msm_bus_work(struct work_struct *work)
{
	struct sdhci_msm_host *msm_host;
	struct sdhci_host *host;
	unsigned long flags;

	msm_host = container_of(work, struct sdhci_msm_host,
				msm_bus_vote.vote_work.work);
	host = platform_get_drvdata(msm_host->pdev);

	/* No bus-scale client registered: nothing to vote on */
	if (!msm_host->msm_bus_vote.client_handle)
		return;

	spin_lock_irqsave(&host->lock, flags);
	/* don't vote for 0 bandwidth if any request is in progress */
	if (!host->mrq) {
		/* May drop and re-take the lock internally via &flags */
		sdhci_msm_bus_set_vote(msm_host,
			msm_host->msm_bus_vote.min_bw_vote, &flags);
	} else
		pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
			   mmc_hostname(host->mmc), __func__);
	spin_unlock_irqrestore(&host->lock, flags);
}
1828
/*
 * This function cancels any scheduled delayed work and sets the bus
 * vote based on bw (bandwidth) argument.
 */
static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
						   unsigned int bw)
{
	int vote;
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	/* Ensure the pending 0-bandwidth work cannot race with this vote */
	cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
	spin_lock_irqsave(&host->lock, flags);
	vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
	/* May drop and re-take the lock internally via &flags */
	sdhci_msm_bus_set_vote(msm_host, vote, &flags);
	spin_unlock_irqrestore(&host->lock, flags);
}
1847
1848#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
1849
/* This function queues a work which will set the bandwidth requirement to 0 */
static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
{
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	spin_lock_irqsave(&host->lock, flags);
	/* Only schedule the deferred un-vote if we are not already at minimum */
	if (msm_host->msm_bus_vote.min_bw_vote !=
		msm_host->msm_bus_vote.curr_vote)
		queue_delayed_work(system_wq,
				   &msm_host->msm_bus_vote.vote_work,
				   msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
	spin_unlock_irqrestore(&host->lock, flags);
}
1865
/*
 * Register an msm bus-scale client for this host based on the
 * "qcom,bus-bw-vectors-bps" DT table. If either the bus platform data
 * or the bandwidth table is absent, bus voting is left disabled
 * (client_handle stays 0) and the scratch allocation is freed.
 */
static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
				struct platform_device *pdev)
{
	int rc = 0;
	struct msm_bus_scale_pdata *bus_pdata;

	struct sdhci_msm_bus_voting_data *data;
	struct device *dev = &pdev->dev;

	data = devm_kzalloc(dev,
		sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev,
			"%s: failed to allocate memory\n", __func__);
		rc = -ENOMEM;
		goto out;
	}
	data->bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (data->bus_pdata) {
		/* Parse the per-vote bandwidth thresholds from DT */
		rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
				&data->bw_vecs, &data->bw_vecs_size, 0);
		if (rc) {
			dev_err(&pdev->dev,
				"%s: Failed to get bus-bw-vectors-bps\n",
				__func__);
			goto out;
		}
		host->pdata->voting_data = data;
	}
	/* Only register a client when every required piece is present */
	if (host->pdata->voting_data &&
		host->pdata->voting_data->bus_pdata &&
		host->pdata->voting_data->bw_vecs &&
		host->pdata->voting_data->bw_vecs_size) {

		bus_pdata = host->pdata->voting_data->bus_pdata;
		host->msm_bus_vote.client_handle =
				msm_bus_scale_register_client(bus_pdata);
		if (!host->msm_bus_vote.client_handle) {
			dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
			rc = -EFAULT;
			goto out;
		}
		/* cache the vote index for minimum and maximum bandwidth */
		host->msm_bus_vote.min_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, 0);
		host->msm_bus_vote.max_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
	} else {
		/* Voting unused: return the scratch allocation to devm now */
		devm_kfree(dev, data);
	}

out:
	return rc;
}
1920
1921static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
1922{
1923 if (host->msm_bus_vote.client_handle)
1924 msm_bus_scale_unregister_client(
1925 host->msm_bus_vote.client_handle);
1926}
1927
/*
 * Vote for the bus bandwidth currently required by the host (enable),
 * or arrange for the vote to be dropped (disable).
 */
static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned int bw;

	/* Bus voting disabled: no registered bus-scale client */
	if (!msm_host->msm_bus_vote.client_handle)
		return;

	bw = sdhci_get_bw_required(host, ios);
	if (enable) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
	} else {
		/*
		 * If clock gating is enabled, then remove the vote
		 * immediately because clocks will be disabled only
		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
		 * additional delay is required to remove the bus vote.
		 */
#ifdef CONFIG_MMC_CLKGATE
		if (host->mmc->clkgate_delay)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		else
#endif
			sdhci_msm_bus_queue_work(host);
	}
}
1956
Asutosh Das0ef24812012-12-18 16:14:02 +05301957/* Regulator utility functions */
1958static int sdhci_msm_vreg_init_reg(struct device *dev,
1959 struct sdhci_msm_reg_data *vreg)
1960{
1961 int ret = 0;
1962
1963 /* check if regulator is already initialized? */
1964 if (vreg->reg)
1965 goto out;
1966
1967 /* Get the regulator handle */
1968 vreg->reg = devm_regulator_get(dev, vreg->name);
1969 if (IS_ERR(vreg->reg)) {
1970 ret = PTR_ERR(vreg->reg);
1971 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
1972 __func__, vreg->name, ret);
1973 goto out;
1974 }
1975
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301976 if (regulator_count_voltages(vreg->reg) > 0) {
1977 vreg->set_voltage_sup = true;
1978 /* sanity check */
1979 if (!vreg->high_vol_level || !vreg->hpm_uA) {
1980 pr_err("%s: %s invalid constraints specified\n",
1981 __func__, vreg->name);
1982 ret = -EINVAL;
1983 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301984 }
1985
1986out:
1987 return ret;
1988}
1989
1990static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
1991{
1992 if (vreg->reg)
1993 devm_regulator_put(vreg->reg);
1994}
1995
1996static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
1997 *vreg, int uA_load)
1998{
1999 int ret = 0;
2000
2001 /*
2002 * regulators that do not support regulator_set_voltage also
2003 * do not support regulator_set_optimum_mode
2004 */
2005 if (vreg->set_voltage_sup) {
2006 ret = regulator_set_load(vreg->reg, uA_load);
2007 if (ret < 0)
2008 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2009 __func__, vreg->name, uA_load, ret);
2010 else
2011 /*
2012 * regulator_set_load() can return non zero
2013 * value even for success case.
2014 */
2015 ret = 0;
2016 }
2017 return ret;
2018}
2019
2020static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2021 int min_uV, int max_uV)
2022{
2023 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302024 if (vreg->set_voltage_sup) {
2025 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2026 if (ret) {
2027 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302028 __func__, vreg->name, min_uV, max_uV, ret);
2029 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302030 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302031
2032 return ret;
2033}
2034
/*
 * Enable one regulator: raise it to high-power mode, program the high
 * voltage level (first enable only), then enable it. The order of the
 * three steps is deliberate.
 */
static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
{
	int ret = 0;

	/* Put regulator in HPM (high power mode) */
	ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
	if (ret < 0)
		return ret;

	if (!vreg->is_enabled) {
		/* Set voltage level */
		ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
						vreg->high_vol_level);
		if (ret)
			return ret;
	}
	ret = regulator_enable(vreg->reg);
	if (ret) {
		pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
				__func__, vreg->name, ret);
		return ret;
	}
	/* Track state so disable/enable stay balanced */
	vreg->is_enabled = true;
	return ret;
}
2060
/*
 * Disable one regulator, or — for always-on supplies — drop it to
 * low-power mode when LPM is supported.
 */
static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
{
	int ret = 0;

	/* Never disable regulator marked as always_on */
	if (vreg->is_enabled && !vreg->is_always_on) {
		ret = regulator_disable(vreg->reg);
		if (ret) {
			pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
				__func__, vreg->name, ret);
			goto out;
		}
		vreg->is_enabled = false;

		/* Drop the load request now that the supply is off */
		ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
		if (ret < 0)
			goto out;

		/* Set min. voltage level to 0 */
		ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
		if (ret)
			goto out;
	} else if (vreg->is_enabled && vreg->is_always_on) {
		if (vreg->lpm_sup) {
			/* Put always_on regulator in LPM (low power mode) */
			ret = sdhci_msm_vreg_set_optimum_mode(vreg,
							      vreg->lpm_uA);
			if (ret < 0)
				goto out;
		}
	}
out:
	return ret;
}
2095
2096static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2097 bool enable, bool is_init)
2098{
2099 int ret = 0, i;
2100 struct sdhci_msm_slot_reg_data *curr_slot;
2101 struct sdhci_msm_reg_data *vreg_table[2];
2102
2103 curr_slot = pdata->vreg_data;
2104 if (!curr_slot) {
2105 pr_debug("%s: vreg info unavailable,assuming the slot is powered by always on domain\n",
2106 __func__);
2107 goto out;
2108 }
2109
2110 vreg_table[0] = curr_slot->vdd_data;
2111 vreg_table[1] = curr_slot->vdd_io_data;
2112
2113 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2114 if (vreg_table[i]) {
2115 if (enable)
2116 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2117 else
2118 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2119 if (ret)
2120 goto out;
2121 }
2122 }
2123out:
2124 return ret;
2125}
2126
2127/*
2128 * Reset vreg by ensuring it is off during probe. A call
2129 * to enable vreg is needed to balance disable vreg
2130 */
2131static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
2132{
2133 int ret;
2134
2135 ret = sdhci_msm_setup_vreg(pdata, 1, true);
2136 if (ret)
2137 return ret;
2138 ret = sdhci_msm_setup_vreg(pdata, 0, true);
2139 return ret;
2140}
2141
/* This init function should be called only once for each SDHC slot */
static int sdhci_msm_vreg_init(struct device *dev,
				struct sdhci_msm_pltfm_data *pdata,
				bool is_init)
{
	int ret = 0;
	struct sdhci_msm_slot_reg_data *curr_slot;
	struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;

	curr_slot = pdata->vreg_data;
	/* No slot regulator data: nothing to initialize or tear down */
	if (!curr_slot)
		goto out;

	curr_vdd_reg = curr_slot->vdd_data;
	curr_vdd_io_reg = curr_slot->vdd_io_data;

	if (!is_init)
		/* Deregister all regulators from regulator framework */
		goto vdd_io_reg_deinit;

	/*
	 * Get the regulator handle from voltage regulator framework
	 * and then try to set the voltage level for the regulator
	 */
	if (curr_vdd_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
		if (ret)
			goto out;
	}
	if (curr_vdd_io_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
		if (ret)
			/* vdd was initialized above; unwind it */
			goto vdd_reg_deinit;
	}
	/* Balance the enable/disable counts so supplies start off */
	ret = sdhci_msm_vreg_reset(pdata);
	if (ret)
		dev_err(dev, "vreg reset failed (%d)\n", ret);
	goto out;

vdd_io_reg_deinit:
	if (curr_vdd_io_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
vdd_reg_deinit:
	if (curr_vdd_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
out:
	return ret;
}
2190
2191
2192static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2193 enum vdd_io_level level,
2194 unsigned int voltage_level)
2195{
2196 int ret = 0;
2197 int set_level;
2198 struct sdhci_msm_reg_data *vdd_io_reg;
2199
2200 if (!pdata->vreg_data)
2201 return ret;
2202
2203 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2204 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2205 switch (level) {
2206 case VDD_IO_LOW:
2207 set_level = vdd_io_reg->low_vol_level;
2208 break;
2209 case VDD_IO_HIGH:
2210 set_level = vdd_io_reg->high_vol_level;
2211 break;
2212 case VDD_IO_SET_LEVEL:
2213 set_level = voltage_level;
2214 break;
2215 default:
2216 pr_err("%s: invalid argument level = %d",
2217 __func__, level);
2218 ret = -EINVAL;
2219 return ret;
2220 }
2221 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2222 set_level);
2223 }
2224 return ret;
2225}
2226
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302227void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2228{
2229 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2230 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2231
2232 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
2233 mmc_hostname(host->mmc),
2234 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
2235 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
2236 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
2237}
2238
/*
 * Power-control interrupt handler. Reads CORE_PWRCTL_STATUS, services
 * bus power on/off and IO voltage high/low requests (regulators, pins,
 * pad power switch), acks the result via CORE_PWRCTL_CTL and wakes any
 * waiter blocked in sdhci_msm_check_power_status().
 */
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 irq_status = 0;
	u8 irq_ack = 0;
	int ret = 0;
	int pwr_state = 0, io_level = 0;
	unsigned long flags;
	int retry = 10;

	irq_status = readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
	pr_debug("%s: Received IRQ(%d), status=0x%x\n",
		mmc_hostname(msm_host->mmc), irq, irq_status);

	/* Clear the interrupt */
	writeb_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
	/*
	 * SDHC has core_mem and hc_mem device memory and these memory
	 * addresses do not fall within 1KB region. Hence, any update to
	 * core_mem address space would require an mb() to ensure this gets
	 * completed before its next update to registers within hc_mem.
	 */
	mb();
	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when actual reset and clear/read of status register is
	 * happening at a time. Hence, retry for at least 10 times to make
	 * sure status register is cleared. Otherwise, this will result in
	 * a spurious power IRQ resulting in system instability.
	 */
	while (irq_status &
		readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS)) {
		if (retry == 0) {
			pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
				mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			BUG_ON(1);
		}
		writeb_relaxed(irq_status,
			(msm_host->core_mem + CORE_PWRCTL_CLEAR));
		retry--;
		udelay(10);
	}
	if (likely(retry < 10))
		pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
			mmc_hostname(host->mmc), irq_status, retry);

	/* Handle BUS ON/OFF*/
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		/* Power up supplies, then pins, then raise IO voltage */
		ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
		if (!ret) {
			ret = sdhci_msm_setup_pins(msm_host->pdata, true);
			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
					VDD_IO_HIGH, 0);
		}
		if (ret)
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
		else
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;

		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		/* Mirror of BUS_ON: drop supplies, pins, and IO voltage */
		ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
		if (!ret) {
			ret = sdhci_msm_setup_pins(msm_host->pdata, false);
			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
					VDD_IO_LOW, 0);
		}
		if (ret)
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
		else
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;

		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
	}
	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW) {
		/* Switch voltage Low */
		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
		if (ret)
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		else
			irq_ack |= CORE_PWRCTL_IO_SUCCESS;

		io_level = REQ_IO_LOW;
	}
	if (irq_status & CORE_PWRCTL_IO_HIGH) {
		/* Switch voltage High */
		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
		if (ret)
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		else
			irq_ack |= CORE_PWRCTL_IO_SUCCESS;

		io_level = REQ_IO_HIGH;
	}

	/* ACK status to the core */
	writeb_relaxed(irq_ack, (msm_host->core_mem + CORE_PWRCTL_CTL));
	/*
	 * SDHC has core_mem and hc_mem device memory and these memory
	 * addresses do not fall within 1KB region. Hence, any update to
	 * core_mem address space would require an mb() to ensure this gets
	 * completed before its next update to registers within hc_mem.
	 */
	mb();

	/*
	 * Flip the IO pad power switch to match the new signalling level,
	 * taking the controller's 3.0V/1.8V capability bits into account.
	 */
	if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
				~CORE_IO_PAD_PWR_SWITCH),
				host->ioaddr + CORE_VENDOR_SPEC);
	else if ((io_level & REQ_IO_LOW) ||
			(msm_host->caps_0 & CORE_1_8V_SUPPORT))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
				CORE_IO_PAD_PWR_SWITCH),
				host->ioaddr + CORE_VENDOR_SPEC);
	mb();

	pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
	/* Publish the new state and wake any waiter under the host lock */
	spin_lock_irqsave(&host->lock, flags);
	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;
	complete(&msm_host->pwr_irq_completion);
	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_HANDLED;
}
2374
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302375static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302376show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2377{
2378 struct sdhci_host *host = dev_get_drvdata(dev);
2379 int poll;
2380 unsigned long flags;
2381
2382 spin_lock_irqsave(&host->lock, flags);
2383 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2384 spin_unlock_irqrestore(&host->lock, flags);
2385
2386 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2387}
2388
2389static ssize_t
2390store_polling(struct device *dev, struct device_attribute *attr,
2391 const char *buf, size_t count)
2392{
2393 struct sdhci_host *host = dev_get_drvdata(dev);
2394 int value;
2395 unsigned long flags;
2396
2397 if (!kstrtou32(buf, 0, &value)) {
2398 spin_lock_irqsave(&host->lock, flags);
2399 if (value) {
2400 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2401 mmc_detect_change(host->mmc, 0);
2402 } else {
2403 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2404 }
2405 spin_unlock_irqrestore(&host->lock, flags);
2406 }
2407 return count;
2408}
2409
2410static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302411show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2412 char *buf)
2413{
2414 struct sdhci_host *host = dev_get_drvdata(dev);
2415 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2416 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2417
2418 return snprintf(buf, PAGE_SIZE, "%u\n",
2419 msm_host->msm_bus_vote.is_max_bw_needed);
2420}
2421
2422static ssize_t
2423store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2424 const char *buf, size_t count)
2425{
2426 struct sdhci_host *host = dev_get_drvdata(dev);
2427 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2428 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2429 uint32_t value;
2430 unsigned long flags;
2431
2432 if (!kstrtou32(buf, 0, &value)) {
2433 spin_lock_irqsave(&host->lock, flags);
2434 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2435 spin_unlock_irqrestore(&host->lock, flags);
2436 }
2437 return count;
2438}
2439
/*
 * Wait for the power IRQ to service the given request type (bus on/off,
 * IO high/low). For IO requests that can never raise an IRQ — no
 * switchable signalling voltage, or an IO-high request issued before the
 * controller was powered — the wait is skipped entirely.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	unsigned long flags;
	bool done = false;
	u32 io_sig_sts;

	spin_lock_irqsave(&host->lock, flags);
	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
		mmc_hostname(host->mmc), __func__, req_type,
		msm_host->curr_pwr_state, msm_host->curr_io_level);
	io_sig_sts = readl_relaxed(msm_host->core_mem + CORE_GENERICS);
	/*
	 * The IRQ for request type IO High/Low will be generated when -
	 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
	 * 2. If 1 is true and when there is a state change in 1.8V enable
	 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
	 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
	 * layer tries to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of controller power up sequence. Hence, check
	 * for host->pwr to handle a case where IO voltage high request is
	 * issued even before controller power up.
	 */
	if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
		if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
				((req_type & REQ_IO_HIGH) && !host->pwr)) {
			pr_debug("%s: do not wait for power IRQ that never comes\n",
					mmc_hostname(host->mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return;
		}
	}

	/* The IRQ may already have satisfied this request */
	if ((req_type & msm_host->curr_pwr_state) ||
			(req_type & msm_host->curr_io_level))
		done = true;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * This is needed here to handle a case where IRQ gets
	 * triggered even before this function is called so that
	 * x->done counter of completion gets reset. Otherwise,
	 * next call to wait_for_completion returns immediately
	 * without actually waiting for the IRQ to be handled.
	 */
	if (done)
		init_completion(&msm_host->pwr_irq_completion);
	else
		wait_for_completion(&msm_host->pwr_irq_completion);

	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
			__func__, req_type);
}
2496
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002497static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2498{
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302499 u32 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
2500
2501 if (enable) {
2502 config |= CORE_CDR_EN;
2503 config &= ~CORE_CDR_EXT_EN;
2504 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
2505 } else {
2506 config &= ~CORE_CDR_EN;
2507 config |= CORE_CDR_EXT_EN;
2508 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
2509 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002510}
2511
/* Maximum number of scatter-gather segments supported by this host */
static unsigned int sdhci_msm_max_segs(void)
{
	return SDHCI_MSM_MAX_SEGMENTS;
}
2516
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302517static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302518{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302519 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2520 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302521
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302522 return msm_host->pdata->sup_clk_table[0];
2523}
2524
2525static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2526{
2527 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2528 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2529 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2530
2531 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2532}
2533
2534static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2535 u32 req_clk)
2536{
2537 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2538 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2539 unsigned int sel_clk = -1;
2540 unsigned char cnt;
2541
2542 if (req_clk < sdhci_msm_get_min_clock(host)) {
2543 sel_clk = sdhci_msm_get_min_clock(host);
2544 return sel_clk;
2545 }
2546
2547 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2548 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2549 break;
2550 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2551 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2552 break;
2553 } else {
2554 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2555 }
2556 }
2557 return sel_clk;
2558}
2559
/*
 * Enable the controller's interface (pclk) and core clocks, voting for
 * bus bandwidth first. Idempotent: returns immediately when the
 * controller clock is already on. On failure the partial state (pclk,
 * bus vote) is unwound in reverse order.
 */
static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	if (atomic_read(&msm_host->controller_clock))
		return 0;

	/* Vote for bus bandwidth before turning clocks on */
	sdhci_msm_bus_voting(host, 1);

	if (!IS_ERR(msm_host->pclk)) {
		rc = clk_prepare_enable(msm_host->pclk);
		if (rc) {
			pr_err("%s: %s: failed to enable the pclk with error %d\n",
			       mmc_hostname(host->mmc), __func__, rc);
			goto remove_vote;
		}
	}

	rc = clk_prepare_enable(msm_host->clk);
	if (rc) {
		pr_err("%s: %s: failed to enable the host-clk with error %d\n",
		       mmc_hostname(host->mmc), __func__, rc);
		goto disable_pclk;
	}

	atomic_set(&msm_host->controller_clock, 1);
	pr_debug("%s: %s: enabled controller clock\n",
			mmc_hostname(host->mmc), __func__);
	goto out;

disable_pclk:
	if (!IS_ERR(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2601
2602
2603
/*
 * sdhci_msm_prepare_clocks - turn all SDHC clocks on or off
 * @host: SDHCI host
 * @enable: true to enable the clocks, false to disable them
 *
 * Enable order: bus vote, controller clocks (iface + core via
 * sdhci_msm_enable_controller_clock()), then the optional bus_clk,
 * ff_clk and sleep_clk.  Disable order is the reverse, after first
 * gating SDCLK in the standard clock-control register.  On an enable
 * failure the already-enabled clocks are unwound through the goto
 * chain below.  Returns 0 on success or a negative errno.
 */
static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	if (enable && !atomic_read(&msm_host->clks_on)) {
		pr_debug("%s: request to enable clocks\n",
				mmc_hostname(host->mmc));

		/*
		 * The bus-width or the clock rate might have changed
		 * after controller clocks are enabled, update bus vote
		 * in such case.
		 */
		if (atomic_read(&msm_host->controller_clock))
			sdhci_msm_bus_voting(host, 1);

		/* No-op if the controller clock is already on */
		rc = sdhci_msm_enable_controller_clock(host);
		if (rc)
			goto remove_vote;

		/* Optional clocks: a clk handle may be an ERR pointer */
		if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
			rc = clk_prepare_enable(msm_host->bus_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_controller_clk;
			}
		}
		if (!IS_ERR(msm_host->ff_clk)) {
			rc = clk_prepare_enable(msm_host->ff_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_bus_clk;
			}
		}
		if (!IS_ERR(msm_host->sleep_clk)) {
			rc = clk_prepare_enable(msm_host->sleep_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_ff_clk;
			}
		}
		/* Full barrier: order the clock operations above */
		mb();

	} else if (!enable && atomic_read(&msm_host->clks_on)) {
		/* Gate SDCLK before removing the source clocks */
		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
		mb();
		/*
		 * During 1.8V signal switching the clock source must
		 * still be ON as it requires accessing SDHC
		 * registers (SDHCi host control2 register bit 3 must
		 * be written and polled after stopping the SDCLK).
		 */
		if (host->mmc->card_clock_off)
			return 0;
		pr_debug("%s: request to disable clocks\n",
				mmc_hostname(host->mmc));
		if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
			clk_disable_unprepare(msm_host->sleep_clk);
		if (!IS_ERR_OR_NULL(msm_host->ff_clk))
			clk_disable_unprepare(msm_host->ff_clk);
		clk_disable_unprepare(msm_host->clk);
		if (!IS_ERR(msm_host->pclk))
			clk_disable_unprepare(msm_host->pclk);
		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
			clk_disable_unprepare(msm_host->bus_clk);

		atomic_set(&msm_host->controller_clock, 0);
		sdhci_msm_bus_voting(host, 0);
	}
	atomic_set(&msm_host->clks_on, enable);
	goto out;
disable_ff_clk:
	if (!IS_ERR_OR_NULL(msm_host->ff_clk))
		clk_disable_unprepare(msm_host->ff_clk);
disable_bus_clk:
	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
disable_controller_clk:
	if (!IS_ERR_OR_NULL(msm_host->clk))
		clk_disable_unprepare(msm_host->clk);
	if (!IS_ERR_OR_NULL(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
	atomic_set(&msm_host->controller_clock, 0);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2698
/*
 * sdhci_msm_set_clock - set the SD/eMMC bus clock frequency
 * @host: SDHCI host
 * @clock: requested card clock in Hz; 0 means "clocks off"
 *
 * For clock==0 the power-save gating is disabled and all clocks are
 * turned off.  Otherwise the clocks are (re)enabled, the power-save
 * bit is toggled according to mmc_host_may_gate_card(), the vendor
 * MCLK selection is programmed for the current timing mode (HS400
 * uses the divided MCLK/2 plus HC_SELECT_IN), the core clock rate is
 * updated if needed, and finally the standard SDHCI clock setup runs.
 */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int rc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios curr_ios = host->mmc->ios;
	u32 sup_clock, ddr_clock, dll_lock;
	bool curr_pwrsave;

	if (!clock) {
		/*
		 * disable pwrsave to ensure clock is not auto-gated until
		 * the rate is >400KHz (initialization complete).
		 */
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			~CORE_CLK_PWRSAVE, host->ioaddr + CORE_VENDOR_SPEC);
		sdhci_msm_prepare_clocks(host, false);
		host->clock = clock;
		goto out;
	}

	rc = sdhci_msm_prepare_clocks(host, true);
	if (rc)
		goto out;

	/* Enable pwrsave past init only if the card allows clock gating */
	curr_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			  CORE_CLK_PWRSAVE);
	if ((clock > 400000) &&
	    !curr_pwrsave && mmc_host_may_gate_card(host->mmc->card))
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				| CORE_CLK_PWRSAVE,
				host->ioaddr + CORE_VENDOR_SPEC);
	/*
	 * Disable pwrsave for a newly added card if doesn't allow clock
	 * gating.
	 */
	else if (curr_pwrsave && !mmc_host_may_gate_card(host->mmc->card))
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE,
				host->ioaddr + CORE_VENDOR_SPEC);

	sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
	if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
		(curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
		(curr_ios.timing == MMC_TIMING_MMC_HS400)) {
		/*
		 * The SDHC requires internal clock frequency to be double the
		 * actual clock that will be set for DDR mode. The controller
		 * uses the faster clock(100/400MHz) for some of its parts and
		 * send the actual required clock (50/200MHz) to the card.
		 */
		ddr_clock = clock * 2;
		sup_clock = sdhci_msm_get_sup_clk_rate(host,
				ddr_clock);
	}

	/*
	 * In general all timing modes are controlled via UHS mode select in
	 * Host Control2 register. eMMC specific HS200/HS400 doesn't have
	 * their respective modes defined here, hence we use these values.
	 *
	 * HS200 - SDR104 (Since they both are equivalent in functionality)
	 * HS400 - This involves multiple configurations
	 *		Initially SDR104 - when tuning is required as HS200
	 *		Then when switching to DDR @ 400MHz (HS400) we use
	 *		the vendor specific HC_SELECT_IN to control the mode.
	 *
	 * In addition to controlling the modes we also need to select the
	 * correct input clock for DLL depending on the mode.
	 *
	 * HS400 - divided clock (free running MCLK/2)
	 * All other modes - default (free running MCLK)
	 */
	if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
		/* Select the divided clock (free running MCLK/2) */
		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_HS400),
				host->ioaddr + CORE_VENDOR_SPEC);
		/*
		 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
		 * register
		 */
		if ((msm_host->tuning_done ||
			(mmc_card_strobe(msm_host->mmc->card) &&
			 msm_host->enhanced_strobe)) &&
			!msm_host->calibration_done) {
			/*
			 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
			 * field in VENDOR_SPEC_FUNC
			 */
			writel_relaxed((readl_relaxed(host->ioaddr + \
					CORE_VENDOR_SPEC)
					| CORE_HC_SELECT_IN_HS400
					| CORE_HC_SELECT_IN_EN),
					host->ioaddr + CORE_VENDOR_SPEC);
		}
		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
			/*
			 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
			 * CORE_DLL_STATUS to be set. This should get set
			 * with in 15 us at 200 MHz.
			 */
			rc = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
					dll_lock, (dll_lock & (CORE_DLL_LOCK |
					CORE_DDR_DLL_LOCK)), 10, 1000);
			if (rc == -ETIMEDOUT)
				pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
						mmc_hostname(host->mmc),
						dll_lock);
		}
	} else {
		if (!msm_host->use_cdclp533)
			/* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
			writel_relaxed((readl_relaxed(host->ioaddr +
					CORE_VENDOR_SPEC3) & ~CORE_PWRSAVE_DLL),
					host->ioaddr + CORE_VENDOR_SPEC3);

		/* Select the default clock (free running MCLK) */
		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_DFLT),
				host->ioaddr + CORE_VENDOR_SPEC);

		/*
		 * Disable HC_SELECT_IN to be able to use the UHS mode select
		 * configuration from Host Control2 register for all other
		 * modes.
		 *
		 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
		 * in VENDOR_SPEC_FUNC
		 */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_SELECT_IN_EN
				& ~CORE_HC_SELECT_IN_MASK),
				host->ioaddr + CORE_VENDOR_SPEC);
	}
	/* Full barrier: complete the vendor-register writes above */
	mb();

	if (sup_clock != msm_host->clk_rate) {
		pr_debug("%s: %s: setting clk rate to %u\n",
				mmc_hostname(host->mmc), __func__, sup_clock);
		rc = clk_set_rate(msm_host->clk, sup_clock);
		if (rc) {
			pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
					mmc_hostname(host->mmc), __func__,
					sup_clock, rc);
			goto out;
		}
		msm_host->clk_rate = sup_clock;
		host->clock = clock;
		/*
		 * Update the bus vote in case of frequency change due to
		 * clock scaling.
		 */
		sdhci_msm_bus_voting(host, 1);
	}
out:
	sdhci_set_clock(host, clock);
}
2859
Sahitya Tummala14613432013-03-21 11:13:25 +05302860static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
2861 unsigned int uhs)
2862{
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002863 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2864 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala14613432013-03-21 11:13:25 +05302865 u16 ctrl_2;
2866
2867 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2868 /* Select Bus Speed Mode for host */
2869 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08002870 if ((uhs == MMC_TIMING_MMC_HS400) ||
2871 (uhs == MMC_TIMING_MMC_HS200) ||
2872 (uhs == MMC_TIMING_UHS_SDR104))
Sahitya Tummala14613432013-03-21 11:13:25 +05302873 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2874 else if (uhs == MMC_TIMING_UHS_SDR12)
2875 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2876 else if (uhs == MMC_TIMING_UHS_SDR25)
2877 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2878 else if (uhs == MMC_TIMING_UHS_SDR50)
2879 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08002880 else if ((uhs == MMC_TIMING_UHS_DDR50) ||
2881 (uhs == MMC_TIMING_MMC_DDR52))
Sahitya Tummala14613432013-03-21 11:13:25 +05302882 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302883 /*
2884 * When clock frquency is less than 100MHz, the feedback clock must be
2885 * provided and DLL must not be used so that tuning can be skipped. To
2886 * provide feedback clock, the mode selection can be any value less
2887 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
2888 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002889 if (host->clock <= CORE_FREQ_100MHZ) {
2890 if ((uhs == MMC_TIMING_MMC_HS400) ||
2891 (uhs == MMC_TIMING_MMC_HS200) ||
2892 (uhs == MMC_TIMING_UHS_SDR104))
2893 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302894
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002895 /*
2896 * Make sure DLL is disabled when not required
2897 *
2898 * Write 1 to DLL_RST bit of DLL_CONFIG register
2899 */
2900 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
2901 | CORE_DLL_RST),
2902 host->ioaddr + CORE_DLL_CONFIG);
2903
2904 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
2905 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
2906 | CORE_DLL_PDN),
2907 host->ioaddr + CORE_DLL_CONFIG);
2908 mb();
2909
2910 /*
2911 * The DLL needs to be restored and CDCLP533 recalibrated
2912 * when the clock frequency is set back to 400MHz.
2913 */
2914 msm_host->calibration_done = false;
2915 }
2916
2917 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
2918 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala14613432013-03-21 11:13:25 +05302919 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2920
2921}
2922
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08002923#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03002924#define DRV_NAME "cmdq-host"
2925static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_msm_host *msm_host)
2926{
2927 int i = 0;
2928 struct cmdq_host *cq_host = mmc_cmdq_private(msm_host->mmc);
2929 u32 version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
2930 u16 minor = version & CORE_VERSION_TARGET_MASK;
2931 /* registers offset changed starting from 4.2.0 */
2932 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
2933
2934 pr_err("---- Debug RAM dump ----\n");
2935 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
2936 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
2937 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
2938
2939 while (i < 16) {
2940 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
2941 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
2942 i++;
2943 }
2944 pr_err("-------------------------\n");
2945}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05302946
/*
 * sdhci_msm_dump_vendor_regs - dump vendor-specific debug state
 * @host: SDHCI host
 *
 * Logs the MCI status/count registers, DLL config/status, vendor-spec
 * registers, the CMDQ debug RAM (when CMDQ is present), and then
 * sweeps the controller test bus, reading one debug word per test-bus
 * selection.  Intended for error paths; output goes to the kernel log.
 */
void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int tbsel, tbsel2;
	int i, index = 0;
	u32 test_bus_val = 0;
	u32 debug_reg[MAX_TEST_BUS] = {0};

	pr_info("----------- VENDOR REGISTER DUMP -----------\n");
	if (host->cq_host)
		sdhci_msm_cmdq_dump_debug_ram(msm_host);

	pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
		readl_relaxed(msm_host->core_mem + CORE_MCI_DATA_CNT),
		readl_relaxed(msm_host->core_mem + CORE_MCI_FIFO_CNT),
		readl_relaxed(msm_host->core_mem + CORE_MCI_STATUS));
	pr_info("DLL cfg:  0x%08x | DLL sts:  0x%08x | SDCC ver: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_DLL_CONFIG),
		readl_relaxed(host->ioaddr + CORE_DLL_STATUS),
		readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION));
	pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC),
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
	pr_info("Vndr func2: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2));

	/*
	 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
	 * of CORE_TESTBUS_CONFIG register.
	 *
	 * To select test bus 0 to 7 use tbsel and to select any test bus
	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
	 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
	 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
	 */
	for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
		for (tbsel = 0; tbsel < 8; tbsel++) {
			if (index >= MAX_TEST_BUS)
				break;
			test_bus_val = (tbsel2 << CORE_TESTBUS_SEL2_BIT) |
					tbsel | CORE_TESTBUS_ENA;
			writel_relaxed(test_bus_val,
				msm_host->core_mem + CORE_TESTBUS_CONFIG);
			debug_reg[index++] = readl_relaxed(msm_host->core_mem +
							CORE_SDCC_DEBUG_REG);
		}
	}
	/* Print the captured words, four per line */
	for (i = 0; i < MAX_TEST_BUS; i = i + 4)
		pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, i + 3, debug_reg[i], debug_reg[i+1],
				debug_reg[i+2], debug_reg[i+3]);
}
3001
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303002/*
3003 * sdhci_msm_enhanced_strobe_mask :-
3004 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3005 * SW should write 3 to
3006 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3007 * The default reset value of this register is 2.
3008 */
3009static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3010{
3011 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3012 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3013
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303014 if (!msm_host->enhanced_strobe ||
3015 !mmc_card_strobe(msm_host->mmc->card)) {
3016 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303017 mmc_hostname(host->mmc));
3018 return;
3019 }
3020
3021 if (set) {
3022 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
3023 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3024 host->ioaddr + CORE_VENDOR_SPEC3);
3025 } else {
3026 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
3027 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3028 host->ioaddr + CORE_VENDOR_SPEC3);
3029 }
3030}
3031
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003032static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3033{
3034 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3035 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3036
3037 if (set) {
3038 writel_relaxed(CORE_TESTBUS_ENA,
3039 msm_host->core_mem + CORE_TESTBUS_CONFIG);
3040 } else {
3041 u32 value;
3042
3043 value = readl_relaxed(msm_host->core_mem + CORE_TESTBUS_CONFIG);
3044 value &= ~CORE_TESTBUS_ENA;
3045 writel_relaxed(value, msm_host->core_mem + CORE_TESTBUS_CONFIG);
3046 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303047}
3048
Dov Levenglick9c575e22015-07-20 09:30:52 +03003049static void sdhci_msm_detect(struct sdhci_host *host, bool detected)
3050{
3051 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3052 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3053 struct mmc_host *mmc = msm_host->mmc;
3054 struct mmc_card *card = mmc->card;
3055
3056 if (detected && mmc_card_sdio(card))
3057 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3058 else
3059 mmc->pm_caps &= ~MMC_PM_KEEP_POWER;
3060}
3061
/*
 * sdhci_msm_reset_workaround - request controller SW reset with a
 * wait-idle fallback
 * @host: SDHCI host
 * @enable: non-zero to request the reset, zero to clear the
 *	    wait-idle-disable bit afterwards
 *
 * On enable, sets HC_SW_RST_REQ and polls (up to 10000 * 10us) for
 * the hardware to clear it.  If it never clears, sets
 * HC_SW_RST_WAIT_IDLE_DIS so the controller resets without waiting
 * for pending AXI transfers, and records the time in reset_wa_t.
 * On disable, clears HC_SW_RST_WAIT_IDLE_DIS again.
 */
void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
{
	u32 vendor_func2;
	unsigned long timeout;

	vendor_func2 = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);

	if (enable) {
		writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
				CORE_VENDOR_SPEC_FUNC2);
		/* Poll up to ~100ms for hardware to ack the reset request */
		timeout = 10000;
		while (readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2) &
				HC_SW_RST_REQ) {
			if (timeout == 0) {
				pr_info("%s: Applying wait idle disable workaround\n",
					mmc_hostname(host->mmc));
				/*
				 * Apply the reset workaround to not wait for
				 * pending data transfers on AXI before
				 * resetting the controller. This could be
				 * risky if the transfers were stuck on the
				 * AXI bus.
				 */
				vendor_func2 = readl_relaxed(host->ioaddr +
						CORE_VENDOR_SPEC_FUNC2);
				writel_relaxed(vendor_func2 |
					HC_SW_RST_WAIT_IDLE_DIS,
					host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
				/* Remember when the workaround was applied */
				host->reset_wa_t = ktime_get();
				return;
			}
			timeout--;
			udelay(10);
		}
		pr_info("%s: waiting for SW_RST_REQ is successful\n",
				mmc_hostname(host->mmc));
	} else {
		writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
				host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
	}
}
3103
Gilad Broner44445992015-09-29 16:05:39 +03003104static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3105{
3106 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
3107 container_of(work, struct sdhci_msm_pm_qos_irq, unvote_work);
3108
3109 if (atomic_read(&pm_qos_irq->counter))
3110 return;
3111
3112 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3113 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3114}
3115
3116void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3117{
3118 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3119 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3120 struct sdhci_msm_pm_qos_latency *latency =
3121 &msm_host->pdata->pm_qos_data.irq_latency;
3122 int counter;
3123
3124 if (!msm_host->pm_qos_irq.enabled)
3125 return;
3126
3127 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3128 /* Make sure to update the voting in case power policy has changed */
3129 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3130 && counter > 1)
3131 return;
3132
3133 cancel_work_sync(&msm_host->pm_qos_irq.unvote_work);
3134 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3135 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3136 msm_host->pm_qos_irq.latency);
3137}
3138
/*
 * sdhci_msm_pm_qos_irq_unvote - drop one PM QoS vote for the host IRQ
 * @host: SDHCI host
 * @async: true to release via the unvote work item, false to release
 *	   synchronously
 *
 * Decrements the vote counter; when it reaches zero the QoS request
 * is returned to the default latency, either inline or deferred.
 * Warns (and bails) on an unbalanced unvote when the counter is
 * already zero.
 */
void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int counter;

	if (!msm_host->pm_qos_irq.enabled)
		return;

	/*
	 * NOTE(review): the read and the decrement are two separate atomic
	 * ops, so a concurrent unvote could still underflow between them —
	 * confirm callers serialize unvoting if that matters.
	 */
	if (atomic_read(&msm_host->pm_qos_irq.counter)) {
		counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
	} else {
		WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
		return;
	}

	/* Other votes still outstanding; keep the current request */
	if (counter)
		return;

	if (async) {
		schedule_work(&msm_host->pm_qos_irq.unvote_work);
		return;
	}

	msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
	pm_qos_update_request(&msm_host->pm_qos_irq.req,
			msm_host->pm_qos_irq.latency);
}
3167
Gilad Broner68c54562015-09-20 11:59:46 +03003168static ssize_t
3169sdhci_msm_pm_qos_irq_show(struct device *dev,
3170 struct device_attribute *attr, char *buf)
3171{
3172 struct sdhci_host *host = dev_get_drvdata(dev);
3173 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3174 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3175 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3176
3177 return snprintf(buf, PAGE_SIZE,
3178 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3179 irq->enabled, atomic_read(&irq->counter), irq->latency);
3180}
3181
3182static ssize_t
3183sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3184 struct device_attribute *attr, char *buf)
3185{
3186 struct sdhci_host *host = dev_get_drvdata(dev);
3187 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3188 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3189
3190 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3191}
3192
3193static ssize_t
3194sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3195 struct device_attribute *attr, const char *buf, size_t count)
3196{
3197 struct sdhci_host *host = dev_get_drvdata(dev);
3198 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3199 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3200 uint32_t value;
3201 bool enable;
3202 int ret;
3203
3204 ret = kstrtou32(buf, 0, &value);
3205 if (ret)
3206 goto out;
3207 enable = !!value;
3208
3209 if (enable == msm_host->pm_qos_irq.enabled)
3210 goto out;
3211
3212 msm_host->pm_qos_irq.enabled = enable;
3213 if (!enable) {
3214 cancel_work_sync(&msm_host->pm_qos_irq.unvote_work);
3215 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3216 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3217 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3218 msm_host->pm_qos_irq.latency);
3219 }
3220
3221out:
3222 return count;
3223}
3224
/*
 * sdhci_msm_pm_qos_irq_init - set up the per-IRQ PM QoS machinery
 * @host: SDHCI host
 *
 * One-time initialization (guarded by pm_qos_irq.enabled): configures
 * the QoS request affinity (specific IRQ or a CPU from DT), registers
 * the request with the performance-mode latency, and creates the
 * "pm_qos_irq_enable" and "pm_qos_irq_status" sysfs attributes.
 * Sysfs creation failures are logged but not fatal.
 */
void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *irq_latency;
	int ret;

	/* Nothing to do if DT provided no IRQ latency data */
	if (!msm_host->pdata->pm_qos_data.irq_valid)
		return;

	/* Initialize only once as this gets called per partition */
	if (msm_host->pm_qos_irq.enabled)
		return;

	atomic_set(&msm_host->pm_qos_irq.counter, 0);
	msm_host->pm_qos_irq.req.type =
		msm_host->pdata->pm_qos_data.irq_req_type;
	/* Affine the request either to the host IRQ or to a fixed CPU */
	if (msm_host->pm_qos_irq.req.type == PM_QOS_REQ_AFFINE_IRQ)
		msm_host->pm_qos_irq.req.irq = host->irq;
	else
		cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
			cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));

	INIT_WORK(&msm_host->pm_qos_irq.unvote_work,
		sdhci_msm_pm_qos_irq_unvote_work);
	/* For initialization phase, set the performance latency */
	irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
	msm_host->pm_qos_irq.latency =
		irq_latency->latency[SDHCI_PERFORMANCE_MODE];
	pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
			msm_host->pm_qos_irq.latency);
	msm_host->pm_qos_irq.enabled = true;

	/* sysfs */
	msm_host->pm_qos_irq.enable_attr.show =
		sdhci_msm_pm_qos_irq_enable_show;
	msm_host->pm_qos_irq.enable_attr.store =
		sdhci_msm_pm_qos_irq_enable_store;
	sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
	msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
	msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_irq.enable_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
			__func__, ret);

	msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
	msm_host->pm_qos_irq.status_attr.store = NULL;
	sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
	msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
	msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_irq.status_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
			__func__, ret);
}
3283
3284static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3285 struct device_attribute *attr, char *buf)
3286{
3287 struct sdhci_host *host = dev_get_drvdata(dev);
3288 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3289 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3290 struct sdhci_msm_pm_qos_group *group;
3291 int i;
3292 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3293 int offset = 0;
3294
3295 for (i = 0; i < nr_groups; i++) {
3296 group = &msm_host->pm_qos[i];
3297 offset += snprintf(&buf[offset], PAGE_SIZE,
3298 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
3299 i, group->req.cpus_affine.bits[0],
3300 msm_host->pm_qos_group_enable,
3301 atomic_read(&group->counter),
3302 group->latency);
3303 }
3304
3305 return offset;
3306}
3307
3308static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
3309 struct device_attribute *attr, char *buf)
3310{
3311 struct sdhci_host *host = dev_get_drvdata(dev);
3312 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3313 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3314
3315 return snprintf(buf, PAGE_SIZE, "%s\n",
3316 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
3317}
3318
3319static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
3320 struct device_attribute *attr, const char *buf, size_t count)
3321{
3322 struct sdhci_host *host = dev_get_drvdata(dev);
3323 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3324 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3325 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3326 uint32_t value;
3327 bool enable;
3328 int ret;
3329 int i;
3330
3331 ret = kstrtou32(buf, 0, &value);
3332 if (ret)
3333 goto out;
3334 enable = !!value;
3335
3336 if (enable == msm_host->pm_qos_group_enable)
3337 goto out;
3338
3339 msm_host->pm_qos_group_enable = enable;
3340 if (!enable) {
3341 for (i = 0; i < nr_groups; i++) {
3342 cancel_work_sync(&msm_host->pm_qos[i].unvote_work);
3343 atomic_set(&msm_host->pm_qos[i].counter, 0);
3344 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
3345 pm_qos_update_request(&msm_host->pm_qos[i].req,
3346 msm_host->pm_qos[i].latency);
3347 }
3348 }
3349
3350out:
3351 return count;
Gilad Broner44445992015-09-29 16:05:39 +03003352}
3353
3354static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3355{
3356 int i;
3357 struct sdhci_msm_cpu_group_map *map =
3358 &msm_host->pdata->pm_qos_data.cpu_group_map;
3359
3360 if (cpu < 0)
3361 goto not_found;
3362
3363 for (i = 0; i < map->nr_groups; i++)
3364 if (cpumask_test_cpu(cpu, &map->mask[i]))
3365 return i;
3366
3367not_found:
3368 return -EINVAL;
3369}
3370
/*
 * sdhci_msm_pm_qos_cpu_vote - take a PM QoS vote for a CPU's group
 * @host: SDHCI host
 * @latency: per-policy latency table to apply
 * @cpu: CPU whose group should be voted for (negative = none)
 *
 * Looks up the group for @cpu, bumps its vote counter and applies the
 * latency for the current power policy.  The update is skipped when
 * another vote is already active with the same latency.
 */
void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency, int cpu)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int group = sdhci_msm_get_cpu_group(msm_host, cpu);
	struct sdhci_msm_pm_qos_group *pm_qos_group;
	int counter;

	if (!msm_host->pm_qos_group_enable || group < 0)
		return;

	pm_qos_group = &msm_host->pm_qos[group];
	counter = atomic_inc_return(&pm_qos_group->counter);

	/* Make sure to update the voting in case power policy has changed */
	if (pm_qos_group->latency == latency->latency[host->power_policy]
		&& counter > 1)
		return;

	/* A pending deferred unvote must not undo this vote */
	cancel_work_sync(&pm_qos_group->unvote_work);

	pm_qos_group->latency = latency->latency[host->power_policy];
	pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
}
3396
3397static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3398{
3399 struct sdhci_msm_pm_qos_group *group =
3400 container_of(work, struct sdhci_msm_pm_qos_group, unvote_work);
3401
3402 if (atomic_read(&group->counter))
3403 return;
3404
3405 group->latency = PM_QOS_DEFAULT_VALUE;
3406 pm_qos_update_request(&group->req, group->latency);
3407}
3408
/*
 * sdhci_msm_pm_qos_cpu_unvote - drop one PM QoS vote for a CPU's group
 * @host: SDHCI host
 * @cpu: CPU whose group vote should be dropped (negative = none)
 * @async: true to release via the unvote work item, false inline
 *
 * Returns true when the last vote was dropped and the QoS request was
 * (or will be) released, false otherwise.
 *
 * NOTE(review): unlike sdhci_msm_pm_qos_irq_unvote() there is no guard
 * against decrementing a zero counter here — confirm callers keep
 * vote/unvote balanced.
 */
bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int group = sdhci_msm_get_cpu_group(msm_host, cpu);

	/* Short-circuit keeps the decrement from running for group < 0 */
	if (!msm_host->pm_qos_group_enable || group < 0 ||
		atomic_dec_return(&msm_host->pm_qos[group].counter))
		return false;

	if (async) {
		schedule_work(&msm_host->pm_qos[group].unvote_work);
		return true;
	}

	msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
	pm_qos_update_request(&msm_host->pm_qos[group].req,
				msm_host->pm_qos[group].latency);
	return true;
}
3429
3430void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
3431 struct sdhci_msm_pm_qos_latency *latency)
3432{
3433 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3434 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3435 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3436 struct sdhci_msm_pm_qos_group *group;
3437 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03003438 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003439
3440 if (msm_host->pm_qos_group_enable)
3441 return;
3442
3443 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
3444 GFP_KERNEL);
3445 if (!msm_host->pm_qos)
3446 return;
3447
3448 for (i = 0; i < nr_groups; i++) {
3449 group = &msm_host->pm_qos[i];
3450 INIT_WORK(&group->unvote_work,
3451 sdhci_msm_pm_qos_cpu_unvote_work);
3452 atomic_set(&group->counter, 0);
3453 group->req.type = PM_QOS_REQ_AFFINE_CORES;
3454 cpumask_copy(&group->req.cpus_affine,
3455 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
3456 /* For initialization phase, set the performance mode latency */
3457 group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
3458 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
3459 group->latency);
3460 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
3461 __func__, i,
3462 group->req.cpus_affine.bits[0],
3463 group->latency,
3464 &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
3465 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03003466 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03003467 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003468
3469 /* sysfs */
3470 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
3471 msm_host->pm_qos_group_status_attr.store = NULL;
3472 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
3473 msm_host->pm_qos_group_status_attr.attr.name =
3474 "pm_qos_cpu_groups_status";
3475 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
3476 ret = device_create_file(&msm_host->pdev->dev,
3477 &msm_host->pm_qos_group_status_attr);
3478 if (ret)
3479 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
3480 __func__, ret);
3481 msm_host->pm_qos_group_enable_attr.show =
3482 sdhci_msm_pm_qos_group_enable_show;
3483 msm_host->pm_qos_group_enable_attr.store =
3484 sdhci_msm_pm_qos_group_enable_store;
3485 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
3486 msm_host->pm_qos_group_enable_attr.attr.name =
3487 "pm_qos_cpu_groups_enable";
3488 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
3489 ret = device_create_file(&msm_host->pdev->dev,
3490 &msm_host->pm_qos_group_enable_attr);
3491 if (ret)
3492 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
3493 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03003494}
3495
/*
 * Pre-request hook: take the IRQ PM QoS vote and a CPU-group vote for the
 * CPU issuing this request.  If the issuing CPU belongs to a different
 * group than the one voted for previously, the old group's vote is dropped
 * synchronously before voting for the new group.
 */
static void sdhci_msm_pre_req(struct sdhci_host *host,
		struct mmc_request *mmc_req)
{
	int cpu;
	int group;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int prev_group = sdhci_msm_get_cpu_group(msm_host,
			msm_host->pm_qos_prev_cpu);

	sdhci_msm_pm_qos_irq_vote(host);

	/*
	 * Snapshot the current CPU; put_cpu() re-enables preemption right
	 * away, so the task may migrate afterwards — the group vote is
	 * best-effort by design.
	 */
	cpu = get_cpu();
	put_cpu();
	group = sdhci_msm_get_cpu_group(msm_host, cpu);
	if (group < 0)
		return;

	if (group != prev_group && prev_group >= 0) {
		sdhci_msm_pm_qos_cpu_unvote(host,
				msm_host->pm_qos_prev_cpu, false);
		prev_group = -1; /* make sure to vote for new group */
	}

	if (prev_group < 0) {
		sdhci_msm_pm_qos_cpu_vote(host,
				msm_host->pdata->pm_qos_data.latency, cpu);
		msm_host->pm_qos_prev_cpu = cpu;
	}
}
3526
3527static void sdhci_msm_post_req(struct sdhci_host *host,
3528 struct mmc_request *mmc_req)
3529{
3530 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3531 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3532
3533 sdhci_msm_pm_qos_irq_unvote(host, false);
3534
3535 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
3536 msm_host->pm_qos_prev_cpu = -1;
3537}
3538
3539static void sdhci_msm_init(struct sdhci_host *host)
3540{
3541 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3542 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3543
3544 sdhci_msm_pm_qos_irq_init(host);
3545
3546 if (msm_host->pdata->pm_qos_data.legacy_valid)
3547 sdhci_msm_pm_qos_cpu_init(host,
3548 msm_host->pdata->pm_qos_data.latency);
3549}
3550
/*
 * SDHCI core callbacks implemented (or overridden) by the MSM platform
 * driver.  Generic sdhci_* entries fall back to the core's default
 * implementations; sdhci_msm_* entries add controller-specific behavior.
 */
static struct sdhci_ops sdhci_msm_ops = {
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.check_power_status = sdhci_msm_check_power_status,
	.platform_execute_tuning = sdhci_msm_execute_tuning,
	.enhanced_strobe = sdhci_msm_enhanced_strobe,
	.toggle_cdr = sdhci_msm_toggle_cdr,
	.get_max_segments = sdhci_msm_max_segs,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
	.enable_controller_clock = sdhci_msm_enable_controller_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
	.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
	.detect = sdhci_msm_detect,
	.reset_workaround = sdhci_msm_reset_workaround,
	.init = sdhci_msm_init,
	.pre_req = sdhci_msm_pre_req,
	.post_req = sdhci_msm_post_req,
};
3574
/*
 * Read the SDCC core major/minor version from CORE_MCI_VERSION and use it
 * to (a) adjust the advertised SDHCI capability bits and (b) enable
 * version-specific workaround flags on @msm_host.  The final capability
 * value is written to the vendor-specific capabilities override register
 * and cached in msm_host->caps_0.
 */
static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
			struct sdhci_host *host)
{
	u32 version, caps = 0;
	u16 minor;
	u8 major;
	u32 val;

	version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
	major = (version & CORE_VERSION_MAJOR_MASK) >>
			CORE_VERSION_MAJOR_SHIFT;
	minor = version & CORE_VERSION_TARGET_MASK;

	/* Start from the capabilities the controller itself advertises */
	caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);

	/*
	 * Starting with SDCC 5 controller (core major version = 1)
	 * controller won't advertise 3.0v, 1.8v and 8-bit features
	 * except for some targets.
	 */
	if (major >= 1 && minor != 0x11 && minor != 0x12) {
		struct sdhci_msm_reg_data *vdd_io_reg;
		/*
		 * Enable 1.8V support capability on controllers that
		 * support dual voltage
		 */
		vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
		if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
			caps |= CORE_3_0V_SUPPORT;
		if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
			caps |= CORE_8_BIT_SUPPORT;
	}

	/*
	 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
	 * on 8992 (minor 0x3e) as a workaround to reset for data stuck issue.
	 */
	if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
		host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
		val = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
		writel_relaxed((val | CORE_ONE_MID_EN),
			host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
	}
	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if ((major == 1) && (minor < 0x34))
		msm_host->use_cdclp533 = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x42 and later
	 * will require additional steps when resetting DLL.
	 * It also supports HS400 enhanced strobe mode.
	 */
	if ((major == 1) && (minor >= 0x42)) {
		msm_host->use_updated_dll_reset = true;
		msm_host->enhanced_strobe = true;
	}

	/*
	 * SDCC 5 controller with major version 1 and minor version 0x42,
	 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
	 * gating cannot guarantee MCLK timing requirement i.e.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming.
	 */
	if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
				(minor == 0x49)))
		msm_host->use_14lpp_dll = true;

	/* Fake 3.0V support for SDIO devices which requires such voltage */
	if (msm_host->pdata->core_3_0v_support) {
		caps |= CORE_3_0V_SUPPORT;
		writel_relaxed(
			(readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES) |
			caps), host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
	}

	if ((major == 1) && (minor >= 0x49))
		msm_host->rclk_delay_fix = true;
	/*
	 * Mask 64-bit support for controller with 32-bit address bus so that
	 * smaller descriptor size will be used and improve memory consumption.
	 */
	if (!msm_host->pdata->largeaddressbus)
		caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;

	writel_relaxed(caps, host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
	/* keep track of the value in SDHCI_CAPABILITIES */
	msm_host->caps_0 = caps;
}
3670
#ifdef CONFIG_MMC_CQ_HCI
/*
 * Attach a command-queue (CMDQ) engine to this host and advertise
 * MMC_CAP2_CMD_QUEUE.  If cmdq_pltfm_init() fails, the error is logged at
 * debug level, cq_host is cleared, and the driver falls back to regular
 * (non-CQ) operation.
 */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	host->cq_host = cmdq_pltfm_init(pdev);
	if (IS_ERR(host->cq_host)) {
		dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
			PTR_ERR(host->cq_host));
		host->cq_host = NULL;
	} else {
		msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
	}
}
#else
/* CMDQ support compiled out: stub so callers need no #ifdef */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{

}
#endif
3694
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003695static bool sdhci_msm_is_bootdevice(struct device *dev)
3696{
3697 if (strnstr(saved_command_line, "androidboot.bootdevice=",
3698 strlen(saved_command_line))) {
3699 char search_string[50];
3700
3701 snprintf(search_string, ARRAY_SIZE(search_string),
3702 "androidboot.bootdevice=%s", dev_name(dev));
3703 if (strnstr(saved_command_line, search_string,
3704 strlen(saved_command_line)))
3705 return true;
3706 else
3707 return false;
3708 }
3709
3710 /*
3711 * "androidboot.bootdevice=" argument is not present then
3712 * return true as we don't know the boot device anyways.
3713 */
3714 return true;
3715}
3716
Asutosh Das0ef24812012-12-18 16:14:02 +05303717static int sdhci_msm_probe(struct platform_device *pdev)
3718{
3719 struct sdhci_host *host;
3720 struct sdhci_pltfm_host *pltfm_host;
3721 struct sdhci_msm_host *msm_host;
3722 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003723 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07003724 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003725 u32 irq_status, irq_ctl;
Asutosh Das0ef24812012-12-18 16:14:02 +05303726
3727 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
3728 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
3729 GFP_KERNEL);
3730 if (!msm_host) {
3731 ret = -ENOMEM;
3732 goto out;
3733 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303734
3735 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
3736 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
3737 if (IS_ERR(host)) {
3738 ret = PTR_ERR(host);
3739 goto out;
3740 }
3741
3742 pltfm_host = sdhci_priv(host);
3743 pltfm_host->priv = msm_host;
3744 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05303745 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05303746
3747 /* Extract platform data */
3748 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003749 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
3750 if (ret < 0) {
3751 dev_err(&pdev->dev, "Failed to get slot index %d\n",
3752 ret);
3753 goto pltfm_free;
3754 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003755
3756 /* skip the probe if eMMC isn't a boot device */
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07003757 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
3758 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003759 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07003760 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003761
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003762 if (disable_slots & (1 << (ret - 1))) {
3763 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
3764 ret);
3765 ret = -ENODEV;
3766 goto pltfm_free;
3767 }
3768
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07003769 if (ret <= 2)
3770 sdhci_slot[ret-1] = msm_host;
3771
Dov Levenglickc9033ab2015-03-10 16:00:56 +02003772 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
3773 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05303774 if (!msm_host->pdata) {
3775 dev_err(&pdev->dev, "DT parsing error\n");
3776 goto pltfm_free;
3777 }
3778 } else {
3779 dev_err(&pdev->dev, "No device tree node\n");
3780 goto pltfm_free;
3781 }
3782
3783 /* Setup Clocks */
3784
3785 /* Setup SDCC bus voter clock. */
3786 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
3787 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
3788 /* Vote for max. clk rate for max. performance */
3789 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
3790 if (ret)
3791 goto pltfm_free;
3792 ret = clk_prepare_enable(msm_host->bus_clk);
3793 if (ret)
3794 goto pltfm_free;
3795 }
3796
3797 /* Setup main peripheral bus clock */
3798 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
3799 if (!IS_ERR(msm_host->pclk)) {
3800 ret = clk_prepare_enable(msm_host->pclk);
3801 if (ret)
3802 goto bus_clk_disable;
3803 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303804 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05303805
3806 /* Setup SDC MMC clock */
3807 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
3808 if (IS_ERR(msm_host->clk)) {
3809 ret = PTR_ERR(msm_host->clk);
3810 goto pclk_disable;
3811 }
3812
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303813 /* Set to the minimum supported clock frequency */
3814 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
3815 if (ret) {
3816 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05303817 goto pclk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303818 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05303819 ret = clk_prepare_enable(msm_host->clk);
3820 if (ret)
3821 goto pclk_disable;
3822
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303823 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303824 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303825
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003826 /* Setup CDC calibration fixed feedback clock */
3827 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
3828 if (!IS_ERR(msm_host->ff_clk)) {
3829 ret = clk_prepare_enable(msm_host->ff_clk);
3830 if (ret)
3831 goto clk_disable;
3832 }
3833
3834 /* Setup CDC calibration sleep clock */
3835 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
3836 if (!IS_ERR(msm_host->sleep_clk)) {
3837 ret = clk_prepare_enable(msm_host->sleep_clk);
3838 if (ret)
3839 goto ff_clk_disable;
3840 }
3841
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07003842 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
3843
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303844 ret = sdhci_msm_bus_register(msm_host, pdev);
3845 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003846 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303847
3848 if (msm_host->msm_bus_vote.client_handle)
3849 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
3850 sdhci_msm_bus_work);
3851 sdhci_msm_bus_voting(host, 1);
3852
Asutosh Das0ef24812012-12-18 16:14:02 +05303853 /* Setup regulators */
3854 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
3855 if (ret) {
3856 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303857 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05303858 }
3859
3860 /* Reset the core and Enable SDHC mode */
3861 core_memres = platform_get_resource_byname(pdev,
3862 IORESOURCE_MEM, "core_mem");
Asutosh Das890bdee2014-08-08 23:01:42 +05303863 if (!core_memres) {
3864 dev_err(&pdev->dev, "Failed to get iomem resource\n");
3865 goto vreg_deinit;
3866 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303867 msm_host->core_mem = devm_ioremap(&pdev->dev, core_memres->start,
3868 resource_size(core_memres));
3869
3870 if (!msm_host->core_mem) {
3871 dev_err(&pdev->dev, "Failed to remap registers\n");
3872 ret = -ENOMEM;
3873 goto vreg_deinit;
3874 }
3875
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303876 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003877 * Reset the vendor spec register to power on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303878 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003879 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
3880 host->ioaddr + CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303881
Asutosh Das0ef24812012-12-18 16:14:02 +05303882 /* Set HC_MODE_EN bit in HC_MODE register */
3883 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
3884
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003885 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
3886 writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_HC_MODE) |
3887 FF_CLK_SW_RST_DIS, msm_host->core_mem + CORE_HC_MODE);
3888
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303889 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07003890
3891 /*
3892 * Set the PAD_PWR_SWTICH_EN bit so that the PAD_PWR_SWITCH bit can
3893 * be used as required later on.
3894 */
3895 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
3896 CORE_IO_PAD_PWR_SWITCH_EN),
3897 host->ioaddr + CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05303898 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05303899 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
3900 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
3901 * interrupt in GIC (by registering the interrupt handler), we need to
3902 * ensure that any pending power irq interrupt status is acknowledged
3903 * otherwise power irq interrupt handler would be fired prematurely.
3904 */
3905 irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
3906 writel_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
3907 irq_ctl = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL);
3908 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
3909 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
3910 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
3911 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
3912 writel_relaxed(irq_ctl, (msm_host->core_mem + CORE_PWRCTL_CTL));
Krishna Konda46fd1432014-10-30 21:13:27 -07003913
Subhash Jadavani28137342013-05-14 17:46:43 +05303914 /*
3915 * Ensure that above writes are propogated before interrupt enablement
3916 * in GIC.
3917 */
3918 mb();
3919
3920 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05303921 * Following are the deviations from SDHC spec v3.0 -
3922 * 1. Card detection is handled using separate GPIO.
3923 * 2. Bus power control is handled by interacting with PMIC.
3924 */
3925 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
3926 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303927 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03003928 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303929 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d439442013-04-12 11:49:11 +05303930 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05303931 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05303932 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Asutosh Das0ef24812012-12-18 16:14:02 +05303933
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05303934 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
3935 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
3936
Stephen Boyd8dce5c62013-04-24 14:19:46 -07003937 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07003938 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
3939 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
3940 SDHCI_VENDOR_VER_SHIFT));
3941 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
3942 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
3943 /*
3944 * Add 40us delay in interrupt handler when
3945 * operating at initialization frequency(400KHz).
3946 */
3947 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
3948 /*
3949 * Set Software Reset for DAT line in Software
3950 * Reset Register (Bit 2).
3951 */
3952 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
3953 }
3954
Asutosh Das214b9662013-06-13 14:27:42 +05303955 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
3956
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07003957 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003958 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
3959 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05303960 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003961 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05303962 goto vreg_deinit;
3963 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003964 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05303965 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003966 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05303967 if (ret) {
3968 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003969 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05303970 goto vreg_deinit;
3971 }
3972
3973 /* Enable pwr irq interrupts */
3974 writel_relaxed(INT_MASK, (msm_host->core_mem + CORE_PWRCTL_MASK));
3975
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303976#ifdef CONFIG_MMC_CLKGATE
3977 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
3978 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
3979#endif
3980
Asutosh Das0ef24812012-12-18 16:14:02 +05303981 /* Set host capabilities */
3982 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
3983 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02003984 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05303985 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05303986 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08003987 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3988 msm_host->mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
Subhash Jadavani6d472b22013-05-29 15:52:10 +05303989 msm_host->mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08003990 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03003991 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05303992 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07003993 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03003994 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Asutosh Das0ef24812012-12-18 16:14:02 +05303995
3996 if (msm_host->pdata->nonremovable)
3997 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
3998
Guoping Yuf7c91332014-08-20 16:56:18 +08003999 if (msm_host->pdata->nonhotplug)
4000 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
4001
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05304002 init_completion(&msm_host->pwr_irq_completion);
4003
Sahitya Tummala581df132013-03-12 14:57:46 +05304004 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05304005 /*
4006 * Set up the card detect GPIO in active configuration before
4007 * configuring it as an IRQ. Otherwise, it can be in some
4008 * weird/inconsistent state resulting in flood of interrupts.
4009 */
4010 sdhci_msm_setup_pins(msm_host->pdata, true);
4011
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304012 /*
4013 * This delay is needed for stabilizing the card detect GPIO
4014 * line after changing the pull configs.
4015 */
4016 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304017 ret = mmc_gpio_request_cd(msm_host->mmc,
4018 msm_host->pdata->status_gpio, 0);
4019 if (ret) {
4020 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4021 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304022 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304023 }
4024 }
4025
Krishna Konda7feab352013-09-17 23:55:40 -07004026 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4027 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4028 host->dma_mask = DMA_BIT_MASK(64);
4029 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304030 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004031 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304032 host->dma_mask = DMA_BIT_MASK(32);
4033 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304034 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304035 } else {
4036 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4037 }
4038
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004039 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304040 ret = sdhci_add_host(host);
4041 if (ret) {
4042 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304043 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304044 }
4045
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004046 pm_runtime_set_active(&pdev->dev);
4047 pm_runtime_enable(&pdev->dev);
4048 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4049 pm_runtime_use_autosuspend(&pdev->dev);
4050
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304051 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4052 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4053 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4054 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4055 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4056 ret = device_create_file(&pdev->dev,
4057 &msm_host->msm_bus_vote.max_bus_bw);
4058 if (ret)
4059 goto remove_host;
4060
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304061 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4062 msm_host->polling.show = show_polling;
4063 msm_host->polling.store = store_polling;
4064 sysfs_attr_init(&msm_host->polling.attr);
4065 msm_host->polling.attr.name = "polling";
4066 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4067 ret = device_create_file(&pdev->dev, &msm_host->polling);
4068 if (ret)
4069 goto remove_max_bus_bw_file;
4070 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304071
4072 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
4073 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
4074 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
4075 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
4076 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
4077 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4078 if (ret) {
4079 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
4080 mmc_hostname(host->mmc), __func__, ret);
4081 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4082 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304083 /* Successful initialization */
4084 goto out;
4085
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304086remove_max_bus_bw_file:
4087 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05304088remove_host:
4089 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004090 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304091 sdhci_remove_host(host, dead);
4092vreg_deinit:
4093 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304094bus_unregister:
4095 if (msm_host->msm_bus_vote.client_handle)
4096 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4097 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004098sleep_clk_disable:
4099 if (!IS_ERR(msm_host->sleep_clk))
4100 clk_disable_unprepare(msm_host->sleep_clk);
4101ff_clk_disable:
4102 if (!IS_ERR(msm_host->ff_clk))
4103 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304104clk_disable:
4105 if (!IS_ERR(msm_host->clk))
4106 clk_disable_unprepare(msm_host->clk);
4107pclk_disable:
4108 if (!IS_ERR(msm_host->pclk))
4109 clk_disable_unprepare(msm_host->pclk);
4110bus_clk_disable:
4111 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
4112 clk_disable_unprepare(msm_host->bus_clk);
4113pltfm_free:
4114 sdhci_pltfm_free(pdev);
4115out:
4116 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
4117 return ret;
4118}
4119
/*
 * sdhci_msm_remove - platform-device removal handler.
 *
 * Unwinds what probe set up, in reverse-ish order: removes the sysfs
 * attributes, disables runtime PM, removes the SDHCI host (passing
 * "dead" if the controller registers read back all-ones, i.e. the
 * device is no longer responding), frees the platform host, powers
 * down the regulators, releases the pin configuration, and finally
 * drops the bus-bandwidth vote and unregisters the bus client.
 *
 * Returns 0 unconditionally.
 */
static int sdhci_msm_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
	/* All-ones from SDHCI_INT_STATUS means the controller is gone
	 * (e.g. already powered off); tells sdhci_remove_host() not to
	 * touch the hardware. */
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
			0xffffffff);

	pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
	/* The "polling" attribute is only created when there is no
	 * card-detect GPIO (see probe), so mirror that condition here. */
	if (!gpio_is_valid(msm_host->pdata->status_gpio))
		device_remove_file(&pdev->dev, &msm_host->polling);
	device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
	pm_runtime_disable(&pdev->dev);
	sdhci_remove_host(host, dead);
	sdhci_pltfm_free(pdev);

	/* "false" deinitializes/turns off the regulators set up at probe. */
	sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);

	/* NOTE(review): called twice with true then false back to back —
	 * presumably select-then-release of the pin states; confirm against
	 * sdhci_msm_setup_pins() semantics. */
	sdhci_msm_setup_pins(pdata, true);
	sdhci_msm_setup_pins(pdata, false);

	/* Drop any outstanding bandwidth vote before unregistering the
	 * bus-scaling client. */
	if (msm_host->msm_bus_vote.client_handle) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		sdhci_msm_bus_unregister(msm_host);
	}
	return 0;
}
4148
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004149#ifdef CONFIG_PM
/*
 * sdhci_msm_runtime_suspend - runtime-PM suspend handler.
 *
 * Masks the controller and power IRQs and, if the clocks are already
 * off, immediately drops the bus-bandwidth vote (cancelling any queued
 * deferred-vote work). Emits a trace event with the time spent.
 *
 * Returns 0 unconditionally.
 */
static int sdhci_msm_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	/* Timestamp for the duration reported in the trace event below. */
	ktime_t start = ktime_get();

	disable_irq(host->irq);
	disable_irq(msm_host->pwr_irq);

	/*
	 * Remove the vote immediately only if clocks are off in which
	 * case we might have queued work to remove vote but it may not
	 * be completed before runtime suspend or system suspend.
	 */
	if (!atomic_read(&msm_host->clks_on)) {
		if (msm_host->msm_bus_vote.client_handle)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
	}
	trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
			ktime_to_us(ktime_sub(ktime_get(), start)));

	return 0;
}
4174
/*
 * sdhci_msm_runtime_resume - runtime-PM resume handler.
 *
 * Re-enables the IRQs masked by sdhci_msm_runtime_suspend() (power IRQ
 * first, then the controller IRQ — the reverse of the suspend order)
 * and emits a trace event with the time spent.
 *
 * Returns 0 unconditionally.
 */
static int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	/* Timestamp for the duration reported in the trace event below. */
	ktime_t start = ktime_get();

	enable_irq(msm_host->pwr_irq);
	enable_irq(host->irq);

	trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return 0;
}
4189
/*
 * sdhci_msm_suspend - system-sleep suspend handler.
 *
 * Masks the card-detect IRQ when a detect GPIO is in use, then runs
 * the runtime-suspend sequence — unless the device is already runtime
 * suspended, in which case there is nothing further to quiesce.
 *
 * Returns 0, or the result of sdhci_msm_runtime_suspend().
 */
static int sdhci_msm_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int ret = 0;
	/* Timestamp for the duration reported in the trace event below. */
	ktime_t start = ktime_get();

	/* Card-detect must not fire while the system is asleep. */
	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
		(msm_host->mmc->slot.cd_irq >= 0))
			disable_irq(msm_host->mmc->slot.cd_irq);

	/* Already runtime suspended: IRQs are masked and the vote is
	 * dropped; skip straight to tracing. */
	if (pm_runtime_suspended(dev)) {
		pr_debug("%s: %s: already runtime suspended\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}
	ret = sdhci_msm_runtime_suspend(dev);
out:
	trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return ret;
}
4213
/*
 * sdhci_msm_resume - system-sleep resume handler.
 *
 * Re-enables the card-detect IRQ when a detect GPIO is in use, then
 * runs the runtime-resume sequence — unless the device is runtime
 * suspended, in which case full resume is deferred to the next
 * runtime-PM wakeup.
 *
 * Returns 0, or the result of sdhci_msm_runtime_resume().
 */
static int sdhci_msm_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int ret = 0;
	/* Timestamp for the duration reported in the trace event below. */
	ktime_t start = ktime_get();

	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
		(msm_host->mmc->slot.cd_irq >= 0))
			enable_irq(msm_host->mmc->slot.cd_irq);

	/* Leave the device runtime suspended; runtime PM will resume it
	 * on first use. */
	if (pm_runtime_suspended(dev)) {
		pr_debug("%s: %s: runtime suspended, defer system resume\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	ret = sdhci_msm_runtime_resume(dev);
out:
	trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return ret;
}
4238
/* Power-management callbacks: system sleep maps to sdhci_msm_suspend/
 * sdhci_msm_resume, runtime PM to the runtime_{suspend,resume} pair
 * (no runtime_idle callback). */
static const struct dev_pm_ops sdhci_msm_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
			   NULL)
};

#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)

#else
/* CONFIG_PM disabled: the driver registers no PM callbacks. */
#define SDHCI_MSM_PMOPS NULL
#endif
Asutosh Das0ef24812012-12-18 16:14:02 +05304250static const struct of_device_id sdhci_msm_dt_match[] = {
4251 {.compatible = "qcom,sdhci-msm"},
Venkat Gopalakrishnan272ba402015-06-25 12:00:02 -07004252 {},
Asutosh Das0ef24812012-12-18 16:14:02 +05304253};
4254MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
4255
/* Platform-driver registration: probe/remove entry points, OF match
 * table, and the PM ops selected above (NULL when !CONFIG_PM). */
static struct platform_driver sdhci_msm_driver = {
	.probe		= sdhci_msm_probe,
	.remove		= sdhci_msm_remove,
	.driver		= {
		.name	= "sdhci_msm",
		.owner	= THIS_MODULE,
		.of_match_table = sdhci_msm_dt_match,
		.pm	= SDHCI_MSM_PMOPS,
	},
};

/* Expands to module init/exit that register/unregister the driver. */
module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");