/*
 * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>

#ifdef CONFIG_QCOM_BUS_SCALING
#include <linux/msm-bus.h>
#endif

#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs_quirks.h"
#include "ufs-qcom-ice.h"
#include "ufs-qcom-debugfs.h"

#define MAX_PROP_SIZE			32
#define VDDP_REF_CLK_MIN_UV		1200000
#define VDDP_REF_CLK_MAX_UV		1200000
/* TODO: further tuning for this parameter may be required */
#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US	(10000) /* microseconds */

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles);
static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host);

static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
		char *prefix)
{
	print_hex_dump(KERN_ERR, prefix,
			len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
			16, 4, hba->mmio_base + offset, len * 4, false);
}

static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
		char *prefix, void *priv)
{
	ufs_qcom_dump_regs(hba, offset, len, prefix);
}

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}

static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	if (host->tx_l1_sync_clk)
		clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	if (host->rx_l1_sync_clk)
		clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
		host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
		host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
			host->rx_l1_sync_clk);
		if (err)
			goto disable_tx_l0;

		/* The tx lane1 clk could be muxed, hence keep this optional */
		if (host->tx_l1_sync_clk)
			ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
				host->tx_l1_sync_clk);
	}
	host->is_lane_clks_enabled = true;
	goto out;

disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	err = ufs_qcom_host_clk_get(dev,
			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
	if (err) {
		dev_err(dev, "%s: failed to get rx_lane0_sync_clk, err %d",
				__func__, err);
		goto out;
	}

	err = ufs_qcom_host_clk_get(dev,
			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
	if (err) {
		dev_err(dev, "%s: failed to get tx_lane0_sync_clk, err %d",
				__func__, err);
		goto out;
	}

	/* In case of single lane per direction, don't read lane1 clocks */
	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
			&host->rx_l1_sync_clk);
		if (err) {
			dev_err(dev, "%s: failed to get rx_lane1_sync_clk, err %d",
					__func__, err);
			goto out;
		}

		/* The tx lane1 clk could be muxed, hence keep this optional */
		ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
				&host->tx_l1_sync_clk);
	}
out:
	return err;
}

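/*
 * Poll the M-PHY TX FSM state over DME until it reaches HIBERN8 or until
 * HBRN8_POLL_TOUT_MS expires. Returns 0 when the PHY is in HIBERN8, a
 * negative error code if the DME access fails, or the unexpected FSM state
 * otherwise.
 */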
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * we might have been scheduled out for a long time during polling,
	 * so check the state again.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
				__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
				__func__, err);
	}

	return err;
}

static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
		   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
		   REG_UFS_CFG1);
	/* make sure above configuration is applied before we return */
	mb();
}

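/*
 * PHY power-up sequence: assert PHY reset, calibrate the PHY for the rate
 * (A/B) selected by UFS_QCOM_LIMIT_HS_RATE, de-assert the reset, start the
 * SerDes, wait for the PCS to become ready and finally select the UniPro
 * core mode.
 */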
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
							? true : false;

	/* Assert PHY reset and apply PHY calibration values */
	ufs_qcom_assert_reset(hba);
	/* provide 1ms delay to let the reset pulse propagate */
	usleep_range(1000, 1100);

	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);

	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* De-assert PHY reset and start serdes */
	ufs_qcom_deassert_reset(hba);

	/*
	 * after reset deassertion, phy will need all ref clocks,
	 * voltage, current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);
	ret = ufs_qcom_phy_start_serdes(phy);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	ret = ufs_qcom_phy_is_pcs_ready(phy);
	if (ret)
		dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
			__func__, ret);

	ufs_qcom_select_unipro_mode(host);

out:
	return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. UTP controller CGCs are by default disabled and
 * this function enables them (after every UFS link startup) to save some
 * power leakage.
 *
 * UFS host controller v3.0.0 onwards has an internal clock gating mechanism
 * in QUniPro as well; enable it to save additional power.
 */
static int ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	/* Enable UTP internal clock gating */
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();

	/* Enable Qunipro internal clock gating if supported */
	if (!ufs_qcom_cap_qunipro_clk_gating(host))
		goto out;

	/* Enable all the mask bits */
	err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
				DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
	if (err)
		goto out;

	err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
				PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
	if (err)
		goto out;

	err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
				DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
				DME_VS_CORE_CLK_CTRL);
out:
	return err;
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		if (!err && host->ice.pdev) {
			err = ufs_qcom_ice_init(host);
			if (err) {
				dev_err(hba->dev, "%s: ICE init failed (%d)\n",
					__func__, err);
				err = -EINVAL;
			}
		}

		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}

/*
 * __ufs_qcom_cfg_timers - configure the UFS host controller timer registers
 *
 * Returns zero for success and non-zero in case of a failure.
 */
static int __ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
		u32 hs, u32 rate, bool update_link_startup_timer,
		bool is_pre_scale_up)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The Qunipro controller does not use the following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * But the UTP controller uses the SYS1CLK_1US_REG register for
	 * Interrupt Aggregation / Auto hibern8 logic.
	 */
	if (ufs_qcom_cap_qunipro(host) &&
	    (!(ufshcd_is_intr_aggr_allowed(hba) ||
	       ufshcd_is_auto_hibern8_supported(hba))))
		goto out;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk")) {
			if (is_pre_scale_up)
				core_clk_rate = clki->max_freq;
			else
				core_clk_rate = clk_get_rate(clki->clk);
		}
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(host))
		goto out;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* this register's two fields shall be written at once */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		/*
		 * make sure that this configuration is applied before
		 * we return
		 */
		mb();
	}
	goto out;

out_error:
	ret = -EINVAL;
out:
	return ret;
}

static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
		u32 hs, u32 rate, bool update_link_startup_timer)
{
	return __ufs_qcom_cfg_timers(hba, gear, hs, rate,
				     update_link_startup_timer, false);
}

static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	u32 unipro_ver;
	int err = 0;

	if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, 0, true)) {
		dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
			__func__);
		err = -EINVAL;
		goto out;
	}

	/* make sure RX LineCfg is enabled before link startup */
	err = ufs_qcom_phy_ctrl_rx_linecfg(phy, true);
	if (err)
		goto out;

	if (ufs_qcom_cap_qunipro(host)) {
		/*
		 * set unipro core clock cycles to 150 & clear clock divider
		 */
		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
		if (err)
			goto out;
	}

	err = ufs_qcom_enable_hw_clk_gating(hba);
	if (err)
		goto out;

	/*
	 * Some UFS devices (and maybe the host) have issues if LCC is
	 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
	 * before link startup which will make sure that both host
	 * and device TX LCC are disabled once link startup is
	 * completed.
	 */
	unipro_ver = ufshcd_get_local_unipro_ver(hba);
	if (unipro_ver != UFS_UNIPRO_VER_1_41)
		err = ufshcd_dme_set(hba,
				     UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
				     0);
	if (err)
		goto out;

	if (!ufs_qcom_cap_qunipro_clk_gating(host))
		goto out;

	/* Enable all the mask bits */
	err = ufshcd_dme_rmw(hba, SAVECONFIGTIME_MODE_MASK,
			     SAVECONFIGTIME_MODE_MASK,
			     PA_VS_CONFIG_REG1);
out:
	return err;
}

static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	u32 tx_lanes;
	int err = 0;

	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
	if (err)
		goto out;

	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
			__func__);
		goto out;
	}

	/*
	 * Some UFS devices send incorrect LineCfg data as part of power mode
	 * change sequence which may cause host PHY to go into bad state.
	 * Disabling Rx LineCfg of host PHY should help avoid this.
	 */
	if (ufshcd_get_local_unipro_ver(hba) == UFS_UNIPRO_VER_1_41)
		err = ufs_qcom_phy_ctrl_rx_linecfg(phy, false);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_ctrl_rx_linecfg failed\n",
			__func__);
		goto out;
	}

	/*
	 * The UFS controller has a *clk_req output to GCC for each one of the
	 * clocks entering it. When *clk_req for a specific clock is
	 * de-asserted, the corresponding clock from GCC is stopped. The UFS
	 * controller de-asserts its *clk_req outputs when it is in Auto
	 * Hibernate state only if the Clock request feature is enabled.
	 * Enable the Clock request feature:
	 * - Enable HW clock control for UFS clocks in GCC (handled by the
	 *   clock driver as part of clk_prepare_enable).
	 * - Set the AH8_CFG.*CLK_REQ register bits to 1.
	 */
	if (ufshcd_is_auto_hibern8_supported(hba))
		ufshcd_writel(hba, ufshcd_readl(hba, UFS_AH8_CFG) |
			      UFS_HW_CLK_CTRL_EN,
			      UFS_AH8_CFG);
	/*
	 * Make sure clock request feature gets enabled for HW clk gating
	 * before further operations.
	 */
	mb();

out:
	return err;
}

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_qcom_link_startup_pre_change(hba);
		break;
	case POST_CHANGE:
		err = ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

	return err;
}

static int ufs_qcom_config_vreg(struct device *dev,
		struct ufs_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg;
	int min_uV, uA_load;

	if (!vreg) {
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

	reg = vreg->reg;
	if (regulator_count_voltages(reg) > 0) {
		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
					__func__, vreg->name, ret);
			goto out;
		}

		uA_load = on ? vreg->max_uA : 0;
		ret = regulator_set_load(vreg->reg, uA_load);
		if (ret)
			goto out;
	}
out:
	return ret;
}

static int ufs_qcom_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (vreg->enabled)
		return ret;

	ret = ufs_qcom_config_vreg(dev, vreg, true);
	if (ret)
		goto out;

	ret = regulator_enable(vreg->reg);
	if (ret)
		goto out;

	vreg->enabled = true;
out:
	return ret;
}

static int ufs_qcom_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg->enabled)
		return ret;

	ret = regulator_disable(vreg->reg);
	if (ret)
		goto out;

	ret = ufs_qcom_config_vreg(dev, vreg, false);
	if (ret)
		goto out;

	vreg->enabled = false;
out:
	return ret;
}

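/*
 * Low power transition: when the UniPro link is not active the lane clocks
 * and the PHY are powered down; when the link is completely off, the VDDp
 * ref-clk supply is disabled as well and PHY reset is asserted before
 * returning. In the remaining cases the PM QoS vote is also released.
 */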
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;

	/*
	 * If UniPro link is not active or OFF, PHY ref_clk, main PHY analog
	 * power rail and low noise analog power rail for PLL can be
	 * switched off.
	 */
	if (!ufs_qcom_is_link_active(hba)) {
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba))
			ret = ufs_qcom_disable_vreg(hba->dev,
					host->vddp_ref_clk);
		ufs_qcom_ice_suspend(host);

		if (ufs_qcom_is_link_off(hba)) {
			/* Assert PHY soft reset */
			ufs_qcom_assert_reset(hba);
			goto out;
		}
	}
	/* Unvote PM QoS */
	ufs_qcom_pm_qos_suspend(host);

out:
	return ret;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	err = phy_power_on(phy);
	if (err) {
		dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
			__func__, err);
		goto out;
	}

	if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 ||
				   hba->spm_lvl > UFS_PM_LVL_3))
		ufs_qcom_enable_vreg(hba->dev,
				     host->vddp_ref_clk);

	err = ufs_qcom_enable_lane_clks(host);
	if (err)
		goto out;

	err = ufs_qcom_ice_resume(host);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n",
			__func__, err);
		goto out;
	}

	hba->is_sys_suspended = false;

out:
	return err;
}

static int ufs_qcom_full_reset(struct ufs_hba *hba)
{
	int ret = -ENOTSUPP;

	if (!hba->core_reset) {
		dev_err(hba->dev, "%s: failed, err = %d\n", __func__,
			ret);
		goto out;
	}

	ret = reset_control_assert(hba->core_reset);
	if (ret) {
		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
			__func__, ret);
		goto out;
	}

	/*
	 * The hardware requirement for delay between assert/deassert
	 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
	 * ~125us (4/32768). To be on the safe side add 200us delay.
	 */
	usleep_range(200, 210);

	ret = reset_control_deassert(hba->core_reset);
	if (ret)
		dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
			__func__, ret);

out:
	return ret;
}

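/*
 * Inline Crypto Engine (ICE) hooks: set up the ICE configuration for each
 * SCSI request (using the request LBA as the DUN), and forward reset and
 * status queries to the ICE driver. Stubbed out as NULL when
 * CONFIG_SCSI_UFS_QCOM_ICE is not enabled.
 */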
#ifdef CONFIG_SCSI_UFS_QCOM_ICE
static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
	struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct request *req;
	int ret;

	if (lrbp->cmd && lrbp->cmd->request)
		req = lrbp->cmd->request;
	else
		return 0;

	/* Use request LBA as the DUN value */
	if (req->bio)
		*dun = (req->bio->bi_iter.bi_sector) >>
			UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;

	ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);

	return ret;
}

static
int ufs_qcom_crytpo_engine_cfg_start(struct ufs_hba *hba, unsigned int task_tag)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	int err = 0;

	if (!host->ice.pdev ||
	    !lrbp->cmd || lrbp->command_type != UTP_CMD_TYPE_SCSI)
		goto out;

	err = ufs_qcom_ice_cfg_start(host, lrbp->cmd);
out:
	return err;
}

static
int ufs_qcom_crytpo_engine_cfg_end(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, struct request *req)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	if (!host->ice.pdev || lrbp->command_type != UTP_CMD_TYPE_SCSI)
		goto out;

	err = ufs_qcom_ice_cfg_end(host, req);
out:
	return err;
}

static
int ufs_qcom_crytpo_engine_reset(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	if (!host->ice.pdev)
		goto out;

	err = ufs_qcom_ice_reset(host);
out:
	return err;
}

static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!status)
		return -EINVAL;

	return ufs_qcom_ice_get_status(host, status);
}
#else /* !CONFIG_SCSI_UFS_QCOM_ICE */
#define ufs_qcom_crypto_req_setup		NULL
#define ufs_qcom_crytpo_engine_cfg_start	NULL
#define ufs_qcom_crytpo_engine_cfg_end		NULL
#define ufs_qcom_crytpo_engine_reset		NULL
#define ufs_qcom_crypto_engine_get_status	NULL
#endif /* CONFIG_SCSI_UFS_QCOM_ICE */

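/*
 * Host-side (vendor) capability limits used when agreeing on the power mode
 * with the device in ufs_qcom_get_pwr_dev_param().
 */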
struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	u32 desired_working_mode;
};

static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
				      struct ufs_pa_layer_attr *dev_max,
				      struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_qcom_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_qcom_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (qcom_param->desired_working_mode == FAST) {
		is_qcom_max_hs = true;
		min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
				      qcom_param->hs_tx_gear);
	} else {
		min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
				      qcom_param->pwm_tx_gear);
	}

	/*
	 * The device doesn't support HS but qcom_param->desired_working_mode
	 * is HS, thus the device and qcom_param don't agree.
	 */
	if (!is_dev_sup_hs && is_qcom_max_hs) {
		pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_qcom_max_hs) {
		/*
		 * Since the device supports HS, it supports FAST_MODE.
		 * Since qcom_param->desired_working_mode is also HS,
		 * the final decision (FAST/FASTAUTO) is made according
		 * to qcom_param as it is the restricting factor.
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_hs;
	} else {
		/*
		 * Here qcom_param->desired_working_mode is PWM.
		 * It doesn't matter whether the device supports HS or PWM,
		 * in both cases qcom_param->desired_working_mode will
		 * determine the mode.
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_pwm;
	}

	/*
	 * We would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences.
	 * The same decision will be made for rx.
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    qcom_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    qcom_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * If device capabilities and vendor pre-defined preferences are
	 * both HS or both PWM, then set the minimum gear as the chosen
	 * working gear.
	 * If one is PWM and the other is HS, then the PWM side gets to
	 * decide the gear, as it is the one that also decided previously
	 * what power mode the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_qcom_max_hs) ||
	    (!is_dev_sup_hs && !is_qcom_max_hs))
		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
			min_t(u32, min_dev_gear, min_qcom_gear);
	else if (!is_dev_sup_hs)
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
	else
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

	agreed_pwr->hs_rate = qcom_param->hs_rate;
	return 0;
}

#ifdef CONFIG_QCOM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	int err;
	const char *key = "qcom,bus-vector-names";

	if (!speed_mode) {
		err = -EINVAL;
		goto out;
	}

	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

out:
	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
				__func__, speed_mode, err);
	return err;
}

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
	int gear = max_t(u32, p->gear_rx, p->gear_tx);
	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
	int pwr;

	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (!p->pwr_rx && !p->pwr_tx) {
		pwr = SLOWAUTO_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
		   p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
		pwr = FAST_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
	} else {
		pwr = SLOW_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
			 "PWM", gear, lanes);
	}
}

static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	int err = 0;

	if (vote != host->bus_vote.curr_vote) {
		err = msm_bus_scale_client_update_request(
				host->bus_vote.client_handle, vote);
		if (err) {
			dev_err(host->hba->dev,
				"%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				__func__, host->bus_vote.client_handle,
				vote, err);
			goto out;
		}

		host->bus_vote.curr_vote = vote;
	}
out:
	return err;
}

static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	int vote;
	int err = 0;
	char mode[BUS_VECTOR_NAME_LEN];

	ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

	vote = ufs_qcom_get_bus_vote(host, mode);
	if (vote >= 0)
		err = __ufs_qcom_set_bus_vote(host, vote);
	else
		err = vote;

	if (err)
		dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
	else
		host->bus_vote.saved_vote = vote;
	return err;
}

static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int vote, err;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_set_bus_vote() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	if (on) {
		vote = host->bus_vote.saved_vote;
		if (vote == host->bus_vote.min_bw_vote)
			ufs_qcom_update_bus_bw_vote(host);
	} else {
		vote = host->bus_vote.min_bw_vote;
	}

	err = __ufs_qcom_set_bus_vote(host, vote);
	if (err)
		dev_err(hba->dev, "%s: set bus vote failed %d\n",
				__func__, err);

	return err;
}

static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	uint32_t value;

	if (!kstrtou32(buf, 0, &value)) {
		host->bus_vote.is_max_bw_needed = !!value;
		ufs_qcom_update_bus_bw_vote(host);
	}

	return count;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	int err;
	struct msm_bus_scale_pdata *bus_pdata;
	struct device *dev = host->hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;

	bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (!bus_pdata) {
		dev_err(dev, "%s: failed to get bus vectors\n", __func__);
		err = -ENODATA;
		goto out;
	}

	err = of_property_count_strings(np, "qcom,bus-vector-names");
	if (err < 0 || err != bus_pdata->num_usecases) {
		dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
				__func__, err);
		goto out;
	}

	host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
	if (!host->bus_vote.client_handle) {
		dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
				__func__);
		err = -EFAULT;
		goto out;
	}

	/* cache the vote index for minimum and maximum bandwidth */
	host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
	host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

	host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
	host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
	sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
	host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
	host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
	err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
	return err;
}
#else /* CONFIG_QCOM_BUS_SCALING */
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	return 0;
}

static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
{
	return 0;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	return 0;
}

static inline void msm_bus_scale_unregister_client(uint32_t cl)
{
}
#endif /* CONFIG_QCOM_BUS_SCALING */

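/*
 * Gate or ungate the device reference clock through the
 * dev_ref_clk_ctrl_mmio register, keeping ref_clk active/stable for at
 * least 1us around hibern8 enter/exit as the code below requires.
 */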
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
	if (host->dev_ref_clk_ctrl_mmio &&
	    (enable ^ host->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= host->dev_ref_clk_en_mask;
		else
			temp &= ~host->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock it might be immediately
		 * after entering into hibern8 in which case we need to make
		 * sure that device ref_clk is active at least 1us after the
		 * hibern8 enter.
		 */
		if (!enable)
			udelay(1);

		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();

		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for at least 1us before the hibern8
		 * exit command.
		 */
		if (enable)
			udelay(1);

		host->is_dev_ref_clk_enabled = enable;
	}
}

static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	u32 val;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	struct ufs_qcom_dev_params ufs_qcom_cap;
	int ret = 0;
	int res = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
		ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
		ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
		ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
		ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
		ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
		ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
		ufs_qcom_cap.desired_working_mode =
					UFS_QCOM_LIMIT_DESIRED_MODE;

		if (host->hw_ver.major == 0x1) {
			/*
			 * HS-G3 operations may not reliably work on legacy QCOM
			 * UFS host controller hardware even though capability
			 * exchange during link startup phase may end up
			 * negotiating maximum supported gear as G3.
			 * Hence downgrade the maximum supported gear to HS-G2.
			 */
			if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
			if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
		}

		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
						 dev_max_params,
						 dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}

		/* enable the device ref clock before changing to HS mode */
		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
			ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, true);
		break;
	case POST_CHANGE:
		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate, false)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		val = ~(MAX_U32 << dev_req_params->lane_tx);
		res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
		if (res) {
			dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
				__func__, res);
			ret = res;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
				dev_req_params, sizeof(*dev_req_params));
		ufs_qcom_update_bus_bw_vote(host);

		/* disable the device ref clock if entered PWM mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info) &&
			!ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
	int err;
	u32 pa_vs_config_reg1;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     &pa_vs_config_reg1);
	if (err)
		goto out;

	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			    (pa_vs_config_reg1 | (1 << 12)));

out:
	return err;
}

static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->dev_info.quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

	return err;
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1)
		return UFSHCI_VERSION_11;
	else
		return UFSHCI_VERSION_20;
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller might have some non-standard behaviours
 * (quirks) beyond what is specified by the UFSHCI specification. Advertise
 * all such quirks to the standard UFS host controller driver so that the
 * standard driver takes them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1) {
		hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			      | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			      | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE);

		if (host->hw_ver.minor == 0x001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (host->hw_ver.major == 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}

	if (host->disable_lpm)
		hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!host->disable_lpm) {
		hba->caps |= UFSHCD_CAP_CLK_GATING;
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
		hba->caps |= UFSHCD_CAP_CLK_SCALING;
	}
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

	if (host->hw_ver.major >= 0x2) {
		if (!host->disable_lpm)
			hba->caps |= UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8;
		host->caps = UFS_QCOM_CAP_QUNIPRO |
			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
	}
	if (host->hw_ver.major >= 0x3) {
		host->caps |= UFS_QCOM_CAP_QUNIPRO_CLK_GATING;
		/*
		 * The UFS PHY attached to v3.0.0 controller supports entering
		 * deeper low power state of SVS2. This lets the controller
		 * run at much lower clock frequencies for saving power.
		 * Assuming this and any future revisions of the controller
		 * support this capability. Need to revisit this assumption if
		 * any future platform with this core doesn't support the
		 * capability, as there will be no benefit running at lower
		 * frequencies then.
		 */
		host->caps |= UFS_QCOM_CAP_SVS2;
	}
}

/**
 * ufs_qcom_setup_clocks - enable/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks; else disable them.
 * @is_gating_context: If true, this function is called from the aggressive
 * clock gating context and we may only need to gate off important clocks.
 * If false, make sure to gate off all clocks.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
				 bool is_gating_context)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	if (on) {
		err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
		if (err)
			goto out;

		err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
		if (err) {
			dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
				__func__, err);
			ufs_qcom_phy_disable_iface_clk(host->generic_phy);
			goto out;
		}
		/* enable the device ref clock for HS mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info))
			ufs_qcom_dev_ref_clk_ctrl(host, true);

		err = ufs_qcom_ice_resume(host);
		if (err)
			goto out;
	} else {
		err = ufs_qcom_ice_suspend(host);
		if (err)
			goto out;

		/* M-PHY RMMI interface clocks can be turned off */
		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
		/*
		 * If auto hibern8 is supported then the link will already
		 * be in hibern8 state and the ref clock can be gated.
		 */
		if (ufshcd_is_auto_hibern8_supported(hba) ||
		    !ufs_qcom_is_link_active(hba)) {
			/* turn off UFS local PHY ref_clk */
			ufs_qcom_phy_disable_ref_clk(host->generic_phy);
			/* disable device ref_clk */
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		}
	}

out:
	return err;
}

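/*
 * PM QoS: each request votes for a low CPU latency on its CPU group via a
 * workqueue; the unvote keeps the previous latency request alive for
 * UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US so that back-to-back requests do not
 * keep toggling the vote.
 */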
#ifdef CONFIG_SMP /* CONFIG_SMP */
static int ufs_qcom_cpu_to_group(struct ufs_qcom_host *host, int cpu)
{
	int i;

	if (cpu >= 0 && cpu < num_possible_cpus())
		for (i = 0; i < host->pm_qos.num_groups; i++)
			if (cpumask_test_cpu(cpu, &host->pm_qos.groups[i].mask))
				return i;

	return host->pm_qos.default_cpu;
}

static void ufs_qcom_pm_qos_req_start(struct ufs_hba *hba, struct request *req)
{
	unsigned long flags;
	struct ufs_qcom_host *host;
	struct ufs_qcom_pm_qos_cpu_group *group;

	if (!hba || !req)
		return;

	host = ufshcd_get_variant(hba);
	if (!host->pm_qos.groups)
		return;

	group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req->cpu)];

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!host->pm_qos.is_enabled)
		goto out;

	group->active_reqs++;
	if (group->state != PM_QOS_REQ_VOTE &&
			group->state != PM_QOS_VOTED) {
		group->state = PM_QOS_REQ_VOTE;
		queue_work(host->pm_qos.workq, &group->vote_work);
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

/* hba->host->host_lock is assumed to be held by caller */
static void __ufs_qcom_pm_qos_req_end(struct ufs_qcom_host *host, int req_cpu)
{
	struct ufs_qcom_pm_qos_cpu_group *group;

	if (!host->pm_qos.groups || !host->pm_qos.is_enabled)
		return;

	group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req_cpu)];

	if (--group->active_reqs)
		return;
	group->state = PM_QOS_REQ_UNVOTE;
	queue_work(host->pm_qos.workq, &group->unvote_work);
}

static void ufs_qcom_pm_qos_req_end(struct ufs_hba *hba, struct request *req,
	bool should_lock)
{
	unsigned long flags = 0;

	if (!hba || !req)
		return;

	if (should_lock)
		spin_lock_irqsave(hba->host->host_lock, flags);
	__ufs_qcom_pm_qos_req_end(ufshcd_get_variant(hba), req->cpu);
	if (should_lock)
		spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufs_qcom_pm_qos_vote_work(struct work_struct *work)
{
	struct ufs_qcom_pm_qos_cpu_group *group =
		container_of(work, struct ufs_qcom_pm_qos_cpu_group, vote_work);
	struct ufs_qcom_host *host = group->host;
	unsigned long flags;

	spin_lock_irqsave(host->hba->host->host_lock, flags);

	if (!host->pm_qos.is_enabled || !group->active_reqs) {
		spin_unlock_irqrestore(host->hba->host->host_lock, flags);
		return;
	}

	group->state = PM_QOS_VOTED;
	spin_unlock_irqrestore(host->hba->host->host_lock, flags);

	pm_qos_update_request(&group->req, group->latency_us);
}

static void ufs_qcom_pm_qos_unvote_work(struct work_struct *work)
{
	struct ufs_qcom_pm_qos_cpu_group *group = container_of(work,
		struct ufs_qcom_pm_qos_cpu_group, unvote_work);
	struct ufs_qcom_host *host = group->host;
	unsigned long flags;

	/*
	 * Check if new requests were submitted in the meantime and do not
	 * unvote if so.
	 */
	spin_lock_irqsave(host->hba->host->host_lock, flags);

	if (!host->pm_qos.is_enabled || group->active_reqs) {
		spin_unlock_irqrestore(host->hba->host->host_lock, flags);
		return;
	}

	group->state = PM_QOS_UNVOTED;
	spin_unlock_irqrestore(host->hba->host->host_lock, flags);

	pm_qos_update_request_timeout(&group->req,
		group->latency_us, UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US);
}

static ssize_t ufs_qcom_pm_qos_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev->parent);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	return snprintf(buf, PAGE_SIZE, "%d\n", host->pm_qos.is_enabled);
}

static ssize_t ufs_qcom_pm_qos_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev->parent);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	unsigned long value;
	unsigned long flags;
	bool enable;
	int i;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	enable = !!value;

	/*
	 * Must take the spinlock and save irqs before changing the enabled
	 * flag in order to keep correctness of PM QoS release.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
1724 if (enable == host->pm_qos.is_enabled) {
1725 spin_unlock_irqrestore(hba->host->host_lock, flags);
1726 return count;
1727 }
1728 host->pm_qos.is_enabled = enable;
1729 spin_unlock_irqrestore(hba->host->host_lock, flags);
1730
1731 if (!enable)
1732 for (i = 0; i < host->pm_qos.num_groups; i++) {
1733 cancel_work_sync(&host->pm_qos.groups[i].vote_work);
1734 cancel_work_sync(&host->pm_qos.groups[i].unvote_work);
1735 spin_lock_irqsave(hba->host->host_lock, flags);
1736 host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
1737 host->pm_qos.groups[i].active_reqs = 0;
1738 spin_unlock_irqrestore(hba->host->host_lock, flags);
1739 pm_qos_update_request(&host->pm_qos.groups[i].req,
1740 PM_QOS_DEFAULT_VALUE);
1741 }
1742
1743 return count;
1744}
1745
1746static ssize_t ufs_qcom_pm_qos_latency_show(struct device *dev,
1747 struct device_attribute *attr, char *buf)
1748{
1749 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1750 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1751 int ret;
1752 int i;
1753 int offset = 0;
1754
1755 for (i = 0; i < host->pm_qos.num_groups; i++) {
 1756 		ret = snprintf(&buf[offset], PAGE_SIZE - offset,
1757 "cpu group #%d(mask=0x%lx): %d\n", i,
1758 host->pm_qos.groups[i].mask.bits[0],
1759 host->pm_qos.groups[i].latency_us);
1760 if (ret > 0)
1761 offset += ret;
1762 else
1763 break;
1764 }
1765
1766 return offset;
1767}
1768
1769static ssize_t ufs_qcom_pm_qos_latency_store(struct device *dev,
1770 struct device_attribute *attr, const char *buf, size_t count)
1771{
1772 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1773 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1774 unsigned long value;
1775 unsigned long flags;
1776 char *strbuf;
1777 char *strbuf_copy;
1778 char *token;
1779 int i;
1780 int ret;
1781
1782 /* reserve one byte for null termination */
1783 strbuf = kmalloc(count + 1, GFP_KERNEL);
1784 if (!strbuf)
1785 return -ENOMEM;
1786 strbuf_copy = strbuf;
1787 strlcpy(strbuf, buf, count + 1);
1788
1789 for (i = 0; i < host->pm_qos.num_groups; i++) {
1790 token = strsep(&strbuf, ",");
1791 if (!token)
1792 break;
1793
1794 ret = kstrtoul(token, 0, &value);
1795 if (ret)
1796 break;
1797
1798 spin_lock_irqsave(hba->host->host_lock, flags);
1799 host->pm_qos.groups[i].latency_us = value;
1800 spin_unlock_irqrestore(hba->host->host_lock, flags);
1801 }
1802
1803 kfree(strbuf_copy);
1804 return count;
1805}
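/*
 * Illustrative usage of the enable and latency attributes implemented above
 * (registered as "pm_qos_enable" and "pm_qos_latency_us" in
 * ufs_qcom_pm_qos_init()). The exact sysfs path depends on where the variant
 * device is registered and is only sketched here:
 *
 *	# disable PM QoS voting and release all outstanding votes
 *	echo 0 > /sys/.../pm_qos_enable
 *
 *	# set per-group latencies, one comma-separated value per CPU group
 *	echo 100,200 > /sys/.../pm_qos_latency_us
 *
 * Latency values are applied to the groups in order; parsing stops at the
 * first missing or invalid token.
 */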
1806
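/*
 * Illustrative device tree fragment for the properties parsed by
 * ufs_qcom_pm_qos_init() below. The node name and values are hypothetical;
 * only the property names come from this driver:
 *
 *	ufshc@0 {
 *		qcom,pm-qos-cpu-groups = <0x03 0x0c>;
 *		qcom,pm-qos-cpu-group-latency-us = <200 300>;
 *		qcom,pm-qos-default-cpu = <0>;
 *	};
 *
 * Each qcom,pm-qos-cpu-groups entry is a cpumask that must be a subset of
 * cpu_possible_mask and must have a matching latency entry.
 */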
1807static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host)
1808{
1809 struct device_node *node = host->hba->dev->of_node;
1810 struct device_attribute *attr;
1811 int ret = 0;
1812 int num_groups;
1813 int num_values;
1814 char wq_name[sizeof("ufs_pm_qos_00")];
1815 int i;
1816
1817 num_groups = of_property_count_u32_elems(node,
1818 "qcom,pm-qos-cpu-groups");
1819 if (num_groups <= 0)
1820 goto no_pm_qos;
1821
1822 num_values = of_property_count_u32_elems(node,
1823 "qcom,pm-qos-cpu-group-latency-us");
1824 if (num_values <= 0)
1825 goto no_pm_qos;
1826
1827 if (num_values != num_groups || num_groups > num_possible_cpus()) {
1828 dev_err(host->hba->dev, "%s: invalid count: num_groups=%d, num_values=%d, num_possible_cpus=%d\n",
1829 __func__, num_groups, num_values, num_possible_cpus());
1830 goto no_pm_qos;
1831 }
1832
1833 host->pm_qos.num_groups = num_groups;
1834 host->pm_qos.groups = kcalloc(host->pm_qos.num_groups,
1835 sizeof(struct ufs_qcom_pm_qos_cpu_group), GFP_KERNEL);
1836 if (!host->pm_qos.groups)
1837 return -ENOMEM;
1838
1839 for (i = 0; i < host->pm_qos.num_groups; i++) {
1840 u32 mask;
1841
1842 ret = of_property_read_u32_index(node, "qcom,pm-qos-cpu-groups",
1843 i, &mask);
1844 if (ret)
1845 goto free_groups;
1846 host->pm_qos.groups[i].mask.bits[0] = mask;
1847 if (!cpumask_subset(&host->pm_qos.groups[i].mask,
1848 cpu_possible_mask)) {
1849 dev_err(host->hba->dev, "%s: invalid mask 0x%x for cpu group\n",
1850 __func__, mask);
1851 goto free_groups;
1852 }
1853
1854 ret = of_property_read_u32_index(node,
1855 "qcom,pm-qos-cpu-group-latency-us", i,
1856 &host->pm_qos.groups[i].latency_us);
1857 if (ret)
1858 goto free_groups;
1859
1860 host->pm_qos.groups[i].req.type = PM_QOS_REQ_AFFINE_CORES;
1861 host->pm_qos.groups[i].req.cpus_affine =
1862 host->pm_qos.groups[i].mask;
1863 host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
1864 host->pm_qos.groups[i].active_reqs = 0;
1865 host->pm_qos.groups[i].host = host;
1866
1867 INIT_WORK(&host->pm_qos.groups[i].vote_work,
1868 ufs_qcom_pm_qos_vote_work);
1869 INIT_WORK(&host->pm_qos.groups[i].unvote_work,
1870 ufs_qcom_pm_qos_unvote_work);
1871 }
1872
1873 ret = of_property_read_u32(node, "qcom,pm-qos-default-cpu",
1874 &host->pm_qos.default_cpu);
1875 if (ret || host->pm_qos.default_cpu > num_possible_cpus())
1876 host->pm_qos.default_cpu = 0;
1877
1878 /*
 1879 	 * Use a single-threaded workqueue to ensure that submitted work items
 1880 	 * are executed in order. Consider the following two possible cases:
1881 *
1882 * 1. A new request arrives and voting work is scheduled for it. Before
1883 * the voting work is performed the request is finished and unvote
1884 * work is also scheduled.
1885 * 2. A request is finished and unvote work is scheduled. Before the
1886 * work is performed a new request arrives and voting work is also
1887 * scheduled.
1888 *
1889 * In both cases a vote work and unvote work wait to be performed.
1890 * If ordering is not guaranteed, then the end state might be the
1891 * opposite of the desired state.
1892 */
1893 snprintf(wq_name, ARRAY_SIZE(wq_name), "%s_%d", "ufs_pm_qos",
1894 host->hba->host->host_no);
1895 host->pm_qos.workq = create_singlethread_workqueue(wq_name);
1896 if (!host->pm_qos.workq) {
1897 dev_err(host->hba->dev, "%s: failed to create the workqueue\n",
1898 __func__);
1899 ret = -ENOMEM;
1900 goto free_groups;
1901 }
1902
1903 /* Initialization was ok, add all PM QoS requests */
1904 for (i = 0; i < host->pm_qos.num_groups; i++)
1905 pm_qos_add_request(&host->pm_qos.groups[i].req,
1906 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
1907
1908 /* PM QoS latency sys-fs attribute */
1909 attr = &host->pm_qos.latency_attr;
1910 attr->show = ufs_qcom_pm_qos_latency_show;
1911 attr->store = ufs_qcom_pm_qos_latency_store;
1912 sysfs_attr_init(&attr->attr);
1913 attr->attr.name = "pm_qos_latency_us";
1914 attr->attr.mode = S_IRUGO | S_IWUSR;
1915 if (device_create_file(host->hba->var->dev, attr))
1916 dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos_latency_us\n");
1917
1918 /* PM QoS enable sys-fs attribute */
1919 attr = &host->pm_qos.enable_attr;
1920 attr->show = ufs_qcom_pm_qos_enable_show;
1921 attr->store = ufs_qcom_pm_qos_enable_store;
1922 sysfs_attr_init(&attr->attr);
1923 attr->attr.name = "pm_qos_enable";
1924 attr->attr.mode = S_IRUGO | S_IWUSR;
1925 if (device_create_file(host->hba->var->dev, attr))
1926 dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos enable\n");
1927
1928 host->pm_qos.is_enabled = true;
1929
1930 return 0;
1931
1932free_groups:
1933 kfree(host->pm_qos.groups);
1934no_pm_qos:
1935 host->pm_qos.groups = NULL;
1936 return ret ? ret : -ENOTSUPP;
1937}
1938
1939static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host)
1940{
1941 int i;
1942
1943 if (!host->pm_qos.groups)
1944 return;
1945
1946 for (i = 0; i < host->pm_qos.num_groups; i++)
1947 flush_work(&host->pm_qos.groups[i].unvote_work);
1948}
1949
1950static void ufs_qcom_pm_qos_remove(struct ufs_qcom_host *host)
1951{
1952 int i;
1953
1954 if (!host->pm_qos.groups)
1955 return;
1956
1957 for (i = 0; i < host->pm_qos.num_groups; i++)
1958 pm_qos_remove_request(&host->pm_qos.groups[i].req);
1959 destroy_workqueue(host->pm_qos.workq);
1960
1961 kfree(host->pm_qos.groups);
1962 host->pm_qos.groups = NULL;
1963}
1964#endif /* CONFIG_SMP */
1965
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001966/*
1967 * ufs_qcom_parse_lpm - read from DTS whether LPM modes should be disabled.
1968 */
1969static void ufs_qcom_parse_lpm(struct ufs_qcom_host *host)
1970{
1971 struct device_node *node = host->hba->dev->of_node;
1972
1973 host->disable_lpm = of_property_read_bool(node, "qcom,disable-lpm");
1974 if (host->disable_lpm)
1975 pr_info("%s: will disable all LPM modes\n", __func__);
1976}
1977
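/*
 * ufs_qcom_save_host_ptr() relies on a "ufshc" alias to index the host in
 * ufs_qcom_hosts[]. An illustrative (hypothetical) device tree entry:
 *
 *	aliases {
 *		ufshc1 = &ufshc_mem;
 *	};
 *
 * of_alias_get_id() then returns 1 and the host pointer is stored at
 * index 0 (id - 1).
 */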
Subhash Jadavania889db02016-12-09 10:24:58 -08001978static void ufs_qcom_save_host_ptr(struct ufs_hba *hba)
1979{
1980 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1981 int id;
1982
1983 if (!hba->dev->of_node)
1984 return;
1985
1986 /* Extract platform data */
1987 id = of_alias_get_id(hba->dev->of_node, "ufshc");
1988 if (id <= 0)
1989 dev_err(hba->dev, "Failed to get host index %d\n", id);
1990 else if (id <= MAX_UFS_QCOM_HOSTS)
1991 ufs_qcom_hosts[id - 1] = host;
1992 else
1993 dev_err(hba->dev, "invalid host index %d\n", id);
1994}
1995
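/*
 * Illustrative device tree fragment for the "qcom,vddp-ref-clk" supply that
 * ufs_qcom_parse_reg_info() below is called with from ufs_qcom_init(). The
 * regulator phandle and current limit are hypothetical:
 *
 *	ufshc@0 {
 *		qcom,vddp-ref-clk-supply = <&vreg_l25a>;
 *		qcom,vddp-ref-clk-max-microamp = <100>;
 *	};
 *
 * The voltage range is fixed in the driver to VDDP_REF_CLK_MIN_UV /
 * VDDP_REF_CLK_MAX_UV.
 */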
Subhash Jadavani9c807702017-04-01 00:35:51 -07001996static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
1997 struct ufs_vreg **out_vreg)
1998{
1999 int ret = 0;
2000 char prop_name[MAX_PROP_SIZE];
2001 struct ufs_vreg *vreg = NULL;
2002 struct device *dev = host->hba->dev;
2003 struct device_node *np = dev->of_node;
2004
2005 if (!np) {
2006 dev_err(dev, "%s: non DT initialization\n", __func__);
2007 goto out;
2008 }
2009
2010 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
2011 if (!of_parse_phandle(np, prop_name, 0)) {
2012 dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
2013 __func__, prop_name);
2014 ret = -ENODEV;
2015 goto out;
2016 }
2017
2018 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
2019 if (!vreg)
2020 return -ENOMEM;
2021
2022 vreg->name = name;
2023
2024 snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
2025 ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
2026 if (ret) {
2027 dev_err(dev, "%s: unable to find %s err %d\n",
2028 __func__, prop_name, ret);
2029 goto out;
2030 }
2031
2032 vreg->reg = devm_regulator_get(dev, vreg->name);
2033 if (IS_ERR(vreg->reg)) {
2034 ret = PTR_ERR(vreg->reg);
2035 dev_err(dev, "%s: %s get failed, err=%d\n",
2036 __func__, vreg->name, ret);
2037 }
2038 vreg->min_uV = VDDP_REF_CLK_MIN_UV;
2039 vreg->max_uV = VDDP_REF_CLK_MAX_UV;
2040
2041out:
2042 if (!ret)
2043 *out_vreg = vreg;
2044 return ret;
2045}
2046
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002047/**
2048 * ufs_qcom_init - bind phy with controller
2049 * @hba: host controller instance
2050 *
2051 * Binds PHY with controller and powers up PHY enabling clocks
2052 * and regulators.
2053 *
2054 * Returns -EPROBE_DEFER if binding fails, returns negative error
2055 * on phy power up failure and returns zero on success.
2056 */
2057static int ufs_qcom_init(struct ufs_hba *hba)
2058{
2059 int err;
2060 struct device *dev = hba->dev;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002061 struct platform_device *pdev = to_platform_device(dev);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002062 struct ufs_qcom_host *host;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002063 struct resource *res;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002064
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002065 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2066 if (!host) {
2067 err = -ENOMEM;
2068 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
2069 goto out;
2070 }
2071
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002072 /* Make a two way bind between the qcom host and the hba */
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002073 host->hba = hba;
Subhash Jadavani9c807702017-04-01 00:35:51 -07002074 spin_lock_init(&host->ice_work_lock);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002075
Subhash Jadavani9c807702017-04-01 00:35:51 -07002076 ufshcd_set_variant(hba, host);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002077
2078 err = ufs_qcom_ice_get_dev(host);
2079 if (err == -EPROBE_DEFER) {
2080 /*
 2081 		 * The UFS driver might be probed before the ICE driver is.
 2082 		 * In that case, return EPROBE_DEFER so that UFS probing is
 2083 		 * retried after the ICE driver has probed.
2084 */
2085 dev_err(dev, "%s: required ICE device not probed yet err = %d\n",
2086 __func__, err);
2087 goto out_host_free;
2088
2089 } else if (err == -ENODEV) {
2090 /*
2091 * ICE device is not enabled in DTS file. No need for further
2092 * initialization of ICE driver.
2093 */
 2094 		dev_warn(dev, "%s: ICE device is not enabled\n",
2095 __func__);
2096 } else if (err) {
2097 dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
2098 __func__, err);
2099 goto out_host_free;
2100 }
2101
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002102 host->generic_phy = devm_phy_get(dev, "ufsphy");
2103
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002104 if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
2105 /*
 2106 		 * The UFS driver might be probed before the phy driver is.
 2107 		 * In that case, return EPROBE_DEFER and retry probing later.
2108 */
2109 err = -EPROBE_DEFER;
 2110 		dev_warn(dev, "%s: required phy hasn't probed yet, err = %d\n",
2111 __func__, err);
2112 goto out_host_free;
2113 } else if (IS_ERR(host->generic_phy)) {
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002114 err = PTR_ERR(host->generic_phy);
2115 dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
2116 goto out;
2117 }
2118
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002119 err = ufs_qcom_pm_qos_init(host);
2120 if (err)
2121 dev_info(dev, "%s: PM QoS will be disabled\n", __func__);
2122
2123 /* restore the secure configuration */
2124 ufs_qcom_update_sec_cfg(hba, true);
2125
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002126 err = ufs_qcom_bus_register(host);
2127 if (err)
2128 goto out_host_free;
2129
Yaniv Gardibfdbe8b2015-03-31 17:37:13 +03002130 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
2131 &host->hw_ver.minor, &host->hw_ver.step);
2132
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002133 /*
2134 * for newer controllers, device reference clock control bit has
2135 * moved inside UFS controller register address space itself.
2136 */
2137 if (host->hw_ver.major >= 0x02) {
2138 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
2139 host->dev_ref_clk_en_mask = BIT(26);
2140 } else {
2141 /* "dev_ref_clk_ctrl_mem" is optional resource */
2142 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2143 if (res) {
2144 host->dev_ref_clk_ctrl_mmio =
2145 devm_ioremap_resource(dev, res);
2146 if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
2147 dev_warn(dev,
2148 "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
2149 __func__,
2150 PTR_ERR(host->dev_ref_clk_ctrl_mmio));
2151 host->dev_ref_clk_ctrl_mmio = NULL;
2152 }
2153 host->dev_ref_clk_en_mask = BIT(5);
2154 }
2155 }
2156
Yaniv Gardibfdbe8b2015-03-31 17:37:13 +03002157 /* update phy revision information before calling phy_init() */
2158 ufs_qcom_phy_save_controller_version(host->generic_phy,
2159 host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
2160
Subhash Jadavani9c807702017-04-01 00:35:51 -07002161 err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk",
2162 &host->vddp_ref_clk);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002163 phy_init(host->generic_phy);
2164 err = phy_power_on(host->generic_phy);
2165 if (err)
2166 goto out_unregister_bus;
Subhash Jadavani9c807702017-04-01 00:35:51 -07002167 if (host->vddp_ref_clk) {
2168 err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk);
2169 if (err) {
2170 dev_err(dev, "%s: failed enabling ref clk supply: %d\n",
2171 __func__, err);
2172 goto out_disable_phy;
2173 }
2174 }
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002175
2176 err = ufs_qcom_init_lane_clks(host);
2177 if (err)
Subhash Jadavani9c807702017-04-01 00:35:51 -07002178 goto out_disable_vddp;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002179
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002180 ufs_qcom_parse_lpm(host);
2181 if (host->disable_lpm)
2182 pm_runtime_forbid(host->hba->dev);
Yaniv Gardicad2e032015-03-31 17:37:14 +03002183 ufs_qcom_set_caps(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002184 ufs_qcom_advertise_quirks(hba);
2185
Subhash Jadavani9c807702017-04-01 00:35:51 -07002186 ufs_qcom_set_bus_vote(hba, true);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002187 ufs_qcom_setup_clocks(hba, true, false);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002188
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002189 host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
2190 ufs_qcom_get_default_testbus_cfg(host);
2191 err = ufs_qcom_testbus_config(host);
2192 if (err) {
2193 dev_warn(dev, "%s: failed to configure the testbus %d\n",
2194 __func__, err);
2195 err = 0;
2196 }
2197
Subhash Jadavania889db02016-12-09 10:24:58 -08002198 ufs_qcom_save_host_ptr(hba);
2199
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002200 goto out;
2201
Subhash Jadavani9c807702017-04-01 00:35:51 -07002202out_disable_vddp:
2203 if (host->vddp_ref_clk)
2204 ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002205out_disable_phy:
2206 phy_power_off(host->generic_phy);
2207out_unregister_bus:
2208 phy_exit(host->generic_phy);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002209 msm_bus_scale_unregister_client(host->bus_vote.client_handle);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002210out_host_free:
2211 devm_kfree(dev, host);
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002212 ufshcd_set_variant(hba, NULL);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002213out:
2214 return err;
2215}
2216
2217static void ufs_qcom_exit(struct ufs_hba *hba)
2218{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002219 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002220
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002221 msm_bus_scale_unregister_client(host->bus_vote.client_handle);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002222 ufs_qcom_disable_lane_clks(host);
2223 phy_power_off(host->generic_phy);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002224 ufs_qcom_pm_qos_remove(host);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002225}
2226
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002227static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
2228 u32 clk_cycles)
2229{
2230 int err;
2231 u32 core_clk_ctrl_reg;
2232
2233 if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
2234 return -EINVAL;
2235
2236 err = ufshcd_dme_get(hba,
2237 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
2238 &core_clk_ctrl_reg);
2239 if (err)
2240 goto out;
2241
2242 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
2243 core_clk_ctrl_reg |= clk_cycles;
2244
2245 /* Clear CORE_CLK_DIV_EN */
2246 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
2247
2248 err = ufshcd_dme_set(hba,
2249 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
2250 core_clk_ctrl_reg);
2251out:
2252 return err;
2253}
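/*
 * Note (inferred from the attribute name, not stated explicitly here): the
 * value programmed by this helper is the number of unipro core clock cycles
 * per 1 us, so e.g. 150 corresponds to a 150 MHz core clock, 75 to the SVS
 * rate and 37 to the SVS2 rate used by the scaling callbacks below.
 */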
2254
2255static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
2256{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002257 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002258 struct ufs_pa_layer_attr *attr = &host->dev_req_params;
2259 int err = 0;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002260
2261 if (!ufs_qcom_cap_qunipro(host))
Subhash Jadavani9c807702017-04-01 00:35:51 -07002262 goto out;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002263
Subhash Jadavani9c807702017-04-01 00:35:51 -07002264 if (attr)
2265 __ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
2266 attr->hs_rate, false, true);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002267
2268 /* set unipro core clock cycles to 150 and clear clock divider */
Subhash Jadavani9c807702017-04-01 00:35:51 -07002269 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
2270out:
2271 return err;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002272}
2273
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002274static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
2275{
2276 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002277 struct ufs_pa_layer_attr *attr = &host->dev_req_params;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002278 int err = 0;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002279
2280 if (!ufs_qcom_cap_qunipro(host))
2281 return 0;
2282
Subhash Jadavani9c807702017-04-01 00:35:51 -07002283 if (attr)
2284 ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
2285 attr->hs_rate, false);
2286
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002287 if (ufs_qcom_cap_svs2(host))
2288 /*
2289 * For SVS2 set unipro core clock cycles to 37 and
2290 * clear clock divider
2291 */
2292 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 37);
2293 else
2294 /*
2295 * For SVS set unipro core clock cycles to 75 and
2296 * clear clock divider
2297 */
2298 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
2299
2300 return err;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002301}
2302
2303static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
2304 bool scale_up, enum ufs_notify_change_status status)
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002305{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002306 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002307 int err = 0;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002308
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002309 switch (status) {
2310 case PRE_CHANGE:
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002311 if (scale_up)
2312 err = ufs_qcom_clk_scale_up_pre_change(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002313 break;
2314 case POST_CHANGE:
Subhash Jadavani9c807702017-04-01 00:35:51 -07002315 if (!scale_up)
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002316 err = ufs_qcom_clk_scale_down_post_change(hba);
2317
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002318 ufs_qcom_update_bus_bw_vote(host);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002319 break;
2320 default:
2321 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
2322 err = -EINVAL;
2323 break;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002324 }
2325
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002326 return err;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002327}
2328
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002329/*
2330 * This function should be called to restore the security configuration of UFS
2331 * register space after coming out of UFS host core power collapse.
2332 *
2333 * @hba: host controller instance
2334 * @restore_sec_cfg: Set "true" if secure configuration needs to be restored
2335 * and set "false" when secure configuration is lost.
2336 */
2337static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
2338{
2339 return 0;
2340}
2341
2342
2343static inline u32 ufs_qcom_get_scale_down_gear(struct ufs_hba *hba)
2344{
2345 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2346
2347 if (ufs_qcom_cap_svs2(host))
2348 return UFS_HS_G1;
 2349 	/* Default SVS support @ HS G2 frequencies */
2350 return UFS_HS_G2;
2351}
2352
2353void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
2354 void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
2355 char *str, void *priv))
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002356{
2357 u32 reg;
2358 struct ufs_qcom_host *host;
2359
2360 if (unlikely(!hba)) {
2361 pr_err("%s: hba is NULL\n", __func__);
2362 return;
2363 }
2364 if (unlikely(!print_fn)) {
2365 dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
2366 return;
2367 }
2368
2369 host = ufshcd_get_variant(hba);
2370 if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
2371 return;
2372
2373 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
2374 print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
2375
2376 reg = ufshcd_readl(hba, REG_UFS_CFG1);
2377 reg |= UFS_BIT(17);
2378 ufshcd_writel(hba, reg, REG_UFS_CFG1);
2379
2380 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
2381 print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
2382
2383 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
2384 print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
2385
2386 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
2387 print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
2388
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002389 /* clear bit 17 - UTP_DBG_RAMS_EN */
2390 ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002391
2392 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
2393 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
2394
2395 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
2396 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
2397
2398 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
2399 print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
2400
2401 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
2402 print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
2403
2404 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
2405 print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
2406
2407 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
2408 print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
2409
2410 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
2411 print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
2412}
2413
2414static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
2415{
Subhash Jadavani9c807702017-04-01 00:35:51 -07002416 if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
2417 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
2418 UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002419 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002420 } else {
2421 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002422 ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002423 }
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002424}
2425
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002426static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
2427{
2428 /* provide a legal default configuration */
Subhash Jadavani9c807702017-04-01 00:35:51 -07002429 host->testbus.select_major = TSTBUS_UNIPRO;
2430 host->testbus.select_minor = 37;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002431}
2432
2433static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
2434{
2435 if (host->testbus.select_major >= TSTBUS_MAX) {
2436 dev_err(host->hba->dev,
 2437 			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
2438 __func__, host->testbus.select_major);
2439 return false;
2440 }
2441
2442 /*
 2443 	 * Not performing a check of select_minor against each individual
 2444 	 * select_major mapping, since there is no harm in configuring a
 2445 	 * non-existent select_minor.
2446 */
Subhash Jadavani9c807702017-04-01 00:35:51 -07002447 if (host->testbus.select_minor > 0xFF) {
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002448 dev_err(host->hba->dev,
2449 "%s: 0x%05X is not a legal testbus option\n",
2450 __func__, host->testbus.select_minor);
2451 return false;
2452 }
2453
2454 return true;
2455}
2456
Subhash Jadavani9c807702017-04-01 00:35:51 -07002457/*
2458 * The caller of this function must make sure that the controller
2459 * is out of runtime suspend and appropriate clocks are enabled
2460 * before accessing.
2461 */
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002462int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
2463{
2464 int reg;
2465 int offset;
2466 u32 mask = TEST_BUS_SUB_SEL_MASK;
2467
2468 if (!host)
2469 return -EINVAL;
2470
2471 if (!ufs_qcom_testbus_cfg_is_ok(host))
2472 return -EPERM;
2473
2474 switch (host->testbus.select_major) {
2475 case TSTBUS_UAWM:
2476 reg = UFS_TEST_BUS_CTRL_0;
2477 offset = 24;
2478 break;
2479 case TSTBUS_UARM:
2480 reg = UFS_TEST_BUS_CTRL_0;
2481 offset = 16;
2482 break;
2483 case TSTBUS_TXUC:
2484 reg = UFS_TEST_BUS_CTRL_0;
2485 offset = 8;
2486 break;
2487 case TSTBUS_RXUC:
2488 reg = UFS_TEST_BUS_CTRL_0;
2489 offset = 0;
2490 break;
2491 case TSTBUS_DFC:
2492 reg = UFS_TEST_BUS_CTRL_1;
2493 offset = 24;
2494 break;
2495 case TSTBUS_TRLUT:
2496 reg = UFS_TEST_BUS_CTRL_1;
2497 offset = 16;
2498 break;
2499 case TSTBUS_TMRLUT:
2500 reg = UFS_TEST_BUS_CTRL_1;
2501 offset = 8;
2502 break;
2503 case TSTBUS_OCSC:
2504 reg = UFS_TEST_BUS_CTRL_1;
2505 offset = 0;
2506 break;
2507 case TSTBUS_WRAPPER:
2508 reg = UFS_TEST_BUS_CTRL_2;
2509 offset = 16;
2510 break;
2511 case TSTBUS_COMBINED:
2512 reg = UFS_TEST_BUS_CTRL_2;
2513 offset = 8;
2514 break;
2515 case TSTBUS_UTP_HCI:
2516 reg = UFS_TEST_BUS_CTRL_2;
2517 offset = 0;
2518 break;
2519 case TSTBUS_UNIPRO:
2520 reg = UFS_UNIPRO_CFG;
Subhash Jadavani9c807702017-04-01 00:35:51 -07002521 offset = 20;
2522 mask = 0xFFF;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002523 break;
2524 /*
2525 * No need for a default case, since
2526 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
2527 * is legal
2528 */
2529 }
2530 mask <<= offset;
2531
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002532 ufshcd_rmwl(host->hba, TEST_BUS_SEL,
2533 (u32)host->testbus.select_major << 19,
2534 REG_UFS_CFG1);
2535 ufshcd_rmwl(host->hba, mask,
2536 (u32)host->testbus.select_minor << offset,
2537 reg);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002538 ufs_qcom_enable_test_bus(host);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002539 /*
2540 * Make sure the test bus configuration is
2541 * committed before returning.
2542 */
2543 mb();
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002544
2545 return 0;
2546}
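/*
 * Illustrative call sequence (mirroring ufs_qcom_get_default_testbus_cfg()),
 * assuming the controller is powered and clocked as required above:
 *
 *	host->testbus.select_major = TSTBUS_UNIPRO;
 *	host->testbus.select_minor = 37;
 *	ufs_qcom_testbus_config(host);
 *
 * This programs the major selector into REG_UFS_CFG1 (TEST_BUS_SEL field)
 * and the minor selector into UFS_UNIPRO_CFG at bit offset 20, then enables
 * the test bus via ufs_qcom_enable_test_bus().
 */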
2547
2548static void ufs_qcom_testbus_read(struct ufs_hba *hba)
2549{
2550 ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
2551}
2552
Subhash Jadavani9c807702017-04-01 00:35:51 -07002553static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002554{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002555 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002556 u32 *testbus = NULL;
2557 int i, nminor = 256, testbus_len = nminor * sizeof(u32);
2558
2559 testbus = kmalloc(testbus_len, GFP_KERNEL);
2560 if (!testbus)
2561 return;
2562
2563 host->testbus.select_major = TSTBUS_UNIPRO;
2564 for (i = 0; i < nminor; i++) {
2565 host->testbus.select_minor = i;
2566 ufs_qcom_testbus_config(host);
2567 testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
2568 }
2569 print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
2570 16, 4, testbus, testbus_len, false);
2571 kfree(testbus);
2572}
2573
2574static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
2575{
2576 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2577 struct phy *phy = host->generic_phy;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002578
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002579 ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
2580 "HCI Vendor Specific Registers ");
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002581 ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002582
2583 if (no_sleep)
2584 return;
2585
2586 /* sleep a bit intermittently as we are dumping too much data */
2587 usleep_range(1000, 1100);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002588 ufs_qcom_testbus_read(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002589 usleep_range(1000, 1100);
2590 ufs_qcom_print_unipro_testbus(hba);
2591 usleep_range(1000, 1100);
2592 ufs_qcom_phy_dbg_register_dump(phy);
2593 usleep_range(1000, 1100);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002594 ufs_qcom_ice_print_regs(host);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002595}
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002596
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002597/**
2598 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
2599 *
2600 * The variant operations configure the necessary controller and PHY
2601 * handshake during initialization.
2602 */
Yaniv Gardi47555a52015-10-28 13:15:49 +02002603static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002604 .init = ufs_qcom_init,
2605 .exit = ufs_qcom_exit,
Yaniv Gardiae977582015-05-17 18:55:06 +03002606 .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002607 .clk_scale_notify = ufs_qcom_clk_scale_notify,
2608 .setup_clocks = ufs_qcom_setup_clocks,
2609 .hce_enable_notify = ufs_qcom_hce_enable_notify,
2610 .link_startup_notify = ufs_qcom_link_startup_notify,
2611 .pwr_change_notify = ufs_qcom_pwr_change_notify,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002612 .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002613 .suspend = ufs_qcom_suspend,
2614 .resume = ufs_qcom_resume,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002615 .full_reset = ufs_qcom_full_reset,
2616 .update_sec_cfg = ufs_qcom_update_sec_cfg,
2617 .get_scale_down_gear = ufs_qcom_get_scale_down_gear,
Subhash Jadavani9c807702017-04-01 00:35:51 -07002618 .set_bus_vote = ufs_qcom_set_bus_vote,
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002619 .dbg_register_dump = ufs_qcom_dump_dbg_regs,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002620#ifdef CONFIG_DEBUG_FS
2621 .add_debugfs = ufs_qcom_dbg_add_debugfs,
2622#endif
2623};
2624
2625static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
2626 .crypto_req_setup = ufs_qcom_crypto_req_setup,
2627 .crypto_engine_cfg_start = ufs_qcom_crytpo_engine_cfg_start,
2628 .crypto_engine_cfg_end = ufs_qcom_crytpo_engine_cfg_end,
2629 .crypto_engine_reset = ufs_qcom_crytpo_engine_reset,
2630 .crypto_engine_get_status = ufs_qcom_crypto_engine_get_status,
2631};
2632
2633static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
2634 .req_start = ufs_qcom_pm_qos_req_start,
2635 .req_end = ufs_qcom_pm_qos_req_end,
2636};
2637
2638static struct ufs_hba_variant ufs_hba_qcom_variant = {
2639 .name = "qcom",
2640 .vops = &ufs_hba_qcom_vops,
2641 .crypto_vops = &ufs_hba_crypto_variant_ops,
2642 .pm_qos_vops = &ufs_hba_pm_qos_variant_ops,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002643};
Yaniv Gardifb819ee2015-10-28 13:15:45 +02002644
Yaniv Gardi47555a52015-10-28 13:15:49 +02002645/**
2646 * ufs_qcom_probe - probe routine of the driver
2647 * @pdev: pointer to Platform device handle
2648 *
2649 * Return zero for success and non-zero for failure
2650 */
2651static int ufs_qcom_probe(struct platform_device *pdev)
2652{
2653 int err;
2654 struct device *dev = &pdev->dev;
2655
2656 /* Perform generic probe */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002657 err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_variant);
Yaniv Gardi47555a52015-10-28 13:15:49 +02002658 if (err)
2659 dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
2660
2661 return err;
2662}
2663
2664/**
2665 * ufs_qcom_remove - set driver_data of the device to NULL
2666 * @pdev: pointer to platform device handle
2667 *
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002668 * Always return 0
Yaniv Gardi47555a52015-10-28 13:15:49 +02002669 */
2670static int ufs_qcom_remove(struct platform_device *pdev)
2671{
2672 struct ufs_hba *hba = platform_get_drvdata(pdev);
2673
2674 pm_runtime_get_sync(&(pdev)->dev);
2675 ufshcd_remove(hba);
2676 return 0;
2677}
2678
2679static const struct of_device_id ufs_qcom_of_match[] = {
2680 { .compatible = "qcom,ufshc"},
2681 {},
2682};
2683
2684static const struct dev_pm_ops ufs_qcom_pm_ops = {
2685 .suspend = ufshcd_pltfrm_suspend,
2686 .resume = ufshcd_pltfrm_resume,
2687 .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
2688 .runtime_resume = ufshcd_pltfrm_runtime_resume,
2689 .runtime_idle = ufshcd_pltfrm_runtime_idle,
2690};
2691
2692static struct platform_driver ufs_qcom_pltform = {
2693 .probe = ufs_qcom_probe,
2694 .remove = ufs_qcom_remove,
2695 .shutdown = ufshcd_pltfrm_shutdown,
2696 .driver = {
2697 .name = "ufshcd-qcom",
2698 .pm = &ufs_qcom_pm_ops,
2699 .of_match_table = of_match_ptr(ufs_qcom_of_match),
2700 },
2701};
2702module_platform_driver(ufs_qcom_pltform);
2703
Yaniv Gardifb819ee2015-10-28 13:15:45 +02002704MODULE_LICENSE("GPL v2");