clk: msm: Add snapshot of clock framework files

This is a snapshot of the clock framework files as of msm-3.18
'commit c0b3f609196f ("ARM: dts: msm: Remove GPU mempool
for msm8909")'.

Below is a brief description of the additional changes made:
1. Add a COMMON_CLK_MSM config flag for conditional compilation of the
   common files shared between the COMMON_CLK_MSM and COMMON_CLK_QCOM
   clock frameworks.
2. Add reset controller framework files for BCR operation.
3. Add conditional compilation support for the FTRACE clock functions
   to maintain compatibility with clock frameworks based on either
   COMMON_CLK_MSM or COMMON_CLK_QCOM.
4. Add files for GDSC operation.

Change-Id: Ia5688600ca8e548beb15745d3ce938fdf41f82de
Signed-off-by: Shefali Jain <shefjain@codeaurora.org>
diff --git a/Documentation/devicetree/bindings/clock/qcom,msm-clock-controller.txt b/Documentation/devicetree/bindings/clock/qcom,msm-clock-controller.txt
new file mode 100644
index 0000000..ef7d9c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,msm-clock-controller.txt
@@ -0,0 +1,22 @@
+Qualcomm Technologies MSM Clock Controller
+
+Required properties :
+- compatible : shall contain "qcom,msm-clock-controller"
+- reg : shall contain base register location and length
+- reg-names: names of registers listed in the same order as in
+		the reg property.
+- #clock-cells : shall contain 1
+- #reset-cells : shall contain 1
+
+Optional properties :
+- vdd_<rail>-supply: The logic rail supply.
+
+Example:
+	clock_gcc: qcom,gcc@1800000 {
+		compatible = "qcom,msm-clock-controller";
+		reg = <0x1800000 0x80000>;
+		reg-names = "cc-base";
+		#clock-cells = <1>;
+		clock-names = "a7_debug_clk";
+		clocks = <&clock_a7pll clk_a7_debug_mux>;
+	};
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 6a8ac04..f9cc7c8 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -212,3 +212,5 @@
 source "drivers/clk/uniphier/Kconfig"
 
 endmenu
+
+source "drivers/clk/msm/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 42042c0..4fdbebb 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,7 +1,7 @@
 # common clock types
 obj-$(CONFIG_HAVE_CLK)		+= clk-devres.o
 obj-$(CONFIG_CLKDEV_LOOKUP)	+= clkdev.o
-obj-$(CONFIG_COMMON_CLK)	+= clk.o
+obj-$(CONFIG_OF)	        += clk.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-divider.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-fixed-factor.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-fixed-rate.o
@@ -92,3 +92,4 @@
 endif
 obj-$(CONFIG_ARCH_ZX)			+= zte/
 obj-$(CONFIG_ARCH_ZYNQ)			+= zynq/
+obj-$(CONFIG_ARCH_QCOM)			+= msm/
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 4f2fb77..020e8ad 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -32,6 +32,8 @@
 
 #include "clk.h"
 
+#if defined(CONFIG_COMMON_CLK)
+
 static DEFINE_SPINLOCK(enable_lock);
 static DEFINE_MUTEX(prepare_lock);
 
@@ -4032,6 +4034,8 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
 
+#endif /* CONFIG_COMMON_CLK */
+
 #ifdef CONFIG_OF
 /**
  * struct of_clk_provider - Clock provider registration structure
@@ -4069,6 +4073,8 @@ struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
 }
 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
 
+#if defined(CONFIG_COMMON_CLK)
+
 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
 {
 	struct clk_onecell_data *clk_data = data;
@@ -4098,6 +4104,29 @@ of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
 }
 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
 
+#endif /* CONFIG_COMMON_CLK */
+
+/**
+ * of_clk_del_provider() - Remove a previously registered clock provider
+ * @np: Device node pointer associated with clock provider
+ */
+void of_clk_del_provider(struct device_node *np)
+{
+	struct of_clk_provider *cp;
+
+	mutex_lock(&of_clk_mutex);
+	list_for_each_entry(cp, &of_clk_providers, link) {
+		if (cp->node == np) {
+			list_del(&cp->link);
+			of_node_put(cp->node);
+			kfree(cp);
+			break;
+		}
+	}
+	mutex_unlock(&of_clk_mutex);
+}
+EXPORT_SYMBOL_GPL(of_clk_del_provider);
+
 /**
  * of_clk_add_provider() - Register a clock provider for a node
  * @np: Device node pointer associated with clock provider
@@ -4168,27 +4197,6 @@ int of_clk_add_hw_provider(struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
 
-/**
- * of_clk_del_provider() - Remove a previously registered clock provider
- * @np: Device node pointer associated with clock provider
- */
-void of_clk_del_provider(struct device_node *np)
-{
-	struct of_clk_provider *cp;
-
-	mutex_lock(&of_clk_mutex);
-	list_for_each_entry(cp, &of_clk_providers, link) {
-		if (cp->node == np) {
-			list_del(&cp->link);
-			of_node_put(cp->node);
-			kfree(cp);
-			break;
-		}
-	}
-	mutex_unlock(&of_clk_mutex);
-}
-EXPORT_SYMBOL_GPL(of_clk_del_provider);
-
 static struct clk_hw *
 __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
 			      struct of_phandle_args *clkspec)
@@ -4317,8 +4325,10 @@ const char *of_clk_get_parent_name(struct device_node *np, int index)
 			else
 				clk_name = NULL;
 		} else {
+#if defined(CONFIG_COMMON_CLK)
 			clk_name = __clk_get_name(clk);
 			clk_put(clk);
+#endif
 		}
 	}
 
@@ -4349,6 +4359,8 @@ int of_clk_parent_fill(struct device_node *np, const char **parents,
 }
 EXPORT_SYMBOL_GPL(of_clk_parent_fill);
 
+#if defined(CONFIG_COMMON_CLK)
+
 struct clock_provider {
 	of_clk_init_cb_t clk_init_cb;
 	struct device_node *np;
@@ -4499,4 +4511,7 @@ void __init of_clk_init(const struct of_device_id *matches)
 			force = true;
 	}
 }
+
+#endif /* CONFIG_COMMON_CLK */
+
 #endif
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index a7d0981..9776a1c 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -12,7 +12,7 @@
 struct clk_hw;
 struct clk_core;
 
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+#if defined(CONFIG_OF)
 struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
 				       const char *dev_id, const char *con_id);
 #endif
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index bb8a77a..94dcad5 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -27,7 +27,7 @@
 static LIST_HEAD(clocks);
 static DEFINE_MUTEX(clocks_mutex);
 
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+#if defined(CONFIG_OF)
 static struct clk *__of_clk_get(struct device_node *np, int index,
 			       const char *dev_id, const char *con_id)
 {
@@ -73,14 +73,10 @@ static struct clk *__of_clk_get_by_name(struct device_node *np,
 		if (name)
 			index = of_property_match_string(np, "clock-names", name);
 		clk = __of_clk_get(np, index, dev_id, name);
-		if (!IS_ERR(clk)) {
+		if (!IS_ERR(clk))
 			break;
-		} else if (name && index >= 0) {
-			if (PTR_ERR(clk) != -EPROBE_DEFER)
-				pr_err("ERROR: could not get clock %s:%s(%i)\n",
-					np->full_name, name ? name : "", index);
+		else if (name && index >= 0)
 			return clk;
-		}
 
 		/*
 		 * No matching clock found on this node.  If the parent node
@@ -190,7 +186,7 @@ struct clk *clk_get_sys(const char *dev_id, const char *con_id)
 out:
 	mutex_unlock(&clocks_mutex);
 
-	return cl ? clk : ERR_PTR(-ENOENT);
+	return cl ? cl->clk : ERR_PTR(-ENOENT);
 }
 EXPORT_SYMBOL(clk_get_sys);
 
diff --git a/drivers/clk/msm/Kconfig b/drivers/clk/msm/Kconfig
new file mode 100644
index 0000000..16f8c32
--- /dev/null
+++ b/drivers/clk/msm/Kconfig
@@ -0,0 +1,18 @@
+config COMMON_CLK_MSM
+	tristate "Support for MSM clock controllers"
+	depends on OF
+	depends on ARCH_QCOM
+	select RATIONAL
+	help
+	  This supports the clock controllers used by MSM devices, such as
+	  the global, multimedia (MMSS) and graphics (GPU) clock controllers.
+	  Say Y if you want to support the clocks exposed by MSM on
+	  platforms such as msm8953.
+
+config MSM_CLK_CONTROLLER_V2
+	bool "QTI clock driver"
+	depends on COMMON_CLK_MSM
+	---help---
+	   Generate clock data structures from definitions found in
+	   device tree.
+
diff --git a/drivers/clk/msm/Makefile b/drivers/clk/msm/Makefile
new file mode 100644
index 0000000..1117579
--- /dev/null
+++ b/drivers/clk/msm/Makefile
@@ -0,0 +1,16 @@
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-dummy.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-generic.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-local2.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-pll.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-alpha-pll.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-rpm.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-voter.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= reset.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-debug.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= gdsc.o
+
+obj-$(CONFIG_MSM_CLK_CONTROLLER_V2)	+= msm-clock-controller.o
+
+
+
diff --git a/drivers/clk/msm/clock-alpha-pll.c b/drivers/clk/msm/clock-alpha-pll.c
new file mode 100644
index 0000000..dbe8d8e
--- /dev/null
+++ b/drivers/clk/msm/clock-alpha-pll.c
@@ -0,0 +1,1265 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <soc/qcom/clock-alpha-pll.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+#include "clock.h"
+
+#define WAIT_MAX_LOOPS 100
+
+#define MODE_REG(pll)		(*pll->base + pll->offset + 0x0)
+#define LOCK_REG(pll)		(*pll->base + pll->offset + 0x0)
+#define ACTIVE_REG(pll)		(*pll->base + pll->offset + 0x0)
+#define UPDATE_REG(pll)		(*pll->base + pll->offset + 0x0)
+#define L_REG(pll)		(*pll->base + pll->offset + 0x4)
+#define A_REG(pll)		(*pll->base + pll->offset + 0x8)
+#define VCO_REG(pll)		(*pll->base + pll->offset + 0x10)
+#define ALPHA_EN_REG(pll)	(*pll->base + pll->offset + 0x10)
+#define OUTPUT_REG(pll)		(*pll->base + pll->offset + 0x10)
+#define VOTE_REG(pll)		(*pll->base + pll->fsm_reg_offset)
+#define USER_CTL_LO_REG(pll)	(*pll->base + pll->offset + 0x10)
+#define USER_CTL_HI_REG(pll)	(*pll->base + pll->offset + 0x14)
+#define CONFIG_CTL_REG(pll)	(*pll->base + pll->offset + 0x18)
+#define TEST_CTL_LO_REG(pll)	(*pll->base + pll->offset + 0x1c)
+#define TEST_CTL_HI_REG(pll)	(*pll->base + pll->offset + 0x20)
+
+#define PLL_BYPASSNL 0x2
+#define PLL_RESET_N  0x4
+#define PLL_OUTCTRL  0x1
+#define PLL_LATCH_INTERFACE	BIT(11)
+
+#define FABIA_CONFIG_CTL_REG(pll)	(*pll->base + pll->offset + 0x14)
+#define FABIA_USER_CTL_LO_REG(pll)	(*pll->base + pll->offset + 0xc)
+#define FABIA_USER_CTL_HI_REG(pll)	(*pll->base + pll->offset + 0x10)
+#define FABIA_TEST_CTL_LO_REG(pll)	(*pll->base + pll->offset + 0x1c)
+#define FABIA_TEST_CTL_HI_REG(pll)	(*pll->base + pll->offset + 0x20)
+#define FABIA_L_REG(pll)		(*pll->base + pll->offset + 0x4)
+#define FABIA_FRAC_REG(pll)		(*pll->base + pll->offset + 0x38)
+#define FABIA_PLL_OPMODE(pll)		(*pll->base + pll->offset + 0x2c)
+#define FABIA_FRAC_OFF(pll)		(*pll->base + pll->fabia_frac_offset)
+
+#define FABIA_PLL_STANDBY	0x0
+#define FABIA_PLL_RUN		0x1
+#define FABIA_PLL_OUT_MAIN	0x7
+#define FABIA_RATE_MARGIN	500
+#define ALPHA_PLL_ACK_LATCH	BIT(29)
+#define ALPHA_PLL_HW_UPDATE_LOGIC_BYPASS	BIT(23)
+
+/*
+ * Even though 40 bits are present, use only 32 for ease of calculation.
+ */
+#define ALPHA_REG_BITWIDTH 40
+#define ALPHA_BITWIDTH 32
+#define FABIA_ALPHA_BITWIDTH 16
+
+/*
+ * Enable/disable registers could be shared among PLLs when FSM voting
+ * is used. This lock protects against potential race when multiple
+ * PLLs are being enabled/disabled together.
+ */
+static DEFINE_SPINLOCK(alpha_pll_reg_lock);
+
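+/*
+ * The PLL output rate is parent_rate * (L + alpha / 2^alpha_bw), where L is
+ * the integer multiplier and alpha is the fractional multiplier spread over
+ * alpha_bw bits (32 for regular alpha PLLs, 16 for Fabia PLLs).
+ */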
+static unsigned long compute_rate(struct alpha_pll_clk *pll,
+				u32 l_val, u32 a_val)
+{
+	u64 rate, parent_rate;
+	int alpha_bw = ALPHA_BITWIDTH;
+
+	if (pll->is_fabia)
+		alpha_bw = FABIA_ALPHA_BITWIDTH;
+
+	parent_rate = clk_get_rate(pll->c.parent);
+	rate = parent_rate * l_val;
+	rate += (parent_rate * a_val) >> alpha_bw;
+
+	return rate;
+}
+
+static bool is_locked(struct alpha_pll_clk *pll)
+{
+	u32 reg = readl_relaxed(LOCK_REG(pll));
+	u32 mask = pll->masks->lock_mask;
+
+	return (reg & mask) == mask;
+}
+
+static bool is_active(struct alpha_pll_clk *pll)
+{
+	u32 reg = readl_relaxed(ACTIVE_REG(pll));
+	u32 mask = pll->masks->active_mask;
+
+	return (reg & mask) == mask;
+}
+
+/*
+ * Check active_flag if PLL is in FSM mode, otherwise check lock_det
+ * bit. This function assumes PLLs are already configured to the
+ * right mode.
+ */
+static bool update_finish(struct alpha_pll_clk *pll)
+{
+	if (pll->fsm_en_mask)
+		return is_active(pll);
+	else
+		return is_locked(pll);
+}
+
+static int wait_for_update(struct alpha_pll_clk *pll)
+{
+	int count;
+
+	for (count = WAIT_MAX_LOOPS; count > 0; count--) {
+		if (update_finish(pll))
+			break;
+		udelay(1);
+	}
+
+	if (!count) {
+		pr_err("%s didn't lock after enabling it!\n", pll->c.dbg_name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __alpha_pll_vote_enable(struct alpha_pll_clk *pll)
+{
+	u32 ena;
+
+	ena = readl_relaxed(VOTE_REG(pll));
+	ena |= pll->fsm_en_mask;
+	writel_relaxed(ena, VOTE_REG(pll));
+
+	/* Make sure enable request goes through before waiting for update */
+	mb();
+
+	return wait_for_update(pll);
+}
+
+static int __alpha_pll_enable(struct alpha_pll_clk *pll, int enable_output)
+{
+	int rc;
+	u32 mode;
+
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset.
+	 */
+	mb();
+	udelay(5);
+
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	rc = wait_for_update(pll);
+	if (rc < 0)
+		return rc;
+
+	/* Enable PLL output. */
+	if (enable_output) {
+		mode |= PLL_OUTCTRL;
+		writel_relaxed(mode, MODE_REG(pll));
+	}
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+	return 0;
+}
+
+static void setup_alpha_pll_values(u64 a_val, u32 l_val, u32 vco_val,
+				struct alpha_pll_clk *pll)
+{
+	struct alpha_pll_masks *masks = pll->masks;
+	u32 regval;
+
+	a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+	writel_relaxed(l_val, L_REG(pll));
+	__iowrite32_copy(A_REG(pll), &a_val, 2);
+
+	if (vco_val != UINT_MAX) {
+		regval = readl_relaxed(VCO_REG(pll));
+		regval &= ~(masks->vco_mask << masks->vco_shift);
+		regval |= vco_val << masks->vco_shift;
+		writel_relaxed(regval, VCO_REG(pll));
+	}
+
+	regval = readl_relaxed(ALPHA_EN_REG(pll));
+	regval |= masks->alpha_en_mask;
+	writel_relaxed(regval, ALPHA_EN_REG(pll));
+}
+
+static int alpha_pll_enable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+	int rc;
+
+	if (unlikely(!pll->inited))
+		__init_alpha_pll(c);
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		rc = __alpha_pll_vote_enable(pll);
+	else
+		rc = __alpha_pll_enable(pll, true);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+
+	return rc;
+}
+
+static int __calibrate_alpha_pll(struct alpha_pll_clk *pll);
+static int dyna_alpha_pll_enable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+	int rc;
+
+	if (unlikely(!pll->inited))
+		__init_alpha_pll(c);
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+
+	if (pll->slew)
+		__calibrate_alpha_pll(pll);
+
+	if (pll->fsm_en_mask)
+		rc = __alpha_pll_vote_enable(pll);
+	else
+		rc = __alpha_pll_enable(pll, true);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+
+	return rc;
+}
+
+#define PLL_OFFLINE_REQ_BIT BIT(7)
+#define PLL_FSM_ENA_BIT BIT(20)
+#define PLL_OFFLINE_ACK_BIT BIT(28)
+#define PLL_ACTIVE_FLAG BIT(30)
+
+static int alpha_pll_enable_hwfsm(struct clk *c)
+{
+	u32 mode;
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+
+	/* Re-enable HW FSM mode, clear OFFLINE request */
+	mode = readl_relaxed(MODE_REG(pll));
+	mode |= PLL_FSM_ENA_BIT;
+	mode &= ~PLL_OFFLINE_REQ_BIT;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Make sure enable request goes through before waiting for update */
+	mb();
+
+	if (wait_for_update(pll) < 0)
+		panic("PLL %s failed to lock", c->dbg_name);
+
+	return 0;
+}
+
+static void alpha_pll_disable_hwfsm(struct clk *c)
+{
+	u32 mode;
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+
+	/* Request PLL_OFFLINE and wait for ack */
+	mode = readl_relaxed(MODE_REG(pll));
+	writel_relaxed(mode | PLL_OFFLINE_REQ_BIT, MODE_REG(pll));
+	while (!(readl_relaxed(MODE_REG(pll)) & PLL_OFFLINE_ACK_BIT))
+		;
+
+	/* Disable HW FSM */
+	mode = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_FSM_ENA_BIT;
+	if (pll->offline_bit_workaround)
+		mode &= ~PLL_OFFLINE_REQ_BIT;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	while (readl_relaxed(MODE_REG(pll)) & PLL_ACTIVE_FLAG)
+		;
+}
+
+static void __alpha_pll_vote_disable(struct alpha_pll_clk *pll)
+{
+	u32 ena;
+
+	ena = readl_relaxed(VOTE_REG(pll));
+	ena &= ~pll->fsm_en_mask;
+	writel_relaxed(ena, VOTE_REG(pll));
+}
+
+static void __alpha_pll_disable(struct alpha_pll_clk *pll)
+{
+	u32 mode;
+
+	mode = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Delay of 2 output clock ticks required until output is disabled */
+	mb();
+	udelay(1);
+
+	mode &= ~(PLL_BYPASSNL | PLL_RESET_N);
+	writel_relaxed(mode, MODE_REG(pll));
+}
+
+static void alpha_pll_disable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_disable(pll);
+	else
+		__alpha_pll_disable(pll);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+}
+
+static void dyna_alpha_pll_disable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_disable(pll);
+	else
+		__alpha_pll_disable(pll);
+
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+}
+
+static u32 find_vco(struct alpha_pll_clk *pll, unsigned long rate)
+{
+	unsigned long i;
+	struct alpha_pll_vco_tbl *v = pll->vco_tbl;
+
+	for (i = 0; i < pll->num_vco; i++) {
+		if (rate >= v[i].min_freq && rate <= v[i].max_freq)
+			return v[i].vco_val;
+	}
+
+	return -EINVAL;
+}
+
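+/*
+ * Split the requested rate into an integer L value and a fractional alpha
+ * value: L = rate / parent_rate and alpha = (remainder << alpha_bw) /
+ * parent_rate, optionally rounded up. Returns the rate that those values
+ * actually produce.
+ */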
+static unsigned long __calc_values(struct alpha_pll_clk *pll,
+		unsigned long rate, int *l_val, u64 *a_val, bool round_up)
+{
+	u32 parent_rate;
+	u64 remainder;
+	u64 quotient;
+	unsigned long freq_hz;
+	int alpha_bw = ALPHA_BITWIDTH;
+
+	parent_rate = clk_get_rate(pll->c.parent);
+	quotient = rate;
+	remainder = do_div(quotient, parent_rate);
+	*l_val = quotient;
+
+	if (!remainder) {
+		*a_val = 0;
+		return rate;
+	}
+
+	if (pll->is_fabia)
+		alpha_bw = FABIA_ALPHA_BITWIDTH;
+
+	/* Upper ALPHA_BITWIDTH bits of Alpha */
+	quotient = remainder << alpha_bw;
+	remainder = do_div(quotient, parent_rate);
+
+	if (remainder && round_up)
+		quotient++;
+
+	*a_val = quotient;
+	freq_hz = compute_rate(pll, *l_val, *a_val);
+	return freq_hz;
+}
+
+static unsigned long round_rate_down(struct alpha_pll_clk *pll,
+		unsigned long rate, int *l_val, u64 *a_val)
+{
+	return __calc_values(pll, rate, l_val, a_val, false);
+}
+
+static unsigned long round_rate_up(struct alpha_pll_clk *pll,
+		unsigned long rate, int *l_val, u64 *a_val)
+{
+	return __calc_values(pll, rate, l_val, a_val, true);
+}
+
+static bool dynamic_update_finish(struct alpha_pll_clk *pll)
+{
+	u32 reg = readl_relaxed(UPDATE_REG(pll));
+	u32 mask = pll->masks->update_mask;
+
+	return (reg & mask) == 0;
+}
+
+static int wait_for_dynamic_update(struct alpha_pll_clk *pll)
+{
+	int count;
+
+	for (count = WAIT_MAX_LOOPS; count > 0; count--) {
+		if (dynamic_update_finish(pll))
+			break;
+		udelay(1);
+	}
+
+	if (!count) {
+		pr_err("%s didn't latch after updating it!\n", pll->c.dbg_name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dyna_alpha_pll_dynamic_update(struct alpha_pll_clk *pll)
+{
+	struct alpha_pll_masks *masks = pll->masks;
+	u32 regval;
+	int rc;
+
+	regval = readl_relaxed(UPDATE_REG(pll));
+	regval |= masks->update_mask;
+	writel_relaxed(regval, UPDATE_REG(pll));
+
+	rc = wait_for_dynamic_update(pll);
+	if (rc < 0)
+		return rc;
+
+	/*
+	 * HPG mandates a wait of at least 570ns before polling the LOCK
+	 * detect bit. Have a delay of 1us just to be safe.
+	 */
+	mb();
+	udelay(1);
+
+	rc = wait_for_update(pll);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+static int alpha_pll_set_rate(struct clk *c, unsigned long rate);
+static int dyna_alpha_pll_set_rate(struct clk *c, unsigned long rate)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long freq_hz, flags;
+	u32 l_val, vco_val;
+	u64 a_val;
+	int ret;
+
+	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+	if (freq_hz != rate) {
+		pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
+		return -EINVAL;
+	}
+
+	vco_val = find_vco(pll, freq_hz);
+
+	/*
+	 * Dynamic pll update will not support switching frequencies across
+	 * vco ranges. In those cases fall back to normal alpha set rate.
+	 */
+	if (pll->current_vco_val != vco_val) {
+		ret = alpha_pll_set_rate(c, rate);
+		if (!ret)
+			pll->current_vco_val = vco_val;
+		else
+			return ret;
+		return 0;
+	}
+
+	spin_lock_irqsave(&c->lock, flags);
+
+	a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+	writel_relaxed(l_val, L_REG(pll));
+	__iowrite32_copy(A_REG(pll), &a_val, 2);
+
+	/* Ensure that the write above goes through before proceeding. */
+	mb();
+
+	if (c->count)
+		dyna_alpha_pll_dynamic_update(pll);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+/*
+ * Slewing PLLs should be brought up at a frequency in the middle of the
+ * desired VCO range. So after bringing up the PLL at the calibration
+ * frequency, set it back to the desired frequency (the one set by the
+ * previous clk_set_rate).
+ */
+static int __calibrate_alpha_pll(struct alpha_pll_clk *pll)
+{
+	unsigned long calibration_freq, freq_hz;
+	struct alpha_pll_vco_tbl *vco_tbl = pll->vco_tbl;
+	u64 a_val;
+	u32 l_val, vco_val;
+	int rc;
+
+	vco_val = find_vco(pll, pll->c.rate);
+	if (IS_ERR_VALUE((unsigned long)vco_val)) {
+		pr_err("alpha pll: not in a valid vco range\n");
+		return -EINVAL;
+	}
+	/*
+	 * Since vco_sel is not allowed to change while the PLL is slewing,
+	 * the VCO table should have only one entry (index 0); use it to
+	 * find the calibration frequency.
+	 */
+	calibration_freq = (vco_tbl[0].min_freq +
+					vco_tbl[0].max_freq)/2;
+
+	freq_hz = round_rate_up(pll, calibration_freq, &l_val, &a_val);
+	if (freq_hz != calibration_freq) {
+		pr_err("alpha_pll: call clk_set_rate with rounded rates!\n");
+		return -EINVAL;
+	}
+
+	setup_alpha_pll_values(a_val, l_val, vco_tbl->vco_val, pll);
+
+	/* Bring up the PLL at the calibration frequency */
+	rc = __alpha_pll_enable(pll, false);
+	if (rc) {
+		pr_err("alpha pll calibration failed\n");
+		return rc;
+	}
+
+	/*
+	 * The PLL is already running at the calibration frequency,
+	 * so slew it to the previously set frequency.
+	 */
+	pr_debug("pll %s: setting back to required rate %lu\n", pll->c.dbg_name,
+					pll->c.rate);
+	freq_hz = round_rate_up(pll, pll->c.rate, &l_val, &a_val);
+	setup_alpha_pll_values(a_val, l_val, UINT_MAX, pll);
+	dyna_alpha_pll_dynamic_update(pll);
+
+	return 0;
+}
+
+static int alpha_pll_dynamic_update(struct alpha_pll_clk *pll)
+{
+	u32 regval;
+
+	/* Latch the input to the PLL */
+	regval = readl_relaxed(MODE_REG(pll));
+	regval |= pll->masks->update_mask;
+	writel_relaxed(regval, MODE_REG(pll));
+
+	/* Wait for 2 reference cycles before checking the ACK bit */
+	udelay(1);
+	if (!(readl_relaxed(MODE_REG(pll)) & ALPHA_PLL_ACK_LATCH)) {
+		WARN(1, "%s: PLL latch failed. Output may be unstable!\n",
+						pll->c.dbg_name);
+		return -EINVAL;
+	}
+
+	/* Return latch input to 0 */
+	regval = readl_relaxed(MODE_REG(pll));
+	regval &= ~pll->masks->update_mask;
+	writel_relaxed(regval, MODE_REG(pll));
+
+	/* Wait for PLL output to stabilize */
+	udelay(100);
+
+	return 0;
+}
+
+static int alpha_pll_set_rate(struct clk *c, unsigned long rate)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_masks *masks = pll->masks;
+	unsigned long flags = 0, freq_hz = 0;
+	u32 l_val, regval;
+	int vco_val;
+	u64 a_val;
+
+	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+	if (freq_hz != rate) {
+		pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
+		return -EINVAL;
+	}
+
+	vco_val = find_vco(pll, freq_hz);
+	if (IS_ERR_VALUE((unsigned long)vco_val)) {
+		pr_err("alpha pll: not in a valid vco range\n");
+		return -EINVAL;
+	}
+
+	if (pll->no_irq_dis)
+		spin_lock(&c->lock);
+	else
+		spin_lock_irqsave(&c->lock, flags);
+
+	/*
+	 * For PLLs that do not support dynamic programming (dynamic_update
+	 * is not set), ensure PLL is off before changing rate. For
+	 * optimization reasons, assume no downstream clock is actively
+	 * using it.
+	 */
+	if (c->count && !pll->dynamic_update)
+		c->ops->disable(c);
+
+	a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+	writel_relaxed(l_val, L_REG(pll));
+	__iowrite32_copy(A_REG(pll), &a_val, 2);
+
+	if (masks->vco_mask) {
+		regval = readl_relaxed(VCO_REG(pll));
+		regval &= ~(masks->vco_mask << masks->vco_shift);
+		regval |= vco_val << masks->vco_shift;
+		writel_relaxed(regval, VCO_REG(pll));
+	}
+
+	regval = readl_relaxed(ALPHA_EN_REG(pll));
+	regval |= masks->alpha_en_mask;
+	writel_relaxed(regval, ALPHA_EN_REG(pll));
+
+	if (c->count && pll->dynamic_update)
+		alpha_pll_dynamic_update(pll);
+
+	if (c->count && !pll->dynamic_update)
+		c->ops->enable(c);
+
+	if (pll->no_irq_dis)
+		spin_unlock(&c->lock);
+	else
+		spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+static long alpha_pll_round_rate(struct clk *c, unsigned long rate)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_vco_tbl *v = pll->vco_tbl;
+	int ret;
+	u32 l_val;
+	unsigned long freq_hz;
+	u64 a_val;
+	int i;
+
+	if (pll->no_prepared_reconfig && c->prepare_count)
+		return -EINVAL;
+
+	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+	if (rate < pll->min_supported_freq)
+		return pll->min_supported_freq;
+	if (pll->is_fabia)
+		return freq_hz;
+
+	ret = find_vco(pll, freq_hz);
+	if (!IS_ERR_VALUE((unsigned long)ret))
+		return freq_hz;
+
+	freq_hz = 0;
+	for (i = 0; i < pll->num_vco; i++) {
+		if (is_better_rate(rate, freq_hz, v[i].min_freq))
+			freq_hz = v[i].min_freq;
+		if (is_better_rate(rate, freq_hz, v[i].max_freq))
+			freq_hz = v[i].max_freq;
+	}
+	if (!freq_hz)
+		return -EINVAL;
+	return freq_hz;
+}
+
+static void update_vco_tbl(struct alpha_pll_clk *pll)
+{
+	int i, l_val;
+	u64 a_val;
+	unsigned long hz;
+
+	/* Round vco limits to valid rates */
+	for (i = 0; i < pll->num_vco; i++) {
+		hz = round_rate_up(pll, pll->vco_tbl[i].min_freq, &l_val,
+					&a_val);
+		pll->vco_tbl[i].min_freq = hz;
+
+		hz = round_rate_down(pll, pll->vco_tbl[i].max_freq, &l_val,
+					&a_val);
+		pll->vco_tbl[i].max_freq = hz;
+	}
+}
+
+/*
+ * Program bias count to be 0x6 (corresponds to 5us), and lock count
+ * bits to 0 (check lock_det for locking).
+ */
+static void __set_fsm_mode(void __iomem *mode_reg)
+{
+	u32 regval = readl_relaxed(mode_reg);
+
+	/* De-assert reset to FSM */
+	regval &= ~BIT(21);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program bias count */
+	regval &= ~BM(19, 14);
+	regval |= BVAL(19, 14, 0x6);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program lock count */
+	regval &= ~BM(13, 8);
+	regval |= BVAL(13, 8, 0x0);
+	writel_relaxed(regval, mode_reg);
+
+	/* Enable PLL FSM voting */
+	regval |= BIT(20);
+	writel_relaxed(regval, mode_reg);
+}
+
+static bool is_fsm_mode(void __iomem *mode_reg)
+{
+	return !!(readl_relaxed(mode_reg) & BIT(20));
+}
+
+void __init_alpha_pll(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_masks *masks = pll->masks;
+	u32 regval;
+
+	if (pll->config_ctl_val)
+		writel_relaxed(pll->config_ctl_val, CONFIG_CTL_REG(pll));
+
+	if (masks->output_mask && pll->enable_config) {
+		regval = readl_relaxed(OUTPUT_REG(pll));
+		regval &= ~masks->output_mask;
+		regval |= pll->enable_config;
+		writel_relaxed(regval, OUTPUT_REG(pll));
+	}
+
+	if (masks->post_div_mask) {
+		regval = readl_relaxed(USER_CTL_LO_REG(pll));
+		regval &= ~masks->post_div_mask;
+		regval |= pll->post_div_config;
+		writel_relaxed(regval, USER_CTL_LO_REG(pll));
+	}
+
+	if (pll->slew) {
+		regval = readl_relaxed(USER_CTL_HI_REG(pll));
+		regval &= ~PLL_LATCH_INTERFACE;
+		writel_relaxed(regval, USER_CTL_HI_REG(pll));
+	}
+
+	if (masks->test_ctl_lo_mask) {
+		regval = readl_relaxed(TEST_CTL_LO_REG(pll));
+		regval &= ~masks->test_ctl_lo_mask;
+		regval |= pll->test_ctl_lo_val;
+		writel_relaxed(regval, TEST_CTL_LO_REG(pll));
+	}
+
+	if (masks->test_ctl_hi_mask) {
+		regval = readl_relaxed(TEST_CTL_HI_REG(pll));
+		regval &= ~masks->test_ctl_hi_mask;
+		regval |= pll->test_ctl_hi_val;
+		writel_relaxed(regval, TEST_CTL_HI_REG(pll));
+	}
+
+	if (pll->fsm_en_mask)
+		__set_fsm_mode(MODE_REG(pll));
+
+	pll->inited = true;
+}
+
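+/*
+ * Take over a PLL that may already have been configured and enabled before
+ * this driver probed (e.g. by the bootloader): read back the L/alpha values
+ * to recover the current rate and report whether the PLL is running so the
+ * framework state matches the hardware.
+ */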
+static enum handoff alpha_pll_handoff(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_masks *masks = pll->masks;
+	u64 a_val;
+	u32 alpha_en, l_val, regval;
+
+	/* Set the PLL_HW_UPDATE_LOGIC_BYPASS bit before continuing */
+	if (pll->dynamic_update) {
+		regval = readl_relaxed(MODE_REG(pll));
+		regval |= ALPHA_PLL_HW_UPDATE_LOGIC_BYPASS;
+		writel_relaxed(regval, MODE_REG(pll));
+	}
+
+	update_vco_tbl(pll);
+
+	if (!is_locked(pll)) {
+		if (pll->slew) {
+			if (c->rate && dyna_alpha_pll_set_rate(c, c->rate))
+				WARN(1, "%s: Failed to configure rate\n",
+					c->dbg_name);
+		} else {
+			if (c->rate && alpha_pll_set_rate(c, c->rate))
+				WARN(1, "%s: Failed to configure rate\n",
+					c->dbg_name);
+		}
+		__init_alpha_pll(c);
+		return HANDOFF_DISABLED_CLK;
+	} else if (pll->fsm_en_mask && !is_fsm_mode(MODE_REG(pll))) {
+		WARN(1, "%s should be in FSM mode but is not\n", c->dbg_name);
+	}
+
+	l_val = readl_relaxed(L_REG(pll));
+	/* read u64 in two steps to satisfy alignment constraint */
+	a_val = readl_relaxed(A_REG(pll) + 0x4);
+	a_val = a_val << 32 | readl_relaxed(A_REG(pll));
+	/* get upper 32 bits */
+	a_val = a_val >> (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+	alpha_en = readl_relaxed(ALPHA_EN_REG(pll));
+	alpha_en &= masks->alpha_en_mask;
+	if (!alpha_en)
+		a_val = 0;
+
+	c->rate = compute_rate(pll, l_val, a_val);
+
+	/*
+	 * Unconditionally vote for the PLL; it might be on because of
+	 * another master's vote.
+	 */
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_enable(pll);
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static void __iomem *alpha_pll_list_registers(struct clk *clk, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(clk);
+	static struct clk_register_data data[] = {
+		{"PLL_MODE", 0x0},
+		{"PLL_L_VAL", 0x4},
+		{"PLL_ALPHA_VAL", 0x8},
+		{"PLL_ALPHA_VAL_U", 0xC},
+		{"PLL_USER_CTL", 0x10},
+		{"PLL_CONFIG_CTL", 0x18},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return MODE_REG(pll);
+}
+
+static int __fabia_alpha_pll_enable(struct alpha_pll_clk *pll)
+{
+	int rc;
+	u32 mode;
+
+	/* Disable PLL output */
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Set operation mode to STANDBY */
+	writel_relaxed(FABIA_PLL_STANDBY, FABIA_PLL_OPMODE(pll));
+
+	/* PLL should be in STANDBY mode before continuing */
+	mb();
+
+	/* Bring PLL out of reset */
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Set operation mode to RUN */
+	writel_relaxed(FABIA_PLL_RUN, FABIA_PLL_OPMODE(pll));
+
+	rc = wait_for_update(pll);
+	if (rc < 0)
+		return rc;
+
+	/* Enable the main PLL output */
+	mode  = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+	mode |= FABIA_PLL_OUT_MAIN;
+	writel_relaxed(mode, FABIA_USER_CTL_LO_REG(pll));
+
+	/* Enable PLL outputs */
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+	return 0;
+}
+
+static int fabia_alpha_pll_enable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		rc = __alpha_pll_vote_enable(pll);
+	else
+		rc = __fabia_alpha_pll_enable(pll);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+
+	return rc;
+}
+
+static void __fabia_alpha_pll_disable(struct alpha_pll_clk *pll)
+{
+	u32 mode;
+
+	/* Disable PLL outputs */
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Disable the main PLL output */
+	mode  = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+	mode &= ~FABIA_PLL_OUT_MAIN;
+	writel_relaxed(mode, FABIA_USER_CTL_LO_REG(pll));
+
+	/* Place the PLL in the OFF state */
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_RESET_N;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Place the PLL mode in STANDBY */
+	writel_relaxed(FABIA_PLL_STANDBY, FABIA_PLL_OPMODE(pll));
+}
+
+static void fabia_alpha_pll_disable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_disable(pll);
+	else
+		__fabia_alpha_pll_disable(pll);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+}
+
+static int fabia_alpha_pll_set_rate(struct clk *c, unsigned long rate)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags, freq_hz;
+	u32 l_val;
+	u64 a_val;
+
+	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+	if (freq_hz > rate + FABIA_RATE_MARGIN || freq_hz < rate) {
+		pr_err("%s: Call clk_set_rate with rounded rates!\n",
+						c->dbg_name);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&c->lock, flags);
+	/* Set the new L value */
+	writel_relaxed(l_val, FABIA_L_REG(pll));
+	if (pll->fabia_frac_offset)
+		writel_relaxed(a_val, FABIA_FRAC_OFF(pll));
+	else
+		writel_relaxed(a_val, FABIA_FRAC_REG(pll));
+
+	alpha_pll_dynamic_update(pll);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+void __init_fabia_alpha_pll(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_masks *masks = pll->masks;
+	u32 regval;
+
+	if (pll->config_ctl_val)
+		writel_relaxed(pll->config_ctl_val, FABIA_CONFIG_CTL_REG(pll));
+
+	if (masks->output_mask && pll->enable_config) {
+		regval = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+		regval &= ~masks->output_mask;
+		regval |= pll->enable_config;
+		writel_relaxed(regval, FABIA_USER_CTL_LO_REG(pll));
+	}
+
+	if (masks->post_div_mask) {
+		regval = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+		regval &= ~masks->post_div_mask;
+		regval |= pll->post_div_config;
+		writel_relaxed(regval, FABIA_USER_CTL_LO_REG(pll));
+	}
+
+	if (pll->slew) {
+		regval = readl_relaxed(FABIA_USER_CTL_HI_REG(pll));
+		regval &= ~PLL_LATCH_INTERFACE;
+		writel_relaxed(regval, FABIA_USER_CTL_HI_REG(pll));
+	}
+
+	if (masks->test_ctl_lo_mask) {
+		regval = readl_relaxed(FABIA_TEST_CTL_LO_REG(pll));
+		regval &= ~masks->test_ctl_lo_mask;
+		regval |= pll->test_ctl_lo_val;
+		writel_relaxed(regval, FABIA_TEST_CTL_LO_REG(pll));
+	}
+
+	if (masks->test_ctl_hi_mask) {
+		regval = readl_relaxed(FABIA_TEST_CTL_HI_REG(pll));
+		regval &= ~masks->test_ctl_hi_mask;
+		regval |= pll->test_ctl_hi_val;
+		writel_relaxed(regval, FABIA_TEST_CTL_HI_REG(pll));
+	}
+
+	if (pll->fsm_en_mask)
+		__set_fsm_mode(MODE_REG(pll));
+
+	pll->inited = true;
+}
+
+static enum handoff fabia_alpha_pll_handoff(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	u64 a_val;
+	u32 l_val, regval;
+
+	/* Set the PLL_HW_UPDATE_LOGIC_BYPASS bit before continuing */
+	regval = readl_relaxed(MODE_REG(pll));
+	regval |= ALPHA_PLL_HW_UPDATE_LOGIC_BYPASS;
+	writel_relaxed(regval, MODE_REG(pll));
+
+	if (!is_locked(pll)) {
+		if (c->rate && fabia_alpha_pll_set_rate(c, c->rate))
+			WARN(1, "%s: Failed to configure rate\n", c->dbg_name);
+		__init_alpha_pll(c);
+		return HANDOFF_DISABLED_CLK;
+	} else if (pll->fsm_en_mask && !is_fsm_mode(MODE_REG(pll))) {
+		WARN(1, "%s should be in FSM mode but is not\n", c->dbg_name);
+	}
+
+	l_val = readl_relaxed(FABIA_L_REG(pll));
+
+	if (pll->fabia_frac_offset)
+		a_val = readl_relaxed(FABIA_FRAC_OFF(pll));
+	else
+		a_val = readl_relaxed(FABIA_FRAC_REG(pll));
+
+	c->rate = compute_rate(pll, l_val, a_val);
+
+	/*
+	 * Unconditionally vote for the PLL; it might be on because of
+	 * another master's vote.
+	 */
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_enable(pll);
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+const struct clk_ops clk_ops_alpha_pll = {
+	.enable = alpha_pll_enable,
+	.disable = alpha_pll_disable,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = alpha_pll_set_rate,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+const struct clk_ops clk_ops_alpha_pll_hwfsm = {
+	.enable = alpha_pll_enable_hwfsm,
+	.disable = alpha_pll_disable_hwfsm,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = alpha_pll_set_rate,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+const struct clk_ops clk_ops_fixed_alpha_pll = {
+	.enable = alpha_pll_enable,
+	.disable = alpha_pll_disable,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+const struct clk_ops clk_ops_fixed_fabia_alpha_pll = {
+	.enable = fabia_alpha_pll_enable,
+	.disable = fabia_alpha_pll_disable,
+	.handoff = fabia_alpha_pll_handoff,
+};
+
+const struct clk_ops clk_ops_fabia_alpha_pll = {
+	.enable = fabia_alpha_pll_enable,
+	.disable = fabia_alpha_pll_disable,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = fabia_alpha_pll_set_rate,
+	.handoff = fabia_alpha_pll_handoff,
+};
+
+const struct clk_ops clk_ops_dyna_alpha_pll = {
+	.enable = dyna_alpha_pll_enable,
+	.disable = dyna_alpha_pll_disable,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = dyna_alpha_pll_set_rate,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+static struct alpha_pll_masks masks_20nm_p = {
+	.lock_mask = BIT(31),
+	.active_mask = BIT(30),
+	.vco_mask = BM(21, 20) >> 20,
+	.vco_shift = 20,
+	.alpha_en_mask = BIT(24),
+	.output_mask = 0xF,
+	.post_div_mask = 0xF00,
+};
+
+static struct alpha_pll_vco_tbl vco_20nm_p[] = {
+	VCO(3,  250000000,  500000000),
+	VCO(2,  500000000, 1000000000),
+	VCO(1, 1000000000, 1500000000),
+	VCO(0, 1500000000, 2000000000),
+};
+
+static struct alpha_pll_masks masks_20nm_t = {
+	.lock_mask = BIT(31),
+	.alpha_en_mask = BIT(24),
+	.output_mask = 0xf,
+};
+
+static struct alpha_pll_vco_tbl vco_20nm_t[] = {
+	VCO(0, 500000000, 1250000000),
+};
+
+static struct alpha_pll_clk *alpha_pll_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct alpha_pll_clk *pll;
+	struct msmclk_data *drv;
+
+	pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+	if (!pll)
+		return ERR_PTR(-ENOMEM);
+
+	if (of_property_read_u32(np, "qcom,base-offset", &pll->offset)) {
+		dt_err(np, "missing qcom,base-offset\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Optional property */
+	of_property_read_u32(np, "qcom,post-div-config",
+					&pll->post_div_config);
+
+	pll->masks = devm_kzalloc(dev, sizeof(*pll->masks), GFP_KERNEL);
+	if (!pll->masks)
+		return ERR_PTR(-ENOMEM);
+
+	if (of_device_is_compatible(np, "qcom,fixed-alpha-pll-20p") ||
+		of_device_is_compatible(np, "qcom,alpha-pll-20p")) {
+		*pll->masks = masks_20nm_p;
+		pll->vco_tbl = vco_20nm_p;
+		pll->num_vco = ARRAY_SIZE(vco_20nm_p);
+	} else if (of_device_is_compatible(np, "qcom,fixed-alpha-pll-20t") ||
+		of_device_is_compatible(np, "qcom,alpha-pll-20t")) {
+		*pll->masks = masks_20nm_t;
+		pll->vco_tbl = vco_20nm_t;
+		pll->num_vco = ARRAY_SIZE(vco_20nm_t);
+	} else {
+		dt_err(np, "unexpected compatible string\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	pll->base = &drv->base;
+	return pll;
+}
+
+static void *variable_rate_alpha_pll_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct alpha_pll_clk *pll;
+
+	pll = alpha_pll_dt_parser(dev, np);
+	if (IS_ERR(pll))
+		return pll;
+
+	/* Optional Property */
+	of_property_read_u32(np, "qcom,output-enable", &pll->enable_config);
+
+	pll->c.ops = &clk_ops_alpha_pll;
+	return msmclk_generic_clk_init(dev, np, &pll->c);
+}
+
+static void *fixed_rate_alpha_pll_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct alpha_pll_clk *pll;
+	int rc;
+	u32 val;
+
+	pll = alpha_pll_dt_parser(dev, np);
+	if (IS_ERR(pll))
+		return pll;
+
+	rc = of_property_read_u32(np, "qcom,pll-config-rate", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,pll-config-rate\n");
+		return ERR_PTR(-EINVAL);
+	}
+	pll->c.rate = val;
+
+	rc = of_property_read_u32(np, "qcom,output-enable",
+						&pll->enable_config);
+	if (rc) {
+		dt_err(np, "missing qcom,output-enable\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Optional Property */
+	rc = of_property_read_u32(np, "qcom,fsm-en-bit", &val);
+	if (!rc) {
+		rc = of_property_read_u32(np, "qcom,fsm-en-offset",
+						&pll->fsm_reg_offset);
+		if (rc) {
+			dt_err(np, "missing qcom,fsm-en-offset\n");
+			return ERR_PTR(-EINVAL);
+		}
+		pll->fsm_en_mask = BIT(val);
+	}
+
+	pll->c.ops = &clk_ops_fixed_alpha_pll;
+	return msmclk_generic_clk_init(dev, np, &pll->c);
+}
+
+MSMCLK_PARSER(fixed_rate_alpha_pll_dt_parser, "qcom,fixed-alpha-pll-20p", 0);
+MSMCLK_PARSER(fixed_rate_alpha_pll_dt_parser, "qcom,fixed-alpha-pll-20t", 1);
+MSMCLK_PARSER(variable_rate_alpha_pll_dt_parser, "qcom,alpha-pll-20p", 0);
+MSMCLK_PARSER(variable_rate_alpha_pll_dt_parser, "qcom,alpha-pll-20t", 1);
diff --git a/drivers/clk/msm/clock-debug.c b/drivers/clk/msm/clock-debug.c
new file mode 100644
index 0000000..f182fe1
--- /dev/null
+++ b/drivers/clk/msm/clock-debug.c
@@ -0,0 +1,721 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2014, 2017,  The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/clkdev.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <trace/events/power.h>
+
+
+#include "clock.h"
+
+static LIST_HEAD(clk_list);
+static DEFINE_MUTEX(clk_list_lock);
+
+static struct dentry *debugfs_base;
+static u32 debug_suspend;
+
+static int clock_debug_rate_set(void *data, u64 val)
+{
+	struct clk *clock = data;
+	int ret;
+
+	/*
+	 * Only increases to the max rate will succeed, but that's actually
+	 * useful for debugging purposes, so the return value of
+	 * clk_set_max_rate() is not checked.
+	 */
+	if (clock->flags & CLKFLAG_MAX)
+		clk_set_max_rate(clock, val);
+	ret = clk_set_rate(clock, val);
+	if (ret)
+		pr_err("clk_set_rate(%s, %lu) failed (%d)\n", clock->dbg_name,
+				(unsigned long)val, ret);
+
+	return ret;
+}
+
+static int clock_debug_rate_get(void *data, u64 *val)
+{
+	struct clk *clock = data;
+	*val = clk_get_rate(clock);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
+			clock_debug_rate_set, "%llu\n");
+
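+/* Clock used for rate measurement, picked up via CLKFLAG_MEASURE */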
+static struct clk *measure;
+
+static int clock_debug_measure_get(void *data, u64 *val)
+{
+	struct clk *clock = data, *par;
+	int ret, is_hw_gated;
+	unsigned long meas_rate, sw_rate;
+
+	/* Check to see if the clock is in hardware gating mode */
+	if (clock->ops->in_hwcg_mode)
+		is_hw_gated = clock->ops->in_hwcg_mode(clock);
+	else
+		is_hw_gated = 0;
+
+	ret = clk_set_parent(measure, clock);
+	if (!ret) {
+		/*
+		 * Disable hw gating to get accurate rate measurements. Only do
+		 * this if the clock is explicitly enabled by software. This
+		 * allows us to detect errors where clocks are on even though
+		 * software is not requesting them to be on due to broken
+		 * hardware gating signals.
+		 */
+		if (is_hw_gated && clock->count)
+			clock->ops->disable_hwcg(clock);
+		par = measure;
+		while (par && par != clock) {
+			if (par->ops->enable)
+				par->ops->enable(par);
+			par = par->parent;
+		}
+		*val = clk_get_rate(measure);
+		/* Reenable hwgating if it was disabled */
+		if (is_hw_gated && clock->count)
+			clock->ops->enable_hwcg(clock);
+	}
+
+	/*
+	 * If there's a divider on the path from the clock output to the
+	 * measurement circuitry, account for it by dividing the original clock
+	 * rate with the rate set on the parent of the measure clock.
+	 */
+	meas_rate = clk_get_rate(clock);
+	sw_rate = clk_get_rate(measure->parent);
+	if (sw_rate && meas_rate >= (sw_rate * 2))
+		*val *= DIV_ROUND_CLOSEST(meas_rate, sw_rate);
+
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_measure_fops, clock_debug_measure_get,
+			NULL, "%lld\n");
+
+static int clock_debug_enable_set(void *data, u64 val)
+{
+	struct clk *clock = data;
+	int rc = 0;
+
+	if (val)
+		rc = clk_prepare_enable(clock);
+	else
+		clk_disable_unprepare(clock);
+
+	return rc;
+}
+
+static int clock_debug_enable_get(void *data, u64 *val)
+{
+	struct clk *clock = data;
+	int enabled;
+
+	if (clock->ops->is_enabled)
+		enabled = clock->ops->is_enabled(clock);
+	else
+		enabled = !!(clock->count);
+
+	*val = enabled;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
+			clock_debug_enable_set, "%lld\n");
+
+static int clock_debug_local_get(void *data, u64 *val)
+{
+	struct clk *clock = data;
+
+	if (!clock->ops->is_local)
+		*val = true;
+	else
+		*val = clock->ops->is_local(clock);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_local_fops, clock_debug_local_get,
+			NULL, "%llu\n");
+
+static int clock_debug_hwcg_get(void *data, u64 *val)
+{
+	struct clk *clock = data;
+
+	if (clock->ops->in_hwcg_mode)
+		*val = !!clock->ops->in_hwcg_mode(clock);
+	else
+		*val = 0;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_hwcg_fops, clock_debug_hwcg_get,
+			NULL, "%llu\n");
+
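+/*
+ * Print one row of the fmax table: the maximum frequency at this voltage
+ * level plus the per-regulator uV/uA corners, with the clock's current
+ * voltage level marked by square brackets.
+ */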
+static void clock_print_fmax_by_level(struct seq_file *m, int level)
+{
+	struct clk *clock = m->private;
+	struct clk_vdd_class *vdd_class = clock->vdd_class;
+	int off, i, vdd_level, nregs = vdd_class->num_regulators;
+
+	vdd_level = find_vdd_level(clock, clock->rate);
+
+	seq_printf(m, "%2s%10lu", vdd_level == level ? "[" : "",
+		clock->fmax[level]);
+	for (i = 0; i < nregs; i++) {
+		off = nregs*level + i;
+		if (vdd_class->vdd_uv)
+			seq_printf(m, "%10u", vdd_class->vdd_uv[off]);
+		if (vdd_class->vdd_ua)
+			seq_printf(m, "%10u", vdd_class->vdd_ua[off]);
+	}
+
+	if (vdd_level == level)
+		seq_puts(m, "]");
+	seq_puts(m, "\n");
+}
+
+static int fmax_rates_show(struct seq_file *m, void *unused)
+{
+	struct clk *clock = m->private;
+	struct clk_vdd_class *vdd_class = clock->vdd_class;
+	int level = 0, i, nregs = vdd_class->num_regulators;
+	char reg_name[10];
+
+	int vdd_level = find_vdd_level(clock, clock->rate);
+
+	if (vdd_level < 0) {
+		seq_printf(m, "could not find_vdd_level for %s, %ld\n",
+			clock->dbg_name, clock->rate);
+		return 0;
+	}
+
+	seq_printf(m, "%12s", "");
+	for (i = 0; i < nregs; i++) {
+		snprintf(reg_name, ARRAY_SIZE(reg_name), "reg %d", i);
+		seq_printf(m, "%10s", reg_name);
+		if (vdd_class->vdd_ua)
+			seq_printf(m, "%10s", "");
+	}
+
+	seq_printf(m, "\n%12s", "freq");
+	for (i = 0; i < nregs; i++) {
+		seq_printf(m, "%10s", "uV");
+		if (vdd_class->vdd_ua)
+			seq_printf(m, "%10s", "uA");
+	}
+	seq_puts(m, "\n");
+
+	for (level = 0; level < clock->num_fmax; level++)
+		clock_print_fmax_by_level(m, level);
+
+	return 0;
+}
+
+static int fmax_rates_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, fmax_rates_show, inode->i_private);
+}
+
+static const struct file_operations fmax_rates_fops = {
+	.open		= fmax_rates_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int orphan_list_show(struct seq_file *m, void *unused)
+{
+	struct clk *c, *safe;
+
+	list_for_each_entry_safe(c, safe, &orphan_clk_list, list)
+		seq_printf(m, "%s\n", c->dbg_name);
+
+	return 0;
+}
+
+static int orphan_list_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, orphan_list_show, inode->i_private);
+}
+
+static const struct file_operations orphan_list_fops = {
+	.open		= orphan_list_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
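+/*
+ * Route debug output to a seq_file when one is provided, otherwise to the
+ * kernel log: @c selects pr_cont() (continue the current line) vs pr_info().
+ */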
+#define clock_debug_output(m, c, fmt, ...)		\
+do {							\
+	if (m)						\
+		seq_printf(m, fmt, ##__VA_ARGS__);	\
+	else if (c)					\
+		pr_cont(fmt, ##__VA_ARGS__);		\
+	else						\
+		pr_info(fmt, ##__VA_ARGS__);		\
+} while (0)
+
+/*
+ * clock_debug_print_enabled_debug_suspend() - Print names of enabled clocks
+ * during suspend.
+ */
+static void clock_debug_print_enabled_debug_suspend(struct seq_file *s)
+{
+	struct clk *c;
+	int cnt = 0;
+
+	if (!mutex_trylock(&clk_list_lock))
+		return;
+
+	clock_debug_output(s, 0, "Enabled clocks:\n");
+
+	list_for_each_entry(c, &clk_list, list) {
+		if (!c || !c->prepare_count)
+			continue;
+		if (c->vdd_class)
+			clock_debug_output(s, 0, " %s:%lu:%lu [%ld, %d]",
+					c->dbg_name, c->prepare_count,
+					c->count, c->rate,
+					find_vdd_level(c, c->rate));
+		else
+			clock_debug_output(s, 0, " %s:%lu:%lu [%ld]",
+					c->dbg_name, c->prepare_count,
+					c->count, c->rate);
+		cnt++;
+	}
+
+	mutex_unlock(&clk_list_lock);
+
+	if (cnt)
+		clock_debug_output(s, 0, "Enabled clock count: %d\n", cnt);
+	else
+		clock_debug_output(s, 0, "No clocks enabled.\n");
+}
+
+static int clock_debug_print_clock(struct clk *c, struct seq_file *m)
+{
+	char *start = "";
+
+	if (!c || !c->prepare_count)
+		return 0;
+
+	clock_debug_output(m, 0, "\t");
+	do {
+		if (c->vdd_class)
+			clock_debug_output(m, 1, "%s%s:%lu:%lu [%ld, %d]",
+				start, c->dbg_name, c->prepare_count, c->count,
+				c->rate, find_vdd_level(c, c->rate));
+		else
+			clock_debug_output(m, 1, "%s%s:%lu:%lu [%ld]", start,
+				c->dbg_name, c->prepare_count, c->count,
+				c->rate);
+		start = " -> ";
+	} while ((c = clk_get_parent(c)));
+
+	clock_debug_output(m, 1, "\n");
+
+	return 1;
+}
+
+/**
+ * clock_debug_print_enabled_clocks() - Print names of enabled clocks
+ *
+ */
+static void clock_debug_print_enabled_clocks(struct seq_file *m)
+{
+	struct clk *c;
+	int cnt = 0;
+
+	if (!mutex_trylock(&clk_list_lock)) {
+		pr_err("clock-debug: Clocks are being registered. Cannot print clock state now.\n");
+		return;
+	}
+	clock_debug_output(m, 0, "Enabled clocks:\n");
+	list_for_each_entry(c, &clk_list, list) {
+		cnt += clock_debug_print_clock(c, m);
+	}
+	mutex_unlock(&clk_list_lock);
+
+	if (cnt)
+		clock_debug_output(m, 0, "Enabled clock count: %d\n", cnt);
+	else
+		clock_debug_output(m, 0, "No clocks enabled.\n");
+}
+
+static int enabled_clocks_show(struct seq_file *m, void *unused)
+{
+	clock_debug_print_enabled_clocks(m);
+	return 0;
+}
+
+static int enabled_clocks_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, enabled_clocks_show, inode->i_private);
+}
+
+static const struct file_operations enabled_clocks_fops = {
+	.open		= enabled_clocks_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int trace_clocks_show(struct seq_file *m, void *unused)
+{
+	struct clk *c;
+	int total_cnt = 0;
+
+	if (!mutex_trylock(&clk_list_lock)) {
+		pr_err("trace_clocks: Clocks are being registered. Cannot trace clock state now.\n");
+		return 1;
+	}
+	list_for_each_entry(c, &clk_list, list) {
+		trace_clock_state(c->dbg_name, c->prepare_count, c->count,
+					c->rate);
+		total_cnt++;
+	}
+	mutex_unlock(&clk_list_lock);
+	clock_debug_output(m, 0, "Total clock count: %d\n", total_cnt);
+
+	return 0;
+}
+
+static int trace_clocks_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, trace_clocks_show, inode->i_private);
+}
+static const struct file_operations trace_clocks_fops = {
+	.open		= trace_clocks_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int list_rates_show(struct seq_file *m, void *unused)
+{
+	struct clk *clock = m->private;
+	int level, i = 0;
+	unsigned long rate, fmax = 0;
+
+	/* Find max frequency supported within voltage constraints. */
+	if (!clock->vdd_class) {
+		fmax = ULONG_MAX;
+	} else {
+		for (level = 0; level < clock->num_fmax; level++)
+			if (clock->fmax[level])
+				fmax = clock->fmax[level];
+	}
+
+	/*
+	 * List supported frequencies <= fmax. Higher frequencies may appear in
+	 * the frequency table, but are not valid and should not be listed.
+	 */
+	while (!IS_ERR_VALUE(rate = clock->ops->list_rate(clock, i++))) {
+		if (rate <= fmax)
+			seq_printf(m, "%lu\n", rate);
+	}
+
+	return 0;
+}
+
+static int list_rates_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, list_rates_show, inode->i_private);
+}
+
+static const struct file_operations list_rates_fops = {
+	.open		= list_rates_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static ssize_t clock_parent_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	struct clk *clock = filp->private_data;
+	struct clk *p = clock->parent;
+	char name[256] = {0};
+
+	snprintf(name, sizeof(name), "%s\n", p ? p->dbg_name : "None");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, name, strlen(name));
+}
+
+
+static ssize_t clock_parent_write(struct file *filp,
+		const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct clk *clock = filp->private_data;
+	char buf[256];
+	char *cmp;
+	int ret;
+	struct clk *parent = NULL;
+
+	cnt = min(cnt, sizeof(buf) - 1);
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+	buf[cnt] = '\0';
+	cmp = strstrip(buf);
+
+	mutex_lock(&clk_list_lock);
+	list_for_each_entry(parent, &clk_list, list) {
+		if (!strcmp(cmp, parent->dbg_name))
+			break;
+	}
+
+	if (&parent->list == &clk_list) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	mutex_unlock(&clk_list_lock);
+	ret = clk_set_parent(clock, parent);
+	if (ret)
+		return ret;
+
+	return cnt;
+err:
+	mutex_unlock(&clk_list_lock);
+	return ret;
+}
+
+
+static const struct file_operations clock_parent_fops = {
+	.open		= simple_open,
+	.read		= clock_parent_read,
+	.write		= clock_parent_write,
+};
+
+void clk_debug_print_hw(struct clk *clk, struct seq_file *f)
+{
+	void __iomem *base;
+	struct clk_register_data *regs;
+	u32 i, j, size;
+
+	if (IS_ERR_OR_NULL(clk))
+		return;
+
+	clk_debug_print_hw(clk->parent, f);
+
+	clock_debug_output(f, false, "%s\n", clk->dbg_name);
+
+	if (!clk->ops->list_registers)
+		return;
+
+	j = 0;
+	base = clk->ops->list_registers(clk, j, &regs, &size);
+	while (!IS_ERR(base)) {
+		for (i = 0; i < size; i++) {
+			u32 val = readl_relaxed(base + regs[i].offset);
+
+			clock_debug_output(f, false, "%20s: 0x%.8x\n",
+						regs[i].name, val);
+		}
+		j++;
+		base = clk->ops->list_registers(clk, j, &regs, &size);
+	}
+}
+
+static int print_hw_show(struct seq_file *m, void *unused)
+{
+	struct clk *c = m->private;
+
+	clk_debug_print_hw(c, m);
+
+	return 0;
+}
+
+static int print_hw_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, print_hw_show, inode->i_private);
+}
+
+static const struct file_operations clock_print_hw_fops = {
+	.open		= print_hw_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+
+static void clock_measure_add(struct clk *clock)
+{
+	if (IS_ERR_OR_NULL(measure))
+		return;
+
+	if (clk_set_parent(measure, clock))
+		return;
+
+	debugfs_create_file("measure", 0444, clock->clk_dir, clock,
+				&clock_measure_fops);
+}
+
+static int clock_debug_add(struct clk *clock)
+{
+	char temp[50], *ptr;
+	struct dentry *clk_dir;
+
+	if (!debugfs_base)
+		return -ENOMEM;
+
+	strlcpy(temp, clock->dbg_name, ARRAY_SIZE(temp));
+	for (ptr = temp; *ptr; ptr++)
+		*ptr = tolower(*ptr);
+
+	clk_dir = debugfs_create_dir(temp, debugfs_base);
+	if (!clk_dir)
+		return -ENOMEM;
+
+	clock->clk_dir = clk_dir;
+
+	if (!debugfs_create_file("rate", 0644, clk_dir,
+				clock, &clock_rate_fops))
+		goto error;
+
+	if (!debugfs_create_file("enable", 0644, clk_dir,
+				clock, &clock_enable_fops))
+		goto error;
+
+	if (!debugfs_create_file("is_local", 0444, clk_dir, clock,
+				&clock_local_fops))
+		goto error;
+
+	if (!debugfs_create_file("has_hw_gating", 0444, clk_dir, clock,
+				&clock_hwcg_fops))
+		goto error;
+
+	if (clock->ops->list_rate)
+		if (!debugfs_create_file("list_rates",
+				0444, clk_dir, clock, &list_rates_fops))
+			goto error;
+
+	if (clock->vdd_class && !debugfs_create_file(
+			"fmax_rates", 0444, clk_dir, clock, &fmax_rates_fops))
+		goto error;
+
+	if (!debugfs_create_file("parent", 0444, clk_dir, clock,
+			&clock_parent_fops))
+		goto error;
+
+	if (!debugfs_create_file("print", 0444, clk_dir, clock,
+			&clock_print_hw_fops))
+		goto error;
+
+	clock_measure_add(clock);
+
+	return 0;
+error:
+	debugfs_remove_recursive(clk_dir);
+	return -ENOMEM;
+}
+
+static DEFINE_MUTEX(clk_debug_lock);
+static int clk_debug_init_once;
+
+/**
+ * clock_debug_init() - Initialize clock debugfs
+ * Lock clk_debug_lock before invoking this function.
+ */
+static int clock_debug_init(void)
+{
+	if (clk_debug_init_once)
+		return 0;
+
+	clk_debug_init_once = 1;
+
+	debugfs_base = debugfs_create_dir("clk", NULL);
+	if (!debugfs_base)
+		return -ENOMEM;
+
+	if (!debugfs_create_u32("debug_suspend", 0644,
+				debugfs_base, &debug_suspend)) {
+		debugfs_remove_recursive(debugfs_base);
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_file("enabled_clocks", 0444, debugfs_base, NULL,
+				&enabled_clocks_fops))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("orphan_list", 0444, debugfs_base, NULL,
+				&orphan_list_fops))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("trace_clocks", 0444, debugfs_base, NULL,
+				&trace_clocks_fops))
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * clock_debug_register() - Add a clock to the clock debugfs hierarchy
+ * @clk: Clock to create debugfs nodes for
+ */
+int clock_debug_register(struct clk *clk)
+{
+	int ret = 0;
+	struct clk *c;
+
+	mutex_lock(&clk_list_lock);
+	if (!list_empty(&clk->list))
+		goto out;
+
+	ret = clock_debug_init();
+	if (ret)
+		goto out;
+
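+	/*
+	 * The first clock registered with CLKFLAG_MEASURE becomes the global
+	 * measure mux. Once it is found, add "measure" debugfs nodes for the
+	 * clocks that were registered before it.
+	 */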
+	if (IS_ERR_OR_NULL(measure)) {
+		if (clk->flags & CLKFLAG_MEASURE)
+			measure = clk;
+		if (!IS_ERR_OR_NULL(measure)) {
+			list_for_each_entry(c, &clk_list, list)
+				clock_measure_add(c);
+		}
+	}
+
+	list_add_tail(&clk->list, &clk_list);
+	clock_debug_add(clk);
+out:
+	mutex_unlock(&clk_list_lock);
+	return ret;
+}
+
+/*
+ * Print the names of enabled clocks and their parents if debug_suspend is set
+ */
+void clock_debug_print_enabled(bool print_parent)
+{
+	if (likely(!debug_suspend))
+		return;
+	if (print_parent)
+		clock_debug_print_enabled_clocks(NULL);
+	else
+		clock_debug_print_enabled_debug_suspend(NULL);
+
+}
diff --git a/drivers/clk/msm/clock-dummy.c b/drivers/clk/msm/clock-dummy.c
new file mode 100644
index 0000000..ad6952a
--- /dev/null
+++ b/drivers/clk/msm/clock-dummy.c
@@ -0,0 +1,113 @@
+/* Copyright (c) 2011,2013-2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+static int dummy_clk_reset(struct clk *clk, enum clk_reset_action action)
+{
+	return 0;
+}
+
+static int dummy_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	clk->rate = rate;
+	return 0;
+}
+
+static int dummy_clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+	return 0;
+}
+
+static int dummy_clk_set_flags(struct clk *clk, unsigned long flags)
+{
+	return 0;
+}
+
+static unsigned long dummy_clk_get_rate(struct clk *clk)
+{
+	return clk->rate;
+}
+
+static long dummy_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	return rate;
+}
+
+const struct clk_ops clk_ops_dummy = {
+	.reset = dummy_clk_reset,
+	.set_rate = dummy_clk_set_rate,
+	.set_max_rate = dummy_clk_set_max_rate,
+	.set_flags = dummy_clk_set_flags,
+	.get_rate = dummy_clk_get_rate,
+	.round_rate = dummy_clk_round_rate,
+};
+
+struct clk dummy_clk = {
+	.dbg_name = "dummy_clk",
+	.ops = &clk_ops_dummy,
+	CLK_INIT(dummy_clk),
+};
+
+static void *dummy_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct clk *c;
+
+	c = devm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	c->ops = &clk_ops_dummy;
+	return msmclk_generic_clk_init(dev, np, c);
+}
+MSMCLK_PARSER(dummy_clk_dt_parser, "qcom,dummy-clk", 0);
+
+static struct clk *of_dummy_get(struct of_phandle_args *clkspec,
+				  void *data)
+{
+	return &dummy_clk;
+}
+
+static const struct of_device_id msm_clock_dummy_match_table[] = {
+	{ .compatible = "qcom,dummycc" },
+	{}
+};
+
+static int msm_clock_dummy_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = of_clk_add_provider(pdev->dev.of_node, of_dummy_get, NULL);
+	if (ret)
+		return -ENOMEM;
+
+	dev_info(&pdev->dev, "Registered DUMMY provider.\n");
+	return ret;
+}
+
+static struct platform_driver msm_clock_dummy_driver = {
+	.probe = msm_clock_dummy_probe,
+	.driver = {
+		.name = "clock-dummy",
+		.of_match_table = msm_clock_dummy_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+int __init msm_dummy_clk_init(void)
+{
+	return platform_driver_register(&msm_clock_dummy_driver);
+}
+arch_initcall(msm_dummy_clk_init);
diff --git a/drivers/clk/msm/clock-generic.c b/drivers/clk/msm/clock-generic.c
new file mode 100644
index 0000000..b4e6bdd
--- /dev/null
+++ b/drivers/clk/msm/clock-generic.c
@@ -0,0 +1,921 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+/* ==================== Mux clock ==================== */
+
+static int mux_parent_to_src_sel(struct mux_clk *mux, struct clk *p)
+{
+	return parent_to_src_sel(mux->parents, mux->num_parents, p);
+}
+
+static int mux_set_parent(struct clk *c, struct clk *p)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	int sel = mux_parent_to_src_sel(mux, p);
+	struct clk *old_parent;
+	int rc = 0, i;
+	unsigned long flags;
+
+	if (sel < 0 && mux->rec_parents) {
+		for (i = 0; i < mux->num_rec_parents; i++) {
+			rc = clk_set_parent(mux->rec_parents[i], p);
+			if (!rc) {
+				/*
+				 * This is necessary to ensure prepare/enable
+				 * counts get propagated correctly.
+				 */
+				p = mux->rec_parents[i];
+				sel = mux_parent_to_src_sel(mux, p);
+				break;
+			}
+		}
+	}
+
+	if (sel < 0)
+		return sel;
+
+	rc = __clk_pre_reparent(c, p, &flags);
+	if (rc)
+		goto out;
+
+	rc = mux->ops->set_mux_sel(mux, sel);
+	if (rc)
+		goto set_fail;
+
+	old_parent = c->parent;
+	c->parent = p;
+	c->rate = clk_get_rate(p);
+	__clk_post_reparent(c, old_parent, &flags);
+
+	return 0;
+
+set_fail:
+	__clk_post_reparent(c, p, &flags);
+out:
+	return rc;
+}
+
+static long mux_round_rate(struct clk *c, unsigned long rate)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	int i;
+	unsigned long prate, rrate = 0;
+
+	for (i = 0; i < mux->num_parents; i++) {
+		prate = clk_round_rate(mux->parents[i].src, rate);
+		if (is_better_rate(rate, rrate, prate))
+			rrate = prate;
+	}
+	if (!rrate)
+		return -EINVAL;
+
+	return rrate;
+}
+
+static int mux_set_rate(struct clk *c, unsigned long rate)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	struct clk *new_parent = NULL;
+	int rc = 0, i;
+	unsigned long new_par_curr_rate;
+	unsigned long flags;
+
+	/*
+	 * Check if one of the possible parents is already at the requested
+	 * rate.
+	 */
+	for (i = 0; i < mux->num_parents && mux->try_get_rate; i++) {
+		struct clk *p = mux->parents[i].src;
+
+		if (p->rate == rate && clk_round_rate(p, rate) == rate) {
+			new_parent = mux->parents[i].src;
+			break;
+		}
+	}
+
+	for (i = 0; i < mux->num_parents && !(!i && new_parent); i++) {
+		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
+			new_parent = mux->parents[i].src;
+			if (!mux->try_new_parent)
+				break;
+			if (mux->try_new_parent && new_parent != c->parent)
+				break;
+		}
+	}
+
+	if (new_parent == NULL)
+		return -EINVAL;
+
+	/*
+	 * Switch to safe parent since the old and new parent might be the
+	 * same and the parent might temporarily turn off while switching
+	 * rates. If the mux can switch between distinct sources safely
+	 * (indicated by try_new_parent), and the new source is not the current
+	 * parent, do not switch to the safe parent.
+	 */
+	if (mux->safe_sel >= 0 &&
+		!(mux->try_new_parent && (new_parent != c->parent))) {
+		/*
+		 * The safe parent might be a clock with multiple sources;
+		 * to select the "safe" source, set a safe frequency.
+		 */
+		if (mux->safe_freq) {
+			rc = clk_set_rate(mux->safe_parent, mux->safe_freq);
+			if (rc) {
+				pr_err("Failed to set safe rate on %s\n",
+					clk_name(mux->safe_parent));
+				return rc;
+			}
+		}
+
+		/*
+		 * Some mux implementations might switch to/from a low power
+		 * parent as part of their disable/enable ops. Grab the
+		 * enable lock to avoid racing with these implementations.
+		 */
+		spin_lock_irqsave(&c->lock, flags);
+		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
+		spin_unlock_irqrestore(&c->lock, flags);
+		if (rc)
+			return rc;
+
+	}
+
+	new_par_curr_rate = clk_get_rate(new_parent);
+	rc = clk_set_rate(new_parent, rate);
+	if (rc)
+		goto set_rate_fail;
+
+	rc = mux_set_parent(c, new_parent);
+	if (rc)
+		goto set_par_fail;
+
+	return 0;
+
+set_par_fail:
+	clk_set_rate(new_parent, new_par_curr_rate);
+set_rate_fail:
+	WARN(mux->ops->set_mux_sel(mux,
+		mux_parent_to_src_sel(mux, c->parent)),
+		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
+	return rc;
+}
+
+static int mux_enable(struct clk *c)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+
+	if (mux->ops->enable)
+		return mux->ops->enable(mux);
+	return 0;
+}
+
+static void mux_disable(struct clk *c)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+
+	if (mux->ops->disable)
+		return mux->ops->disable(mux);
+}
+
+static struct clk *mux_get_parent(struct clk *c)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	int sel = mux->ops->get_mux_sel(mux);
+	int i;
+
+	for (i = 0; i < mux->num_parents; i++) {
+		if (mux->parents[i].sel == sel)
+			return mux->parents[i].src;
+	}
+
+	/* Unfamiliar parent. */
+	return NULL;
+}
+
+static enum handoff mux_handoff(struct clk *c)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+
+	c->rate = clk_get_rate(c->parent);
+	mux->safe_sel = mux_parent_to_src_sel(mux, mux->safe_parent);
+
+	if (mux->en_mask && mux->ops && mux->ops->is_enabled)
+		return mux->ops->is_enabled(mux)
+			? HANDOFF_ENABLED_CLK
+			: HANDOFF_DISABLED_CLK;
+
+	/*
+	 * If this function returns 'enabled' even when the clock downstream
+	 * of this clock is disabled, then handoff code will unnecessarily
+	 * enable the current parent of this clock. If this function always
+	 * returns 'disabled' and a clock downstream is on, the clock handoff
+	 * code will bump up the ref count for this clock and its current
+	 * parent as necessary. So, clocks without an actual HW gate can
+	 * always return disabled.
+	 */
+	return HANDOFF_DISABLED_CLK;
+}
+
+static void __iomem *mux_clk_list_registers(struct clk *c, int n,
+			struct clk_register_data **regs, u32 *size)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+
+	if (mux->ops && mux->ops->list_registers)
+		return mux->ops->list_registers(mux, n, regs, size);
+
+	return ERR_PTR(-EINVAL);
+}
+
+const struct clk_ops clk_ops_gen_mux = {
+	.enable = mux_enable,
+	.disable = mux_disable,
+	.set_parent = mux_set_parent,
+	.round_rate = mux_round_rate,
+	.set_rate = mux_set_rate,
+	.handoff = mux_handoff,
+	.get_parent = mux_get_parent,
+	.list_registers = mux_clk_list_registers,
+};
+
+/* ==================== Divider clock ==================== */
+
+static long __div_round_rate(struct div_data *data, unsigned long rate,
+	struct clk *parent, unsigned int *best_div, unsigned long *best_prate)
+{
+	unsigned int div, min_div, max_div, _best_div = 1;
+	unsigned long prate, _best_prate = 0, rrate = 0, req_prate, actual_rate;
+	unsigned int numer;
+
+	rate = max(rate, 1UL);
+
+	min_div = max(data->min_div, 1U);
+	max_div = min(data->max_div, (unsigned int) (ULONG_MAX));
+
+	/*
+	 * div values are doubled for half dividers.
+	 * Adjust for that by picking a numer of 2.
+	 */
+	numer = data->is_half_divider ? 2 : 1;
+
+	for (div = min_div; div <= max_div; div++) {
+		if (data->skip_odd_div && (div & 1))
+			if (!(data->allow_div_one && (div == 1)))
+				continue;
+		if (data->skip_even_div && !(div & 1))
+			continue;
+		req_prate = mult_frac(rate, div, numer);
+		prate = clk_round_rate(parent, req_prate);
+		if (IS_ERR_VALUE(prate))
+			break;
+
+		actual_rate = mult_frac(prate, numer, div);
+		if (is_better_rate(rate, rrate, actual_rate)) {
+			rrate = actual_rate;
+			_best_div = div;
+			_best_prate = prate;
+		}
+
+		/*
+		 * Trying higher dividers is only going to ask the parent for
+		 * a higher rate. If it can't even output a rate higher than
+		 * the one we request for this divider, the parent is not
+		 * going to be able to output an even higher rate required
+		 * for a higher divider. So, stop trying higher dividers.
+		 */
+		if (actual_rate < rate)
+			break;
+
+		if (rrate <= rate + data->rate_margin)
+			break;
+	}
+
+	if (!rrate)
+		return -EINVAL;
+	if (best_div)
+		*best_div = _best_div;
+	if (best_prate)
+		*best_prate = _best_prate;
+
+	return rrate;
+}
+
+static long div_round_rate(struct clk *c, unsigned long rate)
+{
+	struct div_clk *d = to_div_clk(c);
+
+	return __div_round_rate(&d->data, rate, c->parent, NULL, NULL);
+}
+
+static int _find_safe_div(struct clk *c, unsigned long rate)
+{
+	struct div_clk *d = to_div_clk(c);
+	struct div_data *data = &d->data;
+	unsigned long fast = max(rate, c->rate);
+	unsigned int numer = data->is_half_divider ? 2 : 1;
+	int i, safe_div = 0;
+
+	if (!d->safe_freq)
+		return 0;
+
+	/* Find the max safe frequency that does not exceed fast */
+	for (i = data->max_div; i >= data->min_div; i--)
+		if (mult_frac(d->safe_freq, numer, i) <= fast)
+			safe_div = i;
+
+	return safe_div ?: -EINVAL;
+}
+
+static int div_set_rate(struct clk *c, unsigned long rate)
+{
+	struct div_clk *d = to_div_clk(c);
+	int safe_div, div, rc = 0;
+	long rrate, old_prate, new_prate;
+	struct div_data *data = &d->data;
+
+	rrate = __div_round_rate(data, rate, c->parent, &div, &new_prate);
+	if (rrate < rate || rrate > rate + data->rate_margin)
+		return -EINVAL;
+
+	/*
+	 * For fixed divider clock we don't want to return an error if the
+	 * requested rate matches the achievable rate. So, don't check for
+	 * !d->ops and return an error. __div_round_rate() ensures div ==
+	 * d->div if !d->ops.
+	 */
+
+	safe_div = _find_safe_div(c, rate);
+	if (d->safe_freq && safe_div < 0) {
+		pr_err("No safe div on %s for transitioning from %lu to %lu\n",
+			c->dbg_name, c->rate, rate);
+		return -EINVAL;
+	}
+
+	safe_div = max(safe_div, div);
+
+	if (safe_div > data->div) {
+		rc = d->ops->set_div(d, safe_div);
+		if (rc) {
+			pr_err("Failed to set div %d on %s\n", safe_div,
+				c->dbg_name);
+			return rc;
+		}
+	}
+
+	old_prate = clk_get_rate(c->parent);
+	rc = clk_set_rate(c->parent, new_prate);
+	if (rc)
+		goto set_rate_fail;
+
+	if (div < data->div)
+		rc = d->ops->set_div(d, div);
+	else if (div < safe_div)
+		rc = d->ops->set_div(d, div);
+	if (rc)
+		goto div_dec_fail;
+
+	data->div = div;
+
+	return 0;
+
+div_dec_fail:
+	WARN(clk_set_rate(c->parent, old_prate),
+		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
+set_rate_fail:
+	if (safe_div > data->div)
+		WARN(d->ops->set_div(d, data->div),
+			"Set rate failed for %s. Also in bad state!\n",
+			c->dbg_name);
+	return rc;
+}
+
+static int div_enable(struct clk *c)
+{
+	struct div_clk *d = to_div_clk(c);
+
+	if (d->ops && d->ops->enable)
+		return d->ops->enable(d);
+	return 0;
+}
+
+static void div_disable(struct clk *c)
+{
+	struct div_clk *d = to_div_clk(c);
+
+	if (d->ops && d->ops->disable)
+		return d->ops->disable(d);
+}
+
+static enum handoff div_handoff(struct clk *c)
+{
+	struct div_clk *d = to_div_clk(c);
+	unsigned int div = d->data.div;
+
+	if (d->ops && d->ops->get_div)
+		div = max(d->ops->get_div(d), 1);
+	div = max(div, 1U);
+	c->rate = clk_get_rate(c->parent) / div;
+
+	if (!d->ops || !d->ops->set_div)
+		d->data.min_div = d->data.max_div = div;
+	d->data.div = div;
+
+	if (d->en_mask && d->ops && d->ops->is_enabled)
+		return d->ops->is_enabled(d)
+			? HANDOFF_ENABLED_CLK
+			: HANDOFF_DISABLED_CLK;
+
+	/*
+	 * If this function returns 'enabled' even when the clock downstream
+	 * of this clock is disabled, then handoff code will unnecessarily
+	 * enable the current parent of this clock. If this function always
+	 * returns 'disabled' and a clock downstream is on, the clock handoff
+	 * code will bump up the ref count for this clock and its current
+	 * parent as necessary. So, clocks without an actual HW gate can
+	 * always return disabled.
+	 */
+	return HANDOFF_DISABLED_CLK;
+}
+
+static void __iomem *div_clk_list_registers(struct clk *c, int n,
+			struct clk_register_data **regs, u32 *size)
+{
+	struct div_clk *d = to_div_clk(c);
+
+	if (d->ops && d->ops->list_registers)
+		return d->ops->list_registers(d, n, regs, size);
+
+	return ERR_PTR(-EINVAL);
+}
+
+const struct clk_ops clk_ops_div = {
+	.enable = div_enable,
+	.disable = div_disable,
+	.round_rate = div_round_rate,
+	.set_rate = div_set_rate,
+	.handoff = div_handoff,
+	.list_registers = div_clk_list_registers,
+};
+
+static long __slave_div_round_rate(struct clk *c, unsigned long rate,
+					int *best_div)
+{
+	struct div_clk *d = to_div_clk(c);
+	unsigned int div, min_div, max_div;
+	long p_rate;
+
+	rate = max(rate, 1UL);
+
+	min_div = d->data.min_div;
+	max_div = d->data.max_div;
+
+	p_rate = clk_get_rate(c->parent);
+	div = DIV_ROUND_CLOSEST(p_rate, rate);
+	div = max(div, min_div);
+	div = min(div, max_div);
+	if (best_div)
+		*best_div = div;
+
+	return p_rate / div;
+}
+
+static long slave_div_round_rate(struct clk *c, unsigned long rate)
+{
+	return __slave_div_round_rate(c, rate, NULL);
+}
+
+static int slave_div_set_rate(struct clk *c, unsigned long rate)
+{
+	struct div_clk *d = to_div_clk(c);
+	int div, rc = 0;
+	long rrate;
+
+	rrate = __slave_div_round_rate(c, rate, &div);
+	if (rrate != rate)
+		return -EINVAL;
+
+	if (div == d->data.div)
+		return 0;
+
+	/*
+	 * For fixed divider clock we don't want to return an error if the
+	 * requested rate matches the achievable rate. So, don't check for
+	 * !d->ops and return an error. __slave_div_round_rate() ensures
+	 * div == d->data.div if !d->ops.
+	 */
+	rc = d->ops->set_div(d, div);
+	if (rc)
+		return rc;
+
+	d->data.div = div;
+
+	return 0;
+}
+
+static unsigned long slave_div_get_rate(struct clk *c)
+{
+	struct div_clk *d = to_div_clk(c);
+
+	if (!d->data.div)
+		return 0;
+	return clk_get_rate(c->parent) / d->data.div;
+}
+
+const struct clk_ops clk_ops_slave_div = {
+	.enable = div_enable,
+	.disable = div_disable,
+	.round_rate = slave_div_round_rate,
+	.set_rate = slave_div_set_rate,
+	.get_rate = slave_div_get_rate,
+	.handoff = div_handoff,
+	.list_registers = div_clk_list_registers,
+};
+
+/*
+ * External clock
+ * Some clock controllers have an input clock signal that comes from outside
+ * the clock controller. That input clock signal might then be used as a source
+ * for several clocks inside the clock controller. This external clock
+ * implementation models the input clock signal by simply passing requests on
+ * to the clock's parent, the original external clock source. The driver for
+ * the clock controller should clk_get() the original external clock in its
+ * probe function and set it as the parent of this external clock.
+ */
+
+long parent_round_rate(struct clk *c, unsigned long rate)
+{
+	return clk_round_rate(c->parent, rate);
+}
+
+int parent_set_rate(struct clk *c, unsigned long rate)
+{
+	return clk_set_rate(c->parent, rate);
+}
+
+unsigned long parent_get_rate(struct clk *c)
+{
+	return clk_get_rate(c->parent);
+}
+
+static int ext_set_parent(struct clk *c, struct clk *p)
+{
+	return clk_set_parent(c->parent, p);
+}
+
+static struct clk *ext_get_parent(struct clk *c)
+{
+	struct ext_clk *ext = to_ext_clk(c);
+
+	if (!IS_ERR_OR_NULL(c->parent))
+		return c->parent;
+	return clk_get(ext->dev, ext->clk_id);
+}
+
+static enum handoff ext_handoff(struct clk *c)
+{
+	c->rate = clk_get_rate(c->parent);
+	/* Similar reasoning applied in div_handoff, see comment there. */
+	return HANDOFF_DISABLED_CLK;
+}
+
+const struct clk_ops clk_ops_ext = {
+	.handoff = ext_handoff,
+	.round_rate = parent_round_rate,
+	.set_rate = parent_set_rate,
+	.get_rate = parent_get_rate,
+	.set_parent = ext_set_parent,
+	.get_parent = ext_get_parent,
+};
+
+static void *ext_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct ext_clk *ext;
+	const char *str;
+	int rc;
+
+	ext = devm_kzalloc(dev, sizeof(*ext), GFP_KERNEL);
+	if (!ext)
+		return ERR_PTR(-ENOMEM);
+
+	ext->dev = dev;
+	rc = of_property_read_string(np, "qcom,clock-names", &str);
+	if (!rc)
+		ext->clk_id = (void *)str;
+
+	ext->c.ops = &clk_ops_ext;
+	return msmclk_generic_clk_init(dev, np, &ext->c);
+}
+MSMCLK_PARSER(ext_clk_dt_parser, "qcom,ext-clk", 0);
+
+/* ==================== Mux_div clock ==================== */
+
+static int mux_div_clk_enable(struct clk *c)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+
+	if (md->ops->enable)
+		return md->ops->enable(md);
+	return 0;
+}
+
+static void mux_div_clk_disable(struct clk *c)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+
+	if (md->ops->disable)
+		return md->ops->disable(md);
+}
+
+static long __mux_div_round_rate(struct clk *c, unsigned long rate,
+	struct clk **best_parent, int *best_div, unsigned long *best_prate)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+	unsigned int i;
+	unsigned long rrate, best = 0, _best_div = 0, _best_prate = 0;
+	struct clk *_best_parent = NULL;
+
+	if (md->try_get_rate) {
+		for (i = 0; i < md->num_parents; i++) {
+			int divider;
+			unsigned long p_rate;
+
+			rrate = __div_round_rate(&md->data, rate,
+						md->parents[i].src,
+						&divider, &p_rate);
+			/*
+			 * Check if one of the possible parents is already at
+			 * the requested rate.
+			 */
+			if (p_rate == clk_get_rate(md->parents[i].src)
+					&& rrate == rate) {
+				best = rrate;
+				_best_div = divider;
+				_best_prate = p_rate;
+				_best_parent = md->parents[i].src;
+				goto end;
+			}
+		}
+	}
+
+	for (i = 0; i < md->num_parents; i++) {
+		int div;
+		unsigned long prate;
+
+		rrate = __div_round_rate(&md->data, rate, md->parents[i].src,
+				&div, &prate);
+
+		if (is_better_rate(rate, best, rrate)) {
+			best = rrate;
+			_best_div = div;
+			_best_prate = prate;
+			_best_parent = md->parents[i].src;
+		}
+
+		if (rate <= rrate && rrate <= rate + md->data.rate_margin)
+			break;
+	}
+end:
+	if (best_div)
+		*best_div = _best_div;
+	if (best_prate)
+		*best_prate = _best_prate;
+	if (best_parent)
+		*best_parent = _best_parent;
+
+	if (best)
+		return best;
+	return -EINVAL;
+}
+
+static long mux_div_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	return __mux_div_round_rate(c, rate, NULL, NULL, NULL);
+}
+
+/* requires enable lock to be held */
+static int __set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
+{
+	u32 rc = 0, src_sel;
+
+	src_sel = parent_to_src_sel(md->parents, md->num_parents, parent);
+	/*
+	 * If the clock is disabled, don't change to the new settings until
+	 * the clock is reenabled
+	 */
+	if (md->c.count)
+		rc = md->ops->set_src_div(md, src_sel, div);
+	if (!rc) {
+		md->data.div = div;
+		md->src_sel = src_sel;
+	}
+
+	return rc;
+}
+
+static int set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
+{
+	unsigned long flags;
+	u32 rc;
+
+	spin_lock_irqsave(&md->c.lock, flags);
+	rc = __set_src_div(md, parent, div);
+	spin_unlock_irqrestore(&md->c.lock, flags);
+
+	return rc;
+}
+
+/* Must be called after handoff to ensure parent clock rates are initialized */
+static int safe_parent_init_once(struct clk *c)
+{
+	unsigned long rrate;
+	u32 best_div;
+	struct clk *best_parent;
+	struct mux_div_clk *md = to_mux_div_clk(c);
+
+	if (IS_ERR(md->safe_parent))
+		return -EINVAL;
+	if (!md->safe_freq || md->safe_parent)
+		return 0;
+
+	rrate = __mux_div_round_rate(c, md->safe_freq, &best_parent,
+			&best_div, NULL);
+
+	if (rrate == md->safe_freq) {
+		md->safe_div = best_div;
+		md->safe_parent = best_parent;
+	} else {
+		md->safe_parent = ERR_PTR(-EINVAL);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int mux_div_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+	unsigned long flags, rrate;
+	unsigned long new_prate, new_parent_orig_rate;
+	struct clk *old_parent, *new_parent;
+	u32 new_div, old_div;
+	int rc;
+
+	rc = safe_parent_init_once(c);
+	if (rc)
+		return rc;
+
+	rrate = __mux_div_round_rate(c, rate, &new_parent, &new_div,
+							&new_prate);
+	if (rrate < rate || rrate > rate + md->data.rate_margin)
+		return -EINVAL;
+
+	old_parent = c->parent;
+	old_div = md->data.div;
+
+	/* Refer to the description of safe_freq in clock-generic.h */
+	if (md->safe_freq)
+		rc = set_src_div(md, md->safe_parent, md->safe_div);
+
+	else if (new_parent == old_parent && new_div >= old_div) {
+		/*
+		 * If both the parent_rate and divider changes, there may be an
+		 * intermediate frequency generated. Ensure this intermediate
+		 * frequency is less than both the new rate and previous rate.
+		 */
+		rc = set_src_div(md, old_parent, new_div);
+	}
+	if (rc)
+		return rc;
+
+	new_parent_orig_rate = clk_get_rate(new_parent);
+	rc = clk_set_rate(new_parent, new_prate);
+	if (rc) {
+		pr_err("failed to set %s to %ld\n",
+			clk_name(new_parent), new_prate);
+		goto err_set_rate;
+	}
+
+	rc = __clk_pre_reparent(c, new_parent, &flags);
+	if (rc)
+		goto err_pre_reparent;
+
+	/* Set divider and mux src atomically */
+	rc = __set_src_div(md, new_parent, new_div);
+	if (rc)
+		goto err_set_src_div;
+
+	c->parent = new_parent;
+
+	__clk_post_reparent(c, old_parent, &flags);
+	return 0;
+
+err_set_src_div:
+	/* Not switching to new_parent, so disable it */
+	__clk_post_reparent(c, new_parent, &flags);
+err_pre_reparent:
+	rc = clk_set_rate(new_parent, new_parent_orig_rate);
+	WARN(rc, "%s: error changing new_parent (%s) rate back to %ld\n",
+		clk_name(c), clk_name(new_parent), new_parent_orig_rate);
+err_set_rate:
+	rc = set_src_div(md, old_parent, old_div);
+	WARN(rc, "%s: error changing back to original div (%d) and parent (%s)\n",
+		clk_name(c), old_div, clk_name(old_parent));
+
+	return rc;
+}
+
+static struct clk *mux_div_clk_get_parent(struct clk *c)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+	u32 i, div, src_sel;
+
+	md->ops->get_src_div(md, &src_sel, &div);
+
+	md->data.div = div;
+	md->src_sel = src_sel;
+
+	for (i = 0; i < md->num_parents; i++) {
+		if (md->parents[i].sel == src_sel)
+			return md->parents[i].src;
+	}
+
+	return NULL;
+}
+
+static enum handoff mux_div_clk_handoff(struct clk *c)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+	unsigned long parent_rate;
+	unsigned int numer;
+
+	parent_rate = clk_get_rate(c->parent);
+	/*
+	 * div values are doubled for half dividers.
+	 * Adjust for that by picking a numer of 2.
+	 */
+	numer = md->data.is_half_divider ? 2 : 1;
+
+	if (md->data.div) {
+		c->rate = mult_frac(parent_rate, numer, md->data.div);
+	} else {
+		c->rate = 0;
+		return HANDOFF_DISABLED_CLK;
+	}
+
+	if (md->en_mask && md->ops && md->ops->is_enabled)
+		return md->ops->is_enabled(md)
+			? HANDOFF_ENABLED_CLK
+			: HANDOFF_DISABLED_CLK;
+
+	/*
+	 * If this function returns 'enabled' even when the clock downstream
+	 * of this clock is disabled, then handoff code will unnecessarily
+	 * enable the current parent of this clock. If this function always
+	 * returns 'disabled' and a clock downstream is on, the clock handoff
+	 * code will bump up the ref count for this clock and its current
+	 * parent as necessary. So, clocks without an actual HW gate can
+	 * always return disabled.
+	 */
+	return HANDOFF_DISABLED_CLK;
+}
+
+static void __iomem *mux_div_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+
+	if (md->ops && md->ops->list_registers)
+		return md->ops->list_registers(md, n, regs, size);
+
+	return ERR_PTR(-EINVAL);
+}
+
+const struct clk_ops clk_ops_mux_div_clk = {
+	.enable = mux_div_clk_enable,
+	.disable = mux_div_clk_disable,
+	.set_rate = mux_div_clk_set_rate,
+	.round_rate = mux_div_clk_round_rate,
+	.get_parent = mux_div_clk_get_parent,
+	.handoff = mux_div_clk_handoff,
+	.list_registers = mux_div_clk_list_registers,
+};
diff --git a/drivers/clk/msm/clock-local2.c b/drivers/clk/msm/clock-local2.c
new file mode 100644
index 0000000..f200d0b
--- /dev/null
+++ b/drivers/clk/msm/clock-local2.c
@@ -0,0 +1,2907 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+/*
+ * When enabling/disabling a clock, check the halt bit up to this number of
+ * times (with a 1 us delay in between) before continuing.
+ */
+#define HALT_CHECK_MAX_LOOPS	500
+/* For clock without halt checking, wait this long after enables/disables. */
+#define HALT_CHECK_DELAY_US	500
+
+#define RCG_FORCE_DISABLE_DELAY_US	100
+
+/*
+ * When updating an RCG configuration, check the update bit up to this number
+ * of times (with a 1 us delay in between) before continuing.
+ */
+#define UPDATE_CHECK_MAX_LOOPS	500
+
+DEFINE_SPINLOCK(local_clock_reg_lock);
+struct clk_freq_tbl rcg_dummy_freq = F_END;
+
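+/*
+ * Register address helpers: each macro dereferences the clock's ioremapped
+ * base pointer and adds that register's offset. The RCG CFG, M, N and D
+ * registers sit at fixed offsets (0x4, 0x8, 0xC, 0x10) from CMD_RCGR.
+ */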
+#define CMD_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg)
+#define CFG_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x4)
+#define M_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0x8)
+#define N_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0xC)
+#define D_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0x10)
+#define CBCR_REG(x)	(*(x)->base + (x)->cbcr_reg)
+#define BCR_REG(x)	(*(x)->base + (x)->bcr_reg)
+#define RST_REG(x)	(*(x)->base + (x)->reset_reg)
+#define VOTE_REG(x)	(*(x)->base + (x)->vote_reg)
+#define GATE_EN_REG(x)	(*(x)->base + (x)->en_reg)
+#define DIV_REG(x)	(*(x)->base + (x)->offset)
+#define MUX_REG(x)	(*(x)->base + (x)->offset)
+
+/*
+ * Important clock bit positions and masks
+ */
+#define CMD_RCGR_ROOT_ENABLE_BIT	BIT(1)
+#define CBCR_BRANCH_ENABLE_BIT		BIT(0)
+#define CBCR_BRANCH_OFF_BIT		BIT(31)
+#define CMD_RCGR_CONFIG_UPDATE_BIT	BIT(0)
+#define CMD_RCGR_ROOT_STATUS_BIT	BIT(31)
+#define BCR_BLK_ARES_BIT		BIT(0)
+#define CBCR_HW_CTL_BIT			BIT(1)
+#define CFG_RCGR_DIV_MASK		BM(4, 0)
+#define CFG_RCGR_SRC_SEL_MASK		BM(10, 8)
+#define MND_MODE_MASK			BM(13, 12)
+#define MND_DUAL_EDGE_MODE_BVAL		BVAL(13, 12, 0x2)
+#define CMD_RCGR_CONFIG_DIRTY_MASK	BM(7, 4)
+#define CBCR_CDIV_LSB			16
+#define CBCR_CDIV_MSB			19
+
+enum branch_state {
+	BRANCH_ON,
+	BRANCH_OFF,
+};
+
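+/* 19.2 MHz CXO: the always-on source an RCG parks on while it is disabled. */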
+static struct clk_freq_tbl cxo_f = {
+	.freq_hz = 19200000,
+	.m_val = 0,
+	.n_val = 0,
+	.d_val = 0,
+	.div_src_val = 0,
+};
+
+struct div_map {
+	u32 mask;
+	int div;
+};
+
+/*
+ * RCG functions
+ */
+
+/*
+ * Update an RCG with a new configuration. This may include a new M, N, or D
+ * value, source selection or pre-divider value.
+ */
+static void rcg_update_config(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+	int count = UPDATE_CHECK_MAX_LOOPS;
+
+	if (rcg->non_local_control_timeout)
+		count = rcg->non_local_control_timeout;
+
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
+	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
+
+	/* Wait for update to take effect */
+	for (; count > 0; count--) {
+		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
+				CMD_RCGR_CONFIG_UPDATE_BIT))
+			return;
+		udelay(1);
+	}
+
+	CLK_WARN(&rcg->c, count == 0, "rcg didn't update its configuration.");
+}
+
+static void rcg_on_check(struct rcg_clk *rcg)
+{
+	int count = UPDATE_CHECK_MAX_LOOPS;
+
+	if (rcg->non_local_control_timeout)
+		count = rcg->non_local_control_timeout;
+
+	/* Wait for RCG to turn on */
+	for (; count > 0; count--) {
+		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
+				CMD_RCGR_ROOT_STATUS_BIT))
+			return;
+		udelay(1);
+	}
+	CLK_WARN(&rcg->c, count == 0, "rcg didn't turn on.");
+}
+
+/* RCG set rate function for clocks with Half Integer Dividers. */
+static void __set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	u32 cfg_regval;
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
+	cfg_regval |= nf->div_src_val;
+	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));
+
+	rcg_update_config(rcg);
+}
+
+void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	__set_rate_hid(rcg, nf);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+/* RCG set rate function for clocks with MND & Half Integer Dividers. */
+static void __set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	u32 cfg_regval;
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	writel_relaxed(nf->m_val, M_REG(rcg));
+	writel_relaxed(nf->n_val, N_REG(rcg));
+	writel_relaxed(nf->d_val, D_REG(rcg));
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
+	cfg_regval |= nf->div_src_val;
+
+	/* Activate or disable the M/N:D divider as necessary */
+	cfg_regval &= ~MND_MODE_MASK;
+	if (nf->n_val != 0)
+		cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
+	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));
+
+	rcg_update_config(rcg);
+}
+
+void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	__set_rate_mnd(rcg, nf);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void rcg_set_force_enable(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	cmd_rcgr_regval |= CMD_RCGR_ROOT_ENABLE_BIT;
+	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
+	rcg_on_check(rcg);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void rcg_clear_force_enable(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	cmd_rcgr_regval &= ~CMD_RCGR_ROOT_ENABLE_BIT;
+	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	/* Add a delay of 100usecs to let the RCG disable */
+	udelay(RCG_FORCE_DISABLE_DELAY_US);
+}
+
+static int rcg_clk_enable(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	WARN(rcg->current_freq == &rcg_dummy_freq,
+		"Attempting to prepare %s before setting its rate.",
+		rcg->c.dbg_name);
+
+	if (rcg->force_enable_rcgr) {
+		rcg_set_force_enable(rcg);
+		return 0;
+	}
+
+	if (!rcg->non_local_children || rcg->current_freq == &rcg_dummy_freq)
+		return 0;
+	/*
+	 * Switch from CXO to saved mux value. Force enable/disable while
+	 * switching. The current parent is already prepared and enabled
+	 * at this point, and the CXO source is always-on. Therefore the
+	 * RCG can safely execute a dynamic switch.
+	 */
+	rcg_set_force_enable(rcg);
+	rcg->set_rate(rcg, rcg->current_freq);
+	rcg_clear_force_enable(rcg);
+
+	return 0;
+}
+
+static void rcg_clk_disable(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	if (rcg->force_enable_rcgr) {
+		rcg_clear_force_enable(rcg);
+		return;
+	}
+
+	if (!rcg->non_local_children)
+		return;
+
+	/*
+	 * Save mux select and switch to CXO. Force enable/disable while
+	 * switching. The current parent is still prepared and enabled at this
+	 * point, and the CXO source is always-on. Therefore the RCG can safely
+	 * execute a dynamic switch.
+	 */
+	rcg_set_force_enable(rcg);
+	rcg->set_rate(rcg, &cxo_f);
+	rcg_clear_force_enable(rcg);
+}
+
+static int prepare_enable_rcg_srcs(struct clk *c, struct clk *curr,
+					struct clk *new, unsigned long *flags)
+{
+	int rc;
+
+	rc = clk_prepare(curr);
+	if (rc)
+		return rc;
+
+	if (c->prepare_count) {
+		rc = clk_prepare(new);
+		if (rc)
+			goto err_new_src_prepare;
+	}
+
+	rc = clk_prepare(new);
+	if (rc)
+		goto err_new_src_prepare2;
+
+	spin_lock_irqsave(&c->lock, *flags);
+	rc = clk_enable(curr);
+	if (rc) {
+		spin_unlock_irqrestore(&c->lock, *flags);
+		goto err_curr_src_enable;
+	}
+
+	if (c->count) {
+		rc = clk_enable(new);
+		if (rc) {
+			spin_unlock_irqrestore(&c->lock, *flags);
+			goto err_new_src_enable;
+		}
+	}
+
+	rc = clk_enable(new);
+	if (rc) {
+		spin_unlock_irqrestore(&c->lock, *flags);
+		goto err_new_src_enable2;
+	}
+	return 0;
+
+err_new_src_enable2:
+	if (c->count)
+		clk_disable(new);
+err_new_src_enable:
+	clk_disable(curr);
+err_curr_src_enable:
+	clk_unprepare(new);
+err_new_src_prepare2:
+	if (c->prepare_count)
+		clk_unprepare(new);
+err_new_src_prepare:
+	clk_unprepare(curr);
+	return rc;
+}
+
+static void disable_unprepare_rcg_srcs(struct clk *c, struct clk *curr,
+					struct clk *new, unsigned long *flags)
+{
+	clk_disable(new);
+	clk_disable(curr);
+	if (c->count)
+		clk_disable(curr);
+	spin_unlock_irqrestore(&c->lock, *flags);
+
+	clk_unprepare(new);
+	clk_unprepare(curr);
+	if (c->prepare_count)
+		clk_unprepare(curr);
+}
+
+static int rcg_clk_set_duty_cycle(struct clk *c, u32 numerator,
+				u32 denominator)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	u32 notn_m_val, n_val, m_val, d_val, not2d_val;
+	u32 max_n_value;
+
+	if (!numerator || numerator == denominator)
+		return -EINVAL;
+
+	if (!rcg->mnd_reg_width)
+		rcg->mnd_reg_width = 8;
+
+	max_n_value = 1 << (rcg->mnd_reg_width - 1);
+
+	notn_m_val = readl_relaxed(N_REG(rcg));
+	m_val = readl_relaxed(M_REG(rcg));
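+	/* N register holds ~(N - M); invert and add M back to recover N. */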
+	n_val = ((~notn_m_val) + m_val) & BM((rcg->mnd_reg_width - 1), 0);
+
+	if (n_val > max_n_value) {
+		pr_warn("%s duty-cycle cannot be set for required frequency %ld\n",
+				c->dbg_name, clk_get_rate(c));
+		return -EINVAL;
+	}
+
+	/* Calculate the 2d value */
+	d_val = DIV_ROUND_CLOSEST((numerator * n_val * 2),  denominator);
+
+	/* Check the bit width of 2d. If d is too big, reduce the duty cycle. */
+	if (d_val > (BIT(rcg->mnd_reg_width) - 1)) {
+		d_val = (BIT(rcg->mnd_reg_width) - 1) / 2;
+		d_val *= 2;
+	}
+
+	not2d_val = (~d_val) & BM((rcg->mnd_reg_width - 1), 0);
+
+	writel_relaxed(not2d_val, D_REG(rcg));
+	rcg_update_config(rcg);
+
+	return 0;
+}
+
+static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct clk_freq_tbl *cf, *nf;
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	int rc;
+	unsigned long flags;
+
+	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
+			&& nf->freq_hz != rate; nf++)
+		;
+
+	if (nf->freq_hz == FREQ_END)
+		return -EINVAL;
+
+	cf = rcg->current_freq;
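+	/* If the new frequency requires a specific source rate, set it first. */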
+	if (nf->src_freq != FIXED_CLK_SRC) {
+		rc = clk_set_rate(nf->src_clk, nf->src_freq);
+		if (rc)
+			return rc;
+	}
+
+	if (rcg->non_local_control_timeout) {
+		/*
+		 * __clk_pre_reparent only enables the RCG source if the SW
+		 * count for the RCG is non-zero. We need to make sure that
+		 * both PLL sources are ON before force turning on the RCG.
+		 */
+		rc = prepare_enable_rcg_srcs(c, cf->src_clk, nf->src_clk,
+								&flags);
+	} else
+		rc = __clk_pre_reparent(c, nf->src_clk, &flags);
+
+	if (rc)
+		return rc;
+
+	WARN_ON(!rcg->set_rate);
+
+	/* Perform clock-specific frequency switch operations. */
+	if ((rcg->non_local_children && c->count) ||
+			rcg->non_local_control_timeout) {
+		/*
+		 * Force enable the RCG before updating the RCG configuration
+		 * since the downstream clock/s can be disabled at around the
+		 * same time causing the feedback from the CBCR to turn off
+		 * the RCG.
+		 */
+		rcg_set_force_enable(rcg);
+		rcg->set_rate(rcg, nf);
+		rcg_clear_force_enable(rcg);
+	} else if (!rcg->non_local_children) {
+		rcg->set_rate(rcg, nf);
+	}
+
+	/*
+	 * If non_local_children is set and the RCG is not enabled,
+	 * the following operations switch parent in software and cache
+	 * the frequency. The mux switch will occur when the RCG is enabled.
+	 */
+	rcg->current_freq = nf;
+	c->parent = nf->src_clk;
+
+	if (rcg->non_local_control_timeout)
+		disable_unprepare_rcg_srcs(c, cf->src_clk, nf->src_clk,
+								&flags);
+	else
+		__clk_post_reparent(c, cf->src_clk, &flags);
+
+	return 0;
+}
+
+/*
+ * Return a supported rate that's at least the specified rate or
+ * the max supported rate if the specified rate is larger than the
+ * max supported rate.
+ */
+static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	struct clk_freq_tbl *f;
+
+	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
+		if (f->freq_hz >= rate)
+			return f->freq_hz;
+
+	f--;
+	return f->freq_hz;
+}
+
+/* Return the nth supported frequency for a given clock. */
+static long rcg_clk_list_rate(struct clk *c, unsigned long  n)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
+		return -ENXIO;
+
+	return (rcg->freq_tbl + n)->freq_hz;
+}
+
+static struct clk *_rcg_clk_get_parent(struct rcg_clk *rcg, bool has_mnd,
+								bool match_rate)
+{
+	u32 n_regval = 0, m_regval = 0, d_regval = 0;
+	u32 cfg_regval, div, div_regval;
+	struct clk_freq_tbl *freq;
+	u32 cmd_rcgr_regval;
+
+	if (!rcg->freq_tbl) {
+		WARN(1, "No frequency table present for rcg %s\n",
+							rcg->c.dbg_name);
+		return NULL;
+	}
+
+	/* Is there a pending configuration? */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK) {
+		WARN(1, "Pending transaction for rcg %s\n", rcg->c.dbg_name);
+		return NULL;
+	}
+
+	/* Get values of m, n, d, div and src_sel registers. */
+	if (has_mnd) {
+		m_regval = readl_relaxed(M_REG(rcg));
+		n_regval = readl_relaxed(N_REG(rcg));
+		d_regval = readl_relaxed(D_REG(rcg));
+
+		/*
+		 * The n and d values stored in the frequency tables are sign
+		 * extended to 32 bits. The n and d values in the registers are
+		 * sign extended to 8 or 16 bits. Sign extend the values read
+		 * from the registers so that they can be compared to the
+		 * values in the frequency tables.
+		 */
+		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
+		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
+	}
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
+				| MND_MODE_MASK;
+
+	/* If mnd counter is present, check if it's in use. */
+	has_mnd = (has_mnd) &&
+		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);
+
+	/*
+	 * Clear out the mn counter mode bits since we now want to compare only
+	 * the source mux selection and pre-divider values in the registers.
+	 */
+	cfg_regval &= ~MND_MODE_MASK;
+
+	/* Figure out what rate the rcg is running at */
+	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+		/* source select does not match */
+		if ((freq->div_src_val & CFG_RCGR_SRC_SEL_MASK)
+		    != (cfg_regval & CFG_RCGR_SRC_SEL_MASK))
+			continue;
+		/*
+		 * Stop if we found the required parent in the frequency table
+		 * and we only care that the source matches, not whether the
+		 * frequency matches.
+		 */
+		if (!match_rate)
+			break;
+		/* divider does not match */
+		div = freq->div_src_val & CFG_RCGR_DIV_MASK;
+		div_regval = cfg_regval & CFG_RCGR_DIV_MASK;
+		if (div != div_regval && (div > 1 || div_regval > 1))
+			continue;
+
+		if (has_mnd) {
+			if (freq->m_val != m_regval)
+				continue;
+			if (freq->n_val != n_regval)
+				continue;
+			if (freq->d_val != d_regval)
+				continue;
+		} else if (freq->n_val) {
+			continue;
+		}
+		break;
+	}
+
+	/* No known frequency found */
+	if (freq->freq_hz == FREQ_END) {
+		/*
+		 * If we can't recognize the frequency and non_local_children is
+		 * set, switch to safe frequency. It is assumed the current
+		 * parent has been turned on by the bootchain if the RCG is on.
+		 */
+		if (rcg->non_local_children) {
+			rcg->set_rate(rcg, &cxo_f);
+			WARN(1, "don't recognize rcg frequency for %s\n",
+				rcg->c.dbg_name);
+		}
+		return NULL;
+	}
+
+	rcg->current_freq = freq;
+	return freq->src_clk;
+}
+
+static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+
+	if (rcg->current_freq && rcg->current_freq->freq_hz != FREQ_END)
+		rcg->c.rate = rcg->current_freq->freq_hz;
+
+	/* Is the root enabled? */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static struct clk *display_clk_get_parent(struct clk *c)
+{
+	return _rcg_clk_get_parent(to_rcg_clk(c), false, false);
+}
+
+static struct clk *rcg_mnd_clk_get_parent(struct clk *c)
+{
+	return _rcg_clk_get_parent(to_rcg_clk(c), true, true);
+}
+
+static struct clk *rcg_clk_get_parent(struct clk *c)
+{
+	return _rcg_clk_get_parent(to_rcg_clk(c), false, true);
+}
+
+static enum handoff rcg_mnd_clk_handoff(struct clk *c)
+{
+	return _rcg_clk_handoff(to_rcg_clk(c));
+}
+
+static enum handoff rcg_clk_handoff(struct clk *c)
+{
+	return _rcg_clk_handoff(to_rcg_clk(c));
+}
+
+static void __iomem *rcg_hid_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	static struct clk_register_data data[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return CMD_RCGR_REG(rcg);
+}
+
+static void __iomem *rcg_mnd_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	static struct clk_register_data data[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+		{"M_VAL", 0x8},
+		{"N_VAL", 0xC},
+		{"D_VAL", 0x10},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return CMD_RCGR_REG(rcg);
+}
+
+#define BRANCH_CHECK_MASK	BM(31, 28)
+#define BRANCH_ON_VAL		BVAL(31, 28, 0x0)
+#define BRANCH_OFF_VAL		BVAL(31, 28, 0x8)
+#define BRANCH_NOC_FSM_ON_VAL	BVAL(31, 28, 0x2)
+
+/*
+ * Branch clock functions
+ */
+static void branch_clk_halt_check(struct clk *c, u32 halt_check,
+			void __iomem *cbcr_reg, enum branch_state br_status)
+{
+	char *status_str = (br_status == BRANCH_ON) ? "off" : "on";
+
+	/*
+	 * Use a memory barrier since some halt status registers are
+	 * not within the same 1K segment as the branch/root enable
+	 * registers.  It's also needed in the udelay() case to ensure
+	 * the delay starts after the branch disable.
+	 */
+	mb();
+
+	if (halt_check == DELAY || halt_check == HALT_VOTED) {
+		udelay(HALT_CHECK_DELAY_US);
+	} else if (halt_check == HALT) {
+		int count;
+		u32 val;
+
+		for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
+			val = readl_relaxed(cbcr_reg);
+			val &= BRANCH_CHECK_MASK;
+			switch (br_status) {
+			case BRANCH_ON:
+				if (val == BRANCH_ON_VAL
+					|| val == BRANCH_NOC_FSM_ON_VAL)
+					return;
+				break;
+
+			case BRANCH_OFF:
+				if (val == BRANCH_OFF_VAL)
+					return;
+				break;
+			}
+			udelay(1);
+		}
+		CLK_WARN(c, count == 0, "status stuck %s", status_str);
+	}
+}
+
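+/* Return the highest rate requested by any prepared child of the parent. */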
+static unsigned long branch_clk_aggregate_rate(const struct clk *parent)
+{
+	struct clk *clk;
+	unsigned long rate = 0;
+
+	list_for_each_entry(clk, &parent->children, siblings) {
+		struct branch_clk *v = to_branch_clk(clk);
+
+		if (v->is_prepared)
+			rate = max(clk->rate, rate);
+	}
+	return rate;
+}
+
+static int cbcr_set_flags(void __iomem *regaddr, unsigned long flags)
+{
+	u32 cbcr_val;
+	unsigned long irq_flags;
+	int delay_us = 0, ret = 0;
+
+	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
+	cbcr_val = readl_relaxed(regaddr);
+	switch (flags) {
+	case CLKFLAG_PERIPH_OFF_SET:
+		cbcr_val |= BIT(12);
+		delay_us = 1;
+		break;
+	case CLKFLAG_PERIPH_OFF_CLEAR:
+		cbcr_val &= ~BIT(12);
+		break;
+	case CLKFLAG_RETAIN_PERIPH:
+		cbcr_val |= BIT(13);
+		delay_us = 1;
+		break;
+	case CLKFLAG_NORETAIN_PERIPH:
+		cbcr_val &= ~BIT(13);
+		break;
+	case CLKFLAG_RETAIN_MEM:
+		cbcr_val |= BIT(14);
+		delay_us = 1;
+		break;
+	case CLKFLAG_NORETAIN_MEM:
+		cbcr_val &= ~BIT(14);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	writel_relaxed(cbcr_val, regaddr);
+	/* Make sure power is enabled before returning. */
+	mb();
+	udelay(delay_us);
+
+	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);
+
+	return ret;
+}
+
+static int branch_clk_set_flags(struct clk *c, unsigned long flags)
+{
+	return cbcr_set_flags(CBCR_REG(to_branch_clk(c)), flags);
+}
+
+static DEFINE_MUTEX(branch_clk_lock);
+
+static int branch_clk_prepare(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	unsigned long curr_rate;
+	int ret = 0;
+
+	if (!branch->aggr_sibling_rates)
+		return ret;
+
+	mutex_lock(&branch_clk_lock);
+	branch->is_prepared = false;
+	curr_rate = branch_clk_aggregate_rate(c->parent);
+	if (c->rate > curr_rate) {
+		ret = clk_set_rate(c->parent, c->rate);
+		if (ret)
+			goto exit;
+	}
+	branch->is_prepared = true;
+exit:
+	mutex_unlock(&branch_clk_lock);
+	return ret;
+}
+
+static int branch_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	u32 cbcr_val;
+	struct branch_clk *branch = to_branch_clk(c);
+
+	if (branch->toggle_memory) {
+		branch_clk_set_flags(c, CLKFLAG_RETAIN_MEM);
+		branch_clk_set_flags(c, CLKFLAG_RETAIN_PERIPH);
+	}
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cbcr_val = readl_relaxed(CBCR_REG(branch));
+	cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(cbcr_val, CBCR_REG(branch));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	/*
+	 * For clocks controlled by other masters via voting registers,
+	 * delay polling for the status bit to allow previous clk_disable
+	 * by the GDS controller to go through.
+	 */
+	if (branch->no_halt_check_on_disable)
+		udelay(5);
+
+	/* Wait for clock to enable before continuing. */
+	branch_clk_halt_check(c, branch->halt_check, CBCR_REG(branch),
+				BRANCH_ON);
+
+	return 0;
+}
+
+static void branch_clk_unprepare(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	unsigned long curr_rate, new_rate;
+
+	if (!branch->aggr_sibling_rates)
+		return;
+
+	mutex_lock(&branch_clk_lock);
+	branch->is_prepared = false;
+	new_rate = branch_clk_aggregate_rate(c->parent);
+	curr_rate = max(new_rate, c->rate);
+	if (new_rate < curr_rate)
+		clk_set_rate(c->parent, new_rate);
+	mutex_unlock(&branch_clk_lock);
+}
+
+static void branch_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	struct branch_clk *branch = to_branch_clk(c);
+	u32 reg_val;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	reg_val = readl_relaxed(CBCR_REG(branch));
+	reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(reg_val, CBCR_REG(branch));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	/* Wait for clock to disable before continuing. */
+	if (!branch->no_halt_check_on_disable)
+		branch_clk_halt_check(c, branch->halt_check, CBCR_REG(branch),
+					BRANCH_OFF);
+
+	if (branch->toggle_memory) {
+		branch_clk_set_flags(c, CLKFLAG_NORETAIN_MEM);
+		branch_clk_set_flags(c, CLKFLAG_NORETAIN_PERIPH);
+	}
+}
+
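+/*
+ * For branch clocks with an internal divider (max_div set), the requested
+ * "rate" is the divider value itself and is programmed into the CBCR CDIV
+ * field rather than being forwarded to the parent.
+ */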
+static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
+{
+	unsigned long flags;
+	u32 regval;
+
+	if (rate > branch->max_div)
+		return -EINVAL;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(CBCR_REG(branch));
+	regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+	regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, rate);
+	writel_relaxed(regval, CBCR_REG(branch));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	return 0;
+}
+
+static int branch_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct branch_clk *clkh, *branch = to_branch_clk(c);
+	struct clk *clkp, *parent = c->parent;
+	unsigned long curr_rate, new_rate, other_rate = 0;
+	int ret = 0;
+
+	if (branch->max_div)
+		return branch_cdiv_set_rate(branch, rate);
+
+	if (branch->has_sibling)
+		return -EPERM;
+
+	if (!branch->aggr_sibling_rates)
+		return clk_set_rate(c->parent, rate);
+
+	mutex_lock(&branch_clk_lock);
+	if (!branch->is_prepared) {
+		c->rate = rate;
+		goto exit;
+	}
+	/*
+	 * Get the aggregate rate without this clock's vote and update
+	 * if the new rate is different than the current rate.
+	 */
+	list_for_each_entry(clkp, &parent->children, siblings) {
+		clkh = to_branch_clk(clkp);
+		if (clkh->is_prepared && clkh != branch)
+			other_rate = max(clkp->rate, other_rate);
+	}
+	curr_rate = max(other_rate, c->rate);
+	new_rate = max(other_rate, rate);
+	if (new_rate != curr_rate) {
+		ret = clk_set_rate(parent, new_rate);
+		if (!ret)
+			c->rate = rate;
+	}
+exit:
+	mutex_unlock(&branch_clk_lock);
+	return ret;
+}
+
+static long branch_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+
+	if (branch->max_div)
+		return rate <= (branch->max_div) ? rate : -EPERM;
+
+	if (!branch->has_sibling)
+		return clk_round_rate(c->parent, rate);
+
+	return -EPERM;
+}
+
+static unsigned long branch_clk_get_rate(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+
+	if (branch->max_div)
+		return branch->c.rate;
+
+	return clk_get_rate(c->parent);
+}
+
+static long branch_clk_list_rate(struct clk *c, unsigned long  n)
+{
+	int level;
+	unsigned long fmax = 0, rate;
+	struct branch_clk *branch = to_branch_clk(c);
+	struct clk *parent = c->parent;
+
+	if (branch->has_sibling == 1)
+		return -ENXIO;
+
+	if (!parent || !parent->ops->list_rate)
+		return -ENXIO;
+
+	/* Find max frequency supported within voltage constraints. */
+	if (!parent->vdd_class) {
+		fmax = ULONG_MAX;
+	} else {
+		for (level = 0; level < parent->num_fmax; level++)
+			if (parent->fmax[level])
+				fmax = parent->fmax[level];
+	}
+
+	rate = parent->ops->list_rate(parent, n);
+	if (rate <= fmax)
+		return rate;
+	else
+		return -ENXIO;
+}
+
+static enum handoff branch_clk_handoff(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	u32 cbcr_regval;
+
+	cbcr_regval = readl_relaxed(CBCR_REG(branch));
+
+	/* Set the cdiv to c->rate for fixed divider branch clock */
+	if (c->rate && (c->rate < branch->max_div)) {
+		cbcr_regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+		cbcr_regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, c->rate);
+		writel_relaxed(cbcr_regval, CBCR_REG(branch));
+	}
+
+	if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
+		return HANDOFF_DISABLED_CLK;
+
+	if (!(cbcr_regval & CBCR_BRANCH_ENABLE_BIT)) {
+		if (!branch->check_enable_bit)
+			pr_warn("%s clock is enabled in HW even though ENABLE_BIT is not set\n",
+				c->dbg_name);
+		return HANDOFF_DISABLED_CLK;
+	}
+
+	if (branch->max_div) {
+		cbcr_regval &= BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+		cbcr_regval >>= CBCR_CDIV_LSB;
+		c->rate = cbcr_regval;
+	} else if (!branch->has_sibling) {
+		c->rate = clk_get_rate(c->parent);
+	}
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static int __branch_clk_reset(void __iomem *bcr_reg,
+				enum clk_reset_action action)
+{
+	int ret = 0;
+	unsigned long flags;
+	u32 reg_val;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	reg_val = readl_relaxed(bcr_reg);
+	switch (action) {
+	case CLK_RESET_ASSERT:
+		reg_val |= BCR_BLK_ARES_BIT;
+		break;
+	case CLK_RESET_DEASSERT:
+		reg_val &= ~BCR_BLK_ARES_BIT;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	writel_relaxed(reg_val, bcr_reg);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	/* Make sure write is issued before returning. */
+	mb();
+
+	return ret;
+}
+
+static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+
+	if (!branch->bcr_reg)
+		return -EPERM;
+	return __branch_clk_reset(BCR_REG(branch), action);
+}
+
+static void __iomem *branch_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	static struct clk_register_data data[] = {
+		{"CBCR", 0x0},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return CBCR_REG(branch);
+}
+
+/*
+ * Voteable clock functions
+ */
+static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
+{
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+	if (!vclk->bcr_reg) {
+		WARN("clk_reset called on an unsupported clock (%s)\n",
+			c->dbg_name);
+		return -EPERM;
+	}
+	return __branch_clk_reset(BCR_REG(vclk), action);
+}
+
+static int local_vote_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	u32 ena;
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	ena = readl_relaxed(VOTE_REG(vclk));
+	ena |= vclk->en_mask;
+	writel_relaxed(ena, VOTE_REG(vclk));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	branch_clk_halt_check(c, vclk->halt_check, CBCR_REG(vclk), BRANCH_ON);
+
+	return 0;
+}
+
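+/*
+ * Note that disabling only clears this master's vote; the branch may stay
+ * running if other voters still hold it on, so unlike the enable path no
+ * halt-status polling is done here.
+ */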
+static void local_vote_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	u32 ena;
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	ena = readl_relaxed(VOTE_REG(vclk));
+	ena &= ~vclk->en_mask;
+	writel_relaxed(ena, VOTE_REG(vclk));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static enum handoff local_vote_clk_handoff(struct clk *c)
+{
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+	u32 vote_regval;
+
+	/* Is the branch voted on by apps? */
+	vote_regval = readl_relaxed(VOTE_REG(vclk));
+	if (!(vote_regval & vclk->en_mask))
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+/* Sample clock for 'ticks' reference clock ticks. */
+static u32 run_measurement(unsigned long ticks, void __iomem *ctl_reg,
+				void __iomem *status_reg)
+{
+	/* Stop counters and set the XO4 counter start value. */
+	writel_relaxed(ticks, ctl_reg);
+
+	/* Wait for timer to become ready. */
+	while ((readl_relaxed(status_reg) & BIT(25)) != 0)
+		cpu_relax();
+
+	/* Run measurement and wait for completion. */
+	writel_relaxed(BIT(20)|ticks, ctl_reg);
+	while ((readl_relaxed(status_reg) & BIT(25)) == 0)
+		cpu_relax();
+
+	/* Return measured ticks. */
+	return readl_relaxed(status_reg) & BM(24, 0);
+}
+
+/*
+ * Perform a hardware rate measurement for a given clock.
+ * FOR DEBUG USE ONLY: Measurements take ~15 ms!
+ */
+unsigned long measure_get_rate(struct clk *c)
+{
+	unsigned long flags;
+	u32 gcc_xo4_reg, regval;
+	u64 raw_count_short, raw_count_full;
+	unsigned long ret;
+	u32 sample_ticks = 0x10000;
+	u32 multiplier = to_mux_clk(c)->post_div + 1;
+	struct measure_clk_data *data = to_mux_clk(c)->priv;
+
+	regval = readl_relaxed(MUX_REG(to_mux_clk(c)));
+	/* clear and set post divider bits */
+	regval &= ~BM(15, 12);
+	regval |= BVAL(15, 12, to_mux_clk(c)->post_div);
+	writel_relaxed(regval, MUX_REG(to_mux_clk(c)));
+
+	ret = clk_prepare_enable(data->cxo);
+	if (ret) {
+		pr_warn("CXO clock failed to enable. Can't measure\n");
+		ret = 0;
+		goto fail;
+	}
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+
+	/* Enable CXO/4 and RINGOSC branch. */
+	gcc_xo4_reg = readl_relaxed(*data->base + data->xo_div4_cbcr);
+	gcc_xo4_reg |= CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(gcc_xo4_reg, *data->base + data->xo_div4_cbcr);
+
+	/*
+	 * The ring oscillator counter will not reset if the measured clock
+	 * is not running.  To detect this, run a short measurement before
+	 * the full measurement.  If the raw results of the two are the same
+	 * then the clock must be off.
+	 */
+
+	/* Run a short measurement. (~1 ms) */
+	raw_count_short = run_measurement(0x1000, *data->base + data->ctl_reg,
+					  *data->base + data->status_reg);
+	/* Run a full measurement. (~14 ms) */
+	raw_count_full = run_measurement(sample_ticks,
+					 *data->base + data->ctl_reg,
+					 *data->base + data->status_reg);
+
+	gcc_xo4_reg &= ~CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(gcc_xo4_reg, *data->base + data->xo_div4_cbcr);
+
+	/* Return 0 if the clock is off. */
+	if (raw_count_full == raw_count_short) {
+		ret = 0;
+	} else {
+		/* Compute rate in Hz. */
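+		/*
+		 * raw_count_full is the number of measured-clock cycles seen
+		 * over sample_ticks cycles of the 4.8 MHz (XO/4) reference,
+		 * so rate = raw_count_full * 4800000 / sample_ticks.  The
+		 * "+ 15" / "+ 35" terms (in tenths of a tick) appear to
+		 * compensate for counter start/stop overhead.
+		 */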
+		raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
+		do_div(raw_count_full, ((sample_ticks * 10) + 35));
+		ret = (raw_count_full * multiplier);
+	}
+	writel_relaxed(data->plltest_val, *data->base + data->plltest_reg);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	clk_disable_unprepare(data->cxo);
+
+fail:
+	regval = readl_relaxed(MUX_REG(to_mux_clk(c)));
+	/* clear post divider bits */
+	regval &= ~BM(15, 12);
+	writel_relaxed(regval, MUX_REG(to_mux_clk(c)));
+
+	return ret;
+}
+
+struct frac_entry {
+	int num;
+	int den;
+};
+
+static void __iomem *local_vote_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+	static struct clk_register_data data1[] = {
+		{"CBCR", 0x0},
+	};
+	static struct clk_register_data data2[] = {
+		{"APPS_VOTE", 0x0},
+		{"APPS_SLEEP_VOTE", 0x4},
+	};
+	switch (n) {
+	case 0:
+		*regs = data1;
+		*size = ARRAY_SIZE(data1);
+		return CBCR_REG(vclk);
+	case 1:
+		*regs = data2;
+		*size = ARRAY_SIZE(data2);
+		return VOTE_REG(vclk);
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+}
+
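+/*
+ * Each entry below is an M/N fraction such that the parent (link) rate times
+ * num/den gives the pixel rate noted in the comment, e.g.
+ * 675 MHz * 52/295 ~= 119 MHz.  set_rate_edp_pixel() walks the table looking
+ * for the fraction that matches the requested rate against the PLL rate.
+ */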
+static struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
+	{52, 295},	/* 119 M */
+	{11, 57},	/* 130.25 M */
+	{63, 307},	/* 138.50 M */
+	{11, 50},	/* 148.50 M */
+	{47, 206},	/* 154 M */
+	{31, 100},	/* 205.25 M */
+	{107, 269},	/* 268.50 M */
+	{0, 0},
+};
+
+static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
+	{31, 211},	/* 119 M */
+	{32, 199},	/* 130.25 M */
+	{63, 307},	/* 138.50 M */
+	{11, 60},	/* 148.50 M */
+	{50, 263},	/* 154 M */
+	{31, 120},	/* 205.25 M */
+	{119, 359},	/* 268.50 M */
+	{0, 0},
+};
+
+static bool is_same_rcg_config(struct rcg_clk *rcg, struct clk_freq_tbl *freq,
+			       bool has_mnd)
+{
+	u32 cfg;
+
+	/* RCG update pending */
+	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_CONFIG_DIRTY_MASK)
+		return false;
+	if (has_mnd)
+		if (readl_relaxed(M_REG(rcg)) != freq->m_val ||
+		    readl_relaxed(N_REG(rcg)) != freq->n_val ||
+		    readl_relaxed(D_REG(rcg)) != freq->d_val)
+			return false;
+	/*
+	 * Both 0 and 1 represent same divider value in HW.
+	 * Always use 0 to simplify comparison.
+	 */
+	if ((freq->div_src_val & CFG_RCGR_DIV_MASK) == 1)
+		freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
+	cfg = readl_relaxed(CFG_RCGR_REG(rcg));
+	if ((cfg & CFG_RCGR_DIV_MASK) == 1)
+		cfg &= ~CFG_RCGR_DIV_MASK;
+	if (cfg != freq->div_src_val)
+		return false;
+
+	return true;
+}
+
+static int set_rate_edp_pixel(struct clk *clk, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	struct clk_freq_tbl *pixel_freq = rcg->current_freq;
+	struct frac_entry *frac;
+	int delta = 100000;
+	s64 request;
+	s64 src_rate;
+	unsigned long flags;
+
+	src_rate = clk_get_rate(clk->parent);
+
+	if (src_rate == 810000000)
+		frac = frac_table_810m;
+	else
+		frac = frac_table_675m;
+
+	while (frac->num) {
+		request = rate;
+		request *= frac->den;
+		request = div_s64(request, frac->num);
+		if ((src_rate < (request - delta)) ||
+			(src_rate > (request + delta))) {
+			frac++;
+			continue;
+		}
+
+		pixel_freq->div_src_val &= ~BM(4, 0);
+		if (frac->den == frac->num) {
+			pixel_freq->m_val = 0;
+			pixel_freq->n_val = 0;
+		} else {
+			pixel_freq->m_val = frac->num;
+			pixel_freq->n_val = ~(frac->den - frac->num);
+			pixel_freq->d_val = ~frac->den;
+		}
+		spin_lock_irqsave(&local_clock_reg_lock, flags);
+		if (!is_same_rcg_config(rcg, pixel_freq, true))
+			__set_rate_mnd(rcg, pixel_freq);
+		spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+enum handoff byte_rcg_handoff(struct clk *clk)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	u32 div_val;
+	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
+
+	/* If the pre-divider is used, find the rate after the division */
+	div_val = readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_DIV_MASK;
+	if (div_val > 1)
+		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
+	else
+		pre_div_rate = parent_rate;
+
+	clk->rate = pre_div_rate;
+
+	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
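+/*
+ * The CFG_RCGR divider field encodes half-integer dividers as
+ * (2 * divider) - 1 (see rcg_calc_div_src() below), which is why
+ * set_rate_byte() computes div as (2 * source_rate / rate) - 1.
+ */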
+static int set_rate_byte(struct clk *clk, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	struct clk *pll = clk->parent;
+	unsigned long source_rate, div, flags;
+	struct clk_freq_tbl *byte_freq = rcg->current_freq;
+	int rc;
+
+	if (rate == 0)
+		return -EINVAL;
+
+	rc = clk_set_rate(pll, rate);
+	if (rc)
+		return rc;
+
+	source_rate = clk_round_rate(pll, rate);
+	if ((2 * source_rate) % rate)
+		return -EINVAL;
+
+	div = ((2 * source_rate)/rate) - 1;
+	if (div > CFG_RCGR_DIV_MASK)
+		return -EINVAL;
+
+	/*
+	 * Both 0 and 1 represent same divider value in HW.
+	 * Always use 0 to simplify comparison.
+	 */
+	div = (div == 1) ? 0 : div;
+
+	byte_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
+	byte_freq->div_src_val |= BVAL(4, 0, div);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	if (!is_same_rcg_config(rcg, byte_freq, false))
+		__set_rate_hid(rcg, byte_freq);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	return 0;
+}
+
+enum handoff pixel_rcg_handoff(struct clk *clk)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	u32 div_val = 0, mval = 0, nval = 0, cfg_regval;
+	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+
+	/* If the pre-divider is used, find the rate after the division */
+	div_val = cfg_regval & CFG_RCGR_DIV_MASK;
+	if (div_val > 1)
+		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
+	else
+		pre_div_rate = parent_rate;
+
+	clk->rate = pre_div_rate;
+
+	/*
+	 * Pixel clocks have one frequency entry in their frequency table.
+	 * Update that entry.
+	 */
+	if (rcg->current_freq) {
+		rcg->current_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
+		rcg->current_freq->div_src_val |= div_val;
+	}
+
+	/* If MND is used, find the rate after the MND division */
+	if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
+		mval = readl_relaxed(M_REG(rcg));
+		nval = readl_relaxed(N_REG(rcg));
+		if (!nval)
+			return HANDOFF_DISABLED_CLK;
+		nval = (~nval) + mval;
+		if (rcg->current_freq) {
+			rcg->current_freq->n_val = ~(nval - mval);
+			rcg->current_freq->m_val = mval;
+			rcg->current_freq->d_val = ~nval;
+		}
+		clk->rate = (pre_div_rate * mval) / nval;
+	}
+
+	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static long round_rate_pixel(struct clk *clk, unsigned long rate)
+{
+	int frac_num[] = {3, 2, 4, 1};
+	int frac_den[] = {8, 9, 9, 1};
+	int delta = 100000;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(frac_num); i++) {
+		unsigned long request = (rate * frac_den[i]) / frac_num[i];
+		unsigned long src_rate;
+
+		src_rate = clk_round_rate(clk->parent, request);
+		if ((src_rate < (request - delta)) ||
+			(src_rate > (request + delta)))
+			continue;
+
+		return (src_rate * frac_num[i]) / frac_den[i];
+	}
+
+	return -EINVAL;
+}
+
+
+static int set_rate_pixel(struct clk *clk, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	struct clk_freq_tbl *pixel_freq = rcg->current_freq;
+	int frac_num[] = {3, 2, 4, 1};
+	int frac_den[] = {8, 9, 9, 1};
+	int delta = 100000;
+	int i, rc;
+
+	for (i = 0; i < ARRAY_SIZE(frac_num); i++) {
+		unsigned long request = (rate * frac_den[i]) / frac_num[i];
+		unsigned long src_rate;
+
+		src_rate = clk_round_rate(clk->parent, request);
+		if ((src_rate < (request - delta)) ||
+			(src_rate > (request + delta)))
+			continue;
+
+		rc =  clk_set_rate(clk->parent, src_rate);
+		if (rc)
+			return rc;
+
+		pixel_freq->div_src_val &= ~BM(4, 0);
+		if (frac_den[i] == frac_num[i]) {
+			pixel_freq->m_val = 0;
+			pixel_freq->n_val = 0;
+		} else {
+			pixel_freq->m_val = frac_num[i];
+			pixel_freq->n_val = ~(frac_den[i] - frac_num[i]);
+			pixel_freq->d_val = ~frac_den[i];
+		}
+		set_rate_mnd(rcg, pixel_freq);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int rcg_clk_set_parent(struct clk *clk, struct clk *parent_clk)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	struct clk *old_parent = clk->parent;
+	struct clk_freq_tbl *nf;
+	unsigned long flags;
+	int rc = 0;
+	unsigned int parent_rate, rate;
+	u32 m_val, n_val, d_val, div_val;
+	u32 cfg_regval;
+
+	/* Find the source clock freq tbl for the requested parent */
+	if (!rcg->freq_tbl)
+		return -ENXIO;
+
+	for (nf = rcg->freq_tbl; parent_clk != nf->src_clk; nf++) {
+		if (nf->freq_hz == FREQ_END)
+			return -ENXIO;
+	}
+
+	/*
+	 * This implementation expects the RCG to be unprepared when switching
+	 * its source, since the divider configuration is left unchanged.
+	 */
+	WARN(clk->prepare_count,
+		"Trying to switch RCG source while it is prepared!\n");
+
+	parent_rate = clk_get_rate(parent_clk);
+
+	div_val = (rcg->current_freq->div_src_val & CFG_RCGR_DIV_MASK);
+	if (div_val)
+		parent_rate /= ((div_val + 1) >> 1);
+
+	/* Update divisor. Source select bits should already be as expected */
+	nf->div_src_val &= ~CFG_RCGR_DIV_MASK;
+	nf->div_src_val |= div_val;
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+
+	if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
+		nf->m_val = m_val = readl_relaxed(M_REG(rcg));
+		n_val = readl_relaxed(N_REG(rcg));
+		d_val = readl_relaxed(D_REG(rcg));
+
+		/* Sign extend the n and d values as those in registers are not
+		 * sign extended.
+		 */
+		n_val |= (n_val >> 8) ? BM(31, 16) : BM(31, 8);
+		d_val |= (d_val >> 8) ? BM(31, 16) : BM(31, 8);
+
+		nf->n_val = n_val;
+		nf->d_val = d_val;
+
+		n_val = ~(n_val) + m_val;
+		rate = parent_rate * m_val;
+		if (n_val)
+			rate /= n_val;
+		else
+			WARN(1, "n_val was 0!!");
+	} else
+		rate = parent_rate;
+
+	/* Warn if switching to the new parent with the current m, n, d values
+	 * violates the voltage constraints for the RCG.
+	 */
+	WARN(!is_rate_valid(clk, rate) && clk->prepare_count,
+		"Switch to new RCG parent violates voltage requirement!\n");
+
+	rc = __clk_pre_reparent(clk, nf->src_clk, &flags);
+	if (rc)
+		return rc;
+
+	/* Switch RCG source */
+	rcg->set_rate(rcg, nf);
+
+	rcg->current_freq = nf;
+	clk->parent = parent_clk;
+	clk->rate = rate;
+
+	__clk_post_reparent(clk, old_parent, &flags);
+
+	return 0;
+}
+
+/*
+ * Unlike other clocks, the HDMI rate is adjusted through PLL
+ * re-programming. It is also routed through an HID divider.
+ */
+static int rcg_clk_set_rate_hdmi(struct clk *c, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	struct clk_freq_tbl *nf = rcg->freq_tbl;
+	int rc;
+
+	rc = clk_set_rate(nf->src_clk, rate);
+	if (rc < 0)
+		goto out;
+	set_rate_hid(rcg, nf);
+
+	rcg->current_freq = nf;
+out:
+	return rc;
+}
+
+static struct clk *rcg_hdmi_clk_get_parent(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	struct clk_freq_tbl *freq = rcg->freq_tbl;
+	u32 cmd_rcgr_regval;
+
+	/* Is there a pending configuration? */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
+		return NULL;
+
+	rcg->current_freq->freq_hz = clk_get_rate(c->parent);
+
+	return freq->src_clk;
+}
+
+static int rcg_clk_set_rate_edp(struct clk *c, unsigned long rate)
+{
+	struct clk_freq_tbl *nf;
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	int rc;
+
+	for (nf = rcg->freq_tbl; nf->freq_hz != rate; nf++)
+		if (nf->freq_hz == FREQ_END) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+	rc = clk_set_rate(nf->src_clk, rate);
+	if (rc < 0)
+		goto out;
+	set_rate_hid(rcg, nf);
+
+	rcg->current_freq = nf;
+	c->parent = nf->src_clk;
+out:
+	return rc;
+}
+
+static struct clk *edp_clk_get_parent(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	struct clk *clk;
+	struct clk_freq_tbl *freq;
+	unsigned long rate;
+	u32 cmd_rcgr_regval;
+
+	/* Is there a pending configuration? */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
+		return NULL;
+
+	/* Figure out what rate the rcg is running at */
+	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+		clk = freq->src_clk;
+		if (clk && clk->ops->get_rate) {
+			rate = clk->ops->get_rate(clk);
+			if (rate == freq->freq_hz)
+				break;
+		}
+	}
+
+	/* No known frequency found */
+	if (freq->freq_hz == FREQ_END)
+		return NULL;
+
+	rcg->current_freq = freq;
+	return freq->src_clk;
+}
+
+static int gate_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	u32 regval;
+	struct gate_clk *g = to_gate_clk(c);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(GATE_EN_REG(g));
+	regval |= g->en_mask;
+	writel_relaxed(regval, GATE_EN_REG(g));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	if (g->delay_us)
+		udelay(g->delay_us);
+
+	return 0;
+}
+
+static void gate_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	u32 regval;
+	struct gate_clk *g = to_gate_clk(c);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(GATE_EN_REG(g));
+	regval &= ~(g->en_mask);
+	writel_relaxed(regval, GATE_EN_REG(g));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	if (g->delay_us)
+		udelay(g->delay_us);
+}
+
+static void __iomem *gate_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct gate_clk *g = to_gate_clk(c);
+	static struct clk_register_data data[] = {
+		{"EN_REG", 0x0},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return GATE_EN_REG(g);
+}
+
+static enum handoff gate_clk_handoff(struct clk *c)
+{
+	struct gate_clk *g = to_gate_clk(c);
+	u32 regval;
+
+	regval = readl_relaxed(GATE_EN_REG(g));
+	if (regval & g->en_mask)
+		return HANDOFF_ENABLED_CLK;
+
+	return HANDOFF_DISABLED_CLK;
+}
+
+static int gate_clk_set_flags(struct clk *c, unsigned long flags)
+{
+	return cbcr_set_flags(GATE_EN_REG(to_gate_clk(c)), flags);
+}
+
+
+static int reset_clk_rst(struct clk *c, enum clk_reset_action action)
+{
+	struct reset_clk *rst = to_reset_clk(c);
+
+	if (!rst->reset_reg)
+		return -EPERM;
+
+	return __branch_clk_reset(RST_REG(rst), action);
+}
+
+static void __iomem *reset_clk_list_registers(struct clk *clk, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct reset_clk *rst = to_reset_clk(clk);
+	static struct clk_register_data data[] = {
+		{"BCR", 0x0},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return RST_REG(rst);
+}
+
+static DEFINE_SPINLOCK(mux_reg_lock);
+
+static int mux_reg_enable(struct mux_clk *clk)
+{
+	u32 regval;
+	unsigned long flags;
+
+	if (!clk->en_mask)
+		return 0;
+
+	spin_lock_irqsave(&mux_reg_lock, flags);
+	regval = readl_relaxed(*clk->base + clk->en_offset);
+	regval |= clk->en_mask;
+	writel_relaxed(regval, *clk->base + clk->en_offset);
+	/* Ensure enable request goes through before returning */
+	mb();
+	spin_unlock_irqrestore(&mux_reg_lock, flags);
+
+	return 0;
+}
+
+static void mux_reg_disable(struct mux_clk *clk)
+{
+	u32 regval;
+	unsigned long flags;
+
+	if (!clk->en_mask)
+		return;
+
+	spin_lock_irqsave(&mux_reg_lock, flags);
+	regval = readl_relaxed(*clk->base + clk->en_offset);
+	regval &= ~clk->en_mask;
+	writel_relaxed(regval, *clk->base + clk->en_offset);
+	spin_unlock_irqrestore(&mux_reg_lock, flags);
+}
+
+static int mux_reg_set_mux_sel(struct mux_clk *clk, int sel)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mux_reg_lock, flags);
+	regval = readl_relaxed(MUX_REG(clk));
+	regval &= ~(clk->mask << clk->shift);
+	regval |= (sel & clk->mask) << clk->shift;
+	writel_relaxed(regval, MUX_REG(clk));
+	/* Ensure switch request goes through before returning */
+	mb();
+	spin_unlock_irqrestore(&mux_reg_lock, flags);
+
+	return 0;
+}
+
+static int mux_reg_get_mux_sel(struct mux_clk *clk)
+{
+	u32 regval = readl_relaxed(MUX_REG(clk));
+
+	return (regval >> clk->shift) & clk->mask;
+}
+
+static bool mux_reg_is_enabled(struct mux_clk *clk)
+{
+	u32 regval = readl_relaxed(MUX_REG(clk));
+
+	return !!(regval & clk->en_mask);
+}
+
+static void __iomem *mux_clk_list_registers(struct mux_clk *clk, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	static struct clk_register_data data[] = {
+		{"DEBUG_CLK_CTL", 0x0},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return *clk->base + clk->offset;
+}
+
+/* PLL post-divider setting for each divider value */
+static struct div_map postdiv_map[] = {
+	{  0x0, 1  },
+	{  0x1, 2  },
+	{  0x3, 3  },
+	{  0x3, 4  },
+	{  0x5, 5  },
+	{  0x7, 7  },
+	{  0x7, 8  },
+	{  0xF, 16 },
+};
+
+static int postdiv_reg_set_div(struct div_clk *clk, int div)
+{
+	struct clk *parent = NULL;
+	u32 regval;
+	unsigned long flags;
+	int mask = -1;
+	int i, ret = 0;
+
+	/* Divider is not configurable */
+	if (!clk->mask)
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(postdiv_map); i++) {
+		if (postdiv_map[i].div == div) {
+			mask = postdiv_map[i].mask;
+			break;
+		}
+	}
+
+	if (mask < 0)
+		return -EINVAL;
+
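+	/*
+	 * The parent is force-disabled around the register update,
+	 * presumably so the new post-divider setting is not propagated
+	 * while the output is running.
+	 */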
+	spin_lock_irqsave(&clk->c.lock, flags);
+	parent = clk->c.parent;
+	if (parent->count && parent->ops->disable)
+		parent->ops->disable(parent);
+
+	regval = readl_relaxed(DIV_REG(clk));
+	regval &= ~(clk->mask << clk->shift);
+	regval |= (mask & clk->mask) << clk->shift;
+	writel_relaxed(regval, DIV_REG(clk));
+	/* Ensure switch request goes through before returning */
+	mb();
+
+	if (parent->count && parent->ops->enable) {
+		ret = parent->ops->enable(parent);
+		if (ret)
+			pr_err("Failed to force enable div parent!\n");
+	}
+
+	spin_unlock_irqrestore(&clk->c.lock, flags);
+	return ret;
+}
+
+static int postdiv_reg_get_div(struct div_clk *clk)
+{
+	u32 regval;
+	int i, div = 0;
+
+	/* Divider is not configurable */
+	if (!clk->mask)
+		return clk->data.div;
+
+	regval = readl_relaxed(DIV_REG(clk));
+	regval = (regval >> clk->shift) & clk->mask;
+	for (i = 0; i < ARRAY_SIZE(postdiv_map); i++) {
+		if (postdiv_map[i].mask == regval) {
+			div = postdiv_map[i].div;
+			break;
+		}
+	}
+	if (!div)
+		return -EINVAL;
+
+	return div;
+}
+
+static int div_reg_set_div(struct div_clk *clk, int div)
+{
+	u32 regval;
+	unsigned long flags;
+
+	/* Divider is not configurable */
+	if (!clk->mask)
+		return 0;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(*clk->base + clk->offset);
+	regval &= ~(clk->mask << clk->shift);
+	regval |= (div & clk->mask) << clk->shift;
+	/* Ensure switch request goes through before returning */
+	mb();
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	return 0;
+}
+
+static int div_reg_get_div(struct div_clk *clk)
+{
+	u32 regval;
+	/* Divider is not configurable */
+	if (!clk->mask)
+		return clk->data.div;
+
+	regval = readl_relaxed(*clk->base + clk->offset);
+	return (regval >> clk->shift) & clk->mask;
+}
+
+/* =================Half-integer RCG without MN counter================= */
+#define RCGR_CMD_REG(x) ((x)->base + (x)->div_offset)
+#define RCGR_DIV_REG(x) ((x)->base + (x)->div_offset + 4)
+#define RCGR_SRC_REG(x) ((x)->base + (x)->div_offset + 4)
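+/*
+ * Note that RCGR_DIV_REG and RCGR_SRC_REG both resolve to the CFG register
+ * 4 bytes past the command register; the divider and source-select live in
+ * different fields of the same word.
+ */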
+
+static int rcg_mux_div_update_config(struct mux_div_clk *md)
+{
+	u32 regval, count;
+
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
+	writel_relaxed(regval, RCGR_CMD_REG(md));
+
+	/* Wait for update to take effect */
+	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
+		if (!(readl_relaxed(RCGR_CMD_REG(md)) &
+			    CMD_RCGR_CONFIG_UPDATE_BIT))
+			return 0;
+		udelay(1);
+	}
+
+	CLK_WARN(&md->c, true, "didn't update its configuration.");
+
+	return -EBUSY;
+}
+
+static void rcg_get_src_div(struct mux_div_clk *md, u32 *src_sel, u32 *div)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	/* Is there a pending configuration? */
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	if (regval & CMD_RCGR_CONFIG_DIRTY_MASK) {
+		CLK_WARN(&md->c, true, "it's a pending configuration.");
+		spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+		return;
+	}
+
+	regval = readl_relaxed(RCGR_DIV_REG(md));
+	regval &= (md->div_mask << md->div_shift);
+	*div = regval >> md->div_shift;
+
+	/* A field value of 0 means bypass, i.e. divide-by-1 */
+	if (*div == 0)
+		*div = 1;
+	/* Convert to the doubled (2 * div) representation used by callers */
+	*div += 1;
+
+	regval = readl_relaxed(RCGR_SRC_REG(md));
+	regval &= (md->src_mask << md->src_shift);
+	*src_sel = regval >> md->src_shift;
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void mux_div_set_force_enable(struct mux_div_clk *md)
+{
+	u32 regval;
+	unsigned long flags;
+	int count;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	regval |= CMD_RCGR_ROOT_ENABLE_BIT;
+	writel_relaxed(regval, RCGR_CMD_REG(md));
+
+	/* Wait for RCG to turn ON */
+	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
+		if (!(readl_relaxed(RCGR_CMD_REG(md)) &
+				CMD_RCGR_CONFIG_UPDATE_BIT))
+			goto exit;
+		udelay(1);
+	}
+	CLK_WARN(&md->c, count == 0, "rcg didn't turn on.");
+exit:
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void mux_div_clear_force_enable(struct mux_div_clk *md)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	regval &= ~CMD_RCGR_ROOT_ENABLE_BIT;
+	writel_relaxed(regval, RCGR_CMD_REG(md));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static int rcg_set_src_div(struct mux_div_clk *md, u32 src_sel, u32 div)
+{
+	u32 regval;
+	unsigned long flags;
+	int ret;
+
+	/* for half-integer divider, div here is doubled */
+	if (div)
+		div -= 1;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(RCGR_DIV_REG(md));
+	regval &= ~(md->div_mask << md->div_shift);
+	regval |= div << md->div_shift;
+	writel_relaxed(regval, RCGR_DIV_REG(md));
+
+	regval = readl_relaxed(RCGR_SRC_REG(md));
+	regval &= ~(md->src_mask << md->src_shift);
+	regval |= src_sel << md->src_shift;
+	writel_relaxed(regval, RCGR_SRC_REG(md));
+
+	ret = rcg_mux_div_update_config(md);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	return ret;
+}
+
+static int rcg_enable(struct mux_div_clk *md)
+{
+	if (md->force_enable_md)
+		mux_div_set_force_enable(md);
+
+	return rcg_set_src_div(md, md->src_sel, md->data.div);
+}
+
+static void rcg_disable(struct mux_div_clk *md)
+{
+	u32 src_sel;
+
+	if (md->force_enable_md)
+		mux_div_clear_force_enable(md);
+
+	if (!md->safe_freq)
+		return;
+
+	src_sel = parent_to_src_sel(md->parents, md->num_parents,
+				md->safe_parent);
+
+	rcg_set_src_div(md, src_sel, md->safe_div);
+}
+
+static bool rcg_is_enabled(struct mux_div_clk *md)
+{
+	u32 regval;
+
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	if (regval & CMD_RCGR_ROOT_STATUS_BIT)
+		return false;
+	else
+		return true;
+}
+
+static void __iomem *rcg_list_registers(struct mux_div_clk *md, int n,
+			struct clk_register_data **regs, u32 *size)
+{
+	static struct clk_register_data data[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return RCGR_CMD_REG(md);
+}
+
+const struct clk_ops clk_ops_empty;
+
+const struct clk_ops clk_ops_rst = {
+	.reset = reset_clk_rst,
+	.list_registers = reset_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_rcg = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = rcg_clk_set_rate,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_clk_handoff,
+	.get_parent = rcg_clk_get_parent,
+	.set_parent = rcg_clk_set_parent,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_rcg_mnd = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = rcg_clk_set_rate,
+	.set_duty_cycle = rcg_clk_set_duty_cycle,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_mnd_clk_handoff,
+	.get_parent = rcg_mnd_clk_get_parent,
+	.set_parent = rcg_clk_set_parent,
+	.list_registers = rcg_mnd_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_pixel = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_pixel,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = round_rate_pixel,
+	.handoff = pixel_rcg_handoff,
+	.list_registers = rcg_mnd_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_pixel_multiparent = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_pixel,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = round_rate_pixel,
+	.handoff = pixel_rcg_handoff,
+	.list_registers = rcg_mnd_clk_list_registers,
+	.get_parent = display_clk_get_parent,
+	.set_parent = rcg_clk_set_parent,
+};
+
+const struct clk_ops clk_ops_edppixel = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_edp_pixel,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = pixel_rcg_handoff,
+	.list_registers = rcg_mnd_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_byte = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_byte,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = byte_rcg_handoff,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_byte_multiparent = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_byte,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = byte_rcg_handoff,
+	.list_registers = rcg_hid_clk_list_registers,
+	.get_parent = display_clk_get_parent,
+	.set_parent = rcg_clk_set_parent,
+};
+
+const struct clk_ops clk_ops_rcg_hdmi = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = rcg_clk_set_rate_hdmi,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_clk_handoff,
+	.get_parent = rcg_hdmi_clk_get_parent,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_rcg_edp = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = rcg_clk_set_rate_edp,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_clk_handoff,
+	.get_parent = edp_clk_get_parent,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_branch = {
+	.enable = branch_clk_enable,
+	.prepare = branch_clk_prepare,
+	.disable = branch_clk_disable,
+	.unprepare = branch_clk_unprepare,
+	.set_rate = branch_clk_set_rate,
+	.get_rate = branch_clk_get_rate,
+	.list_rate = branch_clk_list_rate,
+	.round_rate = branch_clk_round_rate,
+	.reset = branch_clk_reset,
+	.set_flags = branch_clk_set_flags,
+	.handoff = branch_clk_handoff,
+	.list_registers = branch_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_vote = {
+	.enable = local_vote_clk_enable,
+	.disable = local_vote_clk_disable,
+	.reset = local_vote_clk_reset,
+	.handoff = local_vote_clk_handoff,
+	.list_registers = local_vote_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_gate = {
+	.enable = gate_clk_enable,
+	.disable = gate_clk_disable,
+	.set_rate = parent_set_rate,
+	.get_rate = parent_get_rate,
+	.round_rate = parent_round_rate,
+	.set_flags = gate_clk_set_flags,
+	.handoff = gate_clk_handoff,
+	.list_registers = gate_clk_list_registers,
+};
+
+struct clk_mux_ops mux_reg_ops = {
+	.enable = mux_reg_enable,
+	.disable = mux_reg_disable,
+	.set_mux_sel = mux_reg_set_mux_sel,
+	.get_mux_sel = mux_reg_get_mux_sel,
+	.is_enabled = mux_reg_is_enabled,
+	.list_registers = mux_clk_list_registers,
+};
+
+struct clk_div_ops div_reg_ops = {
+	.set_div = div_reg_set_div,
+	.get_div = div_reg_get_div,
+};
+
+const struct clk_div_ops postdiv_reg_ops = {
+	.set_div = postdiv_reg_set_div,
+	.get_div = postdiv_reg_get_div,
+};
+
+struct mux_div_ops rcg_mux_div_ops = {
+	.enable = rcg_enable,
+	.disable = rcg_disable,
+	.set_src_div = rcg_set_src_div,
+	.get_src_div = rcg_get_src_div,
+	.is_enabled = rcg_is_enabled,
+	.list_registers = rcg_list_registers,
+};
+
+static void *cbc_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct msmclk_data *drv;
+	struct branch_clk *branch_clk;
+	u32 rc;
+
+	branch_clk = devm_kzalloc(dev, sizeof(*branch_clk), GFP_KERNEL);
+	if (!branch_clk)
+		return ERR_PTR(-ENOMEM);
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	branch_clk->base = &drv->base;
+
+	rc = of_property_read_u32(np, "qcom,base-offset",
+						&branch_clk->cbcr_reg);
+	if (rc) {
+		dt_err(np, "missing/incorrect qcom,base-offset dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	/* Optional property */
+	of_property_read_u32(np, "qcom,bcr-offset", &branch_clk->bcr_reg);
+
+	of_property_read_u32(np, "qcom,halt-check",
+					(u32 *)&branch_clk->halt_check);
+
+	branch_clk->has_sibling = of_property_read_bool(np,
+							"qcom,has-sibling");
+
+	branch_clk->c.ops = &clk_ops_branch;
+
+	return msmclk_generic_clk_init(dev, np, &branch_clk->c);
+}
+MSMCLK_PARSER(cbc_dt_parser, "qcom,cbc", 0);
+
+static void *local_vote_clk_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct local_vote_clk *vote_clk;
+	struct msmclk_data *drv;
+	int rc, val;
+
+	vote_clk = devm_kzalloc(dev, sizeof(*vote_clk), GFP_KERNEL);
+	if (!vote_clk)
+		return ERR_PTR(-ENOMEM);
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	vote_clk->base = &drv->base;
+
+	rc = of_property_read_u32(np, "qcom,base-offset",
+						&vote_clk->cbcr_reg);
+	if (rc) {
+		dt_err(np, "missing/incorrect qcom,base-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,en-offset", &vote_clk->vote_reg);
+	if (rc) {
+		dt_err(np, "missing/incorrect qcom,en-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,en-bit", &val);
+	if (rc) {
+		dt_err(np, "missing/incorrect qcom,en-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	vote_clk->en_mask = BIT(val);
+
+	vote_clk->c.ops = &clk_ops_vote;
+
+	/* Optional property */
+	of_property_read_u32(np, "qcom,bcr-offset", &vote_clk->bcr_reg);
+
+	return msmclk_generic_clk_init(dev, np, &vote_clk->c);
+}
+MSMCLK_PARSER(local_vote_clk_dt_parser, "qcom,local-vote-clk", 0);
+
+static void *gate_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct gate_clk *gate_clk;
+	struct msmclk_data *drv;
+	u32 en_bit, rc;
+
+	gate_clk = devm_kzalloc(dev, sizeof(*gate_clk), GFP_KERNEL);
+	if (!gate_clk)
+		return ERR_PTR(-ENOMEM);
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	gate_clk->base = &drv->base;
+
+	rc = of_property_read_u32(np, "qcom,en-offset", &gate_clk->en_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,en-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,en-bit", &en_bit);
+	if (rc) {
+		dt_err(np, "missing qcom,en-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	gate_clk->en_mask = BIT(en_bit);
+
+	/* Optional Property */
+	rc = of_property_read_u32(np, "qcom,delay", &gate_clk->delay_us);
+	if (rc)
+		gate_clk->delay_us = 0;
+
+	gate_clk->c.ops = &clk_ops_gate;
+	return msmclk_generic_clk_init(dev, np, &gate_clk->c);
+}
+MSMCLK_PARSER(gate_clk_dt_parser, "qcom,gate-clk", 0);
+
+
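+/*
+ * Helpers to translate DT m/n/div values into the register encoding used by
+ * the MND counter: the N register holds ~(n - m) (or 0 when the MN counter
+ * is unused), the D (duty-cycle) register holds ~n, and the CFG divider
+ * field holds (2 * divider) - 1 with 0 meaning bypass.
+ */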
+static inline u32 rcg_calc_m(u32 m, u32 n)
+{
+	return m;
+}
+
+static inline u32 rcg_calc_n(u32 m, u32 n)
+{
+	n = n > 1 ? n : 0;
+	return ~((n)-(m)) * !!(n);
+}
+
+static inline u32 rcg_calc_duty_cycle(u32 m, u32 n)
+{
+	return ~n;
+}
+
+static inline u32 rcg_calc_div_src(u32 div_int, u32 div_frac, u32 src_sel)
+{
+	int div = 2 * div_int + (div_frac ? 1 : 0) - 1;
+	/* set bypass mode instead of a divider of 1 */
+	div = (div != 1) ? div : 0;
+	return BVAL(4, 0, max(div, 0))
+			| BVAL(10, 8, src_sel);
+}
+
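+/*
+ * "qcom,parents" is a flat list of <mux-select phandle> pairs; each pair is
+ * turned into a struct clk_src with the mux index in ->sel and the resolved
+ * parent clock in ->src.
+ */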
+struct clk_src *msmclk_parse_clk_src(struct device *dev,
+				struct device_node *np, int *array_size)
+{
+	struct clk_src *clks;
+	const void *prop;
+	int num_parents, len, i, prop_len, rc;
+	char *name = "qcom,parents";
+
+	if (!array_size) {
+		dt_err(np, "array_size must be a valid pointer\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	prop = of_get_property(np, name, &prop_len);
+	if (!prop) {
+		dt_prop_err(np, name, "missing dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	len = sizeof(phandle) + sizeof(u32);
+	if (prop_len % len) {
+		dt_prop_err(np, name, "invalid property length\n");
+		return ERR_PTR(-EINVAL);
+	}
+	num_parents = prop_len / len;
+
+	clks = devm_kzalloc(dev, sizeof(*clks) * num_parents, GFP_KERNEL);
+	if (!clks)
+		return ERR_PTR(-ENOMEM);
+
+	/* Assume that u32 and phandle have the same size */
+	for (i = 0; i < num_parents; i++) {
+		phandle p;
+		struct clk_src *a = &clks[i];
+
+		rc = of_property_read_u32_index(np, name, 2 * i, &a->sel);
+		rc |= of_property_read_phandle_index(np, name, 2 * i + 1, &p);
+
+		if (rc) {
+			dt_prop_err(np, name,
+				"unable to read parent clock or mux index\n");
+			return ERR_PTR(-EINVAL);
+		}
+
+		a->src = msmclk_parse_phandle(dev, p);
+		if (IS_ERR(a->src)) {
+			dt_prop_err(np, name, "hashtable lookup failed\n");
+			return ERR_CAST(a->src);
+		}
+	}
+
+	*array_size = num_parents;
+
+	return clks;
+}
+
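+/*
+ * Each "qcom,freq-tbl" row is six u32 cells:
+ * <freq_hz div_int div_frac m n src_phandle>, converted here into a
+ * clk_freq_tbl entry with register-ready div/src, M, N and D values.
+ */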
+static int rcg_parse_freq_tbl(struct device *dev,
+			struct device_node *np, struct rcg_clk *rcg)
+{
+	const void *prop;
+	u32 prop_len, num_rows, i, j = 0;
+	struct clk_freq_tbl *tbl;
+	int rc;
+	char *name = "qcom,freq-tbl";
+
+	prop = of_get_property(np, name, &prop_len);
+	if (!prop) {
+		dt_prop_err(np, name, "missing dt property\n");
+		return -EINVAL;
+	}
+
+	prop_len /= sizeof(u32);
+	if (prop_len % 6) {
+		dt_prop_err(np, name, "bad length\n");
+		return -EINVAL;
+	}
+
+	num_rows = prop_len / 6;
+	/* Allocate one extra row for the FREQ_END terminator. */
+	rcg->freq_tbl = devm_kzalloc(dev,
+				sizeof(*rcg->freq_tbl) * (num_rows + 1),
+				GFP_KERNEL);
+
+	if (!rcg->freq_tbl) {
+		dt_err(np, "memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	tbl = rcg->freq_tbl;
+	for (i = 0; i < num_rows; i++, tbl++) {
+		phandle p;
+		u32 div_int, div_frac, m, n, src_sel, freq_hz;
+
+		rc = of_property_read_u32_index(np, name, j++, &freq_hz);
+		rc |= of_property_read_u32_index(np, name, j++, &div_int);
+		rc |= of_property_read_u32_index(np, name, j++, &div_frac);
+		rc |= of_property_read_u32_index(np, name, j++, &m);
+		rc |= of_property_read_u32_index(np, name, j++, &n);
+		rc |= of_property_read_u32_index(np, name, j++, &p);
+
+		if (rc) {
+			dt_prop_err(np, name, "unable to read u32\n");
+			return -EINVAL;
+		}
+
+		tbl->freq_hz = (unsigned long)freq_hz;
+		tbl->src_clk = msmclk_parse_phandle(dev, p);
+		if (IS_ERR_OR_NULL(tbl->src_clk)) {
+			dt_prop_err(np, name, "hashtable lookup failure\n");
+			return PTR_ERR(tbl->src_clk);
+		}
+
+		tbl->m_val = rcg_calc_m(m, n);
+		tbl->n_val = rcg_calc_n(m, n);
+		tbl->d_val = rcg_calc_duty_cycle(m, n);
+
+		src_sel = parent_to_src_sel(rcg->c.parents,
+					rcg->c.num_parents, tbl->src_clk);
+		tbl->div_src_val = rcg_calc_div_src(div_int, div_frac,
+								src_sel);
+	}
+	/* End table with special value */
+	tbl->freq_hz = FREQ_END;
+	return 0;
+}
+
+static void *rcg_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct rcg_clk *rcg;
+	struct msmclk_data *drv;
+	int rc;
+
+	rcg = devm_kzalloc(dev, sizeof(*rcg), GFP_KERNEL);
+	if (!rcg)
+		return ERR_PTR(-ENOMEM);
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return drv;
+	rcg->base = &drv->base;
+
+	rcg->c.parents = msmclk_parse_clk_src(dev, np, &rcg->c.num_parents);
+	if (IS_ERR(rcg->c.parents)) {
+		dt_err(np, "unable to read parents\n");
+		return ERR_CAST(rcg->c.parents);
+	}
+
+	rc = of_property_read_u32(np, "qcom,base-offset", &rcg->cmd_rcgr_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,base-offset dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	rc = rcg_parse_freq_tbl(dev, np, rcg);
+	if (rc) {
+		dt_err(np, "unable to read freq_tbl\n");
+		return ERR_PTR(rc);
+	}
+	rcg->current_freq = &rcg_dummy_freq;
+
+	if (of_device_is_compatible(np, "qcom,rcg-hid")) {
+		rcg->c.ops = &clk_ops_rcg;
+		rcg->set_rate = set_rate_hid;
+	} else if (of_device_is_compatible(np, "qcom,rcg-mn")) {
+		rcg->c.ops = &clk_ops_rcg_mnd;
+		rcg->set_rate = set_rate_mnd;
+	} else {
+		dt_err(np, "unexpected compatible string\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return msmclk_generic_clk_init(dev, np, &rcg->c);
+}
+MSMCLK_PARSER(rcg_clk_dt_parser, "qcom,rcg-hid", 0);
+MSMCLK_PARSER(rcg_clk_dt_parser, "qcom,rcg-mn", 1);
+
+static int parse_rec_parents(struct device *dev,
+			struct device_node *np, struct mux_clk *mux)
+{
+	int i, rc;
+	char *name = "qcom,recursive-parents";
+	phandle p;
+
+	mux->num_rec_parents = of_property_count_phandles(np, name);
+	if (mux->num_rec_parents <= 0)
+		return 0;
+
+	mux->rec_parents = devm_kzalloc(dev,
+			sizeof(*mux->rec_parents) * mux->num_rec_parents,
+			GFP_KERNEL);
+
+	if (!mux->rec_parents) {
+		dt_err(np, "memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < mux->num_rec_parents; i++) {
+		rc = of_property_read_phandle_index(np, name, i, &p);
+		if (rc) {
+			dt_prop_err(np, name, "unable to read u32\n");
+			return rc;
+		}
+
+		mux->rec_parents[i] = msmclk_parse_phandle(dev, p);
+		if (IS_ERR(mux->rec_parents[i])) {
+			dt_prop_err(np, name, "hashtable lookup failure\n");
+			return PTR_ERR(mux->rec_parents[i]);
+		}
+	}
+
+	return 0;
+}
+
+static void *mux_reg_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct mux_clk *mux;
+	struct msmclk_data *drv;
+	int rc;
+
+	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+	if (!mux)
+		return ERR_PTR(-ENOMEM);
+
+	mux->parents = msmclk_parse_clk_src(dev, np, &mux->num_parents);
+	if (IS_ERR(mux->parents))
+		return mux->parents;
+
+	mux->c.parents = mux->parents;
+	mux->c.num_parents = mux->num_parents;
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return drv;
+	mux->base = &drv->base;
+
+	rc = parse_rec_parents(dev, np, mux);
+	if (rc) {
+		dt_err(np, "Incorrect qcom,recursive-parents dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	rc = of_property_read_u32(np, "qcom,offset", &mux->offset);
+	if (rc) {
+		dt_err(np, "missing qcom,offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,mask", &mux->mask);
+	if (rc) {
+		dt_err(np, "missing qcom,mask dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,shift", &mux->shift);
+	if (rc) {
+		dt_err(np, "missing qcom,shift dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	mux->c.ops = &clk_ops_gen_mux;
+	mux->ops = &mux_reg_ops;
+
+	/* Optional Properties */
+	of_property_read_u32(np, "qcom,en-offset", &mux->en_offset);
+	of_property_read_u32(np, "qcom,en-mask", &mux->en_mask);
+
+	return msmclk_generic_clk_init(dev, np, &mux->c);
+}
+MSMCLK_PARSER(mux_reg_clk_dt_parser, "qcom,mux-reg", 0);
+
+static void *measure_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct mux_clk *mux;
+	struct clk *c;
+	struct measure_clk_data *p;
+	struct clk_ops *clk_ops_measure_mux;
+	phandle cxo;
+	int rc;
+
+	c = mux_reg_clk_dt_parser(dev, np);
+	if (IS_ERR(c))
+		return c;
+
+	mux = to_mux_clk(c);
+
+	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return ERR_PTR(-ENOMEM);
+
+	rc = of_property_read_phandle_index(np, "qcom,cxo", 0, &cxo);
+	if (rc) {
+		dt_err(np, "missing qcom,cxo\n");
+		return ERR_PTR(-EINVAL);
+	}
+	p->cxo = msmclk_parse_phandle(dev, cxo);
+	if (IS_ERR_OR_NULL(p->cxo)) {
+		dt_prop_err(np, "qcom,cxo", "hashtable lookup failure\n");
+		return p->cxo;
+	}
+
+	rc = of_property_read_u32(np, "qcom,xo-div4-cbcr", &p->xo_div4_cbcr);
+	if (rc) {
+		dt_err(np, "missing qcom,xo-div4-cbcr dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,test-pad-config", &p->plltest_val);
+	if (rc) {
+		dt_err(np, "missing qcom,test-pad-config dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
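+	/*
+	 * The measurement control, status and PLL-test registers are assumed
+	 * to sit at fixed offsets from the debug mux register; these offsets
+	 * feed run_measurement() and measure_get_rate() above.
+	 */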
+	p->base = mux->base;
+	p->ctl_reg = mux->offset + 0x4;
+	p->status_reg = mux->offset + 0x8;
+	p->plltest_reg = mux->offset + 0xC;
+	mux->priv = p;
+
+	clk_ops_measure_mux = devm_kzalloc(dev, sizeof(*clk_ops_measure_mux),
+								GFP_KERNEL);
+	if (!clk_ops_measure_mux)
+		return ERR_PTR(-ENOMEM);
+
+	*clk_ops_measure_mux = clk_ops_gen_mux;
+	clk_ops_measure_mux->get_rate = measure_get_rate;
+
+	mux->c.ops = clk_ops_measure_mux;
+
+	/* Already did generic clk init */
+	return &mux->c;
+}
+MSMCLK_PARSER(measure_clk_dt_parser, "qcom,measure-mux", 0);
+
+static void *div_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct div_clk *div_clk;
+	struct msmclk_data *drv;
+	int rc;
+
+	div_clk = devm_kzalloc(dev, sizeof(*div_clk), GFP_KERNEL);
+	if (!div_clk)
+		return ERR_PTR(-ENOMEM);
+
+	rc = of_property_read_u32(np, "qcom,max-div", &div_clk->data.max_div);
+	if (rc) {
+		dt_err(np, "missing qcom,max-div\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,min-div", &div_clk->data.min_div);
+	if (rc) {
+		dt_err(np, "missing qcom,min-div\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,base-offset", &div_clk->offset);
+	if (rc) {
+		dt_err(np, "missing qcom,base-offset\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,mask", &div_clk->mask);
+	if (rc) {
+		dt_err(np, "missing qcom,mask\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,shift", &div_clk->shift);
+	if (rc) {
+		dt_err(np, "missing qcom,shift\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (of_property_read_bool(np, "qcom,slave-div"))
+		div_clk->c.ops = &clk_ops_slave_div;
+	else
+		div_clk->c.ops = &clk_ops_div;
+	div_clk->ops = &div_reg_ops;
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	div_clk->base = &drv->base;
+
+	return msmclk_generic_clk_init(dev, np, &div_clk->c);
+}
+MSMCLK_PARSER(div_clk_dt_parser, "qcom,div-clk", 0);
+
+static void *fixed_div_clk_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct div_clk *div_clk;
+	int rc;
+
+	div_clk = devm_kzalloc(dev, sizeof(*div_clk), GFP_KERNEL);
+	if (!div_clk)
+		return ERR_PTR(-ENOMEM);
+
+	rc = of_property_read_u32(np, "qcom,div", &div_clk->data.div);
+	if (rc) {
+		dt_err(np, "missing qcom,div\n");
+		return ERR_PTR(-EINVAL);
+	}
+	div_clk->data.min_div = div_clk->data.div;
+	div_clk->data.max_div = div_clk->data.div;
+
+	if (of_property_read_bool(np, "qcom,slave-div"))
+		div_clk->c.ops = &clk_ops_slave_div;
+	else
+		div_clk->c.ops = &clk_ops_div;
+	div_clk->ops = &div_reg_ops;
+
+	return msmclk_generic_clk_init(dev, np, &div_clk->c);
+}
+MSMCLK_PARSER(fixed_div_clk_dt_parser, "qcom,fixed-div-clk", 0);
+
+static void *reset_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct reset_clk *reset_clk;
+	struct msmclk_data *drv;
+	int rc;
+
+	reset_clk = devm_kzalloc(dev, sizeof(*reset_clk), GFP_KERNEL);
+	if (!reset_clk)
+		return ERR_PTR(-ENOMEM);
+
+	rc = of_property_read_u32(np, "qcom,base-offset",
+						&reset_clk->reset_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,base-offset\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	reset_clk->base = &drv->base;
+
+	reset_clk->c.ops = &clk_ops_rst;
+	return msmclk_generic_clk_init(dev, np, &reset_clk->c);
+}
+MSMCLK_PARSER(reset_clk_dt_parser, "qcom,reset-clk", 0);
diff --git a/drivers/clk/msm/clock-pll.c b/drivers/clk/msm/clock-pll.c
new file mode 100644
index 0000000..26c04e5
--- /dev/null
+++ b/drivers/clk/msm/clock-pll.c
@@ -0,0 +1,1204 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+#include "clock.h"
+
+#define PLL_OUTCTRL BIT(0)
+#define PLL_BYPASSNL BIT(1)
+#define PLL_RESET_N BIT(2)
+#define PLL_MODE_MASK BM(3, 0)
+
+#define PLL_EN_REG(x)		(*(x)->base + (unsigned long) (x)->en_reg)
+#define PLL_STATUS_REG(x)	(*(x)->base + (unsigned long) (x)->status_reg)
+#define PLL_ALT_STATUS_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->alt_status_reg)
+#define PLL_MODE_REG(x)		(*(x)->base + (unsigned long) (x)->mode_reg)
+#define PLL_L_REG(x)		(*(x)->base + (unsigned long) (x)->l_reg)
+#define PLL_M_REG(x)		(*(x)->base + (unsigned long) (x)->m_reg)
+#define PLL_N_REG(x)		(*(x)->base + (unsigned long) (x)->n_reg)
+#define PLL_CONFIG_REG(x)	(*(x)->base + (unsigned long) (x)->config_reg)
+#define PLL_ALPHA_REG(x)	(*(x)->base + (unsigned long) (x)->alpha_reg)
+#define PLL_CFG_ALT_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->config_alt_reg)
+#define PLL_CFG_CTL_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->config_ctl_reg)
+#define PLL_CFG_CTL_HI_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->config_ctl_hi_reg)
+#define PLL_TEST_CTL_LO_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->test_ctl_lo_reg)
+#define PLL_TEST_CTL_HI_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->test_ctl_hi_reg)
+static DEFINE_SPINLOCK(pll_reg_lock);
+
+#define ENABLE_WAIT_MAX_LOOPS 200
+#define PLL_LOCKED_BIT BIT(16)
+
+#define SPM_FORCE_EVENT   0x4
+
+static int pll_vote_clk_enable(struct clk *c)
+{
+	u32 ena, count;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena |= pllv->en_mask;
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	/*
+	 * Use a memory barrier since some PLL status registers are
+	 * not within the same 1K segment as the voting registers.
+	 */
+	mb();
+
+	/* Wait for pll to enable. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
+			return 0;
+		udelay(1);
+	}
+
+	WARN("PLL %s didn't enable after voting for it!\n", c->dbg_name);
+
+	return -ETIMEDOUT;
+}
+
+static void pll_vote_clk_disable(struct clk *c)
+{
+	u32 ena;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena &= ~(pllv->en_mask);
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+static int pll_vote_clk_is_enabled(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
+}
+
+static enum handoff pll_vote_clk_handoff(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
+		return HANDOFF_ENABLED_CLK;
+
+	return HANDOFF_DISABLED_CLK;
+}
+
+static void __iomem *pll_vote_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	static struct clk_register_data data1[] = {
+		{"APPS_VOTE", 0x0},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data1;
+	*size = ARRAY_SIZE(data1);
+	return PLL_EN_REG(pllv);
+}
+
+const struct clk_ops clk_ops_pll_vote = {
+	.enable = pll_vote_clk_enable,
+	.disable = pll_vote_clk_disable,
+	.is_enabled = pll_vote_clk_is_enabled,
+	.handoff = pll_vote_clk_handoff,
+	.list_registers = pll_vote_clk_list_registers,
+};
+
+/*
+ *  spm_event() -- Set/Clear SPM events
+ *  PLL off sequence -- enable (1)
+ *    Set L2_SPM_FORCE_EVENT_EN[bit] register to 1
+ *    Set L2_SPM_FORCE_EVENT[bit] register to 1
+ *  PLL on sequence -- enable (0)
+ *   Clear L2_SPM_FORCE_EVENT[bit] register to 0
+ *   Clear L2_SPM_FORCE_EVENT_EN[bit] register to 0
+ */
+static void spm_event(void __iomem *base, u32 offset, u32 bit,
+							bool enable)
+{
+	uint32_t val;
+
+	if (!base)
+		return;
+
+	if (enable) {
+		/* L2_SPM_FORCE_EVENT_EN */
+		val = readl_relaxed(base + offset);
+		val |= BIT(bit);
+		writel_relaxed(val, (base + offset));
+		/* Ensure that the write above goes through. */
+		mb();
+
+		/* L2_SPM_FORCE_EVENT */
+		val = readl_relaxed(base + offset + SPM_FORCE_EVENT);
+		val |= BIT(bit);
+		writel_relaxed(val, (base + offset + SPM_FORCE_EVENT));
+		/* Ensure that the write above goes through. */
+		mb();
+	} else {
+		/* L2_SPM_FORCE_EVENT */
+		val = readl_relaxed(base + offset + SPM_FORCE_EVENT);
+		val &= ~BIT(bit);
+		writel_relaxed(val, (base + offset + SPM_FORCE_EVENT));
+		/* Ensure that the write above goes through. */
+		mb();
+
+		/* L2_SPM_FORCE_EVENT_EN */
+		val = readl_relaxed(base + offset);
+		val &= ~BIT(bit);
+		writel_relaxed(val, (base + offset));
+		/* Ensure that the write above goes through. */
+		mb();
+	}
+}
+
+static void __pll_config_reg(void __iomem *pll_config, struct pll_freq_tbl *f,
+			struct pll_config_masks *masks)
+{
+	u32 regval;
+
+	regval = readl_relaxed(pll_config);
+
+	/* Enable the MN counter if used */
+	if (f->m_val)
+		regval |= masks->mn_en_mask;
+
+	/* Set pre-divider and post-divider values */
+	regval &= ~masks->pre_div_mask;
+	regval |= f->pre_div_val;
+	regval &= ~masks->post_div_mask;
+	regval |= f->post_div_val;
+
+	/* Select VCO setting */
+	regval &= ~masks->vco_mask;
+	regval |= f->vco_val;
+
+	/* Enable main output if it has not been enabled */
+	if (masks->main_output_mask && !(regval & masks->main_output_mask))
+		regval |= masks->main_output_mask;
+
+	writel_relaxed(regval, pll_config);
+}
+
+static int sr2_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	int ret = 0, count;
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	spm_event(pll->spm_ctrl.spm_base, pll->spm_ctrl.offset,
+				pll->spm_ctrl.event_bit, false);
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
+		pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return ret;
+}
+
+void __variable_rate_pll_init(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 regval;
+
+	regval = readl_relaxed(PLL_CONFIG_REG(pll));
+
+	if (pll->masks.post_div_mask) {
+		regval &= ~pll->masks.post_div_mask;
+		regval |= pll->vals.post_div_masked;
+	}
+
+	if (pll->masks.pre_div_mask) {
+		regval &= ~pll->masks.pre_div_mask;
+		regval |= pll->vals.pre_div_masked;
+	}
+
+	if (pll->masks.main_output_mask)
+		regval |= pll->masks.main_output_mask;
+
+	if (pll->masks.early_output_mask)
+		regval |= pll->masks.early_output_mask;
+
+	if (pll->vals.enable_mn)
+		regval |= pll->masks.mn_en_mask;
+	else
+		regval &= ~pll->masks.mn_en_mask;
+
+	writel_relaxed(regval, PLL_CONFIG_REG(pll));
+
+	regval = readl_relaxed(PLL_MODE_REG(pll));
+	if (pll->masks.apc_pdn_mask)
+		regval &= ~pll->masks.apc_pdn_mask;
+	writel_relaxed(regval, PLL_MODE_REG(pll));
+
+	writel_relaxed(pll->vals.alpha_val, PLL_ALPHA_REG(pll));
+	writel_relaxed(pll->vals.config_ctl_val, PLL_CFG_CTL_REG(pll));
+	if (pll->vals.config_ctl_hi_val)
+		writel_relaxed(pll->vals.config_ctl_hi_val,
+				PLL_CFG_CTL_HI_REG(pll));
+	if (pll->init_test_ctl) {
+		writel_relaxed(pll->vals.test_ctl_lo_val,
+				PLL_TEST_CTL_LO_REG(pll));
+		writel_relaxed(pll->vals.test_ctl_hi_val,
+				PLL_TEST_CTL_HI_REG(pll));
+	}
+
+	pll->inited = true;
+}
+
+static int variable_rate_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	int ret = 0, count;
+	u32 mode, testlo;
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+	u32 mode_lock;
+	u64 time;
+	bool early_lock = false;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	if (unlikely(!to_pll_clk(c)->inited))
+		__variable_rate_pll_init(c);
+
+	mode = readl_relaxed(PLL_MODE_REG(pll));
+
+	/* Set test control bits as required by HW doc */
+	if (pll->test_ctl_lo_reg && pll->vals.test_ctl_lo_val &&
+		pll->pgm_test_ctl_enable)
+		writel_relaxed(pll->vals.test_ctl_lo_val,
+				PLL_TEST_CTL_LO_REG(pll));
+
+	if (!pll->test_ctl_dbg) {
+		/* Enable test_ctl debug */
+		mode |= BIT(3);
+		writel_relaxed(mode, PLL_MODE_REG(pll));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0xC0;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+	}
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Use 10us to be sure.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * The HPG mandates only a 5us delay here, but use a 200us delay
+	 * instead. The PLL can exhibit early "transient" locks about 16us
+	 * from this point; the longer delay keeps the lock polling below
+	 * from tripping on those transients.
+	 */
+	mb();
+	udelay(200);
+
+	/* Clear test control bits */
+	if (pll->test_ctl_lo_reg && pll->vals.test_ctl_lo_val &&
+		pll->pgm_test_ctl_enable)
+		writel_relaxed(0x0, PLL_TEST_CTL_LO_REG(pll));
+
+	time = sched_clock();
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask) {
+			udelay(1);
+			/*
+			 * Check again to be sure. This is to avoid
+			 * breaking too early if there is a "transient"
+			 * lock.
+			 */
+			if ((readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
+				break;
+			early_lock = true;
+		}
+		udelay(1);
+	}
+	time = sched_clock() - time;
+
+	mode_lock = readl_relaxed(PLL_STATUS_REG(pll));
+
+	if (!(mode_lock & lockmask)) {
+		pr_err("PLL lock bit detection total wait time: %lld ns\n", time);
+		pr_err("PLL %s didn't lock after enabling for L value 0x%x!\n",
+			c->dbg_name, readl_relaxed(PLL_L_REG(pll)));
+		pr_err("mode register is 0x%x\n",
+			readl_relaxed(PLL_STATUS_REG(pll)));
+		pr_err("user control register is 0x%x\n",
+			readl_relaxed(PLL_CONFIG_REG(pll)));
+		pr_err("config control register is 0x%x\n",
+			readl_relaxed(PLL_CFG_CTL_REG(pll)));
+		pr_err("test control high register is 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_HI_REG(pll)));
+		pr_err("test control low register is 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)));
+		pr_err("early lock? %s\n", early_lock ? "yes" : "no");
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0x40;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0x80;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0xC0;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+		panic("failed to lock %s PLL\n", c->dbg_name);
+	}
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return ret;
+}
+
+static void variable_rate_pll_clk_disable_hwfsm(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 regval;
+
+	/* Set test control bit to stay-in-CFA if necessary */
+	if (pll->test_ctl_lo_reg && pll->pgm_test_ctl_enable) {
+		regval = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		writel_relaxed(regval | BIT(16),
+				PLL_TEST_CTL_LO_REG(pll));
+	}
+
+	/* 8 reference clock cycle delay mandated by the HPG */
+	udelay(1);
+}
+
+static int variable_rate_pll_clk_enable_hwfsm(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	int count;
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+	unsigned long flags;
+	u32 regval;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	/* Clear test control bit if necessary */
+	if (pll->test_ctl_lo_reg && pll->pgm_test_ctl_enable) {
+		regval = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		regval &= ~BIT(16);
+		writel_relaxed(regval, PLL_TEST_CTL_LO_REG(pll));
+	}
+
+	/* Wait for 50us explicitly to avoid transient locks */
+	udelay(50);
+
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
+		pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return 0;
+}
+
+static void __pll_clk_enable_reg(void __iomem *mode_reg)
+{
+	u32 mode = readl_relaxed(mode_reg);
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, mode_reg);
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, mode_reg);
+
+	/* Wait until PLL is locked. */
+	mb();
+	udelay(50);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, mode_reg);
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+}
+
+static int local_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	__pll_clk_enable_reg(PLL_MODE_REG(pll));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return 0;
+}
+
+static void __pll_clk_disable_reg(void __iomem *mode_reg)
+{
+	u32 mode = readl_relaxed(mode_reg);
+
+	mode &= ~PLL_MODE_MASK;
+	writel_relaxed(mode, mode_reg);
+}
+
+static void local_pll_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	/*
+	 * Disable the PLL output, disable test mode, enable
+	 * the bypass mode, and assert the reset.
+	 */
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	spm_event(pll->spm_ctrl.spm_base, pll->spm_ctrl.offset,
+				pll->spm_ctrl.event_bit, true);
+	__pll_clk_disable_reg(PLL_MODE_REG(pll));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+static enum handoff local_pll_clk_handoff(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
+	unsigned long parent_rate;
+	u32 lval, mval, nval, userval;
+
+	if ((mode & mask) != mask)
+		return HANDOFF_DISABLED_CLK;
+
+	/* Assume bootloaders configure PLL to c->rate */
+	if (c->rate)
+		return HANDOFF_ENABLED_CLK;
+
+	parent_rate = clk_get_rate(c->parent);
+	lval = readl_relaxed(PLL_L_REG(pll));
+	mval = readl_relaxed(PLL_M_REG(pll));
+	nval = readl_relaxed(PLL_N_REG(pll));
+	userval = readl_relaxed(PLL_CONFIG_REG(pll));
+
+	c->rate = parent_rate * lval;
+
+	if (pll->masks.mn_en_mask && userval) {
+		if (!nval)
+			nval = 1;
+		c->rate += (parent_rate * mval) / nval;
+	}
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static long local_pll_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_freq_tbl *nf;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	if (!pll->freq_tbl)
+		return -EINVAL;
+
+	for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END; nf++)
+		if (nf->freq_hz >= rate)
+			return nf->freq_hz;
+
+	nf--;
+	return nf->freq_hz;
+}
+
+static int local_pll_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_freq_tbl *nf;
+	struct pll_clk *pll = to_pll_clk(c);
+	unsigned long flags;
+
+	for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END
+			&& nf->freq_hz != rate; nf++)
+		;
+
+	if (nf->freq_hz == PLL_FREQ_END)
+		return -EINVAL;
+
+	/*
+	 * Ensure the PLL is off before changing its rate. For optimization
+	 * reasons, assume no downstream clock is actively using it.
+	 */
+	spin_lock_irqsave(&c->lock, flags);
+	if (c->count)
+		c->ops->disable(c);
+
+	writel_relaxed(nf->l_val, PLL_L_REG(pll));
+	writel_relaxed(nf->m_val, PLL_M_REG(pll));
+	writel_relaxed(nf->n_val, PLL_N_REG(pll));
+
+	__pll_config_reg(PLL_CONFIG_REG(pll), nf, &pll->masks);
+
+	if (c->count)
+		c->ops->enable(c);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+static enum handoff variable_rate_pll_handoff(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
+	u32 lval;
+
+	pll->src_rate = clk_get_rate(c->parent);
+
+	lval = readl_relaxed(PLL_L_REG(pll));
+	if (!lval)
+		return HANDOFF_DISABLED_CLK;
+
+	c->rate = pll->src_rate * lval;
+
+	if (c->rate > pll->max_rate || c->rate < pll->min_rate) {
+		WARN(1, "%s: Out of spec PLL", c->dbg_name);
+		return HANDOFF_DISABLED_CLK;
+	}
+
+	if ((mode & mask) != mask)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static long variable_rate_pll_round_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+
+	if (!pll->src_rate)
+		return 0;
+
+	if (pll->no_prepared_reconfig && c->prepare_count && c->rate != rate)
+		return -EINVAL;
+
+	if (rate < pll->min_rate)
+		rate = pll->min_rate;
+	if (rate > pll->max_rate)
+		rate = pll->max_rate;
+
+	return min(pll->max_rate,
+			DIV_ROUND_UP(rate, pll->src_rate) * pll->src_rate);
+}
+
+/*
+ * For optimization reasons, assumes no downstream clocks are actively using
+ * the PLL while its rate is being changed.
+ */
+static int variable_rate_pll_set_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	unsigned long flags;
+	u32 l_val;
+
+	if (rate != variable_rate_pll_round_rate(c, rate))
+		return -EINVAL;
+
+	l_val = rate / pll->src_rate;
+
+	spin_lock_irqsave(&c->lock, flags);
+
+	if (c->count && c->ops->disable)
+		c->ops->disable(c);
+
+	writel_relaxed(l_val, PLL_L_REG(pll));
+
+	if (c->count && c->ops->enable)
+		c->ops->enable(c);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+
+	return 0;
+}
+
+int sr_pll_clk_enable(struct clk *c)
+{
+	u32 mode;
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	mode = readl_relaxed(PLL_MODE_REG(pll));
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait until PLL is locked. */
+	mb();
+	udelay(60);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return 0;
+}
+
+int sr_hpm_lp_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 count, mode;
+	int ret = 0;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	/* Disable PLL bypass mode and de-assert reset. */
+	mode = PLL_BYPASSNL | PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
+		WARN(1, "PLL %s didn't lock after enabling it!\n", c->dbg_name);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure the write above goes through before returning. */
+	mb();
+
+out:
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return ret;
+}
+
+static void __iomem *variable_rate_pll_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	static struct clk_register_data data[] = {
+		{"MODE", 0x0},
+		{"L", 0x4},
+		{"ALPHA", 0x8},
+		{"USER_CTL", 0x10},
+		{"CONFIG_CTL", 0x14},
+		{"STATUS", 0x1C},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return PLL_MODE_REG(pll);
+}
+
+static void __iomem *local_pll_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	/* Not compatible with 8960 & friends */
+	struct pll_clk *pll = to_pll_clk(c);
+	static struct clk_register_data data[] = {
+		{"MODE", 0x0},
+		{"L", 0x4},
+		{"M", 0x8},
+		{"N", 0xC},
+		{"USER", 0x10},
+		{"CONFIG", 0x14},
+		{"STATUS", 0x1C},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return PLL_MODE_REG(pll);
+}
+
+const struct clk_ops clk_ops_local_pll = {
+	.enable = local_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = local_pll_clk_set_rate,
+	.handoff = local_pll_clk_handoff,
+	.list_registers = local_pll_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_sr2_pll = {
+	.enable = sr2_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = local_pll_clk_set_rate,
+	.round_rate = local_pll_clk_round_rate,
+	.handoff = local_pll_clk_handoff,
+	.list_registers = local_pll_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_variable_rate_pll_hwfsm = {
+	.enable = variable_rate_pll_clk_enable_hwfsm,
+	.disable = variable_rate_pll_clk_disable_hwfsm,
+	.set_rate = variable_rate_pll_set_rate,
+	.round_rate = variable_rate_pll_round_rate,
+	.handoff = variable_rate_pll_handoff,
+};
+
+const struct clk_ops clk_ops_variable_rate_pll = {
+	.enable = variable_rate_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = variable_rate_pll_set_rate,
+	.round_rate = variable_rate_pll_round_rate,
+	.handoff = variable_rate_pll_handoff,
+	.list_registers = variable_rate_pll_list_registers,
+};
+
+static DEFINE_SPINLOCK(soft_vote_lock);
+
+static int pll_acpu_vote_clk_enable(struct clk *c)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&soft_vote_lock, flags);
+
+	if (!*pllv->soft_vote)
+		ret = pll_vote_clk_enable(c);
+	if (ret == 0)
+		*pllv->soft_vote |= (pllv->soft_vote_mask);
+
+	spin_unlock_irqrestore(&soft_vote_lock, flags);
+	return ret;
+}
+
+static void pll_acpu_vote_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&soft_vote_lock, flags);
+
+	*pllv->soft_vote &= ~(pllv->soft_vote_mask);
+	if (!*pllv->soft_vote)
+		pll_vote_clk_disable(c);
+
+	spin_unlock_irqrestore(&soft_vote_lock, flags);
+}
+
+static enum handoff pll_acpu_vote_clk_handoff(struct clk *c)
+{
+	if (pll_vote_clk_handoff(c) == HANDOFF_DISABLED_CLK)
+		return HANDOFF_DISABLED_CLK;
+
+	if (pll_acpu_vote_clk_enable(c))
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+const struct clk_ops clk_ops_pll_acpu_vote = {
+	.enable = pll_acpu_vote_clk_enable,
+	.disable = pll_acpu_vote_clk_disable,
+	.is_enabled = pll_vote_clk_is_enabled,
+	.handoff = pll_acpu_vote_clk_handoff,
+	.list_registers = pll_vote_clk_list_registers,
+};
+
+static int pll_sleep_clk_enable(struct clk *c)
+{
+	u32 ena;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena &= ~(pllv->en_mask);
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return 0;
+}
+
+static void pll_sleep_clk_disable(struct clk *c)
+{
+	u32 ena;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena |= pllv->en_mask;
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+static enum handoff pll_sleep_clk_handoff(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	if (!(readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask))
+		return HANDOFF_ENABLED_CLK;
+
+	return HANDOFF_DISABLED_CLK;
+}
+
+/*
+ * These ops are meant to be used by gpll0_sleep_clk_src. The aim is to use
+ * the hardware sleep-enable bit to denote whether the PLL may be turned off
+ * once the APPS processor enters power collapse. gpll0_sleep_clk_src is
+ * enabled only if a peripheral client is using it and is disabled if there
+ * is none. The enable op clears the hardware sleep-enable bit while the
+ * disable op asserts it.
+ */
+
+const struct clk_ops clk_ops_pll_sleep_vote = {
+	.enable = pll_sleep_clk_enable,
+	.disable = pll_sleep_clk_disable,
+	.handoff = pll_sleep_clk_handoff,
+	.list_registers = pll_vote_clk_list_registers,
+};
+
+static void __set_fsm_mode(void __iomem *mode_reg,
+					u32 bias_count, u32 lock_count)
+{
+	u32 regval = readl_relaxed(mode_reg);
+
+	/* De-assert reset to FSM */
+	regval &= ~BIT(21);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program bias count */
+	regval &= ~BM(19, 14);
+	regval |= BVAL(19, 14, bias_count);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program lock count */
+	regval &= ~BM(13, 8);
+	regval |= BVAL(13, 8, lock_count);
+	writel_relaxed(regval, mode_reg);
+
+	/* Enable PLL FSM voting */
+	regval |= BIT(20);
+	writel_relaxed(regval, mode_reg);
+}
+
+static void __configure_alt_config(struct pll_alt_config config,
+		struct pll_config_regs *regs)
+{
+	u32 regval;
+
+	regval = readl_relaxed(PLL_CFG_ALT_REG(regs));
+
+	if (config.mask) {
+		regval &= ~config.mask;
+		regval |= config.val;
+	}
+
+	writel_relaxed(regval, PLL_CFG_ALT_REG(regs));
+}
+
+void __configure_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	u32 regval;
+
+	writel_relaxed(config->l, PLL_L_REG(regs));
+	writel_relaxed(config->m, PLL_M_REG(regs));
+	writel_relaxed(config->n, PLL_N_REG(regs));
+
+	regval = readl_relaxed(PLL_CONFIG_REG(regs));
+
+	/* Enable the MN accumulator  */
+	if (config->mn_ena_mask) {
+		regval &= ~config->mn_ena_mask;
+		regval |= config->mn_ena_val;
+	}
+
+	/* Enable the main output */
+	if (config->main_output_mask) {
+		regval &= ~config->main_output_mask;
+		regval |= config->main_output_val;
+	}
+
+	/* Enable the aux output */
+	if (config->aux_output_mask) {
+		regval &= ~config->aux_output_mask;
+		regval |= config->aux_output_val;
+	}
+
+	/* Set pre-divider and post-divider values */
+	regval &= ~config->pre_div_mask;
+	regval |= config->pre_div_val;
+	regval &= ~config->post_div_mask;
+	regval |= config->post_div_val;
+
+	/* Select VCO setting */
+	regval &= ~config->vco_mask;
+	regval |= config->vco_val;
+
+	if (config->add_factor_mask) {
+		regval &= ~config->add_factor_mask;
+		regval |= config->add_factor_val;
+	}
+
+	writel_relaxed(regval, PLL_CONFIG_REG(regs));
+
+	if (regs->config_alt_reg)
+		__configure_alt_config(config->alt_cfg, regs);
+
+	if (regs->config_ctl_reg)
+		writel_relaxed(config->cfg_ctl_val, PLL_CFG_CTL_REG(regs));
+}
+
+void configure_sr_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	__configure_pll(config, regs, ena_fsm_mode);
+	if (ena_fsm_mode)
+		__set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x8);
+}
+
+void configure_sr_hpm_lp_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	__configure_pll(config, regs, ena_fsm_mode);
+	if (ena_fsm_mode)
+		__set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x0);
+}
+
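+/*
+ * Illustrative DT node consumed by this parser. The node name and property
+ * values below are documentation placeholders only, not taken from any real
+ * target:
+ *
+ *	clock_gpll0: qcom,gpll0 {
+ *		compatible = "qcom,votable-pll";
+ *		qcom,en-offset = <0x1480>;
+ *		qcom,en-bit = <0>;
+ *		qcom,status-offset = <0x141c>;
+ *		qcom,status-bit = <17>;
+ *		qcom,pll-config-rate = <800000000>;
+ *	};
+ *
+ * "qcom,active-only-pll" and "qcom,sleep-active-pll" nodes additionally
+ * provide a qcom,peer phandle so that the two voters share one soft vote.
+ */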
+static void *votable_pll_clk_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct pll_vote_clk *v, *peer;
+	struct clk *c;
+	u32 val, rc;
+	phandle p;
+	struct msmclk_data *drv;
+
+	v = devm_kzalloc(dev, sizeof(*v), GFP_KERNEL);
+	if (!v)
+		return ERR_PTR(-ENOMEM);
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	v->base = &drv->base;
+
+	rc = of_property_read_u32(np, "qcom,en-offset", (u32 *)&v->en_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,en-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,en-bit", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,en-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	v->en_mask = BIT(val);
+
+	rc = of_property_read_u32(np, "qcom,status-offset",
+						(u32 *)&v->status_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,status-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,status-bit", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,status-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	v->status_mask = BIT(val);
+
+	rc = of_property_read_u32(np, "qcom,pll-config-rate", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,pll-config-rate dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	v->c.rate = val;
+
+	if (of_device_is_compatible(np, "qcom,active-only-pll"))
+		v->soft_vote_mask = PLL_SOFT_VOTE_ACPU;
+	else if (of_device_is_compatible(np, "qcom,sleep-active-pll"))
+		v->soft_vote_mask = PLL_SOFT_VOTE_PRIMARY;
+
+	if (of_device_is_compatible(np, "qcom,votable-pll")) {
+		v->c.ops = &clk_ops_pll_vote;
+		return msmclk_generic_clk_init(dev, np, &v->c);
+	}
+
+	rc = of_property_read_phandle_index(np, "qcom,peer", 0, &p);
+	if (rc) {
+		dt_err(np, "missing qcom,peer dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	c = msmclk_lookup_phandle(dev, p);
+	if (!IS_ERR_OR_NULL(c)) {
+		v->soft_vote = devm_kzalloc(dev, sizeof(*v->soft_vote),
+						GFP_KERNEL);
+		if (!v->soft_vote)
+			return ERR_PTR(-ENOMEM);
+
+		peer = to_pll_vote_clk(c);
+		peer->soft_vote = v->soft_vote;
+	}
+
+	v->c.ops = &clk_ops_pll_acpu_vote;
+	return msmclk_generic_clk_init(dev, np, &v->c);
+}
+MSMCLK_PARSER(votable_pll_clk_dt_parser, "qcom,active-only-pll", 0);
+MSMCLK_PARSER(votable_pll_clk_dt_parser, "qcom,sleep-active-pll", 1);
+MSMCLK_PARSER(votable_pll_clk_dt_parser, "qcom,votable-pll", 2);
diff --git a/drivers/clk/msm/clock-rpm.c b/drivers/clk/msm/clock-rpm.c
new file mode 100644
index 0000000..f95823d
--- /dev/null
+++ b/drivers/clk/msm/clock-rpm.c
@@ -0,0 +1,473 @@
+/* Copyright (c) 2010-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/rtmutex.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/clock-rpm.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+#define __clk_rpmrs_set_rate(r, value, ctx) \
+	((r)->rpmrs_data->set_rate_fn((r), (value), (ctx)))
+
+#define clk_rpmrs_set_rate_sleep(r, value) \
+	    __clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_sleep_id)
+
+#define clk_rpmrs_set_rate_active(r, value) \
+	   __clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_active_id)
+
+static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
+				uint32_t context)
+{
+	int ret;
+
+	struct msm_rpm_kvp kvp = {
+		.key = r->rpm_key,
+		.data = (void *)&value,
+		.length = sizeof(value),
+	};
+
+	switch (context) {
+	case MSM_RPM_CTX_ACTIVE_SET:
+		if (*r->last_active_set_vote == value)
+			return 0;
+		break;
+	case MSM_RPM_CTX_SLEEP_SET:
+		if (*r->last_sleep_set_vote == value)
+			return 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = msm_rpm_send_message(context, r->rpm_res_type, r->rpm_clk_id,
+			&kvp, 1);
+	if (ret)
+		return ret;
+
+	switch (context) {
+	case MSM_RPM_CTX_ACTIVE_SET:
+		*r->last_active_set_vote = value;
+		break;
+	case MSM_RPM_CTX_SLEEP_SET:
+		*r->last_sleep_set_vote = value;
+		break;
+	}
+
+	return 0;
+}
+
+static int clk_rpmrs_handoff_smd(struct rpm_clk *r)
+{
+	if (!r->branch)
+		r->c.rate = INT_MAX;
+
+	return 0;
+}
+
+static int clk_rpmrs_is_enabled_smd(struct rpm_clk *r)
+{
+	return !!r->c.prepare_count;
+}
+
+struct clk_rpmrs_data {
+	int (*set_rate_fn)(struct rpm_clk *r, uint32_t value, uint32_t context);
+	int (*get_rate_fn)(struct rpm_clk *r);
+	int (*handoff_fn)(struct rpm_clk *r);
+	int (*is_enabled)(struct rpm_clk *r);
+	int ctx_active_id;
+	int ctx_sleep_id;
+};
+
+struct clk_rpmrs_data clk_rpmrs_data_smd = {
+	.set_rate_fn = clk_rpmrs_set_rate_smd,
+	.handoff_fn = clk_rpmrs_handoff_smd,
+	.is_enabled = clk_rpmrs_is_enabled_smd,
+	.ctx_active_id = MSM_RPM_CTX_ACTIVE_SET,
+	.ctx_sleep_id = MSM_RPM_CTX_SLEEP_SET,
+};
+
+static DEFINE_RT_MUTEX(rpm_clock_lock);
+
+static void to_active_sleep_khz(struct rpm_clk *r, unsigned long rate,
+			unsigned long *active_khz, unsigned long *sleep_khz)
+{
+	/* Convert the rate (hz) to khz */
+	*active_khz = DIV_ROUND_UP(rate, 1000);
+
+	/*
+	 * Active-only clocks don't care what the rate is during sleep. So,
+	 * they vote for zero.
+	 */
+	if (r->active_only)
+		*sleep_khz = 0;
+	else
+		*sleep_khz = *active_khz;
+}
+
+static int rpm_clk_prepare(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	uint32_t value;
+	int rc = 0;
+	unsigned long this_khz, this_sleep_khz;
+	unsigned long peer_khz = 0, peer_sleep_khz = 0;
+	struct rpm_clk *peer = r->peer;
+
+	rt_mutex_lock(&rpm_clock_lock);
+
+	to_active_sleep_khz(r, r->c.rate, &this_khz, &this_sleep_khz);
+
+	/* Don't send requests to the RPM if the rate has not been set. */
+	if (this_khz == 0)
+		goto out;
+
+	/* Take peer clock's rate into account only if it's enabled. */
+	if (peer->enabled)
+		to_active_sleep_khz(peer, peer->c.rate,
+				&peer_khz, &peer_sleep_khz);
+
+	value = max(this_khz, peer_khz);
+	if (r->branch)
+		value = !!value;
+
+	rc = clk_rpmrs_set_rate_active(r, value);
+	if (rc)
+		goto out;
+
+	value = max(this_sleep_khz, peer_sleep_khz);
+	if (r->branch)
+		value = !!value;
+
+	rc = clk_rpmrs_set_rate_sleep(r, value);
+	if (rc) {
+		/* Undo the active set vote and restore it to peer_khz */
+		value = peer_khz;
+		rc = clk_rpmrs_set_rate_active(r, value);
+	}
+
+out:
+	if (!rc)
+		r->enabled = true;
+
+	rt_mutex_unlock(&rpm_clock_lock);
+
+	return rc;
+}
+
+static void rpm_clk_unprepare(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+
+	rt_mutex_lock(&rpm_clock_lock);
+
+	if (r->c.rate) {
+		uint32_t value;
+		struct rpm_clk *peer = r->peer;
+		unsigned long peer_khz = 0, peer_sleep_khz = 0;
+		int rc;
+
+		/* Take peer clock's rate into account only if it's enabled. */
+		if (peer->enabled)
+			to_active_sleep_khz(peer, peer->c.rate,
+				&peer_khz, &peer_sleep_khz);
+
+		value = r->branch ? !!peer_khz : peer_khz;
+		rc = clk_rpmrs_set_rate_active(r, value);
+		if (rc)
+			goto out;
+
+		value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
+		rc = clk_rpmrs_set_rate_sleep(r, value);
+	}
+	r->enabled = false;
+out:
+	rt_mutex_unlock(&rpm_clock_lock);
+}
+
+static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	unsigned long this_khz, this_sleep_khz;
+	int rc = 0;
+
+	rt_mutex_lock(&rpm_clock_lock);
+
+	if (r->enabled) {
+		uint32_t value;
+		struct rpm_clk *peer = r->peer;
+		unsigned long peer_khz = 0, peer_sleep_khz = 0;
+
+		to_active_sleep_khz(r, rate, &this_khz, &this_sleep_khz);
+
+		/* Take peer clock's rate into account only if it's enabled. */
+		if (peer->enabled)
+			to_active_sleep_khz(peer, peer->c.rate,
+					&peer_khz, &peer_sleep_khz);
+
+		value = max(this_khz, peer_khz);
+		rc = clk_rpmrs_set_rate_active(r, value);
+		if (rc)
+			goto out;
+
+		value = max(this_sleep_khz, peer_sleep_khz);
+		rc = clk_rpmrs_set_rate_sleep(r, value);
+	}
+
+out:
+	rt_mutex_unlock(&rpm_clock_lock);
+
+	return rc;
+}
+
+static unsigned long rpm_clk_get_rate(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+
+	if (r->rpmrs_data->get_rate_fn)
+		return r->rpmrs_data->get_rate_fn(r);
+	else
+		return clk->rate;
+}
+
+static int rpm_clk_is_enabled(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+
+	return r->rpmrs_data->is_enabled(r);
+}
+
+static long rpm_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	/* Rate rounding is not supported; return the requested rate as-is. */
+	return rate;
+}
+
+static bool rpm_clk_is_local(struct clk *clk)
+{
+	return false;
+}
+
+static enum handoff rpm_clk_handoff(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	int rc;
+
+	/*
+	 * Querying an RPM clock's status will return 0 unless the clock's
+	 * rate has previously been set through the RPM. When handing off,
+	 * assume these clocks are enabled (unless the RPM call fails) so
+	 * child clocks of these RPM clocks can still be handed off.
+	 */
+	rc  = r->rpmrs_data->handoff_fn(r);
+	if (rc < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	/*
+	 * Since RPM handoff code may update the software rate of the clock by
+	 * querying the RPM, we need to make sure our request to RPM now
+	 * matches the software rate of the clock. When we send the request
+	 * to RPM, we also need to update any other state info we would
+	 * normally update. So, call the appropriate clock function instead
+	 * of directly using the RPM driver APIs.
+	 */
+	rc = rpm_clk_prepare(clk);
+	if (rc < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+#define RPM_MISC_CLK_TYPE	0x306b6c63
+#define RPM_SCALING_ENABLE_ID	0x2
+
+int enable_rpm_scaling(void)
+{
+	int rc, value = 0x1;
+	static int is_inited;
+
+	struct msm_rpm_kvp kvp = {
+		.key = RPM_SMD_KEY_ENABLE,
+		.data = (void *)&value,
+		.length = sizeof(value),
+	};
+
+	if (is_inited)
+		return 0;
+
+	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_SLEEP_SET,
+			RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			WARN(1, "RPM clock scaling (sleep set) did not enable!\n");
+		return rc;
+	}
+
+	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_ACTIVE_SET,
+			RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			WARN(1, "RPM clock scaling (active set) did not enable!\n");
+		return rc;
+	}
+
+	is_inited++;
+	return 0;
+}
+
+int vote_bimc(struct rpm_clk *r, uint32_t value)
+{
+	int rc;
+
+	struct msm_rpm_kvp kvp = {
+		.key = r->rpm_key,
+		.data = (void *)&value,
+		.length = sizeof(value),
+	};
+
+	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_ACTIVE_SET,
+			r->rpm_res_type, r->rpmrs_data->ctx_active_id,
+			&kvp, 1);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			WARN(1, "BIMC vote not sent!\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+const struct clk_ops clk_ops_rpm = {
+	.prepare = rpm_clk_prepare,
+	.unprepare = rpm_clk_unprepare,
+	.set_rate = rpm_clk_set_rate,
+	.get_rate = rpm_clk_get_rate,
+	.is_enabled = rpm_clk_is_enabled,
+	.round_rate = rpm_clk_round_rate,
+	.is_local = rpm_clk_is_local,
+	.handoff = rpm_clk_handoff,
+};
+
+const struct clk_ops clk_ops_rpm_branch = {
+	.prepare = rpm_clk_prepare,
+	.unprepare = rpm_clk_unprepare,
+	.is_local = rpm_clk_is_local,
+	.handoff = rpm_clk_handoff,
+};
+
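+/*
+ * Illustrative DT node consumed by the parsers below. The node name and
+ * property values are documentation placeholders only:
+ *
+ *	clock_rpm_foo: qcom,rpm-clk-foo {
+ *		compatible = "qcom,rpm-clk";
+ *		qcom,rpm-peer = <&clock_rpm_foo_a>;
+ *		qcom,res-type = "clka";
+ *		qcom,res-id = <1>;
+ *		qcom,key = "rate";
+ *	};
+ *
+ * Branch clock nodes ("qcom,rpm-branch-clk"/"qcom,rpm-branch-a-clk") may
+ * also provide qcom,rcg-init-rate to seed the software rate.
+ */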
+static struct rpm_clk *rpm_clk_dt_parser_common(struct device *dev,
+						struct device_node *np)
+{
+	struct rpm_clk *rpm, *peer;
+	struct clk *c;
+	int rc = 0;
+	phandle p;
+	const char *str;
+
+	rpm = devm_kzalloc(dev, sizeof(*rpm), GFP_KERNEL);
+	if (!rpm)
+		return ERR_PTR(-ENOMEM);
+
+	rc = of_property_read_phandle_index(np, "qcom,rpm-peer", 0, &p);
+	if (rc) {
+		dt_err(np, "missing qcom,rpm-peer dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	/* Rely on whichever peer is parsed last to set up the circular ref */
+	c = msmclk_lookup_phandle(dev, p);
+	if (!IS_ERR(c)) {
+		uint32_t *sleep = devm_kzalloc(dev, sizeof(uint32_t),
+					       GFP_KERNEL);
+		uint32_t *active =
+			devm_kzalloc(dev, sizeof(uint32_t),
+				     GFP_KERNEL);
+
+		if (!sleep || !active)
+			return ERR_PTR(-ENOMEM);
+		peer = to_rpm_clk(c);
+		peer->peer = rpm;
+		rpm->peer = peer;
+		rpm->last_active_set_vote = active;
+		peer->last_active_set_vote = active;
+		rpm->last_sleep_set_vote = sleep;
+		peer->last_sleep_set_vote = sleep;
+	}
+
+	rpm->rpmrs_data = &clk_rpmrs_data_smd;
+	rpm->active_only = of_device_is_compatible(np, "qcom,rpm-a-clk") ||
+			of_device_is_compatible(np, "qcom,rpm-branch-a-clk");
+
+	rc = of_property_read_string(np, "qcom,res-type", &str);
+	if (rc) {
+		dt_err(np, "missing qcom,res-type dt property\n");
+		return ERR_PTR(rc);
+	}
+	if (sscanf(str, "%4c", (char *) &rpm->rpm_res_type) <= 0)
+		return ERR_PTR(-EINVAL);
+
+	rc = of_property_read_u32(np, "qcom,res-id", &rpm->rpm_clk_id);
+	if (rc) {
+		dt_err(np, "missing qcom,res-id dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	rc = of_property_read_string(np, "qcom,key", &str);
+	if (rc) {
+		dt_err(np, "missing qcom,key dt property\n");
+		return ERR_PTR(rc);
+	}
+	if (sscanf(str, "%4c", (char *) &rpm->rpm_key) <= 0)
+		return ERR_PTR(-EINVAL);
+	return rpm;
+}
+
+static void *rpm_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct rpm_clk *rpm;
+
+	rpm = rpm_clk_dt_parser_common(dev, np);
+	if (IS_ERR(rpm))
+		return rpm;
+
+	rpm->c.ops = &clk_ops_rpm;
+	return msmclk_generic_clk_init(dev, np, &rpm->c);
+}
+
+static void *rpm_branch_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct rpm_clk *rpm;
+	u32 rate;
+	int rc;
+
+	rpm = rpm_clk_dt_parser_common(dev, np);
+	if (IS_ERR(rpm))
+		return rpm;
+
+	rpm->c.ops = &clk_ops_rpm_branch;
+	rpm->branch = true;
+
+	rc = of_property_read_u32(np, "qcom,rcg-init-rate", &rate);
+	if (!rc)
+		rpm->c.rate = rate;
+
+	return msmclk_generic_clk_init(dev, np, &rpm->c);
+}
+MSMCLK_PARSER(rpm_clk_dt_parser, "qcom,rpm-clk", 0);
+MSMCLK_PARSER(rpm_clk_dt_parser, "qcom,rpm-a-clk", 1);
+MSMCLK_PARSER(rpm_branch_clk_dt_parser, "qcom,rpm-branch-clk", 0);
+MSMCLK_PARSER(rpm_branch_clk_dt_parser, "qcom,rpm-branch-a-clk", 1);
diff --git a/drivers/clk/msm/clock-voter.c b/drivers/clk/msm/clock-voter.c
new file mode 100644
index 0000000..b504724
--- /dev/null
+++ b/drivers/clk/msm/clock-voter.c
@@ -0,0 +1,202 @@
+/* Copyright (c) 2010-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/rtmutex.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/clock-voter.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+static DEFINE_RT_MUTEX(voter_clk_lock);
+
+/* Aggregate the rate of clocks that are currently on. */
+static unsigned long voter_clk_aggregate_rate(const struct clk *parent)
+{
+	struct clk *clk;
+	unsigned long rate = 0;
+
+	list_for_each_entry(clk, &parent->children, siblings) {
+		struct clk_voter *v = to_clk_voter(clk);
+
+		if (v->enabled)
+			rate = max(clk->rate, rate);
+	}
+	return rate;
+}
+
+static int voter_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	int ret = 0;
+	struct clk *clkp;
+	struct clk_voter *clkh, *v = to_clk_voter(clk);
+	unsigned long cur_rate, new_rate, other_rate = 0;
+
+	if (v->is_branch)
+		return 0;
+
+	rt_mutex_lock(&voter_clk_lock);
+
+	if (v->enabled) {
+		struct clk *parent = clk->parent;
+
+		/*
+		 * Get the aggregate rate without this clock's vote and update
+		 * the parent only if the new rate differs from the current one.
+		 */
+		list_for_each_entry(clkp, &parent->children, siblings) {
+			clkh = to_clk_voter(clkp);
+			if (clkh->enabled && clkh != v)
+				other_rate = max(clkp->rate, other_rate);
+		}
+
+		cur_rate = max(other_rate, clk->rate);
+		new_rate = max(other_rate, rate);
+
+		if (new_rate != cur_rate) {
+			ret = clk_set_rate(parent, new_rate);
+			if (ret)
+				goto unlock;
+		}
+	}
+	clk->rate = rate;
+unlock:
+	rt_mutex_unlock(&voter_clk_lock);
+
+	return ret;
+}
+
+static int voter_clk_prepare(struct clk *clk)
+{
+	int ret = 0;
+	unsigned long cur_rate;
+	struct clk *parent;
+	struct clk_voter *v = to_clk_voter(clk);
+
+	rt_mutex_lock(&voter_clk_lock);
+	parent = clk->parent;
+
+	if (v->is_branch) {
+		v->enabled = true;
+		goto out;
+	}
+
+	/*
+	 * Increase the rate if this clock is voting for a higher rate
+	 * than the current rate.
+	 */
+	cur_rate = voter_clk_aggregate_rate(parent);
+	if (clk->rate > cur_rate) {
+		ret = clk_set_rate(parent, clk->rate);
+		if (ret)
+			goto out;
+	}
+	v->enabled = true;
+out:
+	rt_mutex_unlock(&voter_clk_lock);
+
+	return ret;
+}
+
+static void voter_clk_unprepare(struct clk *clk)
+{
+	unsigned long cur_rate, new_rate;
+	struct clk *parent;
+	struct clk_voter *v = to_clk_voter(clk);
+
+	rt_mutex_lock(&voter_clk_lock);
+	parent = clk->parent;
+
+	/*
+	 * Decrease the rate if this clock was the only one voting for
+	 * the highest rate.
+	 */
+	v->enabled = false;
+	if (v->is_branch)
+		goto out;
+
+	new_rate = voter_clk_aggregate_rate(parent);
+	cur_rate = max(new_rate, clk->rate);
+
+	if (new_rate < cur_rate)
+		clk_set_rate(parent, new_rate);
+
+out:
+	rt_mutex_unlock(&voter_clk_lock);
+}
+
+static int voter_clk_is_enabled(struct clk *clk)
+{
+	struct clk_voter *v = to_clk_voter(clk);
+
+	return v->enabled;
+}
+
+static long voter_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	return clk_round_rate(clk->parent, rate);
+}
+
+static bool voter_clk_is_local(struct clk *clk)
+{
+	return true;
+}
+
+static enum handoff voter_clk_handoff(struct clk *clk)
+{
+	if (!clk->rate)
+		return HANDOFF_DISABLED_CLK;
+
+	/*
+	 * Send the default rate to the parent if necessary and update the
+	 * software state of the voter clock.
+	 */
+	if (voter_clk_prepare(clk) < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+const struct clk_ops clk_ops_voter = {
+	.prepare = voter_clk_prepare,
+	.unprepare = voter_clk_unprepare,
+	.set_rate = voter_clk_set_rate,
+	.is_enabled = voter_clk_is_enabled,
+	.round_rate = voter_clk_round_rate,
+	.is_local = voter_clk_is_local,
+	.handoff = voter_clk_handoff,
+};
+
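+/*
+ * Illustrative DT node for a software vote clock. The node name and rate
+ * are documentation placeholders only:
+ *
+ *	foo_clk_src: qcom,sw-vote-clk-foo {
+ *		compatible = "qcom,sw-vote-clk";
+ *		qcom,config-rate = <19200000>;
+ *	};
+ */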
+static void *sw_vote_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct clk_voter *v;
+	int rc;
+	u32 temp;
+
+	v = devm_kzalloc(dev, sizeof(*v), GFP_KERNEL);
+	if (!v)
+		return ERR_PTR(-ENOMEM);
+
+	rc = of_property_read_u32(np, "qcom,config-rate", &temp);
+	if (rc) {
+		dt_prop_err(np, "qcom,config-rate", "is missing");
+		return ERR_PTR(rc);
+	}
+
+	v->c.ops = &clk_ops_voter;
+	return msmclk_generic_clk_init(dev, np, &v->c);
+}
+MSMCLK_PARSER(sw_vote_clk_dt_parser, "qcom,sw-vote-clk", 0);
diff --git a/drivers/clk/msm/clock.c b/drivers/clk/msm/clock.c
new file mode 100644
index 0000000..30eac98
--- /dev/null
+++ b/drivers/clk/msm/clock.c
@@ -0,0 +1,1407 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+
+#include <trace/events/power.h>
+#include "clock.h"
+
+struct handoff_clk {
+	struct list_head list;
+	struct clk *clk;
+};
+static LIST_HEAD(handoff_list);
+
+struct handoff_vdd {
+	struct list_head list;
+	struct clk_vdd_class *vdd_class;
+};
+static LIST_HEAD(handoff_vdd_list);
+
+static DEFINE_MUTEX(msm_clock_init_lock);
+LIST_HEAD(orphan_clk_list);
+static LIST_HEAD(clk_notifier_list);
+
+/* Find the voltage level required for a given rate. */
+int find_vdd_level(struct clk *clk, unsigned long rate)
+{
+	int level;
+
+	for (level = 0; level < clk->num_fmax; level++)
+		if (rate <= clk->fmax[level])
+			break;
+
+	if (level == clk->num_fmax) {
+		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
+			clk->dbg_name);
+		return -EINVAL;
+	}
+
+	return level;
+}
+
+/* Update voltage level given the current votes. */
+static int update_vdd(struct clk_vdd_class *vdd_class)
+{
+	int level, rc = 0, i, ignore;
+	struct regulator **r = vdd_class->regulator;
+	int *uv = vdd_class->vdd_uv;
+	int *ua = vdd_class->vdd_ua;
+	int n_reg = vdd_class->num_regulators;
+	int cur_lvl = vdd_class->cur_level;
+	int max_lvl = vdd_class->num_levels - 1;
+	int cur_base = cur_lvl * n_reg;
+	int new_base;
+
+	/* aggregate votes */
+	for (level = max_lvl; level > 0; level--)
+		if (vdd_class->level_votes[level])
+			break;
+
+	if (level == cur_lvl)
+		return 0;
+
+	max_lvl = max_lvl * n_reg;
+	new_base = level * n_reg;
+	for (i = 0; i < vdd_class->num_regulators; i++) {
+		rc = regulator_set_voltage(r[i], uv[new_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+		if (rc)
+			goto set_voltage_fail;
+
+		if (ua) {
+			rc = regulator_set_load(r[i], ua[new_base + i]);
+			rc = rc > 0 ? 0 : rc;
+			if (rc)
+				goto set_mode_fail;
+		}
+		if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
+			rc = regulator_enable(r[i]);
+		else if (level == 0)
+			rc = regulator_disable(r[i]);
+		if (rc)
+			goto enable_disable_fail;
+	}
+	if (vdd_class->set_vdd && !vdd_class->num_regulators)
+		rc = vdd_class->set_vdd(vdd_class, level);
+
+	if (!rc)
+		vdd_class->cur_level = level;
+
+	return rc;
+
+enable_disable_fail:
+	/*
+	 * regulator_set_load() may use the voltage to derive the regulator
+	 * mode, so restore the previous voltage setting for r[i] first.
+	 */
+	if (ua) {
+		regulator_set_voltage(r[i], uv[cur_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+		regulator_set_load(r[i], ua[cur_base + i]);
+	}
+
+set_mode_fail:
+	regulator_set_voltage(r[i], uv[cur_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+
+set_voltage_fail:
+	for (i--; i >= 0; i--) {
+		regulator_set_voltage(r[i], uv[cur_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+		if (ua)
+			regulator_set_load(r[i], ua[cur_base + i]);
+		if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
+			regulator_disable(r[i]);
+		else if (level == 0)
+			ignore = regulator_enable(r[i]);
+	}
+	return rc;
+}
+
+/* Vote for a voltage level. */
+int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
+{
+	int rc;
+
+	if (level >= vdd_class->num_levels)
+		return -EINVAL;
+
+	mutex_lock(&vdd_class->lock);
+	vdd_class->level_votes[level]++;
+	rc = update_vdd(vdd_class);
+	if (rc)
+		vdd_class->level_votes[level]--;
+	mutex_unlock(&vdd_class->lock);
+
+	return rc;
+}
+
+/* Remove vote for a voltage level. */
+int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
+{
+	int rc = 0;
+
+	if (level >= vdd_class->num_levels)
+		return -EINVAL;
+
+	mutex_lock(&vdd_class->lock);
+	if (WARN(!vdd_class->level_votes[level],
+			"Reference counts are incorrect for %s level %d\n",
+			vdd_class->class_name, level))
+		goto out;
+	vdd_class->level_votes[level]--;
+	rc = update_vdd(vdd_class);
+	if (rc)
+		vdd_class->level_votes[level]++;
+out:
+	mutex_unlock(&vdd_class->lock);
+	return rc;
+}
+
+/* Vote for a voltage level corresponding to a clock's rate. */
+static int vote_rate_vdd(struct clk *clk, unsigned long rate)
+{
+	int level;
+
+	if (!clk->vdd_class)
+		return 0;
+
+	level = find_vdd_level(clk, rate);
+	if (level < 0)
+		return level;
+
+	return vote_vdd_level(clk->vdd_class, level);
+}
+
+/* Remove vote for a voltage level corresponding to a clock's rate. */
+static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
+{
+	int level;
+
+	if (!clk->vdd_class)
+		return;
+
+	level = find_vdd_level(clk, rate);
+	if (level < 0)
+		return;
+
+	unvote_vdd_level(clk->vdd_class, level);
+}
+
+/* Check if the rate is within the voltage limits of the clock. */
+bool is_rate_valid(struct clk *clk, unsigned long rate)
+{
+	int level;
+
+	if (!clk->vdd_class)
+		return true;
+
+	level = find_vdd_level(clk, rate);
+	return level >= 0;
+}
+
+/**
+ * __clk_pre_reparent() - Set up the new parent before switching to it and
+ * prevent the enable state of the child clock from changing.
+ * @c: The child clock that's going to switch parents
+ * @new: The new parent that the child clock is going to switch to
+ * @flags: Pointer to scratch space to save spinlock flags
+ *
+ * Cannot be called from atomic context.
+ *
+ * Use this API to set up the @new parent clock to be able to support the
+ * current prepare and enable state of the child clock @c. Once the parent is
+ * set up, the child clock can safely switch to it.
+ *
+ * The caller shall grab the prepare_lock of clock @c before calling this API
+ * and only release it after calling __clk_post_reparent() for clock @c (or
+ * if this API fails). This is necessary to prevent the prepare state of the
+ * child clock @c from changing while the reparenting is in progress. Since
+ * this API takes care of grabbing the enable lock of @c, only atomic
+ * operations are allowed between calls to __clk_pre_reparent() and
+ * __clk_post_reparent().
+ *
+ * The scratch space pointed to by @flags should not be altered before
+ * calling __clk_post_reparent() for clock @c.
+ *
+ * See also: __clk_post_reparent()
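+ *
+ * A minimal usage sketch (names are illustrative only):
+ *
+ *	mutex_lock(&c->prepare_lock);
+ *	rc = __clk_pre_reparent(c, new_parent, &flags);
+ *	if (!rc) {
+ *		... switch the mux to new_parent (atomic ops only) ...
+ *		__clk_post_reparent(c, old_parent, &flags);
+ *	}
+ *	mutex_unlock(&c->prepare_lock);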
+ */
+int __clk_pre_reparent(struct clk *c, struct clk *new, unsigned long *flags)
+{
+	int rc;
+
+	if (c->prepare_count) {
+		rc = clk_prepare(new);
+		if (rc)
+			return rc;
+	}
+
+	spin_lock_irqsave(&c->lock, *flags);
+	if (c->count) {
+		rc = clk_enable(new);
+		if (rc) {
+			spin_unlock_irqrestore(&c->lock, *flags);
+			clk_unprepare(new);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+/**
+ * __clk_post_reparent() - Release requirements on old parent after switching
+ * away from it and allow changes to the child clock's enable state.
+ * @c:   The child clock that switched parents
+ * @old: The old parent that the child clock switched away from or the new
+ *	 parent of a failed reparent attempt.
+ * @flags: Pointer to scratch space where spinlock flags were saved
+ *
+ * Cannot be called from atomic context.
+ *
+ * This API works in tandem with __clk_pre_reparent. Use this API to
+ * - Remove prepare and enable requirements from the @old parent after
+ *   switching away from it
+ * - Or, undo the effects of __clk_pre_reparent() after a failed attempt to
+ *   change parents
+ *
+ * The caller shall release the prepare_lock of @c that was grabbed before
+ * calling __clk_pre_reparent() only after this API is called (or if
+ * __clk_pre_reparent() fails). This is necessary to prevent the prepare
+ * state of the child clock @c from changing while the reparenting is in
+ * progress. Since this API releases the enable lock of @c, the limit to
+ * atomic operations set by __clk_pre_reparent() is no longer present.
+ *
+ * The scratch space pointed to by @flags shall not be altered since the call
+ * to  __clk_pre_reparent() for clock @c.
+ *
+ * See also: __clk_pre_reparent()
+ */
+void __clk_post_reparent(struct clk *c, struct clk *old, unsigned long *flags)
+{
+	if (c->count)
+		clk_disable(old);
+	spin_unlock_irqrestore(&c->lock, *flags);
+
+	if (c->prepare_count)
+		clk_unprepare(old);
+}
+
+int clk_prepare(struct clk *clk)
+{
+	int ret = 0;
+	struct clk *parent;
+
+	if (!clk)
+		return 0;
+	if (IS_ERR(clk))
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+	if (clk->prepare_count == 0) {
+		parent = clk->parent;
+
+		ret = clk_prepare(parent);
+		if (ret)
+			goto out;
+		ret = clk_prepare(clk->depends);
+		if (ret)
+			goto err_prepare_depends;
+
+		ret = vote_rate_vdd(clk, clk->rate);
+		if (ret)
+			goto err_vote_vdd;
+		if (clk->ops->prepare)
+			ret = clk->ops->prepare(clk);
+		if (ret)
+			goto err_prepare_clock;
+	}
+	clk->prepare_count++;
+out:
+	mutex_unlock(&clk->prepare_lock);
+	return ret;
+err_prepare_clock:
+	unvote_rate_vdd(clk, clk->rate);
+err_vote_vdd:
+	clk_unprepare(clk->depends);
+err_prepare_depends:
+	clk_unprepare(parent);
+	goto out;
+}
+EXPORT_SYMBOL(clk_prepare);
+
+/*
+ * Standard clock functions defined in include/linux/clk.h
+ */
+int clk_enable(struct clk *clk)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct clk *parent;
+	const char *name;
+
+	if (!clk)
+		return 0;
+	if (IS_ERR(clk))
+		return -EINVAL;
+	name = clk->dbg_name;
+
+	spin_lock_irqsave(&clk->lock, flags);
+	WARN(!clk->prepare_count,
+			"%s: Don't call enable on unprepared clocks\n", name);
+	if (clk->count == 0) {
+		parent = clk->parent;
+
+		ret = clk_enable(parent);
+		if (ret)
+			goto err_enable_parent;
+		ret = clk_enable(clk->depends);
+		if (ret)
+			goto err_enable_depends;
+
+		trace_clock_enable(name, 1, smp_processor_id());
+		if (clk->ops->enable)
+			ret = clk->ops->enable(clk);
+		if (ret)
+			goto err_enable_clock;
+	}
+	clk->count++;
+	spin_unlock_irqrestore(&clk->lock, flags);
+
+	return 0;
+
+err_enable_clock:
+	clk_disable(clk->depends);
+err_enable_depends:
+	clk_disable(parent);
+err_enable_parent:
+	spin_unlock_irqrestore(&clk->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+	const char *name;
+	unsigned long flags;
+
+	if (IS_ERR_OR_NULL(clk))
+		return;
+	name = clk->dbg_name;
+
+	spin_lock_irqsave(&clk->lock, flags);
+	WARN(!clk->prepare_count,
+			"%s: Never called prepare or calling disable after unprepare\n",
+			name);
+	if (WARN(clk->count == 0, "%s is unbalanced", name))
+		goto out;
+	if (clk->count == 1) {
+		struct clk *parent = clk->parent;
+
+		trace_clock_disable(name, 0, smp_processor_id());
+		if (clk->ops->disable)
+			clk->ops->disable(clk);
+		clk_disable(clk->depends);
+		clk_disable(parent);
+	}
+	clk->count--;
+out:
+	spin_unlock_irqrestore(&clk->lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
+void clk_unprepare(struct clk *clk)
+{
+	const char *name;
+
+	if (IS_ERR_OR_NULL(clk))
+		return;
+	name = clk->dbg_name;
+
+	mutex_lock(&clk->prepare_lock);
+	if (WARN(!clk->prepare_count, "%s is unbalanced (prepare)", name))
+		goto out;
+	if (clk->prepare_count == 1) {
+		struct clk *parent = clk->parent;
+
+		WARN(clk->count,
+			"%s: Don't call unprepare when the clock is enabled\n",
+			name);
+
+		if (clk->ops->unprepare)
+			clk->ops->unprepare(clk);
+		unvote_rate_vdd(clk, clk->rate);
+		clk_unprepare(clk->depends);
+		clk_unprepare(parent);
+	}
+	clk->prepare_count--;
+out:
+	mutex_unlock(&clk->prepare_lock);
+}
+EXPORT_SYMBOL(clk_unprepare);
+
+int clk_reset(struct clk *clk, enum clk_reset_action action)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (!clk->ops->reset)
+		return -EINVAL;
+
+	return clk->ops->reset(clk, action);
+}
+EXPORT_SYMBOL(clk_reset);
+
+/**
+ * __clk_notify - call clk notifier chain
+ * @clk: struct clk * that is changing rate
+ * @msg: clk notifier type (see include/linux/clk.h)
+ * @old_rate: old clk rate
+ * @new_rate: new clk rate
+ *
+ * Triggers a notifier call chain on the clk rate-change notification
+ * for 'clk'.  Passes a pointer to the struct clk and the previous
+ * and current rates to the notifier callback.  Intended to be called by
+ * internal clock code only.  Returns NOTIFY_DONE from the last driver
+ * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
+ * a driver returns that.
+ */
+static int __clk_notify(struct clk *clk, unsigned long msg,
+		unsigned long old_rate, unsigned long new_rate)
+{
+	struct msm_clk_notifier *cn;
+	struct msm_clk_notifier_data cnd;
+	int ret = NOTIFY_DONE;
+
+	cnd.clk = clk;
+	cnd.old_rate = old_rate;
+	cnd.new_rate = new_rate;
+
+	list_for_each_entry(cn, &clk_notifier_list, node) {
+		if (cn->clk == clk) {
+			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
+					&cnd);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * clk rate change notifiers
+ *
+ * Note - The following notifier functionality is a verbatim copy
+ * of the implementation in the common clock framework, copied here
+ * until MSM switches to the common clock framework.
+ */
+
+/**
+ * msm_clk_notif_register - add a clk rate change notifier
+ * @clk: struct clk * to watch
+ * @nb: struct notifier_block * with callback info
+ *
+ * Request notification when clk's rate changes.  This uses an SRCU
+ * notifier because we want it to block and notifier unregistrations are
+ * uncommon.  The callbacks associated with the notifier must not
+ * re-enter into the clk framework by calling any top-level clk APIs;
+ * this will cause a nested prepare_lock mutex.
+ *
+ * Pre-change notifier callbacks will be passed the current, pre-change
+ * rate of the clk via struct msm_clk_notifier_data.old_rate.  The new,
+ * post-change rate of the clk is passed via struct
+ * msm_clk_notifier_data.new_rate.
+ *
+ * Post-change notifiers will pass the now-current, post-change rate of
+ * the clk in both struct msm_clk_notifier_data.old_rate and struct
+ * msm_clk_notifier_data.new_rate.
+ *
+ * Abort-change notifiers are effectively the opposite of pre-change
+ * notifiers: the original pre-change clk rate is passed in via struct
+ * msm_clk_notifier_data.new_rate and the failed post-change rate is passed
+ * in via struct msm_clk_notifier_data.old_rate.
+ *
+ * msm_clk_notif_register() must be called from non-atomic context.
+ * Returns -EINVAL if called with null arguments, -ENOMEM upon
+ * allocation failure; otherwise, passes along the return value of
+ * srcu_notifier_chain_register().
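+ *
+ * A minimal registration sketch (the callback and notifier block names are
+ * illustrative only):
+ *
+ *	static int my_rate_cb(struct notifier_block *nb, unsigned long event,
+ *			      void *data)
+ *	{
+ *		struct msm_clk_notifier_data *cnd = data;
+ *
+ *		pr_debug("%lu -> %lu\n", cnd->old_rate, cnd->new_rate);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_nb = { .notifier_call = my_rate_cb };
+ *
+ *	msm_clk_notif_register(clk, &my_nb);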
+ */
+int msm_clk_notif_register(struct clk *clk, struct notifier_block *nb)
+{
+	struct msm_clk_notifier *cn;
+	int ret = -ENOMEM;
+
+	if (!clk || !nb)
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+
+	/* search the list of notifiers for this clk */
+	list_for_each_entry(cn, &clk_notifier_list, node)
+		if (cn->clk == clk)
+			break;
+
+	/* if clk wasn't in the notifier list, allocate new clk_notifier */
+	if (cn->clk != clk) {
+		cn = kzalloc(sizeof(struct msm_clk_notifier), GFP_KERNEL);
+		if (!cn)
+			goto out;
+
+		cn->clk = clk;
+		srcu_init_notifier_head(&cn->notifier_head);
+
+		list_add(&cn->node, &clk_notifier_list);
+	}
+
+	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
+
+	clk->notifier_count++;
+
+out:
+	mutex_unlock(&clk->prepare_lock);
+
+	return ret;
+}
+
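+/*
+ * Sketch of a hypothetical consumer callback for the notifier above; the
+ * event values and msm_clk_notifier_data layout match those used by
+ * __clk_notify():
+ *
+ *	static int example_rate_cb(struct notifier_block *nb,
+ *				   unsigned long event, void *data)
+ *	{
+ *		struct msm_clk_notifier_data *nd = data;
+ *
+ *		if (event == PRE_RATE_CHANGE)
+ *			pr_debug("rate %lu -> %lu\n", nd->old_rate,
+ *				 nd->new_rate);
+ *		return NOTIFY_OK;
+ *	}
+ */
+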
+/**
+ * msm_clk_notif_unregister - remove a clk rate change notifier
+ * @clk: struct clk *
+ * @nb: struct notifier_block * with callback info
+ *
+ * Requests no further notification for changes to 'clk' and frees the
+ * memory allocated in msm_clk_notif_register().
+ *
+ * Returns -EINVAL if called with null arguments; otherwise, passes
+ * along the return value of srcu_notifier_chain_unregister().
+ */
+int msm_clk_notif_unregister(struct clk *clk, struct notifier_block *nb)
+{
+	struct msm_clk_notifier *cn = NULL;
+	int ret = -EINVAL;
+
+	if (!clk || !nb)
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+
+	list_for_each_entry(cn, &clk_notifier_list, node)
+		if (cn->clk == clk)
+			break;
+
+	if (cn->clk == clk) {
+		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
+
+		clk->notifier_count--;
+
+		/* XXX the notifier code should handle this better */
+		if (!cn->notifier_head.head) {
+			srcu_cleanup_notifier_head(&cn->notifier_head);
+			list_del(&cn->node);
+			kfree(cn);
+		}
+
+	} else {
+		ret = -ENOENT;
+	}
+
+	mutex_unlock(&clk->prepare_lock);
+
+	return ret;
+}
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return 0;
+
+	if (!clk->ops->get_rate)
+		return clk->rate;
+
+	return clk->ops->get_rate(clk);
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	unsigned long start_rate;
+	int rc = 0;
+	const char *name;
+
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+	name = clk->dbg_name;
+
+	if (!is_rate_valid(clk, rate))
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+
+	/* Return early if the rate isn't going to change */
+	if (clk->rate == rate && !(clk->flags & CLKFLAG_NO_RATE_CACHE))
+		goto out;
+
+	if (!clk->ops->set_rate) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	trace_clock_set_rate(name, rate, raw_smp_processor_id());
+
+	start_rate = clk->rate;
+
+	if (clk->notifier_count)
+		__clk_notify(clk, PRE_RATE_CHANGE, clk->rate, rate);
+
+	if (clk->ops->pre_set_rate) {
+		rc = clk->ops->pre_set_rate(clk, rate);
+		if (rc)
+			goto abort_set_rate;
+	}
+
+	/* Enforce vdd requirements for target frequency. */
+	if (clk->prepare_count) {
+		rc = vote_rate_vdd(clk, rate);
+		if (rc)
+			goto err_vote_vdd;
+	}
+
+	rc = clk->ops->set_rate(clk, rate);
+	if (rc)
+		goto err_set_rate;
+	clk->rate = rate;
+
+	/* Release vdd requirements for starting frequency. */
+	if (clk->prepare_count)
+		unvote_rate_vdd(clk, start_rate);
+
+	if (clk->ops->post_set_rate)
+		clk->ops->post_set_rate(clk, start_rate);
+
+	if (clk->notifier_count)
+		__clk_notify(clk, POST_RATE_CHANGE, start_rate, clk->rate);
+
+	trace_clock_set_rate_complete(name, clk->rate, raw_smp_processor_id());
+out:
+	mutex_unlock(&clk->prepare_lock);
+	return rc;
+
+abort_set_rate:
+	__clk_notify(clk, ABORT_RATE_CHANGE, clk->rate, rate);
+err_set_rate:
+	if (clk->prepare_count)
+		unvote_rate_vdd(clk, rate);
+err_vote_vdd:
+	/* clk->rate is still the old rate. So, pass the new rate instead. */
+	if (clk->ops->post_set_rate)
+		clk->ops->post_set_rate(clk, rate);
+	goto out;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	long rrate;
+	unsigned long fmax = 0, i;
+
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	for (i = 0; i < clk->num_fmax; i++)
+		fmax = max(fmax, clk->fmax[i]);
+	if (!fmax)
+		fmax = ULONG_MAX;
+	rate = min(rate, fmax);
+
+	if (clk->ops->round_rate)
+		rrate = clk->ops->round_rate(clk, rate);
+	else if (clk->rate)
+		rrate = clk->rate;
+	else
+		return -EINVAL;
+
+	if (rrate > fmax)
+		return -EINVAL;
+	return rrate;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (!clk->ops->set_max_rate)
+		return -EINVAL;
+
+	return clk->ops->set_max_rate(clk, rate);
+}
+EXPORT_SYMBOL(clk_set_max_rate);
+
+int parent_to_src_sel(struct clk_src *parents, int num_parents, struct clk *p)
+{
+	int i;
+
+	for (i = 0; i < num_parents; i++) {
+		if (parents[i].src == p)
+			return parents[i].sel;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(parent_to_src_sel);
+
+int clk_get_parent_sel(struct clk *c, struct clk *parent)
+{
+	return parent_to_src_sel(c->parents, c->num_parents, parent);
+}
+EXPORT_SYMBOL(clk_get_parent_sel);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+	int rc = 0;
+
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (!clk->ops->set_parent && clk->parent == parent)
+		return 0;
+
+	if (!clk->ops->set_parent)
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+	if (clk->parent == parent && !(clk->flags & CLKFLAG_NO_RATE_CACHE))
+		goto out;
+	rc = clk->ops->set_parent(clk, parent);
+out:
+	mutex_unlock(&clk->prepare_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return NULL;
+
+	return clk->parent;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
+int clk_set_flags(struct clk *clk, unsigned long flags)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+	if (!clk->ops->set_flags)
+		return -EINVAL;
+
+	return clk->ops->set_flags(clk, flags);
+}
+EXPORT_SYMBOL(clk_set_flags);
+
+int clk_set_duty_cycle(struct clk *clk, u32 numerator, u32 denominator)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (numerator > denominator) {
+		pr_err("Numerator cannot be > denominator\n");
+		return -EINVAL;
+	}
+
+	if (!denominator) {
+		pr_err("Denominator can not be Zero\n");
+		return -EINVAL;
+	}
+
+	if (!clk->ops->set_duty_cycle)
+		return -EINVAL;
+
+	return clk->ops->set_duty_cycle(clk, numerator, denominator);
+}
+EXPORT_SYMBOL(clk_set_duty_cycle);
+
+static LIST_HEAD(initdata_list);
+
+static void init_sibling_lists(struct clk_lookup *clock_tbl, size_t num_clocks)
+{
+	struct clk *clk, *parent;
+	unsigned long n;
+
+	for (n = 0; n < num_clocks; n++) {
+		clk = clock_tbl[n].clk;
+		parent = clk->parent;
+		if (parent && list_empty(&clk->siblings))
+			list_add(&clk->siblings, &parent->children);
+	}
+}
+
+static void vdd_class_init(struct clk_vdd_class *vdd)
+{
+	struct handoff_vdd *v;
+
+	if (!vdd)
+		return;
+
+	if (vdd->skip_handoff)
+		return;
+
+	list_for_each_entry(v, &handoff_vdd_list, list) {
+		if (v->vdd_class == vdd)
+			return;
+	}
+
+	pr_debug("voting for vdd_class %s\n", vdd->class_name);
+	if (vote_vdd_level(vdd, vdd->num_levels - 1))
+		pr_err("failed to vote for %s\n", vdd->class_name);
+
+	v = kmalloc(sizeof(*v), GFP_KERNEL);
+	if (!v)
+		return;
+
+	v->vdd_class = vdd;
+	list_add_tail(&v->list, &handoff_vdd_list);
+}
+
+static int __handoff_clk(struct clk *clk)
+{
+	enum handoff state = HANDOFF_DISABLED_CLK;
+	struct handoff_clk *h = NULL;
+	int rc, i;
+
+	if (clk == NULL || clk->flags & CLKFLAG_INIT_DONE ||
+	    clk->flags & CLKFLAG_SKIP_HANDOFF)
+		return 0;
+
+	if (clk->flags & CLKFLAG_INIT_ERR)
+		return -ENXIO;
+
+	if (clk->flags & CLKFLAG_EPROBE_DEFER)
+		return -EPROBE_DEFER;
+
+	/* Handoff any 'depends' clock first. */
+	rc = __handoff_clk(clk->depends);
+	if (rc)
+		goto err;
+
+	/*
+	 * Handoff functions for the parent must be called before the
+	 * children can be handed off. Without handing off the parents and
+	 * knowing their rate and state (on/off), it's impossible to figure
+	 * out the rate and state of the children.
+	 */
+	if (clk->ops->get_parent)
+		clk->parent = clk->ops->get_parent(clk);
+
+	if (IS_ERR(clk->parent)) {
+		rc = PTR_ERR(clk->parent);
+		goto err;
+	}
+
+	rc = __handoff_clk(clk->parent);
+	if (rc)
+		goto err;
+
+	for (i = 0; i < clk->num_parents; i++) {
+		rc = __handoff_clk(clk->parents[i].src);
+		if (rc)
+			goto err;
+	}
+
+	if (clk->ops->handoff)
+		state = clk->ops->handoff(clk);
+
+	if (state == HANDOFF_ENABLED_CLK) {
+
+		h = kmalloc(sizeof(*h), GFP_KERNEL);
+		if (!h) {
+			rc = -ENOMEM;
+			goto err;
+		}
+
+		rc = clk_prepare_enable(clk->parent);
+		if (rc)
+			goto err;
+
+		rc = clk_prepare_enable(clk->depends);
+		if (rc)
+			goto err_depends;
+
+		rc = vote_rate_vdd(clk, clk->rate);
+		WARN(rc, "%s unable to vote for voltage!\n", clk->dbg_name);
+
+		clk->count = 1;
+		clk->prepare_count = 1;
+		h->clk = clk;
+		list_add_tail(&h->list, &handoff_list);
+
+		pr_debug("Handed off %s rate=%lu\n", clk->dbg_name, clk->rate);
+	}
+
+	if (clk->init_rate && clk_set_rate(clk, clk->init_rate))
+		pr_err("failed to set an init rate of %lu on %s\n",
+			clk->init_rate, clk->dbg_name);
+	if (clk->always_on && clk_prepare_enable(clk))
+		pr_err("failed to enable always-on clock %s\n",
+			clk->dbg_name);
+
+	clk->flags |= CLKFLAG_INIT_DONE;
+	/* if the clk is on orphan list, remove it */
+	list_del_init(&clk->list);
+	clock_debug_register(clk);
+
+	return 0;
+
+err_depends:
+	clk_disable_unprepare(clk->parent);
+err:
+	kfree(h);
+	if (rc == -EPROBE_DEFER) {
+		clk->flags |= CLKFLAG_EPROBE_DEFER;
+		if (list_empty(&clk->list))
+			list_add_tail(&clk->list, &orphan_clk_list);
+	} else {
+		pr_err("%s handoff failed (%d)\n", clk->dbg_name, rc);
+		clk->flags |= CLKFLAG_INIT_ERR;
+	}
+	return rc;
+}
+
+/**
+ * msm_clock_register() - Register additional clock tables
+ * @table: Table of clocks
+ * @size: Size of @table
+ *
+ * Upon return, clock APIs may be used to control clocks registered using this
+ * function.
+ */
+int msm_clock_register(struct clk_lookup *table, size_t size)
+{
+	int n = 0, rc;
+	struct clk *c, *safe;
+	bool found_more_clks;
+
+	mutex_lock(&msm_clock_init_lock);
+
+	init_sibling_lists(table, size);
+
+	/*
+	 * Enable regulators and temporarily set them up at maximum voltage.
+	 * Once all the clocks have made their respective vote, remove this
+	 * temporary vote. The temporary vote is removed at late_init, by
+	 * which time all the clocks are assumed to have been handed off.
+	 */
+	for (n = 0; n < size; n++)
+		vdd_class_init(table[n].clk->vdd_class);
+
+	/*
+	 * Detect and preserve the initial clock state until clock_late_init()
+	 * runs or a driver explicitly changes it, whichever comes first.
+	 */
+	for (n = 0; n < size; n++)
+		__handoff_clk(table[n].clk);
+
+	/* maintain backwards compatibility */
+	if (table[0].con_id || table[0].dev_id)
+		clkdev_add_table(table, size);
+
+	do {
+		found_more_clks = false;
+		/* clear cached __handoff_clk return values */
+		list_for_each_entry_safe(c, safe, &orphan_clk_list, list)
+			c->flags &= ~CLKFLAG_EPROBE_DEFER;
+
+		list_for_each_entry_safe(c, safe, &orphan_clk_list, list) {
+			rc = __handoff_clk(c);
+			if (!rc)
+				found_more_clks = true;
+		}
+	} while (found_more_clks);
+
+	mutex_unlock(&msm_clock_init_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_clock_register);
+
+struct of_msm_provider_data {
+	struct clk_lookup *table;
+	size_t size;
+};
+
+static struct clk *of_clk_src_get(struct of_phandle_args *clkspec,
+				  void *data)
+{
+	struct of_msm_provider_data *ofdata = data;
+	int n;
+
+	for (n = 0; n < ofdata->size; n++) {
+		if (clkspec->args[0] == ofdata->table[n].of_idx)
+			return ofdata->table[n].clk;
+	}
+	return ERR_PTR(-ENOENT);
+}
+
+#define MAX_LEN_OPP_HANDLE	50
+#define LEN_OPP_HANDLE		16
+#define LEN_OPP_VCORNER_HANDLE	22
+
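+/*
+ * LEN_OPP_HANDLE and LEN_OPP_VCORNER_HANDLE approximate the length of the
+ * fixed "qcom,...-opp-handle" and "qcom,...-opp-store-vcorner" parts of the
+ * property names built below; the clock name plus the fixed part must fit
+ * within MAX_LEN_OPP_HANDLE.
+ */
+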
+static struct device **derive_device_list(struct clk *clk,
+					struct device_node *np,
+					char *clk_handle_name, int len)
+{
+	int j, count, cpu;
+	struct platform_device *pdev;
+	struct device_node *dev_node;
+	struct device **device_list;
+
+	count = len/sizeof(u32);
+	device_list = kmalloc_array(count, sizeof(struct device *),
+							GFP_KERNEL);
+	if (!device_list)
+		return ERR_PTR(-ENOMEM);
+
+	for (j = 0; j < count; j++) {
+		device_list[j] = NULL;
+		dev_node = of_parse_phandle(np, clk_handle_name, j);
+		if (!dev_node) {
+			pr_err("Unable to get device_node pointer for %s opp-handle (%s)\n",
+					clk->dbg_name, clk_handle_name);
+			goto err_parse_phandle;
+		}
+
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == dev_node)
+				device_list[j] = get_cpu_device(cpu);
+		}
+
+		if (device_list[j])
+			continue;
+
+		pdev = of_find_device_by_node(dev_node);
+		if (!pdev) {
+			pr_err("Unable to find platform_device node for %s opp-handle\n",
+						clk->dbg_name);
+			goto err_parse_phandle;
+		}
+		device_list[j] = &pdev->dev;
+	}
+	return device_list;
+err_parse_phandle:
+	kfree(device_list);
+	return ERR_PTR(-EINVAL);
+}
+
+static int get_voltage(struct clk *clk, unsigned long rate,
+				int store_vcorner, int n)
+{
+	struct clk_vdd_class *vdd;
+	int uv, level, corner;
+
+	/*
+	 * Use the first regulator in the vdd class
+	 * for the OPP table.
+	 */
+	vdd = clk->vdd_class;
+	if (vdd->num_regulators > 1) {
+		corner = vdd->vdd_uv[vdd->num_regulators * n];
+	} else {
+		level = find_vdd_level(clk, rate);
+		if (level < 0) {
+			pr_err("Could not find vdd level\n");
+			return -EINVAL;
+		}
+		corner = vdd->vdd_uv[level];
+	}
+
+	if (!corner) {
+		pr_err("%s: Unable to find vdd level for rate %lu\n",
+					clk->dbg_name, rate);
+		return -EINVAL;
+	}
+
+	if (store_vcorner) {
+		uv = corner;
+		return uv;
+	}
+
+	uv = regulator_list_corner_voltage(vdd->regulator[0], corner);
+	if (uv < 0) {
+		pr_err("%s: no uv for corner %d - err: %d\n",
+				clk->dbg_name, corner, uv);
+		return uv;
+	}
+	return uv;
+}
+
+static int add_and_print_opp(struct clk *clk, struct device **device_list,
+				int count, unsigned long rate, int uv, int n)
+{
+	int j, ret = 0;
+
+	for (j = 0; j < count; j++) {
+		ret = dev_pm_opp_add(device_list[j], rate, uv);
+		if (ret) {
+			pr_err("%s: couldn't add OPP for %lu - err: %d\n",
+						clk->dbg_name, rate, ret);
+			return ret;
+		}
+		if (n == 1 || n == clk->num_fmax - 1 ||
+					rate == clk_round_rate(clk, INT_MAX))
+			pr_info("%s: set OPP pair(%lu Hz: %u uV) on %s\n",
+						clk->dbg_name, rate, uv,
+						dev_name(device_list[j]));
+	}
+	return ret;
+}
+
+static void populate_clock_opp_table(struct device_node *np,
+			struct clk_lookup *table, size_t size)
+{
+	struct device **device_list;
+	struct clk *clk;
+	char clk_handle_name[MAX_LEN_OPP_HANDLE];
+	char clk_store_volt_corner[MAX_LEN_OPP_HANDLE];
+	size_t i;
+	int n, len, count, uv = 0;
+	unsigned long rate;
+	long ret = 0;
+	bool store_vcorner;
+
+	/* Iterate across all clocks in the clock controller */
+	for (i = 0; i < size; i++) {
+		n = 1;
+		rate = 0;
+
+		store_vcorner = false;
+		clk = table[i].clk;
+		if (!clk || !clk->num_fmax || clk->opp_table_populated)
+			continue;
+
+		if (strlen(clk->dbg_name) + LEN_OPP_HANDLE
+					< MAX_LEN_OPP_HANDLE) {
+			ret = snprintf(clk_handle_name,
+					ARRAY_SIZE(clk_handle_name),
+					"qcom,%s-opp-handle", clk->dbg_name);
+			if (ret < strlen(clk->dbg_name) + LEN_OPP_HANDLE) {
+				pr_err("Failed to hold clk_handle_name\n");
+				continue;
+			}
+		} else {
+			pr_err("clk name (%s) too large to fit in clk_handle_name\n",
+							clk->dbg_name);
+			continue;
+		}
+
+		if (strlen(clk->dbg_name) + LEN_OPP_VCORNER_HANDLE
+					< MAX_LEN_OPP_HANDLE) {
+			ret = snprintf(clk_store_volt_corner,
+				ARRAY_SIZE(clk_store_volt_corner),
+				"qcom,%s-opp-store-vcorner", clk->dbg_name);
+			if (ret < strlen(clk->dbg_name) +
+						LEN_OPP_VCORNER_HANDLE) {
+				pr_err("Failed to hold clk_store_volt_corner\n");
+				continue;
+			}
+		} else {
+			pr_err("clk name (%s) too large to fit in clk_store_volt_corner\n",
+							clk->dbg_name);
+			continue;
+		}
+
+		if (!of_find_property(np, clk_handle_name, &len)) {
+			pr_debug("Unable to find %s\n", clk_handle_name);
+			if (!of_find_property(np, clk_store_volt_corner,
+								&len)) {
+				pr_debug("Unable to find %s\n",
+						clk_store_volt_corner);
+				continue;
+			} else {
+				store_vcorner = true;
+				device_list = derive_device_list(clk, np,
+						clk_store_volt_corner, len);
+			}
+		} else
+			device_list = derive_device_list(clk, np,
+						clk_handle_name, len);
+		if (IS_ERR_OR_NULL(device_list)) {
+			pr_err("Failed to fill device_list\n");
+			continue;
+		}
+
+		count = len/sizeof(u32);
+		while (1) {
+			/*
+			 * Calling clk_round_rate will not work for all clocks
+			 * (e.g. mux_div). Use their fmax values instead to
+			 * get the list of all available frequencies.
+			 */
+			if (clk->ops->list_rate) {
+				ret = clk_round_rate(clk, rate + 1);
+				if (ret < 0) {
+					pr_err("clk_round_rate failed for %s\n",
+							clk->dbg_name);
+					goto err_round_rate;
+				}
+				/*
+				 * If clk_round_rate gives the same value on
+				 * consecutive iterations, exit the loop since
+				 * we're at the maximum clock frequency.
+				 */
+				if (rate == ret)
+					break;
+				rate = ret;
+			} else {
+				if (n < clk->num_fmax)
+					rate = clk->fmax[n];
+				else
+					break;
+			}
+
+			uv = get_voltage(clk, rate, store_vcorner, n);
+			if (uv < 0)
+				goto err_round_rate;
+
+			ret = add_and_print_opp(clk, device_list, count,
+							rate, uv, n);
+			if (ret)
+				goto err_round_rate;
+
+			n++;
+		}
+err_round_rate:
+		/* If OPP table population was successful, set the flag */
+		if (uv >= 0 && ret >= 0)
+			clk->opp_table_populated = true;
+		kfree(device_list);
+	}
+}
+
+/**
+ * of_msm_clock_register() - Register clock tables with clkdev and with the
+ *			     clock DT framework
+ * @table: Table of clocks
+ * @size: Size of @table
+ * @np: Device pointer corresponding to the clock-provider device
+ *
+ * Upon return, clock APIs may be used to control clocks registered using this
+ * function.
+ */
+int of_msm_clock_register(struct device_node *np, struct clk_lookup *table,
+				size_t size)
+{
+	int ret = 0;
+	struct of_msm_provider_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->table = table;
+	data->size = size;
+
+	ret = of_clk_add_provider(np, of_clk_src_get, data);
+	if (ret) {
+		kfree(data);
+		return -ENOMEM;
+	}
+
+	populate_clock_opp_table(np, table, size);
+	return msm_clock_register(table, size);
+}
+EXPORT_SYMBOL(of_msm_clock_register);
+
+/**
+ * msm_clock_init() - Register and initialize a clock driver
+ * @data: Driver-specific clock initialization data
+ *
+ * Upon return from this call, clock APIs may be used to control
+ * clocks registered with this API.
+ */
+int __init msm_clock_init(struct clock_init_data *data)
+{
+	if (!data)
+		return -EINVAL;
+
+	if (data->pre_init)
+		data->pre_init();
+
+	mutex_lock(&msm_clock_init_lock);
+	if (data->late_init)
+		list_add(&data->list, &initdata_list);
+	mutex_unlock(&msm_clock_init_lock);
+
+	msm_clock_register(data->table, data->size);
+
+	if (data->post_init)
+		data->post_init();
+
+	return 0;
+}
+
+static int __init clock_late_init(void)
+{
+	struct handoff_clk *h, *h_temp;
+	struct handoff_vdd *v, *v_temp;
+	struct clock_init_data *initdata, *initdata_temp;
+	int ret = 0;
+
+	pr_info("%s: Removing enables held for handed-off clocks\n", __func__);
+
+	mutex_lock(&msm_clock_init_lock);
+
+	list_for_each_entry_safe(initdata, initdata_temp,
+					&initdata_list, list) {
+		ret = initdata->late_init();
+		if (ret)
+			pr_err("%s: %pS failed late_init.\n", __func__,
+				initdata);
+	}
+
+	list_for_each_entry_safe(h, h_temp, &handoff_list, list) {
+		clk_disable_unprepare(h->clk);
+		list_del(&h->list);
+		kfree(h);
+	}
+
+	list_for_each_entry_safe(v, v_temp, &handoff_vdd_list, list) {
+		unvote_vdd_level(v->vdd_class, v->vdd_class->num_levels - 1);
+		list_del(&v->list);
+		kfree(v);
+	}
+
+	mutex_unlock(&msm_clock_init_lock);
+
+	return ret;
+}
+/*
+ * clock_late_init should run only after all deferred probing
+ * (excluding DLKM probes) has completed.
+ */
+late_initcall_sync(clock_late_init);
diff --git a/drivers/clk/msm/clock.h b/drivers/clk/msm/clock.h
new file mode 100644
index 0000000..f8c6fbf
--- /dev/null
+++ b/drivers/clk/msm/clock.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_CLK_MSM_CLOCK_H
+#define __DRIVERS_CLK_MSM_CLOCK_H
+
+#include <linux/clkdev.h>
+
+/**
+ * struct clock_init_data - SoC specific clock initialization data
+ * @table: table of lookups to add
+ * @size: size of @table
+ * @pre_init: called before initializing the clock driver.
+ * @post_init: called after registering @table. Clock APIs may be called here.
+ * @late_init: called during late init
+ */
+struct clock_init_data {
+	struct list_head list;
+	struct clk_lookup *table;
+	size_t size;
+	void (*pre_init)(void);
+	void (*post_init)(void);
+	int (*late_init)(void);
+};
+
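+/*
+ * Minimal usage sketch (hypothetical SoC table, assuming the CLK_LIST()
+ * helper from msm-clk-provider.h):
+ *
+ *	static struct clk_lookup example_clocks[] = {
+ *		CLK_LIST(example_ahb_clk),
+ *	};
+ *
+ *	static struct clock_init_data example_init_data __initdata = {
+ *		.table = example_clocks,
+ *		.size = ARRAY_SIZE(example_clocks),
+ *	};
+ *
+ *	msm_clock_init(&example_init_data);
+ */
+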
+int msm_clock_init(struct clock_init_data *data);
+int find_vdd_level(struct clk *clk, unsigned long rate);
+extern struct list_head orphan_clk_list;
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMMON_CLK_MSM)
+int clock_debug_register(struct clk *clk);
+void clock_debug_print_enabled(bool print_parent);
+#elif defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMMON_CLK_QCOM)
+void clock_debug_print_enabled(bool print_parent);
+#else
+static inline int clock_debug_register(struct clk *unused)
+{
+	return 0;
+}
+static inline void clock_debug_print_enabled(bool print_parent) { }
+#endif
+
+#endif
diff --git a/drivers/clk/msm/gdsc.c b/drivers/clk/msm/gdsc.c
new file mode 100644
index 0000000..e24795e
--- /dev/null
+++ b/drivers/clk/msm/gdsc.c
@@ -0,0 +1,721 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/reset.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk.h>
+
+#define PWR_ON_MASK		BIT(31)
+#define EN_REST_WAIT_MASK	(0xF << 20)
+#define EN_FEW_WAIT_MASK	(0xF << 16)
+#define CLK_DIS_WAIT_MASK	(0xF << 12)
+#define SW_OVERRIDE_MASK	BIT(2)
+#define HW_CONTROL_MASK		BIT(1)
+#define SW_COLLAPSE_MASK	BIT(0)
+#define GMEM_CLAMP_IO_MASK	BIT(0)
+#define GMEM_RESET_MASK		BIT(4)
+#define BCR_BLK_ARES_BIT	BIT(0)
+
+/*
+ * Wait 2^n CXO cycles between state transitions; n is taken from the
+ * corresponding *_WAIT_VAL field below.
+ */
+#define EN_REST_WAIT_VAL	(0x2 << 20)
+#define EN_FEW_WAIT_VAL		(0x8 << 16)
+#define CLK_DIS_WAIT_VAL	(0x2 << 12)
+
+#define TIMEOUT_US		100
+
+struct gdsc {
+	struct regulator_dev	*rdev;
+	struct regulator_desc	rdesc;
+	void __iomem		*gdscr;
+	struct clk		**clocks;
+	struct reset_control	**reset_clocks;
+	int			clock_count;
+	int			reset_count;
+	bool			toggle_mem;
+	bool			toggle_periph;
+	bool			toggle_logic;
+	bool			resets_asserted;
+	bool			root_en;
+	bool			force_root_en;
+	int			root_clk_idx;
+	bool			no_status_check_on_disable;
+	bool			is_gdsc_enabled;
+	bool			allow_clear;
+	bool			reset_aon;
+	void __iomem		*domain_addr;
+	void __iomem		*hw_ctrl_addr;
+	void __iomem		*sw_reset_addr;
+	u32			gds_timeout;
+};
+
+enum gdscr_status {
+	ENABLED,
+	DISABLED,
+};
+
+static DEFINE_MUTEX(gdsc_seq_lock);
+
+void gdsc_allow_clear_retention(struct regulator *regulator)
+{
+	struct gdsc *sc = regulator_get_drvdata(regulator);
+
+	if (sc)
+		sc->allow_clear = true;
+}
+
+static int poll_gdsc_status(struct gdsc *sc, enum gdscr_status status)
+{
+	void __iomem *gdscr;
+	int count = sc->gds_timeout;
+	u32 val;
+
+	if (sc->hw_ctrl_addr)
+		gdscr = sc->hw_ctrl_addr;
+	else
+		gdscr = sc->gdscr;
+
+	for (; count > 0; count--) {
+		val = readl_relaxed(gdscr);
+		val &= PWR_ON_MASK;
+		switch (status) {
+		case ENABLED:
+			if (val)
+				return 0;
+			break;
+		case DISABLED:
+			if (!val)
+				return 0;
+			break;
+		}
+		/*
+		 * There is no guarantee about the delay needed for the enable
+		 * bit in the GDSCR to be set or reset after the GDSC state
+		 * changes. Hence, keep checking for a reasonable number of
+		 * iterations, with the least possible delay between
+		 * successive tries, until the bit reaches the expected state.
+		 */
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+
+static int gdsc_is_enabled(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+
+	if (!sc->toggle_logic)
+		return !sc->resets_asserted;
+
+	regval = readl_relaxed(sc->gdscr);
+	if (regval & PWR_ON_MASK) {
+		/*
+		 * The GDSC might be turned on due to TZ/HYP vote on the
+		 * votable GDS registers. Check the SW_COLLAPSE_MASK to
+		 * determine if HLOS has voted for it.
+		 */
+		if (!(regval & SW_COLLAPSE_MASK))
+			return true;
+	}
+	return false;
+}
+
+static int gdsc_enable(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval, hw_ctrl_regval = 0x0;
+	int i, ret = 0;
+
+	mutex_lock(&gdsc_seq_lock);
+
+	if (sc->root_en || sc->force_root_en)
+		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
+
+	if (sc->toggle_logic) {
+		if (sc->sw_reset_addr) {
+			regval = readl_relaxed(sc->sw_reset_addr);
+			regval |= BCR_BLK_ARES_BIT;
+			writel_relaxed(regval, sc->sw_reset_addr);
+			/*
+			 * BLK_ARES should be kept asserted for 1us before
+			 * being de-asserted.
+			 */
+			wmb();
+			udelay(1);
+
+			regval &= ~BCR_BLK_ARES_BIT;
+			writel_relaxed(regval, sc->sw_reset_addr);
+
+			/* Make sure de-assert goes through before continuing */
+			wmb();
+		}
+
+		if (sc->domain_addr) {
+			if (sc->reset_aon) {
+				regval = readl_relaxed(sc->domain_addr);
+				regval |= GMEM_RESET_MASK;
+				writel_relaxed(regval, sc->domain_addr);
+				/*
+				 * Keep reset asserted for at-least 1us before
+				 * continuing.
+				 */
+				wmb();
+				udelay(1);
+
+				regval &= ~GMEM_RESET_MASK;
+				writel_relaxed(regval, sc->domain_addr);
+				/*
+				 * Make sure GMEM_RESET is de-asserted before
+				 * continuing.
+				 */
+				wmb();
+			}
+
+			regval = readl_relaxed(sc->domain_addr);
+			regval &= ~GMEM_CLAMP_IO_MASK;
+			writel_relaxed(regval, sc->domain_addr);
+			/*
+			 * Make sure CLAMP_IO is de-asserted before continuing.
+			 */
+			wmb();
+		}
+
+		regval = readl_relaxed(sc->gdscr);
+		if (regval & HW_CONTROL_MASK) {
+			dev_warn(&rdev->dev, "Invalid enable while %s is under HW control\n",
+				 sc->rdesc.name);
+			mutex_unlock(&gdsc_seq_lock);
+			return -EBUSY;
+		}
+
+		regval &= ~SW_COLLAPSE_MASK;
+		writel_relaxed(regval, sc->gdscr);
+
+		/* Wait for 8 XO cycles before polling the status bit. */
+		mb();
+		udelay(1);
+
+		ret = poll_gdsc_status(sc, ENABLED);
+		if (ret) {
+			regval = readl_relaxed(sc->gdscr);
+			if (sc->hw_ctrl_addr) {
+				hw_ctrl_regval =
+					readl_relaxed(sc->hw_ctrl_addr);
+				dev_warn(&rdev->dev, "%s state (after %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x. Re-polling.\n",
+					sc->rdesc.name, sc->gds_timeout,
+					regval, hw_ctrl_regval);
+
+				ret = poll_gdsc_status(sc, ENABLED);
+				if (ret) {
+					dev_err(&rdev->dev, "%s final state (after additional %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x\n",
+					sc->rdesc.name, sc->gds_timeout,
+					readl_relaxed(sc->gdscr),
+					readl_relaxed(sc->hw_ctrl_addr));
+
+					mutex_unlock(&gdsc_seq_lock);
+					return ret;
+				}
+			} else {
+				dev_err(&rdev->dev, "%s enable timed out: 0x%x\n",
+					sc->rdesc.name,
+					regval);
+				udelay(sc->gds_timeout);
+				regval = readl_relaxed(sc->gdscr);
+				dev_err(&rdev->dev, "%s final state: 0x%x (%d us after timeout)\n",
+					sc->rdesc.name, regval,
+					sc->gds_timeout);
+				mutex_unlock(&gdsc_seq_lock);
+				return ret;
+			}
+		}
+	} else {
+		for (i = 0; i < sc->reset_count; i++)
+			reset_control_deassert(sc->reset_clocks[i]);
+		sc->resets_asserted = false;
+	}
+
+	for (i = 0; i < sc->clock_count; i++) {
+		if (unlikely(i == sc->root_clk_idx))
+			continue;
+		if (sc->toggle_mem)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
+		if (sc->toggle_periph)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
+	}
+
+	/*
+	 * If clocks to this power domain were already on, they will take an
+	 * additional 4 clock cycles to re-enable after the rail is enabled.
+	 * Delay to account for this. A delay is also needed to ensure clocks
+	 * are not enabled within 400ns of enabling power to the memories.
+	 */
+	udelay(1);
+
+	/* Delay to account for staggered memory powerup. */
+	udelay(1);
+
+	if (sc->force_root_en)
+		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
+	sc->is_gdsc_enabled = true;
+
+	mutex_unlock(&gdsc_seq_lock);
+
+	return ret;
+}
+
+static int gdsc_disable(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+	int i, ret = 0;
+
+	mutex_lock(&gdsc_seq_lock);
+
+	if (sc->force_root_en)
+		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
+
+	for (i = sc->clock_count-1; i >= 0; i--) {
+		if (unlikely(i == sc->root_clk_idx))
+			continue;
+		if (sc->toggle_mem && sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
+		if (sc->toggle_periph && sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
+	}
+
+	/* Delay to account for staggered memory powerdown. */
+	udelay(1);
+
+	if (sc->toggle_logic) {
+		regval = readl_relaxed(sc->gdscr);
+		if (regval & HW_CONTROL_MASK) {
+			dev_warn(&rdev->dev, "Invalid disable while %s is under HW control\n",
+				 sc->rdesc.name);
+			mutex_unlock(&gdsc_seq_lock);
+			return -EBUSY;
+		}
+
+		regval |= SW_COLLAPSE_MASK;
+		writel_relaxed(regval, sc->gdscr);
+		/* Wait for 8 XO cycles before polling the status bit. */
+		mb();
+		udelay(1);
+
+		if (sc->no_status_check_on_disable) {
+			/*
+			 * Add a short delay here to ensure that gdsc_enable
+			 * right after it was disabled does not put it in a
+			 * weird state.
+			 */
+			udelay(TIMEOUT_US);
+		} else {
+			ret = poll_gdsc_status(sc, DISABLED);
+			if (ret)
+				dev_err(&rdev->dev, "%s disable timed out: 0x%x\n",
+					sc->rdesc.name, regval);
+		}
+
+		if (sc->domain_addr) {
+			regval = readl_relaxed(sc->domain_addr);
+			regval |= GMEM_CLAMP_IO_MASK;
+			writel_relaxed(regval, sc->domain_addr);
+			/* Make sure CLAMP_IO is asserted before continuing. */
+			wmb();
+		}
+	} else {
+		for (i = sc->reset_count-1; i >= 0; i--)
+			reset_control_assert(sc->reset_clocks[i]);
+		sc->resets_asserted = true;
+	}
+
+	/*
+	 * Check if gdsc_enable was called for this GDSC. If not, the root
+	 * clock will not have been enabled prior to this.
+	 */
+	if ((sc->is_gdsc_enabled && sc->root_en) || sc->force_root_en)
+		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
+	sc->is_gdsc_enabled = false;
+
+	mutex_unlock(&gdsc_seq_lock);
+
+	return ret;
+}
+
+static unsigned int gdsc_get_mode(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+
+	mutex_lock(&gdsc_seq_lock);
+	regval = readl_relaxed(sc->gdscr);
+	mutex_unlock(&gdsc_seq_lock);
+	if (regval & HW_CONTROL_MASK)
+		return REGULATOR_MODE_FAST;
+	return REGULATOR_MODE_NORMAL;
+}
+
+static int gdsc_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+	int ret = 0;
+
+	mutex_lock(&gdsc_seq_lock);
+
+	regval = readl_relaxed(sc->gdscr);
+
+	/*
+	 * HW control can only be enabled/disabled while SW_COLLAPSE
+	 * indicates the GDSC is on.
+	 */
+	if (regval & SW_COLLAPSE_MASK) {
+		dev_err(&rdev->dev, "can't enable hw collapse now\n");
+		mutex_unlock(&gdsc_seq_lock);
+		return -EBUSY;
+	}
+
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		/* Turn on HW trigger mode */
+		regval |= HW_CONTROL_MASK;
+		writel_relaxed(regval, sc->gdscr);
+		/*
+		 * There may be a race with the internal HW trigger signal
+		 * that causes the GDSC to go through a power-down/power-up
+		 * cycle.  If the HW trigger signal is controlled by firmware
+		 * that also polls the same status bits we do, the FW might
+		 * read an 'on' status before the GDSC finishes its power
+		 * cycle.  Wait 1us before returning to ensure the FW can't
+		 * immediately poll the status bit.
+		 */
+		mb();
+		udelay(1);
+		break;
+
+	case REGULATOR_MODE_NORMAL:
+		/* Turn off HW trigger mode */
+		regval &= ~HW_CONTROL_MASK;
+		writel_relaxed(regval, sc->gdscr);
+		/*
+		 * There may be a race with the internal HW trigger signal
+		 * that causes the GDSC to go through a power-down/power-up
+		 * cycle.  If we poll too early, the status bit will indicate
+		 * 'on' before the GDSC can finish the power cycle.
+		 * Account for this case by waiting 1us before polling.
+		 */
+		mb();
+		udelay(1);
+
+		ret = poll_gdsc_status(sc, ENABLED);
+		if (ret)
+			dev_err(&rdev->dev, "%s set_mode timed out: 0x%x\n",
+				sc->rdesc.name, regval);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&gdsc_seq_lock);
+
+	return ret;
+}
+
+static struct regulator_ops gdsc_ops = {
+	.is_enabled = gdsc_is_enabled,
+	.enable = gdsc_enable,
+	.disable = gdsc_disable,
+	.set_mode = gdsc_set_mode,
+	.get_mode = gdsc_get_mode,
+};
+
+static int gdsc_probe(struct platform_device *pdev)
+{
+	static atomic_t gdsc_count = ATOMIC_INIT(-1);
+	struct regulator_config reg_config = {};
+	struct regulator_init_data *init_data;
+	struct resource *res;
+	struct gdsc *sc;
+	uint32_t regval, clk_dis_wait_val = CLK_DIS_WAIT_VAL;
+	bool retain_mem, retain_periph, support_hw_trigger;
+	int i, ret;
+	u32 timeout;
+
+	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
+	if (sc == NULL)
+		return -ENOMEM;
+
+	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
+			&sc->rdesc);
+	if (init_data == NULL)
+		return -ENOMEM;
+
+	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
+		init_data->supply_regulator = "parent";
+
+	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
+				      &sc->rdesc.name);
+	if (ret)
+		return ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -EINVAL;
+	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (sc->gdscr == NULL)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"domain_addr");
+	if (res) {
+		sc->domain_addr = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (sc->domain_addr == NULL)
+			return -ENOMEM;
+	}
+
+	sc->reset_aon = of_property_read_bool(pdev->dev.of_node,
+						"qcom,reset-aon-logic");
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"sw_reset");
+	if (res) {
+		sc->sw_reset_addr = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (sc->sw_reset_addr == NULL)
+			return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"hw_ctrl_addr");
+	if (res) {
+		sc->hw_ctrl_addr = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (sc->hw_ctrl_addr == NULL)
+			return -ENOMEM;
+	}
+
+	sc->gds_timeout = TIMEOUT_US;
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,gds-timeout",
+							&timeout);
+	if (!ret)
+		sc->gds_timeout = timeout;
+
+	sc->clock_count = of_property_count_strings(pdev->dev.of_node,
+					    "clock-names");
+	if (sc->clock_count == -EINVAL) {
+		sc->clock_count = 0;
+	} else if (IS_ERR_VALUE((unsigned long)sc->clock_count)) {
+		dev_err(&pdev->dev, "Failed to get clock names\n");
+		return -EINVAL;
+	}
+
+	sc->clocks = devm_kzalloc(&pdev->dev,
+			sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
+	if (!sc->clocks)
+		return -ENOMEM;
+
+	sc->root_clk_idx = -1;
+
+	sc->root_en = of_property_read_bool(pdev->dev.of_node,
+						"qcom,enable-root-clk");
+	sc->force_root_en = of_property_read_bool(pdev->dev.of_node,
+						"qcom,force-enable-root-clk");
+	for (i = 0; i < sc->clock_count; i++) {
+		const char *clock_name;
+
+		of_property_read_string_index(pdev->dev.of_node, "clock-names",
+					      i, &clock_name);
+		sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
+		if (IS_ERR(sc->clocks[i])) {
+			int rc = PTR_ERR(sc->clocks[i]);
+
+			if (rc != -EPROBE_DEFER)
+				dev_err(&pdev->dev, "Failed to get %s\n",
+					clock_name);
+			return rc;
+		}
+
+		if (!strcmp(clock_name, "core_root_clk"))
+			sc->root_clk_idx = i;
+	}
+
+	if ((sc->root_en || sc->force_root_en) && (sc->root_clk_idx == -1)) {
+		dev_err(&pdev->dev, "Failed to get root clock name\n");
+		return -EINVAL;
+	}
+
+	sc->rdesc.id = atomic_inc_return(&gdsc_count);
+	sc->rdesc.ops = &gdsc_ops;
+	sc->rdesc.type = REGULATOR_VOLTAGE;
+	sc->rdesc.owner = THIS_MODULE;
+	platform_set_drvdata(pdev, sc);
+
+	/*
+	 * Disable HW trigger: collapse/restore occur based on register writes.
+	 * Disable SW override: use the hardware state machine for sequencing.
+	 */
+	regval = readl_relaxed(sc->gdscr);
+	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);
+
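+	/*
+	 * The optional qcom,clk-dis-wait-val property supplies the raw
+	 * CLK_DIS_WAIT field value; shift it into bits [15:12] of the GDSCR.
+	 */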
+	if (!of_property_read_u32(pdev->dev.of_node, "qcom,clk-dis-wait-val",
+				  &clk_dis_wait_val))
+		clk_dis_wait_val = clk_dis_wait_val << 12;
+
+	/* Configure wait time between states. */
+	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
+	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | clk_dis_wait_val;
+	writel_relaxed(regval, sc->gdscr);
+
+	sc->no_status_check_on_disable =
+			of_property_read_bool(pdev->dev.of_node,
+					"qcom,no-status-check-on-disable");
+	retain_mem = of_property_read_bool(pdev->dev.of_node,
+					    "qcom,retain-mem");
+	sc->toggle_mem = !retain_mem;
+	retain_periph = of_property_read_bool(pdev->dev.of_node,
+					    "qcom,retain-periph");
+	sc->toggle_periph = !retain_periph;
+	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
+						"qcom,skip-logic-collapse");
+	support_hw_trigger = of_property_read_bool(pdev->dev.of_node,
+						    "qcom,support-hw-trigger");
+	if (support_hw_trigger) {
+		init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_MODE;
+		init_data->constraints.valid_modes_mask |=
+				REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
+	}
+
+	if (!sc->toggle_logic) {
+		sc->reset_count = of_property_count_strings(pdev->dev.of_node,
+							"reset-names");
+		if (sc->reset_count == -EINVAL) {
+			sc->reset_count = 0;
+		} else if (IS_ERR_VALUE((unsigned long)sc->reset_count)) {
+			dev_err(&pdev->dev, "Failed to get reset reset names\n");
+			return -EINVAL;
+		}
+
+		sc->reset_clocks = devm_kzalloc(&pdev->dev,
+					sizeof(struct reset_control *) *
+					sc->reset_count,
+					GFP_KERNEL);
+		if (!sc->reset_clocks)
+			return -ENOMEM;
+
+		for (i = 0; i < sc->reset_count; i++) {
+			const char *reset_name;
+
+			of_property_read_string_index(pdev->dev.of_node,
+					"reset-names", i, &reset_name);
+			sc->reset_clocks[i] = devm_reset_control_get(&pdev->dev,
+								reset_name);
+			if (IS_ERR(sc->reset_clocks[i])) {
+				int rc = PTR_ERR(sc->reset_clocks[i]);
+
+				if (rc != -EPROBE_DEFER)
+					dev_err(&pdev->dev, "Failed to get %s\n",
+							reset_name);
+				return rc;
+			}
+		}
+
+		regval &= ~SW_COLLAPSE_MASK;
+		writel_relaxed(regval, sc->gdscr);
+
+		ret = poll_gdsc_status(sc, ENABLED);
+		if (ret) {
+			dev_err(&pdev->dev, "%s enable timed out: 0x%x\n",
+				sc->rdesc.name, regval);
+			return ret;
+		}
+	}
+
+	sc->allow_clear = of_property_read_bool(pdev->dev.of_node,
+							"qcom,disallow-clear");
+	sc->allow_clear = !sc->allow_clear;
+
+	for (i = 0; i < sc->clock_count; i++) {
+		if (retain_mem || (regval & PWR_ON_MASK) || !sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
+		else
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
+
+		if (retain_periph || (regval & PWR_ON_MASK) || !sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
+		else
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
+	}
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = sc;
+	reg_config.of_node = pdev->dev.of_node;
+	sc->rdev = regulator_register(&sc->rdesc, &reg_config);
+	if (IS_ERR(sc->rdev)) {
+		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
+			sc->rdesc.name);
+		return PTR_ERR(sc->rdev);
+	}
+
+	return 0;
+}
+
+static int gdsc_remove(struct platform_device *pdev)
+{
+	struct gdsc *sc = platform_get_drvdata(pdev);
+
+	regulator_unregister(sc->rdev);
+	return 0;
+}
+
+static const struct of_device_id gdsc_match_table[] = {
+	{ .compatible = "qcom,gdsc" },
+	{}
+};
+
+static struct platform_driver gdsc_driver = {
+	.probe		= gdsc_probe,
+	.remove		= gdsc_remove,
+	.driver		= {
+		.name		= "gdsc",
+		.of_match_table = gdsc_match_table,
+		.owner		= THIS_MODULE,
+	},
+};
+
+static int __init gdsc_init(void)
+{
+	return platform_driver_register(&gdsc_driver);
+}
+subsys_initcall(gdsc_init);
+
+static void __exit gdsc_exit(void)
+{
+	platform_driver_unregister(&gdsc_driver);
+}
+module_exit(gdsc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM8974 GDSC power rail regulator driver");
diff --git a/drivers/clk/msm/msm-clock-controller.c b/drivers/clk/msm/msm-clock-controller.c
new file mode 100644
index 0000000..82ffb6e
--- /dev/null
+++ b/drivers/clk/msm/msm-clock-controller.c
@@ -0,0 +1,748 @@
+/*
+ * Copyright (c) 2014, 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "msmclock: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <linux/hashtable.h>
+
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/msm-clock-controller.h>
+#include <soc/qcom/clock-rpm.h>
+
+/* Protects list operations */
+static DEFINE_MUTEX(msmclk_lock);
+static LIST_HEAD(msmclk_parser_list);
+static u32 msmclk_debug;
+
+struct hitem {
+	struct hlist_node list;
+	phandle key;
+	void *ptr;
+};
+
+int of_property_count_phandles(struct device_node *np, char *propname)
+{
+	const __be32 *phandle;
+	int size;
+
+	phandle = of_get_property(np, propname, &size);
+	return phandle ? (size / sizeof(*phandle)) : -EINVAL;
+}
+EXPORT_SYMBOL(of_property_count_phandles);
+
+int of_property_read_phandle_index(struct device_node *np, char *propname,
+					int index, phandle *p)
+{
+	const __be32 *phandle;
+	int size;
+
+	phandle = of_get_property(np, propname, &size);
+	if ((!phandle) || (size < sizeof(*phandle) * (index + 1)))
+		return -EINVAL;
+
+	*p = be32_to_cpup(phandle + index);
+	return 0;
+}
+EXPORT_SYMBOL(of_property_read_phandle_index);
+
+static int generic_vdd_parse_regulators(struct device *dev,
+		struct clk_vdd_class *vdd, struct device_node *np)
+{
+	int num_regulators, i, rc;
+	char *name = "qcom,regulators";
+
+	num_regulators = of_property_count_phandles(np, name);
+	if (num_regulators <= 0) {
+		dt_prop_err(np, name, "missing dt property\n");
+		return -EINVAL;
+	}
+
+	vdd->regulator = devm_kzalloc(dev,
+				sizeof(*vdd->regulator) * num_regulators,
+				GFP_KERNEL);
+	if (!vdd->regulator) {
+		dt_err(np, "memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_regulators; i++) {
+		phandle p;
+
+		rc = of_property_read_phandle_index(np, name, i, &p);
+		if (rc) {
+			dt_prop_err(np, name, "unable to read phandle\n");
+			return rc;
+		}
+
+		vdd->regulator[i] = msmclk_parse_phandle(dev, p);
+		if (IS_ERR(vdd->regulator[i])) {
+			dt_prop_err(np, name, "hashtable lookup failed\n");
+			return PTR_ERR(vdd->regulator[i]);
+		}
+	}
+
+	vdd->num_regulators = num_regulators;
+	return 0;
+}
+
+static int generic_vdd_parse_levels(struct device *dev,
+		struct clk_vdd_class *vdd, struct device_node *np)
+{
+	int len, rc;
+	char *name = "qcom,uV-levels";
+
+	if (!of_find_property(np, name, &len)) {
+		dt_prop_err(np, name, "missing dt property\n");
+		return -EINVAL;
+	}
+
+	len /= sizeof(u32);
+	if (len % vdd->num_regulators) {
+		dt_err(np, "mismatch beween qcom,uV-levels and qcom,regulators dt properties\n");
+		return -EINVAL;
+	}
+
+	vdd->num_levels = len / vdd->num_regulators;
+	vdd->vdd_uv = devm_kzalloc(dev, len * sizeof(*vdd->vdd_uv),
+						GFP_KERNEL);
+	vdd->level_votes = devm_kzalloc(dev,
+				vdd->num_levels * sizeof(*vdd->level_votes),
+				GFP_KERNEL);
+
+	if (!vdd->vdd_uv || !vdd->level_votes) {
+		dt_err(np, "memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(np, name, vdd->vdd_uv,
+					vdd->num_levels * vdd->num_regulators);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read u32 array\n");
+		return -EINVAL;
+	}
+
+	/* Optional Property */
+	name = "qcom,uA-levels";
+	if (!of_find_property(np, name, &len))
+		return 0;
+
+	len /= sizeof(u32);
+	if (len / vdd->num_regulators != vdd->num_levels) {
+		dt_err(np, "size of qcom,uA-levels and qcom,uV-levels must match\n");
+		return -EINVAL;
+	}
+
+	vdd->vdd_ua = devm_kzalloc(dev, len * sizeof(*vdd->vdd_ua),
+						GFP_KERNEL);
+	if (!vdd->vdd_ua)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(np, name, vdd->vdd_ua,
+					vdd->num_levels * vdd->num_regulators);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read u32 array\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void *simple_vdd_class_dt_parser(struct device *dev,
+			struct device_node *np)
+{
+	struct clk_vdd_class *vdd;
+	int rc = 0;
+
+	vdd = devm_kzalloc(dev, sizeof(*vdd), GFP_KERNEL);
+	if (!vdd)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&vdd->lock);
+	vdd->class_name = np->name;
+
+	rc = generic_vdd_parse_regulators(dev, vdd, np);
+	rc |= generic_vdd_parse_levels(dev, vdd, np);
+	if (rc) {
+		dt_err(np, "unable to read vdd_class\n");
+		return ERR_PTR(rc);
+	}
+
+	return vdd;
+}
+MSMCLK_PARSER(simple_vdd_class_dt_parser, "qcom,simple-vdd-class", 0);
+
+static int generic_clk_parse_parents(struct device *dev, struct clk *c,
+					struct device_node *np)
+{
+	int rc;
+	phandle p;
+	char *name = "qcom,parent";
+
+	/* This property is optional */
+	if (!of_find_property(np, name, NULL))
+		return 0;
+
+	rc = of_property_read_phandle_index(np, name, 0, &p);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read phandle\n");
+		return rc;
+	}
+
+	c->parent = msmclk_parse_phandle(dev, p);
+	if (IS_ERR(c->parent)) {
+		dt_prop_err(np, name, "hashtable lookup failed\n");
+		return PTR_ERR(c->parent);
+	}
+
+	return 0;
+}
+
+static int generic_clk_parse_vdd(struct device *dev, struct clk *c,
+					struct device_node *np)
+{
+	phandle p;
+	int rc;
+	char *name = "qcom,supply-group";
+
+	/* This property is optional */
+	if (!of_find_property(np, name, NULL))
+		return 0;
+
+	rc = of_property_read_phandle_index(np, name, 0, &p);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read phandle\n");
+		return rc;
+	}
+
+	c->vdd_class = msmclk_parse_phandle(dev, p);
+	if (IS_ERR(c->vdd_class)) {
+		dt_prop_err(np, name, "hashtable lookup failed\n");
+		return PTR_ERR(c->vdd_class);
+	}
+
+	return 0;
+}
+
+static int generic_clk_parse_flags(struct device *dev, struct clk *c,
+						struct device_node *np)
+{
+	int rc;
+	char *name = "qcom,clk-flags";
+
+	/* This property is optional */
+	if (!of_find_property(np, name, NULL))
+		return 0;
+
+	rc = of_property_read_u32(np, name, &c->flags);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read u32\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int generic_clk_parse_fmax(struct device *dev, struct clk *c,
+					struct device_node *np)
+{
+	u32 prop_len, i;
+	int rc;
+	char *name = "qcom,clk-fmax";
+
+	/* This property is optional */
+	if (!of_find_property(np, name, &prop_len))
+		return 0;
+
+	if (!c->vdd_class) {
+		dt_err(np, "both qcom,clk-fmax and qcom,supply-group must be defined\n");
+		return -EINVAL;
+	}
+
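+	/*
+	 * qcom,clk-fmax is a flat array of <vdd-level frequency> pairs sorted
+	 * by level, e.g. <1 19200000 2 200000000> (illustrative values).
+	 */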
+	prop_len /= sizeof(u32);
+	if (prop_len % 2) {
+		dt_prop_err(np, name, "bad length\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * The value at prop_len - 2 is the index of the last entry in the
+	 * fmax array.
+	 */
+	rc = of_property_read_u32_index(np, name, prop_len - 2, &c->num_fmax);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read u32\n");
+		return rc;
+	}
+	c->num_fmax += 1;
+
+	c->fmax = devm_kzalloc(dev, sizeof(*c->fmax) * c->num_fmax, GFP_KERNEL);
+	if (!c->fmax)
+		return -ENOMEM;
+
+	for (i = 0; i < prop_len; i += 2) {
+		u32 level, value;
+
+		rc = of_property_read_u32_index(np, name, i, &level);
+		if (rc) {
+			dt_prop_err(np, name, "unable to read u32\n");
+			return rc;
+		}
+
+		rc = of_property_read_u32_index(np, name, i + 1, &value);
+		if (rc) {
+			dt_prop_err(np, name, "unable to read u32\n");
+			return rc;
+		}
+
+		if (level >= c->num_fmax) {
+			dt_prop_err(np, name, "must be sorted\n");
+			return -EINVAL;
+		}
+		c->fmax[level] = value;
+	}
+
+	return 0;
+}
+
+static int generic_clk_add_lookup_tbl_entry(struct device *dev, struct clk *c)
+{
+	struct msmclk_data *drv = dev_get_drvdata(dev);
+	struct clk_lookup *cl;
+
+	if (drv->clk_tbl_size >= drv->max_clk_tbl_size) {
+		dev_err(dev, "child node count should be > clock_count?\n");
+		return -EINVAL;
+	}
+
+	cl = drv->clk_tbl + drv->clk_tbl_size;
+	cl->clk = c;
+	drv->clk_tbl_size++;
+	return 0;
+}
+
+static int generic_clk_parse_depends(struct device *dev, struct clk *c,
+						struct device_node *np)
+{
+	phandle p;
+	int rc;
+	char *name = "qcom,depends";
+
+	/* This property is optional */
+	if (!of_find_property(np, name, NULL))
+		return 0;
+
+	rc = of_property_read_phandle_index(np, name, 0, &p);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read phandle\n");
+		return rc;
+	}
+
+	c->depends = msmclk_parse_phandle(dev, p);
+	if (IS_ERR(c->depends)) {
+		dt_prop_err(np, name, "hashtable lookup failed\n");
+		return PTR_ERR(c->depends);
+	}
+
+	return 0;
+}
+
+static int generic_clk_parse_init_config(struct device *dev, struct clk *c,
+						struct device_node *np)
+{
+	int rc;
+	u32 temp;
+	char *name = "qcom,always-on";
+
+	c->always_on = of_property_read_bool(np, name);
+
+	name = "qcom,config-rate";
+	/* This property is optional */
+	if (!of_find_property(np, name, NULL))
+		return 0;
+
+	rc = of_property_read_u32(np, name, &temp);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read u32\n");
+		return rc;
+	}
+	c->init_rate = temp;
+
+	return rc;
+}
+
+void *msmclk_generic_clk_init(struct device *dev, struct device_node *np,
+				struct clk *c)
+{
+	int rc;
+
+	/* CLK_INIT macro */
+	spin_lock_init(&c->lock);
+	mutex_init(&c->prepare_lock);
+	INIT_LIST_HEAD(&c->children);
+	INIT_LIST_HEAD(&c->siblings);
+	INIT_LIST_HEAD(&c->list);
+	c->dbg_name = np->name;
+
+	rc = generic_clk_add_lookup_tbl_entry(dev, c);
+	rc |= generic_clk_parse_flags(dev, c, np);
+	rc |= generic_clk_parse_parents(dev, c, np);
+	rc |= generic_clk_parse_vdd(dev, c, np);
+	rc |= generic_clk_parse_fmax(dev, c, np);
+	rc |= generic_clk_parse_depends(dev, c, np);
+	rc |= generic_clk_parse_init_config(dev, c, np);
+
+	if (rc) {
+		dt_err(np, "unable to read clk\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return c;
+}
+
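+/*
+ * Illustrative (hypothetical) child node consumed by the generic parsers
+ * above:
+ *
+ *	example_clk: qcom,example-clk {
+ *		compatible = "qcom,...";
+ *		qcom,supply-group = <&example_vdd_class>;
+ *		qcom,clk-fmax = <1 19200000 2 200000000>;
+ *		qcom,always-on;
+ *	};
+ */
+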
+static struct msmclk_parser *msmclk_parser_lookup(struct device_node *np)
+{
+	struct msmclk_parser *item;
+
+	list_for_each_entry(item, &msmclk_parser_list, list) {
+		if (of_device_is_compatible(np, item->compatible))
+			return item;
+	}
+	return NULL;
+}
+void msmclk_parser_register(struct msmclk_parser *item)
+{
+	mutex_lock(&msmclk_lock);
+	list_add(&item->list, &msmclk_parser_list);
+	mutex_unlock(&msmclk_lock);
+}
+
+static int msmclk_htable_add(struct device *dev, void *result, phandle key);
+
+void *msmclk_parse_dt_node(struct device *dev, struct device_node *np)
+{
+	struct msmclk_parser *parser;
+	phandle key;
+	void *result;
+	int rc;
+
+	key = np->phandle;
+	result = msmclk_lookup_phandle(dev, key);
+	if (!result)
+		return ERR_PTR(-EINVAL);
+
+	if (!of_device_is_available(np)) {
+		dt_err(np, "node is disabled\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	parser = msmclk_parser_lookup(np);
+	if (IS_ERR_OR_NULL(parser)) {
+		dt_err(np, "no parser found\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* This may return -EPROBE_DEFER */
+	result = parser->parsedt(dev, np);
+	if (IS_ERR(result)) {
+		dt_err(np, "parsedt failed");
+		return result;
+	}
+
+	rc = msmclk_htable_add(dev, result, key);
+	if (rc)
+		return ERR_PTR(rc);
+
+	return result;
+}
+
+void *msmclk_parse_phandle(struct device *dev, phandle key)
+{
+	struct hitem *item;
+	struct device_node *np;
+	struct msmclk_data *drv = dev_get_drvdata(dev);
+
+	/*
+	 * the default phandle value is 0. Since hashtable keys must
+	 * be unique, reject the default value.
+	 */
+	if (!key)
+		return ERR_PTR(-EINVAL);
+
+	hash_for_each_possible(drv->htable, item, list, key) {
+		if (item->key == key)
+			return item->ptr;
+	}
+
+	np = of_find_node_by_phandle(key);
+	if (!np)
+		return ERR_PTR(-EINVAL);
+
+	return msmclk_parse_dt_node(dev, np);
+}
+EXPORT_SYMBOL(msmclk_parse_phandle);
+
+void *msmclk_lookup_phandle(struct device *dev, phandle key)
+{
+	struct hitem *item;
+	struct msmclk_data *drv = dev_get_drvdata(dev);
+
+	hash_for_each_possible(drv->htable, item, list, key) {
+		if (item->key == key)
+			return item->ptr;
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL(msmclk_lookup_phandle);
+
+static int msmclk_htable_add(struct device *dev, void *data, phandle key)
+{
+	struct hitem *item;
+	struct msmclk_data *drv = dev_get_drvdata(dev);
+
+	/*
+	 * If there are no phandle references to a node, key == 0. However, if
+	 * there is a second node like this, both will have key == 0. This
+	 * violates the requirement that hashtable keys be unique. Skip it.
+	 */
+	if (!key)
+		return 0;
+
+	if (!IS_ERR(msmclk_lookup_phandle(dev, key))) {
+		struct device_node *np = of_find_node_by_phandle(key);
+
+		dev_err(dev, "attempt to add duplicate entry for %s\n",
+				np ? np->name : "NULL");
+		return -EINVAL;
+	}
+
+	item = devm_kzalloc(dev, sizeof(*item), GFP_KERNEL);
+	if (!item)
+		return -ENOMEM;
+
+	INIT_HLIST_NODE(&item->list);
+	item->key = key;
+	item->ptr = data;
+
+	hash_add(drv->htable, &item->list, key);
+	return 0;
+}
+
+/*
+ * Currently, regulators are the only elements capable of probe deferral.
+ * Check them first to handle probe deferral efficiently.
+ */
+static int get_ext_regulators(struct device *dev)
+{
+	int num_strings, i, rc;
+	struct device_node *np;
+	void *item;
+	char *name = "qcom,regulator-names";
+
+	np = dev->of_node;
+	/* This property is optional */
+	num_strings = of_property_count_strings(np, name);
+	if (num_strings <= 0)
+		return 0;
+
+	for (i = 0; i < num_strings; i++) {
+		const char *str;
+		char buf[50];
+		phandle key;
+
+		rc = of_property_read_string_index(np, name, i, &str);
+		if (rc) {
+			dt_prop_err(np, name, "unable to read string\n");
+			return rc;
+		}
+
+		item = devm_regulator_get(dev, str);
+		if (IS_ERR(item)) {
+			dev_err(dev, "Failed to get regulator: %s\n", str);
+			return PTR_ERR(item);
+		}
+
+		snprintf(buf, ARRAY_SIZE(buf), "%s-supply", str);
+		rc = of_property_read_phandle_index(np, buf, 0, &key);
+		if (rc) {
+			dt_prop_err(np, buf, "unable to read phandle\n");
+			return rc;
+		}
+
+		rc = msmclk_htable_add(dev, item, key);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+static struct clk *msmclk_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+	phandle key;
+	struct clk *c = ERR_PTR(-ENOENT);
+
+	key = clkspec->args[0];
+	c = msmclk_lookup_phandle(data, key);
+
+	if (!IS_ERR(c) && !(c->flags & CLKFLAG_INIT_DONE))
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return c;
+}
+
+static void *regulator_dt_parser(struct device *dev, struct device_node *np)
+{
+	dt_err(np, "regulators should be handled in probe()");
+	return ERR_PTR(-EINVAL);
+}
+MSMCLK_PARSER(regulator_dt_parser, "qcom,rpm-smd-regulator", 0);
+
+static void *msmclk_dt_parser(struct device *dev, struct device_node *np)
+{
+	dt_err(np, "calling into other clock controllers isn't allowed");
+	return ERR_PTR(-EINVAL);
+}
+MSMCLK_PARSER(msmclk_dt_parser, "qcom,msm-clock-controller", 0);
+
+static struct msmclk_data *msmclk_drv_init(struct device *dev)
+{
+	struct msmclk_data *drv;
+	size_t size;
+
+	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return ERR_PTR(-ENOMEM);
+
+	dev_set_drvdata(dev, drv);
+
+	drv->dev = dev;
+	INIT_LIST_HEAD(&drv->list);
+
+	/* This overestimates size */
+	drv->max_clk_tbl_size = of_get_child_count(dev->of_node);
+	size = sizeof(*drv->clk_tbl) * drv->max_clk_tbl_size;
+	drv->clk_tbl = devm_kzalloc(dev, size, GFP_KERNEL);
+	if (!drv->clk_tbl)
+		return ERR_PTR(-ENOMEM);
+
+	hash_init(drv->htable);
+	return drv;
+}
+
+static int msmclk_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct device *dev;
+	struct msmclk_data *drv;
+	struct device_node *child;
+	void *result;
+	int rc = 0;
+
+	dev = &pdev->dev;
+	drv = msmclk_drv_init(dev);
+	if (IS_ERR(drv))
+		return PTR_ERR(drv);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc-base");
+	if (!res) {
+		dt_err(dev->of_node, "missing cc-base\n");
+		return -EINVAL;
+	}
+	drv->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drv->base) {
+		dev_err(dev, "ioremap failed for drv->base\n");
+		return -ENOMEM;
+	}
+	rc = msmclk_htable_add(dev, drv, dev->of_node->phandle);
+	if (rc)
+		return rc;
+
+	rc = enable_rpm_scaling();
+	if (rc)
+		return rc;
+
+	rc = get_ext_regulators(dev);
+	if (rc)
+		return rc;
+
+	/*
+	 * Returning -EPROBE_DEFER from this loop is inefficient because any
+	 * parsing work already completed would be thrown away and redone.
+	 */
+	for_each_available_child_of_node(dev->of_node, child) {
+		result = msmclk_parse_dt_node(dev, child);
+		if (!IS_ERR(result))
+			continue;
+		if (!msmclk_debug)
+			return PTR_ERR(result);
+		/*
+		 * Parse and report all errors instead of immediately
+		 * exiting. Return the first error code.
+		 */
+		if (!rc)
+			rc = PTR_ERR(result);
+	}
+	if (rc)
+		return rc;
+
+	rc = of_clk_add_provider(dev->of_node, msmclk_clk_get, dev);
+	if (rc) {
+		dev_err(dev, "of_clk_add_provider failed\n");
+		return rc;
+	}
+
+	/*
+	 * We cannot fail after registering the clocks: consumers may already
+	 * hold references, and bailing out would free memory still in use.
+	 */
+	WARN_ON(msm_clock_register(drv->clk_tbl, drv->clk_tbl_size));
+	dev_info(dev, "registered clocks\n");
+
+	return 0;
+}
+
+static const struct of_device_id msmclk_match_table[] = {
+	{.compatible = "qcom,msm-clock-controller"},
+	{}
+};
+
+static struct platform_driver msmclk_driver = {
+	.probe = msmclk_probe,
+	.driver = {
+		.name =  "msm-clock-controller",
+		.of_match_table = msmclk_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static bool initialized;
+int __init msmclk_init(void)
+{
+	int rc;
+
+	if (initialized)
+		return 0;
+
+	rc = platform_driver_register(&msmclk_driver);
+	if (rc)
+		return rc;
+	initialized = true;
+	return rc;
+}
+arch_initcall(msmclk_init);
diff --git a/drivers/clk/msm/reset.c b/drivers/clk/msm/reset.c
new file mode 100644
index 0000000..0f47fd6
--- /dev/null
+++ b/drivers/clk/msm/reset.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/reset-controller.h>
+
+#include "reset.h"
+
+static int msm_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	rcdev->ops->assert(rcdev, id);
+	udelay(1);
+	rcdev->ops->deassert(rcdev, id);
+	return 0;
+}
+
+static int
+msm_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	struct msm_reset_controller *rst;
+	const struct msm_reset_map *map;
+	u32 regval;
+
+	rst = to_msm_reset_controller(rcdev);
+	map = &rst->reset_map[id];
+
+	regval = readl_relaxed(rst->base + map->reg);
+	regval |= BIT(map->bit);
+	writel_relaxed(regval, rst->base + map->reg);
+
+	/* Make sure the reset is asserted */
+	mb();
+
+	return 0;
+}
+
+static int
+msm_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	struct msm_reset_controller *rst;
+	const struct msm_reset_map *map;
+	u32 regval;
+
+	rst = to_msm_reset_controller(rcdev);
+	map = &rst->reset_map[id];
+
+	regval = readl_relaxed(rst->base + map->reg);
+	regval &= ~BIT(map->bit);
+	writel_relaxed(regval, rst->base + map->reg);
+
+	/* Make sure the reset is de-asserted */
+	mb();
+
+	return 0;
+}
+
+struct reset_control_ops msm_reset_ops = {
+	.reset = msm_reset,
+	.assert = msm_reset_assert,
+	.deassert = msm_reset_deassert,
+};
+EXPORT_SYMBOL(msm_reset_ops);
+
+int msm_reset_controller_register(struct platform_device *pdev,
+	const struct msm_reset_map *map, unsigned int num_resets,
+	void __iomem *virt_base)
+{
+	struct msm_reset_controller *reset;
+	int ret = 0;
+
+	reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
+	if (!reset)
+		return -ENOMEM;
+
+	reset->rcdev.of_node = pdev->dev.of_node;
+	reset->rcdev.ops = &msm_reset_ops;
+	reset->rcdev.owner = pdev->dev.driver->owner;
+	reset->rcdev.nr_resets = num_resets;
+	reset->reset_map = map;
+	reset->base = virt_base;
+
+	ret = reset_controller_register(&reset->rcdev);
+	if (ret)
+		dev_err(&pdev->dev, "Failed to register with reset controller\n");
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_reset_controller_register);
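+
+/*
+ * Illustrative usage sketch from a clock controller probe (the names below
+ * are hypothetical and not part of this snapshot):
+ *
+ *	static const struct msm_reset_map example_resets[] = {
+ *		[EXAMPLE_USB_BCR] = { 0x41000, 0 },
+ *	};
+ *
+ *	rc = msm_reset_controller_register(pdev, example_resets,
+ *				ARRAY_SIZE(example_resets), virt_base);
+ */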
diff --git a/drivers/clk/msm/reset.h b/drivers/clk/msm/reset.h
new file mode 100644
index 0000000..9e3b2fb
--- /dev/null
+++ b/drivers/clk/msm/reset.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRIVERS_CLK_RESET_H
+#define __DRIVERS_CLK_RESET_H
+
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+struct msm_reset_map {
+	unsigned int reg;
+	u8 bit;
+};
+
+struct msm_reset_controller {
+	const struct msm_reset_map *reset_map;
+	struct reset_controller_dev rcdev;
+	void __iomem  *base;
+};
+
+#define to_msm_reset_controller(r) \
+	container_of(r, struct msm_reset_controller, rcdev)
+
+extern struct reset_control_ops msm_reset_ops;
+
+int msm_reset_controller_register(struct platform_device *pdev,
+		const struct msm_reset_map *map, unsigned int nr_resets,
+		void __iomem *virt_base);
+#endif
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 0bff951..5452ad8 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -45,7 +45,11 @@
 #include <asm/cpuidle.h>
 #include "lpm-levels.h"
 #include <trace/events/power.h>
+#if defined(CONFIG_COMMON_CLK)
 #include "../clk/clk.h"
+#elif defined(CONFIG_COMMON_CLK_MSM)
+#include "../../drivers/clk/msm/clock.h"
+#endif /* CONFIG_COMMON_CLK */
 #define CREATE_TRACE_POINTS
 #include <trace/events/trace_msm_low_power.h>
 
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index a52b65a..793255d 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -1004,6 +1004,12 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
 struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
 				void *data, const struct file_operations *fops);
 #endif
+#else
+struct of_device_id;
+
+static inline void __init of_clk_init(const struct of_device_id *matches)
+{
+}
 
 #endif /* CONFIG_COMMON_CLK */
 #endif /* CLK_PROVIDER_H */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 094b152..eaaad7d 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -20,6 +20,8 @@ struct device;
 
 struct clk;
 
+#ifdef CONFIG_COMMON_CLK
+
 /**
  * DOC: clk notifier callback types
  *
@@ -76,8 +78,6 @@ struct clk_notifier_data {
 	unsigned long		new_rate;
 };
 
-#ifdef CONFIG_COMMON_CLK
-
 /**
  * clk_notifier_register: register a clock rate-change notifier callback
  * @clk: clock whose rate we are interested in
@@ -524,7 +524,7 @@ static inline void clk_disable_unprepare(struct clk *clk)
 struct device_node;
 struct of_phandle_args;
 
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+#if defined(CONFIG_OF)
 struct clk *of_clk_get(struct device_node *np, int index);
 struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
diff --git a/include/linux/clk/gdsc.h b/include/linux/clk/gdsc.h
new file mode 100644
index 0000000..b5a03ac
--- /dev/null
+++ b/include/linux/clk/gdsc.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __GDSC_H
+#define __GDSC_H
+
+#include <linux/regulator/consumer.h>
+
+/* Allow the clock memories to be turned off */
+void gdsc_allow_clear_retention(struct regulator *regulator);
+
+#endif
diff --git a/include/linux/clk/msm-clk-provider.h b/include/linux/clk/msm-clk-provider.h
new file mode 100644
index 0000000..2bc6d18
--- /dev/null
+++ b/include/linux/clk/msm-clk-provider.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_CLK_PROVIDER_H
+#define __MSM_CLK_PROVIDER_H
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/seq_file.h>
+#include <linux/clk/msm-clk.h>
+
+#if defined(CONFIG_COMMON_CLK_MSM)
+/*
+ * Bit manipulation macros
+ */
+#define BM(msb, lsb)	(((((uint32_t)-1) << (31-msb)) >> (31-msb+lsb)) << lsb)
+#define BVAL(msb, lsb, val)	(((val) << lsb) & BM(msb, lsb))
+
+/*
+ * Halt/Status Checking Mode Macros
+ */
+#define HALT		0	/* Bit pol: 1 = halted */
+#define NOCHECK		1	/* No bit to check, do nothing */
+#define HALT_VOTED	2	/* Bit pol: 1 = halted; delay on disable */
+#define ENABLE		3	/* Bit pol: 1 = running */
+#define ENABLE_VOTED	4	/* Bit pol: 1 = running; delay on disable */
+#define DELAY		5	/* No bit to check, just delay */
+
+struct clk_register_data {
+	char *name;
+	u32 offset;
+};
+#ifdef CONFIG_DEBUG_FS
+void clk_debug_print_hw(struct clk *clk, struct seq_file *f);
+#else
+static inline void clk_debug_print_hw(struct clk *clk, struct seq_file *f) {}
+#endif
+
+#define CLK_WARN(clk, cond, fmt, ...) do {				\
+	clk_debug_print_hw(clk, NULL);					\
+	WARN(cond, "%s: " fmt, clk_name(clk), ##__VA_ARGS__);		\
+} while (0)
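+/* e.g. (illustrative): CLK_WARN(c, c->count == 0, "unbalanced disable\n"); */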
+
+/**
+ * struct clk_vdd_class - Voltage scaling class
+ * @class_name: name of the class
+ * @regulator: array of regulators.
+ * @num_regulators: size of regulator array. Standard regulator APIs will be
+ *			used if this field > 0.
+ * @set_vdd: function to call when applying a new voltage setting.
+ * @vdd_uv: sorted 2D array of legal voltage settings. Indexed by level, then
+ *		regulator.
+ * @vdd_ua: sorted 2D array of legal current settings. Indexed by level, then
+ *		regulator. Optional parameter.
+ * @level_votes: array of votes for each level.
+ * @num_levels: specifies the size of level_votes array.
+ * @skip_handoff: do not vote for the max possible voltage during init
+ * @use_max_uV: use INT_MAX for max_uV when calling regulator_set_voltage.
+ *           This is useful when different vdd_class instances share the same
+ *           regulator.
+ * @cur_level: the currently set voltage level
+ * @lock: lock to protect this struct
+ */
+struct clk_vdd_class {
+	const char *class_name;
+	struct regulator **regulator;
+	int num_regulators;
+	int (*set_vdd)(struct clk_vdd_class *v_class, int level);
+	int *vdd_uv;
+	int *vdd_ua;
+	int *level_votes;
+	int num_levels;
+	bool skip_handoff;
+	bool use_max_uV;
+	unsigned long cur_level;
+	struct mutex lock;
+};
+
+#define DEFINE_VDD_CLASS(_name, _set_vdd, _num_levels) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.set_vdd = _set_vdd, \
+		.level_votes = (int [_num_levels]) {}, \
+		.num_levels = _num_levels, \
+		.cur_level = _num_levels, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
+
+#define DEFINE_VDD_REGULATORS(_name, _num_levels, _num_regulators, _vdd_uv, \
+	 _vdd_ua) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.vdd_uv = _vdd_uv, \
+		.vdd_ua = _vdd_ua, \
+		.regulator = (struct regulator * [_num_regulators]) {}, \
+		.num_regulators = _num_regulators, \
+		.level_votes = (int [_num_levels]) {}, \
+		.num_levels = _num_levels, \
+		.cur_level = _num_levels, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
+
+#define DEFINE_VDD_REGS_INIT(_name, _num_regulators) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.regulator = (struct regulator * [_num_regulators]) {}, \
+		.num_regulators = _num_regulators, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
+
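+/*
+ * Illustrative (hypothetical) use of DEFINE_VDD_REGULATORS: one regulator,
+ * three voltage levels, vdd_uv sorted by level (num_levels * num_regulators
+ * entries), and no current table:
+ *
+ *	static int vdd_dig_uv[] = { 0, 1050000, 1150000 };
+ *	static DEFINE_VDD_REGULATORS(vdd_dig, 3, 1, vdd_dig_uv, NULL);
+ */
+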
+enum handoff {
+	HANDOFF_ENABLED_CLK,
+	HANDOFF_DISABLED_CLK,
+};
+
+struct clk_ops {
+	int (*prepare)(struct clk *clk);
+	int (*enable)(struct clk *clk);
+	void (*disable)(struct clk *clk);
+	void (*unprepare)(struct clk *clk);
+	void (*enable_hwcg)(struct clk *clk);
+	void (*disable_hwcg)(struct clk *clk);
+	int (*in_hwcg_mode)(struct clk *clk);
+	enum handoff (*handoff)(struct clk *clk);
+	int (*reset)(struct clk *clk, enum clk_reset_action action);
+	int (*pre_set_rate)(struct clk *clk, unsigned long new_rate);
+	int (*set_rate)(struct clk *clk, unsigned long rate);
+	void (*post_set_rate)(struct clk *clk, unsigned long old_rate);
+	int (*set_max_rate)(struct clk *clk, unsigned long rate);
+	int (*set_flags)(struct clk *clk, unsigned long flags);
+	int (*set_duty_cycle)(struct clk *clk, u32 numerator, u32 denominator);
+	unsigned long (*get_rate)(struct clk *clk);
+	long (*list_rate)(struct clk *clk, unsigned long n);
+	int (*is_enabled)(struct clk *clk);
+	long (*round_rate)(struct clk *clk, unsigned long rate);
+	int (*set_parent)(struct clk *clk, struct clk *parent);
+	struct clk *(*get_parent)(struct clk *clk);
+	bool (*is_local)(struct clk *clk);
+	void __iomem *(*list_registers)(struct clk *clk, int n,
+				struct clk_register_data **regs, u32 *size);
+};
+
+/**
+ * struct clk
+ * @prepare_count: prepare refcount
+ * @prepare_lock: protects clk_prepare()/clk_unprepare() path and @prepare_count
+ * @count: enable refcount
+ * @lock: protects clk_enable()/clk_disable() path and @count
+ * @depends: non-direct parent of clock to enable when this clock is enabled
+ * @vdd_class: voltage scaling requirement class
+ * @fmax: maximum frequency in Hz supported at each voltage level
+ * @parent: the current source of this clock
+ * @opp_table_populated: tracks if the OPP table of this clock has been filled
+ */
+struct clk {
+	uint32_t flags;
+	const struct clk_ops *ops;
+	const char *dbg_name;
+	struct clk *depends;
+	struct clk_vdd_class *vdd_class;
+	unsigned long *fmax;
+	int num_fmax;
+	unsigned long rate;
+	struct clk *parent;
+	struct clk_src *parents;
+	unsigned int num_parents;
+
+	struct list_head children;
+	struct list_head siblings;
+	struct list_head list;
+
+	unsigned long count;
+	unsigned long notifier_count;
+	spinlock_t lock;
+	unsigned long prepare_count;
+	struct mutex prepare_lock;
+
+	unsigned long init_rate;
+	bool always_on;
+	bool opp_table_populated;
+
+	struct dentry *clk_dir;
+};
+
+#define CLK_INIT(name) \
+	.lock = __SPIN_LOCK_UNLOCKED((name).lock), \
+	.prepare_lock = __MUTEX_INITIALIZER((name).prepare_lock), \
+	.children = LIST_HEAD_INIT((name).children), \
+	.siblings = LIST_HEAD_INIT((name).siblings), \
+	.list = LIST_HEAD_INIT((name).list)
+
+bool is_rate_valid(struct clk *clk, unsigned long rate);
+int vote_vdd_level(struct clk_vdd_class *vdd_class, int level);
+int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level);
+int __clk_pre_reparent(struct clk *c, struct clk *new, unsigned long *flags);
+void __clk_post_reparent(struct clk *c, struct clk *old, unsigned long *flags);
+
+/* Register clocks with the MSM clock driver */
+int msm_clock_register(struct clk_lookup *table, size_t size);
+int of_msm_clock_register(struct device_node *np, struct clk_lookup *table,
+				size_t size);
+
+int clock_rcgwr_init(struct platform_device *pdev);
+int clock_rcgwr_disable(struct platform_device *pdev);
+
+extern struct clk dummy_clk;
+extern const  struct clk_ops clk_ops_dummy;
+
+#define CLK_DUMMY(clk_name, clk_id, clk_dev, flags) { \
+	.con_id = clk_name, \
+	.dev_id = clk_dev, \
+	.clk = &dummy_clk, \
+	}
+
+#define DEFINE_CLK_DUMMY(name, _rate) \
+	static struct fixed_clk name = { \
+		.c = { \
+			.dbg_name = #name, \
+			.rate = _rate, \
+			.ops = &clk_ops_dummy, \
+			CLK_INIT(name.c), \
+		}, \
+	}
+
+#define CLK_LOOKUP(con, c, dev) { .con_id = con, .clk = &c, .dev_id = dev }
+#define CLK_LOOKUP_OF(con, _c, dev) { .con_id = con, .clk = &(&_c)->c, \
+				      .dev_id = dev, .of_idx = clk_##_c }
+#define CLK_LIST(_c) { .clk = &(&_c)->c, .of_idx = clk_##_c }
+
+static inline bool is_better_rate(unsigned long req, unsigned long best,
+				  unsigned long new)
+{
+	if (IS_ERR_VALUE(new))
+		return false;
+
+	return (req <= new && new < best) || (best < req && best < new);
+}
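+/*
+ * For example, with req = 100 and best = 150, new = 120 is better (it meets
+ * the request and is closer); with req = 100 and best = 80, new = 90 is
+ * better (still below the request, but closer to it).
+ */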
+
+extern int of_clk_add_provider(struct device_node *np,
+			struct clk *(*clk_src_get)(struct of_phandle_args *args,
+						   void *data),
+			void *data);
+extern void of_clk_del_provider(struct device_node *np);
+
+static inline const char *clk_name(struct clk *c)
+{
+	if (IS_ERR_OR_NULL(c))
+		return "(null)";
+	return c->dbg_name;
+}
+#endif /* CONFIG_COMMON_CLK_MSM */
+#endif
diff --git a/include/linux/clk/msm-clk.h b/include/linux/clk/msm-clk.h
new file mode 100644
index 0000000..baa8e52
--- /dev/null
+++ b/include/linux/clk/msm-clk.h
@@ -0,0 +1,125 @@
+/* Copyright (c) 2009, 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MACH_CLK_H
+#define __MACH_CLK_H
+
+#include <linux/notifier.h>
+
+#define CLKFLAG_INVERT			0x00000001
+#define CLKFLAG_NOINVERT		0x00000002
+#define CLKFLAG_NONEST			0x00000004
+#define CLKFLAG_NORESET			0x00000008
+#define CLKFLAG_RETAIN_PERIPH		0x00000010
+#define CLKFLAG_NORETAIN_PERIPH		0x00000020
+#define CLKFLAG_RETAIN_MEM		0x00000040
+#define CLKFLAG_NORETAIN_MEM		0x00000080
+#define CLKFLAG_SKIP_HANDOFF		0x00000100
+#define CLKFLAG_MIN			0x00000400
+#define CLKFLAG_MAX			0x00000800
+#define CLKFLAG_INIT_DONE		0x00001000
+#define CLKFLAG_INIT_ERR		0x00002000
+#define CLKFLAG_NO_RATE_CACHE		0x00004000
+#define CLKFLAG_MEASURE			0x00008000
+#define CLKFLAG_EPROBE_DEFER		0x00010000
+#define CLKFLAG_PERIPH_OFF_SET		0x00020000
+#define CLKFLAG_PERIPH_OFF_CLEAR	0x00040000
+
+struct clk_lookup;
+struct clk;
+
+enum clk_reset_action {
+	CLK_RESET_DEASSERT	= 0,
+	CLK_RESET_ASSERT	= 1
+};
+
+struct clk_src {
+	struct clk *src;
+	int sel;
+};
+
+/* Rate is maximum clock rate in Hz */
+int clk_set_max_rate(struct clk *clk, unsigned long rate);
+
+/* Assert/Deassert reset to a hardware block associated with a clock */
+int clk_reset(struct clk *clk, enum clk_reset_action action);
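+/*
+ * Illustrative (hypothetical) block reset toggle built on clk_reset():
+ *
+ *	clk_reset(core_clk, CLK_RESET_ASSERT);
+ *	udelay(1);
+ *	clk_reset(core_clk, CLK_RESET_DEASSERT);
+ */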
+
+/* Set clock-specific configuration parameters */
+int clk_set_flags(struct clk *clk, unsigned long flags);
+
+/* returns the mux selection index associated with a particular parent */
+int parent_to_src_sel(struct clk_src *parents, int num_parents, struct clk *p);
+
+/* returns the mux selection index associated with a particular parent */
+int clk_get_parent_sel(struct clk *c, struct clk *parent);
+
+/**
+ * DOC: clk notifier callback types
+ *
+ * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
+ *     to indicate that the rate change will proceed.  Drivers must
+ *     immediately terminate any operations that will be affected by the
+ *     rate change.  Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
+ *     NOTIFY_STOP or NOTIFY_BAD.
+ *
+ * ABORT_RATE_CHANGE: called if the rate change failed for some reason
+ *     after PRE_RATE_CHANGE.  In this case, all registered notifiers on
+ *     the clk will be called with ABORT_RATE_CHANGE. Callbacks must
+ *     always return NOTIFY_DONE or NOTIFY_OK.
+ *
+ * POST_RATE_CHANGE - called after the clk rate change has successfully
+ *     completed.  Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
+ *
+ */
+#define PRE_RATE_CHANGE			BIT(0)
+#define POST_RATE_CHANGE		BIT(1)
+#define ABORT_RATE_CHANGE		BIT(2)
+
+/**
+ * struct msm_clk_notifier - associate a clk with a notifier
+ * @clk: struct clk * to associate the notifier with
+ * @notifier_head: a blocking_notifier_head for this clk
+ * @node: linked list pointers
+ *
+ * A list of struct msm_clk_notifier is maintained by the notifier code.
+ * An entry is created whenever code registers the first notifier on a
+ * particular @clk.  Future notifiers on that @clk are added to the
+ * @notifier_head.
+ */
+struct msm_clk_notifier {
+	struct clk			*clk;
+	struct srcu_notifier_head	notifier_head;
+	struct list_head		node;
+};
+
+/**
+ * struct msm_clk_notifier_data - rate data to pass to the notifier callback
+ * @clk: struct clk * being changed
+ * @old_rate: previous rate of this clk
+ * @new_rate: new rate of this clk
+ *
+ * For a pre-notifier, old_rate is the clk's rate before this rate
+ * change, and new_rate is what the rate will be in the future.  For a
+ * post-notifier, old_rate and new_rate are both set to the clk's
+ * current rate (this was done to optimize the implementation).
+ */
+struct msm_clk_notifier_data {
+	struct clk		*clk;
+	unsigned long		old_rate;
+	unsigned long		new_rate;
+};
+
+int msm_clk_notif_register(struct clk *clk, struct notifier_block *nb);
+
+int msm_clk_notif_unregister(struct clk *clk, struct notifier_block *nb);
+
+#endif
diff --git a/include/linux/clk/msm-clock-generic.h b/include/linux/clk/msm-clock-generic.h
new file mode 100644
index 0000000..010a37f
--- /dev/null
+++ b/include/linux/clk/msm-clock-generic.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCK_GENERIC_H
+#define __MSM_CLOCK_GENERIC_H
+
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/of.h>
+
+/**
+ * struct fixed_clk - fixed rate clock
+ * @c: clk
+ */
+struct fixed_clk {
+	struct clk c;
+};
+
+/* ==================== Mux clock ==================== */
+
+struct mux_clk;
+
+struct clk_mux_ops {
+	int (*set_mux_sel)(struct mux_clk *clk, int sel);
+	int (*get_mux_sel)(struct mux_clk *clk);
+
+	/* Optional */
+	bool (*is_enabled)(struct mux_clk *clk);
+	int (*enable)(struct mux_clk *clk);
+	void (*disable)(struct mux_clk *clk);
+	void __iomem *(*list_registers)(struct mux_clk *clk, int n,
+				struct clk_register_data **regs, u32 *size);
+};
+
+#define MUX_SRC_LIST(...) \
+	.parents = (struct clk_src[]){__VA_ARGS__}, \
+	.num_parents = ARRAY_SIZE(((struct clk_src[]){__VA_ARGS__}))
+
+#define MUX_REC_SRC_LIST(...) \
+	.rec_parents = (struct clk * []){__VA_ARGS__}, \
+	.num_rec_parents = ARRAY_SIZE(((struct clk * []){__VA_ARGS__}))
+
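+/*
+ * Illustrative (hypothetical) use inside a mux_clk initializer:
+ *
+ *	MUX_SRC_LIST({&pll0_out_main.c, 0}, {&pll1_out_main.c, 1}),
+ */
+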
+struct mux_clk {
+	/* Parents in decreasing order of preference for obtaining rates. */
+	struct clk_src	*parents;
+	int		num_parents;
+	/* Recursively search for the requested parent in rec_parents. */
+	struct clk	**rec_parents;
+	int		num_rec_parents;
+	struct clk	*safe_parent;
+	int		safe_sel;
+	unsigned long	safe_freq;
+	/*
+	 * Before attempting a clk_round_rate on available sources, attempt a
+	 * clk_get_rate on all those sources. If one of them is already at the
+	 * necessary rate, that source will be used.
+	 */
+	bool		try_get_rate;
+	struct clk_mux_ops *ops;
+	/*
+	 * Set if you need the mux to try a new parent before falling back to
+	 * the current parent. If the safe_parent field above is set, then the
+	 * safe_sel intermediate source will only be used if we fall back to
+	 * the current parent during mux_set_rate.
+	 */
+	bool		try_new_parent;
+
+	/* Fields not used by helper function. */
+	void *const __iomem *base;
+	u32		offset;
+	u32		en_offset;
+	u32		mask;
+	u32		shift;
+	u32		en_mask;
+	/*
+	 * Set post divider for debug mux in order to divide the clock
+	 * by post_div + 1.
+	 */
+	u32		post_div;
+	int		low_power_sel;
+	void		*priv;
+
+	struct clk	c;
+};
+
+static inline struct mux_clk *to_mux_clk(struct clk *c)
+{
+	return container_of(c, struct mux_clk, c);
+}
+
+extern const  struct clk_ops clk_ops_gen_mux;
+
+/* ==================== Divider clock ==================== */
+
+struct div_clk;
+
+struct clk_div_ops {
+	int (*set_div)(struct div_clk *clk, int div);
+	int (*get_div)(struct div_clk *clk);
+	bool (*is_enabled)(struct div_clk *clk);
+	int (*enable)(struct div_clk *clk);
+	void (*disable)(struct div_clk *clk);
+	void __iomem *(*list_registers)(struct div_clk *clk, int n,
+				struct clk_register_data **regs, u32 *size);
+};
+
+struct div_data {
+	unsigned int div;
+	unsigned int min_div;
+	unsigned int max_div;
+	unsigned long rate_margin;
+	/*
+	 * Indicates whether this divider clock supports half-integer dividers.
+	 * If so, min_div and max_div have been doubled, i.e. they are 2*N.
+	 */
+	bool is_half_divider;
+	/*
+	 * Skip odd dividers since the hardware may not support them.
+	 */
+	bool skip_odd_div;
+	bool skip_even_div;
+	bool allow_div_one;
+	unsigned int cached_div;
+};
+
+struct div_clk {
+	struct div_data data;
+
+	/*
+	 * Some implementations may require the divider to be set to a "safe"
+	 * value that allows reprogramming of upstream clocks without violating
+	 * voltage constraints.
+	 */
+	unsigned long safe_freq;
+
+	/* Optional */
+	struct clk_div_ops *ops;
+
+	/* Fields not used by helper function. */
+	void *const __iomem *base;
+	u32		offset;
+	u32		mask;
+	u32		shift;
+	u32		en_mask;
+	void		*priv;
+	struct clk	c;
+};
+
+static inline struct div_clk *to_div_clk(struct clk *c)
+{
+	return container_of(c, struct div_clk, c);
+}
+
+extern const struct clk_ops clk_ops_div;
+extern const struct clk_ops clk_ops_slave_div;
+
+struct ext_clk {
+	struct clk c;
+	struct device *dev;
+	char *clk_id;
+};
+
+long parent_round_rate(struct clk *c, unsigned long rate);
+unsigned long parent_get_rate(struct clk *c);
+int parent_set_rate(struct clk *c, unsigned long rate);
+
+static inline struct ext_clk *to_ext_clk(struct clk *c)
+{
+	return container_of(c, struct ext_clk, c);
+}
+
+extern const struct clk_ops clk_ops_ext;
+
+#define DEFINE_FIXED_DIV_CLK(clk_name, _div, _parent) \
+static struct div_clk clk_name = {	\
+	.data = {				\
+		.max_div = _div,		\
+		.min_div = _div,		\
+		.div = _div,			\
+	},					\
+	.c = {					\
+		.parent = _parent,		\
+		.dbg_name = #clk_name,		\
+		.ops = &clk_ops_div,		\
+		CLK_INIT(clk_name.c),		\
+	}					\
+}
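+/*
+ * e.g. (hypothetical): a fixed divide-by-2 of some parent wrapper clock:
+ *	DEFINE_FIXED_DIV_CLK(foo_div2_clk, 2, &foo_clk.c);
+ */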
+
+#define DEFINE_FIXED_SLAVE_DIV_CLK(clk_name, _div, _parent) \
+static struct div_clk clk_name = {	\
+	.data = {				\
+		.max_div = _div,		\
+		.min_div = _div,		\
+		.div = _div,			\
+	},					\
+	.c = {					\
+		.parent = _parent,		\
+		.dbg_name = #clk_name,		\
+		.ops = &clk_ops_slave_div,		\
+		CLK_INIT(clk_name.c),		\
+	}					\
+}
+
+#define DEFINE_EXT_CLK(clk_name, _parent) \
+static struct ext_clk clk_name = {		\
+	.c = {					\
+		.parent = _parent,		\
+		.dbg_name = #clk_name,		\
+		.ops = &clk_ops_ext,		\
+		CLK_INIT(clk_name.c),		\
+	}					\
+}
+
+/* ==================== Mux Div clock ==================== */
+
+struct mux_div_clk;
+
+/*
+ * struct mux_div_ops
+ * the enable and disable ops are optional.
+ */
+
+struct mux_div_ops {
+	int (*set_src_div)(struct mux_div_clk *, u32 src_sel, u32 div);
+	void (*get_src_div)(struct mux_div_clk *, u32 *src_sel, u32 *div);
+	int (*enable)(struct mux_div_clk *);
+	void (*disable)(struct mux_div_clk *);
+	bool (*is_enabled)(struct mux_div_clk *);
+	void __iomem *(*list_registers)(struct mux_div_clk *md, int n,
+				struct clk_register_data **regs, u32 *size);
+};
+
+/*
+ * struct mux_div_clk - combined mux/divider clock
+ * @priv
+ *		parameters needed by ops
+ * @safe_freq
+ *		when switching rates from A to B, the mux div clock will
+ *		instead switch from A -> safe_freq -> B. This allows the
+ *		mux_div clock to change rates while enabled, even if this
+ *		behavior is not supported by the parent clocks.
+ *
+ *		If changing the rate of parent A also causes the rate of
+ *		parent B to change, then safe_freq must be defined.
+ *
+ *		safe_freq is expected to have a source clock which is always
+ *		on and runs at only one rate.
+ * @parents
+ *		list of parents and mux indices
+ * @ops
+ *		function pointers for hw specific operations
+ * @src_sel
+ *		the mux index which will be used if the clock is enabled.
+ * @try_get_rate
+ *		Set if you need the mux to directly jump to a source
+ *		that is currently running at the desired rate.
+ * @force_enable_md
+ *		Set if the mux-div needs to be force enabled/disabled during
+ *		clk_enable/disable.
+ */
+
+struct mux_div_clk {
+	/* Required parameters */
+	struct mux_div_ops		*ops;
+	struct div_data			data;
+	struct clk_src			*parents;
+	u32				num_parents;
+
+	struct clk			c;
+
+	/* Internal */
+	u32				src_sel;
+
+	/* Optional parameters */
+	void				*priv;
+	void __iomem			*base;
+	u32				div_mask;
+	u32				div_offset;
+	u32				div_shift;
+	u32				src_mask;
+	u32				src_offset;
+	u32				src_shift;
+	u32				en_mask;
+	u32				en_offset;
+
+	u32				safe_div;
+	struct clk			*safe_parent;
+	unsigned long			safe_freq;
+	bool				try_get_rate;
+	bool				force_enable_md;
+};
+
+static inline struct mux_div_clk *to_mux_div_clk(struct clk *clk)
+{
+	return container_of(clk, struct mux_div_clk, c);
+}
+
+extern const struct clk_ops clk_ops_mux_div_clk;
+
+#endif
diff --git a/include/linux/clk/qcom.h b/include/linux/clk/qcom.h
index e2fee60..d413b0a 100644
--- a/include/linux/clk/qcom.h
+++ b/include/linux/clk/qcom.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,7 @@
 #ifndef __LINUX_CLK_QCOM_H_
 #define __LINUX_CLK_QCOM_H_
 
+#if defined(CONFIG_COMMON_CLK_QCOM)
 enum branch_mem_flags {
 	CLKFLAG_RETAIN_PERIPH,
 	CLKFLAG_NORETAIN_PERIPH,
@@ -23,5 +24,8 @@ enum branch_mem_flags {
 	CLKFLAG_PERIPH_OFF_SET,
 	CLKFLAG_PERIPH_OFF_CLEAR,
 };
+#elif defined(CONFIG_COMMON_CLK_MSM)
+#include <linux/clk/msm-clk.h>
+#endif /* CONFIG_COMMON_CLK_QCOM */
 
 #endif  /* __LINUX_CLK_QCOM_H_ */
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 2eabc86..9b8848b 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -22,6 +22,7 @@ struct clk_lookup {
 	struct list_head	node;
 	const char		*dev_id;
 	const char		*con_id;
+	int			of_idx;
 	struct clk		*clk;
 	struct clk_hw		*clk_hw;
 };
diff --git a/include/soc/qcom/clock-alpha-pll.h b/include/soc/qcom/clock-alpha-pll.h
new file mode 100644
index 0000000..f8130f1
--- /dev/null
+++ b/include/soc/qcom/clock-alpha-pll.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_ALPHA_PLL_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_ALPHA_PLL_H
+
+#include <linux/spinlock.h>
+#include <linux/clk/msm-clk-provider.h>
+
+struct alpha_pll_masks {
+	u32 lock_mask;		/* lock_det bit */
+	u32 active_mask;	/* active_flag in FSM mode */
+	u32 update_mask;	/* update bit for dynamic update */
+	u32 vco_mask;		/* vco_sel bits */
+	u32 vco_shift;
+	u32 alpha_en_mask;	/* alpha_en bit */
+	u32 output_mask;	/* pllout_* bits */
+	u32 post_div_mask;
+
+	u32 test_ctl_lo_mask;
+	u32 test_ctl_hi_mask;
+};
+
+struct alpha_pll_vco_tbl {
+	u32 vco_val;
+	unsigned long min_freq;
+	unsigned long max_freq;
+};
+
+#define VCO(a, b, c) { \
+	.vco_val = a,\
+	.min_freq = b,\
+	.max_freq = c,\
+}
+
+struct alpha_pll_clk {
+	struct alpha_pll_masks *masks;
+
+	void *const __iomem *base;
+
+	u32 offset;
+	u32 fabia_frac_offset;
+
+	/* if fsm_en_mask is set, config PLL to FSM mode */
+	u32 fsm_reg_offset;
+	u32 fsm_en_mask;
+
+	u32 enable_config;	/* bitmask of outputs to be enabled */
+	u32 post_div_config;	/* masked post divider setting */
+	u32 config_ctl_val;	/* config register init value */
+	u32 test_ctl_lo_val;	/* test control settings */
+	u32 test_ctl_hi_val;
+
+	struct alpha_pll_vco_tbl *vco_tbl;
+	u32 num_vco;
+	u32 current_vco_val;
+	bool inited;
+	bool slew;
+	bool no_prepared_reconfig;
+
+	/* some PLLs support dynamically updating their rate
+	 * without disabling the PLL first. Set this flag
+	 * to enable this support.
+	 */
+	bool dynamic_update;
+
+	/*
+	 * Some chipsets need the offline request bit to be
+	 * cleared on a second write to the register, even though
+	 * SW wants the bit to be set. Set this flag to indicate
+	 * that the workaround is required.
+	 */
+	bool offline_bit_workaround;
+	bool no_irq_dis;
+	bool is_fabia;
+	unsigned long min_supported_freq;
+	struct clk c;
+};
+
+static inline struct alpha_pll_clk *to_alpha_pll_clk(struct clk *c)
+{
+	return container_of(c, struct alpha_pll_clk, c);
+}
+
+extern void __init_alpha_pll(struct clk *c);
+extern const struct clk_ops clk_ops_alpha_pll;
+extern const struct clk_ops clk_ops_alpha_pll_hwfsm;
+extern const struct clk_ops clk_ops_fixed_alpha_pll;
+extern const struct clk_ops clk_ops_dyna_alpha_pll;
+extern const struct clk_ops clk_ops_fixed_fabia_alpha_pll;
+extern const struct clk_ops clk_ops_fabia_alpha_pll;
+
+#endif
diff --git a/include/soc/qcom/clock-local2.h b/include/soc/qcom/clock-local2.h
new file mode 100644
index 0000000..c5e7488
--- /dev/null
+++ b/include/soc/qcom/clock-local2.h
@@ -0,0 +1,274 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_2_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_2_H
+
+#include <linux/spinlock.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+
+/*
+ * Generic frequency-definition structs and macros
+ */
+
+/**
+ * @freq_hz: output rate
+ * @src_freq: source freq for dynamic pll. For fixed plls, set to 0.
+ * @src_clk: source clock for freq_hz
+ * @m_val: M value corresponding to freq_hz
+ * @n_val: N value corresponding to freq_hz
+ * @d_val: D value corresponding to freq_hz
+ * @div_src_val: Pre divider value and source selection mux index for freq_hz
+ * @sys_vdd: Voltage level required for freq_hz
+ */
+struct clk_freq_tbl {
+	unsigned long	freq_hz;
+	unsigned long	src_freq;
+	struct clk	*src_clk;
+	u32	m_val;
+	u32	n_val;
+	u32	d_val;
+	u32	div_src_val;
+	const unsigned long sys_vdd;
+};
+
+#define FREQ_END	(ULONG_MAX-1)
+#define F_END { .freq_hz = FREQ_END }
+#define	FIXED_CLK_SRC	0
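+/*
+ * Illustrative (hypothetical) frequency table terminated with F_END:
+ *
+ *	static struct clk_freq_tbl ftbl_example[] = {
+ *		{ .freq_hz = 19200000, .src_clk = &xo_clk.c, .div_src_val = 0 },
+ *		F_END,
+ *	};
+ */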
+/*
+ * Generic clock-definition struct and macros
+ */
+/**
+ * struct rcg_clk - root clock generator
+ * @cmd_rcgr_reg: command register
+ * @mnd_reg_width: Width of MND register
+ * @set_rate: function to set frequency
+ * @freq_tbl: frequency table for this RCG
+ * @current_freq: current RCG frequency
+ * @c: generic clock data
+ * @non_local_children: set if RCG has at least one branch owned by a
+ *			 different EE
+ * @non_local_control_timeout: configurable RCG timeout needed when all RCG
+ *			 children can be controlled by an entity outside of
+ *			 HLOS.
+ * @force_enable_rcgr: set if RCG needs to be force enabled/disabled during
+ * power sequence
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct rcg_clk {
+	u32 cmd_rcgr_reg;
+	u32 mnd_reg_width;
+
+	void   (*set_rate)(struct rcg_clk *, struct clk_freq_tbl *);
+
+	struct clk_freq_tbl *freq_tbl;
+	struct clk_freq_tbl *current_freq;
+	struct clk	c;
+
+	bool non_local_children;
+	int non_local_control_timeout;
+	bool force_enable_rcgr;
+
+	void *const __iomem *base;
+};
+
+static inline struct rcg_clk *to_rcg_clk(struct clk *clk)
+{
+	return container_of(clk, struct rcg_clk, c);
+}
+
+extern struct clk_freq_tbl rcg_dummy_freq;
+
+/**
+ * struct branch_clk - branch clock
+ * @set_rate: Set the frequency of this branch clock.
+ * @c: clk
+ * @cbcr_reg: branch control register
+ * @bcr_reg: block reset register
+ * @has_sibling: true if other branches are derived from this branch's source
+ * @cur_div: current branch divider value
+ * @max_div: maximum branch divider value (if zero, no divider exists)
+ * @halt_check: halt checking type
+ * @toggle_memory: toggle memory during enable/disable if true
+ * @no_halt_check_on_disable: When set, do not check status bit during
+ *			      clk_disable().
+ * @check_enable_bit: Check the enable bit to determine clock status
+ *				during handoff.
+ * @aggr_sibling_rates: Set if there are multiple branch clocks with rate
+ *			setting capability on the common RCG.
+ * @is_prepared: Set if clock's prepare count is greater than 0.
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct branch_clk {
+	void   (*set_rate)(struct branch_clk *, struct clk_freq_tbl *);
+	struct clk c;
+	u32 cbcr_reg;
+	u32 bcr_reg;
+	int has_sibling;
+	u32 cur_div;
+	u32 max_div;
+	const u32 halt_check;
+	bool toggle_memory;
+	bool no_halt_check_on_disable;
+	bool check_enable_bit;
+	bool aggr_sibling_rates;
+	bool is_prepared;
+
+	void *const __iomem *base;
+};
+
+static inline struct branch_clk *to_branch_clk(struct clk *clk)
+{
+	return container_of(clk, struct branch_clk, c);
+}
+
+/**
+ * struct local_vote_clk - Voteable branch clock
+ * @c: clk
+ * @cbcr_reg: branch control register
+ * @vote_reg: voting register
+ * @en_mask: enable mask
+ * @halt_check: halt checking type
+ * @base: pointer to base address of ioremapped registers.
+ * An on/off switch with a rate derived from the parent.
+ */
+struct local_vote_clk {
+	struct clk c;
+	u32 cbcr_reg;
+	u32 vote_reg;
+	u32 bcr_reg;
+	u32 en_mask;
+	const u32 halt_check;
+
+	void * __iomem *base;
+};
+
+static inline struct local_vote_clk *to_local_vote_clk(struct clk *clk)
+{
+	return container_of(clk, struct local_vote_clk, c);
+}
+
+/**
+ * struct reset_clk - Reset clock
+ * @c: clk
+ * @reset_reg: block reset register
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct reset_clk {
+	struct clk c;
+	u32 reset_reg;
+
+	void *__iomem *base;
+};
+
+static inline struct reset_clk *to_reset_clk(struct clk *clk)
+{
+	return container_of(clk, struct reset_clk, c);
+}
+/**
+ * struct measure_clk - for rate measurement debug use
+ * @sample_ticks: sample period in reference clock ticks
+ * @multiplier: measurement scale-up factor
+ * @divider: measurement scale-down factor
+ * @c: clk
+ */
+struct measure_clk {
+	u64 sample_ticks;
+	u32 multiplier;
+	u32 divider;
+
+	struct clk c;
+};
+
+struct measure_clk_data {
+	struct clk *cxo;
+	u32 plltest_reg;
+	u32 plltest_val;
+	u32 xo_div4_cbcr;
+	u32 ctl_reg;
+	u32 status_reg;
+
+	void *const __iomem *base;
+};
+
+static inline struct measure_clk *to_measure_clk(struct clk *clk)
+{
+	return container_of(clk, struct measure_clk, c);
+}
+
+/**
+ * struct gate_clk
+ * @c: clk
+ * @en_mask: ORed with @en_reg to enable gate clk
+ * @en_reg: register used to enable/disable gate clk
+ * @base: pointer to base address of ioremapped registers
+ */
+struct gate_clk {
+	struct clk c;
+	u32 en_mask;
+	u32 en_reg;
+	unsigned int delay_us;
+
+	void *const __iomem *base;
+};
+
+static inline struct gate_clk *to_gate_clk(struct clk *clk)
+{
+	return container_of(clk, struct gate_clk, c);
+}
+
+/*
+ * Generic set-rate implementations
+ */
+void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf);
+void set_rate_hid(struct rcg_clk *clk, struct clk_freq_tbl *nf);
+
+/*
+ * Variables from the clock-local driver
+ */
+extern spinlock_t local_clock_reg_lock;
+
+extern const struct clk_ops clk_ops_empty;
+extern const struct clk_ops clk_ops_rcg;
+extern const struct clk_ops clk_ops_rcg_mnd;
+extern const struct clk_ops clk_ops_branch;
+extern const struct clk_ops clk_ops_vote;
+extern const struct clk_ops clk_ops_rcg_hdmi;
+extern const struct clk_ops clk_ops_rcg_edp;
+extern const struct clk_ops clk_ops_byte;
+extern const struct clk_ops clk_ops_pixel;
+extern const struct clk_ops clk_ops_byte_multiparent;
+extern const struct clk_ops clk_ops_pixel_multiparent;
+extern const struct clk_ops clk_ops_edppixel;
+extern const struct clk_ops clk_ops_gate;
+extern const struct clk_ops clk_ops_rst;
+extern struct clk_mux_ops mux_reg_ops;
+extern struct mux_div_ops rcg_mux_div_ops;
+extern const  struct clk_div_ops postdiv_reg_ops;
+
+enum handoff pixel_rcg_handoff(struct clk *clk);
+enum handoff byte_rcg_handoff(struct clk *clk);
+unsigned long measure_get_rate(struct clk *c);
+
+/*
+ * Clock definition macros
+ */
+#define DEFINE_CLK_MEASURE(name) \
+	struct clk name = { \
+		.ops = &clk_ops_empty, \
+		.dbg_name = #name, \
+		CLK_INIT(name), \
+	} \
+
+#endif /* __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_2_H */
diff --git a/include/soc/qcom/clock-pll.h b/include/soc/qcom/clock-pll.h
new file mode 100644
index 0000000..1865e3c
--- /dev/null
+++ b/include/soc/qcom/clock-pll.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_PLL_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_PLL_H
+
+#include <linux/clk/msm-clk-provider.h>
+
+/**
+ * struct pll_freq_tbl - generic PLL frequency definition
+ * @freq_hz: pll frequency in hz
+ * @l_val: pll l value
+ * @m_val: pll m value
+ * @n_val: pll n value
+ * @post_div_val: pll post divider value
+ * @pre_div_val: pll pre-divider value
+ * @vco_val: pll vco value
+ */
+struct pll_freq_tbl {
+	const u32 freq_hz;
+	const u32 l_val;
+	const u32 m_val;
+	const u32 n_val;
+	const u32 post_div_val;
+	const u32 pre_div_val;
+	const u32 vco_val;
+};
+
+/**
+ * struct pll_config_masks - PLL config masks struct
+ * @post_div_mask: mask for post divider bits location
+ * @pre_div_mask: mask for pre-divider bits location
+ * @vco_mask: mask for vco bits location
+ * @mn_en_mask: ORed with pll config register to enable the mn counter
+ * @main_output_mask: ORed with pll config register to enable the main output
+ * @apc_pdn_mask: ORed with pll config register to enable/disable APC PDN
+ * @lock_mask: Mask that indicates that the PLL has locked
+ */
+struct pll_config_masks {
+	u32 apc_pdn_mask;
+	u32 post_div_mask;
+	u32 pre_div_mask;
+	u32 vco_mask;
+	u32 mn_en_mask;
+	u32 main_output_mask;
+	u32 early_output_mask;
+	u32 lock_mask;
+};
+
+struct pll_config_vals {
+	u32 post_div_masked;
+	u32 pre_div_masked;
+	u32 config_ctl_val;
+	u32 config_ctl_hi_val;
+	u32 test_ctl_lo_val;
+	u32 test_ctl_hi_val;
+	u32 alpha_val;
+	bool enable_mn;
+};
+
+struct pll_spm_ctrl {
+	u32 offset;
+	u32 event_bit;
+	void __iomem *spm_base;
+};
+
+#define PLL_FREQ_END	(UINT_MAX-1)
+#define PLL_F_END { .freq_hz = PLL_FREQ_END }
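+/*
+ * Illustrative (hypothetical) table, assuming a 19.2 MHz reference so that
+ * 998.4 MHz = 52 * 19.2 MHz:
+ *
+ *	static struct pll_freq_tbl apcs_pll_freq[] = {
+ *		{ .freq_hz = 998400000, .l_val = 52, .m_val = 0, .n_val = 1 },
+ *		PLL_F_END,
+ *	};
+ */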
+
+/**
+ * struct pll_vote_clk - phase locked loop (HW voteable)
+ * @soft_vote: soft voting variable for multiple PLL software instances
+ * @soft_vote_mask: soft voting mask for multiple PLL software instances
+ * @en_reg: enable register
+ * @en_mask: ORed with @en_reg to enable the clock
+ * @status_mask: ANDed with @status_reg to determine if PLL is active.
+ * @status_reg: status register
+ * @c: clock
+ */
+struct pll_vote_clk {
+	u32 *soft_vote;
+	u32 soft_vote_mask;
+	void __iomem *const en_reg;
+	u32 en_mask;
+	void __iomem *const status_reg;
+	u32 status_mask;
+
+	struct clk c;
+
+	void *const __iomem *base;
+};
+
+extern const struct clk_ops clk_ops_pll_vote;
+extern const struct clk_ops clk_ops_pll_acpu_vote;
+extern const struct clk_ops clk_ops_pll_sleep_vote;
+
+/* Soft voting values */
+#define PLL_SOFT_VOTE_PRIMARY   BIT(0)
+#define PLL_SOFT_VOTE_ACPU      BIT(1)
+#define PLL_SOFT_VOTE_AUX       BIT(2)
+
+static inline struct pll_vote_clk *to_pll_vote_clk(struct clk *c)
+{
+	return container_of(c, struct pll_vote_clk, c);
+}
+
+/**
+ * struct pll_clk - phase locked loop
+ * @mode_reg: enable register
+ * @l_reg: l value register
+ * @m_reg: m value register
+ * @n_reg: n value register
+ * @config_reg: configuration register, contains mn divider enable, pre divider,
+ *   post divider and vco configuration. Depending on the target, this may be
+ *   the CONFIG register or the USER_CTL register.
+ * @config_ctl_reg: "expert" configuration register
+ * @config_ctl_hi_reg: upper 32 bits of the "expert" configuration register
+ * @status_reg: status register, contains the lock detection bit
+ * @init_test_ctl: initialize the test control register
+ * @pgm_test_ctl_enable: program the test_ctl register in the enable sequence
+ * @test_ctl_dbg: if false, the test control registers will be configured.
+ * @masks: masks used for settings in config_reg
+ * @vals: configuration values to be written to PLL registers
+ * @freq_tbl: pll freq table
+ * @no_prepared_reconfig: Fail round_rate if pll is prepared
+ * @c: clk
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct pll_clk {
+	void __iomem *const mode_reg;
+	void __iomem *const l_reg;
+	void __iomem *const m_reg;
+	void __iomem *const n_reg;
+	void __iomem *const alpha_reg;
+	void __iomem *const config_reg;
+	void __iomem *const config_ctl_reg;
+	void __iomem *const config_ctl_hi_reg;
+	void __iomem *const status_reg;
+	void __iomem *const alt_status_reg;
+	void __iomem *const test_ctl_lo_reg;
+	void __iomem *const test_ctl_hi_reg;
+
+	bool init_test_ctl;
+	bool pgm_test_ctl_enable;
+	bool test_ctl_dbg;
+
+	struct pll_config_masks masks;
+	struct pll_config_vals vals;
+	struct pll_freq_tbl *freq_tbl;
+
+	unsigned long src_rate;
+	unsigned long min_rate;
+	unsigned long max_rate;
+
+	bool inited;
+	bool no_prepared_reconfig;
+
+	struct pll_spm_ctrl spm_ctrl;
+	struct clk c;
+
+	void *const __iomem *base;
+};
+
+extern const struct clk_ops clk_ops_local_pll;
+extern const struct clk_ops clk_ops_sr2_pll;
+extern const struct clk_ops clk_ops_variable_rate_pll;
+extern const struct clk_ops clk_ops_variable_rate_pll_hwfsm;
+
+void __variable_rate_pll_init(struct clk *c);
+
+static inline struct pll_clk *to_pll_clk(struct clk *c)
+{
+	return container_of(c, struct pll_clk, c);
+}
+
+int sr_pll_clk_enable(struct clk *c);
+int sr_hpm_lp_pll_clk_enable(struct clk *c);
+
+struct pll_alt_config {
+	u32 val;
+	u32 mask;
+};
+
+struct pll_config {
+	u32 l;
+	u32 m;
+	u32 n;
+	u32 vco_val;
+	u32 vco_mask;
+	u32 pre_div_val;
+	u32 pre_div_mask;
+	u32 post_div_val;
+	u32 post_div_mask;
+	u32 mn_ena_val;
+	u32 mn_ena_mask;
+	u32 main_output_val;
+	u32 main_output_mask;
+	u32 aux_output_val;
+	u32 aux_output_mask;
+	u32 cfg_ctl_val;
+	/* SR2 PLL specific fields */
+	u32 add_factor_val;
+	u32 add_factor_mask;
+	struct pll_alt_config alt_cfg;
+};
+
+struct pll_config_regs {
+	void __iomem *l_reg;
+	void __iomem *m_reg;
+	void __iomem *n_reg;
+	void __iomem *config_reg;
+	void __iomem *config_alt_reg;
+	void __iomem *config_ctl_reg;
+	void __iomem *mode_reg;
+
+	void *const __iomem *base;
+};
+
+void configure_sr_pll(struct pll_config *config, struct pll_config_regs *regs,
+				u32 ena_fsm_mode);
+void configure_sr_hpm_lp_pll(struct pll_config *config,
+				struct pll_config_regs *regs, u32 ena_fsm_mode);
+#endif
diff --git a/include/soc/qcom/clock-rpm.h b/include/soc/qcom/clock-rpm.h
new file mode 100644
index 0000000..4af457c
--- /dev/null
+++ b/include/soc/qcom/clock-rpm.h
@@ -0,0 +1,180 @@
+/* Copyright (c) 2010-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_RPM_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_RPM_H
+
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/rpm-smd.h>
+
+#define RPM_SMD_KEY_RATE	0x007A484B
+#define RPM_SMD_KEY_ENABLE	0x62616E45
+#define RPM_SMD_KEY_STATE	0x54415453
+
+#define RPM_CLK_BUFFER_A_REQ			0x616B6C63
+#define RPM_KEY_SOFTWARE_ENABLE			0x6E657773
+#define RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY	0x62636370
+
+struct clk_ops;
+struct clk_rpmrs_data;
+extern const struct clk_ops clk_ops_rpm;
+extern const struct clk_ops clk_ops_rpm_branch;
+
+struct rpm_clk {
+	int rpm_res_type;
+	int rpm_key;
+	int rpm_clk_id;
+	const int rpm_status_id;
+	bool active_only;
+	bool enabled;
+	bool branch; /* true: RPM only accepts 1 for ON and 0 for OFF */
+	struct clk_rpmrs_data *rpmrs_data;
+	struct rpm_clk *peer;
+	struct clk c;
+	uint32_t *last_active_set_vote;
+	uint32_t *last_sleep_set_vote;
+};
+
+static inline struct rpm_clk *to_rpm_clk(struct clk *clk)
+{
+	return container_of(clk, struct rpm_clk, c);
+}
+
+/*
+ * RPM scaling enable function, used on targets that have an RPM resource
+ * for enabling RPM clock scaling.
+ */
+int enable_rpm_scaling(void);
+
+int vote_bimc(struct rpm_clk *r, uint32_t value);
+
+extern struct clk_rpmrs_data clk_rpmrs_data_smd;
+
+/*
+ * A note on name##last_{active,sleep}_set_vote below:
+ * We track the last active and sleep set votes across both
+ * active-only and active+sleep set clocks. We use the same
+ * tracking variables for both clocks in order to keep both
+ * updated about the last vote irrespective of which clock
+ * actually made the request. This is the only way to allow
+ * optimizations that prevent duplicate requests from being sent
+ * to the RPM. Separate tracking does not work since it is not
+ * possible to know if the peer's last request was actually sent
+ * to the RPM.
+ */
+
+#define __DEFINE_CLK_RPM(name, active, type, r_id, stat_id, dep, key, \
+				rpmrsdata) \
+	static struct rpm_clk active; \
+	static uint32_t name##last_active_set_vote; \
+	static uint32_t name##last_sleep_set_vote; \
+	static struct rpm_clk name = { \
+		.rpm_res_type = (type), \
+		.rpm_clk_id = (r_id), \
+		.rpm_status_id = (stat_id), \
+		.rpm_key = (key), \
+		.peer = &active, \
+		.rpmrs_data = (rpmrsdata),\
+		.last_active_set_vote = &name##last_active_set_vote, \
+		.last_sleep_set_vote = &name##last_sleep_set_vote, \
+		.c = { \
+			.ops = &clk_ops_rpm, \
+			.dbg_name = #name, \
+			CLK_INIT(name.c), \
+			.depends = dep, \
+		}, \
+	}; \
+	static struct rpm_clk active = { \
+		.rpm_res_type = (type), \
+		.rpm_clk_id = (r_id), \
+		.rpm_status_id = (stat_id), \
+		.rpm_key = (key), \
+		.peer = &name, \
+		.active_only = true, \
+		.rpmrs_data = (rpmrsdata),\
+		.last_active_set_vote = &name##last_active_set_vote, \
+		.last_sleep_set_vote = &name##last_sleep_set_vote, \
+		.c = { \
+			.ops = &clk_ops_rpm, \
+			.dbg_name = #active, \
+			CLK_INIT(active.c), \
+			.depends = dep, \
+		}, \
+	} \
+
+#define __DEFINE_CLK_RPM_BRANCH(name, active, type, r_id, stat_id, r, \
+					key, rpmrsdata) \
+	static struct rpm_clk active; \
+	static uint32_t name##last_active_set_vote; \
+	static uint32_t name##last_sleep_set_vote; \
+	static struct rpm_clk name = { \
+		.rpm_res_type = (type), \
+		.rpm_clk_id = (r_id), \
+		.rpm_status_id = (stat_id), \
+		.rpm_key = (key), \
+		.peer = &active, \
+		.branch = true, \
+		.rpmrs_data = (rpmrsdata),\
+		.last_active_set_vote = &name##last_active_set_vote, \
+		.last_sleep_set_vote = &name##last_sleep_set_vote, \
+		.c = { \
+			.ops = &clk_ops_rpm_branch, \
+			.dbg_name = #name, \
+			.rate = (r), \
+			CLK_INIT(name.c), \
+		}, \
+	}; \
+	static struct rpm_clk active = { \
+		.rpm_res_type = (type), \
+		.rpm_clk_id = (r_id), \
+		.rpm_status_id = (stat_id), \
+		.rpm_key = (key), \
+		.peer = &name, \
+		.active_only = true, \
+		.branch = true, \
+		.rpmrs_data = (rpmrsdata),\
+		.last_active_set_vote = &name##last_active_set_vote, \
+		.last_sleep_set_vote = &name##last_sleep_set_vote, \
+		.c = { \
+			.ops = &clk_ops_rpm_branch, \
+			.dbg_name = #active, \
+			.rate = (r), \
+			CLK_INIT(active.c), \
+		}, \
+	} \
+
+#define DEFINE_CLK_RPM_SMD(name, active, type, r_id, dep) \
+	__DEFINE_CLK_RPM(name, active, type, r_id, 0, dep, \
+				RPM_SMD_KEY_RATE, &clk_rpmrs_data_smd)
+
+#define DEFINE_CLK_RPM_SMD_BRANCH(name, active, type, r_id, r) \
+	__DEFINE_CLK_RPM_BRANCH(name, active, type, r_id, 0, r, \
+				RPM_SMD_KEY_ENABLE, &clk_rpmrs_data_smd)
+
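+/*
+ * Illustrative (hypothetical) use; this defines both pcnoc_clk and its
+ * active-set-only peer pcnoc_a_clk against one RPM resource (the resource
+ * type and id below are placeholders):
+ *
+ *	DEFINE_CLK_RPM_SMD(pcnoc_clk, pcnoc_a_clk, RPM_BUS_CLK_TYPE, 0, NULL);
+ */
+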
+#define DEFINE_CLK_RPM_SMD_QDSS(name, active, type, r_id) \
+	__DEFINE_CLK_RPM(name, active, type, r_id, \
+		0, 0, RPM_SMD_KEY_STATE, &clk_rpmrs_data_smd)
+/*
+ * The RPM XO buffer clock management code aggregates votes for pin-control
+ * mode and software mode separately. A software-enable vote takes priority
+ * over pin-control, and if the software-mode aggregation results in a
+ * 'disable', the buffer is left in pin-control mode when a pin-control vote
+ * is in place.
+ */
+#define DEFINE_CLK_RPM_SMD_XO_BUFFER(name, active, r_id) \
+	__DEFINE_CLK_RPM_BRANCH(name, active, RPM_CLK_BUFFER_A_REQ, r_id, 0, \
+			1000, RPM_KEY_SOFTWARE_ENABLE, &clk_rpmrs_data_smd)
+
+#define DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(name, active, r_id) \
+	__DEFINE_CLK_RPM_BRANCH(name, active, RPM_CLK_BUFFER_A_REQ, r_id, 0, \
+	1000, RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY, &clk_rpmrs_data_smd)
+#endif
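For reference, these wrappers are meant to be instantiated from a per-SoC clock table. A hedged sketch of typical usage follows; the resource types and IDs (RPM_BUS_CLK_TYPE, RPM_MISC_CLK_TYPE, PNOC_ID, CXO_ID, BB_CLK1_ID) are illustrative values, not definitions from this patch:

	/* Sketch of typical usage in an SoC clock driver (identifiers assumed). */
	DEFINE_CLK_RPM_SMD(pnoc_clk, pnoc_a_clk, RPM_BUS_CLK_TYPE, PNOC_ID, NULL);
	DEFINE_CLK_RPM_SMD_BRANCH(xo_clk_src, xo_a_clk_src, RPM_MISC_CLK_TYPE,
				  CXO_ID, 19200000);
	DEFINE_CLK_RPM_SMD_XO_BUFFER(bb_clk1, bb_clk1_a, BB_CLK1_ID);

Each definition creates two clocks: 'name' votes on both the active and sleep sets, while 'active' votes on the active set only.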
diff --git a/include/soc/qcom/clock-voter.h b/include/soc/qcom/clock-voter.h
new file mode 100644
index 0000000..7f92a0d
--- /dev/null
+++ b/include/soc/qcom/clock-voter.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2010-2013, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_VOTER_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_VOTER_H
+
+#include <linux/clk/msm-clk-provider.h>
+
+struct clk_ops;
+extern const struct clk_ops clk_ops_voter;
+
+struct clk_voter {
+	int is_branch;
+	bool enabled;
+	struct clk c;
+};
+
+static inline struct clk_voter *to_clk_voter(struct clk *clk)
+{
+	return container_of(clk, struct clk_voter, c);
+}
+
+#define __DEFINE_CLK_VOTER(clk_name, _parent, _default_rate, _is_branch) \
+	struct clk_voter clk_name = { \
+		.is_branch = (_is_branch), \
+		.c = { \
+			.parent = _parent, \
+			.dbg_name = #clk_name, \
+			.ops = &clk_ops_voter, \
+			.rate = _default_rate, \
+			CLK_INIT(clk_name.c), \
+		}, \
+	}
+
+#define DEFINE_CLK_VOTER(clk_name, _parent, _default_rate) \
+	 __DEFINE_CLK_VOTER(clk_name, _parent, _default_rate, 0)
+
+#define DEFINE_CLK_BRANCH_VOTER(clk_name, _parent) \
+	 __DEFINE_CLK_VOTER(clk_name, _parent, 1000, 1)
+
+#endif
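A voter clock lets several clients vote on a shared parent; the voter ops aggregate the rate and enable requests and apply the result to that parent. A hedged usage sketch follows, where the parent clock objects pnoc_clk and xo_clk_src are assumed to be defined elsewhere:

	/* Sketch of typical usage; parent clocks and the LONG_MAX cap are illustrative. */
	static DEFINE_CLK_VOTER(pnoc_msmbus_clk, &pnoc_clk.c, LONG_MAX);
	static DEFINE_CLK_BRANCH_VOTER(cxo_otg_clk, &xo_clk_src.c);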
diff --git a/include/soc/qcom/msm-clock-controller.h b/include/soc/qcom/msm-clock-controller.h
new file mode 100644
index 0000000..4b7abec
--- /dev/null
+++ b/include/soc/qcom/msm-clock-controller.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MSM_CLOCK_CONTROLLER_H
+#define __ARCH_ARM_MSM_CLOCK_CONTROLLER_H
+
+#include <linux/list.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define dt_err(np, fmt, ...) \
+	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
+#define dt_prop_err(np, str, fmt, ...) \
+	dt_err(np, "%s: " fmt, str, ##__VA_ARGS__)
+
+/**
+ * struct msmclk_parser
+ * @list: entry in the list of registered parsers
+ * @compatible: matched against the compatible property from devicetree
+ * @parsedt: constructs and returns an instance of the appropriate object
+ *           based on the data from devicetree
+ */
+struct msmclk_parser {
+	struct list_head list;
+	char *compatible;
+	void * (*parsedt)(struct device *dev, struct device_node *of);
+};
+
+#define MSMCLK_PARSER(fn, str, id) \
+static struct msmclk_parser _msmclk_##fn##id = {		\
+	.list = LIST_HEAD_INIT(_msmclk_##fn##id.list),		\
+	.compatible = str,					\
+	.parsedt = fn,						\
+};								\
+static int __init _msmclk_init_##fn##id(void)			\
+{								\
+	msmclk_parser_register(&_msmclk_##fn##id);		\
+	return 0;						\
+}								\
+early_initcall(_msmclk_init_##fn##id)
+
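To show how the registration macro and the parsedt callback fit together, here is a hedged sketch; the fixed_clk type, the "qcom,hypothetical-fixed-clk" compatible string and the clock-frequency property are invented for the example:

	/* Sketch only: a minimal devicetree parser registered with MSMCLK_PARSER. */
	struct fixed_clk {
		struct clk c;	/* MSM struct clk; the .rate field is assumed */
	};

	static void *fixed_clk_dt_parser(struct device *dev, struct device_node *np)
	{
		u32 rate;
		struct fixed_clk *f;

		f = devm_kzalloc(dev, sizeof(*f), GFP_KERNEL);
		if (!f)
			return ERR_PTR(-ENOMEM);

		if (of_property_read_u32(np, "clock-frequency", &rate)) {
			dt_prop_err(np, "clock-frequency", "missing or invalid\n");
			return ERR_PTR(-EINVAL);
		}
		f->c.rate = rate;

		/* Fill in the common clk fields and hand the clock back. */
		return msmclk_generic_clk_init(dev, np, &f->c);
	}
	MSMCLK_PARSER(fixed_clk_dt_parser, "qcom,hypothetical-fixed-clk", 0);

On a hashtable miss, msmclk_parse_phandle() would presumably invoke the parser registered for the referenced node's compatible string and cache the result for later msmclk_lookup_phandle() calls.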
+/**
+ * struct msmclk_data
+ * @base: ioremapped region for the sub_devices
+ * @list: tracks all registered driver instances
+ * @htable: tracks all registered child clocks
+ * @clk_tbl: array of clk_lookup to be registered with the clock framework
+ */
+#define HASHTABLE_SIZE 200
+struct msmclk_data {
+	void __iomem *base;
+	struct device *dev;
+	struct list_head list;
+	struct hlist_head htable[HASHTABLE_SIZE];
+	struct clk_lookup *clk_tbl;
+	int clk_tbl_size;
+	int max_clk_tbl_size;
+};
+
+#if defined(CONFIG_MSM_CLK_CONTROLLER_V2)
+
+/* Utility functions */
+int of_property_count_phandles(struct device_node *np, char *propname);
+int of_property_read_phandle_index(struct device_node *np, char *propname,
+					int index, phandle *p);
+void *msmclk_generic_clk_init(struct device *dev, struct device_node *np,
+				struct clk *c);
+
+/*
+ * msmclk_parser_register
+ *      Registers a parser which will be matched with a node from dt
+ *      according to the compatible string.
+ */
+void msmclk_parser_register(struct msmclk_parser *p);
+
+/*
+ * msmclk_parse_phandle
+ *      On hashtable miss, the corresponding entry will be retrieved from
+ *      devicetree, and added to the hashtable.
+ */
+void *msmclk_parse_phandle(struct device *dev, phandle key);
+/*
+ * msmclk_lookup_phandle
+ *	Straightforward hashtable lookup
+ */
+void *msmclk_lookup_phandle(struct device *dev, phandle key);
+
+int __init msmclk_init(void);
+#else
+
+static inline int of_property_count_phandles(struct device_node *np,
+			char *propname)
+{
+	return 0;
+}
+
+static inline int of_property_read_phandle_index(struct device_node *np,
+			char *propname, int index, phandle *p)
+{
+	return 0;
+}
+
+static inline void *msmclk_generic_clk_init(struct device *dev,
+				struct device_node *np, struct clk *c)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline void msmclk_parser_register(struct msmclk_parser *p) { }
+
+static inline void *msmclk_parse_phandle(struct device *dev, phandle key)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline void *msmclk_lookup_phandle(struct device *dev, phandle key)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline int __init msmclk_init(void)
+{
+	return 0;
+}
+
+#endif /* CONFIG_MSM_CLK_CONTROLLER_V2 */
+#endif /* __ARCH_ARM_MSM_CLOCK_CONTROLLER_H */
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 408fa57..8cfb1d7 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -331,6 +331,7 @@ DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,
  * The clock events are used for clock enable/disable and for
  *  clock rate change
  */
+#if defined(CONFIG_COMMON_CLK_MSM)
 DECLARE_EVENT_CLASS(clock,
 
 	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
@@ -374,6 +375,13 @@ DEFINE_EVENT(clock, clock_set_rate,
 	TP_ARGS(name, state, cpu_id)
 );
 
+DEFINE_EVENT(clock, clock_set_rate_complete,
+
+	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+	TP_ARGS(name, state, cpu_id)
+);
+
 TRACE_EVENT(clock_set_parent,
 
 	TP_PROTO(const char *name, const char *parent_name),
@@ -393,6 +401,32 @@ TRACE_EVENT(clock_set_parent,
 	TP_printk("%s parent=%s", __get_str(name), __get_str(parent_name))
 );
 
+TRACE_EVENT(clock_state,
+
+	TP_PROTO(const char *name, unsigned long prepare_count,
+		unsigned long count, unsigned long rate),
+
+	TP_ARGS(name, prepare_count, count, rate),
+
+	TP_STRUCT__entry(
+		__string(name,			name)
+		__field(unsigned long,		prepare_count)
+		__field(unsigned long,		count)
+		__field(unsigned long,		rate)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->prepare_count = prepare_count;
+		__entry->count = count;
+		__entry->rate = rate;
+	),
+	TP_printk("%s\t[%lu:%lu]\t%lu", __get_str(name), __entry->prepare_count,
+					 __entry->count, __entry->rate)
+
+);
+#endif /* CONFIG_COMMON_CLK_MSM */
+
 /*
  * The power domain events are used for power domains transitions
  */
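As a hedged sketch (the calling code is not part of this patch), the new clock_state event could be emitted from the MSM clock core's enable/disable paths along these lines; the struct clk field names used here are assumptions:

	/* Sketch only: field names follow the MSM struct clk and are assumed. */
	static void clk_trace_state(struct clk *clk)
	{
		trace_clock_state(clk->dbg_name, clk->prepare_count, clk->count,
				  clk->rate);
	}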