Merge "msm: kgsl: Enhance preemption traces"
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 18386ab..59c3356 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -78,6 +78,7 @@
 		- "qcom,coresight-remote-etm"
 		- "qcom,coresight-hwevent"
 		- "qcom,coresight-dummy"
+		- "qcom,coresight-dbgui"
 
 	* port or ports: same as above.
 
diff --git a/Documentation/devicetree/bindings/arm/msm/clock-controller.txt b/Documentation/devicetree/bindings/arm/msm/clock-controller.txt
new file mode 100644
index 0000000..4cc49a59
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/clock-controller.txt
@@ -0,0 +1,67 @@
+Qualcomm Technologies MSM Clock controller
+
+Qualcomm Technologies MSM Clock controller devices contain PLLs, root clock
+generators and other clocking hardware blocks that provide stable, low power
+clocking to hardware blocks on Qualcomm Technologies SoCs. The clock controller
+device node lists the power supplies that need to be scaled using the
+vdd_*-supply property.
+
+Minor differences between hardware revisions are handled in code by re-using
+the compatible string to indicate the revision.
+
+Required properties:
+- compatible:           Must be one of following,
+			"qcom,gcc-8953"
+			"qcom,cc-debug-8953"
+			"qcom,gcc-mdss-8953"
+			"qcom,gcc-gfx-8953"
+			"qcom,gcc-gfx-sdm450"
+
+- reg:                  Pairs of physical base addresses and region sizes of
+                        memory mapped registers.
+- reg-names:            Names of the bases for the above registers. Currently,
+                        there is one expected base: "cc_base". Optional
+                        reg-names are "apcs_base", "meas", "mmss_base",
+                        "lpass_base", "apcs_c0_base", "apcs_c1_base",
+                        "apcs_cci_base", "efuse".
+
+Optional properties:
+- vdd_dig-supply:       The digital logic rail supply.
+- <pll>_dig-supply:     Some PLLs might have a separate digital supply on some
+                        targets. These properties will be provided on those
+                        targets for specific PLLs.
+- <pll>_analog-supply:  Some PLLs might have a separate analog supply on some
+                        targets. These properties will be provided on those
+                        targets for specific PLLs.
+- vdd_gpu_mx-supply:    MX rail supply for the GPU core.
+- #clock-cells:         If this device will also be providing controllable
+                        clocks, the #clock-cells property needs to be specified.
+                        This will allow the common clock device tree framework
+                        to recognize _this_ device node as a clock provider.
+- qcom,<clk>-corner-<vers>: List of frequency-voltage pairs that the clock can
+                            operate at. Drivers can use the OPP library API to
+                            operate on the list of OPPs registered using these
+                            values.
+- qcom,<clk>-speedbinX: A table of frequency (Hz) to voltage (corner) mapping
+                        that represents the max frequency possible for each
+                        supported voltage level for the clock.
+                        'X' is the speed bin into which the device falls -
+                        a bin will have unique frequency-voltage relationships.
+                        The value 'X' is read from efuse registers, and the right
+                        table is picked from multiple possible tables.
+- qcom,<clock-name>-opp-handle: phandle references to the devices for which the
+                        OPP table is filled with the clock frequency and
+                        voltage values.
+- qcom,<clock-name>-opp-store-vcorner: phandle references to the devices for
+                        which the OPP table is filled with the clock frequency
+                        and voltage corner/level.
+
+Example:
+        clock_gcc: qcom,gcc@fc400000 {
+                compatible = "qcom,gcc-8974";
+                reg = <0xfc400000 0x4000>;
+                reg-names = "cc_base";
+                vdd_dig-supply = <&pm8841_s2_corner>;
+                #clock-cells = <1>;
+        };
+
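+A speed-bin table and OPP handle, when used, might look like the following
+(the clock name, phandle and values here are illustrative only, not taken
+from a real target):
+
+        clock_gcc_gfx: qcom,gcc-gfx@1800000 {
+                compatible = "qcom,gcc-gfx-8953";
+                reg = <0x1800000 0x80000>;
+                reg-names = "cc_base";
+                vdd_dig-supply = <&pm8841_s2_corner>;
+                qcom,gfx3d_clk-opp-handle = <&msm_gpu>;
+                qcom,gfx3d_clk-speedbin0 =
+                        <         0 0>,
+                        < 650000000 5>;
+                #clock-cells = <1>;
+        };
+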
diff --git a/Documentation/devicetree/bindings/arm/msm/clock-cpu-8953.txt b/Documentation/devicetree/bindings/arm/msm/clock-cpu-8953.txt
new file mode 100644
index 0000000..85316ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/clock-cpu-8953.txt
@@ -0,0 +1,70 @@
+Qualcomm Technologies MSM8953 CPU clock tree
+
+clock-cpu-8953 is a device that represents the MSM8953 CPU subsystem clock
+tree. It lists the various power supplies that need to be scaled when the
+clocks are scaled and also other HW specific parameters like fmax tables etc.
+
+The root clock generator may have the ramp controller built in.
+Ramp control allows programming the sequence ID for pulse swallowing,
+the enable sequence, and the linking of sequence IDs.
+
+Required properties:
+- compatible:		Must be "qcom,cpu-clock-8953".
+
+- reg:			Pairs of physical base addresses and region sizes of
+			memory mapped registers.
+- reg-names:		Names of the bases for the above registers. Expected
+			bases are:
+			"c0-pll", "c1-pll", "c0-mux", "c1-mux", "cci-mux",
+			"efuse", "perf_base" (optional),
+			"rcgwr-c0-base" (optional), "rcgwr-c1-base" (optional).
+- clocks:		The clock sources used by the cluster/cci mux.
+- clock-names:		Names of the clocks listed above.
+- vdd-mx-supply:	The regulator powering all the PLLs of clusters & cci.
+- vdd-cl-supply:	The regulator powering the clusters & cci.
+- qcom,speedX-bin-vY-ZZZ:
+			A table of CPU frequency (Hz) to voltage (corner)
+			mapping that represents the max frequency possible
+			for each supported voltage level for a CPU. 'X' is
+			the speed bin into which the device falls - a
+			bin will have unique frequency-voltage relationships.
+			'Y' is the characterization version, implying that
+			characterization (deciding what speed bin a device
+			falls into) methods and/or encoding may change. The
+			values 'X' and 'Y' are read from efuse registers, and
+			the right table is picked from multiple possible tables.
+			'ZZZ' can be "cl" (for c0 & c1) or "cci", depending on
+			whether the table is for the clusters or the CCI.
+
+Optional Properties:
+- qcom,enable-qos:      Boolean property to indicate that a PM QoS request is
+			required during set-rate of the cluster clocks, to keep
+			the cluster cores from entering low power modes.
+
+Example:
+	clock_cpu: qcom,cpu-clock-8953@b116000 {
+		compatible = "qcom,cpu-clock-8953";
+		reg =   <0xb114000  0x68>,
+			<0xb014000  0x68>,
+			<0xb116000  0x400>,
+			<0xb111050  0x08>,
+			<0xb011050  0x08>,
+			<0xb1d1050  0x08>,
+			<0x00a412c  0x08>;
+		reg-names = "rcgwr-c0-base", "rcgwr-c1-base",
+			    "c0-pll", "c0-mux", "c1-mux",
+			    "cci-mux", "efuse";
+		vdd-mx-supply = <&pm8953_s7_level_ao>;
+		vdd-cl-supply = <&apc_vreg_corner>;
+		clocks = <&clock_gcc clk_xo_a_clk_src>;
+		clock-names = "xo_a";
+		qcom,num-clusters = <2>;
+		qcom,speed0-bin-v0-cl =
+			<          0 0>,
+			< 2208000000 7>;
+		qcom,speed0-bin-v0-cci =
+			<          0 0>,
+			<  883200000 7>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/qcom,msm-clock-controller.txt b/Documentation/devicetree/bindings/clock/qcom,msm-clock-controller.txt
new file mode 100644
index 0000000..ef7d9c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,msm-clock-controller.txt
@@ -0,0 +1,22 @@
+Qualcomm Technologies MSM Clock Controller
+
+Required properties :
+- compatible : shall contain "qcom,msm-clock-controller"
+- reg : shall contain base register location and length
+- reg-names: names of registers listed in the same order as in
+		the reg property.
+- #clock-cells : shall contain 1
+- #reset-cells : shall contain 1
+
+Optional properties :
+- vdd_<rail>-supply: The logic rail supply.
+
+Example:
+	clock_gcc: qcom,gcc@1800000 {
+		compatible = "qcom,msm-clock-controller";
+		reg = <0x1800000 0x80000>;
+		reg-names = "cc-base";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+		clock-names = "a7_debug_clk";
+		clocks = <&clock_a7pll clk_a7_debug_mux>;
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/devfreq-spdm.txt b/Documentation/devicetree/bindings/devfreq/devfreq-spdm.txt
new file mode 100644
index 0000000..16303f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/devfreq-spdm.txt
@@ -0,0 +1,84 @@
+MSM SPDM bandwidth monitor device
+
+devfreq-spdm represents a device that is monitored by the SPDM hardware to
+measure the traffic status of configured master ports on the bus.
+
+
+Required properties:
+-compatible:			Must be "qcom,devfreq_spdm"
+-clock-names:			Clocks used to measure current bus frequency.
+				The expected name is "cci_clk".
+-clocks:			References to named clocks
+-qcom,spdm-client:		Client id of the port being monitored
+-qcom,bw-upstep:		Initial up vote size in MB/s
+-qcom,bw-dwnstep:		Initial down vote size in MB/s
+-qcom,max-vote:			Vote ceiling in MB/s
+-qcom,ports:			SPDM ports used by this device
+-qcom,alpha-up:			SPDM filter up alpha value
+-qcom,alpha-down:		SPDM filter down alpha value
+-qcom,bucket-size:		SPDM filter bucket size
+-qcom,pl-freqs:			The driver supports different filter values at
+				three different performance levels.  This value
+				defines the cut-over frequencies.
+-qcom,reject-rate:		Desired rejection rate used to calculate
+				SPDM threshold
+-qcom,response-time-us:	Desired response time used to calculate
+				SPDM threshold
+-qcom,cci-response-time-us:	Desired response time used to calculate
+				SPDM threshold when CCI is under heavy load
+-qcom,max-cci-freq:		CCI frequency at which cci-response-time-us
+				is used
+-qcom,up-step-multp:		Used to increase the rate of growth of up votes
+-qcom,spdm-interval:		Down-vote polling interval
+
+Example:
+devfreq_spdm_cpu {
+	compatible = "qcom,devfreq_spdm";
+	qcom,msm-bus,name = "devfreq_spdm";
+	qcom,msm-bus,num-cases = <2>;
+	qcom,msm-bus,num-paths = <1>;
+	qcom,msm-bus,vectors-KBps =
+			<1 512 0 0>,
+			<1 512 0 0>;
+	qcom,spdm-client = <0>;
+
+	clock-names = "cci_clk";
+	clocks = <&clock_cpu clk_cci_clk>;
+
+	qcom,bw-upstep = <100>;
+	qcom,bw-dwnstep = <100>;
+	qcom,max-vote = <10000>;
+	qcom,up-step-multp = <2>;
+	qcom,spdm-interval = <100>;
+
+	qcom,ports = <16>;
+	qcom,alpha-up = <7>;
+	qcom,alpha-down = <15>;
+	qcom,bucket-size = <8>;
+
+	/*max pl1 freq, max pl2 freq*/
+	qcom,pl-freqs = <149999999 150000000>;
+
+	/* pl1 low, pl1 high, pl2 low, pl2 high, pl3 low, pl3 high */
+	qcom,reject-rate = <5000 5000 5000 5000 5000 5000>;
+	/* pl1 low, pl1 high, pl2 low, pl2 high, pl3 low, pl3 high */
+	qcom,response-time-us = <220 220 2000 2000 900 900>;
+	/* pl1 low, pl1 high, pl2 low, pl2 high, pl3 low, pl3 high */
+	qcom,cci-response-time-us = <50 50 30 30 20 20>;
+	qcom,max-cci-freq = <600000000>;
+};
+
+This device is always used with the SPDM governor, which requires a device
+tree entry to know which IRQ to respond to.
+
+Required properties:
+-compatible:			Must be "qcom,gov_spdm_hyp"
+-interrupt-names:		SPDM irq to handle. Name should be "spdm-irq"
+-interrupts:			The interrupt number assigned to the SPDM hw
+
+Example:
+devfreq_spdm_gov {
+	compatible = "qcom,gov_spdm_hyp";
+	interrupt-names = "spdm-irq";
+	interrupts = <0 192 0>;
+};
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
index 3b4436e..bdba526 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.txt
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -11,6 +11,8 @@
  * "qcom,scm-msm8660" for MSM8660 platforms
  * "qcom,scm-msm8690" for MSM8690 platforms
  * "qcom,scm" for later processors (MSM8916, APQ8084, MSM8974, etc)
+ * "android,firmware" for firmware image
+ * "android,vbmeta" for setting system properties for verified boot.
 - clocks: One to three clocks may be required based on compatible.
  * Only core clock required for "qcom,scm-apq8064", "qcom,scm-msm8660", and "qcom,scm-msm8960"
  * Core, iface, and bus clocks required for "qcom,scm"
@@ -26,3 +28,26 @@
 			clock-names = "core", "bus", "iface";
 		};
 	};
+
+Example for SDM845:
+
+	firmware {
+		android {
+			compatible = "android,firmware";
+			vbmeta {
+				compatible = "android,vbmeta";
+				parts = "vbmeta,boot,system,vendor,dtbo";
+			};
+
+			fstab {
+				compatible = "android,fstab";
+				vendor {
+					compatible = "android,vendor";
+					dev = "/dev/block/platform/soc/1d84000.ufshc/by-name/vendor";
+					type = "ext4";
+					mnt_flags = "ro,barrier=1,discard";
+					fsmgr_flags = "wait,slotselect,avb";
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index b73c96d..e15f71c 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -184,14 +184,20 @@
 drive-open-drain	- drive with open drain
 drive-open-source	- drive with open source
 drive-strength		- sink or source at most X mA
-input-enable		- enable input on pin (no effect on output)
-input-disable		- disable input on pin (no effect on output)
+input-enable		- enable input on pin (no effect on output, such as
+			  enabling an input buffer)
+input-disable		- disable input on pin (no effect on output, such as
+			  disabling an input buffer)
 input-schmitt-enable	- enable schmitt-trigger mode
 input-schmitt-disable	- disable schmitt-trigger mode
 input-debounce		- debounce mode with debound time X
 power-source		- select between different power supplies
 low-power-enable	- enable low power mode
 low-power-disable	- disable low power mode
+output-disable		- disable output on a pin (such as disable an output
+			  buffer)
+output-enable		- enable output on a pin without actively driving it
+			  (such as enabling an output buffer)
 output-low		- set the pin to output mode with low level
 output-high		- set the pin to output mode with high level
 slew-rate		- set the slew rate
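+
+For example, a pin configuration node that enables the input buffer and
+disables the output buffer might look like this (the node and pin names are
+illustrative only):
+
+	uart_rx_cfg: uart-rx-cfg {
+		pins = "gpio4";
+		bias-pull-up;
+		input-enable;
+		output-disable;
+	};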
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-typec.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-typec.txt
new file mode 100644
index 0000000..aa4d2f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-typec.txt
@@ -0,0 +1,61 @@
+QPNP USB Type-C module
+
+QPNP USB Type-C module supports USB Type-C ports and detection of
+USB Type-C chargers that can supply up to 3A Vbus current for charging.
+
+The QPNP USB Type-C module interfaces via the SPMI bus.
+
+Required properties :
+- compatible : Must be "qcom,qpnp-typec"
+- reg:			The SPMI address for this peripheral
+- interrupts:		Specifies the interrupt associated with the peripheral.
+- interrupt-names:	Specifies the interrupt names for the peripheral. Every
+			available interrupt needs to have an associated name
+			to identify its purpose.
+
+			- vrd-change:		Triggers on change in current
+						capability of charger.
+			- ufp-detach:		Triggers on cable detach in
+						UFP mode.
+			- ufp-detect:		Triggers on charger insertion.
+			- dfp-detach:		Triggers on cable detach in
+						DFP mode.
+			- dfp-detect:		Triggers on OTG cable insertion.
+			- vbus-err:		Triggers if VBUS is not
+						detected within 275 msec after
+						CC detection in UFP mode.
+			- vconn-oc:		Triggers on VCONN overcurrent
+						in DFP mode with active cable.
+
+Optional properties:
+- pinctrl-names : This should be defined if a target uses the pinctrl framework
+  for the SSMUX control pin. See "pinctrl" in
+  Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt.
+  It should specify the names of the configs that pinctrl can install in the
+  driver. The following pinctrl configs can be installed:
+      "typec_ssmux_config" : Default configuration of pins.
+- <supply-name>-supply: handle to the regulator device tree node.
+        "supply-name" is "ss-mux" regulator to drive super-speed MUX chip.
+- qcom,role-reversal-supported : A boolean property that when present enables
+			support of dual role class.
+
+Example:
+	qcom,qpnp-typec@bf00 {
+		compatible = "qcom,qpnp-typec";
+		reg = <0xbf00 0x100>;
+		interrupts =	<0x0 0xbf 0x0>,
+				<0x0 0xbf 0x1>,
+				<0x0 0xbf 0x2>,
+				<0x0 0xbf 0x3>,
+				<0x0 0xbf 0x4>,
+				<0x0 0xbf 0x6>,
+				<0x0 0xbf 0x7>;
+
+		interrupt-names =	"vrd-change",
+					"ufp-detach",
+					"ufp-detect",
+					"dfp-detach",
+					"dfp-detect",
+					"vbus-err",
+					"vconn-oc";
+	};
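+
+If the SSMUX pinctrl configuration, supply and dual role support are used, the
+node above might additionally contain the following entries (the regulator
+phandle here is illustrative only):
+
+		pinctrl-names = "typec_ssmux_config";
+		pinctrl-0 = <&typec_ssmux_config>;
+		ss-mux-supply = <&ssmux_vreg>;
+		qcom,role-reversal-supported;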
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt
index c200f94..d7111cf 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt
@@ -71,6 +71,8 @@
 				If not specified the default value is active-low.
 - qcom,parallel-external-current-sense If present specifies external rsense is
 				used for charge current sensing.
+- qcom,stacked-batfet:		Boolean flag. Specifies if the parallel charger
+				has a stacked BATFET configuration.
 
 Example for standalone charger:
 
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
index abbb981..4f12ec0 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
@@ -36,6 +36,21 @@
 	      connected to AUX. Set this flag to indicate the thermistor
 	      doesn't exist.
 
+- qcom,parallel-mode
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies parallel charging mode. If not specified, MID-MID
+              option is selected by default.
+
+- qcom,stacked-batfet
+  Usage:      optional
+  Value type: <empty>
+  Definition: boolean flag. Specifies if the parallel charger has a stacked
+              BATFET configuration.
+              In a stacked-BATFET configuration the main and parallel
+              chargers' BATFETs are stacked one after the other, so all the
+              charge current (FCC) flows through the main charger. In a
+              non-stacked configuration each charger controls its charge
+              current (FCC) separately.
+
 ================================================
 Second Level Nodes - SMB1355 Charger Peripherals
 ================================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
index fd2729f..fc0ee1f 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
@@ -93,6 +93,12 @@
 		will use io-channel-names to match IIO input names
 		with IIO specifiers.
 
+- qcom,stacked-batfet
+  Usage:      optional
+  Value type: <empty>
+  Definition: boolean flag. Specifies if the parallel charger has a stacked
+              BATFET configuration.
+
 ================================================
 Second Level Nodes - SMB138X Charger Peripherals
 ================================================
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index 6405371..f8c8a69 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -160,6 +160,9 @@
    "efuse_addr": EFUSE address to read and update analog tune parameter.
    "emu_phy_base" : phy base address used for programming emulation target phy.
    "ref_clk_addr" : ref_clk bcr address used for on/off ref_clk before reset.
+   "tcsr_clamp_dig_n" : To enable/disable digital clamp to the phy. When
+   de-asserted, it will prevent random leakage from qusb2 phy resulting from
+   out of sequence turn on/off of 1p8, 3p3 and DVDD regulators.
    "refgen_north_bg_reg" : address used to read REFGEN status for overriding QUSB PHY register.
  - clocks: a list of phandles to the PHY clocks. Use as per
    Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -179,6 +182,8 @@
  - qcom,major-rev: provide major revision number to differentiate power up sequence. default is 2.0
  - pinctrl-names/pinctrl-0/1: The GPIOs configured as output function. Names represents "active"
    state when attached in host mode and "suspend" state when detached.
+ - qcom,tune2-efuse-correction: The correction to be applied to the fused
+   TUNE2 value for improved rise/fall times.
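+   For example (illustrative value only):
+	qcom,tune2-efuse-correction = <(-2)>;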
 
 Example:
 	qusb_phy: qusb@f9b39000 {
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index a491bd7..a37e441 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -24,6 +24,7 @@
 ams	AMS AG
 amstaos	AMS-Taos Inc.
 analogix	Analogix Semiconductor, Inc.
+android	Google
 apm	Applied Micro Circuits Corporation (APM)
 aptina	Aptina Imaging
 arasan	Arasan Chip Systems
diff --git a/arch/arm/boot/dts/qcom/msm-arm-smmu-sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/msm-arm-smmu-sdxpoorwills.dtsi
new file mode 100644
index 0000000..580df55
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm-arm-smmu-sdxpoorwills.dtsi
@@ -0,0 +1,104 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+&soc {
+	apps_smmu: apps-smmu@0x15000000 {
+		compatible = "qcom,qsmmu-v500";
+		reg = <0x15000000 0x20000>,
+			<0x15022000 0x20>;
+		reg-names = "base", "tcu-base";
+		#iommu-cells = <2>;
+		qcom,use-3-lvl-tables;
+		#global-interrupts = <1>;
+		#size-cells = <1>;
+		#address-cells = <1>;
+		ranges;
+		interrupts =	<GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,msm-bus,name = "apps_smmu";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<MSM_BUS_MASTER_QDSS_BAM>,
+			<MSM_BUS_SLAVE_OCIMEM>,
+			<0 0>,
+			<MSM_BUS_MASTER_QDSS_BAM>,
+			<MSM_BUS_SLAVE_OCIMEM>,
+			<0 1000>;
+
+		anoc_1_tbu: anoc_1_tbu@0x15025000 {
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x15025000 0x1000>,
+				<0x15022200 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x0 0x400>;
+			qcom,msm-bus,name = "apps_smmu";
+			qcom,msm-bus,num-cases = <2>;
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+				<MSM_BUS_MASTER_QDSS_BAM>,
+				<MSM_BUS_SLAVE_OCIMEM>,
+				<0 0>,
+				<MSM_BUS_MASTER_QDSS_BAM>,
+				<MSM_BUS_SLAVE_OCIMEM>,
+				<0 1000>;
+		};
+
+		anoc_2_tbu: anoc_2_tbu@0x15029000 {
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x15029000 0x1000>,
+				<0x15022208 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x400 0x400>;
+			qcom,msm-bus,name = "apps_smmu";
+			qcom,msm-bus,num-cases = <2>;
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+				<MSM_BUS_MASTER_QDSS_BAM>,
+				<MSM_BUS_SLAVE_OCIMEM>,
+				<0 0>,
+				<MSM_BUS_MASTER_QDSS_BAM>,
+				<MSM_BUS_SLAVE_OCIMEM>,
+				<0 1000>;
+		};
+	};
+
+	apps_iommu_test_device {
+		compatible = "iommu-debug-test";
+		/*
+		 * This SID belongs to CRYPTO. We can't use a fake SID for
+		 * the apps_smmu device.
+		 */
+		iommus = <&apps_smmu 0x1a0 0x0>;
+	};
+};
diff --git a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
deleted file mode 100644
index fa21dd7..0000000
--- a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
+++ /dev/null
@@ -1,137 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <dt-bindings/interrupt-controller/irq.h>
-
-&i2c_7 {
-	status = "okay";
-	smb138x: qcom,smb138x@8 {
-		compatible = "qcom,i2c-pmic";
-		reg = <0x8>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupt-parent = <&spmi_bus>;
-		interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
-		interrupt_names = "smb138x";
-		interrupt-controller;
-		#interrupt-cells = <3>;
-		qcom,periph-map = <0x10 0x11 0x12 0x13 0x14 0x16 0x36>;
-
-		smb138x_revid: qcom,revid@100 {
-			compatible = "qcom,qpnp-revid";
-			reg = <0x100 0x100>;
-		};
-
-		smb138x_tadc: qcom,tadc@3600 {
-			compatible = "qcom,tadc";
-			reg = <0x3600 0x100>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			#io-channel-cells = <1>;
-			interrupt-parent = <&smb138x>;
-			interrupts = <0x36 0x0 IRQ_TYPE_EDGE_BOTH>;
-			interrupt-names = "eoc";
-
-			batt_temp@0 {
-				reg = <0>;
-				qcom,rbias = <68100>;
-				qcom,rtherm-at-25degc = <68000>;
-				qcom,beta-coefficient = <3450>;
-			};
-
-			skin_temp@1 {
-				reg = <1>;
-				qcom,rbias = <33000>;
-				qcom,rtherm-at-25degc = <68000>;
-				qcom,beta-coefficient = <3450>;
-			};
-
-			die_temp@2 {
-				reg = <2>;
-				qcom,scale = <(-1306)>;
-				qcom,offset = <397904>;
-			};
-
-			batt_i@3 {
-				reg = <3>;
-				qcom,channel = <3>;
-				qcom,scale = <(-20000000)>;
-			};
-
-			batt_v@4 {
-				reg = <4>;
-				qcom,scale = <5000000>;
-			};
-
-			input_i@5 {
-				reg = <5>;
-				qcom,scale = <14285714>;
-			};
-
-			input_v@6 {
-				reg = <6>;
-				qcom,scale = <25000000>;
-			};
-
-			otg_i@7 {
-				reg = <7>;
-				qcom,scale = <5714286>;
-			};
-		};
-
-		smb1381_charger: qcom,smb1381-charger@1000 {
-			compatible = "qcom,smb138x-parallel-slave";
-			qcom,pmic-revid = <&smb138x_revid>;
-			reg = <0x1000 0x700>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			interrupt-parent = <&smb138x>;
-			io-channels =
-				<&smb138x_tadc 1>,
-				<&smb138x_tadc 2>,
-				<&smb138x_tadc 3>,
-				<&smb138x_tadc 14>,
-				<&smb138x_tadc 15>,
-				<&smb138x_tadc 16>,
-				<&smb138x_tadc 17>;
-			io-channel-names =
-				"connector_temp",
-				"charger_temp",
-				"batt_i",
-				"connector_temp_thr1",
-				"connector_temp_thr2",
-				"connector_temp_thr3",
-				"charger_temp_max";
-
-			qcom,chgr@1000 {
-				reg = <0x1000 0x100>;
-				interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
-				interrupt-names = "chg-state-change";
-			};
-
-			qcom,chgr-misc@1600 {
-				reg = <0x1600 0x100>;
-				interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
-					     <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
-				interrupt-names = "wdog-bark",
-						  "temperature-change";
-			};
-		};
-	};
-};
-
-&smb1381_charger {
-	smb138x_vbus: qcom,smb138x-vbus {
-		status = "disabled";
-		regulator-name = "smb138x-vbus";
-	};
-};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
index 30484fb..89945e3 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
@@ -105,3 +105,29 @@
 		qcom,vadc-thermal-node;
 	};
 };
+
+&i2c_3 {
+	status = "okay";
+	#include "smb138x.dtsi"
+};
+
+&smb138x {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <42 IRQ_TYPE_LEVEL_LOW>;
+
+	smb1381_charger: qcom,smb1381-charger@1000 {
+		compatible = "qcom,smb138x-charger";
+		qcom,use-extcon;
+	};
+};
+
+&smb138x_vbus {
+	status = "okay";
+};
+
+&usb {
+	status = "okay";
+	extcon = <&smb1381_charger>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi
new file mode 100644
index 0000000..afc8896
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi
@@ -0,0 +1,24 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,ion {
+		compatible = "qcom,msm-ion";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		system_heap: qcom,ion-heap@25 {
+			reg = <25>;
+			qcom,ion-heap-type = "SYSTEM";
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
index 73adbdc..f580901 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
@@ -105,3 +105,29 @@
 		qcom,vadc-thermal-node;
 	};
 };
+
+&i2c_3 {
+	status = "okay";
+	#include "smb138x.dtsi"
+};
+
+&smb138x {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <42 IRQ_TYPE_LEVEL_LOW>;
+
+	smb1381_charger: qcom,smb1381-charger@1000 {
+		compatible = "qcom,smb138x-charger";
+		qcom,use-extcon;
+	};
+};
+
+&smb138x_vbus {
+	status = "okay";
+};
+
+&usb {
+	status = "okay";
+	extcon = <&smb1381_charger>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
index dce3bbf..9b8e751 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
@@ -1298,6 +1298,19 @@
 				};
 			};
 		};
+
+		smb_int_default: smb_int_default {
+			mux {
+				pins = "gpio42";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio42";
+				drive-strength = <2>;
+				bias-pull-up;
+				input-enable;
+			};
+		};
 	};
 };
 
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
index def0e13..926044a 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
@@ -12,6 +12,7 @@
  */
 #include <dt-bindings/clock/qcom,rpmh.h>
 #include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
 
 &soc {
 	/* USB port for DWC3 controller */
@@ -46,6 +47,20 @@
 
 		resets = <&clock_gcc GCC_USB30_BCR>;
 		reset-names = "core_reset";
+		status = "disabled";
+
+		qcom,msm-bus,name = "usb";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <3>;
+		qcom,msm-bus,vectors-KBps =
+			<MSM_BUS_MASTER_USB3 MSM_BUS_SLAVE_EBI_CH0 0 0>,
+			<MSM_BUS_MASTER_USB3 MSM_BUS_SLAVE_IPA_CFG 0 0>,
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3 0 0>,
+			<MSM_BUS_MASTER_USB3
+				MSM_BUS_SLAVE_EBI_CH0 240000 700000>,
+			<MSM_BUS_MASTER_USB3
+				MSM_BUS_SLAVE_IPA_CFG 0 2400>,
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3 0 40000>;
 
 		dwc3@a600000 {
 			compatible = "snps,dwc3";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index b6393a91..f5351de 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -13,6 +13,7 @@
 #include <dt-bindings/soc/qcom,tcs-mbox.h>
 #include "skeleton.dtsi"
 #include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/clock/qcom,cpu-a7.h>
 #include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
@@ -169,6 +170,20 @@
 		};
 	};
 
+	msm_cpufreq: qcom,msm-cpufreq {
+		compatible = "qcom,msm-cpufreq";
+		clocks = <&clock_cpu APCS_CLK>;
+		clock-names = "cpu0_clk";
+
+		qcom,cpufreq-table-0 =
+				<  153600 >,
+				<  300000 >,
+				<  345600 >,
+				<  576000 >,
+				< 1094400 >,
+				< 1497600 >;
+	};
+
 	clock_gcc: qcom,gcc@100000 {
 		compatible = "qcom,gcc-sdxpoorwills";
 		reg = <0x100000 0x1f0000>;
@@ -711,3 +726,5 @@
 #include "sdxpoorwills-bus.dtsi"
 #include "sdxpoorwills-thermal.dtsi"
 #include "sdxpoorwills-audio.dtsi"
+#include "sdxpoorwills-ion.dtsi"
+#include "msm-arm-smmu-sdxpoorwills.dtsi"
diff --git a/arch/arm/boot/dts/qcom/smb138x.dtsi b/arch/arm/boot/dts/qcom/smb138x.dtsi
new file mode 100644
index 0000000..218cd6c
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/smb138x.dtsi
@@ -0,0 +1,166 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+smb138x: qcom,smb138x@44 {
+	compatible = "qcom,i2c-pmic";
+	reg = <0x44>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	interrupt-parent = <&spmi_bus>;
+	interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+	interrupt_names = "smb138x";
+	interrupt-controller;
+	#interrupt-cells = <3>;
+	qcom,periph-map = <0x10 0x11 0x12 0x13 0x14 0x16 0x36>;
+
+	smb138x_revid: qcom,revid@100 {
+		compatible = "qcom,qpnp-revid";
+		reg = <0x100 0x100>;
+	};
+
+	smb138x_tadc: qcom,tadc@3600 {
+		compatible = "qcom,tadc";
+		reg = <0x3600 0x100>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		#io-channel-cells = <1>;
+		interrupt-parent = <&smb138x>;
+		interrupts = <0x36 0x0 IRQ_TYPE_EDGE_BOTH>;
+		interrupt-names = "eoc";
+
+		batt_temp@0 {
+			reg = <0>;
+			qcom,rbias = <68100>;
+			qcom,rtherm-at-25degc = <68000>;
+			qcom,beta-coefficient = <3450>;
+		};
+
+		skin_temp@1 {
+			reg = <1>;
+			qcom,rbias = <33000>;
+			qcom,rtherm-at-25degc = <68000>;
+			qcom,beta-coefficient = <3450>;
+		};
+
+		die_temp@2 {
+			reg = <2>;
+			qcom,scale = <(-1306)>;
+			qcom,offset = <397904>;
+		};
+
+		batt_i@3 {
+			reg = <3>;
+			qcom,channel = <3>;
+			qcom,scale = <(-20000000)>;
+		};
+
+		batt_v@4 {
+			reg = <4>;
+			qcom,scale = <5000000>;
+		};
+
+		input_i@5 {
+			reg = <5>;
+			qcom,scale = <14285714>;
+		};
+
+		input_v@6 {
+			reg = <6>;
+			qcom,scale = <25000000>;
+		};
+
+		otg_i@7 {
+			reg = <7>;
+			qcom,scale = <5714286>;
+		};
+	};
+
+	smb1381_charger: qcom,smb1381-charger@1000 {
+		compatible = "qcom,smb138x-charger";
+		qcom,pmic-revid = <&smb138x_revid>;
+		reg = <0x1000 0x700>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		interrupt-parent = <&smb138x>;
+		io-channels =
+			<&smb138x_tadc 1>,
+			<&smb138x_tadc 2>,
+			<&smb138x_tadc 3>,
+			<&smb138x_tadc 14>,
+			<&smb138x_tadc 15>,
+			<&smb138x_tadc 16>,
+			<&smb138x_tadc 17>;
+		io-channel-names =
+			"connector_temp",
+			"charger_temp",
+			"batt_i",
+			"connector_temp_thr1",
+			"connector_temp_thr2",
+			"connector_temp_thr3",
+			"charger_temp_max";
+
+		qcom,chgr@1000 {
+			reg = <0x1000 0x100>;
+			interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "chg-state-change";
+		};
+
+		qcom,otg@1100 {
+			reg = <0x1100 0x100>;
+			interrupts = <0x11 0x0 IRQ_TYPE_EDGE_BOTH>,
+				     <0x11 0x1 IRQ_TYPE_EDGE_BOTH>,
+				     <0x11 0x2 IRQ_TYPE_EDGE_BOTH>,
+				     <0x11 0x3 IRQ_TYPE_EDGE_BOTH>;
+
+			interrupt-names = "otg-fail",
+					  "otg-overcurrent",
+					  "otg-oc-dis-sw-sts",
+					  "testmode-change-detect";
+		};
+
+		qcom,usb-chgpth@1300 {
+			reg = <0x1300 0x100>;
+			interrupts = <0x13 0x0 IRQ_TYPE_EDGE_BOTH>,
+				     <0x13 0x1 IRQ_TYPE_EDGE_BOTH>,
+				     <0x13 0x2 IRQ_TYPE_EDGE_BOTH>,
+				     <0x13 0x3 IRQ_TYPE_EDGE_BOTH>,
+				     <0x13 0x4 IRQ_TYPE_EDGE_BOTH>,
+				     <0x13 0x5 IRQ_TYPE_EDGE_RISING>,
+				     <0x13 0x6 IRQ_TYPE_EDGE_RISING>,
+				     <0x13 0x7 IRQ_TYPE_EDGE_RISING>;
+
+			interrupt-names = "usbin-collapse",
+					  "usbin-lt-3p6v",
+					  "usbin-uv",
+					  "usbin-ov",
+					  "usbin-plugin",
+					  "usbin-src-change",
+					  "usbin-icl-change",
+					  "type-c-change";
+		};
+
+		qcom,chgr-misc@1600 {
+			reg = <0x1600 0x100>;
+			interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+				     <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog-bark",
+					  "temperature-change";
+		};
+
+		smb138x_vbus: qcom,smb138x-vbus {
+			status = "disabled";
+			regulator-name = "smb138x-vbus";
+		};
+	};
+};
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index e015eff..d3c8152 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -31,6 +31,7 @@
 CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_MSM=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
@@ -315,6 +316,12 @@
 CONFIG_MDM_CLOCK_CPU_SDXPOORWILLS=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
 CONFIG_QCOM_SCM=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_MSM_SMEM=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index d0568aa..a2846ca 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -33,6 +33,7 @@
 CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_MSM=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
@@ -311,6 +312,12 @@
 CONFIG_MDM_CLOCK_CPU_SDXPOORWILLS=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
 CONFIG_QCOM_SCM=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_QCOM_BUS_SCALING=y
@@ -332,6 +339,7 @@
 CONFIG_MSM_PM=y
 CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_EXTCON=y
 CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 74643f5..a1b5d19 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -13,7 +13,8 @@
 struct dma_iommu_mapping {
 	/* iommu specific data */
 	struct iommu_domain	*domain;
-
+	bool			init;	/* fully initialized on first attach */
+	const struct dma_map_ops *ops;	/* dma ops installed on attach */
 	unsigned long		**bitmaps;	/* array of bitmaps */
 	unsigned int		nr_bitmaps;	/* nr of elements in array */
 	unsigned int		extensions;
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index 4761bc5..f9dfe80 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -43,7 +43,6 @@
 	select CPU_V7
 	select HAVE_ARM_ARCH_TIMER
 	select MSM_CORTEX_A7
-	select COMMON_CLK_MSM
 	select PINCTRL
 	select QCOM_SCM if SMP
 	select MSM_JTAG_MM if CORESIGHT_ETM
@@ -59,9 +58,10 @@
 	select PINCTRL
 	select QCOM_SCM if SMP
 	select PM_DEVFREQ
-	select COMMON_CLK
-	select COMMON_CLK_QCOM
-	select QCOM_GDSC
+	select CLKDEV_LOOKUP
+	select HAVE_CLK
+	select HAVE_CLK_PREPARE
+	select COMMON_CLK_MSM
 
 config ARCH_SDM450
 	bool "Enable support for SDM450"
@@ -70,9 +70,10 @@
 	select PINCTRL
 	select QCOM_SCM if SMP
 	select PM_DEVFREQ
-	select COMMON_CLK
-	select COMMON_CLK_QCOM
-	select QCOM_GDSC
+	select CLKDEV_LOOKUP
+	select HAVE_CLK
+	select HAVE_CLK_PREPARE
+	select COMMON_CLK_MSM
 
 endmenu
 endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index baf63ea..bfff16a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2289,24 +2289,48 @@ const struct dma_map_ops iommu_coherent_ops = {
  * IO address ranges, which is required to perform memory allocation and
  * mapping with IOMMU aware functions.
  *
- * The client device need to be attached to the mapping with
- * arm_iommu_attach_device function.
+ * Clients may use iommu_domain_set_attr() to set additional flags prior
+ * to calling arm_iommu_attach_device() to complete initialization.
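+ *
+ * A minimal sketch of the expected call sequence (illustrative only):
+ *
+ *	mapping = arm_iommu_create_mapping(bus, base, size);
+ *	if (!IS_ERR(mapping)) {
+ *		iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &attr);
+ *		err = arm_iommu_attach_device(dev, mapping);
+ *	}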
  */
 struct dma_iommu_mapping *
 arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
 {
 	unsigned int bits = size >> PAGE_SHIFT;
-	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
 	struct dma_iommu_mapping *mapping;
+
+	if (!bits)
+		return ERR_PTR(-EINVAL);
+
+	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+	if (!mapping)
+		return ERR_PTR(-ENOMEM);
+
+	mapping->base = base;
+	mapping->bits = bits;
+
+	mapping->domain = iommu_domain_alloc(bus);
+	if (!mapping->domain)
+		goto err_domain_alloc;
+
+	mapping->init = false;
+	return mapping;
+
+err_domain_alloc:
+	kfree(mapping);
+	return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(arm_iommu_create_mapping);
+
+static int
+iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping)
+{
+	unsigned int bitmap_size = BITS_TO_LONGS(mapping->bits) * sizeof(long);
+	u64 size = (u64)mapping->bits << PAGE_SHIFT;
 	int extensions = 1;
 	int err = -ENOMEM;
 
-	/* currently only 32-bit DMA address space is supported */
-	if (size > DMA_BIT_MASK(32) + 1)
-		return ERR_PTR(-ERANGE);
-
 	if (!bitmap_size)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	WARN(!IS_ALIGNED(size, SZ_128M),
 			"size is not aligned to 128M, alignment enforced");
@@ -2316,45 +2340,30 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
 		bitmap_size = PAGE_SIZE;
 	}
 
-	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
-	if (!mapping)
-		goto err;
-
 	mapping->bitmap_size = bitmap_size;
 	mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
-				GFP_KERNEL);
+			GFP_KERNEL);
+
 	if (!mapping->bitmaps)
-		goto err2;
+		return -ENOMEM;
 
 	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
 	if (!mapping->bitmaps[0])
-		goto err3;
+		goto err;
 
 	mapping->nr_bitmaps = 1;
 	mapping->extensions = extensions;
-	mapping->base = base;
-	mapping->bits = BITS_PER_BYTE * bitmap_size;
 
 	spin_lock_init(&mapping->lock);
+	mapping->ops = &iommu_ops;
+	return 0;
 
-	mapping->domain = iommu_domain_alloc(bus);
-	if (!mapping->domain)
-		goto err4;
-
-	kref_init(&mapping->kref);
-	return mapping;
-err4:
-	kfree(mapping->bitmaps[0]);
-err3:
-	kfree(mapping->bitmaps);
-err2:
-	kfree(mapping);
 err:
-	return ERR_PTR(err);
+	kfree(mapping->bitmaps);
+	return err;
 }
-EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
 
-static void release_iommu_mapping(struct kref *kref)
+static void iommu_release_mapping(struct kref *kref)
 {
 	int i;
 	struct dma_iommu_mapping *mapping =
@@ -2385,13 +2394,70 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
 	return 0;
 }
 
+/*
+ * arm_iommu_release_mapping
+ * @mapping: allocated via arm_iommu_create_mapping()
+ *
+ * Frees all resources associated with the iommu mapping.
+ * The device associated with this mapping must be in the 'detached' state.
+ */
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 {
-	if (mapping)
-		kref_put(&mapping->kref, release_iommu_mapping);
+	int is_fast = 0;
+	void (*release)(struct kref *kref);
+
+	if (!mapping)
+		return;
+
+	if (!mapping->init) {
+		iommu_domain_free(mapping->domain);
+		kfree(mapping);
+		return;
+	}
+
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+
+	if (is_fast)
+		release = fast_smmu_release_mapping;
+	else
+		release = iommu_release_mapping;
+
+	kref_put(&mapping->kref, release);
 }
 EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
 
+static int arm_iommu_init_mapping(struct device *dev,
+			    struct dma_iommu_mapping *mapping)
+{
+	int err = -EINVAL;
+	u64 size = (u64)mapping->bits << PAGE_SHIFT;
+	int is_fast = 0;
+
+	if (mapping->init) {
+		kref_get(&mapping->kref);
+		return 0;
+	}
+
+	/* currently only 32-bit DMA address space is supported */
+	if (size > DMA_BIT_MASK(32) + 1) {
+		dev_err(dev, "dma mask %llx too small\n", dma_get_mask(dev));
+		return -ERANGE;
+	}
+
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+
+	if (is_fast)
+		err = fast_smmu_init_mapping(dev, mapping);
+	else
+		err = iommu_init_mapping(dev, mapping);
+	if (!err) {
+		kref_init(&mapping->kref);
+		mapping->init = true;
+	}
+	return err;
+}
+
 static int __arm_iommu_attach_device(struct device *dev,
 				     struct dma_iommu_mapping *mapping)
 {
@@ -2401,13 +2467,33 @@ static int __arm_iommu_attach_device(struct device *dev,
 	if (err)
 		return err;
 
-	kref_get(&mapping->kref);
 	to_dma_iommu_mapping(dev) = mapping;
 
 	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
 	return 0;
 }
 
+static void __arm_iommu_detach_device(struct device *dev)
+{
+	struct dma_iommu_mapping *mapping;
+	int is_fast;
+
+	mapping = to_dma_iommu_mapping(dev);
+	if (!mapping) {
+		dev_warn(dev, "Not attached\n");
+		return;
+	}
+
+	if (msm_dma_unmap_all_for_dev(dev))
+		dev_warn(dev, "IOMMU detach with outstanding mappings\n");
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+
+	iommu_detach_device(mapping->domain, dev);
+	to_dma_iommu_mapping(dev) = NULL;
+
+	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
+}
+
 /**
  * arm_iommu_attach_device
  * @dev: valid struct device pointer
@@ -2426,48 +2512,36 @@ int arm_iommu_attach_device(struct device *dev,
 {
 	int err;
 	int s1_bypass = 0;
-	int is_fast = 0;
+	struct iommu_group *group = dev->iommu_group;
 
-	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
-	if (is_fast)
-		return fast_smmu_attach_device(dev, mapping);
+	if (!group) {
+		dev_err(dev, "No iommu associated with device\n");
+		return -EINVAL;
+	}
+
+	if (iommu_get_domain_for_dev(dev)) {
+		dev_err(dev, "Device already attached to other iommu_domain\n");
+		return -EINVAL;
+	}
 
 	err = __arm_iommu_attach_device(dev, mapping);
 	if (err)
 		return err;
 
+	err = arm_iommu_init_mapping(dev, mapping);
+	if (err) {
+		__arm_iommu_detach_device(dev);
+		return err;
+	}
+
 	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
-					&s1_bypass);
+			&s1_bypass);
 	if (!s1_bypass)
-		set_dma_ops(dev, &iommu_ops);
+		set_dma_ops(dev, mapping->ops);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 
-static void __arm_iommu_detach_device(struct device *dev)
-{
-	struct dma_iommu_mapping *mapping;
-	int is_fast;
-
-	mapping = to_dma_iommu_mapping(dev);
-	if (!mapping) {
-		dev_warn(dev, "Not attached\n");
-		return;
-	}
-
-	if (msm_dma_unmap_all_for_dev(dev))
-		dev_warn(dev, "IOMMU detach with outstanding mappings\n");
-	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
-	if (is_fast)
-		return fast_smmu_detach_device(dev, mapping);
-
-	iommu_detach_device(mapping->domain, dev);
-	kref_put(&mapping->kref, release_iommu_mapping);
-	to_dma_iommu_mapping(dev) = NULL;
-
-	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
-}
-
 /**
  * arm_iommu_detach_device
  * @dev: valid struct device pointer
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6468b58..0848993 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -31,7 +31,7 @@
 	select ARM_PSCI_FW
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS
-	select COMMON_CLK
+	select COMMON_CLK if !ARCH_QCOM
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS
 	select EDAC_SUPPORT
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 6531949..b1eca8f 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -114,6 +114,9 @@
 config ARCH_QCOM
 	bool "Qualcomm Platforms"
 	select PINCTRL
+	select CLKDEV_LOOKUP
+	select HAVE_CLK
+	select HAVE_CLK_PREPARE
 	select SOC_BUS
 	select PM_OPP
 	select MFD_CORE
@@ -124,6 +127,7 @@
 config ARCH_SDM845
 	bool "Enable Support for Qualcomm Technologies Inc. SDM845"
 	depends on ARCH_QCOM
+	select COMMON_CLK
 	select COMMON_CLK_QCOM
 	select QCOM_GDSC
 	help
@@ -133,6 +137,7 @@
 config ARCH_SDM670
 	bool "Enable Support for Qualcomm Technologies Inc. SDM670"
 	depends on ARCH_QCOM
+	select COMMON_CLK
 	select COMMON_CLK_QCOM
 	select QCOM_GDSC
 	help
@@ -142,9 +147,8 @@
 config ARCH_MSM8953
 	bool "Enable Support for Qualcomm Technologies Inc. MSM8953"
 	depends on ARCH_QCOM
-	select COMMON_CLK_QCOM
-	select QCOM_GDSC
 	select CPU_FREQ_QCOM
+	select COMMON_CLK_MSM
 	help
 	  This enables support for the MSM8953 chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
@@ -152,9 +156,8 @@
 config ARCH_SDM450
 	bool "Enable Support for Qualcomm Technologies Inc. SDM450"
 	depends on ARCH_QCOM
-	select COMMON_CLK_QCOM
-	select QCOM_GDSC
 	select CPU_FREQ_QCOM
+	select COMMON_CLK_MSM
 	help
 	  This enables support for the sdm450 chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
index d5dc94e..86c8836 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
@@ -39,6 +39,11 @@
 		qcom,dcs-cmd-by-left;
 		qcom,mdss-dsi-tx-eot-append;
 		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
 
 		qcom,mdss-dsi-display-timings {
 			timing@0{
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
index 6a4200dc..66beead 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
@@ -33,6 +33,11 @@
 		qcom,mdss-dsi-tx-eot-append;
 
 		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
 
 		qcom,mdss-dsi-display-timings {
 			timing@0{
diff --git a/arch/arm64/boot/dts/qcom/msm-gdsc-8916.dtsi b/arch/arm64/boot/dts/qcom/msm-gdsc-8916.dtsi
new file mode 100644
index 0000000..49e148c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-gdsc-8916.dtsi
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2013-2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	gdsc_venus: qcom,gdsc@184c018 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_venus";
+		reg = <0x184c018 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_mdss: qcom,gdsc@184d078 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_mdss";
+		reg = <0x184d078 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_jpeg: qcom,gdsc@185701c {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_jpeg";
+		reg = <0x185701c 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_vfe: qcom,gdsc@1858034 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_vfe";
+		reg = <0x1858034 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_vfe1: qcom,gdsc@185806c {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_vfe1";
+		reg = <0x185806c 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_cpp: qcom,gdsc@1858078 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_cpp";
+		reg = <0x1858078 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_oxili_gx: qcom,gdsc@185901c {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_oxili_gx";
+		reg = <0x185901c 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_venus_core0: qcom,gdsc@184c028 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_venus_core0";
+		reg = <0x184c028 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_venus_core1: qcom,gdsc@184c030 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_venus_core1";
+		reg = <0x184c030 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_oxili_cx: qcom,gdsc@185904c {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_oxili_cx";
+		reg = <0x185904c 0x4>;
+		status = "disabled";
+	};
+
+	gdsc_usb30: qcom,gdsc@183f078 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_usb30";
+		reg = <0x183f078 0x4>;
+		status = "disabled";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-bus.dtsi b/arch/arm64/boot/dts/qcom/msm8953-bus.dtsi
new file mode 100644
index 0000000..d1654f3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-bus.dtsi
@@ -0,0 +1,1002 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+&soc {
+	/* Version = 11 */
+	ad_hoc_bus: ad-hoc-bus@580000 {
+		compatible = "qcom,msm-bus-device";
+		reg = <0x580000 0x16080>,
+			<0x580000 0x16080>,
+			<0x400000 0x5A000>,
+			<0x500000 0x12080>;
+		reg-names = "snoc-base", "snoc-mm-base",
+			    "bimc-base", "pcnoc-base";
+
+		/* Buses */
+
+		fab_bimc: fab-bimc {
+			cell-id = <MSM_BUS_FAB_BIMC>;
+			label = "fab-bimc";
+			qcom,fab-dev;
+			qcom,base-name = "bimc-base";
+			qcom,bus-type = <2>;
+			qcom,util-fact = <153>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc  clk_bimc_msmbus_clk>,
+				<&clock_gcc  clk_bimc_msmbus_a_clk>;
+		};
+
+		fab_pcnoc: fab-pcnoc {
+			cell-id = <MSM_BUS_FAB_PERIPH_NOC>;
+			label = "fab-pcnoc";
+			qcom,fab-dev;
+			qcom,base-name = "pcnoc-base";
+			qcom,base-offset = <0x7000>;
+			qcom,qos-off = <0x1000>;
+			qcom,bus-type = <1>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc  clk_pcnoc_msmbus_clk>,
+				<&clock_gcc  clk_pcnoc_msmbus_a_clk>;
+
+			qcom,node-qos-clks {
+				clock-names = "pcnoc-usb3-axi-no-rate";
+				clocks =
+					<&clock_gcc clk_gcc_pcnoc_usb3_axi_clk>;
+			};
+		};
+
+		fab_snoc: fab-snoc {
+			cell-id = <MSM_BUS_FAB_SYS_NOC>;
+			label = "fab-snoc";
+			qcom,fab-dev;
+			qcom,base-name = "snoc-base";
+			qcom,base-offset = <0x7000>;
+			qcom,qos-off = <0x1000>;
+			qcom,bus-type = <1>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc  clk_snoc_msmbus_clk>,
+				<&clock_gcc  clk_snoc_msmbus_a_clk>;
+		};
+
+		fab_snoc_mm: fab-snoc-mm {
+			cell-id = <MSM_BUS_FAB_MMSS_NOC>;
+			label = "fab-snoc-mm";
+			qcom,fab-dev;
+			qcom,base-name = "snoc-mm-base";
+			qcom,base-offset = <0x7000>;
+			qcom,qos-off = <0x1000>;
+			qcom,bus-type = <1>;
+			qcom,util-fact = <153>;
+			clock-names = "bus_clk", "bus_a_clk";
+			clocks = <&clock_gcc  clk_sysmmnoc_msmbus_clk>,
+				<&clock_gcc  clk_sysmmnoc_msmbus_a_clk>;
+		};
+
+		/* Masters */
+
+		mas_apps_proc: mas-apps-proc {
+			cell-id = <MSM_BUS_MASTER_AMPSS_M0>;
+			label = "mas-apps-proc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <0>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_ebi &slv_bimc_snoc>;
+			qcom,prio-lvl = <0>;
+			qcom,prio-rd = <0>;
+			qcom,prio-wr = <0>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_APPSS_PROC>;
+		};
+
+		mas_oxili: mas-oxili {
+			cell-id = <MSM_BUS_MASTER_GRAPHICS_3D>;
+			label = "mas-oxili";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <2>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_ebi &slv_bimc_snoc>;
+			qcom,prio-lvl = <0>;
+			qcom,prio-rd = <0>;
+			qcom,prio-wr = <0>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_GFX3D>;
+		};
+
+		mas_snoc_bimc_0: mas-snoc-bimc-0 {
+			cell-id = <MSM_BUS_SNOC_BIMC_0_MAS>;
+			label = "mas-snoc-bimc-0";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <3>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_ebi &slv_bimc_snoc>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SNOC_BIMC_0>;
+		};
+
+		mas_snoc_bimc_2: mas-snoc-bimc-2 {
+			cell-id = <MSM_BUS_SNOC_BIMC_2_MAS>;
+			label = "mas-snoc-bimc-2";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <4>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_ebi &slv_bimc_snoc>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SNOC_BIMC_2>;
+		};
+
+		mas_snoc_bimc_1: mas-snoc-bimc-1 {
+			cell-id = <MSM_BUS_SNOC_BIMC_1_MAS>;
+			label = "mas-snoc-bimc-1";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <5>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_ebi>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SNOC_BIMC_1>;
+		};
+
+		mas_tcu_0: mas-tcu-0 {
+			cell-id = <MSM_BUS_MASTER_TCU_0>;
+			label = "mas-tcu-0";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <6>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&slv_ebi &slv_bimc_snoc>;
+			qcom,prio-lvl = <2>;
+			qcom,prio-rd = <2>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_TCU_0>;
+		};
+
+		mas_spdm: mas-spdm {
+			cell-id = <MSM_BUS_MASTER_SPDM>;
+			label = "mas-spdm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&pcnoc_m_0>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SPDM>;
+		};
+
+		mas_blsp_1: mas-blsp-1 {
+			cell-id = <MSM_BUS_MASTER_BLSP_1>;
+			label = "mas-blsp-1";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&pcnoc_m_1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_BLSP_1>;
+		};
+
+		mas_blsp_2: mas-blsp-2 {
+			cell-id = <MSM_BUS_MASTER_BLSP_2>;
+			label = "mas-blsp-2";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&pcnoc_m_1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_BLSP_2>;
+		};
+
+		mas_usb3: mas-usb3 {
+			cell-id = <MSM_BUS_MASTER_USB3>;
+			label = "mas-usb3";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <11>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&pcnoc_int_1>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_USB3>;
+		};
+
+		mas_crypto: mas-crypto {
+			cell-id = <MSM_BUS_MASTER_CRYPTO_CORE0>;
+			label = "mas-crypto";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <0>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&pcnoc_int_1>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_CRYPTO>;
+		};
+
+		mas_sdcc_1: mas-sdcc-1 {
+			cell-id = <MSM_BUS_MASTER_SDCC_1>;
+			label = "mas-sdcc-1";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <7>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&pcnoc_int_1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SDCC_1>;
+		};
+
+		mas_sdcc_2: mas-sdcc-2 {
+			cell-id = <MSM_BUS_MASTER_SDCC_2>;
+			label = "mas-sdcc-2";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <8>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&pcnoc_int_1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SDCC_2>;
+		};
+
+		mas_snoc_pcnoc: mas-snoc-pcnoc {
+			cell-id = <MSM_BUS_SNOC_PNOC_MAS>;
+			label = "mas-snoc-pcnoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <9>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&pcnoc_int_2>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SNOC_PCNOC>;
+		};
+
+		/* SNOC Masters */
+		mas_qdss_bam: mas-qdss-bam {
+			cell-id = <MSM_BUS_MASTER_QDSS_BAM>;
+			label = "mas-qdss-bam";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <11>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&qdss_int>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_QDSS_BAM>;
+		};
+
+		mas_bimc_snoc: mas-bimc-snoc {
+			cell-id = <MSM_BUS_BIMC_SNOC_MAS>;
+			label = "mas-bimc-snoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&snoc_int_0
+				&snoc_int_1 &snoc_int_2>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_BIMC_SNOC>;
+		};
+
+		mas_jpeg: mas-jpeg {
+			cell-id = <MSM_BUS_MASTER_JPEG>;
+			label = "mas-jpeg";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <6>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_snoc_bimc_2>;
+			qcom,bus-dev = <&fab_snoc_mm>;
+			qcom,mas-rpm-id = <ICBID_MASTER_JPEG>;
+		};
+
+		mas_mdp: mas-mdp {
+			cell-id = <MSM_BUS_MASTER_MDP_PORT0>;
+			label = "mas-mdp";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <7>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_snoc_bimc_0>;
+			qcom,bus-dev = <&fab_snoc_mm>;
+			qcom,mas-rpm-id = <ICBID_MASTER_MDP>;
+		};
+
+		mas_pcnoc_snoc: mas-pcnoc-snoc {
+			cell-id = <MSM_BUS_PNOC_SNOC_MAS>;
+			label = "mas-pcnoc-snoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <5>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&snoc_int_0
+				&snoc_int_1 &slv_snoc_bimc_1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PNOC_SNOC>;
+			qcom,blacklist = <&slv_snoc_pcnoc>;
+		};
+
+		mas_venus: mas-venus {
+			cell-id = <MSM_BUS_MASTER_VIDEO_P0>;
+			label = "mas-venus";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <8>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_snoc_bimc_2>;
+			qcom,bus-dev = <&fab_snoc_mm>;
+			qcom,mas-rpm-id = <ICBID_MASTER_VIDEO>;
+		};
+
+		mas_vfe0: mas-vfe0 {
+			cell-id = <MSM_BUS_MASTER_VFE>;
+			label = "mas-vfe0";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <9>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_snoc_bimc_0>;
+			qcom,bus-dev = <&fab_snoc_mm>;
+			qcom,mas-rpm-id = <ICBID_MASTER_VFE>;
+		};
+
+		mas_vfe1: mas-vfe1 {
+			cell-id = <MSM_BUS_MASTER_VFE1>;
+			label = "mas-vfe1";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <13>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_snoc_bimc_0>;
+			qcom,bus-dev = <&fab_snoc_mm>;
+			qcom,mas-rpm-id = <ICBID_MASTER_VFE1>;
+		};
+
+		mas_cpp: mas-cpp {
+			cell-id = <MSM_BUS_MASTER_CPP>;
+			label = "mas-cpp";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <12>;
+			qcom,qos-mode = "bypass";
+			qcom,connections = <&slv_snoc_bimc_2>;
+			qcom,bus-dev = <&fab_snoc_mm>;
+			qcom,mas-rpm-id = <ICBID_MASTER_CPP>;
+		};
+
+		mas_ipa: mas-ipa {
+			cell-id = <MSM_BUS_MASTER_IPA>;
+			label = "mas-ipa";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <14>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&snoc_int_0
+				&snoc_int_1 &slv_snoc_bimc_1>;
+			qcom,prio1 = <0>;
+			qcom,prio0 = <0>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_IPA>;
+		};
+
+		mas_qdss_etr: mas-qdss-etr {
+			cell-id = <MSM_BUS_MASTER_QDSS_ETR>;
+			label = "mas-qdss-etr";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <10>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&qdss_int>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_QDSS_ETR>;
+		};
+
+		/* Internal nodes */
+		pcnoc_m_0: pcnoc-m-0 {
+			cell-id = <MSM_BUS_PNOC_M_0>;
+			label = "pcnoc-m-0";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,qport = <5>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&pcnoc_int_1>;
+			qcom,prio1 = <1>;
+			qcom,prio0 = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_M_0>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_M_0>;
+		};
+
+		pcnoc_m_1: pcnoc-m-1 {
+			cell-id = <MSM_BUS_PNOC_M_1>;
+			label = "pcnoc-m-1";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <6>;
+			qcom,qos-mode = "fixed";
+			qcom,connections = <&pcnoc_int_1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_M_1>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_M_1>;
+		};
+
+		pcnoc_int_1: pcnoc-int-1 {
+			cell-id = <MSM_BUS_PNOC_INT_1>;
+			label = "pcnoc-int-1";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&pcnoc_int_2 &slv_pcnoc_snoc>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_INT_1>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_INT_1>;
+		};
+
+		pcnoc_int_2: pcnoc-int-2 {
+			cell-id = <MSM_BUS_PNOC_INT_2>;
+			label = "pcnoc-int-2";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&pcnoc_s_1 &pcnoc_s_2
+				&pcnoc_s_0 &pcnoc_s_4 &pcnoc_s_6
+				&pcnoc_s_7 &pcnoc_s_8 &pcnoc_s_9
+				&slv_tcu &slv_gpu_cfg &pcnoc_s_3>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_INT_2>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_INT_2>;
+		};
+
+		pcnoc_s_0: pcnoc-s-0 {
+			cell-id = <MSM_BUS_PNOC_SLV_0>;
+			label = "pcnoc-s-0";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_pdm &slv_spdm>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_0>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_0>;
+		};
+
+		pcnoc_s_1: pcnoc-s-1 {
+			cell-id = <MSM_BUS_PNOC_SLV_1>;
+			label = "pcnoc-s-1";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_tcsr>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_1>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_1>;
+		};
+
+		pcnoc_s_2: pcnoc-s-2 {
+			cell-id = <MSM_BUS_PNOC_SLV_2>;
+			label = "pcnoc-s-2";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_snoc_cfg>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_2>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_2>;
+		};
+
+		pcnoc_s_3: pcnoc-s-3 {
+			cell-id = <MSM_BUS_PNOC_SLV_3>;
+			label = "pcnoc-s-3";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_tlmm &slv_prng &slv_blsp_1
+				 &slv_blsp_2 &slv_message_ram>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_3>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_3>;
+		};
+
+		pcnoc_s_4: pcnoc-s-4 {
+			cell-id = <MSM_BUS_PNOC_SLV_4>;
+			label = "pcnoc-s-4";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_camera_ss_cfg
+				&slv_disp_ss_cfg &slv_venus_cfg>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_4>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_4>;
+		};
+
+		pcnoc_s_6: pcnoc-s-6 {
+			cell-id = <MSM_BUS_PNOC_SLV_6>;
+			label = "pcnoc-s-6";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_crypto_0_cfg
+				&slv_sdcc_2 &slv_sdcc_1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_6>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_6>;
+		};
+
+		pcnoc_s_7: pcnoc-s-7 {
+			cell-id = <MSM_BUS_PNOC_SLV_7>;
+			label = "pcnoc-s-7";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_pmic_arb>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_7>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_7>;
+		};
+
+		pcnoc_s_8: pcnoc-s-8 {
+			cell-id = <MSM_BUS_PNOC_SLV_8>;
+			label = "pcnoc-s-8";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_usb3>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_8>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_8>;
+		};
+
+		pcnoc_s_9: pcnoc-s-9 {
+			cell-id = <MSM_BUS_PNOC_SLV_9>;
+			label = "pcnoc-s-9";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_ipa_cfg>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_PCNOC_S_9>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_S_9>;
+		};
+
+		qdss_int: qdss-int {
+			cell-id = <MSM_BUS_SNOC_QDSS_INT>;
+			label = "qdss-int";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&snoc_int_1 &slv_snoc_bimc_1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_QDSS_INT>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_QDSS_INT>;
+		};
+
+		snoc_int_0: snoc-int-0 {
+			cell-id = <MSM_BUS_SNOC_INT_0>;
+			label = "snoc-int-0";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_lpass
+				&slv_wcss &slv_kpss_ahb>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SNOC_INT_0>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_INT_0>;
+		};
+
+		snoc_int_1: snoc-int-1 {
+			cell-id = <MSM_BUS_SNOC_INT_1>;
+			label = "snoc-int-1";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qdss_stm &slv_imem
+					&slv_snoc_pcnoc>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SNOC_INT_1>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_INT_1>;
+		};
+
+		snoc_int_2: snoc-int-2 {
+			cell-id = <MSM_BUS_SNOC_INT_2>;
+			label = "snoc-int-2";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,connections = <&slv_cats_0 &slv_cats_1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,mas-rpm-id = <ICBID_MASTER_SNOC_INT_2>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_INT_2>;
+		};
+
+		/* Slaves */
+
+		slv_ebi: slv-ebi {
+			cell-id = <MSM_BUS_SLAVE_EBI_CH0>;
+			label = "slv-ebi";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_EBI1>;
+		};
+
+		slv_bimc_snoc: slv-bimc-snoc {
+			cell-id = <MSM_BUS_BIMC_SNOC_SLV>;
+			label = "slv-bimc-snoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_bimc>;
+			qcom,connections = <&mas_bimc_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_BIMC_SNOC>;
+		};
+
+		slv_spdm: slv-spdm {
+			cell-id = <MSM_BUS_SLAVE_SPDM_WRAPPER>;
+			label = "slv-spdm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SPDM_WRAPPER>;
+		};
+
+		slv_pdm: slv-pdm {
+			cell-id = <MSM_BUS_SLAVE_PDM>;
+			label = "slv-pdm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PDM>;
+		};
+
+		slv_tcsr: slv-tcsr {
+			cell-id = <MSM_BUS_SLAVE_TCSR>;
+			label = "slv-tcsr";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_TCSR>;
+		};
+
+		slv_snoc_cfg: slv-snoc-cfg {
+			cell-id = <MSM_BUS_SLAVE_SNOC_CFG>;
+			label = "slv-snoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_CFG>;
+		};
+
+		slv_tlmm: slv-tlmm {
+			cell-id = <MSM_BUS_SLAVE_TLMM>;
+			label = "slv-tlmm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_TLMM>;
+		};
+
+		slv_message_ram: slv-message-ram {
+			cell-id = <MSM_BUS_SLAVE_MESSAGE_RAM>;
+			label = "slv-message-ram";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_MESSAGE_RAM>;
+		};
+
+		slv_blsp_1: slv-blsp-1 {
+			cell-id = <MSM_BUS_SLAVE_BLSP_1>;
+			label = "slv-blsp-1";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_BLSP_1>;
+		};
+
+		slv_blsp_2: slv-blsp-2 {
+			cell-id = <MSM_BUS_SLAVE_BLSP_2>;
+			label = "slv-blsp-2";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_BLSP_2>;
+		};
+
+		slv_prng: slv-prng {
+			cell-id = <MSM_BUS_SLAVE_PRNG>;
+			label = "slv-prng";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PRNG>;
+		};
+
+		slv_camera_ss_cfg: slv-camera-ss-cfg {
+			cell-id = <MSM_BUS_SLAVE_CAMERA_CFG>;
+			label = "slv-camera-ss-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CAMERA_CFG>;
+		};
+
+		slv_disp_ss_cfg: slv-disp-ss-cfg {
+			cell-id = <MSM_BUS_SLAVE_DISPLAY_CFG>;
+			label = "slv-disp-ss-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_DISPLAY_CFG>;
+		};
+
+		slv_venus_cfg: slv-venus-cfg {
+			cell-id = <MSM_BUS_SLAVE_VENUS_CFG>;
+			label = "slv-venus-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_VENUS_CFG>;
+		};
+
+		slv_gpu_cfg: slv-gpu-cfg {
+			cell-id = <MSM_BUS_SLAVE_GRAPHICS_3D_CFG>;
+			label = "slv-gpu-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_GFX3D_CFG>;
+		};
+
+		slv_sdcc_1: slv-sdcc-1 {
+			cell-id = <MSM_BUS_SLAVE_SDCC_1>;
+			label = "slv-sdcc-1";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SDCC_1>;
+		};
+
+		slv_sdcc_2: slv-sdcc-2 {
+			cell-id = <MSM_BUS_SLAVE_SDCC_2>;
+			label = "slv-sdcc-2";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SDCC_2>;
+		};
+
+		slv_crypto_0_cfg: slv-crypto-0-cfg {
+			cell-id = <MSM_BUS_SLAVE_CRYPTO_0_CFG>;
+			label = "slv-crypto-0-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CRYPTO_0_CFG>;
+		};
+
+		slv_pmic_arb: slv-pmic-arb {
+			cell-id = <MSM_BUS_SLAVE_PMIC_ARB>;
+			label = "slv-pmic-arb";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PMIC_ARB>;
+		};
+
+		slv_usb3: slv-usb3 {
+			cell-id = <MSM_BUS_SLAVE_USB3>;
+			label = "slv-usb3";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_USB3_0>;
+		};
+
+		slv_ipa_cfg: slv-ipa-cfg {
+			cell-id = <MSM_BUS_SLAVE_IPA_CFG>;
+			label = "slv-ipa-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_IPA_CFG>;
+		};
+
+		slv_tcu: slv-tcu {
+			cell-id = <MSM_BUS_SLAVE_TCU>;
+			label = "slv-tcu";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_TCU>;
+		};
+
+		slv_pcnoc_snoc: slv-pcnoc-snoc {
+			cell-id = <MSM_BUS_PNOC_SNOC_SLV>;
+			label = "slv-pcnoc-snoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_pcnoc>;
+			qcom,connections = <&mas_pcnoc_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_PCNOC_SNOC>;
+		};
+
+		slv_kpss_ahb: slv-kpss-ahb {
+			cell-id = <MSM_BUS_SLAVE_APPSS>;
+			label = "slv-kpss-ahb";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_APPSS>;
+		};
+
+		slv_wcss: slv-wcss {
+			cell-id = <MSM_BUS_SLAVE_WCSS>;
+			label = "slv-wcss";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_WCSS>;
+		};
+
+		slv_snoc_bimc_0: slv-snoc-bimc-0 {
+			cell-id = <MSM_BUS_SNOC_BIMC_0_SLV>;
+			label = "slv-snoc-bimc-0";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_snoc_mm>;
+			qcom,connections = <&mas_snoc_bimc_0>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_BIMC_0>;
+		};
+
+		slv_snoc_bimc_1: slv-snoc-bimc-1 {
+			cell-id = <MSM_BUS_SNOC_BIMC_1_SLV>;
+			label = "slv-snoc-bimc-1";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,connections = <&mas_snoc_bimc_1>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_BIMC_1>;
+		};
+
+		slv_snoc_bimc_2: slv-snoc-bimc-2 {
+			cell-id = <MSM_BUS_SNOC_BIMC_2_SLV>;
+			label = "slv-snoc-bimc-2";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_snoc_mm>;
+			qcom,connections = <&mas_snoc_bimc_2>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_BIMC_2>;
+		};
+
+		slv_imem: slv-imem {
+			cell-id = <MSM_BUS_SLAVE_OCIMEM>;
+			label = "slv-imem";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_IMEM>;
+		};
+
+		slv_snoc_pcnoc: slv-snoc-pcnoc {
+			cell-id = <MSM_BUS_SNOC_PNOC_SLV>;
+			label = "slv-snoc-pcnoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,connections = <&mas_snoc_pcnoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_SNOC_PCNOC>;
+		};
+
+		slv_qdss_stm: slv-qdss-stm {
+			cell-id = <MSM_BUS_SLAVE_QDSS_STM>;
+			label = "slv-qdss-stm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_QDSS_STM>;
+		};
+
+		slv_cats_0: slv-cats-0 {
+			cell-id = <MSM_BUS_SLAVE_CATS_128>;
+			label = "slv-cats-0";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_snoc_mm>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CATS_0>;
+		};
+
+		slv_cats_1: slv-cats-1 {
+			cell-id = <MSM_BUS_SLAVE_OCMEM_64>;
+			label = "slv-cats-1";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_CATS_1>;
+		};
+
+		slv_lpass: slv-lpass {
+			cell-id = <MSM_BUS_SLAVE_LPASS>;
+			label = "slv-lpass";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,ap-owned;
+			qcom,bus-dev = <&fab_snoc>;
+			qcom,slv-rpm-id = <ICBID_SLAVE_LPASS>;
+		};
+	};
+
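+	/*
+	 * SPDM bandwidth-monitor devfreq client; the bw-upstep/bw-dwnstep/
+	 * max-vote values below are governor tunables consumed by the
+	 * qcom,devfreq_spdm driver.
+	 */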
+	devfreq_spdm_cpu {
+		compatible = "qcom,devfreq_spdm";
+		qcom,msm-bus,name = "devfreq_spdm";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<1 512 0 0>,
+				<1 512 0 0>;
+		qcom,msm-bus,active-only;
+		qcom,spdm-client = <0>;
+
+		clock-names = "cci_clk";
+		clocks = <&clock_cpu clk_cci_clk>;
+
+		qcom,bw-upstep = <400>;
+		qcom,bw-dwnstep = <4200>;
+		qcom,max-vote = <4200>;
+		qcom,up-step-multp = <2>;
+		qcom,spdm-interval = <30>;
+
+		qcom,ports = <11>;
+		qcom,alpha-up = <8>;
+		qcom,alpha-down = <15>;
+		qcom,bucket-size = <8>;
+
+		/* max pl1 freq, max pl2 freq */
+		qcom,pl-freqs = <230000 770000>;
+
+		/* pl1 low, pl1 high, pl2 low, pl2 high, pl3 low, pl3 high */
+		qcom,reject-rate = <5000 5000 5000 5000 5000 5000>;
+		/* pl1 low, pl1 high, pl2 low, pl2 high, pl3 low, pl3 high */
+		qcom,response-time-us = <6000 6000 4000 4000 2000 2000>;
+		/* pl1 low, pl1 high, pl2 low, pl2 high, pl3 low, pl3 high */
+		qcom,cci-response-time-us = <4000 4000 3000 3000 2000 2000>;
+		qcom,max-cci-freq = <870000>;
+	};
+
+	devfreq_spdm_gov {
+		compatible = "qcom,gov_spdm_hyp";
+		interrupt-names = "spdm-irq";
+		interrupts = <0 192 0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi
index 243aaf5..87b8c74 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi
@@ -16,3 +16,61 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart_console_active>;
 };
+
+&sdhc_1 {
+	/* device core power supply */
+	vdd-supply = <&pm8953_l8>;
+	qcom,vdd-voltage-level = <2900000 2900000>;
+	qcom,vdd-current-level = <200 570000>;
+
+	/* device communication power supply */
+	vdd-io-supply = <&pm8953_l5>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-lpm-sup;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <200 325000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000 192000000
+								384000000>;
+	qcom,nonremovable;
+	qcom,bus-speed-mode = "HS400_1p8v", "HS200_1p8v", "DDR_1p8v";
+
+	status = "ok";
+};
+
+&sdhc_2 {
+	/* device core power supply */
+	vdd-supply = <&pm8953_l11>;
+	qcom,vdd-voltage-level = <2950000 2950000>;
+	qcom,vdd-current-level = <15000 800000>;
+
+	/* device communication power supply */
+	vdd-io-supply = <&pm8953_l12>;
+	qcom,vdd-io-voltage-level = <1800000 2950000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+
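+	/*
+	 * Local interrupts 0 and 1 map to GIC SPIs 125 (hc_irq) and
+	 * 221 (pwr_irq); interrupt 2 (status_irq) is card detect on
+	 * TLMM GPIO 133, the same line as cd-gpios.
+	 */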
+	#address-cells = <0>;
+	interrupt-parent = <&sdhc_2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 125 0
+		1 &intc 0 221 0
+		2 &tlmm 133 0>;
+	interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+	cd-gpios = <&tlmm 133 0x1>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
+								200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi b/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi
new file mode 100644
index 0000000..55914d0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi
@@ -0,0 +1,1240 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
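+	/*
+	 * QDSS trace topology, as wired by the endpoint links below:
+	 * ETM/STM/TPDM sources feed the funnels, which drain through the
+	 * ETF FIFO and the replicator into the ETR system-memory sink.
+	 */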
+	tmc_etr: tmc@6028000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b961>;
+
+		reg = <0x6028000 0x1000>,
+				<0x6044000 0x15000>;
+		reg-names = "tmc-base", "bam-base";
+
+		interrupts = <0 166 0>;
+		interrupt-names = "byte-cntr-irq";
+
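+		/* 1 MB trace buffer, scatter-gathered in system memory */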
+		arm,buffer-size = <0x100000>;
+		arm,sg-enable;
+
+		coresight-name = "coresight-tmc-etr";
+		coresight-ctis = <&cti0 &cti8>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			tmc_etr_in_replicator: endpoint {
+				slave-mode;
+				remote-endpoint = <&replicator_out_tmc_etr>;
+			};
+		};
+	};
+
+	tmc_etf: tmc@6027000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b961>;
+
+		reg = <0x6027000 0x1000>;
+		reg-names = "tmc-base";
+
+		coresight-name = "coresight-tmc-etf";
+
+		arm,default-sink;
+		coresight-ctis = <&cti0 &cti8>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				tmc_etf_out_replicator: endpoint {
+					remote-endpoint =
+						<&replicator_in_tmc_etf>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tmc_etf_in_funnel_in0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_in0_out_tmc_etf>;
+				};
+			};
+		};
+	};
+
+	replicator: replicator@6026000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b909>;
+
+		reg = <0x6026000 0x1000>;
+		reg-names = "replicator-base";
+
+		coresight-name = "coresight-replicator";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				replicator_out_tmc_etr: endpoint {
+					remote-endpoint =
+						<&tmc_etr_in_replicator>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				replicator_in_tmc_etf: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tmc_etf_out_replicator>;
+				};
+			};
+		};
+	};
+
+	funnel_in0: funnel@6021000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6021000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-in0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_in0_out_tmc_etf: endpoint {
+					remote-endpoint =
+						<&tmc_etf_in_funnel_in0>;
+				};
+			};
+
+			port@1 {
+				reg = <7>;
+				funnel_in0_in_stm: endpoint {
+					slave-mode;
+					remote-endpoint = <&stm_out_funnel_in0>;
+				};
+			};
+
+			port@2 {
+				reg = <6>;
+				funnel_in0_in_tpda: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpda_out_funnel_in0>;
+				};
+			};
+
+			port@3 {
+				reg = <3>;
+				funnel_in0_in_funnel_center: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_center_out_funnel_in0>;
+				};
+			};
+
+			port@4 {
+				reg = <4>;
+				funnel_in0_in_funnel_right: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_right_out_funnel_in0>;
+				};
+			};
+
+			port@5 {
+				reg = <5>;
+				funnel_in0_in_funnel_mm: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_mm_out_funnel_in0>;
+				};
+			};
+		};
+	};
+
+	funnel_center: funnel@6100000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6100000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-center";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_center_out_funnel_in0: endpoint {
+					remote-endpoint =
+						<&funnel_in0_in_funnel_center>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_center_in_rpm_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&rpm_etm0_out_funnel_center>;
+				};
+			};
+
+			port@2 {
+				reg = <2>;
+				funnel_center_in_dbgui: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&dbgui_out_funnel_center>;
+				};
+			};
+		};
+	};
+
+	funnel_right: funnel@6120000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6120000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-right";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_right_out_funnel_in0: endpoint {
+					remote-endpoint =
+						<&funnel_in0_in_funnel_right>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				funnel_right_in_modem_etm1: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&modem_etm1_out_funnel_right>;
+				};
+			};
+
+			port@2 {
+				reg = <2>;
+				funnel_right_in_modem_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&modem_etm0_out_funnel_right>;
+				};
+			};
+
+			port@3 {
+				reg = <3>;
+				funnel_right_in_funnel_apss1: endpoint {
+					slave-mode;
+					remote-endpoint =
+					       <&funnel_apss1_out_funnel_right>;
+				};
+			};
+		};
+	};
+
+	funnel_mm: funnel@6130000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6130000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-mm";
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_mm_out_funnel_in0: endpoint {
+					remote-endpoint =
+						<&funnel_in0_in_funnel_mm>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_mm_in_wcn_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&wcn_etm0_out_funnel_mm>;
+				};
+			};
+
+			port@2 {
+				reg = <4>;
+				funnel_mm_in_funnel_cam: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_cam_out_funnel_mm>;
+				};
+			};
+
+			port@3 {
+				reg = <5>;
+				funnel_mm_in_audio_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&audio_etm0_out_funnel_mm>;
+				};
+			};
+		};
+	};
+
+	funnel_cam: funnel@6132000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6132000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-cam";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			funnel_cam_out_funnel_mm: endpoint {
+				remote-endpoint = <&funnel_mm_in_funnel_cam>;
+			};
+		};
+	};
+
+	funnel_apss1: funnel@61d0000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x61d0000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-apss1";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_apss1_out_funnel_right: endpoint {
+					remote-endpoint =
+						<&funnel_right_in_funnel_apss1>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_apss1_in_funnel_apss0: endpoint {
+					slave-mode;
+					remote-endpoint =
+					       <&funnel_apss0_out_funnel_apss1>;
+				};
+			};
+		};
+	};
+
+	funnel_apss0: funnel@61a1000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x61a1000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-apss0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_apss0_out_funnel_apss1: endpoint {
+					remote-endpoint =
+						<&funnel_apss1_in_funnel_apss0>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_apss0_in_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm0_out_funnel_apss0>;
+				};
+			};
+
+			port@2 {
+				reg = <1>;
+				funnel_apss0_in_etm1: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm1_out_funnel_apss0>;
+				};
+			};
+
+			port@3 {
+				reg = <2>;
+				funnel_apss0_in_etm2: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm2_out_funnel_apss0>;
+				};
+			};
+
+			port@4 {
+				reg = <3>;
+				funnel_apss0_in_etm3: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm3_out_funnel_apss0>;
+				};
+			};
+
+			port@5 {
+				reg = <4>;
+				funnel_apss0_in_etm4: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm4_out_funnel_apss0>;
+				};
+			};
+
+			port@6 {
+				reg = <5>;
+				funnel_apss0_in_etm5: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm5_out_funnel_apss0>;
+				};
+			};
+
+			port@7 {
+				reg = <6>;
+				funnel_apss0_in_etm6: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm6_out_funnel_apss0>;
+				};
+			};
+
+			port@8 {
+				reg = <7>;
+				funnel_apss0_in_etm7: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm7_out_funnel_apss0>;
+				};
+			};
+		};
+	};
+
+	etm0: etm@619c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb95d>;
+
+		reg = <0x619c000 0x1000>;
+		cpu = <&CPU0>;
+		coresight-name = "coresight-etm0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			etm0_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm0>;
+			};
+		};
+	};
+
+	etm1: etm@619d000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb95d>;
+
+		reg = <0x619d000 0x1000>;
+		cpu = <&CPU1>;
+		coresight-name = "coresight-etm1";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			etm1_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm1>;
+			};
+		};
+	};
+
+	etm2: etm@619e000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb95d>;
+
+		reg = <0x619e000 0x1000>;
+		cpu = <&CPU2>;
+		coresight-name = "coresight-etm2";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			etm2_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm2>;
+			};
+		};
+	};
+
+	etm3: etm@619f000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb95d>;
+
+		reg = <0x619f000 0x1000>;
+		cpu = <&CPU3>;
+		coresight-name = "coresight-etm3";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			etm3_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm3>;
+			};
+		};
+	};
+
+	etm4: etm@61bc000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb95d>;
+
+		reg = <0x61bc000 0x1000>;
+		cpu = <&CPU4>;
+		coresight-name = "coresight-etm4";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			etm4_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm4>;
+			};
+		};
+	};
+
+	etm5: etm@61bd000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb95d>;
+
+		reg = <0x61bd000 0x1000>;
+		cpu = <&CPU5>;
+		coresight-name = "coresight-etm5";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			etm5_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm5>;
+			};
+		};
+	};
+
+	etm6: etm@61be000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb95d>;
+
+		reg = <0x61be000 0x1000>;
+		cpu = <&CPU6>;
+		coresight-name = "coresight-etm6";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			etm6_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm6>;
+			};
+		};
+	};
+
+	etm7: etm@61bf000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb95d>;
+
+		reg = <0x61bf000 0x1000>;
+		coresight-name = "coresight-etm7";
+		cpu = <&CPU7>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			etm7_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm7>;
+			};
+		};
+	};
+
+	stm: stm@6002000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b962>;
+
+		reg = <0x6002000 0x1000>,
+		      <0x9280000 0x180000>;
+		reg-names = "stm-base", "stm-stimulus-base";
+
+		coresight-name = "coresight-stm";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			stm_out_funnel_in0: endpoint {
+				remote-endpoint = <&funnel_in0_in_stm>;
+			};
+		};
+	};
+
+	cti0: cti@6010000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6010000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti1: cti@6011000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6011000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti1";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti2: cti@6012000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6012000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti2";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti3: cti@6013000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6013000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti3";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti4: cti@6014000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6014000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti4";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti5: cti@6015000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6015000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti5";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti6: cti@6016000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6016000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti6";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti7: cti@6017000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6017000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti7";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti8: cti@6018000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6018000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti8";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti9: cti@6019000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6019000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti9";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti10: cti@601a000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x601a000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti10";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti11: cti@601b000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x601b000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti11";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti12: cti@601c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x601c000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti12";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti13: cti@601d000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x601d000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti13";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti14: cti@601e000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x601e000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti14";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti15: cti@601f000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x601f000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti15";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu0: cti@6198000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6198000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-cpu0";
+		cpu = <&CPU0>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu1: cti@6199000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6199000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-cpu1";
+		cpu = <&CPU1>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu2: cti@619a000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x619a000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-cpu2";
+		cpu = <&CPU2>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu3: cti@619b000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x619b000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-cpu3";
+		cpu = <&CPU3>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu4: cti@61b8000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x61b8000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-cpu4";
+		cpu = <&CPU4>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu5: cti@61b9000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x61b9000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-cpu5";
+		cpu = <&CPU5>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu6: cti@61ba000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x61ba000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-cpu6";
+		cpu = <&CPU6>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu7: cti@61bb000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x61bb000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-cpu7";
+		cpu = <&CPU7>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_modem_cpu0: cti@6128000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6128000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-modem-cpu0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_modem_cpu1: cti@6124000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6124000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-modem-cpu1";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	/* Venus CTI */
+	cti_video_cpu0: cti@6134000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6134000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-video-cpu0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	/* Pronto CTI */
+	cti_wcn_cpu0: cti@6139000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6139000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-wcn-cpu0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	/* LPASS CTI */
+	cti_audio_cpu0: cti@613c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x613c000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-audio-cpu0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_rpm_cpu0: cti@610c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x610c000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-rpm-cpu0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	/* Pronto ETM */
+	wcn_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+		coresight-name = "coresight-wcn-etm0";
+		qcom,inst-id = <3>;
+
+		port {
+			wcn_etm0_out_funnel_mm: endpoint {
+				remote-endpoint = <&funnel_mm_in_wcn_etm0>;
+			};
+		};
+	};
+
+	rpm_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+		coresight-name = "coresight-rpm-etm0";
+		qcom,inst-id = <4>;
+
+		port {
+			rpm_etm0_out_funnel_center: endpoint {
+				remote-endpoint = <&funnel_center_in_rpm_etm0>;
+			};
+		};
+	};
+
+	/* LPASS ETM */
+	audio_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+		coresight-name = "coresight-audio-etm0";
+		qcom,inst-id = <5>;
+
+		port {
+			audio_etm0_out_funnel_mm: endpoint {
+				remote-endpoint = <&funnel_mm_in_audio_etm0>;
+			};
+		};
+	};
+
+	/* MSS_SCL */
+	modem_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+		coresight-name = "coresight-modem-etm0";
+		qcom,inst-id = <11>;
+
+		port {
+			modem_etm0_out_funnel_right: endpoint {
+				remote-endpoint = <&funnel_right_in_modem_etm0>;
+			};
+		};
+	};
+
+	/* MSS_VEC */
+	modem_etm1 {
+		compatible = "qcom,coresight-remote-etm";
+		coresight-name = "coresight-modem-etm1";
+		qcom,inst-id = <2>;
+
+		port {
+			modem_etm1_out_funnel_right: endpoint {
+				remote-endpoint = <&funnel_right_in_modem_etm1>;
+			};
+		};
+	};
+
+	csr: csr@6001000 {
+		compatible = "qcom,coresight-csr";
+		reg = <0x6001000 0x1000>;
+		reg-names = "csr-base";
+		coresight-name = "coresight-csr";
+
+		qcom,blk-size = <1>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	dbgui: dbgui@6108000 {
+		compatible = "qcom,coresight-dbgui";
+		reg = <0x6108000 0x1000>;
+		reg-names = "dbgui-base";
+		coresight-name = "coresight-dbgui";
+
+		qcom,dbgui-addr-offset = <0x30>;
+		qcom,dbgui-data-offset = <0x130>;
+		qcom,dbgui-size = <64>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			dbgui_out_funnel_center: endpoint {
+				remote-endpoint = <&funnel_center_in_dbgui>;
+			};
+		};
+	};
+
+	tpda: tpda@6003000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b969>;
+
+		reg = <0x6003000 0x1000>;
+		reg-names = "tpda-base";
+		coresight-name = "coresight-tpda";
+
+		qcom,tpda-atid = <64>;
+		qcom,cmb-elem-size = <0 32>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				tpda_out_funnel_in0: endpoint {
+					remote-endpoint = <&funnel_in0_in_tpda>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_in_tpdm_dcc: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_dcc_out_tpda>;
+				};
+			};
+		};
+	};
+
+	tpdm_dcc: tpdm@6110000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
+
+		reg = <0x6110000 0x1000>;
+		reg-names = "tpdm-base";
+		coresight-name = "coresight-tpdm-dcc";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			tpdm_dcc_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_dcc>;
+			};
+		};
+	};
+
+	hwevent: hwevent@6101000 {
+		compatible = "qcom,coresight-hwevent";
+
+		reg = <0x6101000 0x148>,
+		      <0x6101fb0 0x4>,
+		      <0x6121000 0x148>,
+		      <0x6121fb0 0x4>,
+		      <0x6131000 0x148>,
+		      <0x6131fb0 0x4>,
+		      <0x7105010 0x4>,
+		      <0x7885010 0x4>;
+
+		reg-names = "center-wrapper-mux", "center-wrapper-lockaccess",
+				"right-wrapper-mux", "right-wrapper-lockaccess",
+				"mm-wrapper-mux", "mm-wrapper-lockaccess",
+				"usbbam-mux", "blsp-mux";
+
+		coresight-name = "coresight-hwevent";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
index 12b039c..b53f7b8 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
@@ -42,3 +42,7 @@
 	qcom,chg-led-sw-controls;
 	qcom,chg-led-support;
 };
+
+&usb3 {
+	extcon = <&pmi8950_charger>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi
index 243aaf5..87b8c74 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi
@@ -16,3 +16,61 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart_console_active>;
 };
+
+&sdhc_1 {
+	/* device core power supply */
+	vdd-supply = <&pm8953_l8>;
+	qcom,vdd-voltage-level = <2900000 2900000>;
+	qcom,vdd-current-level = <200 570000>;
+
+	/* device communication power supply */
+	vdd-io-supply = <&pm8953_l5>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-lpm-sup;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <200 325000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000 192000000
+								384000000>;
+	qcom,nonremovable;
+	qcom,bus-speed-mode = "HS400_1p8v", "HS200_1p8v", "DDR_1p8v";
+
+	status = "ok";
+};
+
+&sdhc_2 {
+	/* device core power supply */
+	vdd-supply = <&pm8953_l11>;
+	qcom,vdd-voltage-level = <2950000 2950000>;
+	qcom,vdd-current-level = <15000 800000>;
+
+	/* device communication power supply */
+	vdd-io-supply = <&pm8953_l12>;
+	qcom,vdd-io-voltage-level = <1800000 2950000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+
+	#address-cells = <0>;
+	interrupt-parent = <&sdhc_2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 125 0
+		1 &intc 0 221 0
+		2 &tlmm 133 0>;
+	interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+	cd-gpios = <&tlmm 133 0x1>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
+								200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
index e3ada39..a45bb66 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
@@ -424,6 +424,32 @@
 			};
 		};
 
+		blsp2_uart0_active: blsp2_uart0_active {
+			mux {
+				pins = "gpio16", "gpio17", "gpio18", "gpio19";
+				function = "blsp_uart5";
+			};
+
+			config {
+				pins = "gpio16", "gpio17", "gpio18", "gpio19";
+				drive-strength = <16>;
+				bias-disable;
+			};
+		};
+
+		blsp2_uart0_sleep: blsp2_uart0_sleep {
+			mux {
+				pins = "gpio16", "gpio17", "gpio18", "gpio19";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio16", "gpio17", "gpio18", "gpio19";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
 		/* SDC pin type */
 		sdc1_clk_on: sdc1_clk_on {
 			config {
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8950.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pmi8950.dtsi
index 6270223..016baf2 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8950.dtsi
@@ -26,3 +26,7 @@
 	status = "ok";
 	qpnp,qpnp-labibb-mode = "lcd";
 };
+
+&usb3 {
+	vbus_dwc3-supply = <&smbcharger_charger_otg>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
index 243aaf5..87b8c74 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
@@ -16,3 +16,61 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart_console_active>;
 };
+
+&sdhc_1 {
+	/* device core power supply */
+	vdd-supply = <&pm8953_l8>;
+	qcom,vdd-voltage-level = <2900000 2900000>;
+	qcom,vdd-current-level = <200 570000>;
+
+	/* device communication power supply */
+	vdd-io-supply = <&pm8953_l5>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-lpm-sup;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <200 325000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000 192000000
+								384000000>;
+	qcom,nonremovable;
+	qcom,bus-speed-mode = "HS400_1p8v", "HS200_1p8v", "DDR_1p8v";
+
+	status = "ok";
+};
+
+&sdhc_2 {
+	/* device core power supply */
+	vdd-supply = <&pm8953_l11>;
+	qcom,vdd-voltage-level = <2950000 2950000>;
+	qcom,vdd-current-level = <15000 800000>;
+
+	/* device communication power supply */
+	vdd-io-supply = <&pm8953_l12>;
+	qcom,vdd-io-voltage-level = <1800000 2950000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+
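+	/* Same downstream interrupt-map scheme as on MTP: IRQs 0-2 route to the GIC and the card-detect GPIO */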
+	#address-cells = <0>;
+	interrupt-parent = <&sdhc_2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 125 0
+		1 &intc 0 221 0
+		2 &tlmm 133 0>;
+	interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+	cd-gpios = <&tlmm 133 0x1>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
+								200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index a9ca87c..8ce2100 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -16,6 +16,7 @@
 #include <dt-bindings/spmi/spmi.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/regulator/qcom,rpm-smd-regulator.h>
+#include <dt-bindings/clock/msm-clocks-8953.h>
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM 8953";
@@ -118,6 +119,10 @@
 		smd36 = &smdtty_loopback;
 		sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
 		sdhc2 = &sdhc_2; /* SDC2 for SD card */
+		i2c2 = &i2c_2;
+		i2c3 = &i2c_3;
+		i2c5 = &i2c_5;
+		spi3 = &spi_3;
 	};
 
 	soc: soc { };
@@ -127,7 +132,8 @@
 #include "msm8953-pinctrl.dtsi"
 #include "msm8953-cpu.dtsi"
 #include "msm8953-pm.dtsi"
-
+#include "msm8953-bus.dtsi"
+#include "msm8953-coresight.dtsi"
 
 &soc {
 	#address-cells = <1>;
@@ -135,6 +141,17 @@
 	ranges = <0 0 0 0xffffffff>;
 	compatible = "simple-bus";
 
+	dcc: dcc@b3000 {
+		compatible = "qcom,dcc";
+		reg = <0xb3000 0x1000>,
+		      <0xb4000 0x800>;
+		reg-names = "dcc-base", "dcc-ram-base";
+
+		clocks = <&clock_gcc clk_gcc_dcc_clk>;
+		clock-names = "apb_pclk";
+		qcom,save-reg;
+	};
+
 	apc_apm: apm@b111000 {
 		compatible = "qcom,msm8953-apm";
 		reg = <0xb111000 0x1000>;
@@ -257,10 +274,308 @@
 		qcom,pipe-attr-ee;
 	};
 
+	thermal_zones: thermal-zones {
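+		/* One user_space-governed zone per TSENS channel below, each with a 125 C passive trip */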
+		mdm-core-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "user_space";
+			thermal-sensors = <&tsens0 1>;
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		qdsp-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "user_space";
+			thermal-sensors = <&tsens0 2>;
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		camera-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "user_space";
+			thermal-sensors = <&tsens0 3>;
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc1_cpu0-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 4>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc1_cpu1-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 5>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc1_cpu2-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 6>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc1_cpu3-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 7>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc1_l2-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 8>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc0_cpu0-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 9>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc0_cpu1-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 10>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc0_cpu2-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 11>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc0_cpu3-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 12>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc0_l2-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 13>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		gpu0-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 14>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		gpu1-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 15>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+	};
+
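+	/* TSENS controller; the cell in thermal-sensors = <&tsens0 N> selects sensor channel N */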
+	tsens0: tsens@4a8000 {
+		compatible = "qcom,msm8953-tsens";
+		reg = <0x4a8000 0x1000>,
+			<0x4a9000 0x1000>;
+		reg-names = "tsens_srot_physical",
+					"tsens_tm_physical";
+		interrupts = <0 184 0>, <0 314 0>;
+		interrupt-names = "tsens-upper-lower", "tsens-critical";
+		#thermal-sensor-cells = <1>;
+	};
+
 	blsp1_uart0: serial@78af000 {
 		compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
 		reg = <0x78af000 0x200>;
 		interrupts = <0 107 0>;
+		clocks = <&clock_gcc clk_gcc_blsp1_uart1_apps_clk>,
+			<&clock_gcc clk_gcc_blsp1_ahb_clk>;
+		clock-names = "core", "iface";
+		status = "disabled";
+	};
+
+	blsp1_uart1: uart@78b0000 {
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0x78b0000 0x200>,
+			<0x7884000 0x1f000>;
+		reg-names = "core_mem", "bam_mem";
+
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&blsp1_uart1>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 108 0
+				1 &intc 0 238 0
+				2 &tlmm 13 0>;
+
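+		/*
+		 * Downstream hsuart wakeup feature: on the wakeup GPIO firing,
+		 * the driver injects 0xFD into the RX stream so the client
+		 * sees the wake event.
+		 */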
+		qcom,inject-rx-on-wakeup;
+		qcom,rx-char-to-inject = <0xFD>;
+		qcom,master-id = <86>;
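+		/* GCC UART clocks are 1-indexed, so this uart1 node takes the uart2 clocks (cf. blsp1_uart0 above) */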
+		clock-names = "core_clk", "iface_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_uart2_apps_clk>,
+			<&clock_gcc clk_gcc_blsp1_ahb_clk>;
+		pinctrl-names = "sleep", "default";
+		pinctrl-0 = <&hsuart_sleep>;
+		pinctrl-1 = <&hsuart_active>;
+		qcom,bam-tx-ep-pipe-index = <2>;
+		qcom,bam-rx-ep-pipe-index = <3>;
+		qcom,msm-bus,name = "blsp1_uart1";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<86 512 0 0>,
+				<86 512 500 800>;
+		status = "disabled";
+	};
+
+	blsp2_uart0: uart@7aef000 {
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0x7aef000 0x200>,
+			<0x7ac4000 0x1f000>;
+		reg-names = "core_mem", "bam_mem";
+
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&blsp2_uart0>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 306 0
+				1 &intc 0 239 0
+				2 &tlmm 17 0>;
+
+		qcom,inject-rx-on-wakeup;
+		qcom,rx-char-to-inject = <0xFD>;
+		qcom,master-id = <84>;
+		clock-names = "core_clk", "iface_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_uart1_apps_clk>,
+			<&clock_gcc clk_gcc_blsp2_ahb_clk>;
+		pinctrl-names = "sleep", "default";
+		pinctrl-0 = <&blsp2_uart0_sleep>;
+		pinctrl-1 = <&blsp2_uart0_active>;
+		qcom,bam-tx-ep-pipe-index = <0>;
+		qcom,bam-rx-ep-pipe-index = <1>;
+		qcom,msm-bus,name = "blsp2_uart0";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<84 512 0 0>,
+				<84 512 500 800>;
 		status = "disabled";
 	};
 
@@ -280,6 +595,110 @@
 		qcom,summing-threshold = <10>;
 	};
 
+	spi_3: spi@78b7000 { /* BLSP1 QUP3 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0x78b7000 0x600>,
+			<0x7884000 0x1f000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 97 0>, <0 238 0>;
+		spi-max-frequency = <19200000>;
+		pinctrl-names = "spi_default", "spi_sleep";
+		pinctrl-0 = <&spi3_default &spi3_cs0_active>;
+		pinctrl-1 = <&spi3_sleep &spi3_cs0_sleep>;
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			<&clock_gcc clk_gcc_blsp1_qup3_spi_apps_clk>;
+		clock-names = "iface_clk", "core_clk";
+		qcom,infinite-mode = <0>;
+		qcom,use-bam;
+		qcom,use-pinctrl;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <8>;
+		qcom,bam-producer-pipe-index = <9>;
+		qcom,master-id = <86>;
+		status = "disabled";
+	};
+
+	i2c_2: i2c@78b6000 { /* BLSP1 QUP2 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0x78b6000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 96 0>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			<&clock_gcc clk_gcc_blsp1_qup2_i2c_apps_clk>;
+
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_2_active>;
+		pinctrl-1 = <&i2c_2_sleep>;
+		qcom,noise-rjct-scl = <0>;
+		qcom,noise-rjct-sda = <0>;
+		qcom,master-id = <86>;
+		dmas = <&dma_blsp1 6 64 0x20000020 0x20>,
+			<&dma_blsp1 7 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	i2c_3: i2c@78b7000 { /* BLSP1 QUP3 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0x78b7000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 97 0>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+			<&clock_gcc clk_gcc_blsp1_qup3_i2c_apps_clk>;
+
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_3_active>;
+		pinctrl-1 = <&i2c_3_sleep>;
+		qcom,noise-rjct-scl = <0>;
+		qcom,noise-rjct-sda = <0>;
+		qcom,master-id = <86>;
+		dmas = <&dma_blsp1 8 64 0x20000020 0x20>,
+			<&dma_blsp1 9 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	i2c_5: i2c@7af5000 { /* BLSP2 QUP1 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0x7af5000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 299 0>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			<&clock_gcc clk_gcc_blsp2_qup1_i2c_apps_clk>;
+
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_5_active>;
+		pinctrl-1 = <&i2c_5_sleep>;
+		qcom,noise-rjct-scl = <0>;
+		qcom,noise-rjct-sda = <0>;
+		qcom,master-id = <84>;
+		dmas = <&dma_blsp2 4 64 0x20000020 0x20>,
+			<&dma_blsp2 5 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
 	slim_msm: slim@c140000{
 		cell-index = <1>;
 		compatible = "qcom,slim-ngd";
@@ -293,6 +712,161 @@
 		status = "disabled";
 	};
 
+	clock_gcc: qcom,gcc@1800000 {
+		compatible = "qcom,gcc-8953";
+		reg = <0x1800000 0x80000>,
+			 <0x00a4124 0x08>;
+		reg-names = "cc_base", "efuse";
+		vdd_dig-supply = <&pm8953_s2_level>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_debug: qcom,cc-debug@1874000 {
+		compatible = "qcom,cc-debug-8953";
+		reg = <0x1874000 0x4>;
+		reg-names = "cc_base";
+		clocks = <&clock_cpu clk_cpu_debug_pri_mux>;
+		clock-names = "debug_cpu_clk";
+		#clock-cells = <1>;
+	};
+
+	clock_gcc_gfx: qcom,gcc-gfx@1800000 {
+		compatible = "qcom,gcc-gfx-8953";
+		reg = <0x1800000 0x80000>;
+		reg-names = "cc_base";
+		vdd_gfx-supply = <&gfx_vreg_corner>;
+		qcom,gfxfreq-corner =
+			 <         0   0 >,
+			 < 133330000   1 >,  /* Min SVS   */
+			 < 216000000   2 >,  /* Low SVS   */
+			 < 320000000   3 >,  /* SVS       */
+			 < 400000000   4 >,  /* SVS Plus  */
+			 < 510000000   5 >,  /* NOM       */
+			 < 560000000   6 >,  /* Nom Plus  */
+			 < 650000000   7 >;  /* Turbo     */
+		#clock-cells = <1>;
+	};
+
+	clock_cpu: qcom,cpu-clock-8953@b116000 {
+		compatible = "qcom,cpu-clock-8953";
+		reg =	 <0xb114000  0x68>,
+			 <0xb014000  0x68>,
+			 <0xb116000  0x400>,
+			 <0xb111050  0x08>,
+			 <0xb011050  0x08>,
+			 <0xb1d1050  0x08>,
+			 <0x00a4124  0x08>;
+		reg-names = "rcgwr-c0-base", "rcgwr-c1-base",
+				"c0-pll", "c0-mux", "c1-mux",
+				"cci-mux", "efuse";
+		vdd-mx-supply = <&pm8953_s7_level_ao>;
+		vdd-cl-supply = <&apc_vreg>;
+		clocks = <&clock_gcc clk_xo_a_clk_src>;
+		clock-names = "xo_a";
+		qcom,num-clusters = <2>;
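+		/* Per-speed-bin <frequency-Hz level> tables; the bin is read from the "efuse" region above */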
+		qcom,speed0-bin-v0-cl =
+			<          0 0>,
+			<  652800000 1>,
+			< 1036800000 2>,
+			< 1401600000 3>,
+			< 1689600000 4>,
+			< 1804800000 5>,
+			< 1958400000 6>,
+			< 2016000000 7>;
+		qcom,speed0-bin-v0-cci =
+			<          0 0>,
+			<  261120000 1>,
+			<  414720000 2>,
+			<  560640000 3>,
+			<  675840000 4>,
+			<  721920000 5>,
+			<  783360000 6>,
+			<  806400000 7>;
+		qcom,speed2-bin-v0-cl =
+			<          0 0>,
+			<  652800000 1>,
+			< 1036800000 2>,
+			< 1401600000 3>,
+			< 1689600000 4>,
+			< 1804800000 5>,
+			< 1958400000 6>,
+			< 2016000000 7>;
+		qcom,speed2-bin-v0-cci =
+			<          0 0>,
+			<  261120000 1>,
+			<  414720000 2>,
+			<  560640000 3>,
+			<  675840000 4>,
+			<  721920000 5>,
+			<  783360000 6>,
+			<  806400000 7>;
+		qcom,speed7-bin-v0-cl =
+			<          0 0>,
+			<  652800000 1>,
+			< 1036800000 2>,
+			< 1401600000 3>,
+			< 1689600000 4>,
+			< 1804800000 5>,
+			< 1958400000 6>,
+			< 2016000000 7>,
+			< 2150400000 8>,
+			< 2208000000 9>;
+		qcom,speed7-bin-v0-cci =
+			<          0 0>,
+			<  261120000 1>,
+			<  414720000 2>,
+			<  560640000 3>,
+			<  675840000 4>,
+			<  721920000 5>,
+			<  783360000 6>,
+			<  806400000 7>,
+			<  860160000 8>,
+			<  883200000 9>;
+		qcom,speed6-bin-v0-cl =
+			<          0 0>,
+			<  652800000 1>,
+			< 1036800000 2>,
+			< 1401600000 3>,
+			< 1689600000 4>,
+			< 1804800000 5>;
+		qcom,speed6-bin-v0-cci =
+			<          0 0>,
+			<  261120000 1>,
+			<  414720000 2>,
+			<  560640000 3>,
+			<  675840000 4>,
+			<  721920000 5>;
+		#clock-cells = <1>;
+	};
+
+	msm_cpufreq: qcom,msm-cpufreq {
+		compatible = "qcom,msm-cpufreq";
+		clock-names = "l2_clk", "cpu0_clk", "cpu1_clk", "cpu2_clk",
+				"cpu3_clk", "cpu4_clk", "cpu5_clk",
+				"cpu6_clk", "cpu7_clk";
+		clocks = <&clock_cpu clk_cci_clk>,
+			 <&clock_cpu clk_a53_pwr_clk>,
+			 <&clock_cpu clk_a53_pwr_clk>,
+			 <&clock_cpu clk_a53_pwr_clk>,
+			 <&clock_cpu clk_a53_pwr_clk>,
+			 <&clock_cpu clk_a53_pwr_clk>,
+			 <&clock_cpu clk_a53_pwr_clk>,
+			 <&clock_cpu clk_a53_pwr_clk>,
+			 <&clock_cpu clk_a53_pwr_clk>;
+
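+		/* Available CPU frequencies, in kHz */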
+		qcom,cpufreq-table =
+			 <  652800 >,
+			 < 1036800 >,
+			 < 1401600 >,
+			 < 1689600 >,
+			 < 1804800 >,
+			 < 1958400 >,
+			 < 2016000 >,
+			 < 2150400 >,
+			 < 2208000 >;
+	};
+
 	cpubw: qcom,cpubw {
 		compatible = "qcom,devbw";
 		governor = "cpufreq";
@@ -663,6 +1237,12 @@
 				  "sdcc_ice_sec_level_irq";
 		interrupts = <0 312 0>, <0 313 0>;
 		qcom,enable-ice-clk;
+		clock-names = "ice_core_clk_src", "ice_core_clk",
+				"bus_clk", "iface_clk";
+		clocks = <&clock_gcc clk_sdcc1_ice_core_clk_src>,
+			 <&clock_gcc clk_gcc_sdcc1_ice_core_clk>,
+			 <&clock_gcc clk_gcc_sdcc1_apps_clk>,
+			 <&clock_gcc clk_gcc_sdcc1_ahb_clk>;
 		qcom,op-freq-hz = <270000000>, <0>, <0>, <0>;
 		qcom,msm-bus,name = "sdcc_ice_noc";
 		qcom,msm-bus,num-cases = <2>;
@@ -710,6 +1290,10 @@
 		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
 			100000000 200000000 400000000 4294967295>;
 
+		clocks = <&clock_gcc clk_gcc_sdcc1_ahb_clk>,
+			 <&clock_gcc clk_gcc_sdcc1_apps_clk>,
+			 <&clock_gcc clk_gcc_sdcc1_ice_core_clk>;
+		clock-names = "iface_clk", "core_clk", "ice_core_clk";
 		qcom,ice-clk-rates = <270000000 160000000>;
 		qcom,large-address-bus;
 
@@ -748,6 +1332,10 @@
 		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
 			100000000 200000000 4294967295>;
 
+		clocks = <&clock_gcc clk_gcc_sdcc2_ahb_clk>,
+			<&clock_gcc clk_gcc_sdcc2_apps_clk>;
+		clock-names = "iface_clk", "core_clk";
+
 		qcom,large-address-bus;
 		status = "disabled";
 	};
@@ -770,8 +1358,369 @@
 		#interrupt-cells = <4>;
 		cell-index = <0>;
 	};
+
+	usb3: ssusb@7000000 {
+		compatible = "qcom,dwc-usb3-msm";
+		reg = <0x07000000 0xfc000>,
+			<0x0007e000 0x400>;
+		reg-names = "core_base",
+			"ahb2phy_base";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		interrupts = <0 136 0>, <0 220 0>, <0 134 0>;
+		interrupt-names = "hs_phy_irq", "ss_phy_irq", "pwr_event_irq";
+
+		USB3_GDSC-supply = <&gdsc_usb30>;
+		qcom,usb-dbm = <&dbm_1p5>;
+		qcom,msm-bus,name = "usb3";
+		qcom,msm-bus,num-cases = <3>;
+		qcom,msm-bus,num-paths = <1>;
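+		/* Each vector: <master-id slave-id average-BW peak-BW> in KBps (assumed msm-bus convention) */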
+		qcom,msm-bus,vectors-KBps =
+					<61 512 0 0>,
+					<61 512 240000 800000>,
+					<61 512 240000 800000>;
+
+		/* CPU-CLUSTER-WFI-LVL latency +1 */
+		qcom,pm-qos-latency = <2>;
+
+		qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
+
+		clocks = <&clock_gcc clk_gcc_usb30_master_clk>,
+			<&clock_gcc clk_gcc_pcnoc_usb3_axi_clk>,
+			<&clock_gcc clk_gcc_usb30_mock_utmi_clk>,
+			<&clock_gcc clk_gcc_usb30_sleep_clk>,
+			<&clock_gcc clk_xo_dwc3_clk>,
+			<&clock_gcc clk_gcc_usb_phy_cfg_ahb_clk>;
+
+		clock-names = "core_clk", "iface_clk", "utmi_clk",
+				"sleep_clk", "xo", "cfg_ahb_clk";
+
+		qcom,core-clk-rate = <133333333>; /* NOM */
+		qcom,core-clk-rate-hs = <60000000>; /* LOW SVS */
+
+		resets = <&clock_gcc GCC_USB_30_BCR>;
+		reset-names = "core_reset";
+
+		dwc3@7000000 {
+			compatible = "snps,dwc3";
+			reg = <0x07000000 0xc8d0>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 140 0>;
+			usb-phy = <&qusb_phy>, <&ssphy>;
+			tx-fifo-resize;
+			snps,usb3-u1u2-disable;
+			snps,nominal-elastic-buffer;
+			snps,is-utmi-l1-suspend;
+			snps,hird-threshold = /bits/ 8 <0x0>;
+		};
+
+		qcom,usbbam@7104000 {
+			compatible = "qcom,usb-bam-msm";
+			reg = <0x07104000 0x1a934>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 135 0>;
+
+			qcom,bam-type = <0>;
+			qcom,usb-bam-fifo-baseaddr = <0x08605000>;
+			qcom,usb-bam-num-pipes = <8>;
+			qcom,ignore-core-reset-ack;
+			qcom,disable-clk-gating;
+			qcom,usb-bam-override-threshold = <0x4001>;
+			qcom,usb-bam-max-mbps-highspeed = <400>;
+			qcom,usb-bam-max-mbps-superspeed = <3600>;
+			qcom,reset-bam-on-connect;
+
+			qcom,pipe0 {
+				label = "ssusb-ipa-out-0";
+				qcom,usb-bam-mem-type = <1>;
+				qcom,dir = <0>;
+				qcom,pipe-num = <0>;
+				qcom,peer-bam = <1>;
+				qcom,src-bam-pipe-index = <1>;
+				qcom,data-fifo-size = <0x8000>;
+				qcom,descriptor-fifo-size = <0x2000>;
+			};
+
+			qcom,pipe1 {
+				label = "ssusb-ipa-in-0";
+				qcom,usb-bam-mem-type = <1>;
+				qcom,dir = <1>;
+				qcom,pipe-num = <0>;
+				qcom,peer-bam = <1>;
+				qcom,dst-bam-pipe-index = <0>;
+				qcom,data-fifo-size = <0x8000>;
+				qcom,descriptor-fifo-size = <0x2000>;
+			};
+
+			qcom,pipe2 {
+				label = "ssusb-qdss-in-0";
+				qcom,usb-bam-mem-type = <2>;
+				qcom,dir = <1>;
+				qcom,pipe-num = <0>;
+				qcom,peer-bam = <0>;
+				qcom,peer-bam-physical-address = <0x06044000>;
+				qcom,src-bam-pipe-index = <0>;
+				qcom,dst-bam-pipe-index = <2>;
+				qcom,data-fifo-offset = <0x0>;
+				qcom,data-fifo-size = <0xe00>;
+				qcom,descriptor-fifo-offset = <0xe00>;
+				qcom,descriptor-fifo-size = <0x200>;
+			};
+
+			qcom,pipe3 {
+				label = "ssusb-dpl-ipa-in-1";
+				qcom,usb-bam-mem-type = <1>;
+				qcom,dir = <1>;
+				qcom,pipe-num = <1>;
+				qcom,peer-bam = <1>;
+				qcom,dst-bam-pipe-index = <2>;
+				qcom,data-fifo-size = <0x8000>;
+				qcom,descriptor-fifo-size = <0x2000>;
+			};
+		};
+	};
+
+	qusb_phy: qusb@79000 {
+		compatible = "qcom,qusb2phy";
+		reg = <0x079000 0x180>,
+			<0x01841030 0x4>,
+			<0x0193f020 0x4>;
+		reg-names = "qusb_phy_base",
+			"ref_clk_addr",
+			"tcsr_clamp_dig_n_1p8";
+
+		USB3_GDSC-supply = <&gdsc_usb30>;
+		vdd-supply = <&pm8953_l3>;
+		vdda18-supply = <&pm8953_l7>;
+		vdda33-supply = <&pm8953_l13>;
+		qcom,vdd-voltage-level = <0 925000 925000>;
+
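+		/* <value register-offset> pairs programmed into the PHY at init (assumed QUSB2 binding layout) */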
+		qcom,qusb-phy-init-seq = <0xf8 0x80
+					0xb3 0x84
+					0x83 0x88
+					0xc0 0x8c
+					0x14 0x9c
+					0x30 0x08
+					0x79 0x0c
+					0x21 0x10
+					0x00 0x90
+					0x9f 0x1c
+					0x00 0x18>;
+		phy_type = "utmi";
+		qcom,phy-clk-scheme = "cml";
+		qcom,major-rev = <1>;
+
+		clocks = <&clock_gcc clk_bb_clk1>,
+			 <&clock_gcc clk_gcc_qusb_ref_clk>,
+			 <&clock_gcc clk_gcc_usb_phy_cfg_ahb_clk>,
+			 <&clock_gcc clk_gcc_pcnoc_usb3_axi_clk>,
+			 <&clock_gcc clk_gcc_usb30_master_clk>;
+
+		clock-names = "ref_clk_src", "ref_clk", "cfg_ahb_clk",
+			      "iface_clk", "core_clk";
+
+		resets = <&clock_gcc GCC_QUSB2_PHY_BCR>;
+		reset-names = "phy_reset";
+	};
+
+	ssphy: ssphy@78000 {
+		compatible = "qcom,usb-ssphy-qmp";
+		reg = <0x78000 0x9f8>,
+		      <0x0193f244 0x4>;
+		reg-names = "qmp_phy_base",
+			    "vls_clamp_reg";
+
+		qcom,qmp-phy-init-seq = /*<reg_offset, value, delay>*/
+					<0xac 0x14 0x00
+					0x34 0x08 0x00
+					0x174 0x30 0x00
+					0x3c 0x06 0x00
+					0xb4 0x00 0x00
+					0xb8 0x08 0x00
+					0x194 0x06 0x3e8
+					0x19c 0x01 0x00
+					0x178 0x00 0x00
+					0xd0 0x82 0x00
+					0xdc 0x55 0x00
+					0xe0 0x55 0x00
+					0xe4 0x03 0x00
+					0x78 0x0b 0x00
+					0x84 0x16 0x00
+					0x90 0x28 0x00
+					0x108 0x80 0x00
+					0x10c 0x00 0x00
+					0x184 0x0a 0x00
+					0x4c 0x15 0x00
+					0x50 0x34 0x00
+					0x54 0x00 0x00
+					0xc8 0x00 0x00
+					0x18c 0x00 0x00
+					0xcc 0x00 0x00
+					0x128 0x00 0x00
+					0x0c 0x0a 0x00
+					0x10 0x01 0x00
+					0x1c 0x31 0x00
+					0x20 0x01 0x00
+					0x14 0x00 0x00
+					0x18 0x00 0x00
+					0x24 0xde 0x00
+					0x28 0x07 0x00
+					0x48 0x0f 0x00
+					0x70 0x0f 0x00
+					0x100 0x80 0x00
+					0x440 0x0b 0x00
+					0x4d8 0x02 0x00
+					0x4dc 0x6c 0x00
+					0x4e0 0xbb 0x00
+					0x508 0x77 0x00
+					0x50c 0x80 0x00
+					0x514 0x03 0x00
+					0x51c 0x16 0x00
+					0x448 0x75 0x00
+					0x454 0x00 0x00
+					0x40c 0x0a 0x00
+					0x41c 0x06 0x00
+					0x510 0x00 0x00
+					0x268 0x45 0x00
+					0x2ac 0x12 0x00
+					0x294 0x06 0x00
+					0x254 0x00 0x00
+					0x8c8 0x83 0x00
+					0x8c4 0x02 0x00
+					0x8cc 0x09 0x00
+					0x8d0 0xa2 0x00
+					0x8d4 0x85 0x00
+					0x880 0xd1 0x00
+					0x884 0x1f 0x00
+					0x888 0x47 0x00
+					0x80c 0x9f 0x00
+					0x824 0x17 0x00
+					0x828 0x0f 0x00
+					0x8b8 0x75 0x00
+					0x8bc 0x13 0x00
+					0x8b0 0x86 0x00
+					0x8a0 0x04 0x00
+					0x88c 0x44 0x00
+					0x870 0xe7 0x00
+					0x874 0x03 0x00
+					0x878 0x40 0x00
+					0x87c 0x00 0x00
+					0x9d8 0x88 0x00
+					0xffffffff 0x00 0x00>;
+		qcom,qmp-phy-reg-offset =
+				<0x974  /* USB3_PHY_PCS_STATUS */
+				0x8d8   /* USB3_PHY_AUTONOMOUS_MODE_CTRL */
+				0x8dc   /* USB3_PHY_LFPS_RXTERM_IRQ_CLEAR */
+				0x804   /* USB3_PHY_POWER_DOWN_CONTROL */
+				0x800   /* USB3_PHY_SW_RESET */
+				0x808>; /* USB3_PHY_START */
+
+		vdd-supply = <&pm8953_l3>;
+		core-supply = <&pm8953_l7>;
+		qcom,vdd-voltage-level = <0 925000 925000>;
+		qcom,core-voltage-level = <0 1800000 1800000>;
+		qcom,vbus-valid-override;
+
+		clocks = <&clock_gcc clk_gcc_usb3_aux_clk>,
+			 <&clock_gcc clk_gcc_usb3_pipe_clk>,
+			 <&clock_gcc clk_gcc_usb_phy_cfg_ahb_clk>,
+			 <&clock_gcc clk_bb_clk1>,
+			 <&clock_gcc clk_gcc_usb_ss_ref_clk>;
+
+		clock-names = "aux_clk", "pipe_clk", "cfg_ahb_clk",
+			      "ref_clk_src", "ref_clk";
+
+		resets = <&clock_gcc GCC_USB3_PHY_BCR>,
+			<&clock_gcc GCC_USB3PHY_PHY_BCR>;
+
+		reset-names = "phy_reset", "phy_phy_reset";
+	};
+
+	dbm_1p5: dbm@70f8000 {
+		compatible = "qcom,usb-dbm-1p5";
+		reg = <0x070f8000 0x300>;
+		qcom,reset-ep-after-lpm-resume;
+	};
 };
 
 #include "pm8953-rpm-regulator.dtsi"
 #include "pm8953.dtsi"
 #include "msm8953-regulator.dtsi"
+#include "msm-gdsc-8916.dtsi"
+
+&gdsc_venus {
+	clock-names = "bus_clk", "core_clk";
+	clocks = <&clock_gcc clk_gcc_venus0_axi_clk>,
+		<&clock_gcc clk_gcc_venus0_vcodec0_clk>;
+	status = "okay";
+};
+
+&gdsc_venus_core0 {
+	qcom,support-hw-trigger;
+	clock-names = "core0_clk";
+	clocks = <&clock_gcc clk_gcc_venus0_core0_vcodec0_clk>;
+	status = "okay";
+};
+
+&gdsc_mdss {
+	clock-names = "core_clk", "bus_clk";
+	clocks = <&clock_gcc clk_gcc_mdss_mdp_clk>,
+		<&clock_gcc clk_gcc_mdss_axi_clk>;
+	proxy-supply = <&gdsc_mdss>;
+	qcom,proxy-consumer-enable;
+	status = "okay";
+};
+
+&gdsc_oxili_gx {
+	clock-names = "core_root_clk";
+	clocks = <&clock_gcc_gfx clk_gfx3d_clk_src>;
+	qcom,force-enable-root-clk;
+	parent-supply = <&gfx_vreg_corner>;
+	status = "okay";
+};
+
+&gdsc_jpeg {
+	clock-names = "core_clk", "bus_clk";
+	clocks = <&clock_gcc clk_gcc_camss_jpeg0_clk>,
+		<&clock_gcc clk_gcc_camss_jpeg_axi_clk>;
+	status = "okay";
+};
+
+&gdsc_vfe {
+	clock-names = "core_clk", "bus_clk", "micro_clk",
+				"csi_clk";
+	clocks = <&clock_gcc clk_gcc_camss_vfe0_clk>,
+		<&clock_gcc clk_gcc_camss_vfe_axi_clk>,
+		<&clock_gcc clk_gcc_camss_micro_ahb_clk>,
+		<&clock_gcc clk_gcc_camss_csi_vfe0_clk>;
+	status = "okay";
+};
+
+&gdsc_vfe1 {
+	clock-names = "core_clk", "bus_clk", "micro_clk",
+			"csi_clk";
+	clocks = <&clock_gcc clk_gcc_camss_vfe1_clk>,
+		<&clock_gcc clk_gcc_camss_vfe1_axi_clk>,
+		<&clock_gcc clk_gcc_camss_micro_ahb_clk>,
+		<&clock_gcc clk_gcc_camss_csi_vfe1_clk>;
+	status = "okay";
+};
+
+&gdsc_cpp {
+	clock-names = "core_clk", "bus_clk";
+	clocks = <&clock_gcc clk_gcc_camss_cpp_clk>,
+		<&clock_gcc clk_gcc_camss_cpp_axi_clk>;
+	status = "okay";
+};
+
+&gdsc_oxili_cx {
+	clock-names = "core_clk";
+	clocks = <&clock_gcc_gfx clk_gcc_oxili_gfx3d_clk>;
+	status = "okay";
+};
+
+&gdsc_usb30 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
index 8b4fccb..e731f5b 100644
--- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
@@ -212,6 +212,7 @@
 			qcom,force-aicl-rerun;
 			qcom,aicl-rerun-period-s = <180>;
 			qcom,autoadjust-vfloat;
+			dpdm-supply = <&qusb_phy>;
 
 			qcom,chgr@1000 {
 				reg = <0x1000 0x100>;
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 007081a..2f4b00e 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -103,6 +103,7 @@
 			qcom,thermal-mitigation
 					= <3000000 1500000 1000000 500000>;
 			qcom,auto-recharge-soc;
+			qcom,suspend-input-on-debug-batt;
 
 			qcom,chgr@1000 {
 				reg = <0x1000 0x100>;
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi b/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
index 26a73b0..378c4a1 100644
--- a/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
@@ -38,3 +38,56 @@
 &usb_qmp_phy {
 	status = "ok";
 };
+
+&tlmm {
+	pmx_ts_rst_active {
+		ts_rst_active: ts_rst_active {
+			mux {
+				pins = "gpio99";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio99";
+				drive-strength = <16>;
+				bias-pull-up;
+			};
+		};
+	};
+
+	pmx_ts_rst_suspend {
+		ts_rst_suspend: ts_rst_suspend {
+			mux {
+				pins = "gpio99";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio99";
+				drive-strength = <2>;
+				bias-pull-down;
+			};
+		};
+	};
+};
+
+&soc {
+	hbtp {
+		compatible = "qcom,hbtp-input";
+		pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+		pinctrl-0 = <&ts_rst_active>;
+		pinctrl-1 = <&ts_rst_suspend>;
+		vcc_ana-supply = <&pm8998_l28>;
+		vcc_dig-supply = <&pm8998_l14>;
+		qcom,afe-load = <20000>;
+		qcom,afe-vtg-min = <3000000>;
+		qcom,afe-vtg-max = <3000000>;
+		qcom,dig-load = <40000>;
+		qcom,dig-vtg-min = <1800000>;
+		qcom,dig-vtg-max = <1800000>;
+		qcom,fb-resume-delay-us = <1000>;
+		qcom,afe-force-power-on;
+		qcom,afe-power-on-delay-us = <6>;
+		qcom,afe-power-off-delay-us = <6>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
index 521b048..bd88087 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
@@ -94,13 +94,13 @@
 &sdhc_1 {
 	vdd-supply = <&pm660l_l4>;
 	qcom,vdd-voltage-level = <2960000 2960000>;
-	qcom,vdd-current-level = <200 570000>;
+	qcom,vdd-current-level = <0 570000>;
 
 	vdd-io-supply = <&pm660_l8>;
 	qcom,vdd-io-always-on;
 	qcom,vdd-io-lpm-sup;
 	qcom,vdd-io-voltage-level = <1800000 1800000>;
-	qcom,vdd-io-current-level = <200 325000>;
+	qcom,vdd-io-current-level = <0 325000>;
 
 	pinctrl-names = "active", "sleep";
 	pinctrl-0 = <&sdc1_clk_on  &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
@@ -112,11 +112,11 @@
 &sdhc_2 {
 	vdd-supply = <&pm660l_l5>;
 	qcom,vdd-voltage-level = <2960000 2960000>;
-	qcom,vdd-current-level = <200 800000>;
+	qcom,vdd-current-level = <0 800000>;
 
 	vdd-io-supply = <&pm660l_l2>;
 	qcom,vdd-io-voltage-level = <1800000 2960000>;
-	qcom,vdd-io-current-level = <200 22000>;
+	qcom,vdd-io-current-level = <0 22000>;
 
 	pinctrl-names = "active", "sleep";
 	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
index f8a8e15..7928ab5 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
@@ -478,15 +478,6 @@
 						<&audio_etm0_out_funnel_in1>;
 				};
 			};
-
-			port@2 {
-				reg = <3>;
-				funnel_in1_in_funnel_modem: endpoint {
-					slave-mode;
-					remote-endpoint =
-					  <&funnel_modem_out_funnel_in1>;
-				};
-			};
 		};
 	};
 
@@ -533,6 +524,14 @@
 				};
 			};
 			port@3 {
+				reg = <2>;
+				funnel_in2_in_funnel_modem: endpoint {
+					slave-mode;
+					remote-endpoint =
+					  <&funnel_modem_out_funnel_in2>;
+				};
+			};
+			port@4 {
 				reg = <5>;
 				funnel_in2_in_funnel_apss_merg: endpoint {
 					slave-mode;
@@ -540,7 +539,7 @@
 					  <&funnel_apss_merg_out_funnel_in2>;
 				};
 			};
-			port@4 {
+			port@5 {
 				reg = <6>;
 				funnel_in2_in_funnel_gfx: endpoint {
 					slave-mode;
@@ -736,9 +735,9 @@
 
 			port@0 {
 				reg = <0>;
-				funnel_modem_out_funnel_in1: endpoint {
+				funnel_modem_out_funnel_in2: endpoint {
 					remote-endpoint =
-					    <&funnel_in1_in_funnel_modem>;
+					    <&funnel_in2_in_funnel_modem>;
 				};
 			};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
index c136752..de9e40e 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
@@ -95,13 +95,13 @@
 &sdhc_1 {
 	vdd-supply = <&pm660l_l4>;
 	qcom,vdd-voltage-level = <2960000 2960000>;
-	qcom,vdd-current-level = <200 570000>;
+	qcom,vdd-current-level = <0 570000>;
 
 	vdd-io-supply = <&pm660_l8>;
 	qcom,vdd-io-always-on;
 	qcom,vdd-io-lpm-sup;
 	qcom,vdd-io-voltage-level = <1800000 1800000>;
-	qcom,vdd-io-current-level = <200 325000>;
+	qcom,vdd-io-current-level = <0 325000>;
 
 	pinctrl-names = "active", "sleep";
 	pinctrl-0 = <&sdc1_clk_on  &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
@@ -113,11 +113,11 @@
 &sdhc_2 {
 	vdd-supply = <&pm660l_l5>;
 	qcom,vdd-voltage-level = <2960000 2960000>;
-	qcom,vdd-current-level = <200 800000>;
+	qcom,vdd-current-level = <0 800000>;
 
 	vdd-io-supply = <&pm660l_l2>;
 	qcom,vdd-io-voltage-level = <1800000 2960000>;
-	qcom,vdd-io-current-level = <200 22000>;
+	qcom,vdd-io-current-level = <0 22000>;
 
 	pinctrl-names = "active", "sleep";
 	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index d4953c1..ffed74c 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -418,33 +418,42 @@
 		};
 
 		qupv3_se6_4uart_pins: qupv3_se6_4uart_pins {
-			qupv3_se6_4uart_active: qupv3_se6_4uart_active {
+			qupv3_se6_ctsrx: qupv3_se6_ctsrx {
 				mux {
-					pins = "gpio45", "gpio46", "gpio47",
-								"gpio48";
+					pins = "gpio45", "gpio48";
 					function = "qup6";
 				};
 
 				config {
-					pins = "gpio45", "gpio46", "gpio47",
-								"gpio48";
+					pins = "gpio45", "gpio48";
 					drive-strength = <2>;
-					bias-disable;
+					bias-disable;
 				};
 			};
 
-			qupv3_se6_4uart_sleep: qupv3_se6_4uart_sleep {
+			qupv3_se6_rts: qupv3_se6_rts {
 				mux {
-					pins = "gpio45", "gpio46", "gpio47",
-								"gpio48";
-					function = "gpio";
+					pins = "gpio46";
+					function = "qup6";
 				};
 
 				config {
-					pins = "gpio45", "gpio46", "gpio47",
-								"gpio48";
+					pins = "gpio46";
 					drive-strength = <2>;
-					bias-disable;
+					bias-pull-down;
+				};
+			};
+
+			qupv3_se6_tx: qupv3_se6_tx {
+				mux {
+					pins = "gpio47";
+					function = "qup6";
+				};
+
+				config {
+					pins = "gpio47";
+					drive-strength = <2>;
+					bias-pull-up;
 				};
 			};
 		};
@@ -927,7 +936,7 @@
 				config {
 					pins = "gpio51", "gpio52";
 					drive-strength = <2>;
-					bias-disable;
+					bias-pull-down;
 				};
 			};
 		};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
index dd35a36..fe88aae 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
@@ -186,12 +186,4 @@
 		reg = <0xc300000 0x1000>, <0xc3f0004 0x4>;
 		reg-names = "phys_addr_base", "offset_addr";
 	};
-
-	pdc: interrupt-controller@b220000{
-		compatible = "qcom,pdc-sdm670";
-		reg = <0xb220000 0x400>;
-		#interrupt-cells = <3>;
-		interrupt-parent = <&intc>;
-		interrupt-controller;
-	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
index ea6e1c7..cc4645f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
@@ -206,13 +206,13 @@
 &sdhc_1 {
 	vdd-supply = <&pm660l_l4>;
 	qcom,vdd-voltage-level = <2960000 2960000>;
-	qcom,vdd-current-level = <200 570000>;
+	qcom,vdd-current-level = <0 570000>;
 
 	vdd-io-supply = <&pm660_l8>;
 	qcom,vdd-io-always-on;
 	qcom,vdd-io-lpm-sup;
 	qcom,vdd-io-voltage-level = <1800000 1800000>;
-	qcom,vdd-io-current-level = <200 325000>;
+	qcom,vdd-io-current-level = <0 325000>;
 
 	pinctrl-names = "active", "sleep";
 	pinctrl-0 = <&sdc1_clk_on  &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
@@ -224,11 +224,11 @@
 &sdhc_2 {
 	vdd-supply = <&pm660l_l5>;
 	qcom,vdd-voltage-level = <2960000 2960000>;
-	qcom,vdd-current-level = <200 800000>;
+	qcom,vdd-current-level = <0 800000>;
 
 	vdd-io-supply = <&pm660l_l2>;
 	qcom,vdd-io-voltage-level = <1800000 2960000>;
-	qcom,vdd-io-current-level = <200 22000>;
+	qcom,vdd-io-current-level = <0 22000>;
 
 	pinctrl-names = "active", "sleep";
 	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
index c388f4a..225a6e6 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
@@ -40,8 +40,10 @@
 			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
 		pinctrl-names = "default", "sleep";
-		pinctrl-0 = <&qupv3_se6_4uart_active>;
-		pinctrl-1 = <&qupv3_se6_4uart_sleep>;
+		pinctrl-0 = <&qupv3_se6_ctsrx>, <&qupv3_se6_rts>,
+						<&qupv3_se6_tx>;
+		pinctrl-1 = <&qupv3_se6_ctsrx>, <&qupv3_se6_rts>,
+						<&qupv3_se6_tx>;
 		interrupts-extended = <&pdc GIC_SPI 607 0>,
 				<&tlmm 48 0>;
 		status = "disabled";
@@ -245,6 +247,9 @@
 		interrupts = <GIC_SPI 601 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 0 1 64 0>,
+			<&gpi_dma0 1 0 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -264,6 +269,9 @@
 		interrupts = <GIC_SPI 602 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 1 1 64 0>,
+			<&gpi_dma0 1 1 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -283,6 +291,9 @@
 		interrupts = <GIC_SPI 603 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 2 1 64 0>,
+			<&gpi_dma0 1 2 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -302,6 +313,9 @@
 		interrupts = <GIC_SPI 604 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 3 1 64 0>,
+			<&gpi_dma0 1 3 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -321,6 +335,9 @@
 		interrupts = <GIC_SPI 605 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 4 1 64 0>,
+			<&gpi_dma0 1 4 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -340,6 +357,9 @@
 		interrupts = <GIC_SPI 606 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 5 1 64 0>,
+			<&gpi_dma0 1 5 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -359,6 +379,9 @@
 		interrupts = <GIC_SPI 607 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 6 1 64 0>,
+			<&gpi_dma0 1 6 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -378,6 +401,9 @@
 		interrupts = <GIC_SPI 608 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 7 1 64 0>,
+			<&gpi_dma0 1 7 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -626,6 +652,9 @@
 		interrupts = <GIC_SPI 353 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 0 1 64 0>,
+			<&gpi_dma1 1 0 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -645,6 +674,9 @@
 		interrupts = <GIC_SPI 354 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 1 1 64 0>,
+			<&gpi_dma1 1 1 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -664,6 +696,9 @@
 		interrupts = <GIC_SPI 355 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 2 1 64 0>,
+			<&gpi_dma1 1 2 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -683,6 +718,9 @@
 		interrupts = <GIC_SPI 356 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 3 1 64 0>,
+			<&gpi_dma1 1 3 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -702,6 +740,9 @@
 		interrupts = <GIC_SPI 357 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 4 1 64 0>,
+			<&gpi_dma1 1 4 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -721,6 +762,9 @@
 		interrupts = <GIC_SPI 358 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 5 1 64 0>,
+			<&gpi_dma1 1 5 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -740,6 +784,9 @@
 		interrupts = <GIC_SPI 359 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 6 1 64 0>,
+			<&gpi_dma1 1 6 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -759,6 +806,9 @@
 		interrupts = <GIC_SPI 360 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 7 1 64 0>,
+			<&gpi_dma1 1 7 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 0163c87..e321329 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -354,7 +354,7 @@
 				1324800    84
 				1516800    96
 				1612800   114
-				1708000   139
+				1708800   139
 			>;
 			idle-cost-data = <
 				12 10 8 6
@@ -395,7 +395,7 @@
 				1324800   13
 				1516800   15
 				1612800   16
-				1708000   19
+				1708800   19
 			>;
 			idle-cost-data = <
 				4 3 2 1
@@ -446,6 +446,11 @@
 		android {
 			compatible = "android,firmware";
 
+			vbmeta {
+				compatible = "android,vbmeta";
+				parts = "vbmeta,boot,system,vendor,dtbo";
+			};
+
 			fstab {
 				compatible = "android,fstab";
 				vendor {
@@ -701,6 +706,14 @@
 		interrupt-parent = <&intc>;
 	};
 
+	pdc: interrupt-controller@b220000 {
+		compatible = "qcom,pdc-sdm670";
+		reg = <0xb220000 0x400>;
+		#interrupt-cells = <3>;
+		interrupt-parent = <&intc>;
+		interrupt-controller;
+	};
+
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupts = <1 1 0xf08>,
@@ -1059,7 +1072,7 @@
 		vdd_l3_mx_ao-supply = <&pm660l_s1_level_ao>;
 		vdd_pwrcl_mx_ao-supply = <&pm660l_s1_level_ao>;
 
-		qcom,mx-turbo-freq = <3300000001 3300000001 3300000001>;
+		qcom,mx-turbo-freq = <1440000000 1708000000 3300000001>;
 		l3-devs = <&l3_cpu0 &l3_cpu6>;
 
 		clock-names = "xo_ao";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
index 829dfcc..7d83184 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
@@ -33,6 +33,12 @@
 			qcom,ion-heap-type = "DMA";
 		};
 
+		qcom,ion-heap@19 { /* QSEECOM TA HEAP */
+			reg = <19>;
+			memory-region = <&qseecom_ta_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
 		qcom,ion-heap@13 { /* SECURE SPSS HEAP */
 			reg = <13>;
 			memory-region = <&secure_sp_mem>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index 8f1afe9..b24ef1d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -139,12 +139,4 @@
 		reg = <0xC300000 0x1000>, <0xC3F0004 0x4>;
 		reg-names = "phys_addr_base", "offset_addr";
 	};
-
-	pdc: interrupt-controller@0xb220000{
-		compatible = "qcom,pdc-sdm845";
-		reg = <0xb220000 0x400>;
-		#interrupt-cells = <3>;
-		interrupt-parent = <&intc>;
-		interrupt-controller;
-	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 26d32a1..3ee0138 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -51,6 +51,26 @@
 	status = "ok";
 };
 
+&soc {
+	gpio_keys {
+		compatible = "gpio-keys";
+		label = "gpio-keys";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_vol_up_default>;
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm8998_gpios 6 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
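+			/* EV_KEY, code 115 = KEY_VOLUMEUP */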
+			linux,code = <115>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+	};
+};
+
 &qupv3_se3_i2c {
 	status = "ok";
 	nq@28 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index d12a954..dd4e0b1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -510,11 +510,6 @@
 &dsi_dual_nt35597_truly_video {
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
-	qcom,mdss-dsi-min-refresh-rate = <53>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
-	qcom,mdss-dsi-pan-enable-dynamic-fps;
-	qcom,mdss-dsi-pan-fps-update =
-		"dfps_immediate_porch_mode_vfp";
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
@@ -580,11 +575,6 @@
 &dsi_nt35597_truly_dsc_video {
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
-	qcom,mdss-dsi-min-refresh-rate = <53>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
-	qcom,mdss-dsi-pan-enable-dynamic-fps;
-	qcom,mdss-dsi-pan-fps-update =
-		"dfps_immediate_porch_mode_vfp";
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 5b3178d..f77dd19 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -533,58 +533,82 @@
 			reg = <0 0x85fc0000 0 0x2f40000>;
 		};
 
-		pil_camera_mem: camera_region@8ab00000 {
-			compatible = "removed-dma-pool";
-			no-map;
-			reg = <0 0x8ab00000 0 0x500000>;
-		};
-
-		pil_adsp_mem: pil_adsp_region@8b100000 {
-			compatible = "removed-dma-pool";
-			no-map;
-			reg = <0 0x8b100000 0 0x1a00000>;
-		};
-
-		wlan_fw_region: wlan_fw_region@8cb00000 {
+		qseecom_mem: qseecom_region@0x8ab00000 {
 			compatible = "shared-dma-pool";
-			reg = <0 0x8cb00000 0 0x100000>;
+			no-map;
+			reg = <0 0x8ab00000 0 0x1400000>;
 		};
 
-		pil_modem_mem: modem_region@8cc00000 {
+		pil_camera_mem: camera_region@0x8bf00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x8cc00000 0 0x7600000>;
+			reg = <0 0x8bf00000 0 0x500000>;
 		};
 
-		pil_video_mem: pil_video_region@94200000 {
+		pil_ipa_fw_mem: ipa_fw_region@0x8c400000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x94200000 0 0x500000>;
+			reg = <0 0x8c400000 0 0x10000>;
 		};
 
-		pil_cdsp_mem: cdsp_regions@94700000 {
+		pil_ipa_gsi_mem: ipa_gsi_region@0x8c410000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x94700000 0 0x800000>;
+			reg = <0 0x8c410000 0 0x5000>;
 		};
 
-		pil_mba_mem: pil_mba_region@0x94f00000 {
+		pil_gpu_mem: gpu_region@0x8c415000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x94f00000 0 0x200000>;
+			reg = <0 0x8c415000 0 0x2000>;
 		};
 
-		pil_slpi_mem: pil_slpi_region@95100000 {
+		pil_adsp_mem: adsp_region@0x8c500000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x95100000 0 0x1400000>;
+			reg = <0 0x8c500000 0 0x1a00000>;
 		};
 
-
-		pil_spss_mem: spss_region@96500000 {
+		wlan_fw_region: wlan_fw_region@0x8df00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x96500000 0 0x100000>;
+			reg = <0 0x8df00000 0 0x100000>;
+		};
+
+		pil_modem_mem: modem_region@0x8e000000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x8e000000 0 0x7800000>;
+		};
+
+		pil_video_mem: video_region@0x95800000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x95800000 0 0x500000>;
+		};
+
+		pil_cdsp_mem: cdsp_region@0x95d00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x95d00000 0 0x800000>;
+		};
+
+		pil_mba_mem: mba_region@0x96500000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x96500000 0 0x200000>;
+		};
+
+		pil_slpi_mem: slpi_region@0x96700000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x96700000 0 0x1400000>;
+		};
+
+		pil_spss_mem: pil_spss_region@0x97b00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x97b00000 0 0x100000>;
 		};
 
 		adsp_mem: adsp_region {
@@ -595,12 +619,12 @@
 			size = <0 0x1000000>;
 		};
 
-		qseecom_mem: qseecom_region {
+		qseecom_ta_mem: qseecom_ta_region {
 			compatible = "shared-dma-pool";
 			alloc-ranges = <0 0x00000000 0 0xffffffff>;
-			no-map;
+			reusable;
 			alignment = <0 0x400000>;
-			size = <0 0x1400000>;
+			size = <0 0x1000000>;
 		};
 
 		secure_sp_mem: secure_sp_region { /* SPSS-HLOS ION shared mem */
@@ -754,6 +778,14 @@
 		interrupt-parent = <&intc>;
 	};
 
+	pdc: interrupt-controller@b220000 {
+		compatible = "qcom,pdc-sdm845";
+		reg = <0xb220000 0x400>;
+		#interrupt-cells = <3>;
+		interrupt-parent = <&intc>;
+		interrupt-controller;
+	};
+
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupts = <1 1 0xf08>,
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index e225ede..e99c988 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -287,12 +287,14 @@
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_HS=y
 CONFIG_SERIAL_MSM_SMD=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_MSM_SMD_PKT=y
 CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MSM_V2=y
 CONFIG_SPI=y
 CONFIG_SPI_QUP=y
 CONFIG_SPI_SPIDEV=y
@@ -353,6 +355,7 @@
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
@@ -360,7 +363,32 @@
 CONFIG_HID_MAGICMOUSE=y
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_MULTITOUCH=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
 CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_MSM_SSPHY_QMP=y
@@ -415,7 +443,6 @@
 CONFIG_QPNP_COINCELL=y
 CONFIG_QPNP_REVID=y
 CONFIG_USB_BAM=y
-CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
@@ -426,6 +453,7 @@
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_SECURE_BUFFER=y
 CONFIG_QCOM_EARLY_RANDOM=y
 CONFIG_MSM_SMEM=y
@@ -444,6 +472,8 @@
 CONFIG_MSM_PM=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_ARM_GIC_V3_ACL=y
@@ -471,8 +501,14 @@
 CONFIG_CPU_FREQ_SWITCH_PROFILER=y
 CONFIG_DEBUG_ALIGN_RODATA=y
 CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
 CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
 CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index b78b7a0..e72d0b7 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -297,12 +297,14 @@
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_HS=y
 CONFIG_SERIAL_MSM_SMD=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_MSM_SMD_PKT=y
 CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MSM_V2=y
 CONFIG_SPI=y
 CONFIG_SPI_QUP=y
 CONFIG_SPI_SPIDEV=y
@@ -364,6 +366,7 @@
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
@@ -371,7 +374,32 @@
 CONFIG_HID_MAGICMOUSE=y
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_MULTITOUCH=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
 CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_MSM_SSPHY_QMP=y
@@ -426,7 +454,6 @@
 CONFIG_QPNP_COINCELL=y
 CONFIG_QPNP_REVID=y
 CONFIG_USB_BAM=y
-CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
@@ -442,6 +469,7 @@
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_SECURE_BUFFER=y
 CONFIG_QCOM_EARLY_RANDOM=y
 CONFIG_MSM_SMEM=y
@@ -459,8 +487,11 @@
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
+CONFIG_QCOM_DCC=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_ARM_GIC_V3_ACL=y
@@ -535,8 +566,12 @@
 CONFIG_ARM64_PTDUMP=y
 CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_DBGUI=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index e8fe5bc..afaae52 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -39,7 +39,6 @@
 # CONFIG_RD_LZ4 is not set
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
-# CONFIG_AIO is not set
 # CONFIG_MEMBARRIER is not set
 CONFIG_EMBEDDED=y
 # CONFIG_SLUB_DEBUG is not set
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index ca923f1..1597694 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -41,7 +41,6 @@
 # CONFIG_RD_LZ4 is not set
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
-# CONFIG_AIO is not set
 # CONFIG_MEMBARRIER is not set
 CONFIG_EMBEDDED=y
 # CONFIG_COMPAT_BRK is not set
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 3a405dc..4e7eec7 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -93,11 +93,9 @@ static inline void efi_set_pgd(struct mm_struct *mm)
 			 * Defer the switch to the current thread's TTBR0_EL1
 			 * until uaccess_enable(). Restore the current
 			 * thread's saved ttbr0 corresponding to its active_mm
-			 * (if different from init_mm).
 			 */
 			cpu_set_reserved_ttbr0();
-			if (current->active_mm != &init_mm)
-				update_saved_ttbr0(current, current->active_mm);
+			update_saved_ttbr0(current, current->active_mm);
 		}
 	}
 }
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 88025ba..8f8dde1 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -156,29 +156,21 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
 
 #define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
 
-/*
- * This is called when "tsk" is about to enter lazy TLB mode.
- *
- * mm:  describes the currently active mm context
- * tsk: task which is entering lazy tlb
- * cpu: cpu number which is entering lazy tlb
- *
- * tsk->mm will be NULL
- */
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 static inline void update_saved_ttbr0(struct task_struct *tsk,
 				      struct mm_struct *mm)
 {
-	if (system_uses_ttbr0_pan()) {
-		BUG_ON(mm->pgd == swapper_pg_dir);
-		task_thread_info(tsk)->ttbr0 =
-			virt_to_phys(mm->pgd) | ASID(mm) << 48;
-	}
+	u64 ttbr;
+
+	if (!system_uses_ttbr0_pan())
+		return;
+
+	if (mm == &init_mm)
+		ttbr = __pa_symbol(empty_zero_page);
+	else
+		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+
+	task_thread_info(tsk)->ttbr0 = ttbr;
 }
 #else
 static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -187,6 +179,16 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
 }
 #endif
 
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+	/*
+	 * We don't actually care about the ttbr0 mapping, so point it at the
+	 * zero page.
+	 */
+	update_saved_ttbr0(tsk, &init_mm);
+}
+
 static inline void __switch_mm(struct mm_struct *next)
 {
 	unsigned int cpu = smp_processor_id();
@@ -214,11 +216,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
 	 * value may have not been initialised yet (activate_mm caller) or the
 	 * ASID has changed since the last run (following the context switch
-	 * of another thread of the same process). Avoid setting the reserved
-	 * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
+	 * of another thread of the same process).
 	 */
-	if (next != &init_mm)
-		update_saved_ttbr0(tsk, next);
+	update_saved_ttbr0(tsk, next);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 6a8ac04..f9cc7c8 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -212,3 +212,5 @@
 source "drivers/clk/uniphier/Kconfig"
 
 endmenu
+
+source "drivers/clk/msm/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 42042c0..4fdbebb 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,7 +1,7 @@
 # common clock types
 obj-$(CONFIG_HAVE_CLK)		+= clk-devres.o
 obj-$(CONFIG_CLKDEV_LOOKUP)	+= clkdev.o
-obj-$(CONFIG_COMMON_CLK)	+= clk.o
+obj-$(CONFIG_OF)	        += clk.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-divider.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-fixed-factor.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-fixed-rate.o
@@ -92,3 +92,4 @@
 endif
 obj-$(CONFIG_ARCH_ZX)			+= zte/
 obj-$(CONFIG_ARCH_ZYNQ)			+= zynq/
+obj-$(CONFIG_ARCH_QCOM)			+= msm/
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 4f2fb77..020e8ad 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -32,6 +32,8 @@
 
 #include "clk.h"
 
+#if defined(CONFIG_COMMON_CLK)
+
 static DEFINE_SPINLOCK(enable_lock);
 static DEFINE_MUTEX(prepare_lock);
 
@@ -4032,6 +4034,8 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
 
+#endif /* CONFIG_COMMON_CLK */
+
 #ifdef CONFIG_OF
 /**
  * struct of_clk_provider - Clock provider registration structure
@@ -4069,6 +4073,8 @@ struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
 }
 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
 
+#if defined(CONFIG_COMMON_CLK)
+
 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
 {
 	struct clk_onecell_data *clk_data = data;
@@ -4098,6 +4104,29 @@ of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
 }
 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
 
+#endif /* CONFIG_COMMON_CLK */
+
+/**
+ * of_clk_del_provider() - Remove a previously registered clock provider
+ * @np: Device node pointer associated with clock provider
+ */
+void of_clk_del_provider(struct device_node *np)
+{
+	struct of_clk_provider *cp;
+
+	mutex_lock(&of_clk_mutex);
+	list_for_each_entry(cp, &of_clk_providers, link) {
+		if (cp->node == np) {
+			list_del(&cp->link);
+			of_node_put(cp->node);
+			kfree(cp);
+			break;
+		}
+	}
+	mutex_unlock(&of_clk_mutex);
+}
+EXPORT_SYMBOL_GPL(of_clk_del_provider);
+
 /**
  * of_clk_add_provider() - Register a clock provider for a node
  * @np: Device node pointer associated with clock provider
@@ -4168,27 +4197,6 @@ int of_clk_add_hw_provider(struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
 
-/**
- * of_clk_del_provider() - Remove a previously registered clock provider
- * @np: Device node pointer associated with clock provider
- */
-void of_clk_del_provider(struct device_node *np)
-{
-	struct of_clk_provider *cp;
-
-	mutex_lock(&of_clk_mutex);
-	list_for_each_entry(cp, &of_clk_providers, link) {
-		if (cp->node == np) {
-			list_del(&cp->link);
-			of_node_put(cp->node);
-			kfree(cp);
-			break;
-		}
-	}
-	mutex_unlock(&of_clk_mutex);
-}
-EXPORT_SYMBOL_GPL(of_clk_del_provider);
-
 static struct clk_hw *
 __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
 			      struct of_phandle_args *clkspec)
@@ -4317,8 +4325,10 @@ const char *of_clk_get_parent_name(struct device_node *np, int index)
 			else
 				clk_name = NULL;
 		} else {
+#if defined(CONFIG_COMMON_CLK)
 			clk_name = __clk_get_name(clk);
 			clk_put(clk);
+#endif
 		}
 	}
 
@@ -4349,6 +4359,8 @@ int of_clk_parent_fill(struct device_node *np, const char **parents,
 }
 EXPORT_SYMBOL_GPL(of_clk_parent_fill);
 
+#if defined(CONFIG_COMMON_CLK)
+
 struct clock_provider {
 	of_clk_init_cb_t clk_init_cb;
 	struct device_node *np;
@@ -4499,4 +4511,7 @@ void __init of_clk_init(const struct of_device_id *matches)
 			force = true;
 	}
 }
+
+#endif /* CONFIG_COMMON_CLK */
+
 #endif
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index a7d0981..9776a1c 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -12,7 +12,7 @@
 struct clk_hw;
 struct clk_core;
 
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+#if defined(CONFIG_OF)
 struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
 				       const char *dev_id, const char *con_id);
 #endif
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index bb8a77a..94dcad5 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -27,7 +27,7 @@
 static LIST_HEAD(clocks);
 static DEFINE_MUTEX(clocks_mutex);
 
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+#if defined(CONFIG_OF)
 static struct clk *__of_clk_get(struct device_node *np, int index,
 			       const char *dev_id, const char *con_id)
 {
@@ -73,14 +73,10 @@ static struct clk *__of_clk_get_by_name(struct device_node *np,
 		if (name)
 			index = of_property_match_string(np, "clock-names", name);
 		clk = __of_clk_get(np, index, dev_id, name);
-		if (!IS_ERR(clk)) {
+		if (!IS_ERR(clk))
 			break;
-		} else if (name && index >= 0) {
-			if (PTR_ERR(clk) != -EPROBE_DEFER)
-				pr_err("ERROR: could not get clock %s:%s(%i)\n",
-					np->full_name, name ? name : "", index);
+		else if (name && index >= 0)
 			return clk;
-		}
 
 		/*
 		 * No matching clock found on this node.  If the parent node
@@ -190,7 +186,7 @@ struct clk *clk_get_sys(const char *dev_id, const char *con_id)
 out:
 	mutex_unlock(&clocks_mutex);
 
-	return cl ? clk : ERR_PTR(-ENOENT);
+	return cl ? cl->clk : ERR_PTR(-ENOENT);
 }
 EXPORT_SYMBOL(clk_get_sys);
 
diff --git a/drivers/clk/msm/Kconfig b/drivers/clk/msm/Kconfig
new file mode 100644
index 0000000..16f8c32
--- /dev/null
+++ b/drivers/clk/msm/Kconfig
@@ -0,0 +1,18 @@
+config COMMON_CLK_MSM
+	tristate "Support for MSM clock controllers"
+	depends on OF
+	depends on ARCH_QCOM
+	select RATIONAL
+	help
+	  This supports the clock controllers used by MSM devices, such as
+	  the global, mmss and gpu clock controllers.
+	  Say Y if you want to support the clocks exposed by the MSM on
+	  platforms such as msm8953.
+
+config MSM_CLK_CONTROLLER_V2
+	bool "QTI clock driver"
+	depends on COMMON_CLK_MSM
+	---help---
+	   Generate clock data structures from definitions found in
+	   device tree.
+
diff --git a/drivers/clk/msm/Makefile b/drivers/clk/msm/Makefile
new file mode 100644
index 0000000..4176553
--- /dev/null
+++ b/drivers/clk/msm/Makefile
@@ -0,0 +1,19 @@
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-dummy.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-generic.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-local2.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-pll.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-alpha-pll.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-rpm.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-voter.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= reset.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= clock-debug.o
+obj-$(CONFIG_COMMON_CLK_MSM)	+= gdsc.o
+
+obj-$(CONFIG_MSM_CLK_CONTROLLER_V2)	+= msm-clock-controller.o
+
+ifeq ($(CONFIG_COMMON_CLK_MSM), y)
+obj-$(CONFIG_ARCH_MSM8953)	+= clock-gcc-8953.o
+obj-$(CONFIG_ARCH_MSM8953)	+= clock-cpu-8953.o
+obj-$(CONFIG_ARCH_MSM8953)	+= clock-rcgwr.o
+endif
diff --git a/drivers/clk/msm/clock-alpha-pll.c b/drivers/clk/msm/clock-alpha-pll.c
new file mode 100644
index 0000000..dbe8d8e
--- /dev/null
+++ b/drivers/clk/msm/clock-alpha-pll.c
@@ -0,0 +1,1265 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <soc/qcom/clock-alpha-pll.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+#include "clock.h"
+
+#define WAIT_MAX_LOOPS 100
+
+#define MODE_REG(pll)		(*pll->base + pll->offset + 0x0)
+#define LOCK_REG(pll)		(*pll->base + pll->offset + 0x0)
+#define ACTIVE_REG(pll)		(*pll->base + pll->offset + 0x0)
+#define UPDATE_REG(pll)		(*pll->base + pll->offset + 0x0)
+#define L_REG(pll)		(*pll->base + pll->offset + 0x4)
+#define A_REG(pll)		(*pll->base + pll->offset + 0x8)
+#define VCO_REG(pll)		(*pll->base + pll->offset + 0x10)
+#define ALPHA_EN_REG(pll)	(*pll->base + pll->offset + 0x10)
+#define OUTPUT_REG(pll)		(*pll->base + pll->offset + 0x10)
+#define VOTE_REG(pll)		(*pll->base + pll->fsm_reg_offset)
+#define USER_CTL_LO_REG(pll)	(*pll->base + pll->offset + 0x10)
+#define USER_CTL_HI_REG(pll)	(*pll->base + pll->offset + 0x14)
+#define CONFIG_CTL_REG(pll)	(*pll->base + pll->offset + 0x18)
+#define TEST_CTL_LO_REG(pll)	(*pll->base + pll->offset + 0x1c)
+#define TEST_CTL_HI_REG(pll)	(*pll->base + pll->offset + 0x20)
+
+#define PLL_BYPASSNL 0x2
+#define PLL_RESET_N  0x4
+#define PLL_OUTCTRL  0x1
+#define PLL_LATCH_INTERFACE	BIT(11)
+
+#define FABIA_CONFIG_CTL_REG(pll)	(*pll->base + pll->offset + 0x14)
+#define FABIA_USER_CTL_LO_REG(pll)	(*pll->base + pll->offset + 0xc)
+#define FABIA_USER_CTL_HI_REG(pll)	(*pll->base + pll->offset + 0x10)
+#define FABIA_TEST_CTL_LO_REG(pll)	(*pll->base + pll->offset + 0x1c)
+#define FABIA_TEST_CTL_HI_REG(pll)	(*pll->base + pll->offset + 0x20)
+#define FABIA_L_REG(pll)		(*pll->base + pll->offset + 0x4)
+#define FABIA_FRAC_REG(pll)		(*pll->base + pll->offset + 0x38)
+#define FABIA_PLL_OPMODE(pll)		(*pll->base + pll->offset + 0x2c)
+#define FABIA_FRAC_OFF(pll)		(*pll->base + pll->fabia_frac_offset)
+
+#define FABIA_PLL_STANDBY	0x0
+#define FABIA_PLL_RUN		0x1
+#define FABIA_PLL_OUT_MAIN	0x7
+#define FABIA_RATE_MARGIN	500
+#define ALPHA_PLL_ACK_LATCH	BIT(29)
+#define ALPHA_PLL_HW_UPDATE_LOGIC_BYPASS	BIT(23)
+
+/*
+ * Even though 40 bits are present, use only 32 for ease of calculation.
+ */
+#define ALPHA_REG_BITWIDTH 40
+#define ALPHA_BITWIDTH 32
+#define FABIA_ALPHA_BITWIDTH 16
+
+/*
+ * Enable/disable registers could be shared among PLLs when FSM voting
+ * is used. This lock protects against potential race when multiple
+ * PLLs are being enabled/disabled together.
+ */
+static DEFINE_SPINLOCK(alpha_pll_reg_lock);
+
+static unsigned long compute_rate(struct alpha_pll_clk *pll,
+				u32 l_val, u32 a_val)
+{
+	u64 rate, parent_rate;
+	int alpha_bw = ALPHA_BITWIDTH;
+
+	if (pll->is_fabia)
+		alpha_bw = FABIA_ALPHA_BITWIDTH;
+
+	parent_rate = clk_get_rate(pll->c.parent);
+	rate = parent_rate * l_val;
+	rate += (parent_rate * a_val) >> alpha_bw;
+
+	return rate;
+}
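+
+/*
+ * Worked example (illustrative, not part of the original change): with a
+ * 19.2 MHz parent, l_val = 52 and a_val = 0x80000000 (0.5 in 32-bit alpha
+ * notation) give 19.2 MHz * 52.5 = 1,008,000,000 Hz.
+ */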
+
+static bool is_locked(struct alpha_pll_clk *pll)
+{
+	u32 reg = readl_relaxed(LOCK_REG(pll));
+	u32 mask = pll->masks->lock_mask;
+
+	return (reg & mask) == mask;
+}
+
+static bool is_active(struct alpha_pll_clk *pll)
+{
+	u32 reg = readl_relaxed(ACTIVE_REG(pll));
+	u32 mask = pll->masks->active_mask;
+
+	return (reg & mask) == mask;
+}
+
+/*
+ * Check active_flag if PLL is in FSM mode, otherwise check lock_det
+ * bit. This function assumes PLLs are already configured to the
+ * right mode.
+ */
+static bool update_finish(struct alpha_pll_clk *pll)
+{
+	if (pll->fsm_en_mask)
+		return is_active(pll);
+	else
+		return is_locked(pll);
+}
+
+static int wait_for_update(struct alpha_pll_clk *pll)
+{
+	int count;
+
+	for (count = WAIT_MAX_LOOPS; count > 0; count--) {
+		if (update_finish(pll))
+			break;
+		udelay(1);
+	}
+
+	if (!count) {
+		pr_err("%s didn't lock after enabling it!\n", pll->c.dbg_name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __alpha_pll_vote_enable(struct alpha_pll_clk *pll)
+{
+	u32 ena;
+
+	ena = readl_relaxed(VOTE_REG(pll));
+	ena |= pll->fsm_en_mask;
+	writel_relaxed(ena, VOTE_REG(pll));
+
+	/* Make sure enable request goes through before waiting for update */
+	mb();
+
+	return wait_for_update(pll);
+}
+
+static int __alpha_pll_enable(struct alpha_pll_clk *pll, int enable_output)
+{
+	int rc;
+	u32 mode;
+
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset.
+	 */
+	mb();
+	udelay(5);
+
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	rc = wait_for_update(pll);
+	if (rc < 0)
+		return rc;
+
+	/* Enable PLL output. */
+	if (enable_output) {
+		mode |= PLL_OUTCTRL;
+		writel_relaxed(mode, MODE_REG(pll));
+	}
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+	return 0;
+}
+
+static void setup_alpha_pll_values(u64 a_val, u32 l_val, u32 vco_val,
+				struct alpha_pll_clk *pll)
+{
+	struct alpha_pll_masks *masks = pll->masks;
+	u32 regval;
+
+	a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+	writel_relaxed(l_val, L_REG(pll));
+	__iowrite32_copy(A_REG(pll), &a_val, 2);
+
+	if (vco_val != UINT_MAX) {
+		regval = readl_relaxed(VCO_REG(pll));
+		regval &= ~(masks->vco_mask << masks->vco_shift);
+		regval |= vco_val << masks->vco_shift;
+		writel_relaxed(regval, VCO_REG(pll));
+	}
+
+	regval = readl_relaxed(ALPHA_EN_REG(pll));
+	regval |= masks->alpha_en_mask;
+	writel_relaxed(regval, ALPHA_EN_REG(pll));
+}
+
+static int alpha_pll_enable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+	int rc;
+
+	if (unlikely(!pll->inited))
+		__init_alpha_pll(c);
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		rc = __alpha_pll_vote_enable(pll);
+	else
+		rc = __alpha_pll_enable(pll, true);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+
+	return rc;
+}
+
+static int __calibrate_alpha_pll(struct alpha_pll_clk *pll);
+static int dyna_alpha_pll_enable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+	int rc;
+
+	if (unlikely(!pll->inited))
+		__init_alpha_pll(c);
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+
+	if (pll->slew)
+		__calibrate_alpha_pll(pll);
+
+	if (pll->fsm_en_mask)
+		rc = __alpha_pll_vote_enable(pll);
+	else
+		rc = __alpha_pll_enable(pll, true);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+
+	return rc;
+}
+
+#define PLL_OFFLINE_REQ_BIT BIT(7)
+#define PLL_FSM_ENA_BIT BIT(20)
+#define PLL_OFFLINE_ACK_BIT BIT(28)
+#define PLL_ACTIVE_FLAG BIT(30)
+
+static int alpha_pll_enable_hwfsm(struct clk *c)
+{
+	u32 mode;
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+
+	/* Re-enable HW FSM mode, clear OFFLINE request */
+	mode = readl_relaxed(MODE_REG(pll));
+	mode |= PLL_FSM_ENA_BIT;
+	mode &= ~PLL_OFFLINE_REQ_BIT;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Make sure enable request goes through before waiting for update */
+	mb();
+
+	if (wait_for_update(pll) < 0)
+		panic("PLL %s failed to lock", c->dbg_name);
+
+	return 0;
+}
+
+static void alpha_pll_disable_hwfsm(struct clk *c)
+{
+	u32 mode;
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+
+	/* Request PLL_OFFLINE and wait for ack */
+	mode = readl_relaxed(MODE_REG(pll));
+	writel_relaxed(mode | PLL_OFFLINE_REQ_BIT, MODE_REG(pll));
+	while (!(readl_relaxed(MODE_REG(pll)) & PLL_OFFLINE_ACK_BIT))
+		;
+
+	/* Disable HW FSM */
+	mode = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_FSM_ENA_BIT;
+	if (pll->offline_bit_workaround)
+		mode &= ~PLL_OFFLINE_REQ_BIT;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	while (readl_relaxed(MODE_REG(pll)) & PLL_ACTIVE_FLAG)
+		;
+}
+
+static void __alpha_pll_vote_disable(struct alpha_pll_clk *pll)
+{
+	u32 ena;
+
+	ena = readl_relaxed(VOTE_REG(pll));
+	ena &= ~pll->fsm_en_mask;
+	writel_relaxed(ena, VOTE_REG(pll));
+}
+
+static void __alpha_pll_disable(struct alpha_pll_clk *pll)
+{
+	u32 mode;
+
+	mode = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Delay of 2 output clock ticks required until output is disabled */
+	mb();
+	udelay(1);
+
+	mode &= ~(PLL_BYPASSNL | PLL_RESET_N);
+	writel_relaxed(mode, MODE_REG(pll));
+}
+
+static void alpha_pll_disable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_disable(pll);
+	else
+		__alpha_pll_disable(pll);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+}
+
+static void dyna_alpha_pll_disable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_disable(pll);
+	else
+		__alpha_pll_disable(pll);
+
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+}
+
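+/*
+ * The return type is u32, but -EINVAL is returned when no VCO covers the
+ * requested rate; callers must keep the result in a signed int for the
+ * IS_ERR_VALUE() checks to work.
+ */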
+static u32 find_vco(struct alpha_pll_clk *pll, unsigned long rate)
+{
+	unsigned long i;
+	struct alpha_pll_vco_tbl *v = pll->vco_tbl;
+
+	for (i = 0; i < pll->num_vco; i++) {
+		if (rate >= v[i].min_freq && rate <= v[i].max_freq)
+			return v[i].vco_val;
+	}
+
+	return -EINVAL;
+}
+
+static unsigned long __calc_values(struct alpha_pll_clk *pll,
+		unsigned long rate, int *l_val, u64 *a_val, bool round_up)
+{
+	u32 parent_rate;
+	u64 remainder;
+	u64 quotient;
+	unsigned long freq_hz;
+	int alpha_bw = ALPHA_BITWIDTH;
+
+	parent_rate = clk_get_rate(pll->c.parent);
+	quotient = rate;
+	remainder = do_div(quotient, parent_rate);
+	*l_val = quotient;
+
+	if (!remainder) {
+		*a_val = 0;
+		return rate;
+	}
+
+	if (pll->is_fabia)
+		alpha_bw = FABIA_ALPHA_BITWIDTH;
+
+	/* Upper ALPHA_BITWIDTH bits of Alpha */
+	quotient = remainder << alpha_bw;
+	remainder = do_div(quotient, parent_rate);
+
+	if (remainder && round_up)
+		quotient++;
+
+	*a_val = quotient;
+	freq_hz = compute_rate(pll, *l_val, *a_val);
+	return freq_hz;
+}
+
+static unsigned long round_rate_down(struct alpha_pll_clk *pll,
+		unsigned long rate, int *l_val, u64 *a_val)
+{
+	return __calc_values(pll, rate, l_val, a_val, false);
+}
+
+static unsigned long round_rate_up(struct alpha_pll_clk *pll,
+		unsigned long rate, int *l_val, u64 *a_val)
+{
+	return __calc_values(pll, rate, l_val, a_val, true);
+}
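+
+/*
+ * Worked example (illustrative): requesting 1,008,000,000 Hz from a 19.2 MHz
+ * parent gives l_val = 52 with a 9.6 MHz remainder; shifting the remainder
+ * left by the alpha bitwidth and dividing by the parent rate yields
+ * a_val = 0x80000000 (exactly 0.5), so rounding up and down agree here.
+ */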
+
+static bool dynamic_update_finish(struct alpha_pll_clk *pll)
+{
+	u32 reg = readl_relaxed(UPDATE_REG(pll));
+	u32 mask = pll->masks->update_mask;
+
+	return (reg & mask) == 0;
+}
+
+static int wait_for_dynamic_update(struct alpha_pll_clk *pll)
+{
+	int count;
+
+	for (count = WAIT_MAX_LOOPS; count > 0; count--) {
+		if (dynamic_update_finish(pll))
+			break;
+		udelay(1);
+	}
+
+	if (!count) {
+		pr_err("%s didn't latch after updating it!\n", pll->c.dbg_name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dyna_alpha_pll_dynamic_update(struct alpha_pll_clk *pll)
+{
+	struct alpha_pll_masks *masks = pll->masks;
+	u32 regval;
+	int rc;
+
+	regval = readl_relaxed(UPDATE_REG(pll));
+	regval |= masks->update_mask;
+	writel_relaxed(regval, UPDATE_REG(pll));
+
+	rc = wait_for_dynamic_update(pll);
+	if (rc < 0)
+		return rc;
+
+	/*
+	 * HPG mandates a wait of at least 570ns before polling the LOCK
+	 * detect bit. Have a delay of 1us just to be safe.
+	 */
+	mb();
+	udelay(1);
+
+	rc = wait_for_update(pll);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+static int alpha_pll_set_rate(struct clk *c, unsigned long rate);
+static int dyna_alpha_pll_set_rate(struct clk *c, unsigned long rate)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long freq_hz, flags;
+	u32 l_val, vco_val;
+	u64 a_val;
+	int ret;
+
+	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+	if (freq_hz != rate) {
+		pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
+		return -EINVAL;
+	}
+
+	vco_val = find_vco(pll, freq_hz);
+
+	/*
+	 * Dynamic pll update will not support switching frequencies across
+	 * vco ranges. In those cases fall back to normal alpha set rate.
+	 */
+	if (pll->current_vco_val != vco_val) {
+		ret = alpha_pll_set_rate(c, rate);
+		if (!ret)
+			pll->current_vco_val = vco_val;
+		else
+			return ret;
+		return 0;
+	}
+
+	spin_lock_irqsave(&c->lock, flags);
+
+	a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+	writel_relaxed(l_val, L_REG(pll));
+	__iowrite32_copy(A_REG(pll), &a_val, 2);
+
+	/* Ensure that the write above goes through before proceeding. */
+	mb();
+
+	if (c->count)
+		dyna_alpha_pll_dynamic_update(pll);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+/*
+ * Slewing PLLs should be brought up at a frequency in the middle of the
+ * desired VCO range. So after bringing up the PLL at the calibration
+ * frequency, set it back to the desired frequency (the one set by the
+ * previous clk_set_rate call).
+ */
+static int __calibrate_alpha_pll(struct alpha_pll_clk *pll)
+{
+	unsigned long calibration_freq, freq_hz;
+	struct alpha_pll_vco_tbl *vco_tbl = pll->vco_tbl;
+	u64 a_val;
+	u32 l_val;
+	int vco_val;
+	int rc;
+
+	vco_val = find_vco(pll, pll->c.rate);
+	if (IS_ERR_VALUE((unsigned long)vco_val)) {
+		pr_err("alpha pll: not in a valid vco range\n");
+		return -EINVAL;
+	}
+	/*
+	 * Since vco_sel is not allowed to change while the PLL is slewing,
+	 * the vco table should contain only one entry (index 0); use it to
+	 * find the calibration frequency.
+	 */
+	calibration_freq = (vco_tbl[0].min_freq +
+					vco_tbl[0].max_freq)/2;
+
+	freq_hz = round_rate_up(pll, calibration_freq, &l_val, &a_val);
+	if (freq_hz != calibration_freq) {
+		pr_err("alpha_pll: call clk_set_rate with rounded rates!\n");
+		return -EINVAL;
+	}
+
+	setup_alpha_pll_values(a_val, l_val, vco_tbl->vco_val, pll);
+
+	/* Bring up the PLL at the calibration frequency */
+	rc = __alpha_pll_enable(pll, false);
+	if (rc) {
+		pr_err("alpha pll calibration failed\n");
+		return rc;
+	}
+
+	/*
+	 * PLL is already running at calibration frequency.
+	 * So slew pll to the previously set frequency.
+	 */
+	pr_debug("pll %s: setting back to required rate %lu\n", pll->c.dbg_name,
+					pll->c.rate);
+	freq_hz = round_rate_up(pll, pll->c.rate, &l_val, &a_val);
+	setup_alpha_pll_values(a_val, l_val, UINT_MAX, pll);
+	dyna_alpha_pll_dynamic_update(pll);
+
+	return 0;
+}
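+
+/*
+ * Example (illustrative): with the single 20nm "t" VCO entry spanning
+ * 500 MHz to 1250 MHz (see vco_20nm_t below), the calibration frequency
+ * works out to (500 + 1250) / 2 = 875 MHz.
+ */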
+
+static int alpha_pll_dynamic_update(struct alpha_pll_clk *pll)
+{
+	u32 regval;
+
+	/* Latch the input to the PLL */
+	regval = readl_relaxed(MODE_REG(pll));
+	regval |= pll->masks->update_mask;
+	writel_relaxed(regval, MODE_REG(pll));
+
+	/* Wait for 2 reference cycles before checking the ACK bit */
+	udelay(1);
+	if (!(readl_relaxed(MODE_REG(pll)) & ALPHA_PLL_ACK_LATCH)) {
+		WARN(1, "%s: PLL latch failed. Output may be unstable!\n",
+						pll->c.dbg_name);
+		return -EINVAL;
+	}
+
+	/* Return latch input to 0 */
+	regval = readl_relaxed(MODE_REG(pll));
+	regval &= ~pll->masks->update_mask;
+	writel_relaxed(regval, MODE_REG(pll));
+
+	/* Wait for PLL output to stabilize */
+	udelay(100);
+
+	return 0;
+}
+
+static int alpha_pll_set_rate(struct clk *c, unsigned long rate)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_masks *masks = pll->masks;
+	unsigned long flags = 0, freq_hz = 0;
+	u32 l_val, regval;
+	int vco_val;
+	u64 a_val;
+
+	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+	if (freq_hz != rate) {
+		pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
+		return -EINVAL;
+	}
+
+	vco_val = find_vco(pll, freq_hz);
+	if (IS_ERR_VALUE((unsigned long)vco_val)) {
+		pr_err("alpha pll: not in a valid vco range\n");
+		return -EINVAL;
+	}
+
+	if (pll->no_irq_dis)
+		spin_lock(&c->lock);
+	else
+		spin_lock_irqsave(&c->lock, flags);
+
+	/*
+	 * For PLLs that do not support dynamic programming (dynamic_update
+	 * is not set), ensure PLL is off before changing rate. For
+	 * optimization reasons, assume no downstream clock is actively
+	 * using it.
+	 */
+	if (c->count && !pll->dynamic_update)
+		c->ops->disable(c);
+
+	a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+	writel_relaxed(l_val, L_REG(pll));
+	__iowrite32_copy(A_REG(pll), &a_val, 2);
+
+	if (masks->vco_mask) {
+		regval = readl_relaxed(VCO_REG(pll));
+		regval &= ~(masks->vco_mask << masks->vco_shift);
+		regval |= vco_val << masks->vco_shift;
+		writel_relaxed(regval, VCO_REG(pll));
+	}
+
+	regval = readl_relaxed(ALPHA_EN_REG(pll));
+	regval |= masks->alpha_en_mask;
+	writel_relaxed(regval, ALPHA_EN_REG(pll));
+
+	if (c->count && pll->dynamic_update)
+		alpha_pll_dynamic_update(pll);
+
+	if (c->count && !pll->dynamic_update)
+		c->ops->enable(c);
+
+	if (pll->no_irq_dis)
+		spin_unlock(&c->lock);
+	else
+		spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+static long alpha_pll_round_rate(struct clk *c, unsigned long rate)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_vco_tbl *v = pll->vco_tbl;
+	int ret;
+	u32 l_val;
+	unsigned long freq_hz;
+	u64 a_val;
+	int i;
+
+	if (pll->no_prepared_reconfig && c->prepare_count)
+		return -EINVAL;
+
+	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+	if (rate < pll->min_supported_freq)
+		return pll->min_supported_freq;
+	if (pll->is_fabia)
+		return freq_hz;
+
+	ret = find_vco(pll, freq_hz);
+	if (!IS_ERR_VALUE((unsigned long)ret))
+		return freq_hz;
+
+	freq_hz = 0;
+	for (i = 0; i < pll->num_vco; i++) {
+		if (is_better_rate(rate, freq_hz, v[i].min_freq))
+			freq_hz = v[i].min_freq;
+		if (is_better_rate(rate, freq_hz, v[i].max_freq))
+			freq_hz = v[i].max_freq;
+	}
+	if (!freq_hz)
+		return -EINVAL;
+	return freq_hz;
+}
+
+static void update_vco_tbl(struct alpha_pll_clk *pll)
+{
+	int i, l_val;
+	u64 a_val;
+	unsigned long hz;
+
+	/* Round vco limits to valid rates */
+	for (i = 0; i < pll->num_vco; i++) {
+		hz = round_rate_up(pll, pll->vco_tbl[i].min_freq, &l_val,
+					&a_val);
+		pll->vco_tbl[i].min_freq = hz;
+
+		hz = round_rate_down(pll, pll->vco_tbl[i].max_freq, &l_val,
+					&a_val);
+		pll->vco_tbl[i].max_freq = hz;
+	}
+}
+
+/*
+ * Program bias count to be 0x6 (corresponds to 5us), and lock count
+ * bits to 0 (check lock_det for locking).
+ */
+static void __set_fsm_mode(void __iomem *mode_reg)
+{
+	u32 regval = readl_relaxed(mode_reg);
+
+	/* De-assert reset to FSM */
+	regval &= ~BIT(21);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program bias count */
+	regval &= ~BM(19, 14);
+	regval |= BVAL(19, 14, 0x6);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program lock count */
+	regval &= ~BM(13, 8);
+	regval |= BVAL(13, 8, 0x0);
+	writel_relaxed(regval, mode_reg);
+
+	/* Enable PLL FSM voting */
+	regval |= BIT(20);
+	writel_relaxed(regval, mode_reg);
+}
+
+static bool is_fsm_mode(void __iomem *mode_reg)
+{
+	return !!(readl_relaxed(mode_reg) & BIT(20));
+}
+
+void __init_alpha_pll(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_masks *masks = pll->masks;
+	u32 regval;
+
+	if (pll->config_ctl_val)
+		writel_relaxed(pll->config_ctl_val, CONFIG_CTL_REG(pll));
+
+	if (masks->output_mask && pll->enable_config) {
+		regval = readl_relaxed(OUTPUT_REG(pll));
+		regval &= ~masks->output_mask;
+		regval |= pll->enable_config;
+		writel_relaxed(regval, OUTPUT_REG(pll));
+	}
+
+	if (masks->post_div_mask) {
+		regval = readl_relaxed(USER_CTL_LO_REG(pll));
+		regval &= ~masks->post_div_mask;
+		regval |= pll->post_div_config;
+		writel_relaxed(regval, USER_CTL_LO_REG(pll));
+	}
+
+	if (pll->slew) {
+		regval = readl_relaxed(USER_CTL_HI_REG(pll));
+		regval &= ~PLL_LATCH_INTERFACE;
+		writel_relaxed(regval, USER_CTL_HI_REG(pll));
+	}
+
+	if (masks->test_ctl_lo_mask) {
+		regval = readl_relaxed(TEST_CTL_LO_REG(pll));
+		regval &= ~masks->test_ctl_lo_mask;
+		regval |= pll->test_ctl_lo_val;
+		writel_relaxed(regval, TEST_CTL_LO_REG(pll));
+	}
+
+	if (masks->test_ctl_hi_mask) {
+		regval = readl_relaxed(TEST_CTL_HI_REG(pll));
+		regval &= ~masks->test_ctl_hi_mask;
+		regval |= pll->test_ctl_hi_val;
+		writel_relaxed(regval, TEST_CTL_HI_REG(pll));
+	}
+
+	if (pll->fsm_en_mask)
+		__set_fsm_mode(MODE_REG(pll));
+
+	pll->inited = true;
+}
+
+static enum handoff alpha_pll_handoff(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_masks *masks = pll->masks;
+	u64 a_val;
+	u32 alpha_en, l_val, regval;
+
+	/* Set the PLL_HW_UPDATE_LOGIC_BYPASS bit before continuing */
+	if (pll->dynamic_update) {
+		regval = readl_relaxed(MODE_REG(pll));
+		regval |= ALPHA_PLL_HW_UPDATE_LOGIC_BYPASS;
+		writel_relaxed(regval, MODE_REG(pll));
+	}
+
+	update_vco_tbl(pll);
+
+	if (!is_locked(pll)) {
+		if (pll->slew) {
+			if (c->rate && dyna_alpha_pll_set_rate(c, c->rate))
+				WARN(1, "%s: Failed to configure rate\n",
+					c->dbg_name);
+		} else {
+			if (c->rate && alpha_pll_set_rate(c, c->rate))
+				WARN(1, "%s: Failed to configure rate\n",
+					c->dbg_name);
+		}
+		__init_alpha_pll(c);
+		return HANDOFF_DISABLED_CLK;
+	} else if (pll->fsm_en_mask && !is_fsm_mode(MODE_REG(pll))) {
+		WARN(1, "%s should be in FSM mode but is not\n", c->dbg_name);
+	}
+
+	l_val = readl_relaxed(L_REG(pll));
+	/* read u64 in two steps to satisfy alignment constraint */
+	a_val = readl_relaxed(A_REG(pll) + 0x4);
+	a_val = a_val << 32 | readl_relaxed(A_REG(pll));
+	/* get upper 32 bits */
+	a_val = a_val >> (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+	alpha_en = readl_relaxed(ALPHA_EN_REG(pll));
+	alpha_en &= masks->alpha_en_mask;
+	if (!alpha_en)
+		a_val = 0;
+
+	c->rate = compute_rate(pll, l_val, a_val);
+
+	/*
+	 * Unconditionally vote for the PLL; it might be on because of
+	 * another master's vote.
+	 */
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_enable(pll);
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static void __iomem *alpha_pll_list_registers(struct clk *clk, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(clk);
+	static struct clk_register_data data[] = {
+		{"PLL_MODE", 0x0},
+		{"PLL_L_VAL", 0x4},
+		{"PLL_ALPHA_VAL", 0x8},
+		{"PLL_ALPHA_VAL_U", 0xC},
+		{"PLL_USER_CTL", 0x10},
+		{"PLL_CONFIG_CTL", 0x18},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return MODE_REG(pll);
+}
+
+static int __fabia_alpha_pll_enable(struct alpha_pll_clk *pll)
+{
+	int rc;
+	u32 mode;
+
+	/* Disable PLL output */
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Set operation mode to STANDBY */
+	writel_relaxed(FABIA_PLL_STANDBY, FABIA_PLL_OPMODE(pll));
+
+	/* PLL should be in STANDBY mode before continuing */
+	mb();
+
+	/* Bring PLL out of reset */
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Set operation mode to RUN */
+	writel_relaxed(FABIA_PLL_RUN, FABIA_PLL_OPMODE(pll));
+
+	rc = wait_for_update(pll);
+	if (rc < 0)
+		return rc;
+
+	/* Enable the main PLL output */
+	mode  = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+	mode |= FABIA_PLL_OUT_MAIN;
+	writel_relaxed(mode, FABIA_USER_CTL_LO_REG(pll));
+
+	/* Enable PLL outputs */
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+	return 0;
+}
+
+static int fabia_alpha_pll_enable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		rc = __alpha_pll_vote_enable(pll);
+	else
+		rc = __fabia_alpha_pll_enable(pll);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+
+	return rc;
+}
+
+static void __fabia_alpha_pll_disable(struct alpha_pll_clk *pll)
+{
+	u32 mode;
+
+	/* Disable PLL outputs */
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Disable the main PLL output */
+	mode  = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+	mode &= ~FABIA_PLL_OUT_MAIN;
+	writel_relaxed(mode, FABIA_USER_CTL_LO_REG(pll));
+
+	/* Place the PLL in the OFF state */
+	mode  = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_RESET_N;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Place the PLL mode in STANDBY */
+	writel_relaxed(FABIA_PLL_STANDBY, FABIA_PLL_OPMODE(pll));
+}
+
+static void fabia_alpha_pll_disable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_disable(pll);
+	else
+		__fabia_alpha_pll_disable(pll);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+}
+
+static int fabia_alpha_pll_set_rate(struct clk *c, unsigned long rate)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags, freq_hz;
+	u32 l_val;
+	u64 a_val;
+
+	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+	if (freq_hz > rate + FABIA_RATE_MARGIN || freq_hz < rate) {
+		pr_err("%s: Call clk_set_rate with rounded rates!\n",
+						c->dbg_name);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&c->lock, flags);
+	/* Set the new L value */
+	writel_relaxed(l_val, FABIA_L_REG(pll));
+	if (pll->fabia_frac_offset)
+		writel_relaxed(a_val, FABIA_FRAC_OFF(pll));
+	else
+		writel_relaxed(a_val, FABIA_FRAC_REG(pll));
+
+	alpha_pll_dynamic_update(pll);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+void __init_fabia_alpha_pll(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_masks *masks = pll->masks;
+	u32 regval;
+
+	if (pll->config_ctl_val)
+		writel_relaxed(pll->config_ctl_val, FABIA_CONFIG_CTL_REG(pll));
+
+	if (masks->output_mask && pll->enable_config) {
+		regval = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+		regval &= ~masks->output_mask;
+		regval |= pll->enable_config;
+		writel_relaxed(regval, FABIA_USER_CTL_LO_REG(pll));
+	}
+
+	if (masks->post_div_mask) {
+		regval = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+		regval &= ~masks->post_div_mask;
+		regval |= pll->post_div_config;
+		writel_relaxed(regval, FABIA_USER_CTL_LO_REG(pll));
+	}
+
+	if (pll->slew) {
+		regval = readl_relaxed(FABIA_USER_CTL_HI_REG(pll));
+		regval &= ~PLL_LATCH_INTERFACE;
+		writel_relaxed(regval, FABIA_USER_CTL_HI_REG(pll));
+	}
+
+	if (masks->test_ctl_lo_mask) {
+		regval = readl_relaxed(FABIA_TEST_CTL_LO_REG(pll));
+		regval &= ~masks->test_ctl_lo_mask;
+		regval |= pll->test_ctl_lo_val;
+		writel_relaxed(regval, FABIA_TEST_CTL_LO_REG(pll));
+	}
+
+	if (masks->test_ctl_hi_mask) {
+		regval = readl_relaxed(FABIA_TEST_CTL_HI_REG(pll));
+		regval &= ~masks->test_ctl_hi_mask;
+		regval |= pll->test_ctl_hi_val;
+		writel_relaxed(regval, FABIA_TEST_CTL_HI_REG(pll));
+	}
+
+	if (pll->fsm_en_mask)
+		__set_fsm_mode(MODE_REG(pll));
+
+	pll->inited = true;
+}
+
+static enum handoff fabia_alpha_pll_handoff(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	u64 a_val;
+	u32 l_val, regval;
+
+	/* Set the PLL_HW_UPDATE_LOGIC_BYPASS bit before continuing */
+	regval = readl_relaxed(MODE_REG(pll));
+	regval |= ALPHA_PLL_HW_UPDATE_LOGIC_BYPASS;
+	writel_relaxed(regval, MODE_REG(pll));
+
+	if (!is_locked(pll)) {
+		if (c->rate && fabia_alpha_pll_set_rate(c, c->rate))
+			WARN(1, "%s: Failed to configure rate\n", c->dbg_name);
+		__init_alpha_pll(c);
+		return HANDOFF_DISABLED_CLK;
+	} else if (pll->fsm_en_mask && !is_fsm_mode(MODE_REG(pll))) {
+		WARN(1, "%s should be in FSM mode but is not\n", c->dbg_name);
+	}
+
+	l_val = readl_relaxed(FABIA_L_REG(pll));
+
+	if (pll->fabia_frac_offset)
+		a_val = readl_relaxed(FABIA_FRAC_OFF(pll));
+	else
+		a_val = readl_relaxed(FABIA_FRAC_REG(pll));
+
+	c->rate = compute_rate(pll, l_val, a_val);
+
+	/*
+	 * Unconditionally vote for the PLL; it might be on because of
+	 * another master's vote.
+	 */
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_enable(pll);
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+const struct clk_ops clk_ops_alpha_pll = {
+	.enable = alpha_pll_enable,
+	.disable = alpha_pll_disable,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = alpha_pll_set_rate,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+const struct clk_ops clk_ops_alpha_pll_hwfsm = {
+	.enable = alpha_pll_enable_hwfsm,
+	.disable = alpha_pll_disable_hwfsm,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = alpha_pll_set_rate,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+const struct clk_ops clk_ops_fixed_alpha_pll = {
+	.enable = alpha_pll_enable,
+	.disable = alpha_pll_disable,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+const struct clk_ops clk_ops_fixed_fabia_alpha_pll = {
+	.enable = fabia_alpha_pll_enable,
+	.disable = fabia_alpha_pll_disable,
+	.handoff = fabia_alpha_pll_handoff,
+};
+
+const struct clk_ops clk_ops_fabia_alpha_pll = {
+	.enable = fabia_alpha_pll_enable,
+	.disable = fabia_alpha_pll_disable,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = fabia_alpha_pll_set_rate,
+	.handoff = fabia_alpha_pll_handoff,
+};
+
+const struct clk_ops clk_ops_dyna_alpha_pll = {
+	.enable = dyna_alpha_pll_enable,
+	.disable = dyna_alpha_pll_disable,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = dyna_alpha_pll_set_rate,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+static struct alpha_pll_masks masks_20nm_p = {
+	.lock_mask = BIT(31),
+	.active_mask = BIT(30),
+	.vco_mask = BM(21, 20) >> 20,
+	.vco_shift = 20,
+	.alpha_en_mask = BIT(24),
+	.output_mask = 0xF,
+	.post_div_mask = 0xF00,
+};
+
+static struct alpha_pll_vco_tbl vco_20nm_p[] = {
+	VCO(3,  250000000,  500000000),
+	VCO(2,  500000000, 1000000000),
+	VCO(1, 1000000000, 1500000000),
+	VCO(0, 1500000000, 2000000000),
+};
+
+static struct alpha_pll_masks masks_20nm_t = {
+	.lock_mask = BIT(31),
+	.alpha_en_mask = BIT(24),
+	.output_mask = 0xf,
+};
+
+static struct alpha_pll_vco_tbl vco_20nm_t[] = {
+	VCO(0, 500000000, 1250000000),
+};
+
+static struct alpha_pll_clk *alpha_pll_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct alpha_pll_clk *pll;
+	struct msmclk_data *drv;
+
+	pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+	if (!pll)
+		return ERR_PTR(-ENOMEM);
+
+	if (of_property_read_u32(np, "qcom,base-offset", &pll->offset)) {
+		dt_err(np, "missing qcom,base-offset\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Optional property */
+	of_property_read_u32(np, "qcom,post-div-config",
+					&pll->post_div_config);
+
+	pll->masks = devm_kzalloc(dev, sizeof(*pll->masks), GFP_KERNEL);
+	if (!pll->masks)
+		return ERR_PTR(-ENOMEM);
+
+	if (of_device_is_compatible(np, "qcom,fixed-alpha-pll-20p") ||
+		of_device_is_compatible(np, "qcom,alpha-pll-20p")) {
+		*pll->masks = masks_20nm_p;
+		pll->vco_tbl = vco_20nm_p;
+		pll->num_vco = ARRAY_SIZE(vco_20nm_p);
+	} else if (of_device_is_compatible(np, "qcom,fixed-alpha-pll-20t") ||
+		of_device_is_compatible(np, "qcom,alpha-pll-20t")) {
+		*pll->masks = masks_20nm_t;
+		pll->vco_tbl = vco_20nm_t;
+		pll->num_vco = ARRAY_SIZE(vco_20nm_t);
+	} else {
+		dt_err(np, "unexpected compatible string\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	pll->base = &drv->base;
+	return pll;
+}
+
+static void *variable_rate_alpha_pll_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct alpha_pll_clk *pll;
+
+	pll = alpha_pll_dt_parser(dev, np);
+	if (IS_ERR(pll))
+		return pll;
+
+	/* Optional property */
+	of_property_read_u32(np, "qcom,output-enable", &pll->enable_config);
+
+	pll->c.ops = &clk_ops_alpha_pll;
+	return msmclk_generic_clk_init(dev, np, &pll->c);
+}
+
+static void *fixed_rate_alpha_pll_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct alpha_pll_clk *pll;
+	int rc;
+	u32 val;
+
+	pll = alpha_pll_dt_parser(dev, np);
+	if (IS_ERR(pll))
+		return pll;
+
+	rc = of_property_read_u32(np, "qcom,pll-config-rate", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,pll-config-rate\n");
+		return ERR_PTR(-EINVAL);
+	}
+	pll->c.rate = val;
+
+	rc = of_property_read_u32(np, "qcom,output-enable",
+						&pll->enable_config);
+	if (rc) {
+		dt_err(np, "missing qcom,output-enable\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Optional property */
+	rc = of_property_read_u32(np, "qcom,fsm-en-bit", &val);
+	if (!rc) {
+		rc = of_property_read_u32(np, "qcom,fsm-en-offset",
+						&pll->fsm_reg_offset);
+		if (rc) {
+			dt_err(np, "missing qcom,fsm-en-offset\n");
+			return ERR_PTR(-EINVAL);
+		}
+		pll->fsm_en_mask = BIT(val);
+	}
+
+	pll->c.ops = &clk_ops_fixed_alpha_pll;
+	return msmclk_generic_clk_init(dev, np, &pll->c);
+}
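+
+/*
+ * Illustrative (hypothetical) device-tree node consumed by the fixed-rate
+ * parser above; the property names match the of_property_read_u32() calls:
+ *
+ *	pll0: qcom,fixed-alpha-pll@21000 {
+ *		compatible = "qcom,fixed-alpha-pll-20p";
+ *		qcom,base-offset = <0x21000>;
+ *		qcom,pll-config-rate = <800000000>;
+ *		qcom,output-enable = <0xf>;
+ *	};
+ */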
+
+MSMCLK_PARSER(fixed_rate_alpha_pll_dt_parser, "qcom,fixed-alpha-pll-20p", 0);
+MSMCLK_PARSER(fixed_rate_alpha_pll_dt_parser, "qcom,fixed-alpha-pll-20t", 1);
+MSMCLK_PARSER(variable_rate_alpha_pll_dt_parser, "qcom,alpha-pll-20p", 0);
+MSMCLK_PARSER(variable_rate_alpha_pll_dt_parser, "qcom,alpha-pll-20t", 1);
diff --git a/drivers/clk/msm/clock-cpu-8953.c b/drivers/clk/msm/clock-cpu-8953.c
new file mode 100644
index 0000000..c771755
--- /dev/null
+++ b/drivers/clk/msm/clock-cpu-8953.c
@@ -0,0 +1,989 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/suspend.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/uaccess.h>
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-pll.h>
+
+#include <dt-bindings/clock/msm-clocks-8953.h>
+
+#include "clock.h"
+
+#define APCS_PLL_MODE		0x0
+#define APCS_PLL_L_VAL		0x8
+#define APCS_PLL_ALPHA_VAL	0x10
+#define APCS_PLL_USER_CTL	0x18
+#define APCS_PLL_CONFIG_CTL_LO	0x20
+#define APCS_PLL_CONFIG_CTL_HI	0x24
+#define APCS_PLL_STATUS		0x28
+#define APCS_PLL_TEST_CTL_LO	0x30
+#define APCS_PLL_TEST_CTL_HI	0x34
+
+#define UPDATE_CHECK_MAX_LOOPS 5000
+#define CCI_RATE(rate)		(div_u64((rate * 10ULL), 25))
+#define PLL_MODE(x)		(*(x)->base + (unsigned long) (x)->mode_reg)
+
+#define GLB_DIAG		0x0b11101c
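+
+/*
+ * CCI_RATE() above scales a cluster rate by 10/25, i.e. the CCI interconnect
+ * runs at 40% of the CPU clock rate (e.g. a 2 GHz CPU rate maps to 800 MHz).
+ */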
+
+enum {
+	APCS_C0_PLL_BASE,
+	APCS0_DBG_BASE,
+	N_BASES,
+};
+
+static void __iomem *virt_bases[N_BASES];
+struct platform_device *cpu_clock_dev;
+
+DEFINE_EXT_CLK(xo_a_clk, NULL);
+DEFINE_VDD_REGS_INIT(vdd_pwrcl, 1);
+
+enum {
+	A53SS_MUX_C0,
+	A53SS_MUX_C1,
+	A53SS_MUX_CCI,
+	A53SS_MUX_NUM,
+};
+
+enum vdd_mx_pll_levels {
+	VDD_MX_OFF,
+	VDD_MX_SVS,
+	VDD_MX_NOM,
+	VDD_MX_TUR,
+	VDD_MX_NUM,
+};
+
+static int vdd_pll_levels[] = {
+	RPM_REGULATOR_LEVEL_NONE,	/* VDD_PLL_OFF */
+	RPM_REGULATOR_LEVEL_SVS,	/* VDD_PLL_SVS */
+	RPM_REGULATOR_LEVEL_NOM,	/* VDD_PLL_NOM */
+	RPM_REGULATOR_LEVEL_TURBO,	/* VDD_PLL_TUR */
+};
+
+static DEFINE_VDD_REGULATORS(vdd_pll, VDD_MX_NUM, 1,
+				vdd_pll_levels, NULL);
+
+#define VDD_MX_HF_FMAX_MAP1(l1, f1) \
+	.vdd_class = &vdd_pll,			\
+	.fmax = (unsigned long[VDD_MX_NUM]) {	\
+		[VDD_MX_##l1] = (f1),		\
+	},					\
+	.num_fmax = VDD_MX_NUM
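+
+/*
+ * Example: VDD_MX_HF_FMAX_MAP1(SVS, 2400000000UL) (used for apcs_hf_pll
+ * below) declares that PLL rates up to 2.4 GHz only require the MX rail to
+ * be at the SVS corner.
+ */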
+
+static struct clk_ops clk_ops_variable_rate;
+
+/* Early output of PLL */
+static struct pll_clk apcs_hf_pll = {
+	.mode_reg = (void __iomem *)APCS_PLL_MODE,
+	.l_reg = (void __iomem *)APCS_PLL_L_VAL,
+	.alpha_reg = (void __iomem *)APCS_PLL_ALPHA_VAL,
+	.config_reg = (void __iomem *)APCS_PLL_USER_CTL,
+	.config_ctl_reg = (void __iomem *)APCS_PLL_CONFIG_CTL_LO,
+	.config_ctl_hi_reg = (void __iomem *)APCS_PLL_CONFIG_CTL_HI,
+	.test_ctl_lo_reg = (void __iomem *)APCS_PLL_TEST_CTL_LO,
+	.test_ctl_hi_reg = (void __iomem *)APCS_PLL_TEST_CTL_HI,
+	.status_reg = (void __iomem *)APCS_PLL_MODE,
+	.init_test_ctl = true,
+	.test_ctl_dbg = true,
+	.masks = {
+		.pre_div_mask = BIT(12),
+		.post_div_mask = BM(9, 8),
+		.mn_en_mask = BIT(24),
+		.main_output_mask = BIT(0),
+		.early_output_mask = BIT(3),
+		.lock_mask = BIT(31),
+	},
+	.vals = {
+		.post_div_masked = 0x100,
+		.pre_div_masked = 0x0,
+		.config_ctl_val = 0x200D4828,
+		.config_ctl_hi_val = 0x006,
+		.test_ctl_hi_val = 0x00004000,
+		.test_ctl_lo_val = 0x1C000000,
+	},
+	.base = &virt_bases[APCS_C0_PLL_BASE],
+	.max_rate = 2208000000UL,
+	.min_rate = 652800000UL,
+	.src_rate =  19200000UL,
+	.c = {
+		.parent = &xo_a_clk.c,
+		.dbg_name = "apcs_hf_pll",
+		.ops = &clk_ops_variable_rate,
+		/* The MSM's MX level is much higher than that of the PLL */
+		VDD_MX_HF_FMAX_MAP1(SVS, 2400000000UL),
+		CLK_INIT(apcs_hf_pll.c),
+	},
+};
+
+static const char * const mux_names[] = {"c0", "c1", "cci"};
+
+/* Perf Cluster */
+static struct mux_div_clk a53ssmux_perf = {
+	.ops = &rcg_mux_div_ops,
+	.data = {
+		.max_div = 32,
+		.min_div = 2,
+		.is_half_divider = true,
+	},
+	.c = {
+		.dbg_name = "a53ssmux_perf",
+		.ops = &clk_ops_mux_div_clk,
+		CLK_INIT(a53ssmux_perf.c),
+	},
+	.div_mask = BM(4, 0),
+	.src_mask = BM(10, 8) >> 8,
+	.src_shift = 8,
+	MUX_SRC_LIST(
+		{ &apcs_hf_pll.c,	 5 },  /* PLL early */
+	),
+};
+
+/* Little Cluster */
+static struct mux_div_clk a53ssmux_pwr = {
+	.ops = &rcg_mux_div_ops,
+	.data = {
+		.max_div = 32,
+		.min_div = 2,
+		.is_half_divider = true,
+	},
+	.c = {
+		.dbg_name = "a53ssmux_pwr",
+		.ops = &clk_ops_mux_div_clk,
+		CLK_INIT(a53ssmux_pwr.c),
+	},
+	.div_mask = BM(4, 0),
+	.src_mask = BM(10, 8) >> 8,
+	.src_shift = 8,
+	MUX_SRC_LIST(
+		{ &apcs_hf_pll.c,	 5 },  /* PLL early */
+	),
+};
+
+static struct mux_div_clk ccissmux = {
+	.ops = &rcg_mux_div_ops,
+	.data = {
+		.max_div = 32,
+		.min_div = 2,
+		.is_half_divider = true,
+	},
+	.c = {
+		.dbg_name = "ccissmux",
+		.ops = &clk_ops_mux_div_clk,
+		CLK_INIT(ccissmux.c),
+	},
+	.div_mask = BM(4, 0),
+	.src_mask = BM(10, 8) >> 8,
+	.src_shift = 8,
+	MUX_SRC_LIST(
+		{ &apcs_hf_pll.c,	 5 },  /* PLL early */
+	),
+};
+
+struct cpu_clk_8953 {
+	u32 cpu_reg_mask;
+	cpumask_t cpumask;
+	bool hw_low_power_ctrl;
+	struct pm_qos_request req;
+	struct clk c;
+	bool set_rate_done;
+	s32 cpu_latency_no_l2_pc_us;
+};
+
+static struct cpu_clk_8953 a53_pwr_clk;
+static struct cpu_clk_8953 a53_perf_clk;
+static struct cpu_clk_8953 cci_clk;
+static void do_nothing(void *unused) { }
+
+static inline struct cpu_clk_8953 *to_cpu_clk_8953(struct clk *c)
+{
+	return container_of(c, struct cpu_clk_8953, c);
+}
+
+static enum handoff cpu_clk_8953_handoff(struct clk *c)
+{
+	c->rate = clk_get_rate(c->parent);
+	return HANDOFF_DISABLED_CLK;
+}
+
+static long cpu_clk_8953_round_rate(struct clk *c, unsigned long rate)
+{
+	return clk_round_rate(c->parent, rate);
+}
+
+static int cpu_clk_8953_set_rate(struct clk *c, unsigned long rate)
+{
+	int ret = 0;
+	struct cpu_clk_8953 *cpuclk = to_cpu_clk_8953(c);
+	bool hw_low_power_ctrl = cpuclk->hw_low_power_ctrl;
+
+	/*
+	 * If hardware control of the clock tree is enabled during power
+	 * collapse, set up a PM QOS request to prevent power collapse and
+	 * wake up one of the CPUs in this clock domain, to ensure software
+	 * control while the clock rate is being switched.
+	 */
+	if (hw_low_power_ctrl) {
+		memset(&cpuclk->req, 0, sizeof(cpuclk->req));
+		cpumask_copy(&cpuclk->req.cpus_affine,
+				(const struct cpumask *)&cpuclk->cpumask);
+		cpuclk->req.type = PM_QOS_REQ_AFFINE_CORES;
+		pm_qos_add_request(&cpuclk->req, PM_QOS_CPU_DMA_LATENCY,
+					cpuclk->cpu_latency_no_l2_pc_us);
+		smp_call_function_any(&cpuclk->cpumask, do_nothing,
+				NULL, 1);
+	}
+
+	ret = clk_set_rate(c->parent, rate);
+	if (!ret) {
+		/* update the rates of perf & power cluster */
+		if (c == &a53_pwr_clk.c)
+			a53_perf_clk.c.rate = rate;
+		if (c == &a53_perf_clk.c)
+			a53_pwr_clk.c.rate  = rate;
+		cci_clk.c.rate = CCI_RATE(rate);
+	}
+
+	/* Remove PM QOS request */
+	if (hw_low_power_ctrl)
+		pm_qos_remove_request(&cpuclk->req);
+
+	return ret;
+}
+
+static int cpu_clk_cci_set_rate(struct clk *c, unsigned long rate)
+{
+	int ret = 0;
+	struct cpu_clk_8953 *cpuclk = to_cpu_clk_8953(c);
+
+	if (cpuclk->set_rate_done)
+		return ret;
+
+	ret = clk_set_rate(c->parent, rate);
+	if (!ret)
+		cpuclk->set_rate_done = true;
+	return ret;
+}
+
+static void __iomem *variable_pll_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	static struct clk_register_data data[] = {
+		{"MODE", 0x0},
+		{"L", 0x8},
+		{"ALPHA", 0x10},
+		{"USER_CTL", 0x18},
+		{"CONFIG_CTL_LO", 0x20},
+		{"CONFIG_CTL_HI", 0x24},
+		{"STATUS", 0x28},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return PLL_MODE(pll);
+}
+
+static const  struct clk_ops clk_ops_cpu = {
+	.set_rate = cpu_clk_8953_set_rate,
+	.round_rate = cpu_clk_8953_round_rate,
+	.handoff = cpu_clk_8953_handoff,
+};
+
+static const struct clk_ops clk_ops_cci = {
+	.set_rate = cpu_clk_cci_set_rate,
+	.round_rate = cpu_clk_8953_round_rate,
+	.handoff = cpu_clk_8953_handoff,
+};
+
+static struct cpu_clk_8953 a53_pwr_clk = {
+	.cpu_reg_mask = 0x3,
+	.cpu_latency_no_l2_pc_us = 280,
+	.c = {
+		.parent = &a53ssmux_pwr.c,
+		.ops = &clk_ops_cpu,
+		.vdd_class = &vdd_pwrcl,
+		.dbg_name = "a53_pwr_clk",
+		CLK_INIT(a53_pwr_clk.c),
+	},
+};
+
+static struct cpu_clk_8953 a53_perf_clk = {
+	.cpu_reg_mask = 0x103,
+	.cpu_latency_no_l2_pc_us = 280,
+	.c = {
+		.parent = &a53ssmux_perf.c,
+		.ops = &clk_ops_cpu,
+		.vdd_class = &vdd_pwrcl,
+		.dbg_name = "a53_perf_clk",
+		CLK_INIT(a53_perf_clk.c),
+	},
+};
+
+static struct cpu_clk_8953 cci_clk = {
+	.c = {
+		.parent = &ccissmux.c,
+		.ops = &clk_ops_cci,
+		.vdd_class = &vdd_pwrcl,
+		.dbg_name = "cci_clk",
+		CLK_INIT(cci_clk.c),
+	},
+};
+
+static struct measure_clk apc0_m_clk = {
+	.c = {
+		.ops = &clk_ops_empty,
+		.dbg_name = "apc0_m_clk",
+		CLK_INIT(apc0_m_clk.c),
+	},
+};
+
+static struct measure_clk apc1_m_clk = {
+	.c = {
+		.ops = &clk_ops_empty,
+		.dbg_name = "apc1_m_clk",
+		CLK_INIT(apc1_m_clk.c),
+	},
+};
+
+static struct measure_clk cci_m_clk = {
+	.c = {
+		.ops = &clk_ops_empty,
+		.dbg_name = "cci_m_clk",
+		CLK_INIT(cci_m_clk.c),
+	},
+};
+
+static struct mux_clk cpu_debug_ter_mux = {
+	.ops = &mux_reg_ops,
+	.mask = 0x3,
+	.shift = 8,
+	MUX_SRC_LIST(
+		{ &apc0_m_clk.c, 0},
+		{ &apc1_m_clk.c, 1},
+		{ &cci_m_clk.c,  2},
+	),
+	.base = &virt_bases[APCS0_DBG_BASE],
+	.c = {
+		.dbg_name = "cpu_debug_ter_mux",
+		.ops = &clk_ops_gen_mux,
+		CLK_INIT(cpu_debug_ter_mux.c),
+	},
+};
+
+static struct mux_clk cpu_debug_sec_mux = {
+	.ops = &mux_reg_ops,
+	.mask = 0x7,
+	.shift = 12,
+	MUX_SRC_LIST(
+		{ &cpu_debug_ter_mux.c, 0},
+	),
+	MUX_REC_SRC_LIST(
+		&cpu_debug_ter_mux.c,
+	),
+	.base = &virt_bases[APCS0_DBG_BASE],
+	.c = {
+		.dbg_name = "cpu_debug_sec_mux",
+		.ops = &clk_ops_gen_mux,
+		CLK_INIT(cpu_debug_sec_mux.c),
+	},
+};
+
+static struct mux_clk cpu_debug_pri_mux = {
+	.ops = &mux_reg_ops,
+	.mask = 0x3,
+	.shift = 16,
+	MUX_SRC_LIST(
+		{ &cpu_debug_sec_mux.c, 0},
+	),
+	MUX_REC_SRC_LIST(
+		&cpu_debug_sec_mux.c,
+	),
+	.base = &virt_bases[APCS0_DBG_BASE],
+	.c = {
+		.dbg_name = "cpu_debug_pri_mux",
+		.ops = &clk_ops_gen_mux,
+		CLK_INIT(cpu_debug_pri_mux.c),
+	},
+};
+
+static struct clk_lookup cpu_clocks_8953[] = {
+	/* PLL */
+	CLK_LIST(apcs_hf_pll),
+
+	/* Muxes */
+	CLK_LIST(a53ssmux_perf),
+	CLK_LIST(a53ssmux_pwr),
+	CLK_LIST(ccissmux),
+
+	/* CPU clocks */
+	CLK_LIST(a53_perf_clk),
+	CLK_LIST(a53_pwr_clk),
+	CLK_LIST(cci_clk),
+
+	/* debug clocks */
+	CLK_LIST(apc0_m_clk),
+	CLK_LIST(apc1_m_clk),
+	CLK_LIST(cci_m_clk),
+	CLK_LIST(cpu_debug_pri_mux),
+};
+
+static struct mux_div_clk *cpussmux[] = { &a53ssmux_pwr, &a53ssmux_perf,
+						&ccissmux };
+static struct cpu_clk_8953 *cpuclk[] = { &a53_pwr_clk, &a53_perf_clk,
+						&cci_clk};
+
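+/*
+ * (reg | mask) == mask below tests that every bit set in a CPU's "reg"
+ * property is contained in the cluster's cpu_reg_mask, i.e. that the CPU
+ * belongs to that cluster.
+ */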
+static struct clk *logical_cpu_to_clk(int cpu)
+{
+	struct device_node *cpu_node = of_get_cpu_node(cpu, NULL);
+	u32 reg;
+
+	if (cpu_node && !of_property_read_u32(cpu_node, "reg", &reg)) {
+		if ((reg | a53_pwr_clk.cpu_reg_mask) ==
+						a53_pwr_clk.cpu_reg_mask)
+			return &a53_pwr_clk.c;
+		if ((reg | a53_perf_clk.cpu_reg_mask) ==
+						a53_perf_clk.cpu_reg_mask)
+			return &a53_perf_clk.c;
+	}
+
+	return NULL;
+}
+
+static int add_opp(struct clk *c, struct device *dev, unsigned long max_rate)
+{
+	unsigned long rate = 0;
+	int level;
+	int uv;
+	long ret;
+	bool first = true;
+	int j = 1;
+
+	while (1) {
+		rate = c->fmax[j++];
+		level = find_vdd_level(c, rate);
+		if (level <= 0) {
+			pr_warn("clock-cpu: no corner for %lu.\n", rate);
+			return -EINVAL;
+		}
+
+		uv = c->vdd_class->vdd_uv[level];
+		if (uv < 0) {
+			pr_warn("clock-cpu: no uv for %lu.\n", rate);
+			return -EINVAL;
+		}
+
+		ret = dev_pm_opp_add(dev, rate, uv);
+		if (ret) {
+			pr_warn("clock-cpu: failed to add OPP for %lu\n", rate);
+			return ret;
+		}
+
+		/*
+		 * Make sure the OPPs for both the lowest and the highest
+		 * frequency are populated for each device. This is important
+		 * since this information will be used by thermal mitigation
+		 * and the scheduler.
+		 */
+		if ((rate >= max_rate) || first) {
+			if (first)
+				first = false;
+			else
+				break;
+		}
+	}
+
+	return 0;
+}
+
+static void print_opp_table(int a53_pwr_cpu, int a53_perf_cpu)
+{
+	struct dev_pm_opp *oppfmax, *oppfmin;
+	unsigned long apc0_fmax =
+			a53_pwr_clk.c.fmax[a53_pwr_clk.c.num_fmax - 1];
+	unsigned long apc0_fmin = a53_pwr_clk.c.fmax[1];
+
+	rcu_read_lock();
+
+	oppfmax = dev_pm_opp_find_freq_exact(get_cpu_device(a53_pwr_cpu),
+					apc0_fmax, true);
+	oppfmin = dev_pm_opp_find_freq_exact(get_cpu_device(a53_pwr_cpu),
+					apc0_fmin, true);
+	/*
+	 * One time information during boot. Important to know that this looks
+	 * sane since it can eventually make its way to the scheduler.
+	 */
+	pr_info("clock_cpu: a53 C0: OPP voltage for %lu: %ld\n", apc0_fmin,
+		dev_pm_opp_get_voltage(oppfmin));
+	pr_info("clock_cpu: a53 C0: OPP voltage for %lu: %ld\n", apc0_fmax,
+		dev_pm_opp_get_voltage(oppfmax));
+
+	oppfmax = dev_pm_opp_find_freq_exact(get_cpu_device(a53_perf_cpu),
+					apc0_fmax, true);
+	oppfmin = dev_pm_opp_find_freq_exact(get_cpu_device(a53_perf_cpu),
+					apc0_fmin, true);
+	pr_info("clock_cpu: a53 C1: OPP voltage for %lu: %lu\n", apc0_fmin,
+		dev_pm_opp_get_voltage(oppfmin));
+	pr_info("clock_cpu: a53 C2: OPP voltage for %lu: %lu\n", apc0_fmax,
+		dev_pm_opp_get_voltage(oppfmax));
+
+	rcu_read_unlock();
+}
+
+static void populate_opp_table(struct platform_device *pdev)
+{
+	unsigned long apc0_fmax;
+	int cpu, a53_pwr_cpu = 0, a53_perf_cpu = 0;
+
+	apc0_fmax = a53_pwr_clk.c.fmax[a53_pwr_clk.c.num_fmax - 1];
+
+	for_each_possible_cpu(cpu) {
+		if (logical_cpu_to_clk(cpu) == &a53_pwr_clk.c) {
+			a53_pwr_cpu = cpu;
+			WARN(add_opp(&a53_pwr_clk.c, get_cpu_device(cpu),
+					apc0_fmax),
+				"Failed to add OPP levels for %d\n", cpu);
+		}
+		if (logical_cpu_to_clk(cpu) == &a53_perf_clk.c) {
+			a53_perf_cpu = cpu;
+			WARN(add_opp(&a53_perf_clk.c, get_cpu_device(cpu),
+					apc0_fmax),
+				"Failed to add OPP levels for %d\n", cpu);
+		}
+	}
+
+	/* One time print during bootup */
+	pr_info("clock-cpu-8953: OPP tables populated (cpu %d and %d)\n",
+						a53_pwr_cpu, a53_perf_cpu);
+
+	print_opp_table(a53_pwr_cpu, a53_perf_cpu);
+}
+
+static int of_get_fmax_vdd_class(struct platform_device *pdev, struct clk *c,
+								char *prop_name)
+{
+	struct device_node *of = pdev->dev.of_node;
+	int prop_len, i;
+	struct clk_vdd_class *vdd = c->vdd_class;
+	u32 *array;
+
+	if (!of_find_property(of, prop_name, &prop_len)) {
+		dev_err(&pdev->dev, "missing %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	prop_len /= sizeof(u32);
+	if (prop_len % 2) {
+		dev_err(&pdev->dev, "bad length %d\n", prop_len);
+		return -EINVAL;
+	}
+
+	prop_len /= 2;
+	vdd->level_votes = devm_kzalloc(&pdev->dev,
+				prop_len * sizeof(*vdd->level_votes),
+					GFP_KERNEL);
+	if (!vdd->level_votes)
+		return -ENOMEM;
+
+	vdd->vdd_uv = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
+					GFP_KERNEL);
+	if (!vdd->vdd_uv)
+		return -ENOMEM;
+
+	c->fmax = devm_kzalloc(&pdev->dev, prop_len * sizeof(unsigned long),
+					GFP_KERNEL);
+	if (!c->fmax)
+		return -ENOMEM;
+
+	array = devm_kzalloc(&pdev->dev,
+			prop_len * sizeof(u32) * 2, GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	of_property_read_u32_array(of, prop_name, array, prop_len * 2);
+	for (i = 0; i < prop_len; i++) {
+		c->fmax[i] = array[2 * i];
+		vdd->vdd_uv[i] = array[2 * i + 1];
+	}
+
+	devm_kfree(&pdev->dev, array);
+	vdd->num_levels = prop_len;
+	vdd->cur_level = prop_len;
+	vdd->use_max_uV = true;
+	c->num_fmax = prop_len;
+
+	return 0;
+}
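+
+/*
+ * The property parsed above is a flat array of <frequency, voltage> pairs.
+ * A hypothetical device tree entry matching this layout:
+ *
+ *	qcom,speed0-bin-v0-cl =
+ *		<  400000000 1 >,
+ *		<  800000000 2 >,
+ *		< 1200000000 3 >;
+ *
+ * c->fmax[i] takes the even elements and vdd->vdd_uv[i] the odd ones.
+ */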
+
+static void get_speed_bin(struct platform_device *pdev, int *bin,
+								int *version)
+{
+	struct resource *res;
+	void __iomem *base;
+	u32 pte_efuse;
+
+	*bin = 0;
+	*version = 0;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse");
+	if (!res) {
+		dev_info(&pdev->dev,
+			 "No speed/PVS binning available. Defaulting to 0!\n");
+		return;
+	}
+
+	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!base) {
+		dev_warn(&pdev->dev,
+			 "Unable to read efuse data. Defaulting to 0!\n");
+		return;
+	}
+
+	pte_efuse = readl_relaxed(base);
+	devm_iounmap(&pdev->dev, base);
+
+	*bin = (pte_efuse >> 8) & 0x7;
+
+	dev_info(&pdev->dev, "Speed bin: %d PVS Version: %d\n", *bin,
+								*version);
+}
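+
+/*
+ * Example decode with a hypothetical fuse value: pte_efuse = 0x00000500
+ * yields bin = (0x500 >> 8) & 0x7 = 5. Only bits [10:8] are consumed here;
+ * the PVS version is always reported as 0 on this target.
+ */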
+
+static int cpu_parse_devicetree(struct platform_device *pdev)
+{
+	struct resource *res;
+	int mux_id = 0;
+	char rcg_name[] = "xxx-mux";
+	char pll_name[] = "xxx-pll";
+	struct clk *c;
+
+	res = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "c0-pll");
+	if (!res) {
+		dev_err(&pdev->dev, "missing %s\n", pll_name);
+		return -EINVAL;
+	}
+
+	virt_bases[APCS_C0_PLL_BASE] = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+	if (!virt_bases[APCS_C0_PLL_BASE]) {
+		dev_err(&pdev->dev, "ioremap failed for %s\n",
+				pll_name);
+		return -ENOMEM;
+	}
+
+	for (mux_id = 0; mux_id < A53SS_MUX_NUM; mux_id++) {
+		snprintf(rcg_name, ARRAY_SIZE(rcg_name), "%s-mux",
+						mux_names[mux_id]);
+		res = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, rcg_name);
+		if (!res) {
+			dev_err(&pdev->dev, "missing %s\n", rcg_name);
+			return -EINVAL;
+		}
+
+		cpussmux[mux_id]->base = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (!cpussmux[mux_id]->base) {
+			dev_err(&pdev->dev, "ioremap failed for %s\n",
+								rcg_name);
+			return -ENOMEM;
+		}
+	}
+
+	/* PLL core logic */
+	vdd_pll.regulator[0] = devm_regulator_get(&pdev->dev,
+							"vdd-mx");
+	if (IS_ERR(vdd_pll.regulator[0])) {
+		if (PTR_ERR(vdd_pll.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd-mx regulator\n");
+		return PTR_ERR(vdd_pll.regulator[0]);
+	}
+
+	vdd_pwrcl.regulator[0] = devm_regulator_get(&pdev->dev,
+							"vdd-cl");
+	if (IS_ERR(vdd_pwrcl.regulator[0])) {
+		if (PTR_ERR(vdd_pwrcl.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get the cluster regulator\n");
+		return PTR_ERR(vdd_pwrcl.regulator[0]);
+	}
+
+	/* Sources of the PLL */
+	c = devm_clk_get(&pdev->dev, "xo_a");
+	if (IS_ERR(c)) {
+		if (PTR_ERR(c) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get xo (rc = %ld)!\n",
+				PTR_ERR(c));
+		return PTR_ERR(c);
+	}
+	xo_a_clk.c.parent = c;
+
+	return 0;
+}
+
+/**
+ * clock_panic_callback() - panic notification callback function.
+ *              This function is invoked when a kernel panic occurs.
+ * @nfb:        Notifier block pointer
+ * @event:      Value passed unmodified to notifier function
+ * @data:       Pointer passed unmodified to notifier function
+ *
+ * Return: NOTIFY_OK
+ */
+static int clock_panic_callback(struct notifier_block *nfb,
+					unsigned long event, void *data)
+{
+	unsigned long rate;
+
+	rate = (a53_perf_clk.c.count) ? a53_perf_clk.c.rate : 0;
+	pr_err("%s frequency: %10lu Hz\n", a53_perf_clk.c.dbg_name, rate);
+
+	rate = (a53_pwr_clk.c.count) ? a53_pwr_clk.c.rate : 0;
+	pr_err("%s frequency: %10lu Hz\n", a53_pwr_clk.c.dbg_name, rate);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block clock_panic_notifier = {
+	.notifier_call = clock_panic_callback,
+	.priority = 1,
+};
+
+static int clock_cpu_probe(struct platform_device *pdev)
+{
+	int speed_bin, version, rc, cpu, mux_id;
+	char prop_name[] = "qcom,speedX-bin-vX-XXX";
+	unsigned long ccirate, pwrcl_boot_rate = 883200000;
+
+	get_speed_bin(pdev, &speed_bin, &version);
+
+	rc = cpu_parse_devicetree(pdev);
+	if (rc)
+		return rc;
+
+	snprintf(prop_name, ARRAY_SIZE(prop_name),
+			"qcom,speed%d-bin-v%d-cl",
+					speed_bin, version);
+	for (mux_id = 0; mux_id < A53SS_MUX_CCI; mux_id++) {
+		rc = of_get_fmax_vdd_class(pdev, &cpuclk[mux_id]->c,
+						prop_name);
+		if (rc) {
+			dev_err(&pdev->dev, "Loading safe voltage plan %s!\n",
+							prop_name);
+			snprintf(prop_name, ARRAY_SIZE(prop_name),
+						"qcom,speed0-bin-v0-cl");
+			rc = of_get_fmax_vdd_class(pdev, &cpuclk[mux_id]->c,
+								prop_name);
+			if (rc) {
+				dev_err(&pdev->dev, "safe voltage plan load failed for clusters\n");
+				return rc;
+			}
+		}
+	}
+
+	snprintf(prop_name, ARRAY_SIZE(prop_name),
+			"qcom,speed%d-bin-v%d-cci", speed_bin, version);
+	rc = of_get_fmax_vdd_class(pdev, &cpuclk[mux_id]->c, prop_name);
+	if (rc) {
+		dev_err(&pdev->dev, "Loading safe voltage plan %s!\n",
+							prop_name);
+		snprintf(prop_name, ARRAY_SIZE(prop_name),
+						"qcom,speed0-bin-v0-cci");
+		rc = of_get_fmax_vdd_class(pdev, &cpuclk[mux_id]->c,
+								prop_name);
+		if (rc) {
+			dev_err(&pdev->dev, "safe voltage plan load failed for CCI\n");
+			return rc;
+		}
+	}
+
+	/* Debug Mux */
+	virt_bases[APCS0_DBG_BASE] = devm_ioremap(&pdev->dev, GLB_DIAG, SZ_8);
+	if (!virt_bases[APCS0_DBG_BASE]) {
+		dev_err(&pdev->dev, "Failed to ioremap GLB_DIAG registers\n");
+		return -ENOMEM;
+	}
+
+	rc = of_msm_clock_register(pdev->dev.of_node,
+			cpu_clocks_8953, ARRAY_SIZE(cpu_clocks_8953));
+	if (rc) {
+		dev_err(&pdev->dev, "msm_clock_register failed\n");
+		return rc;
+	}
+
+	rc = clock_rcgwr_init(pdev);
+	if (rc)
+		dev_err(&pdev->dev, "Failed to init RCGwR\n");
+
+	/*
+	 * We don't want the CPU clocks to be turned off at late init
+	 * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
+	 * refcount of these clocks. Any cpufreq/hotplug manager can assume
+	 * that the clocks have already been prepared and enabled by the time
+	 * they take over.
+	 */
+
+	get_online_cpus();
+
+	for_each_online_cpu(cpu) {
+		WARN(clk_prepare_enable(&cci_clk.c),
+				"Unable to turn on CCI clock\n");
+		WARN(clk_prepare_enable(&a53_pwr_clk.c),
+				"Unable to turn on CPU clock for %d\n", cpu);
+	}
+
+	/* ccirate = HFPLL_rate/(2.5) */
+	ccirate = CCI_RATE(apcs_hf_pll.c.rate);
+	rc = clk_set_rate(&cci_clk.c, ccirate);
+	if (rc)
+		dev_err(&pdev->dev, "Can't set safe rate for CCI\n");
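+
+	/*
+	 * Sanity check of the ratio above, assuming the PLL is still at the
+	 * 652.8 MHz early-boot rate: CCI_RATE(652800000) should come out to
+	 * 652800000 / 2.5 = 261120000 Hz.
+	 */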
+
+	rc = clk_set_rate(&a53_pwr_clk.c, apcs_hf_pll.c.rate);
+	if (rc)
+		dev_err(&pdev->dev, "Can't set pwr safe rate\n");
+
+	rc = clk_set_rate(&a53_perf_clk.c, apcs_hf_pll.c.rate);
+	if (rc)
+		dev_err(&pdev->dev, "Can't set perf safe rate\n");
+
+	/* Move to higher boot frequency */
+	rc = clk_set_rate(&a53_pwr_clk.c, pwrcl_boot_rate);
+	if (rc)
+		dev_err(&pdev->dev, "Can't set pwr rate %ld\n",
+					pwrcl_boot_rate);
+	put_online_cpus();
+
+	populate_opp_table(pdev);
+
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (rc)
+		return rc;
+
+	for_each_possible_cpu(cpu) {
+		if (logical_cpu_to_clk(cpu) == &a53_pwr_clk.c)
+			cpumask_set_cpu(cpu, &a53_pwr_clk.cpumask);
+		if (logical_cpu_to_clk(cpu) == &a53_perf_clk.c)
+			cpumask_set_cpu(cpu, &a53_perf_clk.cpumask);
+	}
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,enable-qos")) {
+		a53_pwr_clk.hw_low_power_ctrl = true;
+		a53_perf_clk.hw_low_power_ctrl = true;
+	}
+
+	atomic_notifier_chain_register(&panic_notifier_list,
+						&clock_panic_notifier);
+
+	return 0;
+}
+
+static const struct of_device_id clock_cpu_match_table[] = {
+	{.compatible = "qcom,cpu-clock-8953"},
+	{}
+};
+
+static struct platform_driver clock_cpu_driver = {
+	.probe = clock_cpu_probe,
+	.driver = {
+		.name = "cpu-clock-8953",
+		.of_match_table = clock_cpu_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init clock_cpu_init(void)
+{
+	return platform_driver_register(&clock_cpu_driver);
+}
+arch_initcall(clock_cpu_init);
+
+#define APCS_HF_PLL_BASE		0xb116000
+#define APCS_ALIAS1_CMD_RCGR		0xb011050
+#define APCS_ALIAS1_CFG_OFF		0x4
+#define APCS_ALIAS1_CORE_CBCR_OFF	0x8
+#define SRC_SEL				0x5
+#define SRC_DIV				0x1
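+
+/*
+ * The CFG value programmed below is (SRC_SEL << 8) | SRC_DIV = 0x501:
+ * bits [10:8] carry the RCG source select (5 presumably selects GPLL0 on
+ * this mux) and bits [4:0] the divider field, which these RCGs encode as
+ * (2 * divisor) - 1, so 0x1 means divide-by-1.
+ */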
+
+/* Configure PLL at Low frequency */
+unsigned long pwrcl_early_boot_rate = 652800000;
+
+static int __init cpu_clock_pwr_init(void)
+{
+	void __iomem  *base;
+	int regval = 0;
+	struct device_node *ofnode = of_find_compatible_node(NULL, NULL,
+						"qcom,cpu-clock-8953");
+	if (!ofnode)
+		return 0;
+
+	/* Initialize the PLLs */
+	virt_bases[APCS_C0_PLL_BASE] = ioremap_nocache(APCS_HF_PLL_BASE, SZ_1K);
+	if (!virt_bases[APCS_C0_PLL_BASE])
+		return -ENOMEM;
+	clk_ops_variable_rate = clk_ops_variable_rate_pll_hwfsm;
+	clk_ops_variable_rate.list_registers = variable_pll_list_registers;
+
+	__variable_rate_pll_init(&apcs_hf_pll.c);
+	apcs_hf_pll.c.ops->set_rate(&apcs_hf_pll.c, pwrcl_early_boot_rate);
+	clk_ops_variable_rate_pll.enable(&apcs_hf_pll.c);
+
+	base = ioremap_nocache(APCS_ALIAS1_CMD_RCGR, SZ_8);
+	if (!base)
+		return -ENOMEM;
+	regval = readl_relaxed(base);
+
+	/* Source GPLL0 and at the rate of GPLL0 */
+	regval = (SRC_SEL << 8) | SRC_DIV; /* 0x501 */
+	writel_relaxed(regval, base + APCS_ALIAS1_CFG_OFF);
+	/* Make sure src sel and src div are set before the update bit */
+	mb();
+
+	/* update bit */
+	regval = readl_relaxed(base);
+	regval |= BIT(0);
+	writel_relaxed(regval, base);
+	/* Make sure the update goes through before enabling the branch */
+	mb();
+
+	/* Enable the branch */
+	regval = readl_relaxed(base + APCS_ALIAS1_CORE_CBCR_OFF);
+	regval |= BIT(0);
+	writel_relaxed(regval, base + APCS_ALIAS1_CORE_CBCR_OFF);
+	/* Branch enable should be complete */
+	mb();
+	iounmap(base);
+
+	pr_info("Power clocks configured\n");
+
+	return 0;
+}
+early_initcall(cpu_clock_pwr_init);
diff --git a/drivers/clk/msm/clock-debug.c b/drivers/clk/msm/clock-debug.c
new file mode 100644
index 0000000..f182fe1
--- /dev/null
+++ b/drivers/clk/msm/clock-debug.c
@@ -0,0 +1,721 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2014, 2017,  The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/clkdev.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <trace/events/power.h>
+
+#include "clock.h"
+
+static LIST_HEAD(clk_list);
+static DEFINE_MUTEX(clk_list_lock);
+
+static struct dentry *debugfs_base;
+static u32 debug_suspend;
+
+static int clock_debug_rate_set(void *data, u64 val)
+{
+	struct clk *clock = data;
+	int ret;
+
+	/*
+	 * Only increases to max rate will succeed, but that's actually good
+	 * for debugging purposes so we don't check for error.
+	 */
+	if (clock->flags & CLKFLAG_MAX)
+		clk_set_max_rate(clock, val);
+	ret = clk_set_rate(clock, val);
+	if (ret)
+		pr_err("clk_set_rate(%s, %lu) failed (%d)\n", clock->dbg_name,
+				(unsigned long)val, ret);
+
+	return ret;
+}
+
+static int clock_debug_rate_get(void *data, u64 *val)
+{
+	struct clk *clock = data;
+	*val = clk_get_rate(clock);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
+			clock_debug_rate_set, "%llu\n");
+
+static struct clk *measure;
+
+static int clock_debug_measure_get(void *data, u64 *val)
+{
+	struct clk *clock = data, *par;
+	int ret, is_hw_gated;
+	unsigned long meas_rate, sw_rate;
+
+	/* Check to see if the clock is in hardware gating mode */
+	if (clock->ops->in_hwcg_mode)
+		is_hw_gated = clock->ops->in_hwcg_mode(clock);
+	else
+		is_hw_gated = 0;
+
+	ret = clk_set_parent(measure, clock);
+	if (!ret) {
+		/*
+		 * Disable hw gating to get accurate rate measurements. Only do
+		 * this if the clock is explicitly enabled by software. This
+		 * allows us to detect errors where clocks are on even though
+		 * software is not requesting them to be on due to broken
+		 * hardware gating signals.
+		 */
+		if (is_hw_gated && clock->count)
+			clock->ops->disable_hwcg(clock);
+		par = measure;
+		while (par && par != clock) {
+			if (par->ops->enable)
+				par->ops->enable(par);
+			par = par->parent;
+		}
+		*val = clk_get_rate(measure);
+		/* Reenable hwgating if it was disabled */
+		if (is_hw_gated && clock->count)
+			clock->ops->enable_hwcg(clock);
+	}
+
+	/*
+	 * If there's a divider on the path from the clock output to the
+	 * measurement circuitry, account for it by dividing the original clock
+	 * rate with the rate set on the parent of the measure clock.
+	 */
+	meas_rate = clk_get_rate(clock);
+	sw_rate = clk_get_rate(measure->parent);
+	if (sw_rate && meas_rate >= (sw_rate * 2))
+		*val *= DIV_ROUND_CLOSEST(meas_rate, sw_rate);
+
+	return ret;
+}
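+
+/*
+ * Worked example of the divider correction above, with hypothetical rates:
+ * if the clock's software rate is 800 MHz but the measure clock's parent
+ * was programmed to 200 MHz, the raw measurement is scaled by
+ * DIV_ROUND_CLOSEST(800000000, 200000000) = 4 to undo the divider on the
+ * path to the counter.
+ */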
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_measure_fops, clock_debug_measure_get,
+			NULL, "%lld\n");
+
+static int clock_debug_enable_set(void *data, u64 val)
+{
+	struct clk *clock = data;
+	int rc = 0;
+
+	if (val)
+		rc = clk_prepare_enable(clock);
+	else
+		clk_disable_unprepare(clock);
+
+	return rc;
+}
+
+static int clock_debug_enable_get(void *data, u64 *val)
+{
+	struct clk *clock = data;
+	int enabled;
+
+	if (clock->ops->is_enabled)
+		enabled = clock->ops->is_enabled(clock);
+	else
+		enabled = !!(clock->count);
+
+	*val = enabled;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
+			clock_debug_enable_set, "%lld\n");
+
+static int clock_debug_local_get(void *data, u64 *val)
+{
+	struct clk *clock = data;
+
+	if (!clock->ops->is_local)
+		*val = true;
+	else
+		*val = clock->ops->is_local(clock);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_local_fops, clock_debug_local_get,
+			NULL, "%llu\n");
+
+static int clock_debug_hwcg_get(void *data, u64 *val)
+{
+	struct clk *clock = data;
+
+	if (clock->ops->in_hwcg_mode)
+		*val = !!clock->ops->in_hwcg_mode(clock);
+	else
+		*val = 0;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_hwcg_fops, clock_debug_hwcg_get,
+			NULL, "%llu\n");
+
+static void clock_print_fmax_by_level(struct seq_file *m, int level)
+{
+	struct clk *clock = m->private;
+	struct clk_vdd_class *vdd_class = clock->vdd_class;
+	int off, i, vdd_level, nregs = vdd_class->num_regulators;
+
+	vdd_level = find_vdd_level(clock, clock->rate);
+
+	seq_printf(m, "%2s%10lu", vdd_level == level ? "[" : "",
+		clock->fmax[level]);
+	for (i = 0; i < nregs; i++) {
+		off = nregs*level + i;
+		if (vdd_class->vdd_uv)
+			seq_printf(m, "%10u", vdd_class->vdd_uv[off]);
+		if (vdd_class->vdd_ua)
+			seq_printf(m, "%10u", vdd_class->vdd_ua[off]);
+	}
+
+	if (vdd_level == level)
+		seq_puts(m, "]");
+	seq_puts(m, "\n");
+}
+
+static int fmax_rates_show(struct seq_file *m, void *unused)
+{
+	struct clk *clock = m->private;
+	struct clk_vdd_class *vdd_class = clock->vdd_class;
+	int level = 0, i, nregs = vdd_class->num_regulators;
+	char reg_name[10];
+
+	int vdd_level = find_vdd_level(clock, clock->rate);
+
+	if (vdd_level < 0) {
+		seq_printf(m, "could not find_vdd_level for %s, %ld\n",
+			clock->dbg_name, clock->rate);
+		return 0;
+	}
+
+	seq_printf(m, "%12s", "");
+	for (i = 0; i < nregs; i++) {
+		snprintf(reg_name, ARRAY_SIZE(reg_name), "reg %d", i);
+		seq_printf(m, "%10s", reg_name);
+		if (vdd_class->vdd_ua)
+			seq_printf(m, "%10s", "");
+	}
+
+	seq_printf(m, "\n%12s", "freq");
+	for (i = 0; i < nregs; i++) {
+		seq_printf(m, "%10s", "uV");
+		if (vdd_class->vdd_ua)
+			seq_printf(m, "%10s", "uA");
+	}
+	seq_puts(m, "\n");
+
+	for (level = 0; level < clock->num_fmax; level++)
+		clock_print_fmax_by_level(m, level);
+
+	return 0;
+}
+
+static int fmax_rates_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, fmax_rates_show, inode->i_private);
+}
+
+static const struct file_operations fmax_rates_fops = {
+	.open		= fmax_rates_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int orphan_list_show(struct seq_file *m, void *unused)
+{
+	struct clk *c, *safe;
+
+	list_for_each_entry_safe(c, safe, &orphan_clk_list, list)
+		seq_printf(m, "%s\n", c->dbg_name);
+
+	return 0;
+}
+
+static int orphan_list_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, orphan_list_show, inode->i_private);
+}
+
+static const struct file_operations orphan_list_fops = {
+	.open		= orphan_list_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+#define clock_debug_output(m, c, fmt, ...)		\
+do {							\
+	if (m)						\
+		seq_printf(m, fmt, ##__VA_ARGS__);	\
+	else if (c)					\
+		pr_cont(fmt, ##__VA_ARGS__);		\
+	else						\
+		pr_info(fmt, ##__VA_ARGS__);		\
+} while (0)
+
+/*
+ * clock_debug_print_enabled_debug_suspend() - Print names of enabled clocks
+ * during suspend.
+ */
+static void clock_debug_print_enabled_debug_suspend(struct seq_file *s)
+{
+	struct clk *c;
+	int cnt = 0;
+
+	if (!mutex_trylock(&clk_list_lock))
+		return;
+
+	clock_debug_output(s, 0, "Enabled clocks:\n");
+
+	list_for_each_entry(c, &clk_list, list) {
+		if (!c || !c->prepare_count)
+			continue;
+		if (c->vdd_class)
+			clock_debug_output(s, 0, " %s:%lu:%lu [%ld, %d]",
+					c->dbg_name, c->prepare_count,
+						c->count, c->rate,
+					find_vdd_level(c, c->rate));
+		else
+			clock_debug_output(s, 0, " %s:%lu:%lu [%ld]",
+					c->dbg_name, c->prepare_count,
+					c->count, c->rate);
+		cnt++;
+	}
+
+	mutex_unlock(&clk_list_lock);
+
+	if (cnt)
+		clock_debug_output(s, 0, "Enabled clock count: %d\n", cnt);
+	else
+		clock_debug_output(s, 0, "No clocks enabled.\n");
+}
+
+static int clock_debug_print_clock(struct clk *c, struct seq_file *m)
+{
+	char *start = "";
+
+	if (!c || !c->prepare_count)
+		return 0;
+
+	clock_debug_output(m, 0, "\t");
+	do {
+		if (c->vdd_class)
+			clock_debug_output(m, 1, "%s%s:%lu:%lu [%ld, %d]",
+				start, c->dbg_name, c->prepare_count, c->count,
+				c->rate, find_vdd_level(c, c->rate));
+		else
+			clock_debug_output(m, 1, "%s%s:%lu:%lu [%ld]", start,
+				c->dbg_name, c->prepare_count, c->count,
+				c->rate);
+		start = " -> ";
+	} while ((c = clk_get_parent(c)));
+
+	clock_debug_output(m, 1, "\n");
+
+	return 1;
+}
+
+/**
+ * clock_debug_print_enabled_clocks() - Print names of enabled clocks
+ */
+static void clock_debug_print_enabled_clocks(struct seq_file *m)
+{
+	struct clk *c;
+	int cnt = 0;
+
+	if (!mutex_trylock(&clk_list_lock)) {
+		pr_err("clock-debug: Clocks are being registered. Cannot print clock state now.\n");
+		return;
+	}
+	clock_debug_output(m, 0, "Enabled clocks:\n");
+	list_for_each_entry(c, &clk_list, list) {
+		cnt += clock_debug_print_clock(c, m);
+	}
+	mutex_unlock(&clk_list_lock);
+
+	if (cnt)
+		clock_debug_output(m, 0, "Enabled clock count: %d\n", cnt);
+	else
+		clock_debug_output(m, 0, "No clocks enabled.\n");
+}
+
+static int enabled_clocks_show(struct seq_file *m, void *unused)
+{
+	clock_debug_print_enabled_clocks(m);
+	return 0;
+}
+
+static int enabled_clocks_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, enabled_clocks_show, inode->i_private);
+}
+
+static const struct file_operations enabled_clocks_fops = {
+	.open		= enabled_clocks_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int trace_clocks_show(struct seq_file *m, void *unused)
+{
+	struct clk *c;
+	int total_cnt = 0;
+
+	if (!mutex_trylock(&clk_list_lock)) {
+		pr_err("trace_clocks: Clocks are being registered. Cannot trace clock state now.\n");
+		return -EBUSY;
+	}
+	list_for_each_entry(c, &clk_list, list) {
+		trace_clock_state(c->dbg_name, c->prepare_count, c->count,
+					c->rate);
+		total_cnt++;
+	}
+	mutex_unlock(&clk_list_lock);
+	clock_debug_output(m, 0, "Total clock count: %d\n", total_cnt);
+
+	return 0;
+}
+
+static int trace_clocks_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, trace_clocks_show, inode->i_private);
+}
+static const struct file_operations trace_clocks_fops = {
+	.open		= trace_clocks_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int list_rates_show(struct seq_file *m, void *unused)
+{
+	struct clk *clock = m->private;
+	int level, i = 0;
+	unsigned long rate, fmax = 0;
+
+	/* Find max frequency supported within voltage constraints. */
+	if (!clock->vdd_class) {
+		fmax = ULONG_MAX;
+	} else {
+		for (level = 0; level < clock->num_fmax; level++)
+			if (clock->fmax[level])
+				fmax = clock->fmax[level];
+	}
+
+	/*
+	 * List supported frequencies <= fmax. Higher frequencies may appear in
+	 * the frequency table, but are not valid and should not be listed.
+	 */
+	while (!IS_ERR_VALUE(rate = clock->ops->list_rate(clock, i++))) {
+		if (rate <= fmax)
+			seq_printf(m, "%lu\n", rate);
+	}
+
+	return 0;
+}
+
+static int list_rates_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, list_rates_show, inode->i_private);
+}
+
+static const struct file_operations list_rates_fops = {
+	.open		= list_rates_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static ssize_t clock_parent_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	struct clk *clock = filp->private_data;
+	struct clk *p = clock->parent;
+	char name[256] = {0};
+
+	snprintf(name, sizeof(name), "%s\n", p ? p->dbg_name : "None");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, name, strlen(name));
+}
+
+static ssize_t clock_parent_write(struct file *filp,
+		const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct clk *clock = filp->private_data;
+	char buf[256];
+	char *cmp;
+	int ret;
+	struct clk *parent = NULL;
+
+	cnt = min(cnt, sizeof(buf) - 1);
+	if (copy_from_user(buf, ubuf, cnt))
+		return -EFAULT;
+	buf[cnt] = '\0';
+	cmp = strstrip(buf);
+
+	mutex_lock(&clk_list_lock);
+	list_for_each_entry(parent, &clk_list, list) {
+		if (!strcmp(cmp, parent->dbg_name))
+			break;
+	}
+
+	if (&parent->list == &clk_list) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	mutex_unlock(&clk_list_lock);
+	ret = clk_set_parent(clock, parent);
+	if (ret)
+		return ret;
+
+	return cnt;
+err:
+	mutex_unlock(&clk_list_lock);
+	return ret;
+}
+
+static const struct file_operations clock_parent_fops = {
+	.open		= simple_open,
+	.read		= clock_parent_read,
+	.write		= clock_parent_write,
+};
+
+void clk_debug_print_hw(struct clk *clk, struct seq_file *f)
+{
+	void __iomem *base;
+	struct clk_register_data *regs;
+	u32 i, j, size;
+
+	if (IS_ERR_OR_NULL(clk))
+		return;
+
+	clk_debug_print_hw(clk->parent, f);
+
+	clock_debug_output(f, false, "%s\n", clk->dbg_name);
+
+	if (!clk->ops->list_registers)
+		return;
+
+	j = 0;
+	base = clk->ops->list_registers(clk, j, &regs, &size);
+	while (!IS_ERR(base)) {
+		for (i = 0; i < size; i++) {
+			u32 val = readl_relaxed(base + regs[i].offset);
+
+			clock_debug_output(f, false, "%20s: 0x%.8x\n",
+						regs[i].name, val);
+		}
+		j++;
+		base = clk->ops->list_registers(clk, j, &regs, &size);
+	}
+}
+
+static int print_hw_show(struct seq_file *m, void *unused)
+{
+	struct clk *c = m->private;
+
+	clk_debug_print_hw(c, m);
+
+	return 0;
+}
+
+static int print_hw_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, print_hw_show, inode->i_private);
+}
+
+static const struct file_operations clock_print_hw_fops = {
+	.open		= print_hw_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static void clock_measure_add(struct clk *clock)
+{
+	if (IS_ERR_OR_NULL(measure))
+		return;
+
+	if (clk_set_parent(measure, clock))
+		return;
+
+	debugfs_create_file("measure", 0444, clock->clk_dir, clock,
+				&clock_measure_fops);
+}
+
+static int clock_debug_add(struct clk *clock)
+{
+	char temp[50], *ptr;
+	struct dentry *clk_dir;
+
+	if (!debugfs_base)
+		return -ENOMEM;
+
+	strlcpy(temp, clock->dbg_name, ARRAY_SIZE(temp));
+	for (ptr = temp; *ptr; ptr++)
+		*ptr = tolower(*ptr);
+
+	clk_dir = debugfs_create_dir(temp, debugfs_base);
+	if (!clk_dir)
+		return -ENOMEM;
+
+	clock->clk_dir = clk_dir;
+
+	if (!debugfs_create_file("rate", 0644, clk_dir,
+				clock, &clock_rate_fops))
+		goto error;
+
+	if (!debugfs_create_file("enable", 0644, clk_dir,
+				clock, &clock_enable_fops))
+		goto error;
+
+	if (!debugfs_create_file("is_local", 0444, clk_dir, clock,
+				&clock_local_fops))
+		goto error;
+
+	if (!debugfs_create_file("has_hw_gating", 0444, clk_dir, clock,
+				&clock_hwcg_fops))
+		goto error;
+
+	if (clock->ops->list_rate)
+		if (!debugfs_create_file("list_rates",
+				0444, clk_dir, clock, &list_rates_fops))
+			goto error;
+
+	if (clock->vdd_class && !debugfs_create_file(
+			"fmax_rates", 0444, clk_dir, clock, &fmax_rates_fops))
+		goto error;
+
+	if (!debugfs_create_file("parent", 0444, clk_dir, clock,
+			&clock_parent_fops))
+		goto error;
+
+	if (!debugfs_create_file("print", 0444, clk_dir, clock,
+			&clock_print_hw_fops))
+		goto error;
+
+	clock_measure_add(clock);
+
+	return 0;
+error:
+	debugfs_remove_recursive(clk_dir);
+	return -ENOMEM;
+}
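+
+/*
+ * For a clock named, say, "gcc_blsp1_ahb_clk", clock_debug_add() builds
+ * (assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *	/sys/kernel/debug/clk/gcc_blsp1_ahb_clk/rate
+ *	/sys/kernel/debug/clk/gcc_blsp1_ahb_clk/enable
+ *	/sys/kernel/debug/clk/gcc_blsp1_ahb_clk/is_local
+ *	/sys/kernel/debug/clk/gcc_blsp1_ahb_clk/has_hw_gating
+ *	/sys/kernel/debug/clk/gcc_blsp1_ahb_clk/parent
+ *	/sys/kernel/debug/clk/gcc_blsp1_ahb_clk/print
+ *
+ * plus list_rates, fmax_rates and measure when the clock supports them.
+ */
+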
+static DEFINE_MUTEX(clk_debug_lock);
+static int clk_debug_init_once;
+
+/**
+ * clock_debug_init() - Initialize clock debugfs
+ * Lock clk_debug_lock before invoking this function.
+ */
+static int clock_debug_init(void)
+{
+	if (clk_debug_init_once)
+		return 0;
+
+	clk_debug_init_once = 1;
+
+	debugfs_base = debugfs_create_dir("clk", NULL);
+	if (!debugfs_base)
+		return -ENOMEM;
+
+	if (!debugfs_create_u32("debug_suspend", 0644,
+				debugfs_base, &debug_suspend)) {
+		debugfs_remove_recursive(debugfs_base);
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_file("enabled_clocks", 0444, debugfs_base, NULL,
+				&enabled_clocks_fops))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("orphan_list", 0444, debugfs_base, NULL,
+				&orphan_list_fops))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("trace_clocks", 0444, debugfs_base, NULL,
+				&trace_clocks_fops))
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * clock_debug_register() - Add a clock to the clock debugfs hierarchy
+ * @clk: Clock to create debugfs nodes for
+ */
+int clock_debug_register(struct clk *clk)
+{
+	int ret = 0;
+	struct clk *c;
+
+	mutex_lock(&clk_list_lock);
+	if (!list_empty(&clk->list))
+		goto out;
+
+	ret = clock_debug_init();
+	if (ret)
+		goto out;
+
+	if (IS_ERR_OR_NULL(measure)) {
+		if (clk->flags & CLKFLAG_MEASURE)
+			measure = clk;
+		if (!IS_ERR_OR_NULL(measure)) {
+			list_for_each_entry(c, &clk_list, list)
+				clock_measure_add(c);
+		}
+	}
+
+	list_add_tail(&clk->list, &clk_list);
+	clock_debug_add(clk);
+out:
+	mutex_unlock(&clk_list_lock);
+	return ret;
+}
+
+/*
+ * Print the names of enabled clocks and their parents if debug_suspend is set
+ */
+void clock_debug_print_enabled(bool print_parent)
+{
+	if (likely(!debug_suspend))
+		return;
+	if (print_parent)
+		clock_debug_print_enabled_clocks(NULL);
+	else
+		clock_debug_print_enabled_debug_suspend(NULL);
+}
diff --git a/drivers/clk/msm/clock-dummy.c b/drivers/clk/msm/clock-dummy.c
new file mode 100644
index 0000000..ad6952a
--- /dev/null
+++ b/drivers/clk/msm/clock-dummy.c
@@ -0,0 +1,113 @@
+/* Copyright (c) 2011,2013-2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+static int dummy_clk_reset(struct clk *clk, enum clk_reset_action action)
+{
+	return 0;
+}
+
+static int dummy_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	clk->rate = rate;
+	return 0;
+}
+
+static int dummy_clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+	return 0;
+}
+
+static int dummy_clk_set_flags(struct clk *clk, unsigned long flags)
+{
+	return 0;
+}
+
+static unsigned long dummy_clk_get_rate(struct clk *clk)
+{
+	return clk->rate;
+}
+
+static long dummy_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	return rate;
+}
+
+const struct clk_ops clk_ops_dummy = {
+	.reset = dummy_clk_reset,
+	.set_rate = dummy_clk_set_rate,
+	.set_max_rate = dummy_clk_set_max_rate,
+	.set_flags = dummy_clk_set_flags,
+	.get_rate = dummy_clk_get_rate,
+	.round_rate = dummy_clk_round_rate,
+};
+
+struct clk dummy_clk = {
+	.dbg_name = "dummy_clk",
+	.ops = &clk_ops_dummy,
+	CLK_INIT(dummy_clk),
+};
+
+static void *dummy_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct clk *c;
+
+	c = devm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	c->ops = &clk_ops_dummy;
+	return msmclk_generic_clk_init(dev, np, c);
+}
+MSMCLK_PARSER(dummy_clk_dt_parser, "qcom,dummy-clk", 0);
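+
+/*
+ * A minimal sketch of a device tree node this parser would pick up; the
+ * node name and label are made up for illustration:
+ *
+ *	dummy_clk: qcom,dummy-clk {
+ *		compatible = "qcom,dummy-clk";
+ *	};
+ */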
+
+static struct clk *of_dummy_get(struct of_phandle_args *clkspec,
+				  void *data)
+{
+	return &dummy_clk;
+}
+
+static const struct of_device_id msm_clock_dummy_match_table[] = {
+	{ .compatible = "qcom,dummycc" },
+	{}
+};
+
+static int msm_clock_dummy_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = of_clk_add_provider(pdev->dev.of_node, of_dummy_get, NULL);
+	if (ret)
+		return ret;
+
+	dev_info(&pdev->dev, "Registered DUMMY provider.\n");
+	return ret;
+}
+
+static struct platform_driver msm_clock_dummy_driver = {
+	.probe = msm_clock_dummy_probe,
+	.driver = {
+		.name = "clock-dummy",
+		.of_match_table = msm_clock_dummy_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+int __init msm_dummy_clk_init(void)
+{
+	return platform_driver_register(&msm_clock_dummy_driver);
+}
+arch_initcall(msm_dummy_clk_init);
diff --git a/drivers/clk/msm/clock-gcc-8953.c b/drivers/clk/msm/clock-gcc-8953.c
new file mode 100644
index 0000000..797f851
--- /dev/null
+++ b/drivers/clk/msm/clock-gcc-8953.c
@@ -0,0 +1,4140 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include <soc/qcom/clock-alpha-pll.h>
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/clock-voter.h>
+#include <soc/qcom/rpm-smd.h>
+#include <soc/qcom/clock-rpm.h>
+
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+
+#include <dt-bindings/clock/msm-clocks-hwio-8953.h>
+#include <dt-bindings/clock/msm-clocks-8953.h>
+
+#include "clock.h"
+#include "reset.h"
+
+enum {
+	GCC_BASE,
+	GFX_BASE,
+	MDSS_BASE,
+	N_BASES,
+};
+
+static void __iomem *virt_bases[N_BASES];
+
+#define GCC_REG_BASE(x) (void __iomem *)(virt_bases[GCC_BASE] + (x))
+
+DEFINE_CLK_RPM_SMD_BRANCH(xo_clk_src, xo_a_clk_src,
+				RPM_MISC_CLK_TYPE, XO_ID, 19200000);
+DEFINE_CLK_RPM_SMD(bimc_clk, bimc_a_clk, RPM_MEM_CLK_TYPE, BIMC_ID, NULL);
+DEFINE_CLK_RPM_SMD(pcnoc_clk, pcnoc_a_clk, RPM_BUS_CLK_TYPE, PCNOC_ID, NULL);
+DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_ID, NULL);
+DEFINE_CLK_RPM_SMD(sysmmnoc_clk, sysmmnoc_a_clk, RPM_BUS_CLK_TYPE,
+							SYSMMNOC_ID, NULL);
+DEFINE_CLK_RPM_SMD(ipa_clk, ipa_a_clk, RPM_IPA_CLK_TYPE, IPA_ID, NULL);
+DEFINE_CLK_RPM_SMD_QDSS(qdss_clk, qdss_a_clk, RPM_MISC_CLK_TYPE, QDSS_ID);
+
+/* BIMC voter */
+static DEFINE_CLK_VOTER(bimc_msmbus_clk, &bimc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_msmbus_a_clk, &bimc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_usb_clk, &bimc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_usb_a_clk, &bimc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_wcnss_a_clk, &bimc_a_clk.c, LONG_MAX);
+
+/* PCNOC Voter */
+static DEFINE_CLK_VOTER(pcnoc_keepalive_a_clk, &pcnoc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(pcnoc_msmbus_clk, &pcnoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(pcnoc_msmbus_a_clk, &pcnoc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(pcnoc_usb_clk, &pcnoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(pcnoc_usb_a_clk, &pcnoc_a_clk.c, LONG_MAX);
+
+/* SNOC Voter */
+static DEFINE_CLK_VOTER(snoc_msmbus_clk, &snoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(snoc_msmbus_a_clk, &snoc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(snoc_usb_clk, &snoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(snoc_usb_a_clk, &snoc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(snoc_wcnss_a_clk, &snoc_a_clk.c, LONG_MAX);
+
+/* SYSMMNOC Voter */
+static DEFINE_CLK_VOTER(sysmmnoc_msmbus_clk, &sysmmnoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(sysmmnoc_msmbus_a_clk, &sysmmnoc_a_clk.c, LONG_MAX);
+
+/* XO Voter */
+static DEFINE_CLK_BRANCH_VOTER(xo_dwc3_clk, &xo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(xo_lpm_clk, &xo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(xo_pil_lpass_clk, &xo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(xo_pil_mss_clk, &xo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(xo_pil_pronto_clk, &xo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(xo_wlan_clk, &xo_clk_src.c);
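+
+/*
+ * A note on the voter pattern above, as understood from the clock-voter
+ * framework: each DEFINE_CLK_VOTER instance holds an independent rate vote
+ * on the shared RPM clock, which then runs at the maximum of all active
+ * votes, while DEFINE_CLK_BRANCH_VOTER entries only vote the XO buffer on
+ * or off without affecting its rate.
+ */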
+
+/* SMD_XO_BUFFER */
+DEFINE_CLK_RPM_SMD_XO_BUFFER(rf_clk2, rf_clk2_a, RF_CLK2_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(rf_clk3, rf_clk3_a, RF_CLK3_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(bb_clk1, bb_clk1_a, BB_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(bb_clk2, bb_clk2_a, BB_CLK2_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(div_clk2, div_clk2_a, DIV_CLK2_ID);
+
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(bb_clk1_pin, bb_clk1_a_pin, BB_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(bb_clk2_pin, bb_clk2_a_pin, BB_CLK2_ID);
+
+DEFINE_CLK_DUMMY(wcnss_m_clk, 0);
+DEFINE_EXT_CLK(debug_cpu_clk, NULL);
+
+static struct pll_vote_clk gpll0_clk_src = {
+	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE,
+	.en_mask = BIT(0),
+	.status_reg = (void __iomem *)GPLL0_MODE,
+	.status_mask = BIT(30),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.rate = 800000000,
+		.parent = &xo_clk_src.c,
+		.dbg_name = "gpll0_clk_src",
+		.ops = &clk_ops_pll_vote,
+		CLK_INIT(gpll0_clk_src.c),
+	},
+};
+
+static struct pll_vote_clk gpll2_clk_src = {
+	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE,
+	.en_mask = BIT(2),
+	.status_reg = (void __iomem *)GPLL2_MODE,
+	.status_mask = BIT(30),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.rate = 930000000,
+		.parent = &xo_clk_src.c,
+		.dbg_name = "gpll2_clk_src",
+		.ops = &clk_ops_pll_vote,
+		CLK_INIT(gpll2_clk_src.c),
+	},
+};
+
+static struct alpha_pll_masks gpll3_masks_p = {
+	.lock_mask = BIT(31),
+	.active_mask = BIT(30),
+	.vco_mask = BM(21, 20) >> 20,
+	.vco_shift = 20,
+	.alpha_en_mask = BIT(24),
+	.output_mask = 0xf,
+	.update_mask = BIT(22),
+	.post_div_mask = BM(11, 8),
+	.test_ctl_lo_mask = BM(31, 0),
+	.test_ctl_hi_mask = BM(31, 0),
+};
+
+static struct alpha_pll_vco_tbl gpll3_p_vco[] = {
+	VCO(0,  1000000000, 2000000000),
+};
+
+static struct alpha_pll_clk gpll3_clk_src = {
+	.masks = &gpll3_masks_p,
+	.base = &virt_bases[GCC_BASE],
+	.offset = GPLL3_MODE,
+	.vco_tbl = gpll3_p_vco,
+	.num_vco = ARRAY_SIZE(gpll3_p_vco),
+	.enable_config = 1,
+	.post_div_config = 1 << 8,
+	.slew = true,
+	.config_ctl_val = 0x4001055b,
+	.c = {
+		.rate = 1300000000,
+		.parent = &xo_clk_src.c,
+		.dbg_name = "gpll3_clk_src",
+		.ops = &clk_ops_dyna_alpha_pll,
+		VDD_DIG_FMAX_MAP1(SVS, 2000000000),
+		CLK_INIT(gpll3_clk_src.c),
+	},
+};
+
+static struct pll_vote_clk gpll4_clk_src = {
+	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE,
+	.en_mask = BIT(5),
+	.status_reg = (void __iomem *)GPLL4_MODE,
+	.status_mask = BIT(30),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.rate = 1152000000,
+		.parent = &xo_clk_src.c,
+		.dbg_name = "gpll4_clk_src",
+		.ops = &clk_ops_pll_vote,
+		CLK_INIT(gpll4_clk_src.c),
+	},
+};
+
+/* Brammo PLL status BIT(2) PLL_LOCK_DET */
+static struct pll_vote_clk gpll6_clk_src = {
+	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE,
+	.en_mask = BIT(7),
+	.status_reg = (void __iomem *)GPLL6_STATUS,
+	.status_mask = BIT(2),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.rate = 1080000000,
+		.parent = &xo_clk_src.c,
+		.dbg_name = "gpll6_clk_src",
+		.ops = &clk_ops_pll_vote,
+		CLK_INIT(gpll6_clk_src.c),
+	},
+};
+
+DEFINE_EXT_CLK(xo_pipe_clk_src, &xo_clk_src.c);
+DEFINE_EXT_CLK(gpll0_main_clk_src, &gpll0_clk_src.c);
+DEFINE_EXT_CLK(gpll0_main_div2_cci_clk_src, &gpll0_clk_src.c);
+DEFINE_EXT_CLK(gpll0_main_div2_clk_src, &gpll0_clk_src.c);
+DEFINE_EXT_CLK(gpll0_main_div2_mm_clk_src, &gpll0_clk_src.c);
+DEFINE_EXT_CLK(gpll0_main_div2_usb3_clk_src, &gpll0_clk_src.c);
+DEFINE_EXT_CLK(gpll0_main_mock_clk_src, &gpll0_clk_src.c);
+DEFINE_EXT_CLK(gpll2_out_main_clk_src, &gpll2_clk_src.c);
+DEFINE_EXT_CLK(gpll2_vcodec_clk_src, &gpll2_clk_src.c);
+DEFINE_EXT_CLK(gpll4_aux_clk_src, &gpll4_clk_src.c);
+DEFINE_EXT_CLK(gpll4_out_aux_clk_src, &gpll4_clk_src.c);
+DEFINE_EXT_CLK(gpll6_aux_clk_src, &gpll6_clk_src.c);
+DEFINE_EXT_CLK(gpll6_main_clk_src, &gpll6_clk_src.c);
+DEFINE_EXT_CLK(gpll6_main_div2_clk_src, &gpll6_clk_src.c);
+DEFINE_EXT_CLK(gpll6_main_div2_gfx_clk_src, &gpll6_clk_src.c);
+DEFINE_EXT_CLK(gpll6_main_gfx_clk_src, &gpll6_clk_src.c);
+DEFINE_EXT_CLK(gpll6_main_div2_mock_clk_src, &gpll6_clk_src.c);
+DEFINE_EXT_CLK(gpll6_out_aux_clk_src, &gpll6_clk_src.c);
+
+DEFINE_EXT_CLK(ext_pclk0_clk_src, NULL);
+DEFINE_EXT_CLK(ext_byte0_clk_src, NULL);
+DEFINE_EXT_CLK(ext_pclk1_clk_src, NULL);
+DEFINE_EXT_CLK(ext_byte1_clk_src, NULL);
+
+static struct clk_freq_tbl ftbl_camss_top_ahb_clk_src[] = {
+	F(  40000000, gpll0_main_div2,   10,    0,     0),
+	F(  80000000,           gpll0,   10,    0,     0),
+	F_END
+};
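+
+/*
+ * Frequency table rows are F(frequency, source, pre-divider, M, N): the
+ * 40 MHz row divides gpll0_main_div2 (400 MHz) by 10 with the M/N counter
+ * bypassed (0, 0), and the 80 MHz row divides gpll0 (800 MHz) by 10 the
+ * same way.
+ */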
+
+static struct rcg_clk camss_top_ahb_clk_src = {
+	.cmd_rcgr_reg = CAMSS_TOP_AHB_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_camss_top_ahb_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "camss_top_ahb_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 40000000, SVS_PLUS, 80000000),
+		CLK_INIT(camss_top_ahb_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_csi0_clk_src[] = {
+	F( 100000000, gpll0_main_div2_mm,    4,    0,     0),
+	F( 200000000,              gpll0,    4,    0,     0),
+	F( 310000000,              gpll2,    3,    0,     0),
+	F( 400000000,              gpll0,    2,    0,     0),
+	F( 465000000,              gpll2,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk csi0_clk_src = {
+	.cmd_rcgr_reg = CSI0_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csi0_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "csi0_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP5(LOW_SVS, 100000000, SVS, 200000000, SVS_PLUS,
+			310000000, NOM, 400000000, NOM_PLUS, 465000000),
+		CLK_INIT(csi0_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_apss_ahb_clk_src[] = {
+	F(  19200000,            xo_a,    1,    0,     0),
+	F(  25000000, gpll0_main_div2,   16,    0,     0),
+	F(  50000000,           gpll0,   16,    0,     0),
+	F( 100000000,           gpll0,    8,    0,     0),
+	F( 133330000,           gpll0,    6,    0,     0),
+	F_END
+};
+
+static struct rcg_clk apss_ahb_clk_src = {
+	.cmd_rcgr_reg = APSS_AHB_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_apss_ahb_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "apss_ahb_clk_src",
+		.ops = &clk_ops_rcg,
+		CLK_INIT(apss_ahb_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_csi1_clk_src[] = {
+	F( 100000000,    gpll0_main_div2,    4,    0,     0),
+	F( 200000000,              gpll0,    4,    0,     0),
+	F( 310000000,     gpll2_out_main,    3,    0,     0),
+	F( 400000000,              gpll0,    2,    0,     0),
+	F( 465000000,     gpll2_out_main,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk csi1_clk_src = {
+	.cmd_rcgr_reg = CSI1_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csi1_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "csi1_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP5(LOW_SVS, 100000000, SVS, 200000000, SVS_PLUS,
+				310000000, NOM, 400000000, NOM_PLUS, 465000000),
+		CLK_INIT(csi1_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_csi2_clk_src[] = {
+	F( 100000000,    gpll0_main_div2,    4,    0,     0),
+	F( 200000000,              gpll0,    4,    0,     0),
+	F( 310000000,     gpll2_out_main,    3,    0,     0),
+	F( 400000000,              gpll0,    2,    0,     0),
+	F( 465000000,     gpll2_out_main,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk csi2_clk_src = {
+	.cmd_rcgr_reg = CSI2_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csi2_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "csi2_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP5(LOW_SVS, 100000000, SVS, 200000000, SVS_PLUS,
+				310000000, NOM, 400000000, NOM_PLUS, 465000000),
+		CLK_INIT(csi2_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_vfe0_clk_src[] = {
+	F(  50000000, gpll0_main_div2_mm,    8,    0,     0),
+	F( 100000000, gpll0_main_div2_mm,    4,    0,     0),
+	F( 133330000,              gpll0,    6,    0,     0),
+	F( 160000000,              gpll0,    5,    0,     0),
+	F( 200000000,              gpll0,    4,    0,     0),
+	F( 266670000,              gpll0,    3,    0,     0),
+	F( 310000000,              gpll2,    3,    0,     0),
+	F( 400000000,              gpll0,    2,    0,     0),
+	F( 465000000,              gpll2,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk vfe0_clk_src = {
+	.cmd_rcgr_reg = VFE0_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_vfe0_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "vfe0_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP4(LOW_SVS, 100000000, SVS, 200000000, SVS_PLUS,
+				310000000, NOM, 465000000),
+		CLK_INIT(vfe0_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_gfx3d_clk_src[] = {
+	F_MM(  19200000, FIXED_CLK_SRC,                  xo,    1,    0,     0),
+	F_MM(  50000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    8,    0,     0),
+	F_MM(  80000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    5,    0,     0),
+	F_MM( 100000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    4,    0,     0),
+	F_MM( 133330000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    3,    0,     0),
+	F_MM( 160000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,  2.5,    0,     0),
+	F_MM( 200000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    2,    0,     0),
+	F_MM( 216000000, FIXED_CLK_SRC, gpll6_main_div2_gfx,  2.5,    0,     0),
+	F_MM( 266670000, FIXED_CLK_SRC,               gpll0,    3,    0,     0),
+	F_MM( 320000000, FIXED_CLK_SRC,               gpll0,  2.5,    0,     0),
+	F_MM( 400000000, FIXED_CLK_SRC,               gpll0,    2,    0,     0),
+	F_MM( 460800000, FIXED_CLK_SRC,       gpll4_out_aux,  2.5,    0,     0),
+	F_MM( 510000000,    1020000000,               gpll3,    1,    0,     0),
+	F_MM( 560000000,    1120000000,               gpll3,    1,    0,     0),
+	F_MM( 650000000,    1300000000,               gpll3,    1,    0,     0),
+	F_END
+};
+
+static struct clk_freq_tbl ftbl_gfx3d_clk_src_sdm450[] = {
+	F_MM(  19200000, FIXED_CLK_SRC,                  xo,    1,    0,     0),
+	F_MM(  50000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    8,    0,     0),
+	F_MM(  80000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    5,    0,     0),
+	F_MM( 100000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    4,    0,     0),
+	F_MM( 133330000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    3,    0,     0),
+	F_MM( 160000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,  2.5,    0,     0),
+	F_MM( 200000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    2,    0,     0),
+	F_MM( 216000000, FIXED_CLK_SRC, gpll6_main_div2_gfx,  2.5,    0,     0),
+	F_MM( 266670000, FIXED_CLK_SRC,               gpll0,    3,    0,     0),
+	F_MM( 320000000, FIXED_CLK_SRC,               gpll0,  2.5,    0,     0),
+	F_MM( 400000000, FIXED_CLK_SRC,               gpll0,    2,    0,     0),
+	F_MM( 460800000, FIXED_CLK_SRC,       gpll4_out_aux,  2.5,    0,     0),
+	F_MM( 510000000,    1020000000,               gpll3,    1,    0,     0),
+	F_MM( 560000000,    1120000000,               gpll3,    1,    0,     0),
+	F_MM( 600000000,    1200000000,               gpll3,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk gfx3d_clk_src = {
+	.cmd_rcgr_reg = GFX3D_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_gfx3d_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.non_local_control_timeout = 1000,
+	.base = &virt_bases[GFX_BASE],
+	.c = {
+		.dbg_name = "gfx3d_clk_src",
+		.ops = &clk_ops_rcg,
+		CLK_INIT(gfx3d_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_vcodec0_clk_src[] = {
+	F( 114290000, gpll0_main_div2,  3.5,    0,     0),
+	F( 228570000,           gpll0,  3.5,    0,     0),
+	F( 310000000,    gpll2_vcodec,    3,    0,     0),
+	F( 360000000,           gpll6,    3,    0,     0),
+	F( 400000000,           gpll0,    2,    0,     0),
+	F( 465000000,    gpll2_vcodec,    2,    0,     0),
+	F_END
+};
+
+static struct clk_freq_tbl ftbl_vcodec0_clk_src_540MHz[] = {
+	F( 114290000, gpll0_main_div2,  3.5,    0,     0),
+	F( 228570000,           gpll0,  3.5,    0,     0),
+	F( 310000000,    gpll2_vcodec,    3,    0,     0),
+	F( 360000000,           gpll6,    3,    0,     0),
+	F( 400000000,           gpll0,    2,    0,     0),
+	F( 465000000,    gpll2_vcodec,    2,    0,     0),
+	F( 540000000,           gpll6,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk vcodec0_clk_src = {
+	.cmd_rcgr_reg = VCODEC0_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_vcodec0_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "vcodec0_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP6(LOW_SVS, 114290000, SVS, 228570000, SVS_PLUS,
+				310000000, NOM, 360000000, NOM_PLUS, 400000000,
+				HIGH, 465000000),
+		CLK_INIT(vcodec0_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_cpp_clk_src[] = {
+	F( 100000000, gpll0_main_div2_mm,    4,    0,     0),
+	F( 200000000,              gpll0,    4,    0,     0),
+	F( 266670000,              gpll0,    3,    0,     0),
+	F( 320000000,              gpll0,  2.5,    0,     0),
+	F( 400000000,              gpll0,    2,    0,     0),
+	F( 465000000,              gpll2,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk cpp_clk_src = {
+	.cmd_rcgr_reg = CPP_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_cpp_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "cpp_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP5(LOW_SVS, 100000000, SVS, 200000000, SVS_PLUS,
+				266670000, NOM, 400000000, NOM_PLUS, 465000000),
+		CLK_INIT(cpp_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_jpeg0_clk_src[] = {
+	F(  66670000, gpll0_main_div2,    6,    0,     0),
+	F( 133330000,           gpll0,    6,    0,     0),
+	F( 200000000,           gpll0,    4,    0,     0),
+	F( 266670000,           gpll0,    3,    0,     0),
+	F( 310000000,  gpll2_out_main,    3,    0,     0),
+	F( 320000000,           gpll0,  2.5,    0,     0),
+	F_END
+};
+
+static struct rcg_clk jpeg0_clk_src = {
+	.cmd_rcgr_reg = JPEG0_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_jpeg0_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "jpeg0_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP6(LOW_SVS, 66670000, SVS, 133330000, SVS_PLUS,
+				200000000, NOM, 266670000, NOM_PLUS, 310000000,
+				HIGH, 320000000),
+		CLK_INIT(jpeg0_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_mdp_clk_src[] = {
+	F(  50000000, gpll0_main_div2,    8,    0,     0),
+	F(  80000000, gpll0_main_div2,    5,    0,     0),
+	F( 160000000, gpll0_main_div2,  2.5,    0,     0),
+	F( 200000000,           gpll0,    4,    0,     0),
+	F( 266670000,           gpll0,    3,    0,     0),
+	F( 320000000,           gpll0,  2.5,    0,     0),
+	F( 400000000,           gpll0,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk mdp_clk_src = {
+	.cmd_rcgr_reg = MDP_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_mdp_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "mdp_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP4(LOW_SVS, 160000000, SVS, 266670000, NOM,
+				320000000, HIGH, 400000000),
+		CLK_INIT(mdp_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_pclk0_clk_src[] = {
+	{
+		.div_src_val = BVAL(10, 8, xo_src_val)
+					| BVAL(4, 0, 0),
+		.src_clk = &xo_clk_src.c,
+		.freq_hz = 0,
+	},
+	{
+		.div_src_val = BVAL(10, 8, dsi0_phypll_mm_src_val)
+					| BVAL(4, 0, 0),
+		.src_clk = &ext_pclk0_clk_src.c,
+		.freq_hz = 0,
+	},
+	{
+		.div_src_val = BVAL(10, 8, dsi1_phypll_mm_src_val)
+					| BVAL(4, 0, 0),
+		.src_clk = &ext_pclk1_clk_src.c,
+		.freq_hz = 0,
+	},
+	F_END
+};
+
+static struct rcg_clk pclk0_clk_src = {
+	.cmd_rcgr_reg = PCLK0_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.current_freq = ftbl_pclk0_clk_src,
+	.freq_tbl = ftbl_pclk0_clk_src,
+	.base = &virt_bases[MDSS_BASE],
+	.c = {
+		.dbg_name = "pclk0_clk_src",
+		.ops = &clk_ops_pixel_multiparent,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 175000000, SVS, 280000000, NOM,
+				350000000),
+		CLK_INIT(pclk0_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_pclk1_clk_src[] = {
+	{
+		.div_src_val = BVAL(10, 8, xo_src_val)
+					| BVAL(4, 0, 0),
+		.src_clk = &xo_clk_src.c,
+		.freq_hz = 0,
+	},
+	{
+		.div_src_val = BVAL(10, 8, dsi1_phypll_clk_mm_src_val)
+					| BVAL(4, 0, 0),
+		.src_clk = &ext_pclk1_clk_src.c,
+		.freq_hz = 0,
+	},
+	{
+		.div_src_val = BVAL(10, 8, dsi0_phypll_clk_mm_src_val)
+					| BVAL(4, 0, 0),
+		.src_clk = &ext_pclk0_clk_src.c,
+		.freq_hz = 0,
+	},
+	F_END
+};
+
+static struct rcg_clk pclk1_clk_src = {
+	.cmd_rcgr_reg = PCLK1_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.current_freq = ftbl_pclk1_clk_src,
+	.freq_tbl = ftbl_pclk1_clk_src,
+	.base = &virt_bases[MDSS_BASE],
+	.c = {
+		.dbg_name = "pclk1_clk_src",
+		.ops = &clk_ops_pixel_multiparent,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 175000000, SVS, 280000000, NOM,
+				350000000),
+		CLK_INIT(pclk1_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_usb30_master_clk_src[] = {
+	F(  80000000, gpll0_main_div2_usb3,    5,    0,     0),
+	F( 100000000,                gpll0,    8,    0,     0),
+	F( 133330000,                gpll0,    6,    0,     0),
+	F_END
+};
+
+static struct rcg_clk usb30_master_clk_src = {
+	.cmd_rcgr_reg = USB30_MASTER_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_usb30_master_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "usb30_master_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 80000000, NOM, 133330000),
+		CLK_INIT(usb30_master_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_vfe1_clk_src[] = {
+	F(  50000000, gpll0_main_div2_mm,    8,    0,     0),
+	F( 100000000, gpll0_main_div2_mm,    4,    0,     0),
+	F( 133330000,              gpll0,    6,    0,     0),
+	F( 160000000,              gpll0,    5,    0,     0),
+	F( 200000000,              gpll0,    4,    0,     0),
+	F( 266670000,              gpll0,    3,    0,     0),
+	F( 310000000,              gpll2,    3,    0,     0),
+	F( 400000000,              gpll0,    2,    0,     0),
+	F( 465000000,              gpll2,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk vfe1_clk_src = {
+	.cmd_rcgr_reg = VFE1_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_vfe1_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "vfe1_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP4(LOW_SVS, 100000000, SVS, 200000000, SVS_PLUS,
+				310000000, NOM, 465000000),
+		CLK_INIT(vfe1_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_apc0_droop_detector_clk_src[] = {
+	F(  19200000,             xo,    1,    0,     0),
+	F( 400000000,          gpll0,    2,    0,     0),
+	F( 576000000,          gpll4,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk apc0_droop_detector_clk_src = {
+	.cmd_rcgr_reg = APC0_VOLTAGE_DROOP_DETECTOR_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_apc0_droop_detector_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "apc0_droop_detector_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 19200000, SVS, 400000000, NOM,
+				600000000),
+		CLK_INIT(apc0_droop_detector_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_apc1_droop_detector_clk_src[] = {
+	F(  19200000,             xo,    1,    0,     0),
+	F( 400000000,          gpll0,    2,    0,     0),
+	F( 576000000,          gpll4,    2,    0,     0),
+	F_END
+};
+
+static struct rcg_clk apc1_droop_detector_clk_src = {
+	.cmd_rcgr_reg = APC1_VOLTAGE_DROOP_DETECTOR_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_apc1_droop_detector_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "apc1_droop_detector_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 19200000, SVS, 400000000, NOM,
+				600000000),
+		CLK_INIT(apc1_droop_detector_clk_src.c),
+	},
+};
+
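+/*
+ * The I2C apps rates use integer dividers only (m = n = 0): 25 MHz is
+ * gpll0_main_div2 (400 MHz) / 16 and 50 MHz is gpll0 (800 MHz) / 16.
+ */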
+static struct clk_freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
+	F(  19200000,              xo,    1,    0,     0),
+	F(  25000000, gpll0_main_div2,   16,    0,     0),
+	F(  50000000,           gpll0,   16,    0,     0),
+	F_END
+};
+
+static struct rcg_clk blsp1_qup1_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP1_QUP1_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp1_qup1_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 25000000, SVS, 50000000),
+		CLK_INIT(blsp1_qup1_i2c_apps_clk_src.c),
+	},
+};
+
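+/*
+ * The SPI apps rates use the M/N counter for fine ratios, e.g. the
+ * 960 kHz entry is xo (19.2 MHz) / 10 * 1/2.
+ */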
+static struct clk_freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
+	F(    960000,              xo,   10,    1,     2),
+	F(   4800000,              xo,    4,    0,     0),
+	F(   9600000,              xo,    2,    0,     0),
+	F(  12500000, gpll0_main_div2,   16,    1,     2),
+	F(  16000000,           gpll0,   10,    1,     5),
+	F(  19200000,              xo,    1,    0,     0),
+	F(  25000000,           gpll0,   16,    1,     2),
+	F(  50000000,           gpll0,   16,    0,     0),
+	F_END
+};
+
+static struct rcg_clk blsp1_qup1_spi_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP1_QUP1_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp1_qup1_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 12500000, SVS, 25000000, NOM,
+				50000000),
+		CLK_INIT(blsp1_qup1_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup2_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP1_QUP2_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp1_qup2_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 25000000, SVS, 50000000),
+		CLK_INIT(blsp1_qup2_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup2_spi_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP1_QUP2_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp1_qup2_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 12500000, SVS, 25000000, NOM,
+				50000000),
+		CLK_INIT(blsp1_qup2_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup3_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP1_QUP3_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp1_qup3_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 25000000, SVS, 50000000),
+		CLK_INIT(blsp1_qup3_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup3_spi_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP1_QUP3_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp1_qup3_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 12500000, SVS, 25000000, NOM,
+				50000000),
+		CLK_INIT(blsp1_qup3_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup4_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP1_QUP4_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp1_qup4_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 25000000, SVS, 50000000),
+		CLK_INIT(blsp1_qup4_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_qup4_spi_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP1_QUP4_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp1_qup4_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 12500000, SVS, 25000000, NOM,
+				50000000),
+		CLK_INIT(blsp1_qup4_spi_apps_clk_src.c),
+	},
+};
+
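+/*
+ * The UART baud sources rely on large M/N ratios to hit exact baud
+ * multiples, e.g. 3.6864 MHz = gpll0_main_div2 (400 MHz) * 144 / 15625.
+ */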
+static struct clk_freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
+	F(   3686400, gpll0_main_div2,    1,  144, 15625),
+	F(   7372800, gpll0_main_div2,    1,  288, 15625),
+	F(  14745600, gpll0_main_div2,    1,  576, 15625),
+	F(  16000000, gpll0_main_div2,    5,    1,     5),
+	F(  19200000,              xo,    1,    0,     0),
+	F(  24000000,           gpll0,    1,    3,   100),
+	F(  25000000,           gpll0,   16,    1,     2),
+	F(  32000000,           gpll0,    1,    1,    25),
+	F(  40000000,           gpll0,    1,    1,    20),
+	F(  46400000,           gpll0,    1,   29,   500),
+	F(  48000000,           gpll0,    1,    3,    50),
+	F(  51200000,           gpll0,    1,    8,   125),
+	F(  56000000,           gpll0,    1,    7,   100),
+	F(  58982400,           gpll0,    1, 1152, 15625),
+	F(  60000000,           gpll0,    1,    3,    40),
+	F(  64000000,           gpll0,    1,    2,    25),
+	F_END
+};
+
+static struct rcg_clk blsp1_uart1_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP1_UART1_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp1_uart1_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 16000000, SVS, 32000000, NOM,
+				64000000),
+		CLK_INIT(blsp1_uart1_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp1_uart2_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP1_UART2_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp1_uart2_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 16000000, SVS, 32000000, NOM,
+				64000000),
+		CLK_INIT(blsp1_uart2_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup1_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP2_QUP1_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp2_qup1_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 25000000, SVS, 50000000),
+		CLK_INIT(blsp2_qup1_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup1_spi_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP2_QUP1_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp2_qup1_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 12500000, SVS, 25000000, NOM,
+				50000000),
+		CLK_INIT(blsp2_qup1_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup2_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP2_QUP2_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp2_qup2_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 25000000, SVS, 50000000),
+		CLK_INIT(blsp2_qup2_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup2_spi_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP2_QUP2_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp2_qup2_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 12500000, SVS, 25000000, NOM,
+				50000000),
+		CLK_INIT(blsp2_qup2_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup3_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP2_QUP3_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp2_qup3_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 25000000, SVS, 50000000),
+		CLK_INIT(blsp2_qup3_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup3_spi_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP2_QUP3_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp2_qup3_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 12500000, SVS, 25000000, NOM,
+				50000000),
+		CLK_INIT(blsp2_qup3_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup4_i2c_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP2_QUP4_I2C_APPS_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp2_qup4_i2c_apps_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 25000000, SVS, 50000000),
+		CLK_INIT(blsp2_qup4_i2c_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_qup4_spi_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP2_QUP4_SPI_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_spi_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp2_qup4_spi_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 12500000, SVS, 25000000, NOM,
+				50000000),
+		CLK_INIT(blsp2_qup4_spi_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_uart1_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP2_UART1_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp2_uart1_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 16000000, SVS, 32000000, NOM,
+				64000000),
+		CLK_INIT(blsp2_uart1_apps_clk_src.c),
+	},
+};
+
+static struct rcg_clk blsp2_uart2_apps_clk_src = {
+	.cmd_rcgr_reg = BLSP2_UART2_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "blsp2_uart2_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 16000000, SVS, 32000000, NOM,
+				64000000),
+		CLK_INIT(blsp2_uart2_apps_clk_src.c),
+	},
+};
+
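+/* CCI runs at 37.5 MHz: gpll0_main_div2_cci (400 MHz) * 3 / 32. */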
+static struct clk_freq_tbl ftbl_cci_clk_src[] = {
+	F(  19200000,                  xo,    1,    0,     0),
+	F(  37500000, gpll0_main_div2_cci,    1,    3,    32),
+	F_END
+};
+
+static struct rcg_clk cci_clk_src = {
+	.cmd_rcgr_reg = CCI_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_cci_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "cci_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP1(LOW_SVS, 37500000),
+		CLK_INIT(cci_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_csi0p_clk_src[] = {
+	F(  66670000, gpll0_main_div2_mm,    6,    0,     0),
+	F( 133330000,              gpll0,    6,    0,     0),
+	F( 200000000,              gpll0,    4,    0,     0),
+	F( 266670000,              gpll0,    3,    0,     0),
+	F( 310000000,              gpll2,    3,    0,     0),
+	F_END
+};
+
+static struct rcg_clk csi0p_clk_src = {
+	.cmd_rcgr_reg = CSI0P_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csi0p_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "csi0p_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP5(LOW_SVS, 66670000, SVS, 133330000, SVS_PLUS,
+				200000000, NOM, 266670000, NOM_PLUS, 310000000),
+		CLK_INIT(csi0p_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_csi1p_clk_src[] = {
+	F(  66670000, gpll0_main_div2_mm,    6,    0,     0),
+	F( 133330000,              gpll0,    6,    0,     0),
+	F( 200000000,              gpll0,    4,    0,     0),
+	F( 266670000,              gpll0,    3,    0,     0),
+	F( 310000000,              gpll2,    3,    0,     0),
+	F_END
+};
+
+static struct rcg_clk csi1p_clk_src = {
+	.cmd_rcgr_reg = CSI1P_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csi1p_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "csi1p_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP5(LOW_SVS, 66670000, SVS, 133330000, SVS_PLUS,
+				200000000, NOM, 266670000, NOM_PLUS, 310000000),
+		CLK_INIT(csi1p_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_csi2p_clk_src[] = {
+	F(  66670000, gpll0_main_div2_mm,    6,    0,     0),
+	F( 133330000,              gpll0,    6,    0,     0),
+	F( 200000000,              gpll0,    4,    0,     0),
+	F( 266670000,              gpll0,    3,    0,     0),
+	F( 310000000,              gpll2,    3,    0,     0),
+	F_END
+};
+
+static struct rcg_clk csi2p_clk_src = {
+	.cmd_rcgr_reg = CSI2P_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csi2p_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "csi2p_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP5(LOW_SVS, 66670000, SVS, 133330000, SVS_PLUS,
+				200000000, NOM, 266670000, NOM_PLUS, 310000000),
+		CLK_INIT(csi2p_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_camss_gp0_clk_src[] = {
+	F(  50000000, gpll0_main_div2,    8,    0,     0),
+	F( 100000000,           gpll0,    8,    0,     0),
+	F( 200000000,           gpll0,    4,    0,     0),
+	F( 266670000,           gpll0,    3,    0,     0),
+	F_END
+};
+
+static struct rcg_clk camss_gp0_clk_src = {
+	.cmd_rcgr_reg = CAMSS_GP0_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_camss_gp0_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "camss_gp0_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP4(LOW_SVS, 50000000, SVS, 100000000, SVS_PLUS,
+				200000000, NOM_PLUS, 266670000),
+		CLK_INIT(camss_gp0_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_camss_gp1_clk_src[] = {
+	F(  50000000, gpll0_main_div2,    8,    0,     0),
+	F( 100000000,           gpll0,    8,    0,     0),
+	F( 200000000,           gpll0,    4,    0,     0),
+	F( 266670000,           gpll0,    3,    0,     0),
+	F_END
+};
+
+static struct rcg_clk camss_gp1_clk_src = {
+	.cmd_rcgr_reg = CAMSS_GP1_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_camss_gp1_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "camss_gp1_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP4(LOW_SVS, 50000000, SVS, 100000000, SVS_PLUS,
+				200000000, NOM_PLUS, 266670000),
+		CLK_INIT(camss_gp1_clk_src.c),
+	},
+};
+
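+/*
+ * Sensor mclk rates derive from gpll6 (1080 MHz): 24 MHz is
+ * gpll6_main_div2 (540 MHz) * 2/45 and ~36.61 MHz is gpll6 * 2/59.
+ */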
+static struct clk_freq_tbl ftbl_mclk0_clk_src[] = {
+	F(  24000000, gpll6_main_div2,    1,    2,    45),
+	F(  33330000, gpll0_main_div2,   12,    0,     0),
+	F(  36610000,           gpll6,    1,    2,    59),
+	F(  66667000,           gpll0,   12,    0,     0),
+	F_END
+};
+
+static struct rcg_clk mclk0_clk_src = {
+	.cmd_rcgr_reg = MCLK0_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_mclk0_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "mclk0_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 33330000, SVS, 66670000),
+		CLK_INIT(mclk0_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_mclk1_clk_src[] = {
+	F(  24000000, gpll6_main_div2,    1,    2,    45),
+	F(  33330000, gpll0_main_div2,   12,    0,     0),
+	F(  36610000,           gpll6,    1,    2,    59),
+	F(  66667000,           gpll0,   12,    0,     0),
+	F_END
+};
+
+static struct rcg_clk mclk1_clk_src = {
+	.cmd_rcgr_reg = MCLK1_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_mclk1_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "mclk1_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 33330000, SVS, 66670000),
+		CLK_INIT(mclk1_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_mclk2_clk_src[] = {
+	F(  24000000, gpll6_main_div2,    1,    2,    45),
+	F(  33330000, gpll0_main_div2,   12,    0,     0),
+	F(  36610000,           gpll6,    1,    2,    59),
+	F(  66667000,           gpll0,   12,    0,     0),
+	F_END
+};
+
+static struct rcg_clk mclk2_clk_src = {
+	.cmd_rcgr_reg = MCLK2_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_mclk2_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "mclk2_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 33330000, SVS, 66670000),
+		CLK_INIT(mclk2_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_mclk3_clk_src[] = {
+	F(  24000000, gpll6_main_div2,    1,    2,    45),
+	F(  33330000, gpll0_main_div2,   12,    0,     0),
+	F(  36610000,           gpll6,    1,    2,    59),
+	F(  66667000,           gpll0,   12,    0,     0),
+	F_END
+};
+
+static struct rcg_clk mclk3_clk_src = {
+	.cmd_rcgr_reg = MCLK3_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_mclk3_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "mclk3_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 33330000, SVS, 66670000),
+		CLK_INIT(mclk3_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_csi0phytimer_clk_src[] = {
+	F( 100000000, gpll0_main_div2,    4,    0,     0),
+	F( 200000000,           gpll0,    4,    0,     0),
+	F( 266670000,           gpll0,    3,    0,     0),
+	F_END
+};
+
+static struct rcg_clk csi0phytimer_clk_src = {
+	.cmd_rcgr_reg = CSI0PHYTIMER_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csi0phytimer_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "csi0phytimer_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 100000000, SVS_PLUS, 200000000,
+				NOM_PLUS, 266670000),
+		CLK_INIT(csi0phytimer_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_csi1phytimer_clk_src[] = {
+	F( 100000000, gpll0_main_div2,    4,    0,     0),
+	F( 200000000,           gpll0,    4,    0,     0),
+	F( 266670000,           gpll0,    3,    0,     0),
+	F_END
+};
+
+static struct rcg_clk csi1phytimer_clk_src = {
+	.cmd_rcgr_reg = CSI1PHYTIMER_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csi1phytimer_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "csi1phytimer_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 100000000, SVS_PLUS, 200000000,
+				NOM_PLUS, 266670000),
+		CLK_INIT(csi1phytimer_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_csi2phytimer_clk_src[] = {
+	F( 100000000, gpll0_main_div2,    4,    0,     0),
+	F( 200000000,           gpll0,    4,    0,     0),
+	F( 266670000,           gpll0,    3,    0,     0),
+	F_END
+};
+
+static struct rcg_clk csi2phytimer_clk_src = {
+	.cmd_rcgr_reg = CSI2PHYTIMER_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csi2phytimer_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "csi2phytimer_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 100000000, SVS_PLUS, 200000000,
+				NOM_PLUS, 266670000),
+		CLK_INIT(csi2phytimer_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_crypto_clk_src[] = {
+	F(  40000000, gpll0_main_div2,   10,    0,     0),
+	F(  80000000,           gpll0,   10,    0,     0),
+	F( 100000000,           gpll0,    8,    0,     0),
+	F( 160000000,           gpll0,    5,    0,     0),
+	F_END
+};
+
+static struct rcg_clk crypto_clk_src = {
+	.cmd_rcgr_reg = CRYPTO_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_crypto_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "crypto_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 40000000, SVS, 80000000, NOM,
+				160000000),
+		CLK_INIT(crypto_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_gp1_clk_src[] = {
+	F(  19200000,             xo,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk gp1_clk_src = {
+	.cmd_rcgr_reg = GP1_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_gp1_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gp1_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP4(LOW_SVS, 50000000, SVS, 100000000, NOM,
+				200000000, NOM_PLUS, 266670000),
+		CLK_INIT(gp1_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_gp2_clk_src[] = {
+	F(  19200000,             xo,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk gp2_clk_src = {
+	.cmd_rcgr_reg = GP2_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_gp2_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gp2_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP4(LOW_SVS, 50000000, SVS, 100000000, NOM,
+				200000000, NOM_PLUS, 266670000),
+		CLK_INIT(gp2_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_gp3_clk_src[] = {
+	F(  19200000,             xo,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk gp3_clk_src = {
+	.cmd_rcgr_reg = GP3_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_gp3_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gp3_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP4(LOW_SVS, 50000000, SVS, 100000000, NOM,
+				200000000, NOM_PLUS, 266670000),
+		CLK_INIT(gp3_clk_src.c),
+	},
+};
+
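+/*
+ * The byte/pclk muxes are driven by the DSI PHY PLLs; freq_hz is 0
+ * because the rate tracks the external source. BVAL(10, 8, src) packs
+ * the source select into CFG bits [10:8], and BVAL(4, 0, 0) leaves the
+ * divider field at div-by-1.
+ */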
+static struct clk_freq_tbl ftbl_byte0_clk_src[] = {
+	{
+		.div_src_val = BVAL(10, 8, xo_src_val)
+					| BVAL(4, 0, 0),
+		.src_clk = &xo_clk_src.c,
+		.freq_hz = 0,
+	},
+	{
+		.div_src_val = BVAL(10, 8, dsi0_phypll_mm_src_val)
+					| BVAL(4, 0, 0),
+		.src_clk = &ext_byte0_clk_src.c,
+		.freq_hz = 0,
+	},
+	{
+		.div_src_val = BVAL(10, 8, dsi1_phypll_mm_src_val)
+					| BVAL(4, 0, 0),
+		.src_clk = &ext_byte1_clk_src.c,
+		.freq_hz = 0,
+	},
+	F_END
+};
+
+static struct rcg_clk byte0_clk_src = {
+	.cmd_rcgr_reg = BYTE0_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.current_freq = ftbl_byte0_clk_src,
+	.freq_tbl = ftbl_byte0_clk_src,
+	.base = &virt_bases[MDSS_BASE],
+	.c = {
+		.dbg_name = "byte0_clk_src",
+		.ops = &clk_ops_byte_multiparent,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 131250000, SVS, 210000000, NOM,
+				262500000),
+		CLK_INIT(byte0_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_byte1_clk_src[] = {
+	{
+		.div_src_val = BVAL(10, 8, xo_src_val)
+					| BVAL(4, 0, 0),
+		.src_clk = &xo_clk_src.c,
+		.freq_hz = 0,
+	},
+	{
+		.div_src_val = BVAL(10, 8, dsi1_phypll_clk_mm_src_val)
+					| BVAL(4, 0, 0),
+		.src_clk = &ext_byte1_clk_src.c,
+		.freq_hz = 0,
+	},
+	{
+		.div_src_val = BVAL(10, 8, dsi0_phypll_clk_mm_src_val)
+					| BVAL(4, 0, 0),
+		.src_clk = &ext_byte0_clk_src.c,
+		.freq_hz = 0,
+	},
+	F_END
+};
+
+static struct rcg_clk byte1_clk_src = {
+	.cmd_rcgr_reg = BYTE1_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.current_freq = ftbl_byte1_clk_src,
+	.freq_tbl = ftbl_byte1_clk_src,
+	.base = &virt_bases[MDSS_BASE],
+	.c = {
+		.dbg_name = "byte1_clk_src",
+		.ops = &clk_ops_byte_multiparent,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 131250000, SVS, 210000000, NOM,
+				262500000),
+		CLK_INIT(byte1_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_esc0_clk_src[] = {
+	F(  19200000,             xo,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk esc0_clk_src = {
+	.cmd_rcgr_reg = ESC0_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_esc0_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "esc0_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP1(LOW_SVS, 19200000),
+		CLK_INIT(esc0_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_esc1_clk_src[] = {
+	F(  19200000,             xo,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk esc1_clk_src = {
+	.cmd_rcgr_reg = ESC1_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_esc1_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "esc1_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP1(LOW_SVS, 19200000),
+		CLK_INIT(esc1_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_vsync_clk_src[] = {
+	F(  19200000,             xo,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk vsync_clk_src = {
+	.cmd_rcgr_reg = VSYNC_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_vsync_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "vsync_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP1(LOW_SVS, 19200000),
+		CLK_INIT(vsync_clk_src.c),
+	},
+};
+
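+/*
+ * The F() macro also supports half-integer dividers (encoded in hardware
+ * as 2 * div - 1): 32 MHz below is gpll0_main_div2 (400 MHz) / 12.5.
+ */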
+static struct clk_freq_tbl ftbl_pdm2_clk_src[] = {
+	F(  32000000, gpll0_main_div2, 12.5,    0,     0),
+	F(  64000000,           gpll0, 12.5,    0,     0),
+	F_END
+};
+
+static struct rcg_clk pdm2_clk_src = {
+	.cmd_rcgr_reg = PDM2_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_pdm2_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "pdm2_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 32000000, SVS, 64000000),
+		CLK_INIT(pdm2_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_rbcpr_gfx_clk_src[] = {
+	F(  19200000,             xo,    1,    0,     0),
+	F(  50000000,          gpll0,   16,    0,     0),
+	F_END
+};
+
+static struct rcg_clk rbcpr_gfx_clk_src = {
+	.cmd_rcgr_reg = RBCPR_GFX_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_rbcpr_gfx_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "rbcpr_gfx_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 19200000, SVS, 50000000),
+		CLK_INIT(rbcpr_gfx_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_sdcc1_apps_clk_src[] = {
+	F(    144000,              xo,   16,    3,    25),
+	F(    400000,              xo,   12,    1,     4),
+	F(  20000000, gpll0_main_div2,    5,    1,     4),
+	F(  25000000, gpll0_main_div2,   16,    0,     0),
+	F(  50000000,           gpll0,   16,    0,     0),
+	F( 100000000,           gpll0,    8,    0,     0),
+	F( 177770000,           gpll0,  4.5,    0,     0),
+	F( 192000000,           gpll4,    6,    0,     0),
+	F( 384000000,           gpll4,    3,    0,     0),
+	F_END
+};
+
+static struct rcg_clk sdcc1_apps_clk_src = {
+	.cmd_rcgr_reg = SDCC1_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_sdcc1_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "sdcc1_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 25000000, SVS, 100000000, NOM,
+				400000000),
+		CLK_INIT(sdcc1_apps_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
+	F(  80000000, gpll0_main_div2,    5,    0,     0),
+	F( 160000000,           gpll0,    5,    0,     0),
+	F( 270000000,           gpll6,    4,    0,     0),
+	F_END
+};
+
+static struct rcg_clk sdcc1_ice_core_clk_src = {
+	.cmd_rcgr_reg = SDCC1_ICE_CORE_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_sdcc1_ice_core_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "sdcc1_ice_core_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 80000000, SVS, 160000000, NOM,
+				270000000),
+		CLK_INIT(sdcc1_ice_core_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_sdcc2_apps_clk_src[] = {
+	F(    144000,              xo,   16,    3,    25),
+	F(    400000,              xo,   12,    1,     4),
+	F(  20000000, gpll0_main_div2,    5,    1,     4),
+	F(  25000000, gpll0_main_div2,   16,    0,     0),
+	F(  50000000,           gpll0,   16,    0,     0),
+	F( 100000000,           gpll0,    8,    0,     0),
+	F( 177770000,           gpll0,  4.5,    0,     0),
+	F( 192000000,       gpll4_aux,    6,    0,     0),
+	F( 200000000,           gpll0,    4,    0,     0),
+	F_END
+};
+
+static struct rcg_clk sdcc2_apps_clk_src = {
+	.cmd_rcgr_reg = SDCC2_APPS_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_sdcc2_apps_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "sdcc2_apps_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP3(LOW_SVS, 25000000, SVS, 100000000, NOM,
+				200000000),
+		CLK_INIT(sdcc2_apps_clk_src.c),
+	},
+};
+
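+/* 60 MHz below is gpll6_main_div2_mock (540 MHz) / 9 with a unity M/N. */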
+static struct clk_freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
+	F(  19200000,                       xo,    1,    0,     0),
+	F(  60000000,     gpll6_main_div2_mock,    9,    1,     1),
+	F_END
+};
+
+static struct rcg_clk usb30_mock_utmi_clk_src = {
+	.cmd_rcgr_reg = USB30_MOCK_UTMI_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_usb30_mock_utmi_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "usb30_mock_utmi_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP2(LOW_SVS, 19200000, SVS, 60000000),
+		CLK_INIT(usb30_mock_utmi_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_usb3_aux_clk_src[] = {
+	F(  19200000,             xo,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk usb3_aux_clk_src = {
+	.cmd_rcgr_reg = USB3_AUX_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_usb3_aux_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "usb3_aux_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP1(LOW_SVS, 19200000),
+		CLK_INIT(usb3_aux_clk_src.c),
+	},
+};
+
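+/*
+ * Branch (CBCR) gate clocks follow. has_sibling = 1 marks branches that
+ * share their RCG with other branches, so they only gate and do not set
+ * the source rate; branches with has_sibling = 0 forward rate requests
+ * to their parent RCG.
+ */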
+static struct branch_clk gcc_apc0_droop_detector_gpll0_clk = {
+	.cbcr_reg = APC0_VOLTAGE_DROOP_DETECTOR_GPLL0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_apc0_droop_detector_gpll0_clk",
+		.parent = &apc0_droop_detector_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_apc0_droop_detector_gpll0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_apc1_droop_detector_gpll0_clk = {
+	.cbcr_reg = APC1_VOLTAGE_DROOP_DETECTOR_GPLL0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_apc1_droop_detector_gpll0_clk",
+		.parent = &apc1_droop_detector_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_apc1_droop_detector_gpll0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup1_i2c_apps_clk = {
+	.cbcr_reg = BLSP1_QUP1_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp1_qup1_i2c_apps_clk",
+		.parent = &blsp1_qup1_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup1_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup1_spi_apps_clk = {
+	.cbcr_reg = BLSP1_QUP1_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp1_qup1_spi_apps_clk",
+		.parent = &blsp1_qup1_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup1_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup2_i2c_apps_clk = {
+	.cbcr_reg = BLSP1_QUP2_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp1_qup2_i2c_apps_clk",
+		.parent = &blsp1_qup2_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup2_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup2_spi_apps_clk = {
+	.cbcr_reg = BLSP1_QUP2_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp1_qup2_spi_apps_clk",
+		.parent = &blsp1_qup2_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup2_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup3_i2c_apps_clk = {
+	.cbcr_reg = BLSP1_QUP3_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp1_qup3_i2c_apps_clk",
+		.parent = &blsp1_qup3_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup3_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup3_spi_apps_clk = {
+	.cbcr_reg = BLSP1_QUP3_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp1_qup3_spi_apps_clk",
+		.parent = &blsp1_qup3_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup3_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup4_i2c_apps_clk = {
+	.cbcr_reg = BLSP1_QUP4_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp1_qup4_i2c_apps_clk",
+		.parent = &blsp1_qup4_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup4_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_qup4_spi_apps_clk = {
+	.cbcr_reg = BLSP1_QUP4_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp1_qup4_spi_apps_clk",
+		.parent = &blsp1_qup4_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_qup4_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_uart1_apps_clk = {
+	.cbcr_reg = BLSP1_UART1_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp1_uart1_apps_clk",
+		.parent = &blsp1_uart1_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_uart1_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp1_uart2_apps_clk = {
+	.cbcr_reg = BLSP1_UART2_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp1_uart2_apps_clk",
+		.parent = &blsp1_uart2_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp1_uart2_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup1_i2c_apps_clk = {
+	.cbcr_reg = BLSP2_QUP1_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp2_qup1_i2c_apps_clk",
+		.parent = &blsp2_qup1_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup1_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup1_spi_apps_clk = {
+	.cbcr_reg = BLSP2_QUP1_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp2_qup1_spi_apps_clk",
+		.parent = &blsp2_qup1_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup1_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup2_i2c_apps_clk = {
+	.cbcr_reg = BLSP2_QUP2_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp2_qup2_i2c_apps_clk",
+		.parent = &blsp2_qup2_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup2_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup2_spi_apps_clk = {
+	.cbcr_reg = BLSP2_QUP2_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp2_qup2_spi_apps_clk",
+		.parent = &blsp2_qup2_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup2_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup3_i2c_apps_clk = {
+	.cbcr_reg = BLSP2_QUP3_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp2_qup3_i2c_apps_clk",
+		.parent = &blsp2_qup3_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup3_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup3_spi_apps_clk = {
+	.cbcr_reg = BLSP2_QUP3_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp2_qup3_spi_apps_clk",
+		.parent = &blsp2_qup3_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup3_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup4_i2c_apps_clk = {
+	.cbcr_reg = BLSP2_QUP4_I2C_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp2_qup4_i2c_apps_clk",
+		.parent = &blsp2_qup4_i2c_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup4_i2c_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_qup4_spi_apps_clk = {
+	.cbcr_reg = BLSP2_QUP4_SPI_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp2_qup4_spi_apps_clk",
+		.parent = &blsp2_qup4_spi_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_qup4_spi_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_uart1_apps_clk = {
+	.cbcr_reg = BLSP2_UART1_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp2_uart1_apps_clk",
+		.parent = &blsp2_uart1_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_uart1_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_blsp2_uart2_apps_clk = {
+	.cbcr_reg = BLSP2_UART2_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp2_uart2_apps_clk",
+		.parent = &blsp2_uart2_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_blsp2_uart2_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_bimc_gpu_clk = {
+	.cbcr_reg = BIMC_GPU_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_bimc_gpu_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_bimc_gpu_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_cci_ahb_clk = {
+	.cbcr_reg = CAMSS_CCI_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_cci_ahb_clk",
+		.parent = &camss_top_ahb_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_cci_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_cci_clk = {
+	.cbcr_reg = CAMSS_CCI_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_cci_clk",
+		.parent = &cci_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_cci_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_cpp_ahb_clk = {
+	.cbcr_reg = CAMSS_CPP_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_cpp_ahb_clk",
+		.parent = &camss_top_ahb_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_cpp_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_cpp_axi_clk = {
+	.cbcr_reg = CAMSS_CPP_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_cpp_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_cpp_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_cpp_clk = {
+	.cbcr_reg = CAMSS_CPP_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_cpp_clk",
+		.parent = &cpp_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_cpp_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi0_ahb_clk = {
+	.cbcr_reg = CAMSS_CSI0_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi0_ahb_clk",
+		.parent = &camss_top_ahb_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi0_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi0_clk = {
+	.cbcr_reg = CAMSS_CSI0_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi0_clk",
+		.parent = &csi0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi0_csiphy_3p_clk = {
+	.cbcr_reg = CAMSS_CSI0_CSIPHY_3P_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi0_csiphy_3p_clk",
+		.parent = &csi0p_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi0_csiphy_3p_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi0phy_clk = {
+	.cbcr_reg = CAMSS_CSI0PHY_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi0phy_clk",
+		.parent = &csi0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi0phy_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi0pix_clk = {
+	.cbcr_reg = CAMSS_CSI0PIX_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi0pix_clk",
+		.parent = &csi0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi0pix_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi0rdi_clk = {
+	.cbcr_reg = CAMSS_CSI0RDI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi0rdi_clk",
+		.parent = &csi0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi0rdi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi1_ahb_clk = {
+	.cbcr_reg = CAMSS_CSI1_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi1_ahb_clk",
+		.parent = &camss_top_ahb_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi1_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi1_clk = {
+	.cbcr_reg = CAMSS_CSI1_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi1_clk",
+		.parent = &csi1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi1_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi1_csiphy_3p_clk = {
+	.cbcr_reg = CAMSS_CSI1_CSIPHY_3P_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi1_csiphy_3p_clk",
+		.parent = &csi1p_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi1_csiphy_3p_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi1phy_clk = {
+	.cbcr_reg = CAMSS_CSI1PHY_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi1phy_clk",
+		.parent = &csi1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi1phy_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi1pix_clk = {
+	.cbcr_reg = CAMSS_CSI1PIX_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi1pix_clk",
+		.parent = &csi1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi1pix_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi1rdi_clk = {
+	.cbcr_reg = CAMSS_CSI1RDI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi1rdi_clk",
+		.parent = &csi1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi1rdi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi2_ahb_clk = {
+	.cbcr_reg = CAMSS_CSI2_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi2_ahb_clk",
+		.parent = &camss_top_ahb_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi2_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi2_clk = {
+	.cbcr_reg = CAMSS_CSI2_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi2_clk",
+		.parent = &csi2_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi2_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi2_csiphy_3p_clk = {
+	.cbcr_reg = CAMSS_CSI2_CSIPHY_3P_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi2_csiphy_3p_clk",
+		.parent = &csi2p_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi2_csiphy_3p_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi2phy_clk = {
+	.cbcr_reg = CAMSS_CSI2PHY_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi2phy_clk",
+		.parent = &csi2_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi2phy_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi2pix_clk = {
+	.cbcr_reg = CAMSS_CSI2PIX_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi2pix_clk",
+		.parent = &csi2_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi2pix_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi2rdi_clk = {
+	.cbcr_reg = CAMSS_CSI2RDI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi2rdi_clk",
+		.parent = &csi2_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi2rdi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi_vfe0_clk = {
+	.cbcr_reg = CAMSS_CSI_VFE0_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi_vfe0_clk",
+		.parent = &vfe0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi_vfe0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi_vfe1_clk = {
+	.cbcr_reg = CAMSS_CSI_VFE1_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi_vfe1_clk",
+		.parent = &vfe1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi_vfe1_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_gp0_clk = {
+	.cbcr_reg = CAMSS_GP0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_gp0_clk",
+		.parent = &camss_gp0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_gp0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_gp1_clk = {
+	.cbcr_reg = CAMSS_GP1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_gp1_clk",
+		.parent = &camss_gp1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_gp1_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_ispif_ahb_clk = {
+	.cbcr_reg = CAMSS_ISPIF_AHB_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_ispif_ahb_clk",
+		.parent = &camss_top_ahb_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_ispif_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_jpeg0_clk = {
+	.cbcr_reg = CAMSS_JPEG0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_jpeg0_clk",
+		.parent = &jpeg0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_jpeg0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_jpeg_ahb_clk = {
+	.cbcr_reg = CAMSS_JPEG_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_jpeg_ahb_clk",
+		.parent = &camss_top_ahb_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_jpeg_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_jpeg_axi_clk = {
+	.cbcr_reg = CAMSS_JPEG_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_jpeg_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_jpeg_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_mclk0_clk = {
+	.cbcr_reg = CAMSS_MCLK0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_mclk0_clk",
+		.parent = &mclk0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_mclk0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_mclk1_clk = {
+	.cbcr_reg = CAMSS_MCLK1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_mclk1_clk",
+		.parent = &mclk1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_mclk1_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_mclk2_clk = {
+	.cbcr_reg = CAMSS_MCLK2_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_mclk2_clk",
+		.parent = &mclk2_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_mclk2_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_mclk3_clk = {
+	.cbcr_reg = CAMSS_MCLK3_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_mclk3_clk",
+		.parent = &mclk3_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_mclk3_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_micro_ahb_clk = {
+	.cbcr_reg = CAMSS_MICRO_AHB_CBCR,
+	.bcr_reg = CAMSS_MICRO_BCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_micro_ahb_clk",
+		.parent = &camss_top_ahb_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_micro_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi0phytimer_clk = {
+	.cbcr_reg = CAMSS_CSI0PHYTIMER_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi0phytimer_clk",
+		.parent = &csi0phytimer_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi0phytimer_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi1phytimer_clk = {
+	.cbcr_reg = CAMSS_CSI1PHYTIMER_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi1phytimer_clk",
+		.parent = &csi1phytimer_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi1phytimer_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_csi2phytimer_clk = {
+	.cbcr_reg = CAMSS_CSI2PHYTIMER_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_csi2phytimer_clk",
+		.parent = &csi2phytimer_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_csi2phytimer_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_ahb_clk = {
+	.cbcr_reg = CAMSS_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_top_ahb_clk = {
+	.cbcr_reg = CAMSS_TOP_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_top_ahb_clk",
+		.parent = &camss_top_ahb_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_top_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_vfe0_clk = {
+	.cbcr_reg = CAMSS_VFE0_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_vfe0_clk",
+		.parent = &vfe0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_vfe0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_vfe_ahb_clk = {
+	.cbcr_reg = CAMSS_VFE_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_vfe_ahb_clk",
+		.parent = &camss_top_ahb_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_vfe_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_vfe_axi_clk = {
+	.cbcr_reg = CAMSS_VFE_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_vfe_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_vfe_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_vfe1_ahb_clk = {
+	.cbcr_reg = CAMSS_VFE1_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_vfe1_ahb_clk",
+		.parent = &camss_top_ahb_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_vfe1_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_vfe1_axi_clk = {
+	.cbcr_reg = CAMSS_VFE1_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_vfe1_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_vfe1_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_camss_vfe1_clk = {
+	.cbcr_reg = CAMSS_VFE1_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_camss_vfe1_clk",
+		.parent = &vfe1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_camss_vfe1_clk.c),
+	},
+};
+
+static struct branch_clk gcc_dcc_clk = {
+	.cbcr_reg = DCC_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_dcc_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_dcc_clk.c),
+	},
+};
+
+static struct branch_clk gcc_gp1_clk = {
+	.cbcr_reg = GP1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_gp1_clk",
+		.parent = &gp1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_gp1_clk.c),
+	},
+};
+
+static struct branch_clk gcc_gp2_clk = {
+	.cbcr_reg = GP2_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_gp2_clk",
+		.parent = &gp2_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_gp2_clk.c),
+	},
+};
+
+static struct branch_clk gcc_gp3_clk = {
+	.cbcr_reg = GP3_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_gp3_clk",
+		.parent = &gp3_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_gp3_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_ahb_clk = {
+	.cbcr_reg = MDSS_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mdss_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_axi_clk = {
+	.cbcr_reg = MDSS_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mdss_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_byte0_clk = {
+	.cbcr_reg = MDSS_BYTE0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[MDSS_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_byte0_clk",
+		.parent = &byte0_clk_src.c,
+		.ops = &clk_ops_branch,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(gcc_mdss_byte0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_byte1_clk = {
+	.cbcr_reg = MDSS_BYTE1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[MDSS_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_byte1_clk",
+		.parent = &byte1_clk_src.c,
+		.ops = &clk_ops_branch,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(gcc_mdss_byte1_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_esc0_clk = {
+	.cbcr_reg = MDSS_ESC0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_esc0_clk",
+		.parent = &esc0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mdss_esc0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_esc1_clk = {
+	.cbcr_reg = MDSS_ESC1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_esc1_clk",
+		.parent = &esc1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mdss_esc1_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_mdp_clk = {
+	.cbcr_reg = MDSS_MDP_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_mdp_clk",
+		.parent = &mdp_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mdss_mdp_clk.c),
+	},
+};
+
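+/*
+ * Voter clocks let the MDP and rotator clients request rates on
+ * gcc_mdss_mdp_clk independently; the framework applies the aggregate
+ * (highest active) vote to the underlying branch.
+ */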
+static DEFINE_CLK_VOTER(mdss_mdp_vote_clk, &gcc_mdss_mdp_clk.c, 0);
+static DEFINE_CLK_VOTER(mdss_rotator_vote_clk, &gcc_mdss_mdp_clk.c, 0);
+
+static struct branch_clk gcc_mdss_pclk0_clk = {
+	.cbcr_reg = MDSS_PCLK0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[MDSS_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_pclk0_clk",
+		.parent = &pclk0_clk_src.c,
+		.ops = &clk_ops_branch,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(gcc_mdss_pclk0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_pclk1_clk = {
+	.cbcr_reg = MDSS_PCLK1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[MDSS_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_pclk1_clk",
+		.parent = &pclk1_clk_src.c,
+		.ops = &clk_ops_branch,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(gcc_mdss_pclk1_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_vsync_clk = {
+	.cbcr_reg = MDSS_VSYNC_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_vsync_clk",
+		.parent = &vsync_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mdss_vsync_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mss_cfg_ahb_clk = {
+	.cbcr_reg = MSS_CFG_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mss_cfg_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mss_cfg_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mss_q6_bimc_axi_clk = {
+	.cbcr_reg = MSS_Q6_BIMC_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mss_q6_bimc_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mss_q6_bimc_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_bimc_gfx_clk = {
+	.cbcr_reg = BIMC_GFX_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GFX_BASE],
+	.c = {
+		.dbg_name = "gcc_bimc_gfx_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_bimc_gfx_clk.c),
+	},
+};
+
+static struct branch_clk gcc_oxili_ahb_clk = {
+	.cbcr_reg = OXILI_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GFX_BASE],
+	.c = {
+		.dbg_name = "gcc_oxili_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_oxili_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_oxili_aon_clk = {
+	.cbcr_reg = OXILI_AON_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GFX_BASE],
+	.c = {
+		.dbg_name = "gcc_oxili_aon_clk",
+		.parent = &gfx3d_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_oxili_aon_clk.c),
+	},
+};
+
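+/*
+ * The GFX3D branch votes on the separate vdd_gfx rail (via .vdd_class)
+ * rather than vdd_dig, since the GPU core sits on its own power domain.
+ */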
+static struct branch_clk gcc_oxili_gfx3d_clk = {
+	.cbcr_reg = OXILI_GFX3D_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GFX_BASE],
+	.c = {
+		.dbg_name = "gcc_oxili_gfx3d_clk",
+		.parent = &gfx3d_clk_src.c,
+		.vdd_class = &vdd_gfx,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_oxili_gfx3d_clk.c),
+	},
+};
+
+static struct branch_clk gcc_oxili_timer_clk = {
+	.cbcr_reg = OXILI_TIMER_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GFX_BASE],
+	.c = {
+		.dbg_name = "gcc_oxili_timer_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_oxili_timer_clk.c),
+	},
+};
+
+static struct branch_clk gcc_pcnoc_usb3_axi_clk = {
+	.cbcr_reg = PCNOC_USB3_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_pcnoc_usb3_axi_clk",
+		.parent = &usb30_master_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_pcnoc_usb3_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_pdm2_clk = {
+	.cbcr_reg = PDM2_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_pdm2_clk",
+		.parent = &pdm2_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_pdm2_clk.c),
+	},
+};
+
+static struct branch_clk gcc_pdm_ahb_clk = {
+	.cbcr_reg = PDM_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_pdm_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_pdm_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_rbcpr_gfx_clk = {
+	.cbcr_reg = RBCPR_GFX_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_rbcpr_gfx_clk",
+		.parent = &rbcpr_gfx_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_rbcpr_gfx_clk.c),
+	},
+};
+
+static struct branch_clk gcc_sdcc1_ahb_clk = {
+	.cbcr_reg = SDCC1_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_sdcc1_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_sdcc1_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_sdcc1_apps_clk = {
+	.cbcr_reg = SDCC1_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_sdcc1_apps_clk",
+		.parent = &sdcc1_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_sdcc1_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_sdcc1_ice_core_clk = {
+	.cbcr_reg = SDCC1_ICE_CORE_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_sdcc1_ice_core_clk",
+		.parent = &sdcc1_ice_core_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_sdcc1_ice_core_clk.c),
+	},
+};
+
+static struct branch_clk gcc_sdcc2_ahb_clk = {
+	.cbcr_reg = SDCC2_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_sdcc2_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_sdcc2_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_sdcc2_apps_clk = {
+	.cbcr_reg = SDCC2_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_sdcc2_apps_clk",
+		.parent = &sdcc2_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_sdcc2_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_usb30_master_clk = {
+	.cbcr_reg = USB30_MASTER_CBCR,
+	.bcr_reg = USB_30_BCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_usb30_master_clk",
+		.parent = &usb30_master_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_usb30_master_clk.c),
+	},
+};
+
+static struct branch_clk gcc_usb30_mock_utmi_clk = {
+	.cbcr_reg = USB30_MOCK_UTMI_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_usb30_mock_utmi_clk",
+		.parent = &usb30_mock_utmi_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_usb30_mock_utmi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_usb30_sleep_clk = {
+	.cbcr_reg = USB30_SLEEP_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_usb30_sleep_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_usb30_sleep_clk.c),
+	},
+};
+
+static struct branch_clk gcc_usb3_aux_clk = {
+	.cbcr_reg = USB3_AUX_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_usb3_aux_clk",
+		.parent = &usb3_aux_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_usb3_aux_clk.c),
+	},
+};
+
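+/*
+ * Skip the halt check when disabling this branch; the status bit may not
+ * clear while the attached USB PHY is held in reset.
+ */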
+static struct branch_clk gcc_usb_phy_cfg_ahb_clk = {
+	.cbcr_reg = USB_PHY_CFG_AHB_CBCR,
+	.has_sibling = 1,
+	.no_halt_check_on_disable = true,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_usb_phy_cfg_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_usb_phy_cfg_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_venus0_ahb_clk = {
+	.cbcr_reg = VENUS0_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_venus0_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_venus0_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_venus0_axi_clk = {
+	.cbcr_reg = VENUS0_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_venus0_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_venus0_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_venus0_core0_vcodec0_clk = {
+	.cbcr_reg = VENUS0_CORE0_VCODEC0_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_venus0_core0_vcodec0_clk",
+		.parent = &vcodec0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_venus0_core0_vcodec0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_venus0_vcodec0_clk = {
+	.cbcr_reg = VENUS0_VCODEC0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_venus0_vcodec0_clk",
+		.parent = &vcodec0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_venus0_vcodec0_clk.c),
+	},
+};
+
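+/*
+ * Simple gate clocks: a single enable bit with no halt status to poll.
+ * The USB3 pipe gate additionally waits 50 us after each toggle.
+ */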
+static struct gate_clk gcc_qusb_ref_clk = {
+	.en_reg = QUSB_REF_CLK_EN,
+	.en_mask = BIT(0),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_qusb_ref_clk",
+		.ops = &clk_ops_gate,
+		CLK_INIT(gcc_qusb_ref_clk.c),
+	},
+};
+
+static struct gate_clk gcc_usb_ss_ref_clk = {
+	.en_reg = USB_SS_REF_CLK_EN,
+	.en_mask = BIT(0),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_usb_ss_ref_clk",
+		.ops = &clk_ops_gate,
+		CLK_INIT(gcc_usb_ss_ref_clk.c),
+	},
+};
+
+static struct gate_clk gcc_usb3_pipe_clk = {
+	.en_reg = USB3_PIPE_CBCR,
+	.en_mask = BIT(0),
+	.delay_us = 50,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_usb3_pipe_clk",
+		.ops = &clk_ops_gate,
+		CLK_INIT(gcc_usb3_pipe_clk.c),
+	},
+};
+
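+/*
+ * Block resets (BCRs) modeled as clocks so that consumers can assert and
+ * deassert them through the clk_reset() API.
+ */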
+static struct reset_clk gcc_qusb2_phy_reset = {
+	.reset_reg = QUSB2_PHY_BCR,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_qusb2_phy_reset",
+		.ops = &clk_ops_rst,
+		CLK_INIT(gcc_qusb2_phy_reset.c),
+	},
+};
+
+static struct reset_clk gcc_usb3_phy_reset = {
+	.reset_reg = USB3_PHY_BCR,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_usb3_phy_reset",
+		.ops = &clk_ops_rst,
+		CLK_INIT(gcc_usb3_phy_reset.c),
+	},
+};
+
+static struct reset_clk gcc_usb3phy_phy_reset = {
+	.reset_reg = USB3PHY_PHY_BCR,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_usb3phy_phy_reset",
+		.ops = &clk_ops_rst,
+		CLK_INIT(gcc_usb3phy_phy_reset.c),
+	},
+};
+
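+/*
+ * Voted branch clocks: instead of writing the CBCR directly, the APPS
+ * processor sets its bit in a shared ENA_VOTE register; the branch stays
+ * on while any master's vote bit is set.
+ */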
+static struct local_vote_clk gcc_apss_ahb_clk = {
+	.cbcr_reg = APSS_AHB_CBCR,
+	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(14),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_apss_ahb_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_apss_ahb_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_apss_axi_clk = {
+	.cbcr_reg = APSS_AXI_CBCR,
+	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(13),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_apss_axi_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_apss_axi_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_blsp1_ahb_clk = {
+	.cbcr_reg = BLSP1_AHB_CBCR,
+	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(10),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp1_ahb_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_blsp1_ahb_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_blsp2_ahb_clk = {
+	.cbcr_reg = BLSP2_AHB_CBCR,
+	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(20),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_blsp2_ahb_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_blsp2_ahb_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_boot_rom_ahb_clk = {
+	.cbcr_reg = BOOT_ROM_AHB_CBCR,
+	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(7),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_boot_rom_ahb_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_boot_rom_ahb_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_crypto_ahb_clk = {
+	.cbcr_reg = CRYPTO_AHB_CBCR,
+	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(0),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_crypto_ahb_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_crypto_ahb_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_crypto_axi_clk = {
+	.cbcr_reg = CRYPTO_AXI_CBCR,
+	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(1),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_crypto_axi_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_crypto_axi_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_crypto_clk = {
+	.cbcr_reg = CRYPTO_CBCR,
+	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(2),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_crypto_clk",
+		.parent = &crypto_clk_src.c,
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_crypto_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_qdss_dap_clk = {
+	.cbcr_reg = QDSS_DAP_CBCR,
+	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(11),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_qdss_dap_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_qdss_dap_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_prng_ahb_clk = {
+	.cbcr_reg = PRNG_AHB_CBCR,
+	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(8),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_prng_ahb_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_prng_ahb_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_apss_tcu_async_clk = {
+	.cbcr_reg = APSS_TCU_ASYNC_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(1),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_apss_tcu_async_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_apss_tcu_async_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_cpp_tbu_clk = {
+	.cbcr_reg = CPP_TBU_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(14),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_cpp_tbu_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_cpp_tbu_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_jpeg_tbu_clk = {
+	.cbcr_reg = JPEG_TBU_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(10),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_jpeg_tbu_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_jpeg_tbu_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_mdp_tbu_clk = {
+	.cbcr_reg = MDP_TBU_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(4),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdp_tbu_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_mdp_tbu_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_smmu_cfg_clk = {
+	.cbcr_reg = SMMU_CFG_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(12),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_smmu_cfg_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_smmu_cfg_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_venus_tbu_clk = {
+	.cbcr_reg = VENUS_TBU_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(5),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_venus_tbu_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_venus_tbu_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_vfe1_tbu_clk = {
+	.cbcr_reg = VFE1_TBU_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(17),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_vfe1_tbu_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_vfe1_tbu_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_vfe_tbu_clk = {
+	.cbcr_reg = VFE_TBU_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(9),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_vfe_tbu_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_vfe_tbu_clk.c),
+	},
+};
+
+static struct clk_ops clk_ops_debug_mux;
+
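+/*
+ * Debug mux plumbing: any source in the MUX_SRC_LIST below can be routed
+ * to the CLOCK_FRQ_MEASURE_CTL/STATUS counter and measured against the
+ * 19.2 MHz XO reference.
+ */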
+static struct measure_clk_data debug_mux_priv = {
+	.cxo = &xo_clk_src.c,
+	.plltest_reg = PLLTEST_PAD_CFG,
+	.plltest_val = 0x51A00,
+	.xo_div4_cbcr = GCC_XO_DIV4_CBCR,
+	.ctl_reg = CLOCK_FRQ_MEASURE_CTL,
+	.status_reg = CLOCK_FRQ_MEASURE_STATUS,
+	.base = &virt_bases[GCC_BASE],
+};
+
+static struct mux_clk gcc_debug_mux = {
+	.priv = &debug_mux_priv,
+	.ops = &mux_reg_ops,
+	.offset = GCC_DEBUG_CLK_CTL,
+	.mask = 0x1FF,
+	.en_offset = GCC_DEBUG_CLK_CTL,
+	.en_mask = BIT(16),
+	.base = &virt_bases[GCC_BASE],
+	MUX_REC_SRC_LIST(
+		&debug_cpu_clk.c,
+	),
+	MUX_SRC_LIST(
+		{ &debug_cpu_clk.c, 0x016a },
+		{ &snoc_clk.c, 0x0000 },
+		{ &sysmmnoc_clk.c, 0x0001 },
+		{ &pcnoc_clk.c, 0x0008 },
+		{ &bimc_clk.c, 0x015a },
+		{ &ipa_clk.c, 0x01b0 },
+		{ &gcc_dcc_clk.c, 0x000d },
+		{ &gcc_pcnoc_usb3_axi_clk.c, 0x000e },
+		{ &gcc_gp1_clk.c, 0x0010 },
+		{ &gcc_gp2_clk.c, 0x0011 },
+		{ &gcc_gp3_clk.c, 0x0012 },
+		{ &gcc_apc0_droop_detector_gpll0_clk.c, 0x001c },
+		{ &gcc_camss_csi2phytimer_clk.c, 0x001d },
+		{ &gcc_apc1_droop_detector_gpll0_clk.c, 0x001f },
+		{ &gcc_bimc_gfx_clk.c, 0x002d },
+		{ &gcc_mss_cfg_ahb_clk.c, 0x0030 },
+		{ &gcc_mss_q6_bimc_axi_clk.c, 0x0031 },
+		{ &gcc_qdss_dap_clk.c, 0x0049 },
+		{ &gcc_apss_tcu_async_clk.c, 0x0050 },
+		{ &gcc_mdp_tbu_clk.c, 0x0051 },
+		{ &gcc_venus_tbu_clk.c, 0x0054 },
+		{ &gcc_vfe_tbu_clk.c, 0x005a },
+		{ &gcc_smmu_cfg_clk.c, 0x005b },
+		{ &gcc_jpeg_tbu_clk.c, 0x005c },
+		{ &gcc_usb30_master_clk.c, 0x0060 },
+		{ &gcc_usb30_sleep_clk.c, 0x0061 },
+		{ &gcc_usb30_mock_utmi_clk.c, 0x0062 },
+		{ &gcc_usb_phy_cfg_ahb_clk.c, 0x0063 },
+		{ &gcc_usb3_pipe_clk.c, 0x0066 },
+		{ &gcc_usb3_aux_clk.c, 0x0067 },
+		{ &gcc_sdcc1_apps_clk.c, 0x0068 },
+		{ &gcc_sdcc1_ahb_clk.c, 0x0069 },
+		{ &gcc_sdcc1_ice_core_clk.c, 0x006a },
+		{ &gcc_sdcc2_apps_clk.c, 0x0070 },
+		{ &gcc_sdcc2_ahb_clk.c, 0x0071 },
+		{ &gcc_blsp1_ahb_clk.c, 0x0088 },
+		{ &gcc_blsp1_qup1_spi_apps_clk.c, 0x008a },
+		{ &gcc_blsp1_qup1_i2c_apps_clk.c, 0x008b },
+		{ &gcc_blsp1_uart1_apps_clk.c, 0x008c },
+		{ &gcc_blsp1_qup2_spi_apps_clk.c, 0x008e },
+		{ &gcc_blsp1_qup2_i2c_apps_clk.c, 0x0090 },
+		{ &gcc_blsp1_uart2_apps_clk.c, 0x0091 },
+		{ &gcc_blsp1_qup3_spi_apps_clk.c, 0x0093 },
+		{ &gcc_blsp1_qup3_i2c_apps_clk.c, 0x0094 },
+		{ &gcc_blsp1_qup4_spi_apps_clk.c, 0x0095 },
+		{ &gcc_blsp1_qup4_i2c_apps_clk.c, 0x0096 },
+		{ &gcc_blsp2_ahb_clk.c, 0x0098 },
+		{ &gcc_blsp2_qup1_spi_apps_clk.c, 0x009a },
+		{ &gcc_blsp2_qup1_i2c_apps_clk.c, 0x009b },
+		{ &gcc_blsp2_uart1_apps_clk.c, 0x009c },
+		{ &gcc_blsp2_qup2_spi_apps_clk.c, 0x009e },
+		{ &gcc_blsp2_qup2_i2c_apps_clk.c, 0x00a0 },
+		{ &gcc_blsp2_uart2_apps_clk.c, 0x00a1 },
+		{ &gcc_blsp2_qup3_spi_apps_clk.c, 0x00a3 },
+		{ &gcc_blsp2_qup3_i2c_apps_clk.c, 0x00a4 },
+		{ &gcc_blsp2_qup4_spi_apps_clk.c, 0x00a5 },
+		{ &gcc_blsp2_qup4_i2c_apps_clk.c, 0x00a6 },
+		{ &gcc_camss_ahb_clk.c, 0x00a8 },
+		{ &gcc_camss_top_ahb_clk.c, 0x00a9 },
+		{ &gcc_camss_micro_ahb_clk.c, 0x00aa },
+		{ &gcc_camss_gp0_clk.c, 0x00ab },
+		{ &gcc_camss_gp1_clk.c, 0x00ac },
+		{ &gcc_camss_mclk0_clk.c, 0x00ad },
+		{ &gcc_camss_mclk1_clk.c, 0x00ae },
+		{ &gcc_camss_cci_clk.c, 0x00af },
+		{ &gcc_camss_cci_ahb_clk.c, 0x00b0 },
+		{ &gcc_camss_csi0phytimer_clk.c, 0x00b1 },
+		{ &gcc_camss_csi1phytimer_clk.c, 0x00b2 },
+		{ &gcc_camss_jpeg0_clk.c, 0x00b3 },
+		{ &gcc_camss_jpeg_ahb_clk.c, 0x00b4 },
+		{ &gcc_camss_jpeg_axi_clk.c, 0x00b5 },
+		{ &gcc_camss_vfe0_clk.c, 0x00b8 },
+		{ &gcc_camss_cpp_clk.c, 0x00b9 },
+		{ &gcc_camss_cpp_ahb_clk.c, 0x00ba },
+		{ &gcc_camss_vfe_ahb_clk.c, 0x00bb },
+		{ &gcc_camss_vfe_axi_clk.c, 0x00bc },
+		{ &gcc_camss_csi_vfe0_clk.c, 0x00bf },
+		{ &gcc_camss_csi0_clk.c, 0x00c0 },
+		{ &gcc_camss_csi0_ahb_clk.c, 0x00c1 },
+		{ &gcc_camss_csi0phy_clk.c, 0x00c2 },
+		{ &gcc_camss_csi0rdi_clk.c, 0x00c3 },
+		{ &gcc_camss_csi0pix_clk.c, 0x00c4 },
+		{ &gcc_camss_csi1_clk.c, 0x00c5 },
+		{ &gcc_camss_csi1_ahb_clk.c, 0x00c6 },
+		{ &gcc_camss_csi1phy_clk.c, 0x00c7 },
+		{ &gcc_pdm_ahb_clk.c, 0x00d0 },
+		{ &gcc_pdm2_clk.c, 0x00d2 },
+		{ &gcc_prng_ahb_clk.c, 0x00d8 },
+		{ &gcc_mdss_byte1_clk.c, 0x00da },
+		{ &gcc_mdss_esc1_clk.c, 0x00db },
+		{ &gcc_camss_csi0_csiphy_3p_clk.c, 0x00dc },
+		{ &gcc_camss_csi1_csiphy_3p_clk.c, 0x00dd },
+		{ &gcc_camss_csi2_csiphy_3p_clk.c, 0x00de },
+		{ &gcc_camss_csi1rdi_clk.c, 0x00e0 },
+		{ &gcc_camss_csi1pix_clk.c, 0x00e1 },
+		{ &gcc_camss_ispif_ahb_clk.c, 0x00e2 },
+		{ &gcc_camss_csi2_clk.c, 0x00e3 },
+		{ &gcc_camss_csi2_ahb_clk.c, 0x00e4 },
+		{ &gcc_camss_csi2phy_clk.c, 0x00e5 },
+		{ &gcc_camss_csi2rdi_clk.c, 0x00e6 },
+		{ &gcc_camss_csi2pix_clk.c, 0x00e7 },
+		{ &gcc_cpp_tbu_clk.c, 0x00e9 },
+		{ &gcc_rbcpr_gfx_clk.c, 0x00f0 },
+		{ &gcc_boot_rom_ahb_clk.c, 0x00f8 },
+		{ &gcc_crypto_clk.c, 0x0138 },
+		{ &gcc_crypto_axi_clk.c, 0x0139 },
+		{ &gcc_crypto_ahb_clk.c, 0x013a },
+		{ &gcc_bimc_gpu_clk.c, 0x0157 },
+		{ &gcc_apss_ahb_clk.c, 0x0168 },
+		{ &gcc_apss_axi_clk.c, 0x0169 },
+		{ &gcc_vfe1_tbu_clk.c, 0x0199 },
+		{ &gcc_camss_csi_vfe1_clk.c, 0x01a0 },
+		{ &gcc_camss_vfe1_clk.c, 0x01a1 },
+		{ &gcc_camss_vfe1_ahb_clk.c, 0x01a2 },
+		{ &gcc_camss_vfe1_axi_clk.c, 0x01a3 },
+		{ &gcc_camss_cpp_axi_clk.c, 0x01a4 },
+		{ &gcc_venus0_core0_vcodec0_clk.c, 0x01b8 },
+		{ &gcc_camss_mclk2_clk.c, 0x01bd },
+		{ &gcc_camss_mclk3_clk.c, 0x01bf },
+		{ &gcc_oxili_aon_clk.c, 0x01e8 },
+		{ &gcc_oxili_timer_clk.c, 0x01e9 },
+		{ &gcc_oxili_gfx3d_clk.c, 0x01ea },
+		{ &gcc_oxili_ahb_clk.c, 0x01eb },
+		{ &gcc_venus0_vcodec0_clk.c, 0x01f1 },
+		{ &gcc_venus0_axi_clk.c, 0x01f2 },
+		{ &gcc_venus0_ahb_clk.c, 0x01f3 },
+		{ &gcc_mdss_ahb_clk.c, 0x01f6 },
+		{ &gcc_mdss_axi_clk.c, 0x01f7 },
+		{ &gcc_mdss_pclk0_clk.c, 0x01f8 },
+		{ &gcc_mdss_mdp_clk.c, 0x01f9 },
+		{ &gcc_mdss_pclk1_clk.c, 0x01fa },
+		{ &gcc_mdss_vsync_clk.c, 0x01fb },
+		{ &gcc_mdss_byte0_clk.c, 0x01fc },
+		{ &gcc_mdss_esc0_clk.c, 0x01fd },
+		{ &wcnss_m_clk.c, 0x00ec },
+	),
+	.c = {
+		.dbg_name = "gcc_debug_mux",
+		.ops = &clk_ops_debug_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE | CLKFLAG_MEASURE,
+		CLK_INIT(gcc_debug_mux.c),
+	},
+};
+
+static struct clk_lookup msm_clocks_lookup[] = {
+	CLK_LIST(xo_clk_src),
+	CLK_LIST(xo_a_clk_src),
+	CLK_LIST(bimc_clk),
+	CLK_LIST(bimc_a_clk),
+	CLK_LIST(pcnoc_clk),
+	CLK_LIST(pcnoc_a_clk),
+	CLK_LIST(snoc_clk),
+	CLK_LIST(snoc_a_clk),
+	CLK_LIST(sysmmnoc_clk),
+	CLK_LIST(sysmmnoc_a_clk),
+	CLK_LIST(ipa_clk),
+	CLK_LIST(ipa_a_clk),
+	CLK_LIST(qdss_clk),
+	CLK_LIST(qdss_a_clk),
+	CLK_LIST(bimc_msmbus_clk),
+	CLK_LIST(bimc_msmbus_a_clk),
+	CLK_LIST(bimc_usb_clk),
+	CLK_LIST(bimc_usb_a_clk),
+	CLK_LIST(bimc_wcnss_a_clk),
+	CLK_LIST(pcnoc_keepalive_a_clk),
+	CLK_LIST(pcnoc_msmbus_clk),
+	CLK_LIST(pcnoc_msmbus_a_clk),
+	CLK_LIST(pcnoc_usb_clk),
+	CLK_LIST(pcnoc_usb_a_clk),
+	CLK_LIST(snoc_msmbus_clk),
+	CLK_LIST(snoc_msmbus_a_clk),
+	CLK_LIST(snoc_usb_clk),
+	CLK_LIST(snoc_usb_a_clk),
+	CLK_LIST(snoc_wcnss_a_clk),
+	CLK_LIST(sysmmnoc_msmbus_clk),
+	CLK_LIST(sysmmnoc_msmbus_a_clk),
+	CLK_LIST(xo_dwc3_clk),
+	CLK_LIST(xo_lpm_clk),
+	CLK_LIST(xo_pil_lpass_clk),
+	CLK_LIST(xo_pil_mss_clk),
+	CLK_LIST(xo_pil_pronto_clk),
+	CLK_LIST(xo_wlan_clk),
+	CLK_LIST(wcnss_m_clk),
+	CLK_LIST(rf_clk2),
+	CLK_LIST(rf_clk2_a),
+	CLK_LIST(rf_clk3),
+	CLK_LIST(rf_clk3_a),
+	CLK_LIST(bb_clk1),
+	CLK_LIST(bb_clk1_a),
+	CLK_LIST(bb_clk1_pin),
+	CLK_LIST(bb_clk1_a_pin),
+	CLK_LIST(bb_clk2),
+	CLK_LIST(bb_clk2_a),
+	CLK_LIST(bb_clk2_pin),
+	CLK_LIST(bb_clk2_a_pin),
+	CLK_LIST(div_clk2),
+	CLK_LIST(div_clk2_a),
+	CLK_LIST(gpll0_clk_src),
+	CLK_LIST(gpll6_clk_src),
+	CLK_LIST(gpll2_clk_src),
+	CLK_LIST(gpll4_clk_src),
+	CLK_LIST(gpll3_clk_src),
+	CLK_LIST(gcc_apss_ahb_clk),
+	CLK_LIST(gcc_apss_axi_clk),
+	CLK_LIST(gcc_blsp1_ahb_clk),
+	CLK_LIST(gcc_blsp2_ahb_clk),
+	CLK_LIST(gcc_boot_rom_ahb_clk),
+	CLK_LIST(gcc_crypto_ahb_clk),
+	CLK_LIST(gcc_crypto_axi_clk),
+	CLK_LIST(gcc_crypto_clk),
+	CLK_LIST(gcc_prng_ahb_clk),
+	CLK_LIST(gcc_qdss_dap_clk),
+	CLK_LIST(gcc_apss_tcu_async_clk),
+	CLK_LIST(gcc_cpp_tbu_clk),
+	CLK_LIST(gcc_jpeg_tbu_clk),
+	CLK_LIST(gcc_mdp_tbu_clk),
+	CLK_LIST(gcc_smmu_cfg_clk),
+	CLK_LIST(gcc_venus_tbu_clk),
+	CLK_LIST(gcc_vfe1_tbu_clk),
+	CLK_LIST(gcc_vfe_tbu_clk),
+	CLK_LIST(camss_top_ahb_clk_src),
+	CLK_LIST(csi0_clk_src),
+	CLK_LIST(apss_ahb_clk_src),
+	CLK_LIST(csi1_clk_src),
+	CLK_LIST(csi2_clk_src),
+	CLK_LIST(vfe0_clk_src),
+	CLK_LIST(vcodec0_clk_src),
+	CLK_LIST(cpp_clk_src),
+	CLK_LIST(jpeg0_clk_src),
+	CLK_LIST(usb30_master_clk_src),
+	CLK_LIST(vfe1_clk_src),
+	CLK_LIST(apc0_droop_detector_clk_src),
+	CLK_LIST(apc1_droop_detector_clk_src),
+	CLK_LIST(blsp1_qup1_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup1_spi_apps_clk_src),
+	CLK_LIST(blsp1_qup2_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup2_spi_apps_clk_src),
+	CLK_LIST(blsp1_qup3_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup3_spi_apps_clk_src),
+	CLK_LIST(blsp1_qup4_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup4_spi_apps_clk_src),
+	CLK_LIST(blsp1_uart1_apps_clk_src),
+	CLK_LIST(blsp1_uart2_apps_clk_src),
+	CLK_LIST(blsp2_qup1_i2c_apps_clk_src),
+	CLK_LIST(blsp2_qup1_spi_apps_clk_src),
+	CLK_LIST(blsp2_qup2_i2c_apps_clk_src),
+	CLK_LIST(blsp2_qup2_spi_apps_clk_src),
+	CLK_LIST(blsp2_qup3_i2c_apps_clk_src),
+	CLK_LIST(blsp2_qup3_spi_apps_clk_src),
+	CLK_LIST(blsp2_qup4_i2c_apps_clk_src),
+	CLK_LIST(blsp2_qup4_spi_apps_clk_src),
+	CLK_LIST(blsp2_uart1_apps_clk_src),
+	CLK_LIST(blsp2_uart2_apps_clk_src),
+	CLK_LIST(cci_clk_src),
+	CLK_LIST(csi0p_clk_src),
+	CLK_LIST(csi1p_clk_src),
+	CLK_LIST(csi2p_clk_src),
+	CLK_LIST(camss_gp0_clk_src),
+	CLK_LIST(camss_gp1_clk_src),
+	CLK_LIST(mclk0_clk_src),
+	CLK_LIST(mclk1_clk_src),
+	CLK_LIST(mclk2_clk_src),
+	CLK_LIST(mclk3_clk_src),
+	CLK_LIST(csi0phytimer_clk_src),
+	CLK_LIST(csi1phytimer_clk_src),
+	CLK_LIST(csi2phytimer_clk_src),
+	CLK_LIST(crypto_clk_src),
+	CLK_LIST(gp1_clk_src),
+	CLK_LIST(gp2_clk_src),
+	CLK_LIST(gp3_clk_src),
+	CLK_LIST(pdm2_clk_src),
+	CLK_LIST(rbcpr_gfx_clk_src),
+	CLK_LIST(sdcc1_apps_clk_src),
+	CLK_LIST(sdcc1_ice_core_clk_src),
+	CLK_LIST(sdcc2_apps_clk_src),
+	CLK_LIST(usb30_mock_utmi_clk_src),
+	CLK_LIST(usb3_aux_clk_src),
+	CLK_LIST(gcc_apc0_droop_detector_gpll0_clk),
+	CLK_LIST(gcc_apc1_droop_detector_gpll0_clk),
+	CLK_LIST(gcc_blsp1_qup1_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup1_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_qup2_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup2_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_qup3_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup3_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_qup4_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup4_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_uart1_apps_clk),
+	CLK_LIST(gcc_blsp1_uart2_apps_clk),
+	CLK_LIST(gcc_blsp2_qup1_i2c_apps_clk),
+	CLK_LIST(gcc_blsp2_qup1_spi_apps_clk),
+	CLK_LIST(gcc_blsp2_qup2_i2c_apps_clk),
+	CLK_LIST(gcc_blsp2_qup2_spi_apps_clk),
+	CLK_LIST(gcc_blsp2_qup3_i2c_apps_clk),
+	CLK_LIST(gcc_blsp2_qup3_spi_apps_clk),
+	CLK_LIST(gcc_blsp2_qup4_i2c_apps_clk),
+	CLK_LIST(gcc_blsp2_qup4_spi_apps_clk),
+	CLK_LIST(gcc_blsp2_uart1_apps_clk),
+	CLK_LIST(gcc_blsp2_uart2_apps_clk),
+	CLK_LIST(gcc_camss_cci_ahb_clk),
+	CLK_LIST(gcc_camss_cci_clk),
+	CLK_LIST(gcc_camss_cpp_ahb_clk),
+	CLK_LIST(gcc_camss_cpp_axi_clk),
+	CLK_LIST(gcc_camss_cpp_clk),
+	CLK_LIST(gcc_camss_csi0_ahb_clk),
+	CLK_LIST(gcc_camss_csi0_clk),
+	CLK_LIST(gcc_camss_csi0_csiphy_3p_clk),
+	CLK_LIST(gcc_camss_csi0phy_clk),
+	CLK_LIST(gcc_camss_csi0pix_clk),
+	CLK_LIST(gcc_camss_csi0rdi_clk),
+	CLK_LIST(gcc_camss_csi1_ahb_clk),
+	CLK_LIST(gcc_camss_csi1_clk),
+	CLK_LIST(gcc_camss_csi1_csiphy_3p_clk),
+	CLK_LIST(gcc_camss_csi1phy_clk),
+	CLK_LIST(gcc_camss_csi1pix_clk),
+	CLK_LIST(gcc_camss_csi1rdi_clk),
+	CLK_LIST(gcc_camss_csi2_ahb_clk),
+	CLK_LIST(gcc_camss_csi2_clk),
+	CLK_LIST(gcc_camss_csi2_csiphy_3p_clk),
+	CLK_LIST(gcc_camss_csi2phy_clk),
+	CLK_LIST(gcc_camss_csi2pix_clk),
+	CLK_LIST(gcc_camss_csi2rdi_clk),
+	CLK_LIST(gcc_camss_csi_vfe0_clk),
+	CLK_LIST(gcc_camss_csi_vfe1_clk),
+	CLK_LIST(gcc_camss_gp0_clk),
+	CLK_LIST(gcc_camss_gp1_clk),
+	CLK_LIST(gcc_camss_ispif_ahb_clk),
+	CLK_LIST(gcc_camss_jpeg0_clk),
+	CLK_LIST(gcc_camss_jpeg_ahb_clk),
+	CLK_LIST(gcc_camss_jpeg_axi_clk),
+	CLK_LIST(gcc_camss_mclk0_clk),
+	CLK_LIST(gcc_camss_mclk1_clk),
+	CLK_LIST(gcc_camss_mclk2_clk),
+	CLK_LIST(gcc_camss_mclk3_clk),
+	CLK_LIST(gcc_camss_micro_ahb_clk),
+	CLK_LIST(gcc_camss_csi0phytimer_clk),
+	CLK_LIST(gcc_camss_csi1phytimer_clk),
+	CLK_LIST(gcc_camss_csi2phytimer_clk),
+	CLK_LIST(gcc_camss_ahb_clk),
+	CLK_LIST(gcc_camss_top_ahb_clk),
+	CLK_LIST(gcc_camss_vfe0_clk),
+	CLK_LIST(gcc_camss_vfe_ahb_clk),
+	CLK_LIST(gcc_camss_vfe_axi_clk),
+	CLK_LIST(gcc_camss_vfe1_ahb_clk),
+	CLK_LIST(gcc_camss_vfe1_axi_clk),
+	CLK_LIST(gcc_camss_vfe1_clk),
+	CLK_LIST(gcc_dcc_clk),
+	CLK_LIST(gcc_gp1_clk),
+	CLK_LIST(gcc_gp2_clk),
+	CLK_LIST(gcc_gp3_clk),
+	CLK_LIST(gcc_mss_cfg_ahb_clk),
+	CLK_LIST(gcc_mss_q6_bimc_axi_clk),
+	CLK_LIST(gcc_pcnoc_usb3_axi_clk),
+	CLK_LIST(gcc_pdm2_clk),
+	CLK_LIST(gcc_pdm_ahb_clk),
+	CLK_LIST(gcc_rbcpr_gfx_clk),
+	CLK_LIST(gcc_sdcc1_ahb_clk),
+	CLK_LIST(gcc_sdcc1_apps_clk),
+	CLK_LIST(gcc_sdcc1_ice_core_clk),
+	CLK_LIST(gcc_sdcc2_ahb_clk),
+	CLK_LIST(gcc_sdcc2_apps_clk),
+	CLK_LIST(gcc_usb30_master_clk),
+	CLK_LIST(gcc_usb30_mock_utmi_clk),
+	CLK_LIST(gcc_usb30_sleep_clk),
+	CLK_LIST(gcc_usb3_aux_clk),
+	CLK_LIST(gcc_usb_phy_cfg_ahb_clk),
+	CLK_LIST(gcc_venus0_ahb_clk),
+	CLK_LIST(gcc_venus0_axi_clk),
+	CLK_LIST(gcc_venus0_core0_vcodec0_clk),
+	CLK_LIST(gcc_venus0_vcodec0_clk),
+	CLK_LIST(gcc_qusb_ref_clk),
+	CLK_LIST(gcc_usb_ss_ref_clk),
+	CLK_LIST(gcc_usb3_pipe_clk),
+	CLK_LIST(gcc_qusb2_phy_reset),
+	CLK_LIST(gcc_usb3_phy_reset),
+	CLK_LIST(gcc_usb3phy_phy_reset),
+
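+	/* MDSS clocks managed by the GCC block */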
+	CLK_LIST(mdp_clk_src),
+	CLK_LIST(esc0_clk_src),
+	CLK_LIST(esc1_clk_src),
+	CLK_LIST(vsync_clk_src),
+	CLK_LIST(gcc_mdss_ahb_clk),
+	CLK_LIST(gcc_mdss_axi_clk),
+	CLK_LIST(gcc_mdss_esc0_clk),
+	CLK_LIST(gcc_mdss_esc1_clk),
+	CLK_LIST(gcc_mdss_mdp_clk),
+	CLK_LIST(gcc_mdss_vsync_clk),
+};
+
+static const struct msm_reset_map gcc_8953_resets[] = {
+	[GCC_QUSB2_PHY_BCR] = { 0x4103C },
+	[GCC_USB3_PHY_BCR] = { 0x3F034 },
+	[GCC_USB3PHY_PHY_BCR] = { 0x3F03C },
+	[GCC_USB_30_BCR] = { 0x3F070 },
+	[GCC_CAMSS_MICRO_BCR] = { 0x56008 },
+};
+
+#define SPEED_BIN	7
+
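+/*
+ * The speed bin is fused in bits [10:8] of the efuse word read below;
+ * bin 7 parts are allowed the 540 MHz vcodec0 frequency plan.
+ */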
+static void override_for_8953(struct platform_device *pdev)
+{
+	struct resource *res;
+	void __iomem *base;
+	u32 config_efuse, bin;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse");
+	if (!res)
+		return;
+
+	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!base) {
+		dev_warn(&pdev->dev,
+			"Unable to ioremap efuse reg address. Defaulting to 0.\n");
+		return;
+	}
+
+	config_efuse = readl_relaxed(base);
+	devm_iounmap(&pdev->dev, base);
+
+	bin = (config_efuse >> 8) & 0x7;
+
+	if (bin == SPEED_BIN) {
+		vcodec0_clk_src.freq_tbl = ftbl_vcodec0_clk_src_540MHz;
+		vcodec0_clk_src.c.fmax[VDD_DIG_HIGH] = 540000000;
+	}
+
+	dev_info(&pdev->dev, "Venus speed bin: %u\n", bin);
+}
+
+static int msm_gcc_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret;
+	u32 regval;
+
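+	/* Vote the maximum BIMC rate while the clocks are registered. */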
+	ret = vote_bimc(&bimc_clk, INT_MAX);
+	if (ret < 0)
+		return ret;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+	if (!res) {
+		dev_err(&pdev->dev, "Register base not defined\n");
+		return -ENOMEM;
+	}
+
+	virt_bases[GCC_BASE] = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+	if (!virt_bases[GCC_BASE]) {
+		dev_err(&pdev->dev, "Failed to ioremap CC registers\n");
+		return -ENOMEM;
+	}
+
+	vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
+	if (IS_ERR(vdd_dig.regulator[0])) {
+		if (PTR_ERR(vdd_dig.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+					"Unable to get vdd_dig regulator!\n");
+		return PTR_ERR(vdd_dig.regulator[0]);
+	}
+
+	override_for_8953(pdev);
+
+	/* Vote for GPLL0 to turn on. Needed by acpuclock. */
+	regval = readl_relaxed(GCC_REG_BASE(APCS_GPLL_ENA_VOTE));
+	regval |= BIT(0);
+	writel_relaxed(regval, GCC_REG_BASE(APCS_GPLL_ENA_VOTE));
+
+	ret = of_msm_clock_register(pdev->dev.of_node,
+				msm_clocks_lookup,
+				ARRAY_SIZE(msm_clocks_lookup));
+	if (ret)
+		return ret;
+
+	ret = enable_rpm_scaling();
+	if (ret < 0) {
+		dev_err(&pdev->dev, "rpm scaling failed to enable %d\n", ret);
+		return ret;
+	}
+
+	clk_set_rate(&apss_ahb_clk_src.c, 19200000);
+	clk_prepare_enable(&apss_ahb_clk_src.c);
+
+	clk_prepare_enable(&gcc_blsp1_ahb_clk.c);
+	clk_prepare_enable(&gcc_usb30_master_clk.c);
+	clk_prepare_enable(&gcc_usb30_mock_utmi_clk.c);
+	clk_prepare_enable(&gcc_blsp1_uart1_apps_clk.c);
+	clk_prepare_enable(&gcc_apss_ahb_clk.c);
+	clk_prepare_enable(&gcc_crypto_ahb_clk.c);
+	clk_prepare_enable(&gcc_crypto_axi_clk.c);
+	/*
+	 * Hold an active set vote for PCNOC AHB source. Sleep set
+	 * vote is 0.
+	 */
+	clk_set_rate(&pcnoc_keepalive_a_clk.c, 19200000);
+	clk_prepare_enable(&pcnoc_keepalive_a_clk.c);
+
+	clk_prepare_enable(&xo_a_clk_src.c);
+	msm_reset_controller_register(pdev, gcc_8953_resets,
+			ARRAY_SIZE(gcc_8953_resets), virt_bases[GCC_BASE]);
+
+	dev_info(&pdev->dev, "Registered GCC clocks\n");
+
+	return 0;
+}
+
+static const struct of_device_id msm_clock_gcc_match_table[] = {
+	{ .compatible = "qcom,gcc-8953" },
+	{},
+};
+
+static struct platform_driver msm_clock_gcc_driver = {
+	.probe = msm_gcc_probe,
+	.driver = {
+		.name = "qcom,gcc-8953",
+		.of_match_table = msm_clock_gcc_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_gcc_init(void)
+{
+	return platform_driver_register(&msm_clock_gcc_driver);
+}
+arch_initcall(msm_gcc_init);
+
+static struct clk_lookup msm_clocks_measure[] = {
+	CLK_LOOKUP_OF("measure", gcc_debug_mux, "debug"),
+	CLK_LIST(debug_cpu_clk),
+};
+
+static int msm_clock_debug_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	clk_ops_debug_mux = clk_ops_gen_mux;
+	clk_ops_debug_mux.get_rate = measure_get_rate;
+
+	debug_cpu_clk.c.parent = devm_clk_get(&pdev->dev, "debug_cpu_clk");
+	if (IS_ERR(debug_cpu_clk.c.parent)) {
+		dev_err(&pdev->dev, "Failed to get CPU debug Mux\n");
+		return PTR_ERR(debug_cpu_clk.c.parent);
+	}
+
+	ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_measure,
+					ARRAY_SIZE(msm_clocks_measure));
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register debug Mux\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered Debug Mux successfully\n");
+	return ret;
+}
+
+static const struct of_device_id msm_clock_debug_match_table[] = {
+	{ .compatible = "qcom,cc-debug-8953" },
+	{}
+};
+
+static struct platform_driver msm_clock_debug_driver = {
+	.probe = msm_clock_debug_probe,
+	.driver = {
+		.name = "qcom,cc-debug-8953",
+		.of_match_table = msm_clock_debug_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
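+/*
+ * Register at late_initcall so that the clocks selectable through the
+ * debug mux have already been registered by their own drivers.
+ */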
+static int __init msm_clock_debug_init(void)
+{
+	return platform_driver_register(&msm_clock_debug_driver);
+}
+late_initcall(msm_clock_debug_init);
+
+/* MDSS DSI_PHY_PLL */
+static struct clk_lookup msm_clocks_gcc_mdss[] = {
+	CLK_LIST(ext_pclk0_clk_src),
+	CLK_LIST(ext_pclk1_clk_src),
+	CLK_LIST(ext_byte0_clk_src),
+	CLK_LIST(ext_byte1_clk_src),
+	CLK_LIST(pclk0_clk_src),
+	CLK_LIST(pclk1_clk_src),
+	CLK_LIST(byte0_clk_src),
+	CLK_LIST(byte1_clk_src),
+	CLK_LIST(gcc_mdss_pclk0_clk),
+	CLK_LIST(gcc_mdss_pclk1_clk),
+	CLK_LIST(gcc_mdss_byte0_clk),
+	CLK_LIST(gcc_mdss_byte1_clk),
+	CLK_LIST(mdss_mdp_vote_clk),
+	CLK_LIST(mdss_rotator_vote_clk),
+};
+
+static int msm_gcc_mdss_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct clk *curr_p;
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+	if (!res) {
+		dev_err(&pdev->dev, "Register base not defined\n");
+		return -ENOMEM;
+	}
+
+	virt_bases[MDSS_BASE] = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	if (!virt_bases[MDSS_BASE]) {
+		dev_err(&pdev->dev, "Failed to ioremap CC registers\n");
+		return -ENOMEM;
+	}
+
+	curr_p = ext_pclk0_clk_src.c.parent = devm_clk_get(&pdev->dev,
+								"pclk0_src");
+	if (IS_ERR(curr_p)) {
+		dev_err(&pdev->dev, "Failed to get pclk0 source.\n");
+		return PTR_ERR(curr_p);
+	}
+
+	curr_p = ext_pclk1_clk_src.c.parent = devm_clk_get(&pdev->dev,
+								"pclk1_src");
+	if (IS_ERR(curr_p)) {
+		dev_err(&pdev->dev, "Failed to get pclk1 source.\n");
+		ret = PTR_ERR(curr_p);
+		goto pclk1_fail;
+	}
+
+	curr_p = ext_byte0_clk_src.c.parent = devm_clk_get(&pdev->dev,
+								"byte0_src");
+	if (IS_ERR(curr_p)) {
+		dev_err(&pdev->dev, "Failed to get byte0 source.\n");
+		ret = PTR_ERR(curr_p);
+		goto byte0_fail;
+	}
+
+	curr_p = ext_byte1_clk_src.c.parent = devm_clk_get(&pdev->dev,
+								"byte1_src");
+	if (IS_ERR(curr_p)) {
+		dev_err(&pdev->dev, "Failed to get byte1 source.\n");
+		ret = PTR_ERR(curr_p);
+		goto byte1_fail;
+	}
+
+	ext_pclk0_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_pclk1_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_byte0_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_byte1_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+
+	ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_gcc_mdss,
+					ARRAY_SIZE(msm_clocks_gcc_mdss));
+	if (ret)
+		goto fail;
+
+	dev_info(&pdev->dev, "Registered GCC MDSS clocks.\n");
+
+	return ret;
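+	/*
+	 * Explicit unwinding of devm-managed clock handles; the driver core
+	 * would also release these if probe failed.
+	 */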
+fail:
+	devm_clk_put(&pdev->dev, ext_byte1_clk_src.c.parent);
+byte1_fail:
+	devm_clk_put(&pdev->dev, ext_byte0_clk_src.c.parent);
+byte0_fail:
+	devm_clk_put(&pdev->dev, ext_pclk1_clk_src.c.parent);
+pclk1_fail:
+	devm_clk_put(&pdev->dev, ext_pclk0_clk_src.c.parent);
+	return ret;
+}
+
+static const struct of_device_id msm_clock_mdss_match_table[] = {
+	{ .compatible = "qcom,gcc-mdss-8953" },
+	{}
+};
+
+static struct platform_driver msm_clock_gcc_mdss_driver = {
+	.probe = msm_gcc_mdss_probe,
+	.driver = {
+		.name = "gcc-mdss-8953",
+		.of_match_table = msm_clock_mdss_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_gcc_mdss_init(void)
+{
+	return platform_driver_register(&msm_clock_gcc_mdss_driver);
+}
+fs_initcall_sync(msm_gcc_mdss_init);
+
+/* GFX Clocks */
+static struct clk_lookup msm_clocks_gcc_gfx[] = {
+	CLK_LIST(gfx3d_clk_src),
+	CLK_LIST(gcc_oxili_ahb_clk),
+	CLK_LIST(gcc_oxili_aon_clk),
+	CLK_LIST(gcc_oxili_gfx3d_clk),
+	CLK_LIST(gcc_oxili_timer_clk),
+	CLK_LIST(gcc_bimc_gfx_clk),
+	CLK_LIST(gcc_bimc_gpu_clk),
+};
+
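+/*
+ * Parse a flattened list of <frequency corner> pairs (e.g. the
+ * "qcom,gfxfreq-corner" property used below) into c->fmax[] and the
+ * vdd_class voltage table. A hypothetical device tree entry:
+ *
+ *	qcom,gfxfreq-corner = <133330000 1 650000000 7>;
+ */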
+static int of_get_fmax_vdd_class(struct platform_device *pdev, struct clk *c,
+								char *prop_name)
+{
+	struct device_node *of = pdev->dev.of_node;
+	int prop_len, i;
+	struct clk_vdd_class *vdd = c->vdd_class;
+	u32 *array;
+
+	if (!of_find_property(of, prop_name, &prop_len)) {
+		dev_err(&pdev->dev, "missing %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	prop_len /= sizeof(u32);
+	if (prop_len % 2) {
+		dev_err(&pdev->dev, "bad length %d\n", prop_len);
+		return -EINVAL;
+	}
+
+	prop_len /= 2;
+	vdd->level_votes = devm_kzalloc(&pdev->dev,
+				prop_len * sizeof(*vdd->level_votes),
+					GFP_KERNEL);
+	if (!vdd->level_votes)
+		return -ENOMEM;
+
+	vdd->vdd_uv = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
+					GFP_KERNEL);
+	if (!vdd->vdd_uv)
+		return -ENOMEM;
+
+	c->fmax = devm_kzalloc(&pdev->dev, prop_len * sizeof(unsigned long),
+					GFP_KERNEL);
+	if (!c->fmax)
+		return -ENOMEM;
+
+	array = devm_kzalloc(&pdev->dev,
+			prop_len * sizeof(u32) * 2, GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	of_property_read_u32_array(of, prop_name, array, prop_len * 2);
+	for (i = 0; i < prop_len; i++) {
+		c->fmax[i] = array[2 * i];
+		vdd->vdd_uv[i] = array[2 * i + 1];
+	}
+
+	devm_kfree(&pdev->dev, array);
+	vdd->num_levels = prop_len;
+	vdd->cur_level = prop_len;
+	c->num_fmax = prop_len;
+
+	return 0;
+}
+
+static int msm_gcc_gfx_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret;
+	u32 regval;
+	bool compat_bin = false;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+	if (!res) {
+		dev_err(&pdev->dev, "Register base not defined\n");
+		return -ENOMEM;
+	}
+
+	virt_bases[GFX_BASE] = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+	if (!virt_bases[GFX_BASE]) {
+		dev_err(&pdev->dev, "Failed to ioremap CC registers\n");
+		return -ENOMEM;
+	}
+
+	vdd_gfx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_gfx");
+	if (IS_ERR(vdd_gfx.regulator[0])) {
+		if (PTR_ERR(vdd_gfx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_gfx regulator!");
+		return PTR_ERR(vdd_gfx.regulator[0]);
+	}
+
+	compat_bin = of_device_is_compatible(pdev->dev.of_node,
+							"qcom,gcc-gfx-sdm450");
+	if (compat_bin)
+		gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_sdm450;
+
+	ret = of_get_fmax_vdd_class(pdev, &gcc_oxili_gfx3d_clk.c,
+					"qcom,gfxfreq-corner");
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to get gfx freq-corner mapping info\n");
+		return ret;
+	}
+
+	ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_gcc_gfx,
+				ARRAY_SIZE(msm_clocks_gcc_gfx));
+
+	/* Oxili Ocmem in GX rail: OXILI_GMEM_CLAMP_IO */
+	regval = readl_relaxed(GCC_REG_BASE(GX_DOMAIN_MISC));
+	regval &= ~BIT(0);
+	writel_relaxed(regval, GCC_REG_BASE(GX_DOMAIN_MISC));
+
+	dev_info(&pdev->dev, "Registered GCC GFX clocks.\n");
+
+	return ret;
+}
+
+static const struct of_device_id msm_clock_gfx_match_table[] = {
+	{ .compatible = "qcom,gcc-gfx-8953" },
+	{ .compatible = "qcom,gcc-gfx-sdm450" },
+	{}
+};
+
+static struct platform_driver msm_clock_gcc_gfx_driver = {
+	.probe = msm_gcc_gfx_probe,
+	.driver = {
+		.name = "gcc-gfx-8953",
+		.of_match_table = msm_clock_gfx_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_gcc_gfx_init(void)
+{
+	return platform_driver_register(&msm_clock_gcc_gfx_driver);
+}
+arch_initcall_sync(msm_gcc_gfx_init);
diff --git a/drivers/clk/msm/clock-generic.c b/drivers/clk/msm/clock-generic.c
new file mode 100644
index 0000000..b4e6bdd
--- /dev/null
+++ b/drivers/clk/msm/clock-generic.c
@@ -0,0 +1,921 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+/* ==================== Mux clock ==================== */
+
+static int mux_parent_to_src_sel(struct mux_clk *mux, struct clk *p)
+{
+	return parent_to_src_sel(mux->parents, mux->num_parents, p);
+}
+
+static int mux_set_parent(struct clk *c, struct clk *p)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	int sel = mux_parent_to_src_sel(mux, p);
+	struct clk *old_parent;
+	int rc = 0, i;
+	unsigned long flags;
+
+	if (sel < 0 && mux->rec_parents) {
+		for (i = 0; i < mux->num_rec_parents; i++) {
+			rc = clk_set_parent(mux->rec_parents[i], p);
+			if (!rc) {
+				/*
+				 * This is necessary to ensure prepare/enable
+				 * counts get propagated correctly.
+				 */
+				p = mux->rec_parents[i];
+				sel = mux_parent_to_src_sel(mux, p);
+				break;
+			}
+		}
+	}
+
+	if (sel < 0)
+		return sel;
+
+	rc = __clk_pre_reparent(c, p, &flags);
+	if (rc)
+		goto out;
+
+	rc = mux->ops->set_mux_sel(mux, sel);
+	if (rc)
+		goto set_fail;
+
+	old_parent = c->parent;
+	c->parent = p;
+	c->rate = clk_get_rate(p);
+	__clk_post_reparent(c, old_parent, &flags);
+
+	return 0;
+
+set_fail:
+	__clk_post_reparent(c, p, &flags);
+out:
+	return rc;
+}
+
+static long mux_round_rate(struct clk *c, unsigned long rate)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	int i;
+	unsigned long prate, rrate = 0;
+
+	for (i = 0; i < mux->num_parents; i++) {
+		prate = clk_round_rate(mux->parents[i].src, rate);
+		if (is_better_rate(rate, rrate, prate))
+			rrate = prate;
+	}
+	if (!rrate)
+		return -EINVAL;
+
+	return rrate;
+}
+
+static int mux_set_rate(struct clk *c, unsigned long rate)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	struct clk *new_parent = NULL;
+	int rc = 0, i;
+	unsigned long new_par_curr_rate;
+	unsigned long flags;
+
+	/*
+	 * Check if one of the possible parents is already at the requested
+	 * rate.
+	 */
+	for (i = 0; i < mux->num_parents && mux->try_get_rate; i++) {
+		struct clk *p = mux->parents[i].src;
+
+		if (p->rate == rate && clk_round_rate(p, rate) == rate) {
+			new_parent = mux->parents[i].src;
+			break;
+		}
+	}
+
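+	/*
+	 * This scan is skipped entirely when the fast path above has already
+	 * found a matching parent.
+	 */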
+	for (i = 0; i < mux->num_parents && !(!i && new_parent); i++) {
+		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
+			new_parent = mux->parents[i].src;
+			if (!mux->try_new_parent)
+				break;
+			if (mux->try_new_parent && new_parent != c->parent)
+				break;
+		}
+	}
+
+	if (new_parent == NULL)
+		return -EINVAL;
+
+	/*
+	 * Switch to safe parent since the old and new parent might be the
+	 * same and the parent might temporarily turn off while switching
+	 * rates. If the mux can switch between distinct sources safely
+	 * (indicated by try_new_parent), and the new source is not the current
+	 * parent, do not switch to the safe parent.
+	 */
+	if (mux->safe_sel >= 0 &&
+		!(mux->try_new_parent && (new_parent != c->parent))) {
+		/*
+		 * The safe parent might be a clock with multiple sources;
+		 * to select the "safe" source, set a safe frequency.
+		 */
+		if (mux->safe_freq) {
+			rc = clk_set_rate(mux->safe_parent, mux->safe_freq);
+			if (rc) {
+				pr_err("Failed to set safe rate on %s\n",
+					clk_name(mux->safe_parent));
+				return rc;
+			}
+		}
+
+		/*
+		 * Some mux implementations might switch to/from a low power
+		 * parent as part of their disable/enable ops. Grab the
+		 * enable lock to avoid racing with these implementations.
+		 */
+		spin_lock_irqsave(&c->lock, flags);
+		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
+		spin_unlock_irqrestore(&c->lock, flags);
+		if (rc)
+			return rc;
+
+	}
+
+	new_par_curr_rate = clk_get_rate(new_parent);
+	rc = clk_set_rate(new_parent, rate);
+	if (rc)
+		goto set_rate_fail;
+
+	rc = mux_set_parent(c, new_parent);
+	if (rc)
+		goto set_par_fail;
+
+	return 0;
+
+set_par_fail:
+	clk_set_rate(new_parent, new_par_curr_rate);
+set_rate_fail:
+	WARN(mux->ops->set_mux_sel(mux,
+		mux_parent_to_src_sel(mux, c->parent)),
+		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
+	return rc;
+}
+
+static int mux_enable(struct clk *c)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+
+	if (mux->ops->enable)
+		return mux->ops->enable(mux);
+	return 0;
+}
+
+static void mux_disable(struct clk *c)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+
+	if (mux->ops->disable)
+		return mux->ops->disable(mux);
+}
+
+static struct clk *mux_get_parent(struct clk *c)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	int sel = mux->ops->get_mux_sel(mux);
+	int i;
+
+	for (i = 0; i < mux->num_parents; i++) {
+		if (mux->parents[i].sel == sel)
+			return mux->parents[i].src;
+	}
+
+	/* Unfamiliar parent. */
+	return NULL;
+}
+
+static enum handoff mux_handoff(struct clk *c)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+
+	c->rate = clk_get_rate(c->parent);
+	mux->safe_sel = mux_parent_to_src_sel(mux, mux->safe_parent);
+
+	if (mux->en_mask && mux->ops && mux->ops->is_enabled)
+		return mux->ops->is_enabled(mux)
+			? HANDOFF_ENABLED_CLK
+			: HANDOFF_DISABLED_CLK;
+
+	/*
+	 * If this function returns 'enabled' even when the clock downstream
+	 * of this clock is disabled, then handoff code will unnecessarily
+	 * enable the current parent of this clock. If this function always
+	 * returns 'disabled' and a clock downstream is on, the clock handoff
+	 * code will bump up the ref count for this clock and its current
+	 * parent as necessary. So, clocks without an actual HW gate can
+	 * always return disabled.
+	 */
+	return HANDOFF_DISABLED_CLK;
+}
+
+static void __iomem *mux_clk_list_registers(struct clk *c, int n,
+			struct clk_register_data **regs, u32 *size)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+
+	if (mux->ops && mux->ops->list_registers)
+		return mux->ops->list_registers(mux, n, regs, size);
+
+	return ERR_PTR(-EINVAL);
+}
+
+const struct clk_ops clk_ops_gen_mux = {
+	.enable = mux_enable,
+	.disable = mux_disable,
+	.set_parent = mux_set_parent,
+	.round_rate = mux_round_rate,
+	.set_rate = mux_set_rate,
+	.handoff = mux_handoff,
+	.get_parent = mux_get_parent,
+	.list_registers = mux_clk_list_registers,
+};
+
+/* ==================== Divider clock ==================== */
+
+static long __div_round_rate(struct div_data *data, unsigned long rate,
+	struct clk *parent, unsigned int *best_div, unsigned long *best_prate)
+{
+	unsigned int div, min_div, max_div, _best_div = 1;
+	unsigned long prate, _best_prate = 0, rrate = 0, req_prate, actual_rate;
+	unsigned int numer;
+
+	rate = max(rate, 1UL);
+
+	min_div = max(data->min_div, 1U);
+	max_div = min(data->max_div, (unsigned int) (ULONG_MAX));
+
+	/*
+	 * div values are doubled for half dividers, e.g. a stored div of 3
+	 * represents a divide-by-1.5. Adjust for that by picking a numer
+	 * of 2.
+	 */
+	numer = data->is_half_divider ? 2 : 1;
+
+	for (div = min_div; div <= max_div; div++) {
+		if (data->skip_odd_div && (div & 1))
+			if (!(data->allow_div_one && (div == 1)))
+				continue;
+		if (data->skip_even_div && !(div & 1))
+			continue;
+		req_prate = mult_frac(rate, div, numer);
+		prate = clk_round_rate(parent, req_prate);
+		if (IS_ERR_VALUE(prate))
+			break;
+
+		actual_rate = mult_frac(prate, numer, div);
+		if (is_better_rate(rate, rrate, actual_rate)) {
+			rrate = actual_rate;
+			_best_div = div;
+			_best_prate = prate;
+		}
+
+		/*
+		 * Trying higher dividers is only going to ask the parent for
+		 * a higher rate. If it can't even output a rate higher than
+		 * the one we request for this divider, the parent is not
+		 * going to be able to output an even higher rate required
+		 * for a higher divider. So, stop trying higher dividers.
+		 */
+		if (actual_rate < rate)
+			break;
+
+		if (rrate <= rate + data->rate_margin)
+			break;
+	}
+
+	if (!rrate)
+		return -EINVAL;
+	if (best_div)
+		*best_div = _best_div;
+	if (best_prate)
+		*best_prate = _best_prate;
+
+	return rrate;
+}
+
+static long div_round_rate(struct clk *c, unsigned long rate)
+{
+	struct div_clk *d = to_div_clk(c);
+
+	return __div_round_rate(&d->data, rate, c->parent, NULL, NULL);
+}
+
+static int _find_safe_div(struct clk *c, unsigned long rate)
+{
+	struct div_clk *d = to_div_clk(c);
+	struct div_data *data = &d->data;
+	unsigned long fast = max(rate, c->rate);
+	unsigned int numer = data->is_half_divider ? 2 : 1;
+	int i, safe_div = 0;
+
+	if (!d->safe_freq)
+		return 0;
+
+	/* Find the max safe freq that does not exceed 'fast' */
+	for (i = data->max_div; i >= data->min_div; i--)
+		if (mult_frac(d->safe_freq, numer, i) <= fast)
+			safe_div = i;
+
+	return safe_div ?: -EINVAL;
+}
+
+static int div_set_rate(struct clk *c, unsigned long rate)
+{
+	struct div_clk *d = to_div_clk(c);
+	int safe_div, div, rc = 0;
+	long rrate, old_prate, new_prate;
+	struct div_data *data = &d->data;
+
+	rrate = __div_round_rate(data, rate, c->parent, &div, &new_prate);
+	if (rrate < rate || rrate > rate + data->rate_margin)
+		return -EINVAL;
+
+	/*
+	 * For fixed divider clock we don't want to return an error if the
+	 * requested rate matches the achievable rate. So, don't check for
+	 * !d->ops and return an error. __div_round_rate() ensures div ==
+	 * d->div if !d->ops.
+	 */
+
+	safe_div = _find_safe_div(c, rate);
+	if (d->safe_freq && safe_div < 0) {
+		pr_err("No safe div on %s for transitioning from %lu to %lu\n",
+			c->dbg_name, c->rate, rate);
+		return -EINVAL;
+	}
+
+	safe_div = max(safe_div, div);
+
+	if (safe_div > data->div) {
+		rc = d->ops->set_div(d, safe_div);
+		if (rc) {
+			pr_err("Failed to set div %d on %s\n", safe_div,
+				c->dbg_name);
+			return rc;
+		}
+	}
+
+	old_prate = clk_get_rate(c->parent);
+	rc = clk_set_rate(c->parent, new_prate);
+	if (rc)
+		goto set_rate_fail;
+
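+	/*
+	 * The parent now runs at its new rate; step the divider down from
+	 * the original or the temporary safe value to the target divider.
+	 */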
+	if (div < data->div)
+		rc = d->ops->set_div(d, div);
+	else if (div < safe_div)
+		rc = d->ops->set_div(d, div);
+	if (rc)
+		goto div_dec_fail;
+
+	data->div = div;
+
+	return 0;
+
+div_dec_fail:
+	WARN(clk_set_rate(c->parent, old_prate),
+		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
+set_rate_fail:
+	if (safe_div > data->div)
+		WARN(d->ops->set_div(d, data->div),
+			"Set rate failed for %s. Also in bad state!\n",
+			c->dbg_name);
+	return rc;
+}
+
+static int div_enable(struct clk *c)
+{
+	struct div_clk *d = to_div_clk(c);
+
+	if (d->ops && d->ops->enable)
+		return d->ops->enable(d);
+	return 0;
+}
+
+static void div_disable(struct clk *c)
+{
+	struct div_clk *d = to_div_clk(c);
+
+	if (d->ops && d->ops->disable)
+		return d->ops->disable(d);
+}
+
+static enum handoff div_handoff(struct clk *c)
+{
+	struct div_clk *d = to_div_clk(c);
+	unsigned int div = d->data.div;
+
+	if (d->ops && d->ops->get_div)
+		div = max(d->ops->get_div(d), 1);
+	div = max(div, 1U);
+	c->rate = clk_get_rate(c->parent) / div;
+
+	if (!d->ops || !d->ops->set_div)
+		d->data.min_div = d->data.max_div = div;
+	d->data.div = div;
+
+	if (d->en_mask && d->ops && d->ops->is_enabled)
+		return d->ops->is_enabled(d)
+			? HANDOFF_ENABLED_CLK
+			: HANDOFF_DISABLED_CLK;
+
+	/*
+	 * If this function returns 'enabled' even when the clock downstream
+	 * of this clock is disabled, then handoff code will unnecessarily
+	 * enable the current parent of this clock. If this function always
+	 * returns 'disabled' and a clock downstream is on, the clock handoff
+	 * code will bump up the ref count for this clock and its current
+	 * parent as necessary. So, clocks without an actual HW gate can
+	 * always return disabled.
+	 */
+	return HANDOFF_DISABLED_CLK;
+}
+
+static void __iomem *div_clk_list_registers(struct clk *c, int n,
+			struct clk_register_data **regs, u32 *size)
+{
+	struct div_clk *d = to_div_clk(c);
+
+	if (d->ops && d->ops->list_registers)
+		return d->ops->list_registers(d, n, regs, size);
+
+	return ERR_PTR(-EINVAL);
+}
+
+const struct clk_ops clk_ops_div = {
+	.enable = div_enable,
+	.disable = div_disable,
+	.round_rate = div_round_rate,
+	.set_rate = div_set_rate,
+	.handoff = div_handoff,
+	.list_registers = div_clk_list_registers,
+};
+
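+/*
+ * Slave divider: rate requests are never propagated to the parent. Only
+ * the local divide value changes, so the achievable rates are integer
+ * divisions of the parent's current rate.
+ */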
+static long __slave_div_round_rate(struct clk *c, unsigned long rate,
+					int *best_div)
+{
+	struct div_clk *d = to_div_clk(c);
+	unsigned int div, min_div, max_div;
+	long p_rate;
+
+	rate = max(rate, 1UL);
+
+	min_div = d->data.min_div;
+	max_div = d->data.max_div;
+
+	p_rate = clk_get_rate(c->parent);
+	div = DIV_ROUND_CLOSEST(p_rate, rate);
+	div = max(div, min_div);
+	div = min(div, max_div);
+	if (best_div)
+		*best_div = div;
+
+	return p_rate / div;
+}
+
+static long slave_div_round_rate(struct clk *c, unsigned long rate)
+{
+	return __slave_div_round_rate(c, rate, NULL);
+}
+
+static int slave_div_set_rate(struct clk *c, unsigned long rate)
+{
+	struct div_clk *d = to_div_clk(c);
+	int div, rc = 0;
+	long rrate;
+
+	rrate = __slave_div_round_rate(c, rate, &div);
+	if (rrate != rate)
+		return -EINVAL;
+
+	if (div == d->data.div)
+		return 0;
+
+	/*
+	 * For fixed divider clock we don't want to return an error if the
+	 * requested rate matches the achievable rate. So, don't check for
+	 * !d->ops and return an error. __slave_div_round_rate() ensures
+	 * div == d->data.div if !d->ops.
+	 */
+	rc = d->ops->set_div(d, div);
+	if (rc)
+		return rc;
+
+	d->data.div = div;
+
+	return 0;
+}
+
+static unsigned long slave_div_get_rate(struct clk *c)
+{
+	struct div_clk *d = to_div_clk(c);
+
+	if (!d->data.div)
+		return 0;
+	return clk_get_rate(c->parent) / d->data.div;
+}
+
+const struct clk_ops clk_ops_slave_div = {
+	.enable = div_enable,
+	.disable = div_disable,
+	.round_rate = slave_div_round_rate,
+	.set_rate = slave_div_set_rate,
+	.get_rate = slave_div_get_rate,
+	.handoff = div_handoff,
+	.list_registers = div_clk_list_registers,
+};
+
+/**
+ * External clock
+ * Some clock controllers have input clock signals that come from outside
+ * the clock controller. Such an input clock signal might then be used as
+ * a source for several clocks inside the clock controller. This external
+ * clock implementation models the input clock signal by simply passing
+ * requests on to the clock's parent, the original external clock source.
+ * The driver for the clock controller should clk_get() the original
+ * external clock in its probe function and set it as the parent of this
+ * external clock.
+ */
+
+long parent_round_rate(struct clk *c, unsigned long rate)
+{
+	return clk_round_rate(c->parent, rate);
+}
+
+int parent_set_rate(struct clk *c, unsigned long rate)
+{
+	return clk_set_rate(c->parent, rate);
+}
+
+unsigned long parent_get_rate(struct clk *c)
+{
+	return clk_get_rate(c->parent);
+}
+
+static int ext_set_parent(struct clk *c, struct clk *p)
+{
+	return clk_set_parent(c->parent, p);
+}
+
+static struct clk *ext_get_parent(struct clk *c)
+{
+	struct ext_clk *ext = to_ext_clk(c);
+
+	if (!IS_ERR_OR_NULL(c->parent))
+		return c->parent;
+	return clk_get(ext->dev, ext->clk_id);
+}
+
+static enum handoff ext_handoff(struct clk *c)
+{
+	c->rate = clk_get_rate(c->parent);
+	/* Similar reasoning applied in div_handoff, see comment there. */
+	return HANDOFF_DISABLED_CLK;
+}
+
+const struct clk_ops clk_ops_ext = {
+	.handoff = ext_handoff,
+	.round_rate = parent_round_rate,
+	.set_rate = parent_set_rate,
+	.get_rate = parent_get_rate,
+	.set_parent = ext_set_parent,
+	.get_parent = ext_get_parent,
+};
+
+static void *ext_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct ext_clk *ext;
+	const char *str;
+	int rc;
+
+	ext = devm_kzalloc(dev, sizeof(*ext), GFP_KERNEL);
+	if (!ext)
+		return ERR_PTR(-ENOMEM);
+
+	ext->dev = dev;
+	rc = of_property_read_string(np, "qcom,clock-names", &str);
+	if (!rc)
+		ext->clk_id = (void *)str;
+
+	ext->c.ops = &clk_ops_ext;
+	return msmclk_generic_clk_init(dev, np, &ext->c);
+}
+MSMCLK_PARSER(ext_clk_dt_parser, "qcom,ext-clk", 0);
+
+/* ==================== Mux_div clock ==================== */
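+/*
+ * A mux_div pairs a source mux with a divider that can be programmed in
+ * one shot through set_src_div(), so parent and divide value switch
+ * atomically.
+ */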
+
+static int mux_div_clk_enable(struct clk *c)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+
+	if (md->ops->enable)
+		return md->ops->enable(md);
+	return 0;
+}
+
+static void mux_div_clk_disable(struct clk *c)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+
+	if (md->ops->disable)
+		return md->ops->disable(md);
+}
+
+static long __mux_div_round_rate(struct clk *c, unsigned long rate,
+	struct clk **best_parent, int *best_div, unsigned long *best_prate)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+	unsigned int i;
+	unsigned long rrate, best = 0, _best_div = 0, _best_prate = 0;
+	struct clk *_best_parent = NULL;
+
+	if (md->try_get_rate) {
+		for (i = 0; i < md->num_parents; i++) {
+			int divider;
+			unsigned long p_rate;
+
+			rrate = __div_round_rate(&md->data, rate,
+						md->parents[i].src,
+						&divider, &p_rate);
+			/*
+			 * Check if one of the possible parents is already at
+			 * the requested rate.
+			 */
+			if (p_rate == clk_get_rate(md->parents[i].src)
+					&& rrate == rate) {
+				best = rrate;
+				_best_div = divider;
+				_best_prate = p_rate;
+				_best_parent = md->parents[i].src;
+				goto end;
+			}
+		}
+	}
+
+	for (i = 0; i < md->num_parents; i++) {
+		int div;
+		unsigned long prate;
+
+		rrate = __div_round_rate(&md->data, rate, md->parents[i].src,
+				&div, &prate);
+
+		if (is_better_rate(rate, best, rrate)) {
+			best = rrate;
+			_best_div = div;
+			_best_prate = prate;
+			_best_parent = md->parents[i].src;
+		}
+
+		if (rate <= rrate && rrate <= rate + md->data.rate_margin)
+			break;
+	}
+end:
+	if (best_div)
+		*best_div = _best_div;
+	if (best_prate)
+		*best_prate = _best_prate;
+	if (best_parent)
+		*best_parent = _best_parent;
+
+	if (best)
+		return best;
+	return -EINVAL;
+}
+
+static long mux_div_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	return __mux_div_round_rate(c, rate, NULL, NULL, NULL);
+}
+
+/* requires enable lock to be held */
+static int __set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
+{
+	u32 rc = 0, src_sel;
+
+	src_sel = parent_to_src_sel(md->parents, md->num_parents, parent);
+	/*
+	 * If the clock is disabled, don't change to the new settings until
+	 * the clock is re-enabled.
+	 */
+	if (md->c.count)
+		rc = md->ops->set_src_div(md, src_sel, div);
+	if (!rc) {
+		md->data.div = div;
+		md->src_sel = src_sel;
+	}
+
+	return rc;
+}
+
+static int set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
+{
+	unsigned long flags;
+	u32 rc;
+
+	spin_lock_irqsave(&md->c.lock, flags);
+	rc = __set_src_div(md, parent, div);
+	spin_unlock_irqrestore(&md->c.lock, flags);
+
+	return rc;
+}
+
+/* Must be called after handoff to ensure parent clock rates are initialized */
+static int safe_parent_init_once(struct clk *c)
+{
+	unsigned long rrate;
+	u32 best_div;
+	struct clk *best_parent;
+	struct mux_div_clk *md = to_mux_div_clk(c);
+
+	if (IS_ERR(md->safe_parent))
+		return -EINVAL;
+	if (!md->safe_freq || md->safe_parent)
+		return 0;
+
+	rrate = __mux_div_round_rate(c, md->safe_freq, &best_parent,
+			&best_div, NULL);
+
+	if (rrate == md->safe_freq) {
+		md->safe_div = best_div;
+		md->safe_parent = best_parent;
+	} else {
+		md->safe_parent = ERR_PTR(-EINVAL);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int mux_div_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+	unsigned long flags, rrate;
+	unsigned long new_prate, new_parent_orig_rate;
+	struct clk *old_parent, *new_parent;
+	u32 new_div, old_div;
+	int rc;
+
+	rc = safe_parent_init_once(c);
+	if (rc)
+		return rc;
+
+	rrate = __mux_div_round_rate(c, rate, &new_parent, &new_div,
+							&new_prate);
+	if (rrate < rate || rrate > rate + md->data.rate_margin)
+		return -EINVAL;
+
+	old_parent = c->parent;
+	old_div = md->data.div;
+
+	/* Refer to the description of safe_freq in clock-generic.h */
+	if (md->safe_freq)
+		rc = set_src_div(md, md->safe_parent, md->safe_div);
+
+	else if (new_parent == old_parent && new_div >= old_div) {
+		/*
+		 * If both the parent_rate and divider changes, there may be an
+		 * intermediate frequency generated. Ensure this intermediate
+		 * frequency is less than both the new rate and previous rate.
+		 */
+		rc = set_src_div(md, old_parent, new_div);
+	}
+	if (rc)
+		return rc;
+
+	new_parent_orig_rate = clk_get_rate(new_parent);
+	rc = clk_set_rate(new_parent, new_prate);
+	if (rc) {
+		pr_err("failed to set %s to %ld\n",
+			clk_name(new_parent), new_prate);
+		goto err_set_rate;
+	}
+
+	rc = __clk_pre_reparent(c, new_parent, &flags);
+	if (rc)
+		goto err_pre_reparent;
+
+	/* Set divider and mux src atomically */
+	rc = __set_src_div(md, new_parent, new_div);
+	if (rc)
+		goto err_set_src_div;
+
+	c->parent = new_parent;
+
+	__clk_post_reparent(c, old_parent, &flags);
+	return 0;
+
+err_set_src_div:
+	/* Not switching to new_parent, so disable it */
+	__clk_post_reparent(c, new_parent, &flags);
+err_pre_reparent:
+	rc = clk_set_rate(new_parent, new_parent_orig_rate);
+	WARN(rc, "%s: error changing new_parent (%s) rate back to %ld\n",
+		clk_name(c), clk_name(new_parent), new_parent_orig_rate);
+err_set_rate:
+	rc = set_src_div(md, old_parent, old_div);
+	WARN(rc, "%s: error changing back to original div (%d) and parent (%s)\n",
+		clk_name(c), old_div, clk_name(old_parent));
+
+	return rc;
+}
+
+static struct clk *mux_div_clk_get_parent(struct clk *c)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+	u32 i, div, src_sel;
+
+	md->ops->get_src_div(md, &src_sel, &div);
+
+	md->data.div = div;
+	md->src_sel = src_sel;
+
+	for (i = 0; i < md->num_parents; i++) {
+		if (md->parents[i].sel == src_sel)
+			return md->parents[i].src;
+	}
+
+	return NULL;
+}
+
+static enum handoff mux_div_clk_handoff(struct clk *c)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+	unsigned long parent_rate;
+	unsigned int numer;
+
+	parent_rate = clk_get_rate(c->parent);
+	/*
+	 * div values are doubled for half dividers.
+	 * Adjust for that by picking a numer of 2.
+	 */
+	numer = md->data.is_half_divider ? 2 : 1;
+
+	if (md->data.div) {
+		c->rate = mult_frac(parent_rate, numer, md->data.div);
+	} else {
+		c->rate = 0;
+		return HANDOFF_DISABLED_CLK;
+	}
+
+	if (md->en_mask && md->ops && md->ops->is_enabled)
+		return md->ops->is_enabled(md)
+			? HANDOFF_ENABLED_CLK
+			: HANDOFF_DISABLED_CLK;
+
+	/*
+	 * If this function returns 'enabled' even when the clock downstream
+	 * of this clock is disabled, then handoff code will unnecessarily
+	 * enable the current parent of this clock. If this function always
+	 * returns 'disabled' and a clock downstream is on, the clock handoff
+	 * code will bump up the ref count for this clock and its current
+	 * parent as necessary. So, clocks without an actual HW gate can
+	 * always return disabled.
+	 */
+	return HANDOFF_DISABLED_CLK;
+}
+
+static void __iomem *mux_div_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct mux_div_clk *md = to_mux_div_clk(c);
+
+	if (md->ops && md->ops->list_registers)
+		return md->ops->list_registers(md, n, regs, size);
+
+	return ERR_PTR(-EINVAL);
+}
+
+const struct clk_ops clk_ops_mux_div_clk = {
+	.enable = mux_div_clk_enable,
+	.disable = mux_div_clk_disable,
+	.set_rate = mux_div_clk_set_rate,
+	.round_rate = mux_div_clk_round_rate,
+	.get_parent = mux_div_clk_get_parent,
+	.handoff = mux_div_clk_handoff,
+	.list_registers = mux_div_clk_list_registers,
+};
diff --git a/drivers/clk/msm/clock-local2.c b/drivers/clk/msm/clock-local2.c
new file mode 100644
index 0000000..f200d0b
--- /dev/null
+++ b/drivers/clk/msm/clock-local2.c
@@ -0,0 +1,2907 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+/*
+ * When enabling/disabling a clock, check the halt bit up to this number
+ * of times (with a 1 us delay in between) before continuing.
+ */
+#define HALT_CHECK_MAX_LOOPS	500
+/* For clocks without halt checking, wait this long after enables/disables. */
+#define HALT_CHECK_DELAY_US	500
+
+#define RCG_FORCE_DISABLE_DELAY_US	100
+
+/*
+ * When updating an RCG configuration, check the update bit up to this number
+ * of times (with a 1 us delay in between) before continuing.
+ */
+#define UPDATE_CHECK_MAX_LOOPS	500
+
+DEFINE_SPINLOCK(local_clock_reg_lock);
+struct clk_freq_tbl rcg_dummy_freq = F_END;
+
+#define CMD_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg)
+#define CFG_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x4)
+#define M_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0x8)
+#define N_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0xC)
+#define D_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0x10)
+#define CBCR_REG(x)	(*(x)->base + (x)->cbcr_reg)
+#define BCR_REG(x)	(*(x)->base + (x)->bcr_reg)
+#define RST_REG(x)	(*(x)->base + (x)->reset_reg)
+#define VOTE_REG(x)	(*(x)->base + (x)->vote_reg)
+#define GATE_EN_REG(x)	(*(x)->base + (x)->en_reg)
+#define DIV_REG(x)	(*(x)->base + (x)->offset)
+#define MUX_REG(x)	(*(x)->base + (x)->offset)
+
+/*
+ * Important clock bit positions and masks
+ */
+#define CMD_RCGR_ROOT_ENABLE_BIT	BIT(1)
+#define CBCR_BRANCH_ENABLE_BIT		BIT(0)
+#define CBCR_BRANCH_OFF_BIT		BIT(31)
+#define CMD_RCGR_CONFIG_UPDATE_BIT	BIT(0)
+#define CMD_RCGR_ROOT_STATUS_BIT	BIT(31)
+#define BCR_BLK_ARES_BIT		BIT(0)
+#define CBCR_HW_CTL_BIT			BIT(1)
+#define CFG_RCGR_DIV_MASK		BM(4, 0)
+#define CFG_RCGR_SRC_SEL_MASK		BM(10, 8)
+#define MND_MODE_MASK			BM(13, 12)
+#define MND_DUAL_EDGE_MODE_BVAL		BVAL(13, 12, 0x2)
+#define CMD_RCGR_CONFIG_DIRTY_MASK	BM(7, 4)
+#define CBCR_CDIV_LSB			16
+#define CBCR_CDIV_MSB			19
+
+enum branch_state {
+	BRANCH_ON,
+	BRANCH_OFF,
+};
+
+static struct clk_freq_tbl cxo_f = {
+	.freq_hz = 19200000,
+	.m_val = 0,
+	.n_val = 0,
+	.d_val = 0,
+	.div_src_val = 0,
+};
+
+struct div_map {
+	u32 mask;
+	int div;
+};
+
+/*
+ * RCG functions
+ */
+
+/*
+ * Update an RCG with a new configuration. This may include a new M, N, or D
+ * value, source selection or pre-divider value.
+ */
+static void rcg_update_config(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+	int count = UPDATE_CHECK_MAX_LOOPS;
+
+	if (rcg->non_local_control_timeout)
+		count = rcg->non_local_control_timeout;
+
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
+	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
+
+	/* Wait for update to take effect */
+	for (; count > 0; count--) {
+		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
+				CMD_RCGR_CONFIG_UPDATE_BIT))
+			return;
+		udelay(1);
+	}
+
+	CLK_WARN(&rcg->c, count == 0, "rcg didn't update its configuration.");
+}
+
+static void rcg_on_check(struct rcg_clk *rcg)
+{
+	int count = UPDATE_CHECK_MAX_LOOPS;
+
+	if (rcg->non_local_control_timeout)
+		count = rcg->non_local_control_timeout;
+
+	/* Wait for RCG to turn on */
+	for (; count > 0; count--) {
+		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
+				CMD_RCGR_ROOT_STATUS_BIT))
+			return;
+		udelay(1);
+	}
+	CLK_WARN(&rcg->c, count == 0, "rcg didn't turn on.");
+}
+
+/* RCG set rate function for clocks with Half Integer Dividers. */
+static void __set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	u32 cfg_regval;
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
+	cfg_regval |= nf->div_src_val;
+	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));
+
+	rcg_update_config(rcg);
+}
+
+void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	__set_rate_hid(rcg, nf);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+/* RCG set rate function for clocks with MND & Half Integer Dividers. */
+static void __set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	u32 cfg_regval;
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	writel_relaxed(nf->m_val, M_REG(rcg));
+	writel_relaxed(nf->n_val, N_REG(rcg));
+	writel_relaxed(nf->d_val, D_REG(rcg));
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
+	cfg_regval |= nf->div_src_val;
+
+	/* Activate or disable the M/N:D divider as necessary */
+	cfg_regval &= ~MND_MODE_MASK;
+	if (nf->n_val != 0)
+		cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
+	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));
+
+	rcg_update_config(rcg);
+}
+
+void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	__set_rate_mnd(rcg, nf);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void rcg_set_force_enable(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	cmd_rcgr_regval |= CMD_RCGR_ROOT_ENABLE_BIT;
+	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
+	rcg_on_check(rcg);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void rcg_clear_force_enable(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	cmd_rcgr_regval &= ~CMD_RCGR_ROOT_ENABLE_BIT;
+	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	/* Add a delay of 100usecs to let the RCG disable */
+	udelay(RCG_FORCE_DISABLE_DELAY_US);
+}
+
+static int rcg_clk_enable(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	WARN(rcg->current_freq == &rcg_dummy_freq,
+		"Attempting to prepare %s before setting its rate."
+		, rcg->c.dbg_name);
+
+	if (rcg->force_enable_rcgr) {
+		rcg_set_force_enable(rcg);
+		return 0;
+	}
+
+	if (!rcg->non_local_children || rcg->current_freq == &rcg_dummy_freq)
+		return 0;
+	/*
+	 * Switch from CXO to saved mux value. Force enable/disable while
+	 * switching. The current parent is already prepared and enabled
+	 * at this point, and the CXO source is always-on. Therefore the
+	 * RCG can safely execute a dynamic switch.
+	 */
+	rcg_set_force_enable(rcg);
+	rcg->set_rate(rcg, rcg->current_freq);
+	rcg_clear_force_enable(rcg);
+
+	return 0;
+}
+
+static void rcg_clk_disable(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	if (rcg->force_enable_rcgr) {
+		rcg_clear_force_enable(rcg);
+		return;
+	}
+
+	if (!rcg->non_local_children)
+		return;
+
+	/*
+	 * Save mux select and switch to CXO. Force enable/disable while
+	 * switching. The current parent is still prepared and enabled at this
+	 * point, and the CXO source is always-on. Therefore the RCG can safely
+	 * execute a dynamic switch.
+	 */
+	rcg_set_force_enable(rcg);
+	rcg->set_rate(rcg, &cxo_f);
+	rcg_clear_force_enable(rcg);
+}
+
+static int prepare_enable_rcg_srcs(struct clk *c, struct clk *curr,
+					struct clk *new, unsigned long *flags)
+{
+	int rc;
+
+	rc = clk_prepare(curr);
+	if (rc)
+		return rc;
+
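+	/*
+	 * If this clock is prepared/enabled, take an extra vote on the new
+	 * source: it stands in for the parent reference that migrates from
+	 * curr to new once the switch completes (the matching vote on curr
+	 * is dropped in disable_unprepare_rcg_srcs()). The unconditional
+	 * prepare/enable calls below only keep both sources on for the
+	 * duration of the switch.
+	 */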
+	if (c->prepare_count) {
+		rc = clk_prepare(new);
+		if (rc)
+			goto err_new_src_prepare;
+	}
+
+	rc = clk_prepare(new);
+	if (rc)
+		goto err_new_src_prepare2;
+
+	spin_lock_irqsave(&c->lock, *flags);
+	rc = clk_enable(curr);
+	if (rc) {
+		spin_unlock_irqrestore(&c->lock, *flags);
+		goto err_curr_src_enable;
+	}
+
+	if (c->count) {
+		rc = clk_enable(new);
+		if (rc) {
+			spin_unlock_irqrestore(&c->lock, *flags);
+			goto err_new_src_enable;
+		}
+	}
+
+	rc = clk_enable(new);
+	if (rc) {
+		spin_unlock_irqrestore(&c->lock, *flags);
+		goto err_new_src_enable2;
+	}
+	return 0;
+
+err_new_src_enable2:
+	if (c->count)
+		clk_disable(new);
+err_new_src_enable:
+	clk_disable(curr);
+err_curr_src_enable:
+	clk_unprepare(new);
+err_new_src_prepare2:
+	if (c->prepare_count)
+		clk_unprepare(new);
+err_new_src_prepare:
+	clk_unprepare(curr);
+	return rc;
+}
+
+static void disable_unprepare_rcg_srcs(struct clk *c, struct clk *curr,
+					struct clk *new, unsigned long *flags)
+{
+	clk_disable(new);
+	clk_disable(curr);
+	if (c->count)
+		clk_disable(curr);
+	spin_unlock_irqrestore(&c->lock, *flags);
+
+	clk_unprepare(new);
+	clk_unprepare(curr);
+	if (c->prepare_count)
+		clk_unprepare(curr);
+}
+
+static int rcg_clk_set_duty_cycle(struct clk *c, u32 numerator,
+				u32 denominator)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	u32 notn_m_val, n_val, m_val, d_val, not2d_val;
+	u32 max_n_value;
+
+	if (!numerator || numerator == denominator)
+		return -EINVAL;
+
+	if (!rcg->mnd_reg_width)
+		rcg->mnd_reg_width = 8;
+
+	max_n_value = 1 << (rcg->mnd_reg_width - 1);
+
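+	/* The N register holds ~(N - M); recover N from it and M. */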
+	notn_m_val = readl_relaxed(N_REG(rcg));
+	m_val = readl_relaxed(M_REG(rcg));
+	n_val = ((~notn_m_val) + m_val) & BM((rcg->mnd_reg_width - 1), 0);
+
+	if (n_val > max_n_value) {
+		pr_warn("%s duty-cycle cannot be set for required frequency %ld\n",
+				c->dbg_name, clk_get_rate(c));
+		return -EINVAL;
+	}
+
+	/*
+	 * Calculate the 2D value: 2D/(2N) must equal the requested duty
+	 * cycle, numerator/denominator.
+	 */
+	d_val = DIV_ROUND_CLOSEST((numerator * n_val * 2),  denominator);
+
+	/* If 2D overflows the register width, clamp to the largest even value. */
+	if (d_val > (BIT(rcg->mnd_reg_width) - 1)) {
+		d_val = (BIT(rcg->mnd_reg_width) - 1) / 2;
+		d_val *= 2;
+	}
+
+	not2d_val = (~d_val) & BM((rcg->mnd_reg_width - 1), 0);
+
+	writel_relaxed(not2d_val, D_REG(rcg));
+	rcg_update_config(rcg);
+
+	return 0;
+}
+
+static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct clk_freq_tbl *cf, *nf;
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	int rc;
+	unsigned long flags;
+
+	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
+			&& nf->freq_hz != rate; nf++)
+		;
+
+	if (nf->freq_hz == FREQ_END)
+		return -EINVAL;
+
+	cf = rcg->current_freq;
+	if (nf->src_freq != FIXED_CLK_SRC) {
+		rc = clk_set_rate(nf->src_clk, nf->src_freq);
+		if (rc)
+			return rc;
+	}
+
+	if (rcg->non_local_control_timeout) {
+		/*
+		 * __clk_pre_reparent only enables the RCG source if the SW
+		 * count for the RCG is non-zero. We need to make sure that
+		 * both PLL sources are ON before force turning on the RCG.
+		 */
+		rc = prepare_enable_rcg_srcs(c, cf->src_clk, nf->src_clk,
+								&flags);
+	} else
+		rc = __clk_pre_reparent(c, nf->src_clk, &flags);
+
+	if (rc)
+		return rc;
+
+	WARN_ON(!rcg->set_rate);
+
+	/* Perform clock-specific frequency switch operations. */
+	if ((rcg->non_local_children && c->count) ||
+			rcg->non_local_control_timeout) {
+		/*
+		 * Force enable the RCG before updating the RCG configuration
+		 * since the downstream clock/s can be disabled at around the
+		 * same time causing the feedback from the CBCR to turn off
+		 * the RCG.
+		 */
+		rcg_set_force_enable(rcg);
+		rcg->set_rate(rcg, nf);
+		rcg_clear_force_enable(rcg);
+	} else if (!rcg->non_local_children) {
+		rcg->set_rate(rcg, nf);
+	}
+
+	/*
+	 * If non_local_children is set and the RCG is not enabled,
+	 * the following operations switch parent in software and cache
+	 * the frequency. The mux switch will occur when the RCG is enabled.
+	 */
+	rcg->current_freq = nf;
+	c->parent = nf->src_clk;
+
+	if (rcg->non_local_control_timeout)
+		disable_unprepare_rcg_srcs(c, cf->src_clk, nf->src_clk,
+								&flags);
+	else
+		__clk_post_reparent(c, cf->src_clk, &flags);
+
+	return 0;
+}
+
+/*
+ * Return a supported rate that's at least the specified rate or
+ * the max supported rate if the specified rate is larger than the
+ * max supported rate.
+ */
+static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	struct clk_freq_tbl *f;
+
+	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
+		if (f->freq_hz >= rate)
+			return f->freq_hz;
+
+	f--;
+	return f->freq_hz;
+}
+
+/* Return the nth supported frequency for a given clock. */
+static long rcg_clk_list_rate(struct clk *c, unsigned long  n)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
+		return -ENXIO;
+
+	return (rcg->freq_tbl + n)->freq_hz;
+}
+
+static struct clk *_rcg_clk_get_parent(struct rcg_clk *rcg, bool has_mnd,
+								bool match_rate)
+{
+	u32 n_regval = 0, m_regval = 0, d_regval = 0;
+	u32 cfg_regval, div, div_regval;
+	struct clk_freq_tbl *freq;
+	u32 cmd_rcgr_regval;
+
+	if (!rcg->freq_tbl) {
+		WARN(1, "No frequency table present for rcg %s\n",
+							rcg->c.dbg_name);
+		return NULL;
+	}
+
+	/* Is there a pending configuration? */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK) {
+		WARN(1, "Pending transaction for rcg %s\n", rcg->c.dbg_name);
+		return NULL;
+	}
+
+	/* Get values of m, n, d, div and src_sel registers. */
+	if (has_mnd) {
+		m_regval = readl_relaxed(M_REG(rcg));
+		n_regval = readl_relaxed(N_REG(rcg));
+		d_regval = readl_relaxed(D_REG(rcg));
+
+		/*
+		 * The n and d values stored in the frequency tables are sign
+		 * extended to 32 bits. The n and d values in the registers are
+		 * sign extended to 8 or 16 bits. Sign extend the values read
+		 * from the registers so that they can be compared to the
+		 * values in the frequency tables.
+		 */
+		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
+		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
+	}
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
+				| MND_MODE_MASK;
+
+	/* If mnd counter is present, check if it's in use. */
+	has_mnd = (has_mnd) &&
+		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);
+
+	/*
+	 * Clear out the mn counter mode bits since we now want to compare only
+	 * the source mux selection and pre-divider values in the registers.
+	 */
+	cfg_regval &= ~MND_MODE_MASK;
+
+	/* Figure out what rate the rcg is running at */
+	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+		/* source select does not match */
+		if ((freq->div_src_val & CFG_RCGR_SRC_SEL_MASK)
+		    != (cfg_regval & CFG_RCGR_SRC_SEL_MASK))
+			continue;
+		/*
+		 * We found the required parent in the frequency table. Stop
+		 * here if we only care that the source matches and don't
+		 * care whether the frequency matches.
+		 */
+		if (!match_rate)
+			break;
+		/* divider does not match */
+		div = freq->div_src_val & CFG_RCGR_DIV_MASK;
+		div_regval = cfg_regval & CFG_RCGR_DIV_MASK;
+		if (div != div_regval && (div > 1 || div_regval > 1))
+			continue;
+
+		if (has_mnd) {
+			if (freq->m_val != m_regval)
+				continue;
+			if (freq->n_val != n_regval)
+				continue;
+			if (freq->d_val != d_regval)
+				continue;
+		} else if (freq->n_val) {
+			continue;
+		}
+		break;
+	}
+
+	/* No known frequency found */
+	if (freq->freq_hz == FREQ_END) {
+		/*
+		 * If we can't recognize the frequency and non_local_children is
+		 * set, switch to safe frequency. It is assumed the current
+		 * parent has been turned on by the bootchain if the RCG is on.
+		 */
+		if (rcg->non_local_children) {
+			rcg->set_rate(rcg, &cxo_f);
+			WARN(1, "don't recognize rcg frequency for %s\n",
+				rcg->c.dbg_name);
+		}
+		return NULL;
+	}
+
+	rcg->current_freq = freq;
+	return freq->src_clk;
+}
+
+static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+
+	if (rcg->current_freq && rcg->current_freq->freq_hz != FREQ_END)
+		rcg->c.rate = rcg->current_freq->freq_hz;
+
+	/* Is the root enabled? */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static struct clk *display_clk_get_parent(struct clk *c)
+{
+	return _rcg_clk_get_parent(to_rcg_clk(c), false, false);
+}
+
+static struct clk *rcg_mnd_clk_get_parent(struct clk *c)
+{
+	return _rcg_clk_get_parent(to_rcg_clk(c), true, true);
+}
+
+static struct clk *rcg_clk_get_parent(struct clk *c)
+{
+	return _rcg_clk_get_parent(to_rcg_clk(c), false, true);
+}
+
+static enum handoff rcg_mnd_clk_handoff(struct clk *c)
+{
+	return _rcg_clk_handoff(to_rcg_clk(c));
+}
+
+static enum handoff rcg_clk_handoff(struct clk *c)
+{
+	return _rcg_clk_handoff(to_rcg_clk(c));
+}
+
+static void __iomem *rcg_hid_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	static struct clk_register_data data[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return CMD_RCGR_REG(rcg);
+}
+
+static void __iomem *rcg_mnd_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	static struct clk_register_data data[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+		{"M_VAL", 0x8},
+		{"N_VAL", 0xC},
+		{"D_VAL", 0x10},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return CMD_RCGR_REG(rcg);
+}
+
+#define BRANCH_CHECK_MASK	BM(31, 28)
+#define BRANCH_ON_VAL		BVAL(31, 28, 0x0)
+#define BRANCH_OFF_VAL		BVAL(31, 28, 0x8)
+#define BRANCH_NOC_FSM_ON_VAL	BVAL(31, 28, 0x2)
+
+/*
+ * Branch clock functions
+ */
+static void branch_clk_halt_check(struct clk *c, u32 halt_check,
+			void __iomem *cbcr_reg, enum branch_state br_status)
+{
+	char *status_str = (br_status == BRANCH_ON) ? "off" : "on";
+
+	/*
+	 * Use a memory barrier since some halt status registers are
+	 * not within the same 1K segment as the branch/root enable
+	 * registers.  It's also needed in the udelay() case to ensure
+	 * the delay starts after the branch disable.
+	 */
+	mb();
+
+	if (halt_check == DELAY || halt_check == HALT_VOTED) {
+		udelay(HALT_CHECK_DELAY_US);
+	} else if (halt_check == HALT) {
+		int count;
+		u32 val;
+
+		for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
+			val = readl_relaxed(cbcr_reg);
+			val &= BRANCH_CHECK_MASK;
+			switch (br_status) {
+			case BRANCH_ON:
+				if (val == BRANCH_ON_VAL
+					|| val == BRANCH_NOC_FSM_ON_VAL)
+					return;
+				break;
+
+			case BRANCH_OFF:
+				if (val == BRANCH_OFF_VAL)
+					return;
+				break;
+			}
+			udelay(1);
+		}
+		CLK_WARN(c, count == 0, "status stuck %s", status_str);
+	}
+}
+
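+/*
+ * Return the highest rate requested by any prepared child branch of the
+ * parent, so a shared source is never run slower than an active branch needs.
+ */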
+static unsigned long branch_clk_aggregate_rate(const struct clk *parent)
+{
+	struct clk *clk;
+	unsigned long rate = 0;
+
+	list_for_each_entry(clk, &parent->children, siblings) {
+		struct branch_clk *v = to_branch_clk(clk);
+
+		if (v->is_prepared)
+			rate = max(clk->rate, rate);
+	}
+	return rate;
+}
+
+static int cbcr_set_flags(void __iomem *regaddr, unsigned long flags)
+{
+	u32 cbcr_val;
+	unsigned long irq_flags;
+	int delay_us = 0, ret = 0;
+
+	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
+	cbcr_val = readl_relaxed(regaddr);
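+	/*
+	 * The bits below follow the common QTI CBCR layout: bit 12 forces
+	 * the peripheral memory off, bit 13 forces it on (retention), and
+	 * bit 14 retains core memory.
+	 */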
+	switch (flags) {
+	case CLKFLAG_PERIPH_OFF_SET:
+		cbcr_val |= BIT(12);
+		delay_us = 1;
+		break;
+	case CLKFLAG_PERIPH_OFF_CLEAR:
+		cbcr_val &= ~BIT(12);
+		break;
+	case CLKFLAG_RETAIN_PERIPH:
+		cbcr_val |= BIT(13);
+		delay_us = 1;
+		break;
+	case CLKFLAG_NORETAIN_PERIPH:
+		cbcr_val &= ~BIT(13);
+		break;
+	case CLKFLAG_RETAIN_MEM:
+		cbcr_val |= BIT(14);
+		delay_us = 1;
+		break;
+	case CLKFLAG_NORETAIN_MEM:
+		cbcr_val &= ~BIT(14);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	writel_relaxed(cbcr_val, regaddr);
+	/* Make sure power is enabled before returning. */
+	mb();
+	udelay(delay_us);
+
+	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);
+
+	return ret;
+}
+
+static int branch_clk_set_flags(struct clk *c, unsigned long flags)
+{
+	return cbcr_set_flags(CBCR_REG(to_branch_clk(c)), flags);
+}
+
+static DEFINE_MUTEX(branch_clk_lock);
+
+static int branch_clk_prepare(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	unsigned long curr_rate;
+	int ret = 0;
+
+	if (!branch->aggr_sibling_rates)
+		return ret;
+
+	mutex_lock(&branch_clk_lock);
+	branch->is_prepared = false;
+	curr_rate = branch_clk_aggregate_rate(c->parent);
+	if (c->rate > curr_rate) {
+		ret = clk_set_rate(c->parent, c->rate);
+		if (ret)
+			goto exit;
+	}
+	branch->is_prepared = true;
+exit:
+	mutex_unlock(&branch_clk_lock);
+	return ret;
+}
+
+static int branch_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	u32 cbcr_val;
+	struct branch_clk *branch = to_branch_clk(c);
+
+	if (branch->toggle_memory) {
+		branch_clk_set_flags(c, CLKFLAG_RETAIN_MEM);
+		branch_clk_set_flags(c, CLKFLAG_RETAIN_PERIPH);
+	}
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cbcr_val = readl_relaxed(CBCR_REG(branch));
+	cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(cbcr_val, CBCR_REG(branch));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	/*
+	 * For clocks controlled by other masters via voting registers,
+	 * delay polling for the status bit to allow previous clk_disable
+	 * by the GDS controller to go through.
+	 */
+	if (branch->no_halt_check_on_disable)
+		udelay(5);
+
+	/* Wait for clock to enable before continuing. */
+	branch_clk_halt_check(c, branch->halt_check, CBCR_REG(branch),
+				BRANCH_ON);
+
+	return 0;
+}
+
+static void branch_clk_unprepare(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	unsigned long curr_rate, new_rate;
+
+	if (!branch->aggr_sibling_rates)
+		return;
+
+	mutex_lock(&branch_clk_lock);
+	branch->is_prepared = false;
+	new_rate = branch_clk_aggregate_rate(c->parent);
+	curr_rate = max(new_rate, c->rate);
+	if (new_rate < curr_rate)
+		clk_set_rate(c->parent, new_rate);
+	mutex_unlock(&branch_clk_lock);
+}
+
+static void branch_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	struct branch_clk *branch = to_branch_clk(c);
+	u32 reg_val;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	reg_val = readl_relaxed(CBCR_REG(branch));
+	reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(reg_val, CBCR_REG(branch));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	/* Wait for clock to disable before continuing. */
+	if (!branch->no_halt_check_on_disable)
+		branch_clk_halt_check(c, branch->halt_check, CBCR_REG(branch),
+					BRANCH_OFF);
+
+	if (branch->toggle_memory) {
+		branch_clk_set_flags(c, CLKFLAG_NORETAIN_MEM);
+		branch_clk_set_flags(c, CLKFLAG_NORETAIN_PERIPH);
+	}
+}
+
+static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
+{
+	unsigned long flags;
+	u32 regval;
+
+	if (rate > branch->max_div)
+		return -EINVAL;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(CBCR_REG(branch));
+	regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+	regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, rate);
+	writel_relaxed(regval, CBCR_REG(branch));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	return 0;
+}
+
+static int branch_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct branch_clk *clkh, *branch = to_branch_clk(c);
+	struct clk *clkp, *parent = c->parent;
+	unsigned long curr_rate, new_rate, other_rate = 0;
+	int ret = 0;
+
+	if (branch->max_div)
+		return branch_cdiv_set_rate(branch, rate);
+
+	if (branch->has_sibling)
+		return -EPERM;
+
+	if (!branch->aggr_sibling_rates)
+		return clk_set_rate(c->parent, rate);
+
+	mutex_lock(&branch_clk_lock);
+	if (!branch->is_prepared) {
+		c->rate = rate;
+		goto exit;
+	}
+	/*
+	 * Get the aggregate rate without this clock's vote and update
+	 * if the new rate is different than the current rate.
+	 */
+	list_for_each_entry(clkp, &parent->children, siblings) {
+		clkh = to_branch_clk(clkp);
+		if (clkh->is_prepared && clkh != branch)
+			other_rate = max(clkp->rate, other_rate);
+	}
+	curr_rate = max(other_rate, c->rate);
+	new_rate = max(other_rate, rate);
+	if (new_rate != curr_rate) {
+		ret = clk_set_rate(parent, new_rate);
+		if (!ret)
+			c->rate = rate;
+	}
+exit:
+	mutex_unlock(&branch_clk_lock);
+	return ret;
+}
+
+static long branch_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+
+	if (branch->max_div)
+		return rate <= (branch->max_div) ? rate : -EPERM;
+
+	if (!branch->has_sibling)
+		return clk_round_rate(c->parent, rate);
+
+	return -EPERM;
+}
+
+static unsigned long branch_clk_get_rate(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+
+	if (branch->max_div)
+		return branch->c.rate;
+
+	return clk_get_rate(c->parent);
+}
+
+static long branch_clk_list_rate(struct clk *c, unsigned long  n)
+{
+	int level;
+	unsigned long fmax = 0, rate;
+	struct branch_clk *branch = to_branch_clk(c);
+	struct clk *parent = c->parent;
+
+	if (branch->has_sibling == 1)
+		return -ENXIO;
+
+	if (!parent || !parent->ops->list_rate)
+		return -ENXIO;
+
+	/* Find max frequency supported within voltage constraints. */
+	if (!parent->vdd_class) {
+		fmax = ULONG_MAX;
+	} else {
+		for (level = 0; level < parent->num_fmax; level++)
+			if (parent->fmax[level])
+				fmax = parent->fmax[level];
+	}
+
+	rate = parent->ops->list_rate(parent, n);
+	if (rate <= fmax)
+		return rate;
+	else
+		return -ENXIO;
+}
+
+static enum handoff branch_clk_handoff(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	u32 cbcr_regval;
+
+	cbcr_regval = readl_relaxed(CBCR_REG(branch));
+
+	/* Set the cdiv to c->rate for fixed divider branch clock */
+	if (c->rate && (c->rate < branch->max_div)) {
+		cbcr_regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+		cbcr_regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, c->rate);
+		writel_relaxed(cbcr_regval, CBCR_REG(branch));
+	}
+
+	if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
+		return HANDOFF_DISABLED_CLK;
+
+	if (!(cbcr_regval & CBCR_BRANCH_ENABLE_BIT)) {
+		if (!branch->check_enable_bit) {
+			pr_warn("%s clock is enabled in HW", c->dbg_name);
+			pr_warn("even though ENABLE_BIT is not set\n");
+		}
+		return HANDOFF_DISABLED_CLK;
+	}
+
+	if (branch->max_div) {
+		cbcr_regval &= BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+		cbcr_regval >>= CBCR_CDIV_LSB;
+		c->rate = cbcr_regval;
+	} else if (!branch->has_sibling) {
+		c->rate = clk_get_rate(c->parent);
+	}
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static int __branch_clk_reset(void __iomem *bcr_reg,
+				enum clk_reset_action action)
+{
+	int ret = 0;
+	unsigned long flags;
+	u32 reg_val;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	reg_val = readl_relaxed(bcr_reg);
+	switch (action) {
+	case CLK_RESET_ASSERT:
+		reg_val |= BCR_BLK_ARES_BIT;
+		break;
+	case CLK_RESET_DEASSERT:
+		reg_val &= ~BCR_BLK_ARES_BIT;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	writel_relaxed(reg_val, bcr_reg);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	/* Make sure write is issued before returning. */
+	mb();
+
+	return ret;
+}
+
+static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+
+	if (!branch->bcr_reg)
+		return -EPERM;
+	return __branch_clk_reset(BCR_REG(branch), action);
+}
+
+static void __iomem *branch_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	static struct clk_register_data data[] = {
+		{"CBCR", 0x0},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return CBCR_REG(branch);
+}
+
+/*
+ * Voteable clock functions
+ */
+static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
+{
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+	if (!vclk->bcr_reg) {
+		WARN("clk_reset called on an unsupported clock (%s)\n",
+			c->dbg_name);
+		return -EPERM;
+	}
+	return __branch_clk_reset(BCR_REG(vclk), action);
+}
+
+static int local_vote_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	u32 ena;
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	ena = readl_relaxed(VOTE_REG(vclk));
+	ena |= vclk->en_mask;
+	writel_relaxed(ena, VOTE_REG(vclk));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	branch_clk_halt_check(c, vclk->halt_check, CBCR_REG(vclk), BRANCH_ON);
+
+	return 0;
+}
+
+static void local_vote_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	u32 ena;
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	ena = readl_relaxed(VOTE_REG(vclk));
+	ena &= ~vclk->en_mask;
+	writel_relaxed(ena, VOTE_REG(vclk));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static enum handoff local_vote_clk_handoff(struct clk *c)
+{
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+	u32 vote_regval;
+
+	/* Is the branch voted on by apps? */
+	vote_regval = readl_relaxed(VOTE_REG(vclk));
+	if (!(vote_regval & vclk->en_mask))
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+/* Sample clock for 'ticks' reference clock ticks. */
+static u32 run_measurement(unsigned long ticks, void __iomem *ctl_reg,
+				void __iomem *status_reg)
+{
+	/* Stop counters and set the XO4 counter start value. */
+	writel_relaxed(ticks, ctl_reg);
+
+	/* Wait for timer to become ready. */
+	while ((readl_relaxed(status_reg) & BIT(25)) != 0)
+		cpu_relax();
+
+	/* Run measurement and wait for completion. */
+	writel_relaxed(BIT(20)|ticks, ctl_reg);
+	while ((readl_relaxed(status_reg) & BIT(25)) == 0)
+		cpu_relax();
+
+	/* Return measured ticks. */
+	return readl_relaxed(status_reg) & BM(24, 0);
+}
+
+/*
+ * Perform a hardware rate measurement for a given clock.
+ * FOR DEBUG USE ONLY: Measurements take ~15 ms!
+ */
+unsigned long measure_get_rate(struct clk *c)
+{
+	unsigned long flags;
+	u32 gcc_xo4_reg, regval;
+	u64 raw_count_short, raw_count_full;
+	unsigned long ret;
+	u32 sample_ticks = 0x10000;
+	u32 multiplier = to_mux_clk(c)->post_div + 1;
+	struct measure_clk_data *data = to_mux_clk(c)->priv;
+
+	regval = readl_relaxed(MUX_REG(to_mux_clk(c)));
+	/* clear and set post divider bits */
+	regval &= ~BM(15, 12);
+	regval |= BVAL(15, 12, to_mux_clk(c)->post_div);
+	writel_relaxed(regval, MUX_REG(to_mux_clk(c)));
+
+	ret = clk_prepare_enable(data->cxo);
+	if (ret) {
+		pr_warn("CXO clock failed to enable. Can't measure\n");
+		ret = 0;
+		goto fail;
+	}
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+
+	/* Enable CXO/4 and RINGOSC branch. */
+	gcc_xo4_reg = readl_relaxed(*data->base + data->xo_div4_cbcr);
+	gcc_xo4_reg |= CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(gcc_xo4_reg, *data->base + data->xo_div4_cbcr);
+
+	/*
+	 * The ring oscillator counter will not reset if the measured clock
+	 * is not running.  To detect this, run a short measurement before
+	 * the full measurement.  If the raw results of the two are the same
+	 * then the clock must be off.
+	 */
+
+	/* Run a short measurement. (~1 ms) */
+	raw_count_short = run_measurement(0x1000, *data->base + data->ctl_reg,
+					  *data->base + data->status_reg);
+	/* Run a full measurement. (~14 ms) */
+	raw_count_full = run_measurement(sample_ticks,
+					 *data->base + data->ctl_reg,
+					 *data->base + data->status_reg);
+
+	gcc_xo4_reg &= ~CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(gcc_xo4_reg, *data->base + data->xo_div4_cbcr);
+
+	/* Return 0 if the clock is off. */
+	if (raw_count_full == raw_count_short) {
+		ret = 0;
+	} else {
+		/* Compute rate in Hz. */
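+		/*
+		 * The counter ran for sample_ticks cycles of XO/4 (4.8 MHz),
+		 * so rate ~= raw_count * 4800000 / sample_ticks. The x10
+		 * scaling with the +15/+35 terms folds in fractional-cycle
+		 * corrections (+1.5 and +3.5) in fixed point.
+		 */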
+		raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
+		do_div(raw_count_full, ((sample_ticks * 10) + 35));
+		ret = (raw_count_full * multiplier);
+	}
+	writel_relaxed(data->plltest_val, *data->base + data->plltest_reg);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	clk_disable_unprepare(data->cxo);
+
+fail:
+	regval = readl_relaxed(MUX_REG(to_mux_clk(c)));
+	/* clear post divider bits */
+	regval &= ~BM(15, 12);
+	writel_relaxed(regval, MUX_REG(to_mux_clk(c)));
+
+	return ret;
+}
+
+struct frac_entry {
+	int num;
+	int den;
+};
+
+static void __iomem *local_vote_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+	static struct clk_register_data data1[] = {
+		{"CBCR", 0x0},
+	};
+	static struct clk_register_data data2[] = {
+		{"APPS_VOTE", 0x0},
+		{"APPS_SLEEP_VOTE", 0x4},
+	};
+	switch (n) {
+	case 0:
+		*regs = data1;
+		*size = ARRAY_SIZE(data1);
+		return CBCR_REG(vclk);
+	case 1:
+		*regs = data2;
+		*size = ARRAY_SIZE(data2);
+		return VOTE_REG(vclk);
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+}
+
+static struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
+	{52, 295},	/* 119 M */
+	{11, 57},	/* 130.25 M */
+	{63, 307},	/* 138.50 M */
+	{11, 50},	/* 148.50 M */
+	{47, 206},	/* 154 M */
+	{31, 100},	/* 205.25 M */
+	{107, 269},	/* 268.50 M */
+	{0, 0},
+};
+
+static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
+	{31, 211},	/* 119 M */
+	{32, 199},	/* 130.25 M */
+	{63, 307},	/* 138.50 M */
+	{11, 60},	/* 148.50 M */
+	{50, 263},	/* 154 M */
+	{31, 120},	/* 205.25 M */
+	{119, 359},	/* 268.50 M */
+	{0, 0},
+};
+
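+/*
+ * Return true if the RCG hardware is already programmed to @freq, so the
+ * caller can skip a redundant configuration update. Note this normalizes a
+ * divider field of 1 to 0, since both encode the same divider.
+ */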
+static bool is_same_rcg_config(struct rcg_clk *rcg, struct clk_freq_tbl *freq,
+			       bool has_mnd)
+{
+	u32 cfg;
+
+	/* RCG update pending */
+	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_CONFIG_DIRTY_MASK)
+		return false;
+	if (has_mnd)
+		if (readl_relaxed(M_REG(rcg)) != freq->m_val ||
+		    readl_relaxed(N_REG(rcg)) != freq->n_val ||
+		    readl_relaxed(D_REG(rcg)) != freq->d_val)
+			return false;
+	/*
+	 * Both 0 and 1 represent the same divider value in HW.
+	 * Always use 0 to simplify comparison.
+	 */
+	if ((freq->div_src_val & CFG_RCGR_DIV_MASK) == 1)
+		freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
+	cfg = readl_relaxed(CFG_RCGR_REG(rcg));
+	if ((cfg & CFG_RCGR_DIV_MASK) == 1)
+		cfg &= ~CFG_RCGR_DIV_MASK;
+	if (cfg != freq->div_src_val)
+		return false;
+
+	return true;
+}
+
+static int set_rate_edp_pixel(struct clk *clk, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	struct clk_freq_tbl *pixel_freq = rcg->current_freq;
+	struct frac_entry *frac;
+	int delta = 100000;
+	s64 request;
+	s64 src_rate;
+	unsigned long flags;
+
+	src_rate = clk_get_rate(clk->parent);
+
+	if (src_rate == 810000000)
+		frac = frac_table_810m;
+	else
+		frac = frac_table_675m;
+
+	while (frac->num) {
+		request = rate;
+		request *= frac->den;
+		request = div_s64(request, frac->num);
+		if ((src_rate < (request - delta)) ||
+			(src_rate > (request + delta))) {
+			frac++;
+			continue;
+		}
+
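+		/*
+		 * M/N/D are written in the RCG's inverted encodings:
+		 * N as ~(den - num) and D as ~den (presumably 2D = den,
+		 * giving a 50% duty cycle).
+		 */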
+		pixel_freq->div_src_val &= ~BM(4, 0);
+		if (frac->den == frac->num) {
+			pixel_freq->m_val = 0;
+			pixel_freq->n_val = 0;
+		} else {
+			pixel_freq->m_val = frac->num;
+			pixel_freq->n_val = ~(frac->den - frac->num);
+			pixel_freq->d_val = ~frac->den;
+		}
+		spin_lock_irqsave(&local_clock_reg_lock, flags);
+		if (!is_same_rcg_config(rcg, pixel_freq, true))
+			__set_rate_mnd(rcg, pixel_freq);
+		spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+enum handoff byte_rcg_handoff(struct clk *clk)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	u32 div_val;
+	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
+
+	/* If the pre-divider is used, find the rate after the division */
+	div_val = readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_DIV_MASK;
+	if (div_val > 1)
+		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
+	else
+		pre_div_rate = parent_rate;
+
+	clk->rate = pre_div_rate;
+
+	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static int set_rate_byte(struct clk *clk, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	struct clk *pll = clk->parent;
+	unsigned long source_rate, div, flags;
+	struct clk_freq_tbl *byte_freq = rcg->current_freq;
+	int rc;
+
+	if (rate == 0)
+		return -EINVAL;
+
+	rc = clk_set_rate(pll, rate);
+	if (rc)
+		return rc;
+
+	source_rate = clk_round_rate(pll, rate);
+	if ((2 * source_rate) % rate)
+		return -EINVAL;
+
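+	/* Half-integer divider encoding: the CFG field stores 2*divider - 1. */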
+	div = ((2 * source_rate)/rate) - 1;
+	if (div > CFG_RCGR_DIV_MASK)
+		return -EINVAL;
+
+	/*
+	 * Both 0 and 1 represent the same divider value in HW.
+	 * Always use 0 to simplify comparison.
+	 */
+	div = (div == 1) ? 0 : div;
+
+	byte_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
+	byte_freq->div_src_val |= BVAL(4, 0, div);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	if (!is_same_rcg_config(rcg, byte_freq, false))
+		__set_rate_hid(rcg, byte_freq);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	return 0;
+}
+
+enum handoff pixel_rcg_handoff(struct clk *clk)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	u32 div_val = 0, mval = 0, nval = 0, cfg_regval;
+	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+
+	/* If the pre-divider is used, find the rate after the division */
+	div_val = cfg_regval & CFG_RCGR_DIV_MASK;
+	if (div_val > 1)
+		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
+	else
+		pre_div_rate = parent_rate;
+
+	clk->rate = pre_div_rate;
+
+	/*
+	 * Pixel clocks have one frequency entry in their frequency table.
+	 * Update that entry.
+	 */
+	if (rcg->current_freq) {
+		rcg->current_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
+		rcg->current_freq->div_src_val |= div_val;
+	}
+
+	/* If MND is used, find the rate after the MND division */
+	if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
+		mval = readl_relaxed(M_REG(rcg));
+		nval = readl_relaxed(N_REG(rcg));
+		if (!nval)
+			return HANDOFF_DISABLED_CLK;
+		nval = (~nval) + mval;
+		if (rcg->current_freq) {
+			rcg->current_freq->n_val = ~(nval - mval);
+			rcg->current_freq->m_val = mval;
+			rcg->current_freq->d_val = ~nval;
+		}
+		clk->rate = (pre_div_rate * mval) / nval;
+	}
+
+	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
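+/*
+ * M/N ratios tried when deriving a pixel rate from the parent: the output
+ * is parent * {3/8, 2/9, 4/9, 1/1 (bypass)}.
+ */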
+static long round_rate_pixel(struct clk *clk, unsigned long rate)
+{
+	int frac_num[] = {3, 2, 4, 1};
+	int frac_den[] = {8, 9, 9, 1};
+	int delta = 100000;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(frac_num); i++) {
+		unsigned long request = (rate * frac_den[i]) / frac_num[i];
+		unsigned long src_rate;
+
+		src_rate = clk_round_rate(clk->parent, request);
+		if ((src_rate < (request - delta)) ||
+			(src_rate > (request + delta)))
+			continue;
+
+		return (src_rate * frac_num[i]) / frac_den[i];
+	}
+
+	return -EINVAL;
+}
+
+
+static int set_rate_pixel(struct clk *clk, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	struct clk_freq_tbl *pixel_freq = rcg->current_freq;
+	int frac_num[] = {3, 2, 4, 1};
+	int frac_den[] = {8, 9, 9, 1};
+	int delta = 100000;
+	int i, rc;
+
+	for (i = 0; i < ARRAY_SIZE(frac_num); i++) {
+		unsigned long request = (rate * frac_den[i]) / frac_num[i];
+		unsigned long src_rate;
+
+		src_rate = clk_round_rate(clk->parent, request);
+		if ((src_rate < (request - delta)) ||
+			(src_rate > (request + delta)))
+			continue;
+
+		rc =  clk_set_rate(clk->parent, src_rate);
+		if (rc)
+			return rc;
+
+		pixel_freq->div_src_val &= ~BM(4, 0);
+		if (frac_den[i] == frac_num[i]) {
+			pixel_freq->m_val = 0;
+			pixel_freq->n_val = 0;
+		} else {
+			pixel_freq->m_val = frac_num[i];
+			pixel_freq->n_val = ~(frac_den[i] - frac_num[i]);
+			pixel_freq->d_val = ~frac_den[i];
+		}
+		set_rate_mnd(rcg, pixel_freq);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int rcg_clk_set_parent(struct clk *clk, struct clk *parent_clk)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	struct clk *old_parent = clk->parent;
+	struct clk_freq_tbl *nf;
+	unsigned long flags;
+	int rc = 0;
+	unsigned int parent_rate, rate;
+	u32 m_val, n_val, d_val, div_val;
+	u32 cfg_regval;
+
+	/* Find the source clock freq tbl for the requested parent */
+	if (!rcg->freq_tbl)
+		return -ENXIO;
+
+	for (nf = rcg->freq_tbl; parent_clk != nf->src_clk; nf++) {
+		if (nf->freq_hz == FREQ_END)
+			return -ENXIO;
+	}
+
+	/* This implementation recommends that the RCG be unprepared
+	 * when switching RCG source since the divider configuration
+	 * remains unchanged.
+	 */
+	WARN(clk->prepare_count,
+		"Trying to switch RCG source while it is prepared!\n");
+
+	parent_rate = clk_get_rate(parent_clk);
+
+	div_val = (rcg->current_freq->div_src_val & CFG_RCGR_DIV_MASK);
+	if (div_val)
+		parent_rate /= ((div_val + 1) >> 1);
+
+	/* Update divisor. Source select bits should already be as expected */
+	nf->div_src_val &= ~CFG_RCGR_DIV_MASK;
+	nf->div_src_val |= div_val;
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+
+	if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
+		nf->m_val = m_val = readl_relaxed(M_REG(rcg));
+		n_val = readl_relaxed(N_REG(rcg));
+		d_val = readl_relaxed(D_REG(rcg));
+
+		/* Sign extend the n and d values as those in registers are not
+		 * sign extended.
+		 */
+		n_val |= (n_val >> 8) ? BM(31, 16) : BM(31, 8);
+		d_val |= (d_val >> 8) ? BM(31, 16) : BM(31, 8);
+
+		nf->n_val = n_val;
+		nf->d_val = d_val;
+
+		n_val = ~(n_val) + m_val;
+		rate = parent_rate * m_val;
+		if (n_val)
+			rate /= n_val;
+		else
+			WARN(1, "n_val was 0!!");
+	} else
+		rate = parent_rate;
+
+	/* Warn if switching to the new parent with the current m, n, d values
+	 * violates the voltage constraints for the RCG.
+	 */
+	WARN(!is_rate_valid(clk, rate) && clk->prepare_count,
+		"Switch to new RCG parent violates voltage requirement!\n");
+
+	rc = __clk_pre_reparent(clk, nf->src_clk, &flags);
+	if (rc)
+		return rc;
+
+	/* Switch RCG source */
+	rcg->set_rate(rcg, nf);
+
+	rcg->current_freq = nf;
+	clk->parent = parent_clk;
+	clk->rate = rate;
+
+	__clk_post_reparent(clk, old_parent, &flags);
+
+	return 0;
+}
+
+/*
+ * Unlike other clocks, the HDMI rate is adjusted through PLL
+ * re-programming. It is also routed through an HID divider.
+ */
+static int rcg_clk_set_rate_hdmi(struct clk *c, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	struct clk_freq_tbl *nf = rcg->freq_tbl;
+	int rc;
+
+	rc = clk_set_rate(nf->src_clk, rate);
+	if (rc < 0)
+		goto out;
+	set_rate_hid(rcg, nf);
+
+	rcg->current_freq = nf;
+out:
+	return rc;
+}
+
+static struct clk *rcg_hdmi_clk_get_parent(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	struct clk_freq_tbl *freq = rcg->freq_tbl;
+	u32 cmd_rcgr_regval;
+
+	/* Is there a pending configuration? */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
+		return NULL;
+
+	rcg->current_freq->freq_hz = clk_get_rate(c->parent);
+
+	return freq->src_clk;
+}
+
+static int rcg_clk_set_rate_edp(struct clk *c, unsigned long rate)
+{
+	struct clk_freq_tbl *nf;
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	int rc;
+
+	for (nf = rcg->freq_tbl; nf->freq_hz != rate; nf++)
+		if (nf->freq_hz == FREQ_END) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+	rc = clk_set_rate(nf->src_clk, rate);
+	if (rc < 0)
+		goto out;
+	set_rate_hid(rcg, nf);
+
+	rcg->current_freq = nf;
+	c->parent = nf->src_clk;
+out:
+	return rc;
+}
+
+static struct clk *edp_clk_get_parent(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	struct clk *clk;
+	struct clk_freq_tbl *freq;
+	unsigned long rate;
+	u32 cmd_rcgr_regval;
+
+	/* Is there a pending configuration? */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
+		return NULL;
+
+	/* Figure out what rate the rcg is running at */
+	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+		clk = freq->src_clk;
+		if (clk && clk->ops->get_rate) {
+			rate = clk->ops->get_rate(clk);
+			if (rate == freq->freq_hz)
+				break;
+		}
+	}
+
+	/* No known frequency found */
+	if (freq->freq_hz == FREQ_END)
+		return NULL;
+
+	rcg->current_freq = freq;
+	return freq->src_clk;
+}
+
+static int gate_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	u32 regval;
+	struct gate_clk *g = to_gate_clk(c);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(GATE_EN_REG(g));
+	regval |= g->en_mask;
+	writel_relaxed(regval, GATE_EN_REG(g));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	if (g->delay_us)
+		udelay(g->delay_us);
+
+	return 0;
+}
+
+static void gate_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	u32 regval;
+	struct gate_clk *g = to_gate_clk(c);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(GATE_EN_REG(g));
+	regval &= ~(g->en_mask);
+	writel_relaxed(regval, GATE_EN_REG(g));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	if (g->delay_us)
+		udelay(g->delay_us);
+}
+
+static void __iomem *gate_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct gate_clk *g = to_gate_clk(c);
+	static struct clk_register_data data[] = {
+		{"EN_REG", 0x0},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return GATE_EN_REG(g);
+}
+
+static enum handoff gate_clk_handoff(struct clk *c)
+{
+	struct gate_clk *g = to_gate_clk(c);
+	u32 regval;
+
+	regval = readl_relaxed(GATE_EN_REG(g));
+	if (regval & g->en_mask)
+		return HANDOFF_ENABLED_CLK;
+
+	return HANDOFF_DISABLED_CLK;
+}
+
+static int gate_clk_set_flags(struct clk *c, unsigned long flags)
+{
+	return cbcr_set_flags(GATE_EN_REG(to_gate_clk(c)), flags);
+}
+
+
+static int reset_clk_rst(struct clk *c, enum clk_reset_action action)
+{
+	struct reset_clk *rst = to_reset_clk(c);
+
+	if (!rst->reset_reg)
+		return -EPERM;
+
+	return __branch_clk_reset(RST_REG(rst), action);
+}
+
+static void __iomem *reset_clk_list_registers(struct clk *clk, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct reset_clk *rst = to_reset_clk(clk);
+	static struct clk_register_data data[] = {
+		{"BCR", 0x0},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return RST_REG(rst);
+}
+
+static DEFINE_SPINLOCK(mux_reg_lock);
+
+static int mux_reg_enable(struct mux_clk *clk)
+{
+	u32 regval;
+	unsigned long flags;
+
+	if (!clk->en_mask)
+		return 0;
+
+	spin_lock_irqsave(&mux_reg_lock, flags);
+	regval = readl_relaxed(*clk->base + clk->en_offset);
+	regval |= clk->en_mask;
+	writel_relaxed(regval, *clk->base + clk->en_offset);
+	/* Ensure enable request goes through before returning */
+	mb();
+	spin_unlock_irqrestore(&mux_reg_lock, flags);
+
+	return 0;
+}
+
+static void mux_reg_disable(struct mux_clk *clk)
+{
+	u32 regval;
+	unsigned long flags;
+
+	if (!clk->en_mask)
+		return;
+
+	spin_lock_irqsave(&mux_reg_lock, flags);
+	regval = readl_relaxed(*clk->base + clk->en_offset);
+	regval &= ~clk->en_mask;
+	writel_relaxed(regval, *clk->base + clk->en_offset);
+	spin_unlock_irqrestore(&mux_reg_lock, flags);
+}
+
+static int mux_reg_set_mux_sel(struct mux_clk *clk, int sel)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mux_reg_lock, flags);
+	regval = readl_relaxed(MUX_REG(clk));
+	regval &= ~(clk->mask << clk->shift);
+	regval |= (sel & clk->mask) << clk->shift;
+	writel_relaxed(regval, MUX_REG(clk));
+	/* Ensure switch request goes through before returning */
+	mb();
+	spin_unlock_irqrestore(&mux_reg_lock, flags);
+
+	return 0;
+}
+
+static int mux_reg_get_mux_sel(struct mux_clk *clk)
+{
+	u32 regval = readl_relaxed(MUX_REG(clk));
+
+	return (regval >> clk->shift) & clk->mask;
+}
+
+static bool mux_reg_is_enabled(struct mux_clk *clk)
+{
+	u32 regval = readl_relaxed(MUX_REG(clk));
+
+	return !!(regval & clk->en_mask);
+}
+
+static void __iomem *mux_clk_list_registers(struct mux_clk *clk, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	static struct clk_register_data data[] = {
+		{"DEBUG_CLK_CTL", 0x0},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return *clk->base + clk->offset;
+}
+
+/* PLL post-divider setting for each divider value */
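+/*
+ * Note the encodings are not one-to-one (0x3 covers both div 3 and 4, 0x7
+ * both 7 and 8); postdiv_reg_get_div() returns the first matching entry.
+ */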
+static struct div_map postdiv_map[] = {
+	{  0x0, 1  },
+	{  0x1, 2  },
+	{  0x3, 3  },
+	{  0x3, 4  },
+	{  0x5, 5  },
+	{  0x7, 7  },
+	{  0x7, 8  },
+	{  0xF, 16 },
+};
+
+static int postdiv_reg_set_div(struct div_clk *clk, int div)
+{
+	struct clk *parent = NULL;
+	u32 regval;
+	unsigned long flags;
+	int mask = -1; /* signed so the not-found check below works */
+	int i, ret = 0;
+
+	/* Divider is not configurable */
+	if (!clk->mask)
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(postdiv_map); i++) {
+		if (postdiv_map[i].div == div) {
+			mask = postdiv_map[i].mask;
+			break;
+		}
+	}
+
+	if (mask < 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&clk->c.lock, flags);
+	parent = clk->c.parent;
+	if (parent->count && parent->ops->disable)
+		parent->ops->disable(parent);
+
+	regval = readl_relaxed(DIV_REG(clk));
+	regval &= ~(clk->mask << clk->shift);
+	regval |= (mask & clk->mask) << clk->shift;
+	writel_relaxed(regval, DIV_REG(clk));
+	/* Ensure switch request goes through before returning */
+	mb();
+
+	if (parent->count && parent->ops->enable) {
+		ret = parent->ops->enable(parent);
+		if (ret)
+			pr_err("Failed to force enable div parent!\n");
+	}
+
+	spin_unlock_irqrestore(&clk->c.lock, flags);
+	return ret;
+}
+
+static int postdiv_reg_get_div(struct div_clk *clk)
+{
+	u32 regval;
+	int i, div = 0;
+
+	/* Divider is not configurable */
+	if (!clk->mask)
+		return clk->data.div;
+
+	regval = readl_relaxed(DIV_REG(clk));
+	regval = (regval >> clk->shift) & clk->mask;
+	for (i = 0; i < ARRAY_SIZE(postdiv_map); i++) {
+		if (postdiv_map[i].mask == regval) {
+			div = postdiv_map[i].div;
+			break;
+		}
+	}
+	if (!div)
+		return -EINVAL;
+
+	return div;
+}
+
+static int div_reg_set_div(struct div_clk *clk, int div)
+{
+	u32 regval;
+	unsigned long flags;
+
+	/* Divider is not configurable */
+	if (!clk->mask)
+		return 0;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(*clk->base + clk->offset);
+	regval &= ~(clk->mask << clk->shift);
+	regval |= (div & clk->mask) << clk->shift;
+	writel_relaxed(regval, *clk->base + clk->offset);
+	/* Ensure switch request goes through before returning */
+	mb();
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	return 0;
+}
+
+static int div_reg_get_div(struct div_clk *clk)
+{
+	u32 regval;
+	/* Divider is not configurable */
+	if (!clk->mask)
+		return clk->data.div;
+
+	regval = readl_relaxed(*clk->base + clk->offset);
+	return (regval >> clk->shift) & clk->mask;
+}
+
+/* =================Half-integer RCG without MN counter================= */
+#define RCGR_CMD_REG(x) ((x)->base + (x)->div_offset)
+#define RCGR_DIV_REG(x) ((x)->base + (x)->div_offset + 4)
+#define RCGR_SRC_REG(x) ((x)->base + (x)->div_offset + 4)
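+/* The divider and source-select fields live in the same CFG register. */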
+
+static int rcg_mux_div_update_config(struct mux_div_clk *md)
+{
+	u32 regval, count;
+
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
+	writel_relaxed(regval, RCGR_CMD_REG(md));
+
+	/* Wait for update to take effect */
+	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
+		if (!(readl_relaxed(RCGR_CMD_REG(md)) &
+			    CMD_RCGR_CONFIG_UPDATE_BIT))
+			return 0;
+		udelay(1);
+	}
+
+	CLK_WARN(&md->c, true, "didn't update its configuration.");
+
+	return -EBUSY;
+}
+
+static void rcg_get_src_div(struct mux_div_clk *md, u32 *src_sel, u32 *div)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	/* Is there a pending configuration? */
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	if (regval & CMD_RCGR_CONFIG_DIRTY_MASK) {
+		CLK_WARN(&md->c, true, "it's a pending configuration.");
+		spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+		return;
+	}
+
+	regval = readl_relaxed(RCGR_DIV_REG(md));
+	regval &= (md->div_mask << md->div_shift);
+	*div = regval >> md->div_shift;
+
+	/* bypass */
+	if (*div == 0)
+		*div = 1;
+	/* the div is doubled here */
+	*div += 1;
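+	/* e.g. a raw field value of 3 decodes to a doubled div of 4, i.e. divide-by-2 */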
+
+	regval = readl_relaxed(RCGR_SRC_REG(md));
+	regval &= (md->src_mask << md->src_shift);
+	*src_sel = regval >> md->src_shift;
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void mux_div_set_force_enable(struct mux_div_clk *md)
+{
+	u32 regval;
+	unsigned long flags;
+	int count;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	regval |= CMD_RCGR_ROOT_ENABLE_BIT;
+	writel_relaxed(regval, RCGR_CMD_REG(md));
+
+	/* Wait for RCG to turn ON */
+	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
+		if (!(readl_relaxed(RCGR_CMD_REG(md)) &
+				CMD_RCGR_CONFIG_UPDATE_BIT))
+			goto exit;
+		udelay(1);
+	}
+	CLK_WARN(&md->c, count == 0, "rcg didn't turn on.");
+exit:
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void mux_div_clear_force_enable(struct mux_div_clk *md)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	regval &= ~CMD_RCGR_ROOT_ENABLE_BIT;
+	writel_relaxed(regval, RCGR_CMD_REG(md));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static int rcg_set_src_div(struct mux_div_clk *md, u32 src_sel, u32 div)
+{
+	u32 regval;
+	unsigned long flags;
+	int ret;
+
+	/* for half-integer divider, div here is doubled */
+	if (div)
+		div -= 1;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(RCGR_DIV_REG(md));
+	regval &= ~(md->div_mask << md->div_shift);
+	regval |= div << md->div_shift;
+	writel_relaxed(regval, RCGR_DIV_REG(md));
+
+	regval = readl_relaxed(RCGR_SRC_REG(md));
+	regval &= ~(md->src_mask << md->src_shift);
+	regval |= src_sel << md->src_shift;
+	writel_relaxed(regval, RCGR_SRC_REG(md));
+
+	ret = rcg_mux_div_update_config(md);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	return ret;
+}
+
+static int rcg_enable(struct mux_div_clk *md)
+{
+	if (md->force_enable_md)
+		mux_div_set_force_enable(md);
+
+	return rcg_set_src_div(md, md->src_sel, md->data.div);
+}
+
+static void rcg_disable(struct mux_div_clk *md)
+{
+	u32 src_sel;
+
+	if (md->force_enable_md)
+		mux_div_clear_force_enable(md);
+
+	if (!md->safe_freq)
+		return;
+
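+	/* Park the RCG on its safe parent and divider instead of gating it */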
+	src_sel = parent_to_src_sel(md->parents, md->num_parents,
+				md->safe_parent);
+
+	rcg_set_src_div(md, src_sel, md->safe_div);
+}
+
+static bool rcg_is_enabled(struct mux_div_clk *md)
+{
+	u32 regval;
+
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	if (regval & CMD_RCGR_ROOT_STATUS_BIT)
+		return false;
+	else
+		return true;
+}
+
+static void __iomem *rcg_list_registers(struct mux_div_clk *md, int n,
+			struct clk_register_data **regs, u32 *size)
+{
+	static struct clk_register_data data[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return RCGR_CMD_REG(md);
+}
+
+const struct clk_ops clk_ops_empty;
+
+const struct clk_ops clk_ops_rst = {
+	.reset = reset_clk_rst,
+	.list_registers = reset_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_rcg = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = rcg_clk_set_rate,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_clk_handoff,
+	.get_parent = rcg_clk_get_parent,
+	.set_parent = rcg_clk_set_parent,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_rcg_mnd = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = rcg_clk_set_rate,
+	.set_duty_cycle = rcg_clk_set_duty_cycle,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_mnd_clk_handoff,
+	.get_parent = rcg_mnd_clk_get_parent,
+	.set_parent = rcg_clk_set_parent,
+	.list_registers = rcg_mnd_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_pixel = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_pixel,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = round_rate_pixel,
+	.handoff = pixel_rcg_handoff,
+	.list_registers = rcg_mnd_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_pixel_multiparent = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_pixel,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = round_rate_pixel,
+	.handoff = pixel_rcg_handoff,
+	.list_registers = rcg_mnd_clk_list_registers,
+	.get_parent = display_clk_get_parent,
+	.set_parent = rcg_clk_set_parent,
+};
+
+const struct clk_ops clk_ops_edppixel = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_edp_pixel,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = pixel_rcg_handoff,
+	.list_registers = rcg_mnd_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_byte = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_byte,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = byte_rcg_handoff,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_byte_multiparent = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = set_rate_byte,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = byte_rcg_handoff,
+	.list_registers = rcg_hid_clk_list_registers,
+	.get_parent = display_clk_get_parent,
+	.set_parent = rcg_clk_set_parent,
+};
+
+const struct clk_ops clk_ops_rcg_hdmi = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = rcg_clk_set_rate_hdmi,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_clk_handoff,
+	.get_parent = rcg_hdmi_clk_get_parent,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_rcg_edp = {
+	.enable = rcg_clk_enable,
+	.disable = rcg_clk_disable,
+	.set_rate = rcg_clk_set_rate_edp,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_clk_handoff,
+	.get_parent = edp_clk_get_parent,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_branch = {
+	.enable = branch_clk_enable,
+	.prepare = branch_clk_prepare,
+	.disable = branch_clk_disable,
+	.unprepare = branch_clk_unprepare,
+	.set_rate = branch_clk_set_rate,
+	.get_rate = branch_clk_get_rate,
+	.list_rate = branch_clk_list_rate,
+	.round_rate = branch_clk_round_rate,
+	.reset = branch_clk_reset,
+	.set_flags = branch_clk_set_flags,
+	.handoff = branch_clk_handoff,
+	.list_registers = branch_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_vote = {
+	.enable = local_vote_clk_enable,
+	.disable = local_vote_clk_disable,
+	.reset = local_vote_clk_reset,
+	.handoff = local_vote_clk_handoff,
+	.list_registers = local_vote_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_gate = {
+	.enable = gate_clk_enable,
+	.disable = gate_clk_disable,
+	.set_rate = parent_set_rate,
+	.get_rate = parent_get_rate,
+	.round_rate = parent_round_rate,
+	.set_flags = gate_clk_set_flags,
+	.handoff = gate_clk_handoff,
+	.list_registers = gate_clk_list_registers,
+};
+
+struct clk_mux_ops mux_reg_ops = {
+	.enable = mux_reg_enable,
+	.disable = mux_reg_disable,
+	.set_mux_sel = mux_reg_set_mux_sel,
+	.get_mux_sel = mux_reg_get_mux_sel,
+	.is_enabled = mux_reg_is_enabled,
+	.list_registers = mux_clk_list_registers,
+};
+
+struct clk_div_ops div_reg_ops = {
+	.set_div = div_reg_set_div,
+	.get_div = div_reg_get_div,
+};
+
+const struct clk_div_ops postdiv_reg_ops = {
+	.set_div = postdiv_reg_set_div,
+	.get_div = postdiv_reg_get_div,
+};
+
+struct mux_div_ops rcg_mux_div_ops = {
+	.enable = rcg_enable,
+	.disable = rcg_disable,
+	.set_src_div = rcg_set_src_div,
+	.get_src_div = rcg_get_src_div,
+	.is_enabled = rcg_is_enabled,
+	.list_registers = rcg_list_registers,
+};
+
+static void *cbc_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct msmclk_data *drv;
+	struct branch_clk *branch_clk;
+	int rc;
+
+	branch_clk = devm_kzalloc(dev, sizeof(*branch_clk), GFP_KERNEL);
+	if (!branch_clk)
+		return ERR_PTR(-ENOMEM);
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	branch_clk->base = &drv->base;
+
+	rc = of_property_read_u32(np, "qcom,base-offset",
+						&branch_clk->cbcr_reg);
+	if (rc) {
+		dt_err(np, "missing/incorrect qcom,base-offset dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	/* Optional property */
+	of_property_read_u32(np, "qcom,bcr-offset", &branch_clk->bcr_reg);
+
+	of_property_read_u32(np, "qcom,halt-check",
+					(u32 *)&branch_clk->halt_check);
+
+	branch_clk->has_sibling = of_property_read_bool(np,
+							"qcom,has-sibling");
+
+	branch_clk->c.ops = &clk_ops_branch;
+
+	return msmclk_generic_clk_init(dev, np, &branch_clk->c);
+}
+MSMCLK_PARSER(cbc_dt_parser, "qcom,cbc", 0);
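+/*
+ * Illustrative branch clock node consumed by the parser above. The property
+ * names match the code; the label, node name and values are hypothetical:
+ *
+ *	clock-gcc {
+ *		example_ahb_clk: example-ahb-clk {
+ *			compatible = "qcom,cbc";
+ *			qcom,base-offset = <0x1008>;
+ *			qcom,halt-check = <1>;
+ *		};
+ *	};
+ */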
+
+static void *local_vote_clk_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct local_vote_clk *vote_clk;
+	struct msmclk_data *drv;
+	int rc, val;
+
+	vote_clk = devm_kzalloc(dev, sizeof(*vote_clk), GFP_KERNEL);
+	if (!vote_clk)
+		return ERR_PTR(-ENOMEM);
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	vote_clk->base = &drv->base;
+
+	rc = of_property_read_u32(np, "qcom,base-offset",
+						&vote_clk->cbcr_reg);
+	if (rc) {
+		dt_err(np, "missing/incorrect qcom,base-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,en-offset", &vote_clk->vote_reg);
+	if (rc) {
+		dt_err(np, "missing/incorrect qcom,en-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,en-bit", &val);
+	if (rc) {
+		dt_err(np, "missing/incorrect qcom,en-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	vote_clk->en_mask = BIT(val);
+
+	vote_clk->c.ops = &clk_ops_vote;
+
+	/* Optional property */
+	of_property_read_u32(np, "qcom,bcr-offset", &vote_clk->bcr_reg);
+
+	return msmclk_generic_clk_init(dev, np, &vote_clk->c);
+}
+MSMCLK_PARSER(local_vote_clk_dt_parser, "qcom,local-vote-clk", 0);
+
+static void *gate_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct gate_clk *gate_clk;
+	struct msmclk_data *drv;
+	u32 en_bit, rc;
+
+	gate_clk = devm_kzalloc(dev, sizeof(*gate_clk), GFP_KERNEL);
+	if (!gate_clk)
+		return ERR_PTR(-ENOMEM);
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	gate_clk->base = &drv->base;
+
+	rc = of_property_read_u32(np, "qcom,en-offset", &gate_clk->en_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,en-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,en-bit", &en_bit);
+	if (rc) {
+		dt_err(np, "missing qcom,en-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	gate_clk->en_mask = BIT(en_bit);
+
+	/* Optional Property */
+	rc = of_property_read_u32(np, "qcom,delay", &gate_clk->delay_us);
+	if (rc)
+		gate_clk->delay_us = 0;
+
+	gate_clk->c.ops = &clk_ops_gate;
+	return msmclk_generic_clk_init(dev, np, &gate_clk->c);
+}
+MSMCLK_PARSER(gate_clk_dt_parser, "qcom,gate-clk", 0);
+
+static inline u32 rcg_calc_m(u32 m, u32 n)
+{
+	return m;
+}
+
+static inline u32 rcg_calc_n(u32 m, u32 n)
+{
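+	/* The hardware N register holds ~(n - m); 0 (n <= 1) leaves the counter unused */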
+	n = n > 1 ? n : 0;
+	return ~((n)-(m)) * !!(n);
+}
+
+static inline u32 rcg_calc_duty_cycle(u32 m, u32 n)
+{
+	return ~n;
+}
+
+static inline u32 rcg_calc_div_src(u32 div_int, u32 div_frac, u32 src_sel)
+{
+	int div = 2 * div_int + (div_frac ? 1 : 0) - 1;
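+	/* e.g. div_int = 2 with a fractional half gives a field of 4, i.e. divide-by-2.5 */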
+	/* set bypass mode instead of a divider of 1 */
+	div = (div != 1) ? div : 0;
+	return BVAL(4, 0, max(div, 0))
+			| BVAL(10, 8, src_sel);
+}
+
+struct clk_src *msmclk_parse_clk_src(struct device *dev,
+				struct device_node *np, int *array_size)
+{
+	struct clk_src *clks;
+	const void *prop;
+	int num_parents, len, i, prop_len, rc;
+	char *name = "qcom,parents";
+
+	if (!array_size) {
+		dt_err(np, "array_size must be a valid pointer\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	prop = of_get_property(np, name, &prop_len);
+	if (!prop) {
+		dt_prop_err(np, name, "missing dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	len = sizeof(phandle) + sizeof(u32);
+	if (prop_len % len) {
+		dt_prop_err(np, name, "invalid property length\n");
+		return ERR_PTR(-EINVAL);
+	}
+	num_parents = prop_len / len;
+
+	clks = devm_kzalloc(dev, sizeof(*clks) * num_parents, GFP_KERNEL);
+	if (!clks)
+		return ERR_PTR(-ENOMEM);
+
+	/* Assume that u32 and phandle have the same size */
+	for (i = 0; i < num_parents; i++) {
+		phandle p;
+		struct clk_src *a = &clks[i];
+
+		rc = of_property_read_u32_index(np, name, 2 * i, &a->sel);
+		rc |= of_property_read_phandle_index(np, name, 2 * i + 1, &p);
+
+		if (rc) {
+			dt_prop_err(np, name,
+				"unable to read parent clock or mux index\n");
+			return ERR_PTR(-EINVAL);
+		}
+
+		a->src = msmclk_parse_phandle(dev, p);
+		if (IS_ERR(a->src)) {
+			dt_prop_err(np, name, "hashtable lookup failed\n");
+			return ERR_CAST(a->src);
+		}
+	}
+
+	*array_size = num_parents;
+
+	return clks;
+}
+
+static int rcg_parse_freq_tbl(struct device *dev,
+			struct device_node *np, struct rcg_clk *rcg)
+{
+	const void *prop;
+	u32 prop_len, num_rows, i, j = 0;
+	struct clk_freq_tbl *tbl;
+	int rc;
+	char *name = "qcom,freq-tbl";
+
+	prop = of_get_property(np, name, &prop_len);
+	if (!prop) {
+		dt_prop_err(np, name, "missing dt property\n");
+		return -EINVAL;
+	}
+
+	prop_len /= sizeof(u32);
+	if (prop_len % 6) {
+		dt_prop_err(np, name, "bad length\n");
+		return -EINVAL;
+	}
+
+	num_rows = prop_len / 6;
+	/* Array is null terminated. */
+	rcg->freq_tbl = devm_kzalloc(dev,
+				sizeof(*rcg->freq_tbl) * (num_rows + 1),
+				GFP_KERNEL);
+
+	if (!rcg->freq_tbl) {
+		dt_err(np, "memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	tbl = rcg->freq_tbl;
+	for (i = 0; i < num_rows; i++, tbl++) {
+		phandle p;
+		u32 div_int, div_frac, m, n, src_sel, freq_hz;
+
+		rc = of_property_read_u32_index(np, name, j++, &freq_hz);
+		rc |= of_property_read_u32_index(np, name, j++, &div_int);
+		rc |= of_property_read_u32_index(np, name, j++, &div_frac);
+		rc |= of_property_read_u32_index(np, name, j++, &m);
+		rc |= of_property_read_u32_index(np, name, j++, &n);
+		rc |= of_property_read_u32_index(np, name, j++, &p);
+
+		if (rc) {
+			dt_prop_err(np, name, "unable to read u32\n");
+			return -EINVAL;
+		}
+
+		tbl->freq_hz = (unsigned long)freq_hz;
+		tbl->src_clk = msmclk_parse_phandle(dev, p);
+		if (IS_ERR_OR_NULL(tbl->src_clk)) {
+			dt_prop_err(np, name, "hashtable lookup failure\n");
+			return PTR_ERR(tbl->src_clk);
+		}
+
+		tbl->m_val = rcg_calc_m(m, n);
+		tbl->n_val = rcg_calc_n(m, n);
+		tbl->d_val = rcg_calc_duty_cycle(m, n);
+
+		src_sel = parent_to_src_sel(rcg->c.parents,
+					rcg->c.num_parents, tbl->src_clk);
+		tbl->div_src_val = rcg_calc_div_src(div_int, div_frac,
+								src_sel);
+	}
+	/* End table with special value */
+	tbl->freq_hz = FREQ_END;
+	return 0;
+}
+
+static void *rcg_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct rcg_clk *rcg;
+	struct msmclk_data *drv;
+	int rc;
+
+	rcg = devm_kzalloc(dev, sizeof(*rcg), GFP_KERNEL);
+	if (!rcg)
+		return ERR_PTR(-ENOMEM);
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return drv;
+	rcg->base = &drv->base;
+
+	rcg->c.parents = msmclk_parse_clk_src(dev, np, &rcg->c.num_parents);
+	if (IS_ERR(rcg->c.parents)) {
+		dt_err(np, "unable to read parents\n");
+		return ERR_CAST(rcg->c.parents);
+	}
+
+	rc = of_property_read_u32(np, "qcom,base-offset", &rcg->cmd_rcgr_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,base-offset dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	rc = rcg_parse_freq_tbl(dev, np, rcg);
+	if (rc) {
+		dt_err(np, "unable to read freq_tbl\n");
+		return ERR_PTR(rc);
+	}
+	rcg->current_freq = &rcg_dummy_freq;
+
+	if (of_device_is_compatible(np, "qcom,rcg-hid")) {
+		rcg->c.ops = &clk_ops_rcg;
+		rcg->set_rate = set_rate_hid;
+	} else if (of_device_is_compatible(np, "qcom,rcg-mn")) {
+		rcg->c.ops = &clk_ops_rcg_mnd;
+		rcg->set_rate = set_rate_mnd;
+	} else {
+		dt_err(np, "unexpected compatible string\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return msmclk_generic_clk_init(dev, np, &rcg->c);
+}
+MSMCLK_PARSER(rcg_clk_dt_parser, "qcom,rcg-hid", 0);
+MSMCLK_PARSER(rcg_clk_dt_parser, "qcom,rcg-mn", 1);
+
+static int parse_rec_parents(struct device *dev,
+			struct device_node *np, struct mux_clk *mux)
+{
+	int i, rc;
+	char *name = "qcom,recursive-parents";
+	phandle p;
+
+	mux->num_rec_parents = of_property_count_phandles(np, name);
+	if (mux->num_rec_parents <= 0)
+		return 0;
+
+	mux->rec_parents = devm_kzalloc(dev,
+			sizeof(*mux->rec_parents) * mux->num_rec_parents,
+			GFP_KERNEL);
+
+	if (!mux->rec_parents) {
+		dt_err(np, "memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < mux->num_rec_parents; i++) {
+		rc = of_property_read_phandle_index(np, name, i, &p);
+		if (rc) {
+			dt_prop_err(np, name, "unable to read u32\n");
+			return rc;
+		}
+
+		mux->rec_parents[i] = msmclk_parse_phandle(dev, p);
+		if (IS_ERR(mux->rec_parents[i])) {
+			dt_prop_err(np, name, "hashtable lookup failure\n");
+			return PTR_ERR(mux->rec_parents[i]);
+		}
+	}
+
+	return 0;
+}
+
+static void *mux_reg_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct mux_clk *mux;
+	struct msmclk_data *drv;
+	int rc;
+
+	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+	if (!mux)
+		return ERR_PTR(-ENOMEM);
+
+	mux->parents = msmclk_parse_clk_src(dev, np, &mux->num_parents);
+	if (IS_ERR(mux->parents))
+		return mux->parents;
+
+	mux->c.parents = mux->parents;
+	mux->c.num_parents = mux->num_parents;
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return drv;
+	mux->base = &drv->base;
+
+	rc = parse_rec_parents(dev, np, mux);
+	if (rc) {
+		dt_err(np, "Incorrect qcom,recursive-parents dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	rc = of_property_read_u32(np, "qcom,offset", &mux->offset);
+	if (rc) {
+		dt_err(np, "missing qcom,offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,mask", &mux->mask);
+	if (rc) {
+		dt_err(np, "missing qcom,mask dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,shift", &mux->shift);
+	if (rc) {
+		dt_err(np, "missing qcom,shift dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	mux->c.ops = &clk_ops_gen_mux;
+	mux->ops = &mux_reg_ops;
+
+	/* Optional Properties */
+	of_property_read_u32(np, "qcom,en-offset", &mux->en_offset);
+	of_property_read_u32(np, "qcom,en-mask", &mux->en_mask);
+
+	return msmclk_generic_clk_init(dev, np, &mux->c);
+}
+MSMCLK_PARSER(mux_reg_clk_dt_parser, "qcom,mux-reg", 0);
+
+static void *measure_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct mux_clk *mux;
+	struct clk *c;
+	struct measure_clk_data *p;
+	struct clk_ops *clk_ops_measure_mux;
+	phandle cxo;
+	int rc;
+
+	c = mux_reg_clk_dt_parser(dev, np);
+	if (IS_ERR(c))
+		return c;
+
+	mux = to_mux_clk(c);
+
+	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return ERR_PTR(-ENOMEM);
+
+	rc = of_property_read_phandle_index(np, "qcom,cxo", 0, &cxo);
+	if (rc) {
+		dt_err(np, "missing qcom,cxo\n");
+		return ERR_PTR(-EINVAL);
+	}
+	p->cxo = msmclk_parse_phandle(dev, cxo);
+	if (IS_ERR_OR_NULL(p->cxo)) {
+		dt_prop_err(np, "qcom,cxo", "hashtable lookup failure\n");
+		return p->cxo;
+	}
+
+	rc = of_property_read_u32(np, "qcom,xo-div4-cbcr", &p->xo_div4_cbcr);
+	if (rc) {
+		dt_err(np, "missing qcom,xo-div4-cbcr dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,test-pad-config", &p->plltest_val);
+	if (rc) {
+		dt_err(np, "missing qcom,test-pad-config dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
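+	/* Register layout assumed here: CTL, STATUS and PLLTEST follow the mux at +0x4, +0x8 and +0xC */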
+	p->base = mux->base;
+	p->ctl_reg = mux->offset + 0x4;
+	p->status_reg = mux->offset + 0x8;
+	p->plltest_reg = mux->offset + 0xC;
+	mux->priv = p;
+
+	clk_ops_measure_mux = devm_kzalloc(dev, sizeof(*clk_ops_measure_mux),
+								GFP_KERNEL);
+	if (!clk_ops_measure_mux)
+		return ERR_PTR(-ENOMEM);
+
+	*clk_ops_measure_mux = clk_ops_gen_mux;
+	clk_ops_measure_mux->get_rate = measure_get_rate;
+
+	mux->c.ops = clk_ops_measure_mux;
+
+	/* Already did generic clk init */
+	return &mux->c;
+}
+MSMCLK_PARSER(measure_clk_dt_parser, "qcom,measure-mux", 0);
+
+static void *div_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct div_clk *div_clk;
+	struct msmclk_data *drv;
+	int rc;
+
+	div_clk = devm_kzalloc(dev, sizeof(*div_clk), GFP_KERNEL);
+	if (!div_clk)
+		return ERR_PTR(-ENOMEM);
+
+	rc = of_property_read_u32(np, "qcom,max-div", &div_clk->data.max_div);
+	if (rc) {
+		dt_err(np, "missing qcom,max-div\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,min-div", &div_clk->data.min_div);
+	if (rc) {
+		dt_err(np, "missing qcom,min-div\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,base-offset", &div_clk->offset);
+	if (rc) {
+		dt_err(np, "missing qcom,base-offset\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,mask", &div_clk->mask);
+	if (rc) {
+		dt_err(np, "missing qcom,mask\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,shift", &div_clk->shift);
+	if (rc) {
+		dt_err(np, "missing qcom,shift\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (of_property_read_bool(np, "qcom,slave-div"))
+		div_clk->c.ops = &clk_ops_slave_div;
+	else
+		div_clk->c.ops = &clk_ops_div;
+	div_clk->ops = &div_reg_ops;
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	div_clk->base = &drv->base;
+
+	return msmclk_generic_clk_init(dev, np, &div_clk->c);
+}
+MSMCLK_PARSER(div_clk_dt_parser, "qcom,div-clk", 0);
+
+static void *fixed_div_clk_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct div_clk *div_clk;
+	int rc;
+
+	div_clk = devm_kzalloc(dev, sizeof(*div_clk), GFP_KERNEL);
+	if (!div_clk)
+		return ERR_PTR(-ENOMEM);
+
+	rc = of_property_read_u32(np, "qcom,div", &div_clk->data.div);
+	if (rc) {
+		dt_err(np, "missing qcom,div\n");
+		return ERR_PTR(-EINVAL);
+	}
+	div_clk->data.min_div = div_clk->data.div;
+	div_clk->data.max_div = div_clk->data.div;
+
+	if (of_property_read_bool(np, "qcom,slave-div"))
+		div_clk->c.ops = &clk_ops_slave_div;
+	else
+		div_clk->c.ops = &clk_ops_div;
+	div_clk->ops = &div_reg_ops;
+
+	return msmclk_generic_clk_init(dev, np, &div_clk->c);
+}
+MSMCLK_PARSER(fixed_div_clk_dt_parser, "qcom,fixed-div-clk", 0);
+
+static void *reset_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct reset_clk *reset_clk;
+	struct msmclk_data *drv;
+	int rc;
+
+	reset_clk = devm_kzalloc(dev, sizeof(*reset_clk), GFP_KERNEL);
+	if (!reset_clk)
+		return ERR_PTR(-ENOMEM);
+
+	rc = of_property_read_u32(np, "qcom,base-offset",
+						&reset_clk->reset_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,base-offset\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	reset_clk->base = &drv->base;
+
+	reset_clk->c.ops = &clk_ops_rst;
+	return msmclk_generic_clk_init(dev, np, &reset_clk->c);
+}
+MSMCLK_PARSER(reset_clk_dt_parser, "qcom,reset-clk", 0);
diff --git a/drivers/clk/msm/clock-pll.c b/drivers/clk/msm/clock-pll.c
new file mode 100644
index 0000000..26c04e5
--- /dev/null
+++ b/drivers/clk/msm/clock-pll.c
@@ -0,0 +1,1204 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+#include "clock.h"
+
+#define PLL_OUTCTRL BIT(0)
+#define PLL_BYPASSNL BIT(1)
+#define PLL_RESET_N BIT(2)
+#define PLL_MODE_MASK BM(3, 0)
+
+#define PLL_EN_REG(x)		(*(x)->base + (unsigned long) (x)->en_reg)
+#define PLL_STATUS_REG(x)	(*(x)->base + (unsigned long) (x)->status_reg)
+#define PLL_ALT_STATUS_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->alt_status_reg)
+#define PLL_MODE_REG(x)		(*(x)->base + (unsigned long) (x)->mode_reg)
+#define PLL_L_REG(x)		(*(x)->base + (unsigned long) (x)->l_reg)
+#define PLL_M_REG(x)		(*(x)->base + (unsigned long) (x)->m_reg)
+#define PLL_N_REG(x)		(*(x)->base + (unsigned long) (x)->n_reg)
+#define PLL_CONFIG_REG(x)	(*(x)->base + (unsigned long) (x)->config_reg)
+#define PLL_ALPHA_REG(x)	(*(x)->base + (unsigned long) (x)->alpha_reg)
+#define PLL_CFG_ALT_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->config_alt_reg)
+#define PLL_CFG_CTL_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->config_ctl_reg)
+#define PLL_CFG_CTL_HI_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->config_ctl_hi_reg)
+#define PLL_TEST_CTL_LO_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->test_ctl_lo_reg)
+#define PLL_TEST_CTL_HI_REG(x)	(*(x)->base + (unsigned long) \
+							(x)->test_ctl_hi_reg)
+static DEFINE_SPINLOCK(pll_reg_lock);
+
+#define ENABLE_WAIT_MAX_LOOPS 200
+#define PLL_LOCKED_BIT BIT(16)
+
+#define SPM_FORCE_EVENT   0x4
+
+static int pll_vote_clk_enable(struct clk *c)
+{
+	u32 ena, count;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena |= pllv->en_mask;
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	/*
+	 * Use a memory barrier since some PLL status registers are
+	 * not within the same 1K segment as the voting registers.
+	 */
+	mb();
+
+	/* Wait for pll to enable. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
+			return 0;
+		udelay(1);
+	}
+
+	WARN("PLL %s didn't enable after voting for it!\n", c->dbg_name);
+
+	return -ETIMEDOUT;
+}
+
+static void pll_vote_clk_disable(struct clk *c)
+{
+	u32 ena;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena &= ~(pllv->en_mask);
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+static int pll_vote_clk_is_enabled(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
+}
+
+static enum handoff pll_vote_clk_handoff(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
+		return HANDOFF_ENABLED_CLK;
+
+	return HANDOFF_DISABLED_CLK;
+}
+
+static void __iomem *pll_vote_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	static struct clk_register_data data1[] = {
+		{"APPS_VOTE", 0x0},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data1;
+	*size = ARRAY_SIZE(data1);
+	return PLL_EN_REG(pllv);
+}
+
+const struct clk_ops clk_ops_pll_vote = {
+	.enable = pll_vote_clk_enable,
+	.disable = pll_vote_clk_disable,
+	.is_enabled = pll_vote_clk_is_enabled,
+	.handoff = pll_vote_clk_handoff,
+	.list_registers = pll_vote_clk_list_registers,
+};
+
+/*
+ *  spm_event() -- Set/Clear SPM events
+ *  PLL off sequence -- enable (1)
+ *    Set L2_SPM_FORCE_EVENT_EN[bit] register to 1
+ *    Set L2_SPM_FORCE_EVENT[bit] register to 1
+ *  PLL on sequence -- enable (0)
+ *   Clear L2_SPM_FORCE_EVENT[bit] register to 0
+ *   Clear L2_SPM_FORCE_EVENT_EN[bit] register to 0
+ */
+static void spm_event(void __iomem *base, u32 offset, u32 bit,
+							bool enable)
+{
+	uint32_t val;
+
+	if (!base)
+		return;
+
+	if (enable) {
+		/* L2_SPM_FORCE_EVENT_EN */
+		val = readl_relaxed(base + offset);
+		val |= BIT(bit);
+		writel_relaxed(val, (base + offset));
+		/* Ensure that the write above goes through. */
+		mb();
+
+		/* L2_SPM_FORCE_EVENT */
+		val = readl_relaxed(base + offset + SPM_FORCE_EVENT);
+		val |= BIT(bit);
+		writel_relaxed(val, (base + offset + SPM_FORCE_EVENT));
+		/* Ensure that the write above goes through. */
+		mb();
+	} else {
+		/* L2_SPM_FORCE_EVENT */
+		val = readl_relaxed(base + offset + SPM_FORCE_EVENT);
+		val &= ~BIT(bit);
+		writel_relaxed(val, (base + offset + SPM_FORCE_EVENT));
+		/* Ensure that the write above goes through. */
+		mb();
+
+		/* L2_SPM_FORCE_EVENT_EN */
+		val = readl_relaxed(base + offset);
+		val &= ~BIT(bit);
+		writel_relaxed(val, (base + offset));
+		/* Ensure that the write above goes through. */
+		mb();
+	}
+}
+
+static void __pll_config_reg(void __iomem *pll_config, struct pll_freq_tbl *f,
+			struct pll_config_masks *masks)
+{
+	u32 regval;
+
+	regval = readl_relaxed(pll_config);
+
+	/* Enable the MN counter if used */
+	if (f->m_val)
+		regval |= masks->mn_en_mask;
+
+	/* Set pre-divider and post-divider values */
+	regval &= ~masks->pre_div_mask;
+	regval |= f->pre_div_val;
+	regval &= ~masks->post_div_mask;
+	regval |= f->post_div_val;
+
+	/* Select VCO setting */
+	regval &= ~masks->vco_mask;
+	regval |= f->vco_val;
+
+	/* Enable main output if it has not been enabled */
+	if (masks->main_output_mask && !(regval & masks->main_output_mask))
+		regval |= masks->main_output_mask;
+
+	writel_relaxed(regval, pll_config);
+}
+
+static int sr2_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	int ret = 0, count;
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	spm_event(pll->spm_ctrl.spm_base, pll->spm_ctrl.offset,
+				pll->spm_ctrl.event_bit, false);
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
+		pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return ret;
+}
+
+void __variable_rate_pll_init(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 regval;
+
+	regval = readl_relaxed(PLL_CONFIG_REG(pll));
+
+	if (pll->masks.post_div_mask) {
+		regval &= ~pll->masks.post_div_mask;
+		regval |= pll->vals.post_div_masked;
+	}
+
+	if (pll->masks.pre_div_mask) {
+		regval &= ~pll->masks.pre_div_mask;
+		regval |= pll->vals.pre_div_masked;
+	}
+
+	if (pll->masks.main_output_mask)
+		regval |= pll->masks.main_output_mask;
+
+	if (pll->masks.early_output_mask)
+		regval |= pll->masks.early_output_mask;
+
+	if (pll->vals.enable_mn)
+		regval |= pll->masks.mn_en_mask;
+	else
+		regval &= ~pll->masks.mn_en_mask;
+
+	writel_relaxed(regval, PLL_CONFIG_REG(pll));
+
+	regval = readl_relaxed(PLL_MODE_REG(pll));
+	if (pll->masks.apc_pdn_mask)
+		regval &= ~pll->masks.apc_pdn_mask;
+	writel_relaxed(regval, PLL_MODE_REG(pll));
+
+	writel_relaxed(pll->vals.alpha_val, PLL_ALPHA_REG(pll));
+	writel_relaxed(pll->vals.config_ctl_val, PLL_CFG_CTL_REG(pll));
+	if (pll->vals.config_ctl_hi_val)
+		writel_relaxed(pll->vals.config_ctl_hi_val,
+				PLL_CFG_CTL_HI_REG(pll));
+	if (pll->init_test_ctl) {
+		writel_relaxed(pll->vals.test_ctl_lo_val,
+				PLL_TEST_CTL_LO_REG(pll));
+		writel_relaxed(pll->vals.test_ctl_hi_val,
+				PLL_TEST_CTL_HI_REG(pll));
+	}
+
+	pll->inited = true;
+}
+
+static int variable_rate_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	int ret = 0, count;
+	u32 mode, testlo;
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+	u32 mode_lock;
+	u64 time;
+	bool early_lock = false;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	if (unlikely(!to_pll_clk(c)->inited))
+		__variable_rate_pll_init(c);
+
+	mode = readl_relaxed(PLL_MODE_REG(pll));
+
+	/* Set test control bits as required by HW doc */
+	if (pll->test_ctl_lo_reg && pll->vals.test_ctl_lo_val &&
+		pll->pgm_test_ctl_enable)
+		writel_relaxed(pll->vals.test_ctl_lo_val,
+				PLL_TEST_CTL_LO_REG(pll));
+
+	if (!pll->test_ctl_dbg) {
+		/* Enable test_ctl debug */
+		mode |= BIT(3);
+		writel_relaxed(mode, PLL_MODE_REG(pll));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0xC0;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+	}
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Use 10us to be sure.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * The HPG mandates a 5us delay; use 200us here instead. This is to
+	 * address possible locking issues where the PLL exhibits early
+	 * "transient" locks about 16us from this point. The higher delay
+	 * avoids running into those transients.
+	 */
+	mb();
+	udelay(200);
+
+	/* Clear test control bits */
+	if (pll->test_ctl_lo_reg && pll->vals.test_ctl_lo_val &&
+		pll->pgm_test_ctl_enable)
+		writel_relaxed(0x0, PLL_TEST_CTL_LO_REG(pll));
+
+	time = sched_clock();
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask) {
+			udelay(1);
+			/*
+			 * Check again to be sure. This is to avoid
+			 * breaking too early if there is a "transient"
+			 * lock.
+			 */
+			if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
+				break;
+			early_lock = true;
+		}
+		udelay(1);
+	}
+	time = sched_clock() - time;
+
+	mode_lock = readl_relaxed(PLL_STATUS_REG(pll));
+
+	if (!(mode_lock & lockmask)) {
+		pr_err("PLL lock bit detection total wait time: %lld ns", time);
+		pr_err("PLL %s didn't lock after enabling for L value 0x%x!\n",
+			c->dbg_name, readl_relaxed(PLL_L_REG(pll)));
+		pr_err("mode register is 0x%x\n",
+			readl_relaxed(PLL_STATUS_REG(pll)));
+		pr_err("user control register is 0x%x\n",
+			readl_relaxed(PLL_CONFIG_REG(pll)));
+		pr_err("config control register is 0x%x\n",
+			readl_relaxed(PLL_CFG_CTL_REG(pll)));
+		pr_err("test control high register is 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_HI_REG(pll)));
+		pr_err("test control low register is 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)));
+		pr_err("early lock? %s\n", early_lock ? "yes" : "no");
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0x40;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0x80;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0xC0;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+		panic("failed to lock %s PLL\n", c->dbg_name);
+	}
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return ret;
+}
+
+static void variable_rate_pll_clk_disable_hwfsm(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 regval;
+
+	/* Set test control bit to stay-in-CFA if necessary */
+	if (pll->test_ctl_lo_reg && pll->pgm_test_ctl_enable) {
+		regval = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		writel_relaxed(regval | BIT(16),
+				PLL_TEST_CTL_LO_REG(pll));
+	}
+
+	/* 8 reference clock cycle delay mandated by the HPG */
+	udelay(1);
+}
+
+static int variable_rate_pll_clk_enable_hwfsm(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	int count;
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+	unsigned long flags;
+	u32 regval;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	/* Clear test control bit if necessary */
+	if (pll->test_ctl_lo_reg && pll->pgm_test_ctl_enable) {
+		regval = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		regval &= ~BIT(16);
+		writel_relaxed(regval, PLL_TEST_CTL_LO_REG(pll));
+	}
+
+	/* Wait for 50us explicitly to avoid transient locks */
+	udelay(50);
+
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
+		pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return 0;
+}
+
+static void __pll_clk_enable_reg(void __iomem *mode_reg)
+{
+	u32 mode = readl_relaxed(mode_reg);
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, mode_reg);
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, mode_reg);
+
+	/* Wait until PLL is locked. */
+	mb();
+	udelay(50);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, mode_reg);
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+}
+
+static int local_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	__pll_clk_enable_reg(PLL_MODE_REG(pll));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return 0;
+}
+
+static void __pll_clk_disable_reg(void __iomem *mode_reg)
+{
+	u32 mode = readl_relaxed(mode_reg);
+
+	mode &= ~PLL_MODE_MASK;
+	writel_relaxed(mode, mode_reg);
+}
+
+static void local_pll_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	/*
+	 * Disable the PLL output, disable test mode, enable
+	 * the bypass mode, and assert the reset.
+	 */
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	spm_event(pll->spm_ctrl.spm_base, pll->spm_ctrl.offset,
+				pll->spm_ctrl.event_bit, true);
+	__pll_clk_disable_reg(PLL_MODE_REG(pll));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+static enum handoff local_pll_clk_handoff(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
+	unsigned long parent_rate;
+	u32 lval, mval, nval, userval;
+
+	if ((mode & mask) != mask)
+		return HANDOFF_DISABLED_CLK;
+
+	/* Assume bootloaders configure PLL to c->rate */
+	if (c->rate)
+		return HANDOFF_ENABLED_CLK;
+
+	parent_rate = clk_get_rate(c->parent);
+	lval = readl_relaxed(PLL_L_REG(pll));
+	mval = readl_relaxed(PLL_M_REG(pll));
+	nval = readl_relaxed(PLL_N_REG(pll));
+	userval = readl_relaxed(PLL_CONFIG_REG(pll));
+
+	c->rate = parent_rate * lval;
+
+	if (pll->masks.mn_en_mask && userval) {
+		if (!nval)
+			nval = 1;
+		c->rate += (parent_rate * mval) / nval;
+	}
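+	/* e.g. with a 19.2 MHz parent, L = 52 and no M/N this recovers 998.4 MHz */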
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static long local_pll_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_freq_tbl *nf;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	if (!pll->freq_tbl)
+		return -EINVAL;
+
+	for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END; nf++)
+		if (nf->freq_hz >= rate)
+			return nf->freq_hz;
+
+	nf--;
+	return nf->freq_hz;
+}
+
+static int local_pll_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_freq_tbl *nf;
+	struct pll_clk *pll = to_pll_clk(c);
+	unsigned long flags;
+
+	for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END
+			&& nf->freq_hz != rate; nf++)
+		;
+
+	if (nf->freq_hz == PLL_FREQ_END)
+		return -EINVAL;
+
+	/*
+	 * Ensure the PLL is off before changing rate. For optimization
+	 * reasons, assume no downstream clock is actively using it.
+	 */
+	spin_lock_irqsave(&c->lock, flags);
+	if (c->count)
+		c->ops->disable(c);
+
+	writel_relaxed(nf->l_val, PLL_L_REG(pll));
+	writel_relaxed(nf->m_val, PLL_M_REG(pll));
+	writel_relaxed(nf->n_val, PLL_N_REG(pll));
+
+	__pll_config_reg(PLL_CONFIG_REG(pll), nf, &pll->masks);
+
+	if (c->count)
+		c->ops->enable(c);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+static enum handoff variable_rate_pll_handoff(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
+	u32 lval;
+
+	pll->src_rate = clk_get_rate(c->parent);
+
+	lval = readl_relaxed(PLL_L_REG(pll));
+	if (!lval)
+		return HANDOFF_DISABLED_CLK;
+
+	c->rate = pll->src_rate * lval;
+
+	if (c->rate > pll->max_rate || c->rate < pll->min_rate) {
+		WARN(1, "%s: Out of spec PLL", c->dbg_name);
+		return HANDOFF_DISABLED_CLK;
+	}
+
+	if ((mode & mask) != mask)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+static long variable_rate_pll_round_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+
+	if (!pll->src_rate)
+		return 0;
+
+	if (pll->no_prepared_reconfig && c->prepare_count && c->rate != rate)
+		return -EINVAL;
+
+	if (rate < pll->min_rate)
+		rate = pll->min_rate;
+	if (rate > pll->max_rate)
+		rate = pll->max_rate;
+
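+	/*
+	 * Round up to a multiple of the source rate: e.g. 1.4 GHz on a
+	 * 19.2 MHz reference rounds to 1401.6 MHz (L = 73).
+	 */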
+	return min(pll->max_rate,
+			DIV_ROUND_UP(rate, pll->src_rate) * pll->src_rate);
+}
+
+/*
+ * For optimization reasons, assumes no downstream clocks are actively using
+ * it.
+ */
+static int variable_rate_pll_set_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	unsigned long flags;
+	u32 l_val;
+
+	if (rate != variable_rate_pll_round_rate(c, rate))
+		return -EINVAL;
+
+	l_val = rate / pll->src_rate;
+
+	spin_lock_irqsave(&c->lock, flags);
+
+	if (c->count && c->ops->disable)
+		c->ops->disable(c);
+
+	writel_relaxed(l_val, PLL_L_REG(pll));
+
+	if (c->count && c->ops->enable)
+		c->ops->enable(c);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+
+	return 0;
+}
+
+int sr_pll_clk_enable(struct clk *c)
+{
+	u32 mode;
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	mode = readl_relaxed(PLL_MODE_REG(pll));
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait until PLL is locked. */
+	mb();
+	udelay(60);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return 0;
+}
+
+int sr_hpm_lp_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 count, mode;
+	int ret = 0;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	/* Disable PLL bypass mode and de-assert reset. */
+	mode = PLL_BYPASSNL | PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
+		WARN("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure the write above goes through before returning. */
+	mb();
+
+out:
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return ret;
+}
+
+static void __iomem *variable_rate_pll_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	static struct clk_register_data data[] = {
+		{"MODE", 0x0},
+		{"L", 0x4},
+		{"ALPHA", 0x8},
+		{"USER_CTL", 0x10},
+		{"CONFIG_CTL", 0x14},
+		{"STATUS", 0x1C},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return PLL_MODE_REG(pll);
+}
+
+static void __iomem *local_pll_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	/* Not compatible with 8960 & friends */
+	struct pll_clk *pll = to_pll_clk(c);
+	static struct clk_register_data data[] = {
+		{"MODE", 0x0},
+		{"L", 0x4},
+		{"M", 0x8},
+		{"N", 0xC},
+		{"USER", 0x10},
+		{"CONFIG", 0x14},
+		{"STATUS", 0x1C},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return PLL_MODE_REG(pll);
+}
+
+const struct clk_ops clk_ops_local_pll = {
+	.enable = local_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = local_pll_clk_set_rate,
+	.handoff = local_pll_clk_handoff,
+	.list_registers = local_pll_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_sr2_pll = {
+	.enable = sr2_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = local_pll_clk_set_rate,
+	.round_rate = local_pll_clk_round_rate,
+	.handoff = local_pll_clk_handoff,
+	.list_registers = local_pll_clk_list_registers,
+};
+
+const struct clk_ops clk_ops_variable_rate_pll_hwfsm = {
+	.enable = variable_rate_pll_clk_enable_hwfsm,
+	.disable = variable_rate_pll_clk_disable_hwfsm,
+	.set_rate = variable_rate_pll_set_rate,
+	.round_rate = variable_rate_pll_round_rate,
+	.handoff = variable_rate_pll_handoff,
+};
+
+const struct clk_ops clk_ops_variable_rate_pll = {
+	.enable = variable_rate_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = variable_rate_pll_set_rate,
+	.round_rate = variable_rate_pll_round_rate,
+	.handoff = variable_rate_pll_handoff,
+	.list_registers = variable_rate_pll_list_registers,
+};
+
+static DEFINE_SPINLOCK(soft_vote_lock);
+
+static int pll_acpu_vote_clk_enable(struct clk *c)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&soft_vote_lock, flags);
+
+	if (!*pllv->soft_vote)
+		ret = pll_vote_clk_enable(c);
+	if (ret == 0)
+		*pllv->soft_vote |= (pllv->soft_vote_mask);
+
+	spin_unlock_irqrestore(&soft_vote_lock, flags);
+	return ret;
+}
+
+static void pll_acpu_vote_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&soft_vote_lock, flags);
+
+	*pllv->soft_vote &= ~(pllv->soft_vote_mask);
+	if (!*pllv->soft_vote)
+		pll_vote_clk_disable(c);
+
+	spin_unlock_irqrestore(&soft_vote_lock, flags);
+}
+
+static enum handoff pll_acpu_vote_clk_handoff(struct clk *c)
+{
+	if (pll_vote_clk_handoff(c) == HANDOFF_DISABLED_CLK)
+		return HANDOFF_DISABLED_CLK;
+
+	if (pll_acpu_vote_clk_enable(c))
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+const struct clk_ops clk_ops_pll_acpu_vote = {
+	.enable = pll_acpu_vote_clk_enable,
+	.disable = pll_acpu_vote_clk_disable,
+	.is_enabled = pll_vote_clk_is_enabled,
+	.handoff = pll_acpu_vote_clk_handoff,
+	.list_registers = pll_vote_clk_list_registers,
+};
+
+static int pll_sleep_clk_enable(struct clk *c)
+{
+	u32 ena;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena &= ~(pllv->en_mask);
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return 0;
+}
+
+static void pll_sleep_clk_disable(struct clk *c)
+{
+	u32 ena;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena |= pllv->en_mask;
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+static enum handoff pll_sleep_clk_handoff(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	if (!(readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask))
+		return HANDOFF_ENABLED_CLK;
+
+	return HANDOFF_DISABLED_CLK;
+}
+
+/*
+ * This .ops is meant to be used by gpll0_sleep_clk_src. The aim is to utilise
+ * the h/w sleep-enable bit to denote whether the PLL can be turned OFF once
+ * APPS goes to power collapse. gpll0_sleep_clk_src will be enabled only if
+ * there is a peripheral client using it and disabled if there is none. Note
+ * the inversion: the enable .ops clears the h/w sleep-enable bit while the
+ * disable .ops asserts it.
+ */
+
+const struct clk_ops clk_ops_pll_sleep_vote = {
+	.enable = pll_sleep_clk_enable,
+	.disable = pll_sleep_clk_disable,
+	.handoff = pll_sleep_clk_handoff,
+	.list_registers = pll_vote_clk_list_registers,
+};
+
+static void __set_fsm_mode(void __iomem *mode_reg,
+					u32 bias_count, u32 lock_count)
+{
+	u32 regval = readl_relaxed(mode_reg);
+
+	/* De-assert reset to FSM */
+	regval &= ~BIT(21);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program bias count */
+	regval &= ~BM(19, 14);
+	regval |= BVAL(19, 14, bias_count);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program lock count */
+	regval &= ~BM(13, 8);
+	regval |= BVAL(13, 8, lock_count);
+	writel_relaxed(regval, mode_reg);
+
+	/* Enable PLL FSM voting */
+	regval |= BIT(20);
+	writel_relaxed(regval, mode_reg);
+}
+
+static void __configure_alt_config(struct pll_alt_config config,
+		struct pll_config_regs *regs)
+{
+	u32 regval;
+
+	regval = readl_relaxed(PLL_CFG_ALT_REG(regs));
+
+	if (config.mask) {
+		regval &= ~config.mask;
+		regval |= config.val;
+	}
+
+	writel_relaxed(regval, PLL_CFG_ALT_REG(regs));
+}
+
+void __configure_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	u32 regval;
+
+	writel_relaxed(config->l, PLL_L_REG(regs));
+	writel_relaxed(config->m, PLL_M_REG(regs));
+	writel_relaxed(config->n, PLL_N_REG(regs));
+
+	regval = readl_relaxed(PLL_CONFIG_REG(regs));
+
+	/* Enable the MN accumulator */
+	if (config->mn_ena_mask) {
+		regval &= ~config->mn_ena_mask;
+		regval |= config->mn_ena_val;
+	}
+
+	/* Enable the main output */
+	if (config->main_output_mask) {
+		regval &= ~config->main_output_mask;
+		regval |= config->main_output_val;
+	}
+
+	/* Enable the aux output */
+	if (config->aux_output_mask) {
+		regval &= ~config->aux_output_mask;
+		regval |= config->aux_output_val;
+	}
+
+	/* Set pre-divider and post-divider values */
+	regval &= ~config->pre_div_mask;
+	regval |= config->pre_div_val;
+	regval &= ~config->post_div_mask;
+	regval |= config->post_div_val;
+
+	/* Select VCO setting */
+	regval &= ~config->vco_mask;
+	regval |= config->vco_val;
+
+	if (config->add_factor_mask) {
+		regval &= ~config->add_factor_mask;
+		regval |= config->add_factor_val;
+	}
+
+	writel_relaxed(regval, PLL_CONFIG_REG(regs));
+
+	if (regs->config_alt_reg)
+		__configure_alt_config(config->alt_cfg, regs);
+
+	if (regs->config_ctl_reg)
+		writel_relaxed(config->cfg_ctl_val, PLL_CFG_CTL_REG(regs));
+}
+
+void configure_sr_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	__configure_pll(config, regs, ena_fsm_mode);
+	if (ena_fsm_mode)
+		__set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x8);
+}
+
+void configure_sr_hpm_lp_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	__configure_pll(config, regs, ena_fsm_mode);
+	if (ena_fsm_mode)
+		__set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x0);
+}
+
+static void *votable_pll_clk_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct pll_vote_clk *v, *peer;
+	struct clk *c;
+	u32 val, rc;
+	phandle p;
+	struct msmclk_data *drv;
+
+	v = devm_kzalloc(dev, sizeof(*v), GFP_KERNEL);
+	if (!v)
+		return ERR_PTR(-ENOMEM);
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	v->base = &drv->base;
+
+	rc = of_property_read_u32(np, "qcom,en-offset", (u32 *)&v->en_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,en-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,en-bit", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,en-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	v->en_mask = BIT(val);
+
+	rc = of_property_read_u32(np, "qcom,status-offset",
+						(u32 *)&v->status_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,status-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,status-bit", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,status-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	v->status_mask = BIT(val);
+
+	rc = of_property_read_u32(np, "qcom,pll-config-rate", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,pll-config-rate dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	v->c.rate = val;
+
+	if (of_device_is_compatible(np, "qcom,active-only-pll"))
+		v->soft_vote_mask = PLL_SOFT_VOTE_ACPU;
+	else if (of_device_is_compatible(np, "qcom,sleep-active-pll"))
+		v->soft_vote_mask = PLL_SOFT_VOTE_PRIMARY;
+
+	if (of_device_is_compatible(np, "qcom,votable-pll")) {
+		v->c.ops = &clk_ops_pll_vote;
+		return msmclk_generic_clk_init(dev, np, &v->c);
+	}
+
+	rc = of_property_read_phandle_index(np, "qcom,peer", 0, &p);
+	if (rc) {
+		dt_err(np, "missing qcom,peer dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	c = msmclk_lookup_phandle(dev, p);
+	if (!IS_ERR_OR_NULL(c)) {
+		v->soft_vote = devm_kzalloc(dev, sizeof(*v->soft_vote),
+						GFP_KERNEL);
+		if (!v->soft_vote)
+			return ERR_PTR(-ENOMEM);
+
+		peer = to_pll_vote_clk(c);
+		peer->soft_vote = v->soft_vote;
+	}
+
+	v->c.ops = &clk_ops_pll_acpu_vote;
+	return msmclk_generic_clk_init(dev, np, &v->c);
+}
+MSMCLK_PARSER(votable_pll_clk_dt_parser, "qcom,active-only-pll", 0);
+MSMCLK_PARSER(votable_pll_clk_dt_parser, "qcom,sleep-active-pll", 1);
+MSMCLK_PARSER(votable_pll_clk_dt_parser, "qcom,votable-pll", 2);
diff --git a/drivers/clk/msm/clock-rcgwr.c b/drivers/clk/msm/clock-rcgwr.c
new file mode 100644
index 0000000..75f8e04
--- /dev/null
+++ b/drivers/clk/msm/clock-rcgwr.c
@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/debugfs.h>
+
+#define CMD_RCGR_REG		0x0
+#define CMD_UPDATE_EN		BIT(0)
+/* Async_clk_en */
+#define CMD_ROOT_EN		BIT(1)
+
+struct rcgwr {
+	void __iomem *base;
+	void __iomem *rcg_base;
+	int *dfs_sid_offset;
+	int *dfs_sid_value;
+	int  dfs_sid_len;
+	int *link_sid_offset;
+	int *link_sid_value;
+	int  link_sid_len;
+	int *lmh_sid_offset;
+	int *lmh_sid_value;
+	int  lmh_sid_len;
+	bool inited;
+};
+
+static struct rcgwr **rcgwr;
+static struct platform_device *cpu_clock_dev;
+static u32 num_clusters;
+
+#define DFS_SID_1_2		0x10
+#define DFS_SID_3_4		0x14
+#define DFS_SID_5_6		0x18
+#define DFS_SID_7_8		0x1C
+#define DFS_SID_9_10		0x20
+#define DFS_SID_11_12		0x24
+#define DFS_SID_13_14		0x28
+#define DFS_SID_15		0x2C
+#define LMH_SID_1_2		0x30
+#define LMH_SID_3_4		0x34
+#define LMH_SID_5		0x38
+#define DCVS_CFG_CTL		0x50
+#define LMH_CFG_CTL		0x54
+#define RC_CFG_CTL		0x58
+#define RC_CFG_DBG		0x5C
+#define RC_CFG_UPDATE		0x60
+
+#define RC_CFG_UPDATE_EN_BIT	8
+#define RC_CFG_ACK_BIT		16
+
+#define UPDATE_CHECK_MAX_LOOPS  500
+
+#define DFS_SID_START		0xE
+#define LMH_SID_START		0x6
+#define DCVS_CONFIG		0x2
+#define LINK_SID		0x3
+
+/* Sequence for enable */
+static int ramp_en[] = { 0x800, 0xC00, 0x400 };
+
+static int check_rcg_config(void __iomem *base)
+{
+	u32 cmd_rcgr_regval, count;
+
+	cmd_rcgr_regval = readl_relaxed(base + CMD_RCGR_REG);
+	cmd_rcgr_regval |= CMD_ROOT_EN;
+	writel_relaxed(cmd_rcgr_regval, (base + CMD_RCGR_REG));
+
+	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
+		cmd_rcgr_regval = readl_relaxed(base + CMD_RCGR_REG);
+		cmd_rcgr_regval &= CMD_UPDATE_EN;
+		if (!(cmd_rcgr_regval)) {
+			pr_debug("cmd_rcgr state on update bit cleared 0x%x, cmd 0x%x\n",
+					readl_relaxed(base + CMD_RCGR_REG),
+					cmd_rcgr_regval);
+			return 0;
+		}
+		udelay(1);
+	}
+
+	WARN_ON(count == 0);
+
+	return -EINVAL;
+}
+
+static int rc_config_update(void __iomem *base, u32 rc_value, u32 rc_ack_bit)
+{
+	u32 count, regval;
+	int ret = 0;
+
+	regval = readl_relaxed(base + RC_CFG_UPDATE);
+	regval |= rc_value;
+	writel_relaxed(regval, base + RC_CFG_UPDATE);
+	regval |= BIT(RC_CFG_UPDATE_EN_BIT);
+	writel_relaxed(regval, base + RC_CFG_UPDATE);
+
+	/* Poll for update ack */
+	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
+		regval = readl_relaxed((base + RC_CFG_UPDATE))
+						  >> RC_CFG_ACK_BIT;
+		if (regval == BIT(rc_ack_bit)) {
+			ret = 0;
+			break;
+		}
+		udelay(1);
+	}
+	WARN_ON(count == 0);
+
+	/* Clear RC_CFG_UPDATE_EN */
+	writel_relaxed(0 << RC_CFG_UPDATE_EN_BIT, (base + RC_CFG_UPDATE));
+	/* Poll for update ack */
+	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
+		regval = readl_relaxed((base + RC_CFG_UPDATE))
+						>> RC_CFG_ACK_BIT;
+		if (!regval)
+			return ret;
+		udelay(1);
+	}
+	WARN_ON(count == 0);
+
+	return -EINVAL;
+}
+
+static int ramp_control_enable(struct platform_device *pdev,
+		struct rcgwr *rcgwr)
+{
+	int i = 0, ret = 0;
+
+	for (i = 0; i < ARRAY_SIZE(ramp_en); i++) {
+		ret = check_rcg_config(rcgwr->rcg_base);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to update config\n");
+			return ret;
+		}
+		writel_relaxed(ramp_en[i], rcgwr->base + DCVS_CFG_CTL);
+		ret = rc_config_update(rcgwr->base, DCVS_CONFIG, DCVS_CONFIG);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Failed to update config for 0x2 and ACK 0x4\n");
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int ramp_down_disable(struct platform_device *pdev,
+		struct rcgwr *rcgwr)
+{
+	int ret = 0;
+
+	ret = check_rcg_config(rcgwr->rcg_base);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to update config\n");
+		return ret;
+	}
+
+	writel_relaxed(0x200, rcgwr->base + DCVS_CFG_CTL);
+	ret = rc_config_update(rcgwr->base, DCVS_CONFIG, DCVS_CONFIG);
+	if (ret)
+		dev_err(&pdev->dev,
+			"Failed to update config for 0x2 and ACK 0x4\n");
+
+	return ret;
+}
+
+static int ramp_control_disable(struct platform_device *pdev,
+		struct rcgwr *rcgwr)
+{
+	int ret = 0;
+
+	if (!rcgwr->inited)
+		return 0;
+
+	ret = check_rcg_config(rcgwr->rcg_base);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to update config\n");
+		return ret;
+	}
+
+	writel_relaxed(0x0, rcgwr->base + DCVS_CFG_CTL);
+
+	ret = rc_config_update(rcgwr->base, DCVS_CONFIG, DCVS_CONFIG);
+	if (ret)
+		dev_err(&pdev->dev,
+			"Failed to update config for 0x2 and ACK 0x4\n");
+
+	rcgwr->inited = false;
+
+	return ret;
+}
+
+static int ramp_link_sid(struct platform_device *pdev, struct rcgwr *rcgwr)
+{
+	int ret = 0, i;
+
+	if (!rcgwr->link_sid_len) {
+		pr_err("Using default Link SID\n");
+		return 0;
+	}
+
+	ret = check_rcg_config(rcgwr->rcg_base);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to update config\n");
+		return ret;
+	}
+
+	for (i = 0; i < rcgwr->link_sid_len; i++)
+		writel_relaxed(rcgwr->link_sid_value[i],
+				rcgwr->base + rcgwr->link_sid_offset[i]);
+
+	ret = rc_config_update(rcgwr->base, LINK_SID, LINK_SID);
+	if (ret)
+		dev_err(&pdev->dev,
+			"Failed to config update for 0x3 and ACK 0x8\n");
+
+	return ret;
+}
+
+static int ramp_lmh_sid(struct platform_device *pdev, struct rcgwr *rcgwr)
+{
+	int ret = 0, i, j;
+
+	if (!rcgwr->lmh_sid_len) {
+		pr_err("Using default LMH SID\n");
+		return 0;
+	}
+
+	ret = check_rcg_config(rcgwr->rcg_base);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to update config\n");
+		return ret;
+	}
+
+	for (i = 0; i < rcgwr->lmh_sid_len; i++)
+		writel_relaxed(rcgwr->lmh_sid_value[i],
+				rcgwr->base + rcgwr->lmh_sid_offset[i]);
+
+	for (i = LMH_SID_START, j = 0; j < rcgwr->lmh_sid_len; i--, j++) {
+		ret = rc_config_update(rcgwr->base, i, i);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Failed to update config for LMHSID-0x%x and ack 0x%lx\n",
+					i, BIT(i));
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int ramp_dfs_sid(struct platform_device *pdev, struct rcgwr *rcgwr)
+{
+	int ret = 0, i, j;
+
+	if (!rcgwr->dfs_sid_len) {
+		pr_err("Using default DFS SID\n");
+		return 0;
+	}
+
+	ret = check_rcg_config(rcgwr->rcg_base);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to update config\n");
+		return ret;
+	}
+
+	for (i = 0; i < rcgwr->dfs_sid_len; i++)
+		writel_relaxed(rcgwr->dfs_sid_value[i],
+				rcgwr->base + rcgwr->dfs_sid_offset[i]);
+
+	for (i = DFS_SID_START, j = 0; j < rcgwr->dfs_sid_len; i--, j++) {
+		ret = rc_config_update(rcgwr->base, i, i);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Failed to update config for DFSSID-0x%x and ack 0x%lx\n",
+					i, BIT(i));
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int parse_dt_rcgwr(struct platform_device *pdev, char *prop_name,
+				int **off, int **val, int *len)
+{
+	struct device_node *node = pdev->dev.of_node;
+	int prop_len, i;
+	u32 *array;
+
+	if (!of_find_property(node, prop_name, &prop_len)) {
+		dev_err(&pdev->dev, "missing %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	prop_len /= sizeof(u32);
+	if (prop_len % 2) {
+		dev_err(&pdev->dev, "bad length %d\n", prop_len);
+		return -EINVAL;
+	}
+
+	prop_len /= 2;
+
+	*off = devm_kzalloc(&pdev->dev, prop_len * sizeof(u32), GFP_KERNEL);
+	if (!*off)
+		return -ENOMEM;
+
+	*val = devm_kzalloc(&pdev->dev, prop_len * sizeof(u32), GFP_KERNEL);
+	if (!*val)
+		return -ENOMEM;
+
+	array = devm_kzalloc(&pdev->dev,
+			prop_len * sizeof(u32) * 2, GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	of_property_read_u32_array(node, prop_name, array, prop_len * 2);
+	for (i = 0; i < prop_len; i++) {
+		*(*off + i) = array[i * 2];
+		*(*val + i) = array[2 * i + 1];
+	}
+
+	*len = prop_len;
+
+	return 0;
+}
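+
+/*
+ * The *-sid properties parsed above are flat arrays of <offset value>
+ * pairs. A hypothetical table (values illustrative only):
+ *
+ *	qcom,dfs-sid-c0 = <0x10 0x101>, <0x14 0x202>;
+ *
+ * yields off[] = { 0x10, 0x14 }, val[] = { 0x101, 0x202 } and len = 2.
+ */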
+
+static int rcgwr_init_bases(struct platform_device *pdev, struct rcgwr *rcgwr,
+		const char *name)
+{
+	struct resource *res;
+	char rcg_name[] = "rcgwr-xxx-base";
+	char rcg_mux[] = "xxx-mux";
+
+	snprintf(rcg_name, ARRAY_SIZE(rcg_name), "rcgwr-%s-base", name);
+	res = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, rcg_name);
+	if (!res) {
+		dev_err(&pdev->dev, "missing %s\n", rcg_name);
+		return -EINVAL;
+	}
+
+	rcgwr->base = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	if (!rcgwr->base) {
+		dev_err(&pdev->dev, "ioremap failed for %s\n",
+					rcg_name);
+		return -ENOMEM;
+	}
+
+	snprintf(rcg_mux, ARRAY_SIZE(rcg_mux), "%s-mux", name);
+	res = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, rcg_mux);
+	if (!res) {
+		dev_err(&pdev->dev, "missing %s\n", rcg_mux);
+		return -EINVAL;
+	}
+
+	rcgwr->rcg_base = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	if (!rcgwr->rcg_base) {
+		dev_err(&pdev->dev, "ioremap failed for %s\n",
+					rcg_mux);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*
+ * Disable the RCG ramp controller.
+ */
+int clock_rcgwr_disable(struct platform_device *pdev)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < num_clusters; i++) {
+		if (!rcgwr[i])
+			return -ENOMEM;
+		ret = ramp_control_disable(pdev, rcgwr[i]);
+		if (ret)
+			dev_err(&pdev->dev,
+			"Ramp controller disable failed for Cluster-%d\n", i);
+	}
+
+	return ret;
+}
+
+static int clock_rcgwr_disable_set(void *data, u64 val)
+{
+	if (val) {
+		pr_err("Enabling not supported\n");
+		return -EINVAL;
+	}
+
+	return clock_rcgwr_disable(cpu_clock_dev);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(rcgwr_enable_fops, NULL,
+			clock_rcgwr_disable_set, "%lld\n");
+
+static int clock_debug_enable_show(struct seq_file *m, void *v)
+{
+	int i = 0;
+
+	seq_puts(m, "Cluster\t\tEnable\n");
+
+	for (i = 0; i < num_clusters; i++)
+		seq_printf(m, "%d\t\t%d\n", i, rcgwr[i]->inited);
+
+	return 0;
+}
+
+static int clock_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, clock_debug_enable_show, inode->i_private);
+}
+
+static const struct file_operations rcgwr_enable_show = {
+	.owner		= THIS_MODULE,
+	.open		= clock_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*
+ * Program the DFS Sequence ID.
+ * Program the Link Sequence ID.
+ * Enable RCG with ramp controller.
+ */
+int clock_rcgwr_init(struct platform_device *pdev)
+{
+	int ret = 0, i;
+	char link_sid[] = "qcom,link-sid-xxx";
+	char dfs_sid[]  = "qcom,dfs-sid-xxx";
+	char lmh_sid[]  = "qcom,lmh-sid-xxx";
+	char ramp_dis[] = "qcom,ramp-dis-xxx";
+	char names[] = "cxxx";
+	struct dentry *debugfs_base;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-clusters",
+						&num_clusters);
+	if (ret)
+		panic("Cannot read num-clusters from dt (ret:%d)\n", ret);
+
+	rcgwr = devm_kzalloc(&pdev->dev, sizeof(*rcgwr) * num_clusters,
+				GFP_KERNEL);
+	if (!rcgwr)
+		return -ENOMEM;
+
+	for (i = 0; i < num_clusters; i++) {
+		rcgwr[i] = devm_kzalloc(&pdev->dev, sizeof(struct rcgwr),
+				GFP_KERNEL);
+		if (!rcgwr[i])
+			goto fail_mem;
+
+		snprintf(names, ARRAY_SIZE(names), "c%d", i);
+
+		ret = rcgwr_init_bases(pdev, rcgwr[i], names);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to init_bases for RCGwR\n");
+			goto fail_mem;
+		}
+
+		snprintf(dfs_sid, ARRAY_SIZE(dfs_sid),
+					"qcom,dfs-sid-%s", names);
+		ret = parse_dt_rcgwr(pdev, dfs_sid, &(rcgwr[i]->dfs_sid_offset),
+			&(rcgwr[i]->dfs_sid_value), &(rcgwr[i]->dfs_sid_len));
+		if (ret)
+			dev_err(&pdev->dev,
+				"No DFS SID tables found for Cluster-%d\n", i);
+
+		snprintf(link_sid, ARRAY_SIZE(link_sid),
+					"qcom,link-sid-%s", names);
+		ret = parse_dt_rcgwr(pdev, link_sid,
+			&(rcgwr[i]->link_sid_offset),
+			&(rcgwr[i]->link_sid_value), &(rcgwr[i]->link_sid_len));
+		if (ret)
+			dev_err(&pdev->dev,
+				"No Link SID tables found for Cluster-%d\n", i);
+
+		snprintf(lmh_sid, ARRAY_SIZE(lmh_sid),
+					"qcom,lmh-sid-%s", names);
+		ret = parse_dt_rcgwr(pdev, lmh_sid,
+			&(rcgwr[i]->lmh_sid_offset),
+			&(rcgwr[i]->lmh_sid_value), &(rcgwr[i]->lmh_sid_len));
+		if (ret)
+			dev_err(&pdev->dev,
+				"No LMH SID tables found for Cluster-%d\n", i);
+
+		ret = ramp_lmh_sid(pdev, rcgwr[i]);
+		if (ret)
+			goto fail_mem;
+
+		ret = ramp_dfs_sid(pdev, rcgwr[i]);
+		if (ret)
+			goto fail_mem;
+
+		ret = ramp_link_sid(pdev, rcgwr[i]);
+		if (ret)
+			goto fail_mem;
+
+		ret = ramp_control_enable(pdev, rcgwr[i]);
+		if (ret)
+			goto fail_mem;
+
+		snprintf(ramp_dis, ARRAY_SIZE(ramp_dis),
+					"qcom,ramp-dis-%s", names);
+		if (of_property_read_bool(pdev->dev.of_node, ramp_dis)) {
+			ret = ramp_down_disable(pdev, rcgwr[i]);
+			if (ret)
+				goto fail_mem;
+		}
+
+		rcgwr[i]->inited = true;
+	}
+
+	cpu_clock_dev = pdev;
+
+	debugfs_base = debugfs_create_dir("rcgwr", NULL);
+	if (debugfs_base) {
+		if (!debugfs_create_file("enable", 0200, debugfs_base, NULL,
+				&rcgwr_enable_fops)) {
+			pr_err("Unable to create `enable` debugfs entry\n");
+			debugfs_remove(debugfs_base);
+		}
+
+		if (!debugfs_create_file("status", 0444, debugfs_base, NULL,
+					&rcgwr_enable_show)) {
+			pr_err("Unable to create `status` debugfs entry\n");
+			debugfs_remove_recursive(debugfs_base);
+		}
+	} else {
+		pr_err("Unable to create debugfs dir\n");
+	}
+
+	pr_info("RCGwR init completed\n");
+
+	return ret;
+
+fail_mem:
+	for (i--; i >= 0; i--) {
+		devm_kfree(&pdev->dev, rcgwr[i]);
+		rcgwr[i] = NULL;
+	}
+	devm_kfree(&pdev->dev, rcgwr);
+	panic("RCGwR failed to Initialize\n");
+}
diff --git a/drivers/clk/msm/clock-rpm.c b/drivers/clk/msm/clock-rpm.c
new file mode 100644
index 0000000..f95823d
--- /dev/null
+++ b/drivers/clk/msm/clock-rpm.c
@@ -0,0 +1,473 @@
+/* Copyright (c) 2010-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/rtmutex.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/clock-rpm.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+#define __clk_rpmrs_set_rate(r, value, ctx) \
+	((r)->rpmrs_data->set_rate_fn((r), (value), (ctx)))
+
+#define clk_rpmrs_set_rate_sleep(r, value) \
+	    __clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_sleep_id)
+
+#define clk_rpmrs_set_rate_active(r, value) \
+	   __clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_active_id)
+
+static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
+				uint32_t context)
+{
+	int ret;
+
+	struct msm_rpm_kvp kvp = {
+		.key = r->rpm_key,
+		.data = (void *)&value,
+		.length = sizeof(value),
+	};
+
+	switch (context) {
+	case MSM_RPM_CTX_ACTIVE_SET:
+		if (*r->last_active_set_vote == value)
+			return 0;
+		break;
+	case MSM_RPM_CTX_SLEEP_SET:
+		if (*r->last_sleep_set_vote == value)
+			return 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = msm_rpm_send_message(context, r->rpm_res_type, r->rpm_clk_id,
+			&kvp, 1);
+	if (ret)
+		return ret;
+
+	switch (context) {
+	case MSM_RPM_CTX_ACTIVE_SET:
+		*r->last_active_set_vote = value;
+		break;
+	case MSM_RPM_CTX_SLEEP_SET:
+		*r->last_sleep_set_vote = value;
+		break;
+	}
+
+	return 0;
+}
+
+static int clk_rpmrs_handoff_smd(struct rpm_clk *r)
+{
+	if (!r->branch)
+		r->c.rate = INT_MAX;
+
+	return 0;
+}
+
+static int clk_rpmrs_is_enabled_smd(struct rpm_clk *r)
+{
+	return !!r->c.prepare_count;
+}
+
+struct clk_rpmrs_data {
+	int (*set_rate_fn)(struct rpm_clk *r, uint32_t value, uint32_t context);
+	int (*get_rate_fn)(struct rpm_clk *r);
+	int (*handoff_fn)(struct rpm_clk *r);
+	int (*is_enabled)(struct rpm_clk *r);
+	int ctx_active_id;
+	int ctx_sleep_id;
+};
+
+struct clk_rpmrs_data clk_rpmrs_data_smd = {
+	.set_rate_fn = clk_rpmrs_set_rate_smd,
+	.handoff_fn = clk_rpmrs_handoff_smd,
+	.is_enabled = clk_rpmrs_is_enabled_smd,
+	.ctx_active_id = MSM_RPM_CTX_ACTIVE_SET,
+	.ctx_sleep_id = MSM_RPM_CTX_SLEEP_SET,
+};
+
+static DEFINE_RT_MUTEX(rpm_clock_lock);
+
+static void to_active_sleep_khz(struct rpm_clk *r, unsigned long rate,
+			unsigned long *active_khz, unsigned long *sleep_khz)
+{
+	/* Convert the rate (hz) to khz */
+	*active_khz = DIV_ROUND_UP(rate, 1000);
+
+	/*
+	 * Active-only clocks don't care what the rate is during sleep. So,
+	 * they vote for zero.
+	 */
+	if (r->active_only)
+		*sleep_khz = 0;
+	else
+		*sleep_khz = *active_khz;
+}
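+
+/*
+ * E.g. a 19200000 Hz request becomes 19200 kHz on the active set and,
+ * for an active-only clock, 0 kHz on the sleep set.
+ */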
+
+static int rpm_clk_prepare(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	uint32_t value;
+	int rc = 0;
+	unsigned long this_khz, this_sleep_khz;
+	unsigned long peer_khz = 0, peer_sleep_khz = 0;
+	struct rpm_clk *peer = r->peer;
+
+	rt_mutex_lock(&rpm_clock_lock);
+
+	to_active_sleep_khz(r, r->c.rate, &this_khz, &this_sleep_khz);
+
+	/* Don't send requests to the RPM if the rate has not been set. */
+	if (this_khz == 0)
+		goto out;
+
+	/* Take peer clock's rate into account only if it's enabled. */
+	if (peer->enabled)
+		to_active_sleep_khz(peer, peer->c.rate,
+				&peer_khz, &peer_sleep_khz);
+
+	value = max(this_khz, peer_khz);
+	if (r->branch)
+		value = !!value;
+
+	rc = clk_rpmrs_set_rate_active(r, value);
+	if (rc)
+		goto out;
+
+	value = max(this_sleep_khz, peer_sleep_khz);
+	if (r->branch)
+		value = !!value;
+
+	rc = clk_rpmrs_set_rate_sleep(r, value);
+	if (rc) {
+		/* Undo the active set vote and restore it to peer_khz */
+		value = peer_khz;
+		rc = clk_rpmrs_set_rate_active(r, value);
+	}
+
+out:
+	if (!rc)
+		r->enabled = true;
+
+	rt_mutex_unlock(&rpm_clock_lock);
+
+	return rc;
+}
+
+static void rpm_clk_unprepare(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+
+	rt_mutex_lock(&rpm_clock_lock);
+
+	if (r->c.rate) {
+		uint32_t value;
+		struct rpm_clk *peer = r->peer;
+		unsigned long peer_khz = 0, peer_sleep_khz = 0;
+		int rc;
+
+		/* Take peer clock's rate into account only if it's enabled. */
+		if (peer->enabled)
+			to_active_sleep_khz(peer, peer->c.rate,
+				&peer_khz, &peer_sleep_khz);
+
+		value = r->branch ? !!peer_khz : peer_khz;
+		rc = clk_rpmrs_set_rate_active(r, value);
+		if (rc)
+			goto out;
+
+		value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
+		rc = clk_rpmrs_set_rate_sleep(r, value);
+	}
+	r->enabled = false;
+out:
+	rt_mutex_unlock(&rpm_clock_lock);
+}
+
+static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	unsigned long this_khz, this_sleep_khz;
+	int rc = 0;
+
+	rt_mutex_lock(&rpm_clock_lock);
+
+	if (r->enabled) {
+		uint32_t value;
+		struct rpm_clk *peer = r->peer;
+		unsigned long peer_khz = 0, peer_sleep_khz = 0;
+
+		to_active_sleep_khz(r, rate, &this_khz, &this_sleep_khz);
+
+		/* Take peer clock's rate into account only if it's enabled. */
+		if (peer->enabled)
+			to_active_sleep_khz(peer, peer->c.rate,
+					&peer_khz, &peer_sleep_khz);
+
+		value = max(this_khz, peer_khz);
+		rc = clk_rpmrs_set_rate_active(r, value);
+		if (rc)
+			goto out;
+
+		value = max(this_sleep_khz, peer_sleep_khz);
+		rc = clk_rpmrs_set_rate_sleep(r, value);
+	}
+
+out:
+	rt_mutex_unlock(&rpm_clock_lock);
+
+	return rc;
+}
+
+static unsigned long rpm_clk_get_rate(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+
+	if (r->rpmrs_data->get_rate_fn)
+		return r->rpmrs_data->get_rate_fn(r);
+
+	return clk->rate;
+}
+
+static int rpm_clk_is_enabled(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+
+	return r->rpmrs_data->is_enabled(r);
+}
+
+static long rpm_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	/* Rounding is not supported; return the requested rate unchanged. */
+	return rate;
+}
+
+static bool rpm_clk_is_local(struct clk *clk)
+{
+	return false;
+}
+
+static enum handoff rpm_clk_handoff(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	int rc;
+
+	/*
+	 * Querying an RPM clock's status will return 0 unless the clock's
+	 * rate has previously been set through the RPM. When handing off,
+	 * assume these clocks are enabled (unless the RPM call fails) so
+	 * child clocks of these RPM clocks can still be handed off.
+	 */
+	rc  = r->rpmrs_data->handoff_fn(r);
+	if (rc < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	/*
+	 * Since RPM handoff code may update the software rate of the clock by
+	 * querying the RPM, we need to make sure our request to RPM now
+	 * matches the software rate of the clock. When we send the request
+	 * to RPM, we also need to update any other state info we would
+	 * normally update. So, call the appropriate clock function instead
+	 * of directly using the RPM driver APIs.
+	 */
+	rc = rpm_clk_prepare(clk);
+	if (rc < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+#define RPM_MISC_CLK_TYPE	0x306b6c63
+#define RPM_SCALING_ENABLE_ID	0x2
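+
+/* RPM_MISC_CLK_TYPE is the ASCII string "clk0" packed little-endian */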
+
+int enable_rpm_scaling(void)
+{
+	int rc, value = 0x1;
+	static int is_inited;
+
+	struct msm_rpm_kvp kvp = {
+		.key = RPM_SMD_KEY_ENABLE,
+		.data = (void *)&value,
+		.length = sizeof(value),
+	};
+
+	if (is_inited)
+		return 0;
+
+	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_SLEEP_SET,
+			RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			WARN(1, "RPM clock scaling (sleep set) did not enable!\n");
+		return rc;
+	}
+
+	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_ACTIVE_SET,
+			RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			WARN(1, "RPM clock scaling (active set) did not enable!\n");
+		return rc;
+	}
+
+	is_inited++;
+	return 0;
+}
+
+int vote_bimc(struct rpm_clk *r, uint32_t value)
+{
+	int rc;
+
+	struct msm_rpm_kvp kvp = {
+		.key = r->rpm_key,
+		.data = (void *)&value,
+		.length = sizeof(value),
+	};
+
+	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_ACTIVE_SET,
+			r->rpm_res_type, r->rpmrs_data->ctx_active_id,
+			&kvp, 1);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			WARN(1, "BIMC vote not sent!\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+const struct clk_ops clk_ops_rpm = {
+	.prepare = rpm_clk_prepare,
+	.unprepare = rpm_clk_unprepare,
+	.set_rate = rpm_clk_set_rate,
+	.get_rate = rpm_clk_get_rate,
+	.is_enabled = rpm_clk_is_enabled,
+	.round_rate = rpm_clk_round_rate,
+	.is_local = rpm_clk_is_local,
+	.handoff = rpm_clk_handoff,
+};
+
+const struct clk_ops clk_ops_rpm_branch = {
+	.prepare = rpm_clk_prepare,
+	.unprepare = rpm_clk_unprepare,
+	.is_local = rpm_clk_is_local,
+	.handoff = rpm_clk_handoff,
+};
+
+static struct rpm_clk *rpm_clk_dt_parser_common(struct device *dev,
+						struct device_node *np)
+{
+	struct rpm_clk *rpm, *peer;
+	struct clk *c;
+	int rc = 0;
+	phandle p;
+	const char *str;
+
+	rpm = devm_kzalloc(dev, sizeof(*rpm), GFP_KERNEL);
+	if (!rpm)
+		return ERR_PTR(-ENOMEM);
+
+	rc = of_property_read_phandle_index(np, "qcom,rpm-peer", 0, &p);
+	if (rc) {
+		dt_err(np, "missing qcom,rpm-peer dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	/* Rely on whoever's called last to set up the circular ref */
+	c = msmclk_lookup_phandle(dev, p);
+	if (!IS_ERR(c)) {
+		uint32_t *sleep = devm_kzalloc(dev, sizeof(uint32_t),
+					       GFP_KERNEL);
+		uint32_t *active =
+			devm_kzalloc(dev, sizeof(uint32_t),
+				     GFP_KERNEL);
+
+		if (!sleep || !active)
+			return ERR_PTR(-ENOMEM);
+		peer = to_rpm_clk(c);
+		peer->peer = rpm;
+		rpm->peer = peer;
+		rpm->last_active_set_vote = active;
+		peer->last_active_set_vote = active;
+		rpm->last_sleep_set_vote = sleep;
+		peer->last_sleep_set_vote = sleep;
+	}
+
+	rpm->rpmrs_data = &clk_rpmrs_data_smd;
+	rpm->active_only = of_device_is_compatible(np, "qcom,rpm-a-clk") ||
+			of_device_is_compatible(np, "qcom,rpm-branch-a-clk");
+
+	rc = of_property_read_string(np, "qcom,res-type", &str);
+	if (rc) {
+		dt_err(np, "missing qcom,res-type dt property\n");
+		return ERR_PTR(rc);
+	}
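+	/* Pack the first four characters of the DT string into the u32 code */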
+	if (sscanf(str, "%4c", (char *) &rpm->rpm_res_type) <= 0)
+		return ERR_PTR(-EINVAL);
+
+	rc = of_property_read_u32(np, "qcom,res-id", &rpm->rpm_clk_id);
+	if (rc) {
+		dt_err(np, "missing qcom,res-id dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	rc = of_property_read_string(np, "qcom,key", &str);
+	if (rc) {
+		dt_err(np, "missing qcom,key dt property\n");
+		return ERR_PTR(rc);
+	}
+	if (sscanf(str, "%4c", (char *) &rpm->rpm_key) <= 0)
+		return ERR_PTR(-EINVAL);
+	return rpm;
+}
+
+static void *rpm_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct rpm_clk *rpm;
+
+	rpm = rpm_clk_dt_parser_common(dev, np);
+	if (IS_ERR(rpm))
+		return rpm;
+
+	rpm->c.ops = &clk_ops_rpm;
+	return msmclk_generic_clk_init(dev, np, &rpm->c);
+}
+
+static void *rpm_branch_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct rpm_clk *rpm;
+	u32 rate;
+	int rc;
+
+	rpm = rpm_clk_dt_parser_common(dev, np);
+	if (IS_ERR(rpm))
+		return rpm;
+
+	rpm->c.ops = &clk_ops_rpm_branch;
+	rpm->branch = true;
+
+	rc = of_property_read_u32(np, "qcom,rcg-init-rate", &rate);
+	if (!rc)
+		rpm->c.rate = rate;
+
+	return msmclk_generic_clk_init(dev, np, &rpm->c);
+}
+MSMCLK_PARSER(rpm_clk_dt_parser, "qcom,rpm-clk", 0);
+MSMCLK_PARSER(rpm_clk_dt_parser, "qcom,rpm-a-clk", 1);
+MSMCLK_PARSER(rpm_branch_clk_dt_parser, "qcom,rpm-branch-clk", 0);
+MSMCLK_PARSER(rpm_branch_clk_dt_parser, "qcom,rpm-branch-a-clk", 1);
diff --git a/drivers/clk/msm/clock-voter.c b/drivers/clk/msm/clock-voter.c
new file mode 100644
index 0000000..b504724
--- /dev/null
+++ b/drivers/clk/msm/clock-voter.c
@@ -0,0 +1,202 @@
+/* Copyright (c) 2010-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/rtmutex.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/clock-voter.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+static DEFINE_RT_MUTEX(voter_clk_lock);
+
+/* Aggregate the rate of clocks that are currently on. */
+static unsigned long voter_clk_aggregate_rate(const struct clk *parent)
+{
+	struct clk *clk;
+	unsigned long rate = 0;
+
+	list_for_each_entry(clk, &parent->children, siblings) {
+		struct clk_voter *v = to_clk_voter(clk);
+
+		if (v->enabled)
+			rate = max(clk->rate, rate);
+	}
+	return rate;
+}
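+
+/*
+ * Example: with three enabled voters on one parent at 100, 200 and 150 MHz,
+ * the aggregate is 200 MHz; disabled voters contribute nothing.
+ */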
+
+static int voter_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	int ret = 0;
+	struct clk *clkp;
+	struct clk_voter *clkh, *v = to_clk_voter(clk);
+	unsigned long cur_rate, new_rate, other_rate = 0;
+
+	if (v->is_branch)
+		return 0;
+
+	rt_mutex_lock(&voter_clk_lock);
+
+	if (v->enabled) {
+		struct clk *parent = clk->parent;
+
+		/*
+		 * Get the aggregate rate without this clock's vote and update
+		 * if the new rate is different from the current rate.
+		 */
+		list_for_each_entry(clkp, &parent->children, siblings) {
+			clkh = to_clk_voter(clkp);
+			if (clkh->enabled && clkh != v)
+				other_rate = max(clkp->rate, other_rate);
+		}
+
+		cur_rate = max(other_rate, clk->rate);
+		new_rate = max(other_rate, rate);
+
+		if (new_rate != cur_rate) {
+			ret = clk_set_rate(parent, new_rate);
+			if (ret)
+				goto unlock;
+		}
+	}
+	clk->rate = rate;
+unlock:
+	rt_mutex_unlock(&voter_clk_lock);
+
+	return ret;
+}
+
+static int voter_clk_prepare(struct clk *clk)
+{
+	int ret = 0;
+	unsigned long cur_rate;
+	struct clk *parent;
+	struct clk_voter *v = to_clk_voter(clk);
+
+	rt_mutex_lock(&voter_clk_lock);
+	parent = clk->parent;
+
+	if (v->is_branch) {
+		v->enabled = true;
+		goto out;
+	}
+
+	/*
+	 * Increase the rate if this clock is voting for a higher rate
+	 * than the current rate.
+	 */
+	cur_rate = voter_clk_aggregate_rate(parent);
+	if (clk->rate > cur_rate) {
+		ret = clk_set_rate(parent, clk->rate);
+		if (ret)
+			goto out;
+	}
+	v->enabled = true;
+out:
+	rt_mutex_unlock(&voter_clk_lock);
+
+	return ret;
+}
+
+static void voter_clk_unprepare(struct clk *clk)
+{
+	unsigned long cur_rate, new_rate;
+	struct clk *parent;
+	struct clk_voter *v = to_clk_voter(clk);
+
+	rt_mutex_lock(&voter_clk_lock);
+	parent = clk->parent;
+
+	/*
+	 * Decrease the rate if this clock was the only one voting for
+	 * the highest rate.
+	 */
+	v->enabled = false;
+	if (v->is_branch)
+		goto out;
+
+	new_rate = voter_clk_aggregate_rate(parent);
+	cur_rate = max(new_rate, clk->rate);
+
+	if (new_rate < cur_rate)
+		clk_set_rate(parent, new_rate);
+
+out:
+	rt_mutex_unlock(&voter_clk_lock);
+}
+
+static int voter_clk_is_enabled(struct clk *clk)
+{
+	struct clk_voter *v = to_clk_voter(clk);
+
+	return v->enabled;
+}
+
+static long voter_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	return clk_round_rate(clk->parent, rate);
+}
+
+static bool voter_clk_is_local(struct clk *clk)
+{
+	return true;
+}
+
+static enum handoff voter_clk_handoff(struct clk *clk)
+{
+	if (!clk->rate)
+		return HANDOFF_DISABLED_CLK;
+
+	/*
+	 * Send the default rate to the parent if necessary and update the
+	 * software state of the voter clock.
+	 */
+	if (voter_clk_prepare(clk) < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+const struct clk_ops clk_ops_voter = {
+	.prepare = voter_clk_prepare,
+	.unprepare = voter_clk_unprepare,
+	.set_rate = voter_clk_set_rate,
+	.is_enabled = voter_clk_is_enabled,
+	.round_rate = voter_clk_round_rate,
+	.is_local = voter_clk_is_local,
+	.handoff = voter_clk_handoff,
+};
+
+static void *sw_vote_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct clk_voter *v;
+	int rc;
+	u32 temp;
+
+	v = devm_kzalloc(dev, sizeof(*v), GFP_KERNEL);
+	if (!v)
+		return ERR_PTR(-ENOMEM);
+
+	rc = of_property_read_u32(np, "qcom,config-rate", &temp);
+	if (rc) {
+		dt_prop_err(np, "qcom,config-rate", "is missing");
+		return ERR_PTR(rc);
+	}
+	v->c.rate = temp;
+
+	v->c.ops = &clk_ops_voter;
+	return msmclk_generic_clk_init(dev, np, &v->c);
+}
+MSMCLK_PARSER(sw_vote_clk_dt_parser, "qcom,sw-vote-clk", 0);
diff --git a/drivers/clk/msm/clock.c b/drivers/clk/msm/clock.c
new file mode 100644
index 0000000..30eac98
--- /dev/null
+++ b/drivers/clk/msm/clock.c
@@ -0,0 +1,1407 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+
+#include <trace/events/power.h>
+#include "clock.h"
+
+struct handoff_clk {
+	struct list_head list;
+	struct clk *clk;
+};
+static LIST_HEAD(handoff_list);
+
+struct handoff_vdd {
+	struct list_head list;
+	struct clk_vdd_class *vdd_class;
+};
+static LIST_HEAD(handoff_vdd_list);
+
+static DEFINE_MUTEX(msm_clock_init_lock);
+LIST_HEAD(orphan_clk_list);
+static LIST_HEAD(clk_notifier_list);
+
+/* Find the voltage level required for a given rate. */
+int find_vdd_level(struct clk *clk, unsigned long rate)
+{
+	int level;
+
+	for (level = 0; level < clk->num_fmax; level++)
+		if (rate <= clk->fmax[level])
+			break;
+
+	if (level == clk->num_fmax) {
+		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
+			clk->dbg_name);
+		return -EINVAL;
+	}
+
+	return level;
+}
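+
+/*
+ * E.g. with fmax[] = { 100 MHz, 200 MHz }, a 150 MHz request maps to
+ * level 1, and a 250 MHz request fails with -EINVAL.
+ */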
+
+/* Update voltage level given the current votes. */
+static int update_vdd(struct clk_vdd_class *vdd_class)
+{
+	int level, rc = 0, i, ignore;
+	struct regulator **r = vdd_class->regulator;
+	int *uv = vdd_class->vdd_uv;
+	int *ua = vdd_class->vdd_ua;
+	int n_reg = vdd_class->num_regulators;
+	int cur_lvl = vdd_class->cur_level;
+	int max_lvl = vdd_class->num_levels - 1;
+	int cur_base = cur_lvl * n_reg;
+	int new_base;
+
+	/* aggregate votes */
+	for (level = max_lvl; level > 0; level--)
+		if (vdd_class->level_votes[level])
+			break;
+
+	if (level == cur_lvl)
+		return 0;
+
+	max_lvl = max_lvl * n_reg;
+	new_base = level * n_reg;
+	for (i = 0; i < vdd_class->num_regulators; i++) {
+		rc = regulator_set_voltage(r[i], uv[new_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+		if (rc)
+			goto set_voltage_fail;
+
+		if (ua) {
+			rc = regulator_set_load(r[i], ua[new_base + i]);
+			rc = rc > 0 ? 0 : rc;
+			if (rc)
+				goto set_mode_fail;
+		}
+		if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
+			rc = regulator_enable(r[i]);
+		else if (level == 0)
+			rc = regulator_disable(r[i]);
+		if (rc)
+			goto enable_disable_fail;
+	}
+	if (vdd_class->set_vdd && !vdd_class->num_regulators)
+		rc = vdd_class->set_vdd(vdd_class, level);
+
+	if (!rc)
+		vdd_class->cur_level = level;
+
+	return rc;
+
+enable_disable_fail:
+	/*
+	 * set_optimum_mode could use voltage to derive mode.  Restore
+	 * previous voltage setting for r[i] first.
+	 */
+	if (ua) {
+		regulator_set_voltage(r[i], uv[cur_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+		regulator_set_load(r[i], ua[cur_base + i]);
+	}
+
+set_mode_fail:
+	regulator_set_voltage(r[i], uv[cur_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+
+set_voltage_fail:
+	for (i--; i >= 0; i--) {
+		regulator_set_voltage(r[i], uv[cur_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+		if (ua)
+			regulator_set_load(r[i], ua[cur_base + i]);
+		if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
+			regulator_disable(r[i]);
+		else if (level == 0)
+			ignore = regulator_enable(r[i]);
+	}
+	return rc;
+}
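+
+/*
+ * vdd_uv (and vdd_ua) are flattened [num_levels][num_regulators] tables:
+ * with two regulators, the voltages for level 3 live at uv[6] and uv[7].
+ */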
+
+/* Vote for a voltage level. */
+int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
+{
+	int rc;
+
+	if (level >= vdd_class->num_levels)
+		return -EINVAL;
+
+	mutex_lock(&vdd_class->lock);
+	vdd_class->level_votes[level]++;
+	rc = update_vdd(vdd_class);
+	if (rc)
+		vdd_class->level_votes[level]--;
+	mutex_unlock(&vdd_class->lock);
+
+	return rc;
+}
+
+/* Remove vote for a voltage level. */
+int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
+{
+	int rc = 0;
+
+	if (level >= vdd_class->num_levels)
+		return -EINVAL;
+
+	mutex_lock(&vdd_class->lock);
+	if (WARN(!vdd_class->level_votes[level],
+			"Reference counts are incorrect for %s level %d\n",
+			vdd_class->class_name, level))
+		goto out;
+	vdd_class->level_votes[level]--;
+	rc = update_vdd(vdd_class);
+	if (rc)
+		vdd_class->level_votes[level]++;
+out:
+	mutex_unlock(&vdd_class->lock);
+	return rc;
+}
+
+/* Vote for a voltage level corresponding to a clock's rate. */
+static int vote_rate_vdd(struct clk *clk, unsigned long rate)
+{
+	int level;
+
+	if (!clk->vdd_class)
+		return 0;
+
+	level = find_vdd_level(clk, rate);
+	if (level < 0)
+		return level;
+
+	return vote_vdd_level(clk->vdd_class, level);
+}
+
+/* Remove vote for a voltage level corresponding to a clock's rate. */
+static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
+{
+	int level;
+
+	if (!clk->vdd_class)
+		return;
+
+	level = find_vdd_level(clk, rate);
+	if (level < 0)
+		return;
+
+	unvote_vdd_level(clk->vdd_class, level);
+}
+
+/* Check if the rate is within the voltage limits of the clock. */
+bool is_rate_valid(struct clk *clk, unsigned long rate)
+{
+	int level;
+
+	if (!clk->vdd_class)
+		return true;
+
+	level = find_vdd_level(clk, rate);
+	return level >= 0;
+}
+
+/**
+ * __clk_pre_reparent() - Set up the new parent before switching to it and
+ * prevent the enable state of the child clock from changing.
+ * @c: The child clock that's going to switch parents
+ * @new: The new parent that the child clock is going to switch to
+ * @flags: Pointer to scratch space to save spinlock flags
+ *
+ * Cannot be called from atomic context.
+ *
+ * Use this API to set up the @new parent clock to be able to support the
+ * current prepare and enable state of the child clock @c. Once the parent is
+ * set up, the child clock can safely switch to it.
+ *
+ * The caller shall grab the prepare_lock of clock @c before calling this API
+ * and only release it after calling __clk_post_reparent() for clock @c (or
+ * if this API fails). This is necessary to prevent the prepare state of the
+ * child clock @c from changing while the reparenting is in progress. Since
+ * this API takes care of grabbing the enable lock of @c, only atomic
+ * operations are allowed between calls to __clk_pre_reparent() and
+ * __clk_post_reparent().
+ *
+ * The scratch space pointed to by @flags should not be altered before
+ * calling __clk_post_reparent() for clock @c.
+ *
+ * See also: __clk_post_reparent()
+ */
+int __clk_pre_reparent(struct clk *c, struct clk *new, unsigned long *flags)
+{
+	int rc;
+
+	if (c->prepare_count) {
+		rc = clk_prepare(new);
+		if (rc)
+			return rc;
+	}
+
+	spin_lock_irqsave(&c->lock, *flags);
+	if (c->count) {
+		rc = clk_enable(new);
+		if (rc) {
+			spin_unlock_irqrestore(&c->lock, *flags);
+			clk_unprepare(new);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+/**
+ * __clk_post_reparent() - Release requirements on old parent after switching
+ * away from it and allow changes to the child clock's enable state.
+ * @c:   The child clock that switched parents
+ * @old: The old parent that the child clock switched away from or the new
+ *	 parent of a failed reparent attempt.
+ * @flags: Pointer to scratch space where spinlock flags were saved
+ *
+ * Cannot be called from atomic context.
+ *
+ * This API works in tandem with __clk_pre_reparent. Use this API to
+ * - Remove prepare and enable requirements from the @old parent after
+ *   switching away from it
+ * - Or, undo the effects of __clk_pre_reparent() after a failed attempt to
+ *   change parents
+ *
+ * The caller shall release the prepare_lock of @c that was grabbed before
+ * calling __clk_pre_reparent() only after this API is called (or if
+ * __clk_pre_reparent() fails). This is necessary to prevent the prepare
+ * state of the child clock @c from changing while the reparenting is in
+ * progress. Since this API releases the enable lock of @c, the limit to
+ * atomic operations set by __clk_pre_reparent() is no longer present.
+ *
+ * The scratch space pointed to by @flags shall not be altered since the call
+ * to __clk_pre_reparent() for clock @c.
+ *
+ * See also: __clk_pre_reparent()
+ */
+void __clk_post_reparent(struct clk *c, struct clk *old, unsigned long *flags)
+{
+	if (c->count)
+		clk_disable(old);
+	spin_unlock_irqrestore(&c->lock, *flags);
+
+	if (c->prepare_count)
+		clk_unprepare(old);
+}
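+
+/*
+ * Illustrative use of the two helpers above (a sketch, not code from this
+ * patch; the mux write in the middle is whatever the clock hardware needs):
+ *
+ *	unsigned long flags;
+ *	int rc;
+ *
+ *	mutex_lock(&c->prepare_lock);
+ *	rc = __clk_pre_reparent(c, new_parent, &flags);
+ *	if (!rc) {
+ *		... program the hardware mux to select new_parent ...
+ *		c->parent = new_parent;
+ *		__clk_post_reparent(c, old_parent, &flags);
+ *	}
+ *	mutex_unlock(&c->prepare_lock);
+ */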
+
+int clk_prepare(struct clk *clk)
+{
+	int ret = 0;
+	struct clk *parent;
+
+	if (!clk)
+		return 0;
+	if (IS_ERR(clk))
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+	if (clk->prepare_count == 0) {
+		parent = clk->parent;
+
+		ret = clk_prepare(parent);
+		if (ret)
+			goto out;
+		ret = clk_prepare(clk->depends);
+		if (ret)
+			goto err_prepare_depends;
+
+		ret = vote_rate_vdd(clk, clk->rate);
+		if (ret)
+			goto err_vote_vdd;
+		if (clk->ops->prepare)
+			ret = clk->ops->prepare(clk);
+		if (ret)
+			goto err_prepare_clock;
+	}
+	clk->prepare_count++;
+out:
+	mutex_unlock(&clk->prepare_lock);
+	return ret;
+err_prepare_clock:
+	unvote_rate_vdd(clk, clk->rate);
+err_vote_vdd:
+	clk_unprepare(clk->depends);
+err_prepare_depends:
+	clk_unprepare(parent);
+	goto out;
+}
+EXPORT_SYMBOL(clk_prepare);
+
+/*
+ * Standard clock functions defined in include/linux/clk.h
+ */
+int clk_enable(struct clk *clk)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct clk *parent;
+	const char *name;
+
+	if (!clk)
+		return 0;
+	if (IS_ERR(clk))
+		return -EINVAL;
+	name = clk->dbg_name;
+
+	spin_lock_irqsave(&clk->lock, flags);
+	WARN(!clk->prepare_count,
+			"%s: Don't call enable on unprepared clocks\n", name);
+	if (clk->count == 0) {
+		parent = clk->parent;
+
+		ret = clk_enable(parent);
+		if (ret)
+			goto err_enable_parent;
+		ret = clk_enable(clk->depends);
+		if (ret)
+			goto err_enable_depends;
+
+		trace_clock_enable(name, 1, smp_processor_id());
+		if (clk->ops->enable)
+			ret = clk->ops->enable(clk);
+		if (ret)
+			goto err_enable_clock;
+	}
+	clk->count++;
+	spin_unlock_irqrestore(&clk->lock, flags);
+
+	return 0;
+
+err_enable_clock:
+	clk_disable(clk->depends);
+err_enable_depends:
+	clk_disable(parent);
+err_enable_parent:
+	spin_unlock_irqrestore(&clk->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+	const char *name;
+	unsigned long flags;
+
+	if (IS_ERR_OR_NULL(clk))
+		return;
+	name = clk->dbg_name;
+
+	spin_lock_irqsave(&clk->lock, flags);
+	WARN(!clk->prepare_count,
+			"%s: Never called prepare or calling disable after unprepare\n",
+			name);
+	if (WARN(clk->count == 0, "%s is unbalanced", name))
+		goto out;
+	if (clk->count == 1) {
+		struct clk *parent = clk->parent;
+
+		trace_clock_disable(name, 0, smp_processor_id());
+		if (clk->ops->disable)
+			clk->ops->disable(clk);
+		clk_disable(clk->depends);
+		clk_disable(parent);
+	}
+	clk->count--;
+out:
+	spin_unlock_irqrestore(&clk->lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
+void clk_unprepare(struct clk *clk)
+{
+	const char *name;
+
+	if (IS_ERR_OR_NULL(clk))
+		return;
+	name = clk->dbg_name;
+
+	mutex_lock(&clk->prepare_lock);
+	if (WARN(!clk->prepare_count, "%s is unbalanced (prepare)", name))
+		goto out;
+	if (clk->prepare_count == 1) {
+		struct clk *parent = clk->parent;
+
+		WARN(clk->count,
+			"%s: Don't call unprepare when the clock is enabled\n",
+			name);
+
+		if (clk->ops->unprepare)
+			clk->ops->unprepare(clk);
+		unvote_rate_vdd(clk, clk->rate);
+		clk_unprepare(clk->depends);
+		clk_unprepare(parent);
+	}
+	clk->prepare_count--;
+out:
+	mutex_unlock(&clk->prepare_lock);
+}
+EXPORT_SYMBOL(clk_unprepare);
+
+int clk_reset(struct clk *clk, enum clk_reset_action action)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (!clk->ops->reset)
+		return -EINVAL;
+
+	return clk->ops->reset(clk, action);
+}
+EXPORT_SYMBOL(clk_reset);
+
+/**
+ * __clk_notify - call clk notifier chain
+ * @clk: struct clk * that is changing rate
+ * @msg: clk notifier type (see include/linux/clk.h)
+ * @old_rate: old clk rate
+ * @new_rate: new clk rate
+ *
+ * Triggers a notifier call chain on the clk rate-change notification
+ * for 'clk'.  Passes a pointer to the struct clk and the previous
+ * and current rates to the notifier callback.  Intended to be called by
+ * internal clock code only.  Returns NOTIFY_DONE from the last driver
+ * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
+ * a driver returns that.
+ */
+static int __clk_notify(struct clk *clk, unsigned long msg,
+		unsigned long old_rate, unsigned long new_rate)
+{
+	struct msm_clk_notifier *cn;
+	struct msm_clk_notifier_data cnd;
+	int ret = NOTIFY_DONE;
+
+	cnd.clk = clk;
+	cnd.old_rate = old_rate;
+	cnd.new_rate = new_rate;
+
+	list_for_each_entry(cn, &clk_notifier_list, node) {
+		if (cn->clk == clk) {
+			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
+					&cnd);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * clk rate change notifiers
+ *
+ * Note - The following notifier functionality is a verbatim copy
+ * of the implementation in the common clock framework, copied here
+ * until MSM switches to the common clock framework.
+ */
+
+/**
+ * msm_clk_notif_register - add a clk rate change notifier
+ * @clk: struct clk * to watch
+ * @nb: struct notifier_block * with callback info
+ *
+ * Request notification when clk's rate changes.  This uses an SRCU
+ * notifier because we want it to block and notifier unregistrations are
+ * uncommon.  The callbacks associated with the notifier must not
+ * re-enter into the clk framework by calling any top-level clk APIs;
+ * this would deadlock on the nested prepare_lock mutex.
+ *
+ * Pre-change notifier callbacks will be passed the current, pre-change
+ * rate of the clk via struct msm_clk_notifier_data.old_rate.  The new,
+ * post-change rate of the clk is passed via struct
+ * msm_clk_notifier_data.new_rate.
+ *
+ * Post-change notifiers will pass the now-current, post-change rate of
+ * the clk in both struct msm_clk_notifier_data.old_rate and struct
+ * msm_clk_notifier_data.new_rate.
+ *
+ * Abort-change notifiers are effectively the opposite of pre-change
+ * notifiers: the original pre-change clk rate is passed in via struct
+ * msm_clk_notifier_data.new_rate and the failed post-change rate is passed
+ * in via struct msm_clk_notifier_data.old_rate.
+ *
+ * msm_clk_notif_register() must be called from non-atomic context.
+ * Returns -EINVAL if called with null arguments, -ENOMEM upon
+ * allocation failure; otherwise, passes along the return value of
+ * srcu_notifier_chain_register().
+ */
+int msm_clk_notif_register(struct clk *clk, struct notifier_block *nb)
+{
+	struct msm_clk_notifier *cn;
+	int ret = -ENOMEM;
+
+	if (!clk || !nb)
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+
+	/* search the list of notifiers for this clk */
+	list_for_each_entry(cn, &clk_notifier_list, node)
+		if (cn->clk == clk)
+			break;
+
+	/* if clk wasn't in the notifier list, allocate new clk_notifier */
+	if (cn->clk != clk) {
+		cn = kzalloc(sizeof(struct msm_clk_notifier), GFP_KERNEL);
+		if (!cn)
+			goto out;
+
+		cn->clk = clk;
+		srcu_init_notifier_head(&cn->notifier_head);
+
+		list_add(&cn->node, &clk_notifier_list);
+	}
+
+	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
+
+	clk->notifier_count++;
+
+out:
+	mutex_unlock(&clk->prepare_lock);
+
+	return ret;
+}
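+
+/*
+ * Minimal registration sketch (names hypothetical): the callback receives a
+ * struct msm_clk_notifier_data pointer carrying the old and new rates.
+ *
+ *	static int my_rate_cb(struct notifier_block *nb, unsigned long msg,
+ *			      void *data)
+ *	{
+ *		struct msm_clk_notifier_data *cnd = data;
+ *
+ *		if (msg == PRE_RATE_CHANGE)
+ *			pr_debug("rate: %lu -> %lu\n", cnd->old_rate,
+ *				 cnd->new_rate);
+ *		return NOTIFY_OK;
+ *	}
+ *	static struct notifier_block my_nb = { .notifier_call = my_rate_cb };
+ *
+ *	msm_clk_notif_register(c, &my_nb);
+ */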
+
+/**
+ * msm_clk_notif_unregister - remove a clk rate change notifier
+ * @clk: struct clk *
+ * @nb: struct notifier_block * with callback info
+ *
+ * Requests no further notification for changes to 'clk' and frees the
+ * memory allocated in msm_clk_notif_register().
+ *
+ * Returns -EINVAL if called with null arguments; otherwise, passes
+ * along the return value of srcu_notifier_chain_unregister().
+ */
+int msm_clk_notif_unregister(struct clk *clk, struct notifier_block *nb)
+{
+	struct msm_clk_notifier *cn = NULL;
+	int ret = -EINVAL;
+
+	if (!clk || !nb)
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+
+	list_for_each_entry(cn, &clk_notifier_list, node)
+		if (cn->clk == clk)
+			break;
+
+	if (cn->clk == clk) {
+		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
+
+		clk->notifier_count--;
+
+		/* XXX the notifier code should handle this better */
+		if (!cn->notifier_head.head) {
+			srcu_cleanup_notifier_head(&cn->notifier_head);
+			list_del(&cn->node);
+			kfree(cn);
+		}
+
+	} else {
+		ret = -ENOENT;
+	}
+
+	mutex_unlock(&clk->prepare_lock);
+
+	return ret;
+}
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return 0;
+
+	if (!clk->ops->get_rate)
+		return clk->rate;
+
+	return clk->ops->get_rate(clk);
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	unsigned long start_rate;
+	int rc = 0;
+	const char *name;
+
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+	name = clk->dbg_name;
+
+	if (!is_rate_valid(clk, rate))
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+
+	/* Return early if the rate isn't going to change */
+	if (clk->rate == rate && !(clk->flags & CLKFLAG_NO_RATE_CACHE))
+		goto out;
+
+	if (!clk->ops->set_rate) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	trace_clock_set_rate(name, rate, raw_smp_processor_id());
+
+	start_rate = clk->rate;
+
+	if (clk->notifier_count)
+		__clk_notify(clk, PRE_RATE_CHANGE, clk->rate, rate);
+
+	if (clk->ops->pre_set_rate) {
+		rc = clk->ops->pre_set_rate(clk, rate);
+		if (rc)
+			goto abort_set_rate;
+	}
+
+	/* Enforce vdd requirements for target frequency. */
+	if (clk->prepare_count) {
+		rc = vote_rate_vdd(clk, rate);
+		if (rc)
+			goto err_vote_vdd;
+	}
+
+	rc = clk->ops->set_rate(clk, rate);
+	if (rc)
+		goto err_set_rate;
+	clk->rate = rate;
+
+	/* Release vdd requirements for starting frequency. */
+	if (clk->prepare_count)
+		unvote_rate_vdd(clk, start_rate);
+
+	if (clk->ops->post_set_rate)
+		clk->ops->post_set_rate(clk, start_rate);
+
+	if (clk->notifier_count)
+		__clk_notify(clk, POST_RATE_CHANGE, start_rate, clk->rate);
+
+	trace_clock_set_rate_complete(name, clk->rate, raw_smp_processor_id());
+out:
+	mutex_unlock(&clk->prepare_lock);
+	return rc;
+
+abort_set_rate:
+	__clk_notify(clk, ABORT_RATE_CHANGE, clk->rate, rate);
+err_set_rate:
+	if (clk->prepare_count)
+		unvote_rate_vdd(clk, rate);
+err_vote_vdd:
+	/* clk->rate is still the old rate. So, pass the new rate instead. */
+	if (clk->ops->post_set_rate)
+		clk->ops->post_set_rate(clk, rate);
+	goto out;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	long rrate;
+	unsigned long fmax = 0, i;
+
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	for (i = 0; i < clk->num_fmax; i++)
+		fmax = max(fmax, clk->fmax[i]);
+	if (!fmax)
+		fmax = ULONG_MAX;
+	rate = min(rate, fmax);
+
+	if (clk->ops->round_rate)
+		rrate = clk->ops->round_rate(clk, rate);
+	else if (clk->rate)
+		rrate = clk->rate;
+	else
+		return -EINVAL;
+
+	if (rrate > fmax)
+		return -EINVAL;
+	return rrate;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (!clk->ops->set_max_rate)
+		return -EINVAL;
+
+	return clk->ops->set_max_rate(clk, rate);
+}
+EXPORT_SYMBOL(clk_set_max_rate);
+
+int parent_to_src_sel(struct clk_src *parents, int num_parents, struct clk *p)
+{
+	int i;
+
+	for (i = 0; i < num_parents; i++) {
+		if (parents[i].src == p)
+			return parents[i].sel;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(parent_to_src_sel);
+
+int clk_get_parent_sel(struct clk *c, struct clk *parent)
+{
+	return parent_to_src_sel(c->parents, c->num_parents, parent);
+}
+EXPORT_SYMBOL(clk_get_parent_sel);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+	int rc = 0;
+
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (!clk->ops->set_parent && clk->parent == parent)
+		return 0;
+
+	if (!clk->ops->set_parent)
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+	if (clk->parent == parent && !(clk->flags & CLKFLAG_NO_RATE_CACHE))
+		goto out;
+	rc = clk->ops->set_parent(clk, parent);
+out:
+	mutex_unlock(&clk->prepare_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return NULL;
+
+	return clk->parent;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
+int clk_set_flags(struct clk *clk, unsigned long flags)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+	if (!clk->ops->set_flags)
+		return -EINVAL;
+
+	return clk->ops->set_flags(clk, flags);
+}
+EXPORT_SYMBOL(clk_set_flags);
+
+int clk_set_duty_cycle(struct clk *clk, u32 numerator, u32 denominator)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (!denominator) {
+		pr_err("Denominator cannot be zero\n");
+		return -EINVAL;
+	}
+
+	if (numerator > denominator) {
+		pr_err("Numerator cannot be greater than denominator\n");
+		return -EINVAL;
+	}
+
+	if (!clk->ops->set_duty_cycle)
+		return -EINVAL;
+
+	return clk->ops->set_duty_cycle(clk, numerator, denominator);
+}
+EXPORT_SYMBOL(clk_set_duty_cycle);
+
+static LIST_HEAD(initdata_list);
+
+static void init_sibling_lists(struct clk_lookup *clock_tbl, size_t num_clocks)
+{
+	struct clk *clk, *parent;
+	unsigned long n;
+
+	for (n = 0; n < num_clocks; n++) {
+		clk = clock_tbl[n].clk;
+		parent = clk->parent;
+		if (parent && list_empty(&clk->siblings))
+			list_add(&clk->siblings, &parent->children);
+	}
+}
+
+static void vdd_class_init(struct clk_vdd_class *vdd)
+{
+	struct handoff_vdd *v;
+
+	if (!vdd)
+		return;
+
+	if (vdd->skip_handoff)
+		return;
+
+	list_for_each_entry(v, &handoff_vdd_list, list) {
+		if (v->vdd_class == vdd)
+			return;
+	}
+
+	pr_debug("voting for vdd_class %s\n", vdd->class_name);
+	if (vote_vdd_level(vdd, vdd->num_levels - 1))
+		pr_err("failed to vote for %s\n", vdd->class_name);
+
+	v = kmalloc(sizeof(*v), GFP_KERNEL);
+	if (!v)
+		return;
+
+	v->vdd_class = vdd;
+	list_add_tail(&v->list, &handoff_vdd_list);
+}
+
+static int __handoff_clk(struct clk *clk)
+{
+	enum handoff state = HANDOFF_DISABLED_CLK;
+	struct handoff_clk *h = NULL;
+	int rc, i;
+
+	if (clk == NULL || clk->flags & CLKFLAG_INIT_DONE ||
+	    clk->flags & CLKFLAG_SKIP_HANDOFF)
+		return 0;
+
+	if (clk->flags & CLKFLAG_INIT_ERR)
+		return -ENXIO;
+
+	if (clk->flags & CLKFLAG_EPROBE_DEFER)
+		return -EPROBE_DEFER;
+
+	/* Handoff any 'depends' clock first. */
+	rc = __handoff_clk(clk->depends);
+	if (rc)
+		goto err;
+
+	/*
+	 * Handoff functions for the parent must be called before the
+	 * children can be handed off. Without handing off the parents and
+	 * knowing their rate and state (on/off), it's impossible to figure
+	 * out the rate and state of the children.
+	 */
+	if (clk->ops->get_parent)
+		clk->parent = clk->ops->get_parent(clk);
+
+	if (IS_ERR(clk->parent)) {
+		rc = PTR_ERR(clk->parent);
+		goto err;
+	}
+
+	rc = __handoff_clk(clk->parent);
+	if (rc)
+		goto err;
+
+	for (i = 0; i < clk->num_parents; i++) {
+		rc = __handoff_clk(clk->parents[i].src);
+		if (rc)
+			goto err;
+	}
+
+	if (clk->ops->handoff)
+		state = clk->ops->handoff(clk);
+
+	if (state == HANDOFF_ENABLED_CLK) {
+
+		h = kmalloc(sizeof(*h), GFP_KERNEL);
+		if (!h) {
+			rc = -ENOMEM;
+			goto err;
+		}
+
+		rc = clk_prepare_enable(clk->parent);
+		if (rc)
+			goto err;
+
+		rc = clk_prepare_enable(clk->depends);
+		if (rc)
+			goto err_depends;
+
+		rc = vote_rate_vdd(clk, clk->rate);
+		WARN(rc, "%s unable to vote for voltage!\n", clk->dbg_name);
+
+		clk->count = 1;
+		clk->prepare_count = 1;
+		h->clk = clk;
+		list_add_tail(&h->list, &handoff_list);
+
+		pr_debug("Handed off %s rate=%lu\n", clk->dbg_name, clk->rate);
+	}
+
+	if (clk->init_rate && clk_set_rate(clk, clk->init_rate))
+		pr_err("failed to set an init rate of %lu on %s\n",
+			clk->init_rate, clk->dbg_name);
+	if (clk->always_on && clk_prepare_enable(clk))
+		pr_err("failed to enable always-on clock %s\n",
+			clk->dbg_name);
+
+	clk->flags |= CLKFLAG_INIT_DONE;
+	/* if the clk is on orphan list, remove it */
+	list_del_init(&clk->list);
+	clock_debug_register(clk);
+
+	return 0;
+
+err_depends:
+	clk_disable_unprepare(clk->parent);
+err:
+	kfree(h);
+	if (rc == -EPROBE_DEFER) {
+		clk->flags |= CLKFLAG_EPROBE_DEFER;
+		if (list_empty(&clk->list))
+			list_add_tail(&clk->list, &orphan_clk_list);
+	} else {
+		pr_err("%s handoff failed (%d)\n", clk->dbg_name, rc);
+		clk->flags |= CLKFLAG_INIT_ERR;
+	}
+	return rc;
+}
+
+/**
+ * msm_clock_register() - Register additional clock tables
+ * @table: Table of clocks
+ * @size: Size of @table
+ *
+ * Upon return, clock APIs may be used to control clocks registered using this
+ * function.
+ */
+int msm_clock_register(struct clk_lookup *table, size_t size)
+{
+	int n = 0, rc;
+	struct clk *c, *safe;
+	bool found_more_clks;
+
+	mutex_lock(&msm_clock_init_lock);
+
+	init_sibling_lists(table, size);
+
+	/*
+	 * Enable regulators and temporarily set them up at maximum voltage.
+	 * Once all the clocks have made their respective vote, remove this
+	 * temporary vote. The removing of the temporary vote is done at
+	 * late_init, by which time we assume all the clocks would have been
+	 * handed off.
+	 */
+	for (n = 0; n < size; n++)
+		vdd_class_init(table[n].clk->vdd_class);
+
+	/*
+	 * Detect and preserve initial clock state until clock_late_init() or
+	 * a driver explicitly changes it, whichever is first.
+	 */
+
+	for (n = 0; n < size; n++)
+		__handoff_clk(table[n].clk);
+
+	/* maintain backwards compatibility */
+	if (table[0].con_id || table[0].dev_id)
+		clkdev_add_table(table, size);
+
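+	/*
+	 * Repeatedly retry handoff for orphan clocks: a successful handoff
+	 * can satisfy the parent or dependency of another orphan, so keep
+	 * iterating until a full pass makes no progress.
+	 */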
+	do {
+		found_more_clks = false;
+		/* clear cached __handoff_clk return values */
+		list_for_each_entry_safe(c, safe, &orphan_clk_list, list)
+			c->flags &= ~CLKFLAG_EPROBE_DEFER;
+
+		list_for_each_entry_safe(c, safe, &orphan_clk_list, list) {
+			rc = __handoff_clk(c);
+			if (!rc)
+				found_more_clks = true;
+		}
+	} while (found_more_clks);
+
+	mutex_unlock(&msm_clock_init_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_clock_register);
+
+struct of_msm_provider_data {
+	struct clk_lookup *table;
+	size_t size;
+};
+
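+/*
+ * Translate a DT clock specifier (a single cell holding the clock's
+ * of_idx) into the corresponding struct clk from the provider's table.
+ */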
+static struct clk *of_clk_src_get(struct of_phandle_args *clkspec,
+				  void *data)
+{
+	struct of_msm_provider_data *ofdata = data;
+	int n;
+
+	for (n = 0; n < ofdata->size; n++) {
+		if (clkspec->args[0] == ofdata->table[n].of_idx)
+			return ofdata->table[n].clk;
+	}
+	return ERR_PTR(-ENOENT);
+}
+
+#define MAX_LEN_OPP_HANDLE	50
+#define LEN_OPP_HANDLE		16
+#define LEN_OPP_VCORNER_HANDLE	22
+
+static struct device **derive_device_list(struct clk *clk,
+					struct device_node *np,
+					char *clk_handle_name, int len)
+{
+	int j, count, cpu;
+	struct platform_device *pdev;
+	struct device_node *dev_node;
+	struct device **device_list;
+
+	count = len/sizeof(u32);
+	device_list = kmalloc_array(count, sizeof(struct device *),
+							GFP_KERNEL);
+	if (!device_list)
+		return ERR_PTR(-ENOMEM);
+
+	for (j = 0; j < count; j++) {
+		device_list[j] = NULL;
+		dev_node = of_parse_phandle(np, clk_handle_name, j);
+		if (!dev_node) {
+			pr_err("Unable to get device_node pointer for %s opp-handle (%s)\n",
+					clk->dbg_name, clk_handle_name);
+			goto err_parse_phandle;
+		}
+
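+		/*
+		 * The phandle may refer to a CPU node or to a platform
+		 * device; try the CPU nodes first, then fall back to a
+		 * platform_device lookup.
+		 */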
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == dev_node)
+				device_list[j] = get_cpu_device(cpu);
+		}
+
+		if (device_list[j])
+			continue;
+
+		pdev = of_find_device_by_node(dev_node);
+		if (!pdev) {
+			pr_err("Unable to find platform_device node for %s opp-handle\n",
+						clk->dbg_name);
+			goto err_parse_phandle;
+		}
+		device_list[j] = &pdev->dev;
+	}
+	return device_list;
+err_parse_phandle:
+	kfree(device_list);
+	return ERR_PTR(-EINVAL);
+}
+
+static int get_voltage(struct clk *clk, unsigned long rate,
+				int store_vcorner, int n)
+{
+	struct clk_vdd_class *vdd;
+	int uv, level, corner;
+
+	/*
+	 * Use the first regulator in the vdd class
+	 * for the OPP table.
+	 */
+	vdd = clk->vdd_class;
+	if (vdd->num_regulators > 1) {
+		corner = vdd->vdd_uv[vdd->num_regulators * n];
+	} else {
+		level = find_vdd_level(clk, rate);
+		if (level < 0) {
+			pr_err("Could not find vdd level\n");
+			return -EINVAL;
+		}
+		corner = vdd->vdd_uv[level];
+	}
+
+	if (!corner) {
+		pr_err("%s: Unable to find vdd level for rate %lu\n",
+					clk->dbg_name, rate);
+		return -EINVAL;
+	}
+
+	if (store_vcorner) {
+		uv = corner;
+		return uv;
+	}
+
+	uv = regulator_list_corner_voltage(vdd->regulator[0], corner);
+	if (uv < 0) {
+		pr_err("%s: no uv for corner %d - err: %d\n",
+				clk->dbg_name, corner, uv);
+		return uv;
+	}
+	return uv;
+}
+
+static int add_and_print_opp(struct clk *clk, struct device **device_list,
+				int count, unsigned long rate, int uv, int n)
+{
+	int j, ret = 0;
+
+	for (j = 0; j < count; j++) {
+		ret = dev_pm_opp_add(device_list[j], rate, uv);
+		if (ret) {
+			pr_err("%s: couldn't add OPP for %lu - err: %d\n",
+						clk->dbg_name, rate, ret);
+			return ret;
+		}
+		if (n == 1 || n == clk->num_fmax - 1 ||
+					rate == clk_round_rate(clk, INT_MAX))
+			pr_info("%s: set OPP pair(%lu Hz: %u uV) on %s\n",
+						clk->dbg_name, rate, uv,
+						dev_name(device_list[j]));
+	}
+	return ret;
+}
+
+static void populate_clock_opp_table(struct device_node *np,
+			struct clk_lookup *table, size_t size)
+{
+	struct device **device_list;
+	struct clk *clk;
+	char clk_handle_name[MAX_LEN_OPP_HANDLE];
+	char clk_store_volt_corner[MAX_LEN_OPP_HANDLE];
+	size_t i;
+	int n, len, count, uv = 0;
+	unsigned long rate;
+	long ret = 0;
+	bool store_vcorner;
+
+	/* Iterate across all clocks in the clock controller */
+	for (i = 0; i < size; i++) {
+		n = 1;
+		rate = 0;
+
+		store_vcorner = false;
+		clk = table[i].clk;
+		if (!clk || !clk->num_fmax || clk->opp_table_populated)
+			continue;
+
+		if (strlen(clk->dbg_name) + LEN_OPP_HANDLE
+					< MAX_LEN_OPP_HANDLE) {
+			ret = snprintf(clk_handle_name,
+					ARRAY_SIZE(clk_handle_name),
+					"qcom,%s-opp-handle", clk->dbg_name);
+			if (ret < strlen(clk->dbg_name) + LEN_OPP_HANDLE) {
+				pr_err("Failed to hold clk_handle_name\n");
+				continue;
+			}
+		} else {
+			pr_err("clk name (%s) too large to fit in clk_handle_name\n",
+							clk->dbg_name);
+			continue;
+		}
+
+		if (strlen(clk->dbg_name) + LEN_OPP_VCORNER_HANDLE
+					< MAX_LEN_OPP_HANDLE) {
+			ret = snprintf(clk_store_volt_corner,
+				ARRAY_SIZE(clk_store_volt_corner),
+				"qcom,%s-opp-store-vcorner", clk->dbg_name);
+			if (ret < strlen(clk->dbg_name) +
+						LEN_OPP_VCORNER_HANDLE) {
+				pr_err("Failed to hold clk_store_volt_corner\n");
+				continue;
+			}
+		} else {
+			pr_err("clk name (%s) too large to fit in clk_store_volt_corner\n",
+							clk->dbg_name);
+			continue;
+		}
+
+		if (!of_find_property(np, clk_handle_name, &len)) {
+			pr_debug("Unable to find %s\n", clk_handle_name);
+			if (!of_find_property(np, clk_store_volt_corner,
+								&len)) {
+				pr_debug("Unable to find %s\n",
+						clk_store_volt_corner);
+				continue;
+			} else {
+				store_vcorner = true;
+				device_list = derive_device_list(clk, np,
+						clk_store_volt_corner, len);
+			}
+		} else {
+			device_list = derive_device_list(clk, np,
+						clk_handle_name, len);
+		}
+		if (IS_ERR_OR_NULL(device_list)) {
+			pr_err("Failed to fill device_list\n");
+			continue;
+		}
+
+		count = len/sizeof(u32);
+		while (1) {
+			/*
+			 * Calling clk_round_rate will not work for all clocks
+			 * (e.g. mux_div). Use their fmax values instead to get
+			 * the list of all available frequencies.
+			 */
+			if (clk->ops->list_rate) {
+				ret = clk_round_rate(clk, rate + 1);
+				if (ret < 0) {
+					pr_err("clk_round_rate failed for %s\n",
+							clk->dbg_name);
+					goto err_round_rate;
+				}
+				/*
+				 * If clk_round_rate gives the same value on
+				 * consecutive iterations, exit the loop since
+				 * we're at the maximum clock frequency.
+				 */
+				if (rate == ret)
+					break;
+				rate = ret;
+			} else {
+				if (n < clk->num_fmax)
+					rate = clk->fmax[n];
+				else
+					break;
+			}
+
+			uv = get_voltage(clk, rate, store_vcorner, n);
+			if (uv < 0)
+				goto err_round_rate;
+
+			ret = add_and_print_opp(clk, device_list, count,
+							rate, uv, n);
+			if (ret)
+				goto err_round_rate;
+
+			n++;
+		}
+err_round_rate:
+		/* If OPP table population was successful, set the flag */
+		if (uv >= 0 && ret >= 0)
+			clk->opp_table_populated = true;
+		kfree(device_list);
+	}
+}
+
+/**
+ * of_msm_clock_register() - Register clock tables with clkdev and with the
+ *			     clock DT framework
+ * @np: Device node of the clock-provider device
+ * @table: Table of clocks
+ * @size: Size of @table
+ *
+ * Upon return, clock APIs may be used to control clocks registered using this
+ * function.
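+ *
+ * Illustrative call from a clock-controller probe (the table name is
+ * hypothetical):
+ *
+ *	rc = of_msm_clock_register(pdev->dev.of_node, msm_clocks_gcc,
+ *				   ARRAY_SIZE(msm_clocks_gcc));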
+ */
+int of_msm_clock_register(struct device_node *np, struct clk_lookup *table,
+				size_t size)
+{
+	int ret = 0;
+	struct of_msm_provider_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->table = table;
+	data->size = size;
+
+	ret = of_clk_add_provider(np, of_clk_src_get, data);
+	if (ret) {
+		kfree(data);
+		return -ENOMEM;
+	}
+
+	populate_clock_opp_table(np, table, size);
+	return msm_clock_register(table, size);
+}
+EXPORT_SYMBOL(of_msm_clock_register);
+
+/**
+ * msm_clock_init() - Register and initialize a clock driver
+ * @data: Driver-specific clock initialization data
+ *
+ * Upon return from this call, clock APIs may be used to control
+ * clocks registered with this API.
+ */
+int __init msm_clock_init(struct clock_init_data *data)
+{
+	if (!data)
+		return -EINVAL;
+
+	if (data->pre_init)
+		data->pre_init();
+
+	mutex_lock(&msm_clock_init_lock);
+	if (data->late_init)
+		list_add(&data->list, &initdata_list);
+	mutex_unlock(&msm_clock_init_lock);
+
+	msm_clock_register(data->table, data->size);
+
+	if (data->post_init)
+		data->post_init();
+
+	return 0;
+}
+
+static int __init clock_late_init(void)
+{
+	struct handoff_clk *h, *h_temp;
+	struct handoff_vdd *v, *v_temp;
+	struct clock_init_data *initdata, *initdata_temp;
+	int ret = 0;
+
+	pr_info("%s: Removing enables held for handed-off clocks\n", __func__);
+
+	mutex_lock(&msm_clock_init_lock);
+
+	list_for_each_entry_safe(initdata, initdata_temp,
+					&initdata_list, list) {
+		ret = initdata->late_init();
+		if (ret)
+			pr_err("%s: %pS failed late_init.\n", __func__,
+				initdata);
+	}
+
+	list_for_each_entry_safe(h, h_temp, &handoff_list, list) {
+		clk_disable_unprepare(h->clk);
+		list_del(&h->list);
+		kfree(h);
+	}
+
+	list_for_each_entry_safe(v, v_temp, &handoff_vdd_list, list) {
+		unvote_vdd_level(v->vdd_class, v->vdd_class->num_levels - 1);
+		list_del(&v->list);
+		kfree(v);
+	}
+
+	mutex_unlock(&msm_clock_init_lock);
+
+	return ret;
+}
+/*
+ * clock_late_init should run only after all deferred probing
+ * (excluding DLKM probes) has completed.
+ */
+late_initcall_sync(clock_late_init);
diff --git a/drivers/clk/msm/clock.h b/drivers/clk/msm/clock.h
new file mode 100644
index 0000000..f8c6fbf
--- /dev/null
+++ b/drivers/clk/msm/clock.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_CLK_MSM_CLOCK_H
+#define __DRIVERS_CLK_MSM_CLOCK_H
+
+#include <linux/clkdev.h>
+
+/**
+ * struct clock_init_data - SoC specific clock initialization data
+ * @list: list node for the init-data list maintained by the framework
+ * @table: table of lookups to add
+ * @size: size of @table
+ * @pre_init: called before initializing the clock driver
+ * @post_init: called after registering @table; clock APIs can be called inside
+ * @late_init: called during late init
+ */
+struct clock_init_data {
+	struct list_head list;
+	struct clk_lookup *table;
+	size_t size;
+	void (*pre_init)(void);
+	void (*post_init)(void);
+	int (*late_init)(void);
+};
+
+int msm_clock_init(struct clock_init_data *data);
+int find_vdd_level(struct clk *clk, unsigned long rate);
+extern struct list_head orphan_clk_list;
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMMON_CLK_MSM)
+int clock_debug_register(struct clk *clk);
+void clock_debug_print_enabled(bool print_parent);
+#elif defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMMON_CLK_QCOM)
+void clock_debug_print_enabled(bool print_parent);
+#else
+static inline int clock_debug_register(struct clk *unused)
+{
+	return 0;
+}
+static inline void clock_debug_print_enabled(bool print_parent) { }
+#endif
+
+#endif
diff --git a/drivers/clk/msm/gdsc.c b/drivers/clk/msm/gdsc.c
new file mode 100644
index 0000000..e24795e
--- /dev/null
+++ b/drivers/clk/msm/gdsc.c
@@ -0,0 +1,721 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/reset.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk.h>
+
+#define PWR_ON_MASK		BIT(31)
+#define EN_REST_WAIT_MASK	(0xF << 20)
+#define EN_FEW_WAIT_MASK	(0xF << 16)
+#define CLK_DIS_WAIT_MASK	(0xF << 12)
+#define SW_OVERRIDE_MASK	BIT(2)
+#define HW_CONTROL_MASK		BIT(1)
+#define SW_COLLAPSE_MASK	BIT(0)
+#define GMEM_CLAMP_IO_MASK	BIT(0)
+#define GMEM_RESET_MASK		BIT(4)
+#define BCR_BLK_ARES_BIT	BIT(0)
+
+/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
+#define EN_REST_WAIT_VAL	(0x2 << 20)
+#define EN_FEW_WAIT_VAL		(0x8 << 16)
+#define CLK_DIS_WAIT_VAL	(0x2 << 12)
+
+#define TIMEOUT_US		100
+
+struct gdsc {
+	struct regulator_dev	*rdev;
+	struct regulator_desc	rdesc;
+	void __iomem		*gdscr;
+	struct clk		**clocks;
+	struct reset_control	**reset_clocks;
+	int			clock_count;
+	int			reset_count;
+	bool			toggle_mem;
+	bool			toggle_periph;
+	bool			toggle_logic;
+	bool			resets_asserted;
+	bool			root_en;
+	bool			force_root_en;
+	int			root_clk_idx;
+	bool			no_status_check_on_disable;
+	bool			is_gdsc_enabled;
+	bool			allow_clear;
+	bool			reset_aon;
+	void __iomem		*domain_addr;
+	void __iomem		*hw_ctrl_addr;
+	void __iomem		*sw_reset_addr;
+	u32			gds_timeout;
+};
+
+enum gdscr_status {
+	ENABLED,
+	DISABLED,
+};
+
+static DEFINE_MUTEX(gdsc_seq_lock);
+
+void gdsc_allow_clear_retention(struct regulator *regulator)
+{
+	struct gdsc *sc = regulator_get_drvdata(regulator);
+
+	if (sc)
+		sc->allow_clear = true;
+}
+
+static int poll_gdsc_status(struct gdsc *sc, enum gdscr_status status)
+{
+	void __iomem *gdscr;
+	int count = sc->gds_timeout;
+	u32 val;
+
+	if (sc->hw_ctrl_addr)
+		gdscr = sc->hw_ctrl_addr;
+	else
+		gdscr = sc->gdscr;
+
+	for (; count > 0; count--) {
+		val = readl_relaxed(gdscr);
+		val &= PWR_ON_MASK;
+		switch (status) {
+		case ENABLED:
+			if (val)
+				return 0;
+			break;
+		case DISABLED:
+			if (!val)
+				return 0;
+			break;
+		}
+		/*
+		 * There is no guarantee about the delay needed for the enable
+		 * bit in the GDSCR to be set or reset after the GDSC state
+		 * changes. Hence, keep checking for a reasonable number of
+		 * times until the bit reaches the expected state, with the
+		 * least possible delay between successive tries.
+		 */
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+
+static int gdsc_is_enabled(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+
+	if (!sc->toggle_logic)
+		return !sc->resets_asserted;
+
+	regval = readl_relaxed(sc->gdscr);
+	if (regval & PWR_ON_MASK) {
+		/*
+		 * The GDSC might be turned on due to TZ/HYP vote on the
+		 * votable GDS registers. Check the SW_COLLAPSE_MASK to
+		 * determine if HLOS has voted for it.
+		 */
+		if (!(regval & SW_COLLAPSE_MASK))
+			return true;
+	}
+	return false;
+}
+
+static int gdsc_enable(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval, hw_ctrl_regval = 0x0;
+	int i, ret = 0;
+
+	mutex_lock(&gdsc_seq_lock);
+
+	if (sc->root_en || sc->force_root_en)
+		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
+
+	if (sc->toggle_logic) {
+		if (sc->sw_reset_addr) {
+			regval = readl_relaxed(sc->sw_reset_addr);
+			regval |= BCR_BLK_ARES_BIT;
+			writel_relaxed(regval, sc->sw_reset_addr);
+			/*
+			 * BLK_ARES should be kept asserted for 1us before
+			 * being de-asserted.
+			 */
+			wmb();
+			udelay(1);
+
+			regval &= ~BCR_BLK_ARES_BIT;
+			writel_relaxed(regval, sc->sw_reset_addr);
+
+			/* Make sure de-assert goes through before continuing */
+			wmb();
+		}
+
+		if (sc->domain_addr) {
+			if (sc->reset_aon) {
+				regval = readl_relaxed(sc->domain_addr);
+				regval |= GMEM_RESET_MASK;
+				writel_relaxed(regval, sc->domain_addr);
+				/*
+				 * Keep reset asserted for at least 1us before
+				 * continuing.
+				 */
+				wmb();
+				udelay(1);
+
+				regval &= ~GMEM_RESET_MASK;
+				writel_relaxed(regval, sc->domain_addr);
+				/*
+				 * Make sure GMEM_RESET is de-asserted before
+				 * continuing.
+				 */
+				wmb();
+			}
+
+			regval = readl_relaxed(sc->domain_addr);
+			regval &= ~GMEM_CLAMP_IO_MASK;
+			writel_relaxed(regval, sc->domain_addr);
+			/*
+			 * Make sure CLAMP_IO is de-asserted before continuing.
+			 */
+			wmb();
+		}
+
+		regval = readl_relaxed(sc->gdscr);
+		if (regval & HW_CONTROL_MASK) {
+			dev_warn(&rdev->dev, "Invalid enable while %s is under HW control\n",
+				 sc->rdesc.name);
+			mutex_unlock(&gdsc_seq_lock);
+			return -EBUSY;
+		}
+
+		regval &= ~SW_COLLAPSE_MASK;
+		writel_relaxed(regval, sc->gdscr);
+
+		/* Wait for 8 XO cycles before polling the status bit. */
+		mb();
+		udelay(1);
+
+		ret = poll_gdsc_status(sc, ENABLED);
+		if (ret) {
+			regval = readl_relaxed(sc->gdscr);
+			if (sc->hw_ctrl_addr) {
+				hw_ctrl_regval =
+					readl_relaxed(sc->hw_ctrl_addr);
+				dev_warn(&rdev->dev, "%s state (after %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x. Re-polling.\n",
+					sc->rdesc.name, sc->gds_timeout,
+					regval, hw_ctrl_regval);
+
+				ret = poll_gdsc_status(sc, ENABLED);
+				if (ret) {
+					dev_err(&rdev->dev, "%s final state (after additional %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x\n",
+					sc->rdesc.name, sc->gds_timeout,
+					readl_relaxed(sc->gdscr),
+					readl_relaxed(sc->hw_ctrl_addr));
+
+					mutex_unlock(&gdsc_seq_lock);
+					return ret;
+				}
+			} else {
+				dev_err(&rdev->dev, "%s enable timed out: 0x%x\n",
+					sc->rdesc.name,
+					regval);
+				udelay(sc->gds_timeout);
+				regval = readl_relaxed(sc->gdscr);
+				dev_err(&rdev->dev, "%s final state: 0x%x (%d us after timeout)\n",
+					sc->rdesc.name, regval,
+					sc->gds_timeout);
+				mutex_unlock(&gdsc_seq_lock);
+				return ret;
+			}
+		}
+	} else {
+		for (i = 0; i < sc->reset_count; i++)
+			reset_control_deassert(sc->reset_clocks[i]);
+		sc->resets_asserted = false;
+	}
+
+	for (i = 0; i < sc->clock_count; i++) {
+		if (unlikely(i == sc->root_clk_idx))
+			continue;
+		if (sc->toggle_mem)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
+		if (sc->toggle_periph)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
+	}
+
+	/*
+	 * If clocks to this power domain were already on, they will take an
+	 * additional 4 clock cycles to re-enable after the rail is enabled.
+	 * Delay to account for this. A delay is also needed to ensure clocks
+	 * are not enabled within 400ns of enabling power to the memories.
+	 */
+	udelay(1);
+
+	/* Delay to account for staggered memory powerup. */
+	udelay(1);
+
+	if (sc->force_root_en)
+		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
+	sc->is_gdsc_enabled = true;
+
+	mutex_unlock(&gdsc_seq_lock);
+
+	return ret;
+}
+
+static int gdsc_disable(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+	int i, ret = 0;
+
+	mutex_lock(&gdsc_seq_lock);
+
+	if (sc->force_root_en)
+		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
+
+	for (i = sc->clock_count-1; i >= 0; i--) {
+		if (unlikely(i == sc->root_clk_idx))
+			continue;
+		if (sc->toggle_mem && sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
+		if (sc->toggle_periph && sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
+	}
+
+	/* Delay to account for staggered memory powerdown. */
+	udelay(1);
+
+	if (sc->toggle_logic) {
+		regval = readl_relaxed(sc->gdscr);
+		if (regval & HW_CONTROL_MASK) {
+			dev_warn(&rdev->dev, "Invalid disable while %s is under HW control\n",
+				 sc->rdesc.name);
+			mutex_unlock(&gdsc_seq_lock);
+			return -EBUSY;
+		}
+
+		regval |= SW_COLLAPSE_MASK;
+		writel_relaxed(regval, sc->gdscr);
+		/* Wait for 8 XO cycles before polling the status bit. */
+		mb();
+		udelay(1);
+
+		if (sc->no_status_check_on_disable) {
+			/*
+			 * Add a short delay here to ensure that an enable
+			 * immediately following this disable does not leave
+			 * the GDSC in an undefined state.
+			 */
+			udelay(TIMEOUT_US);
+		} else {
+			ret = poll_gdsc_status(sc, DISABLED);
+			if (ret)
+				dev_err(&rdev->dev, "%s disable timed out: 0x%x\n",
+					sc->rdesc.name, regval);
+		}
+
+		if (sc->domain_addr) {
+			regval = readl_relaxed(sc->domain_addr);
+			regval |= GMEM_CLAMP_IO_MASK;
+			writel_relaxed(regval, sc->domain_addr);
+			/* Make sure CLAMP_IO is asserted before continuing. */
+			wmb();
+		}
+	} else {
+		for (i = sc->reset_count-1; i >= 0; i--)
+			reset_control_assert(sc->reset_clocks[i]);
+		sc->resets_asserted = true;
+	}
+
+	/*
+	 * Check if gdsc_enable was called for this GDSC. If not, the root
+	 * clock will not have been enabled prior to this.
+	 */
+	if ((sc->is_gdsc_enabled && sc->root_en) || sc->force_root_en)
+		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
+	sc->is_gdsc_enabled = false;
+
+	mutex_unlock(&gdsc_seq_lock);
+
+	return ret;
+}
+
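+/*
+ * REGULATOR_MODE_FAST reports that the GDSC is under HW control;
+ * REGULATOR_MODE_NORMAL reports SW control.
+ */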
+static unsigned int gdsc_get_mode(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+
+	mutex_lock(&gdsc_seq_lock);
+	regval = readl_relaxed(sc->gdscr);
+	mutex_unlock(&gdsc_seq_lock);
+	if (regval & HW_CONTROL_MASK)
+		return REGULATOR_MODE_FAST;
+	return REGULATOR_MODE_NORMAL;
+}
+
+static int gdsc_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+	int ret = 0;
+
+	mutex_lock(&gdsc_seq_lock);
+
+	regval = readl_relaxed(sc->gdscr);
+
+	/*
+	 * HW control can only be enabled or disabled while the GDSC is on
+	 * in software, i.e. while the SW_COLLAPSE bit is clear.
+	 */
+	if (regval & SW_COLLAPSE_MASK) {
+		dev_err(&rdev->dev, "can't enable hw collapse now\n");
+		mutex_unlock(&gdsc_seq_lock);
+		return -EBUSY;
+	}
+
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		/* Turn on HW trigger mode */
+		regval |= HW_CONTROL_MASK;
+		writel_relaxed(regval, sc->gdscr);
+		/*
+		 * There may be a race with the internal HW trigger signal
+		 * that results in the GDSC going through a power-down and
+		 * power-up cycle. If the HW trigger signal is controlled by
+		 * firmware that polls the same status bits we do, the FW
+		 * might read an 'on' status before the GDSC finishes its
+		 * power cycle. Wait 1us before returning so the FW can't
+		 * immediately poll the status bit.
+		 */
+		mb();
+		udelay(1);
+		break;
+
+	case REGULATOR_MODE_NORMAL:
+		/* Turn off HW trigger mode */
+		regval &= ~HW_CONTROL_MASK;
+		writel_relaxed(regval, sc->gdscr);
+		/*
+		 * There may be a race with the internal HW trigger signal
+		 * that results in the GDSC going through a power-down and
+		 * power-up cycle. If we poll too early, the status bit will
+		 * indicate 'on' before the GDSC finishes its power cycle.
+		 * Account for this by waiting 1us before polling.
+		 */
+		mb();
+		udelay(1);
+
+		ret = poll_gdsc_status(sc, ENABLED);
+		if (ret)
+			dev_err(&rdev->dev, "%s set_mode timed out: 0x%x\n",
+				sc->rdesc.name, regval);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&gdsc_seq_lock);
+
+	return ret;
+}
+
+static struct regulator_ops gdsc_ops = {
+	.is_enabled = gdsc_is_enabled,
+	.enable = gdsc_enable,
+	.disable = gdsc_disable,
+	.set_mode = gdsc_set_mode,
+	.get_mode = gdsc_get_mode,
+};
+
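+/*
+ * Illustrative device-tree node consumed by this driver (property names
+ * match the parsing below; the node name, address and clock are
+ * hypothetical):
+ *
+ *	gdsc_venus: qcom,gdsc@fd8c1024 {
+ *		compatible = "qcom,gdsc";
+ *		regulator-name = "gdsc_venus";
+ *		reg = <0xfd8c1024 0x4>;
+ *		clock-names = "core_root_clk";
+ *		clocks = <&clock_mmss clk_venus_ahb_clk>;
+ *		qcom,enable-root-clk;
+ *	};
+ */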
+static int gdsc_probe(struct platform_device *pdev)
+{
+	static atomic_t gdsc_count = ATOMIC_INIT(-1);
+	struct regulator_config reg_config = {};
+	struct regulator_init_data *init_data;
+	struct resource *res;
+	struct gdsc *sc;
+	uint32_t regval, clk_dis_wait_val = CLK_DIS_WAIT_VAL;
+	bool retain_mem, retain_periph, support_hw_trigger;
+	int i, ret;
+	u32 timeout;
+
+	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
+	if (sc == NULL)
+		return -ENOMEM;
+
+	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
+			&sc->rdesc);
+	if (init_data == NULL)
+		return -ENOMEM;
+
+	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
+		init_data->supply_regulator = "parent";
+
+	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
+				      &sc->rdesc.name);
+	if (ret)
+		return ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -EINVAL;
+	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (sc->gdscr == NULL)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"domain_addr");
+	if (res) {
+		sc->domain_addr = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (sc->domain_addr == NULL)
+			return -ENOMEM;
+	}
+
+	sc->reset_aon = of_property_read_bool(pdev->dev.of_node,
+						"qcom,reset-aon-logic");
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"sw_reset");
+	if (res) {
+		sc->sw_reset_addr = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (sc->sw_reset_addr == NULL)
+			return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"hw_ctrl_addr");
+	if (res) {
+		sc->hw_ctrl_addr = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (sc->hw_ctrl_addr == NULL)
+			return -ENOMEM;
+	}
+
+	sc->gds_timeout = TIMEOUT_US;
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,gds-timeout",
+							&timeout);
+	if (!ret)
+		sc->gds_timeout = timeout;
+
+	sc->clock_count = of_property_count_strings(pdev->dev.of_node,
+					    "clock-names");
+	if (sc->clock_count == -EINVAL) {
+		sc->clock_count = 0;
+	} else if (IS_ERR_VALUE((unsigned long)sc->clock_count)) {
+		dev_err(&pdev->dev, "Failed to get clock names\n");
+		return -EINVAL;
+	}
+
+	sc->clocks = devm_kzalloc(&pdev->dev,
+			sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
+	if (!sc->clocks)
+		return -ENOMEM;
+
+	sc->root_clk_idx = -1;
+
+	sc->root_en = of_property_read_bool(pdev->dev.of_node,
+						"qcom,enable-root-clk");
+	sc->force_root_en = of_property_read_bool(pdev->dev.of_node,
+						"qcom,force-enable-root-clk");
+	for (i = 0; i < sc->clock_count; i++) {
+		const char *clock_name;
+
+		of_property_read_string_index(pdev->dev.of_node, "clock-names",
+					      i, &clock_name);
+		sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
+		if (IS_ERR(sc->clocks[i])) {
+			int rc = PTR_ERR(sc->clocks[i]);
+
+			if (rc != -EPROBE_DEFER)
+				dev_err(&pdev->dev, "Failed to get %s\n",
+					clock_name);
+			return rc;
+		}
+
+		if (!strcmp(clock_name, "core_root_clk"))
+			sc->root_clk_idx = i;
+	}
+
+	if ((sc->root_en || sc->force_root_en) && (sc->root_clk_idx == -1)) {
+		dev_err(&pdev->dev, "Failed to get root clock name\n");
+		return -EINVAL;
+	}
+
+	sc->rdesc.id = atomic_inc_return(&gdsc_count);
+	sc->rdesc.ops = &gdsc_ops;
+	sc->rdesc.type = REGULATOR_VOLTAGE;
+	sc->rdesc.owner = THIS_MODULE;
+	platform_set_drvdata(pdev, sc);
+
+	/*
+	 * Disable HW trigger: collapse/restore occur based on register
+	 * writes. Disable SW override: use the hardware state-machine for
+	 * sequencing.
+	 */
+	regval = readl_relaxed(sc->gdscr);
+	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);
+
+	if (!of_property_read_u32(pdev->dev.of_node, "qcom,clk-dis-wait-val",
+				  &clk_dis_wait_val))
+		clk_dis_wait_val = clk_dis_wait_val << 12;
+
+	/* Configure wait time between states. */
+	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
+	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | clk_dis_wait_val;
+	writel_relaxed(regval, sc->gdscr);
+
+	sc->no_status_check_on_disable =
+			of_property_read_bool(pdev->dev.of_node,
+					"qcom,no-status-check-on-disable");
+	retain_mem = of_property_read_bool(pdev->dev.of_node,
+					    "qcom,retain-mem");
+	sc->toggle_mem = !retain_mem;
+	retain_periph = of_property_read_bool(pdev->dev.of_node,
+					    "qcom,retain-periph");
+	sc->toggle_periph = !retain_periph;
+	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
+						"qcom,skip-logic-collapse");
+	support_hw_trigger = of_property_read_bool(pdev->dev.of_node,
+						    "qcom,support-hw-trigger");
+	if (support_hw_trigger) {
+		init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_MODE;
+		init_data->constraints.valid_modes_mask |=
+				REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
+	}
+
+	if (!sc->toggle_logic) {
+		sc->reset_count = of_property_count_strings(pdev->dev.of_node,
+							"reset-names");
+		if (sc->reset_count == -EINVAL) {
+			sc->reset_count = 0;
+		} else if (IS_ERR_VALUE((unsigned long)sc->reset_count)) {
+			dev_err(&pdev->dev, "Failed to get reset reset names\n");
+			return -EINVAL;
+		}
+
+		sc->reset_clocks = devm_kzalloc(&pdev->dev,
+					sizeof(struct reset_control *) *
+					sc->reset_count,
+					GFP_KERNEL);
+		if (!sc->reset_clocks)
+			return -ENOMEM;
+
+		for (i = 0; i < sc->reset_count; i++) {
+			const char *reset_name;
+
+			of_property_read_string_index(pdev->dev.of_node,
+					"reset-names", i, &reset_name);
+			sc->reset_clocks[i] = devm_reset_control_get(&pdev->dev,
+								reset_name);
+			if (IS_ERR(sc->reset_clocks[i])) {
+				int rc = PTR_ERR(sc->reset_clocks[i]);
+
+				if (rc != -EPROBE_DEFER)
+					dev_err(&pdev->dev, "Failed to get %s\n",
+							reset_name);
+				return rc;
+			}
+		}
+
+		regval &= ~SW_COLLAPSE_MASK;
+		writel_relaxed(regval, sc->gdscr);
+
+		ret = poll_gdsc_status(sc, ENABLED);
+		if (ret) {
+			dev_err(&pdev->dev, "%s enable timed out: 0x%x\n",
+				sc->rdesc.name, regval);
+			return ret;
+		}
+	}
+
+	sc->allow_clear = of_property_read_bool(pdev->dev.of_node,
+							"qcom,disallow-clear");
+	sc->allow_clear = !sc->allow_clear;
+
+	for (i = 0; i < sc->clock_count; i++) {
+		if (retain_mem || (regval & PWR_ON_MASK) || !sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
+		else
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
+
+		if (retain_periph || (regval & PWR_ON_MASK) || !sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
+		else
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
+	}
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = sc;
+	reg_config.of_node = pdev->dev.of_node;
+	sc->rdev = regulator_register(&sc->rdesc, &reg_config);
+	if (IS_ERR(sc->rdev)) {
+		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
+			sc->rdesc.name);
+		return PTR_ERR(sc->rdev);
+	}
+
+	return 0;
+}
+
+static int gdsc_remove(struct platform_device *pdev)
+{
+	struct gdsc *sc = platform_get_drvdata(pdev);
+
+	regulator_unregister(sc->rdev);
+	return 0;
+}
+
+static const struct of_device_id gdsc_match_table[] = {
+	{ .compatible = "qcom,gdsc" },
+	{}
+};
+
+static struct platform_driver gdsc_driver = {
+	.probe		= gdsc_probe,
+	.remove		= gdsc_remove,
+	.driver		= {
+		.name		= "gdsc",
+		.of_match_table = gdsc_match_table,
+		.owner		= THIS_MODULE,
+	},
+};
+
+static int __init gdsc_init(void)
+{
+	return platform_driver_register(&gdsc_driver);
+}
+subsys_initcall(gdsc_init);
+
+static void __exit gdsc_exit(void)
+{
+	platform_driver_unregister(&gdsc_driver);
+}
+module_exit(gdsc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM8974 GDSC power rail regulator driver");
diff --git a/drivers/clk/msm/msm-clock-controller.c b/drivers/clk/msm/msm-clock-controller.c
new file mode 100644
index 0000000..82ffb6e
--- /dev/null
+++ b/drivers/clk/msm/msm-clock-controller.c
@@ -0,0 +1,748 @@
+/*
+ * Copyright (c) 2014, 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "msmclock: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <linux/hashtable.h>
+
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/msm-clock-controller.h>
+#include <soc/qcom/clock-rpm.h>
+
+/* Protects list operations */
+static DEFINE_MUTEX(msmclk_lock);
+static LIST_HEAD(msmclk_parser_list);
+static u32 msmclk_debug;
+
+struct hitem {
+	struct hlist_node list;
+	phandle key;
+	void *ptr;
+};
+
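+/*
+ * Return the number of phandles in @propname of @np, or -EINVAL if the
+ * property is absent.
+ */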
+int of_property_count_phandles(struct device_node *np, char *propname)
+{
+	const __be32 *phandle;
+	int size;
+
+	phandle = of_get_property(np, propname, &size);
+	return phandle ? (size / sizeof(*phandle)) : -EINVAL;
+}
+EXPORT_SYMBOL(of_property_count_phandles);
+
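+/*
+ * Read the phandle at @index in @propname of @np into @p. Returns 0 on
+ * success, or -EINVAL if the property is missing or too short.
+ */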
+int of_property_read_phandle_index(struct device_node *np, char *propname,
+					int index, phandle *p)
+{
+	const __be32 *phandle;
+	int size;
+
+	phandle = of_get_property(np, propname, &size);
+	if ((!phandle) || (size < sizeof(*phandle) * (index + 1)))
+		return -EINVAL;
+
+	*p = be32_to_cpup(phandle + index);
+	return 0;
+}
+EXPORT_SYMBOL(of_property_read_phandle_index);
+
+static int generic_vdd_parse_regulators(struct device *dev,
+		struct clk_vdd_class *vdd, struct device_node *np)
+{
+	int num_regulators, i, rc;
+	char *name = "qcom,regulators";
+
+	num_regulators = of_property_count_phandles(np, name);
+	if (num_regulators <= 0) {
+		dt_prop_err(np, name, "missing dt property\n");
+		return -EINVAL;
+	}
+
+	vdd->regulator = devm_kzalloc(dev,
+				sizeof(*vdd->regulator) * num_regulators,
+				GFP_KERNEL);
+	if (!vdd->regulator) {
+		dt_err(np, "memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_regulators; i++) {
+		phandle p;
+
+		rc = of_property_read_phandle_index(np, name, i, &p);
+		if (rc) {
+			dt_prop_err(np, name, "unable to read phandle\n");
+			return rc;
+		}
+
+		vdd->regulator[i] = msmclk_parse_phandle(dev, p);
+		if (IS_ERR(vdd->regulator[i])) {
+			dt_prop_err(np, name, "hashtable lookup failed\n");
+			return PTR_ERR(vdd->regulator[i]);
+		}
+	}
+
+	vdd->num_regulators = num_regulators;
+	return 0;
+}
+
+static int generic_vdd_parse_levels(struct device *dev,
+		struct clk_vdd_class *vdd, struct device_node *np)
+{
+	int len, rc;
+	char *name = "qcom,uV-levels";
+
+	if (!of_find_property(np, name, &len)) {
+		dt_prop_err(np, name, "missing dt property\n");
+		return -EINVAL;
+	}
+
+	len /= sizeof(u32);
+	if (len % vdd->num_regulators) {
+		dt_err(np, "mismatch beween qcom,uV-levels and qcom,regulators dt properties\n");
+		return -EINVAL;
+	}
+
+	vdd->num_levels = len / vdd->num_regulators;
+	vdd->vdd_uv = devm_kzalloc(dev, len * sizeof(*vdd->vdd_uv),
+						GFP_KERNEL);
+	vdd->level_votes = devm_kzalloc(dev,
+				vdd->num_levels * sizeof(*vdd->level_votes),
+				GFP_KERNEL);
+
+	if (!vdd->vdd_uv || !vdd->level_votes) {
+		dt_err(np, "memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(np, name, vdd->vdd_uv,
+					vdd->num_levels * vdd->num_regulators);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read u32 array\n");
+		return -EINVAL;
+	}
+
+	/* Optional Property */
+	name = "qcom,uA-levels";
+	if (!of_find_property(np, name, &len))
+		return 0;
+
+	len /= sizeof(u32);
+	if (len / vdd->num_regulators != vdd->num_levels) {
+		dt_err(np, "size of qcom,uA-levels and qcom,uV-levels must match\n");
+		return -EINVAL;
+	}
+
+	vdd->vdd_ua = devm_kzalloc(dev, len * sizeof(*vdd->vdd_ua),
+						GFP_KERNEL);
+	if (!vdd->vdd_ua)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(np, name, vdd->vdd_ua,
+					vdd->num_levels * vdd->num_regulators);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read u32 array\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void *simple_vdd_class_dt_parser(struct device *dev,
+			struct device_node *np)
+{
+	struct clk_vdd_class *vdd;
+	int rc = 0;
+
+	vdd = devm_kzalloc(dev, sizeof(*vdd), GFP_KERNEL);
+	if (!vdd)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&vdd->lock);
+	vdd->class_name = np->name;
+
+	rc = generic_vdd_parse_regulators(dev, vdd, np);
+	rc |= generic_vdd_parse_levels(dev, vdd, np);
+	if (rc) {
+		dt_err(np, "unable to read vdd_class\n");
+		return ERR_PTR(rc);
+	}
+
+	return vdd;
+}
+MSMCLK_PARSER(simple_vdd_class_dt_parser, "qcom,simple-vdd-class", 0);
+
+static int generic_clk_parse_parents(struct device *dev, struct clk *c,
+					struct device_node *np)
+{
+	int rc;
+	phandle p;
+	char *name = "qcom,parent";
+
+	/* This property is optional */
+	if (!of_find_property(np, name, NULL))
+		return 0;
+
+	rc = of_property_read_phandle_index(np, name, 0, &p);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read phandle\n");
+		return rc;
+	}
+
+	c->parent = msmclk_parse_phandle(dev, p);
+	if (IS_ERR(c->parent)) {
+		dt_prop_err(np, name, "hashtable lookup failed\n");
+		return PTR_ERR(c->parent);
+	}
+
+	return 0;
+}
+
+static int generic_clk_parse_vdd(struct device *dev, struct clk *c,
+					struct device_node *np)
+{
+	phandle p;
+	int rc;
+	char *name = "qcom,supply-group";
+
+	/* This property is optional */
+	if (!of_find_property(np, name, NULL))
+		return 0;
+
+	rc = of_property_read_phandle_index(np, name, 0, &p);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read phandle\n");
+		return rc;
+	}
+
+	c->vdd_class = msmclk_parse_phandle(dev, p);
+	if (IS_ERR(c->vdd_class)) {
+		dt_prop_err(np, name, "hashtable lookup failed\n");
+		return PTR_ERR(c->vdd_class);
+	}
+
+	return 0;
+}
+
+static int generic_clk_parse_flags(struct device *dev, struct clk *c,
+						struct device_node *np)
+{
+	int rc;
+	char *name = "qcom,clk-flags";
+
+	/* This property is optional */
+	if (!of_find_property(np, name, NULL))
+		return 0;
+
+	rc = of_property_read_u32(np, name, &c->flags);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read u32\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int generic_clk_parse_fmax(struct device *dev, struct clk *c,
+					struct device_node *np)
+{
+	u32 prop_len, i;
+	int rc;
+	char *name = "qcom,clk-fmax";
+
+	/* This property is optional */
+	if (!of_find_property(np, name, &prop_len))
+		return 0;
+
+	if (!c->vdd_class) {
+		dt_err(np, "both qcom,clk-fmax and qcom,supply-group must be defined\n");
+		return -EINVAL;
+	}
+
+	prop_len /= sizeof(u32);
+	if (prop_len % 2) {
+		dt_prop_err(np, name, "bad length\n");
+		return -EINVAL;
+	}
+
+	/* The value at prop_len - 2 is the index of the last fmax entry */
+	rc = of_property_read_u32_index(np, name, prop_len - 2, &c->num_fmax);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read u32\n");
+		return rc;
+	}
+	c->num_fmax += 1;
+
+	c->fmax = devm_kzalloc(dev, sizeof(*c->fmax) * c->num_fmax, GFP_KERNEL);
+	if (!c->fmax)
+		return -ENOMEM;
+
+	for (i = 0; i < prop_len; i += 2) {
+		u32 level, value;
+
+		rc = of_property_read_u32_index(np, name, i, &level);
+		if (rc) {
+			dt_prop_err(np, name, "unable to read u32\n");
+			return rc;
+		}
+
+		rc = of_property_read_u32_index(np, name, i + 1, &value);
+		if (rc) {
+			dt_prop_err(np, name, "unable to read u32\n");
+			return rc;
+		}
+
+		if (level >= c->num_fmax) {
+			dt_prop_err(np, name, "must be sorted\n");
+			return -EINVAL;
+		}
+		c->fmax[level] = value;
+	}
+
+	return 0;
+}
+
+static int generic_clk_add_lookup_tbl_entry(struct device *dev, struct clk *c)
+{
+	struct msmclk_data *drv = dev_get_drvdata(dev);
+	struct clk_lookup *cl;
+
+	if (drv->clk_tbl_size >= drv->max_clk_tbl_size) {
+		dev_err(dev, "child node count should be > clock_count?\n");
+		return -EINVAL;
+	}
+
+	cl = drv->clk_tbl + drv->clk_tbl_size;
+	cl->clk = c;
+	drv->clk_tbl_size++;
+	return 0;
+}
+
+static int generic_clk_parse_depends(struct device *dev, struct clk *c,
+						struct device_node *np)
+{
+	phandle p;
+	int rc;
+	char *name = "qcom,depends";
+
+	/* This property is optional */
+	if (!of_find_property(np, name, NULL))
+		return 0;
+
+	rc = of_property_read_phandle_index(np, name, 0, &p);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read phandle\n");
+		return rc;
+	}
+
+	c->depends = msmclk_parse_phandle(dev, p);
+	if (IS_ERR(c->depends)) {
+		dt_prop_err(np, name, "hashtable lookup failed\n");
+		return PTR_ERR(c->depends);
+	}
+
+	return 0;
+}
+
+static int generic_clk_parse_init_config(struct device *dev, struct clk *c,
+						struct device_node *np)
+{
+	int rc;
+	u32 temp;
+	char *name = "qcom,always-on";
+
+	c->always_on = of_property_read_bool(np, name);
+
+	name = "qcom,config-rate";
+	/* This property is optional */
+	if (!of_find_property(np, name, NULL))
+		return 0;
+
+	rc = of_property_read_u32(np, name, &temp);
+	if (rc) {
+		dt_prop_err(np, name, "unable to read u32\n");
+		return rc;
+	}
+	c->init_rate = temp;
+
+	return rc;
+}
+
+void *msmclk_generic_clk_init(struct device *dev, struct device_node *np,
+				struct clk *c)
+{
+	int rc;
+
+	/* CLK_INIT macro */
+	spin_lock_init(&c->lock);
+	mutex_init(&c->prepare_lock);
+	INIT_LIST_HEAD(&c->children);
+	INIT_LIST_HEAD(&c->siblings);
+	INIT_LIST_HEAD(&c->list);
+	c->dbg_name = np->name;
+
+	rc = generic_clk_add_lookup_tbl_entry(dev, c);
+	rc |= generic_clk_parse_flags(dev, c, np);
+	rc |= generic_clk_parse_parents(dev, c, np);
+	rc |= generic_clk_parse_vdd(dev, c, np);
+	rc |= generic_clk_parse_fmax(dev, c, np);
+	rc |= generic_clk_parse_depends(dev, c, np);
+	rc |= generic_clk_parse_init_config(dev, c, np);
+
+	if (rc) {
+		dt_err(np, "unable to read clk\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return c;
+}
+
+static struct msmclk_parser *msmclk_parser_lookup(struct device_node *np)
+{
+	struct msmclk_parser *item;
+
+	list_for_each_entry(item, &msmclk_parser_list, list) {
+		if (of_device_is_compatible(np, item->compatible))
+			return item;
+	}
+	return NULL;
+}
+void msmclk_parser_register(struct msmclk_parser *item)
+{
+	mutex_lock(&msmclk_lock);
+	list_add(&item->list, &msmclk_parser_list);
+	mutex_unlock(&msmclk_lock);
+}
+
+static int msmclk_htable_add(struct device *dev, void *result, phandle key);
+
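+/*
+ * Parse a single child node of the clock controller: locate a parser
+ * registered for its compatible string, build the object, and cache it
+ * in the phandle hashtable so later references resolve without
+ * re-parsing.
+ */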
+void *msmclk_parse_dt_node(struct device *dev, struct device_node *np)
+{
+	struct msmclk_parser *parser;
+	phandle key;
+	void *result;
+	int rc;
+
+	key = np->phandle;
+	/* If this node was already parsed, return the cached object. */
+	result = msmclk_lookup_phandle(dev, key);
+	if (!IS_ERR(result))
+		return result;
+
+	if (!of_device_is_available(np)) {
+		dt_err(np, "node is disabled\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	parser = msmclk_parser_lookup(np);
+	if (IS_ERR_OR_NULL(parser)) {
+		dt_err(np, "no parser found\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* This may return -EPROBE_DEFER */
+	result = parser->parsedt(dev, np);
+	if (IS_ERR(result)) {
+		dt_err(np, "parsedt failed");
+		return result;
+	}
+
+	rc = msmclk_htable_add(dev, result, key);
+	if (rc)
+		return ERR_PTR(rc);
+
+	return result;
+}
+
+void *msmclk_parse_phandle(struct device *dev, phandle key)
+{
+	struct hitem *item;
+	struct device_node *np;
+	struct msmclk_data *drv = dev_get_drvdata(dev);
+
+	/*
+	 * The default phandle value is 0. Since hashtable keys must
+	 * be unique, reject the default value.
+	 */
+	if (!key)
+		return ERR_PTR(-EINVAL);
+
+	hash_for_each_possible(drv->htable, item, list, key) {
+		if (item->key == key)
+			return item->ptr;
+	}
+
+	np = of_find_node_by_phandle(key);
+	if (!np)
+		return ERR_PTR(-EINVAL);
+
+	return msmclk_parse_dt_node(dev, np);
+}
+EXPORT_SYMBOL(msmclk_parse_phandle);
+
+void *msmclk_lookup_phandle(struct device *dev, phandle key)
+{
+	struct hitem *item;
+	struct msmclk_data *drv = dev_get_drvdata(dev);
+
+	hash_for_each_possible(drv->htable, item, list, key) {
+		if (item->key == key)
+			return item->ptr;
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL(msmclk_lookup_phandle);
+
+static int msmclk_htable_add(struct device *dev, void *data, phandle key)
+{
+	struct hitem *item;
+	struct msmclk_data *drv = dev_get_drvdata(dev);
+
+	/*
+	 * If there are no phandle references to a node, key == 0. However, if
+	 * there is a second node like this, both will have key == 0. This
+	 * violates the requirement that hashtable keys be unique. Skip it.
+	 */
+	if (!key)
+		return 0;
+
+	if (!IS_ERR(msmclk_lookup_phandle(dev, key))) {
+		struct device_node *np = of_find_node_by_phandle(key);
+
+		dev_err(dev, "attempt to add duplicate entry for %s\n",
+				np ? np->name : "NULL");
+		return -EINVAL;
+	}
+
+	item = devm_kzalloc(dev, sizeof(*item), GFP_KERNEL);
+	if (!item)
+		return -ENOMEM;
+
+	INIT_HLIST_NODE(&item->list);
+	item->key = key;
+	item->ptr = data;
+
+	hash_add(drv->htable, &item->list, key);
+	return 0;
+}
+
+/*
+ * Currently, regulators are the only elements capable of probe deferral.
+ * Check them first to handle probe deferral efficiently.
+ */
+static int get_ext_regulators(struct device *dev)
+{
+	int num_strings, i, rc;
+	struct device_node *np;
+	void *item;
+	char *name = "qcom,regulator-names";
+
+	np = dev->of_node;
+	/* This property is optional */
+	num_strings = of_property_count_strings(np, name);
+	if (num_strings <= 0)
+		return 0;
+
+	for (i = 0; i < num_strings; i++) {
+		const char *str;
+		char buf[50];
+		phandle key;
+
+		rc = of_property_read_string_index(np, name, i, &str);
+		if (rc) {
+			dt_prop_err(np, name, "unable to read string\n");
+			return rc;
+		}
+
+		item = devm_regulator_get(dev, str);
+		if (IS_ERR(item)) {
+			dev_err(dev, "Failed to get regulator: %s\n", str);
+			return PTR_ERR(item);
+		}
+
+		snprintf(buf, ARRAY_SIZE(buf), "%s-supply", str);
+		rc = of_property_read_phandle_index(np, buf, 0, &key);
+		if (rc) {
+			dt_prop_err(np, buf, "unable to read phandle\n");
+			return rc;
+		}
+
+		rc = msmclk_htable_add(dev, item, key);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+static struct clk *msmclk_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+	phandle key;
+	struct clk *c = ERR_PTR(-ENOENT);
+
+	key = clkspec->args[0];
+	c = msmclk_lookup_phandle(data, key);
+
+	if (!IS_ERR(c) && !(c->flags & CLKFLAG_INIT_DONE))
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return c;
+}
+
+static void *regulator_dt_parser(struct device *dev, struct device_node *np)
+{
+	dt_err(np, "regulators should be handled in probe()");
+	return ERR_PTR(-EINVAL);
+}
+MSMCLK_PARSER(regulator_dt_parser, "qcom,rpm-smd-regulator", 0);
+
+static void *msmclk_dt_parser(struct device *dev, struct device_node *np)
+{
+	dt_err(np, "calling into other clock controllers isn't allowed");
+	return ERR_PTR(-EINVAL);
+}
+MSMCLK_PARSER(msmclk_dt_parser, "qcom,msm-clock-controller", 0);
+
+static struct msmclk_data *msmclk_drv_init(struct device *dev)
+{
+	struct msmclk_data *drv;
+	size_t size;
+
+	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return ERR_PTR(-ENOMEM);
+
+	dev_set_drvdata(dev, drv);
+
+	drv->dev = dev;
+	INIT_LIST_HEAD(&drv->list);
+
+	/* This overestimates size */
+	drv->max_clk_tbl_size = of_get_child_count(dev->of_node);
+	size = sizeof(*drv->clk_tbl) * drv->max_clk_tbl_size;
+	drv->clk_tbl = devm_kzalloc(dev, size, GFP_KERNEL);
+	if (!drv->clk_tbl)
+		return ERR_PTR(-ENOMEM);
+
+	hash_init(drv->htable);
+	return drv;
+}
+
+static int msmclk_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct device *dev;
+	struct msmclk_data *drv;
+	struct device_node *child;
+	void *result;
+	int rc = 0;
+
+	dev = &pdev->dev;
+	drv = msmclk_drv_init(dev);
+	if (IS_ERR(drv))
+		return PTR_ERR(drv);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc-base");
+	if (!res) {
+		dt_err(dev->of_node, "missing cc-base\n");
+		return -EINVAL;
+	}
+	drv->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drv->base) {
+		dev_err(dev, "ioremap failed for drv->base\n");
+		return -ENOMEM;
+	}
+	rc = msmclk_htable_add(dev, drv, dev->of_node->phandle);
+	if (rc)
+		return rc;
+
+	rc = enable_rpm_scaling();
+	if (rc)
+		return rc;
+
+	rc = get_ext_regulators(dev);
+	if (rc)
+		return rc;
+
+	/*
+	 * Returning -EPROBE_DEFER here is inefficient: all parsing done so
+	 * far would be discarded and redone on the next probe attempt.
+	 */
+	for_each_available_child_of_node(dev->of_node, child) {
+		result = msmclk_parse_dt_node(dev, child);
+		if (!IS_ERR(result))
+			continue;
+		if (!msmclk_debug)
+			return PTR_ERR(result);
+		/*
+		 * Parse and report all errors instead of immediately
+		 * exiting. Return the first error code.
+		 */
+		if (!rc)
+			rc = PTR_ERR(result);
+	}
+	if (rc)
+		return rc;
+
+	rc = of_clk_add_provider(dev->of_node, msmclk_clk_get, dev);
+	if (rc) {
+		dev_err(dev, "of_clk_add_provider failed\n");
+		return rc;
+	}
+
+	/*
+	 * We can't fail after registering clocks, because consumers may
+	 * already hold clock references; unwinding would free memory that
+	 * is still in use.
+	 */
+	WARN_ON(msm_clock_register(drv->clk_tbl, drv->clk_tbl_size));
+	dev_info(dev, "registered clocks\n");
+
+	return 0;
+}
+
+static const struct of_device_id msmclk_match_table[] = {
+	{.compatible = "qcom,msm-clock-controller"},
+	{}
+};
+
+static struct platform_driver msmclk_driver = {
+	.probe = msmclk_probe,
+	.driver = {
+		.name =  "msm-clock-controller",
+		.of_match_table = msmclk_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static bool initialized;
+int __init msmclk_init(void)
+{
+	int rc;
+
+	if (initialized)
+		return 0;
+
+	rc = platform_driver_register(&msmclk_driver);
+	if (rc)
+		return rc;
+	initialized = true;
+	return rc;
+}
+arch_initcall(msmclk_init);
diff --git a/drivers/clk/msm/reset.c b/drivers/clk/msm/reset.c
new file mode 100644
index 0000000..0f47fd6
--- /dev/null
+++ b/drivers/clk/msm/reset.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/reset-controller.h>
+
+#include "reset.h"
+
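+/* Pulse the reset line: assert, hold for 1us, then de-assert. */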
+static int msm_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	rcdev->ops->assert(rcdev, id);
+	udelay(1);
+	rcdev->ops->deassert(rcdev, id);
+	return 0;
+}
+
+static int
+msm_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	struct msm_reset_controller *rst;
+	const struct msm_reset_map *map;
+	u32 regval;
+
+	rst = to_msm_reset_controller(rcdev);
+	map = &rst->reset_map[id];
+
+	regval = readl_relaxed(rst->base + map->reg);
+	regval |= BIT(map->bit);
+	writel_relaxed(regval, rst->base + map->reg);
+
+	/* Make sure the reset is asserted */
+	mb();
+
+	return 0;
+}
+
+static int
+msm_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	struct msm_reset_controller *rst;
+	const struct msm_reset_map *map;
+	u32 regval;
+
+	rst = to_msm_reset_controller(rcdev);
+	map = &rst->reset_map[id];
+
+	regval = readl_relaxed(rst->base + map->reg);
+	regval &= ~BIT(map->bit);
+	writel_relaxed(regval, rst->base + map->reg);
+
+	/* Make sure the reset is de-asserted */
+	mb();
+
+	return 0;
+}
+
+struct reset_control_ops msm_reset_ops = {
+	.reset = msm_reset,
+	.assert = msm_reset_assert,
+	.deassert = msm_reset_deassert,
+};
+EXPORT_SYMBOL(msm_reset_ops);
+
+int msm_reset_controller_register(struct platform_device *pdev,
+	const struct msm_reset_map *map, unsigned int num_resets,
+	void __iomem *virt_base)
+{
+	struct msm_reset_controller *reset;
+	int ret = 0;
+
+	reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
+	if (!reset)
+		return -ENOMEM;
+
+	reset->rcdev.of_node = pdev->dev.of_node;
+	reset->rcdev.ops = &msm_reset_ops;
+	reset->rcdev.owner = pdev->dev.driver->owner;
+	reset->rcdev.nr_resets = num_resets;
+	reset->reset_map = map;
+	reset->base = virt_base;
+
+	ret = reset_controller_register(&reset->rcdev);
+	if (ret)
+		dev_err(&pdev->dev, "Failed to register with reset controller\n");
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_reset_controller_register);
diff --git a/drivers/clk/msm/reset.h b/drivers/clk/msm/reset.h
new file mode 100644
index 0000000..9e3b2fb
--- /dev/null
+++ b/drivers/clk/msm/reset.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRIVERS_CLK_RESET_H
+#define __DRIVERS_CLK_RESET_H
+
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+struct msm_reset_map {
+	unsigned int reg;
+	u8 bit;
+};
+
+struct msm_reset_controller {
+	const struct msm_reset_map *reset_map;
+	struct reset_controller_dev rcdev;
+	void __iomem  *base;
+};
+
+#define to_msm_reset_controller(r) \
+	container_of(r, struct msm_reset_controller, rcdev)
+
+extern struct reset_control_ops msm_reset_ops;
+
+int msm_reset_controller_register(struct platform_device *pdev,
+		const struct msm_reset_map *map, unsigned int nr_resets,
+		void __iomem *virt_base);
+#endif
diff --git a/drivers/clk/qcom/gcc-sdxpoorwills.c b/drivers/clk/qcom/gcc-sdxpoorwills.c
index a62a9a8..696d7fb 100644
--- a/drivers/clk/qcom/gcc-sdxpoorwills.c
+++ b/drivers/clk/qcom/gcc-sdxpoorwills.c
@@ -541,7 +541,10 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
 };
 
 static const struct freq_tbl ftbl_gcc_emac_clk_src[] = {
+	F(2500000, P_BI_TCXO, 1, 25, 192),
+	F(5000000, P_BI_TCXO, 1, 25, 96),
 	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
 	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
 	F(125000000, P_GPLL4_OUT_EVEN, 4, 0, 0),
 	F(250000000, P_GPLL4_OUT_EVEN, 2, 0, 0),
@@ -1340,19 +1343,6 @@ static struct clk_gate2 gcc_mss_gpll0_div_clk_src = {
 	},
 };
 
-static struct clk_branch gcc_mss_snoc_axi_clk = {
-	.halt_reg = 0x40148,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x40148,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_mss_snoc_axi_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_pcie_0_clkref_clk = {
 	.halt_reg = 0x88004,
 	.halt_check = BRANCH_HALT,
@@ -1368,7 +1358,7 @@ static struct clk_branch gcc_pcie_0_clkref_clk = {
 
 static struct clk_branch gcc_pcie_aux_clk = {
 	.halt_reg = 0x37020,
-	.halt_check = BRANCH_HALT_VOTED,
+	.halt_check = BRANCH_HALT_DELAY,
 	.clkr = {
 		.enable_reg = 0x6d00c,
 		.enable_mask = BIT(3),
@@ -1427,7 +1417,7 @@ static struct clk_branch gcc_pcie_phy_refgen_clk = {
 
 static struct clk_branch gcc_pcie_pipe_clk = {
 	.halt_reg = 0x37028,
-	.halt_check = BRANCH_HALT_VOTED,
+	.halt_check = BRANCH_HALT_DELAY,
 	.clkr = {
 		.enable_reg = 0x6d00c,
 		.enable_mask = BIT(4),
@@ -1806,7 +1796,6 @@ static struct clk_regmap *gcc_sdxpoorwills_clocks[] = {
 	[GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
 	[GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
 	[GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
-	[GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
 	[GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
 	[GCC_PCIE_AUX_CLK] = &gcc_pcie_aux_clk.clkr,
 	[GCC_PCIE_AUX_PHY_CLK_SRC] = &gcc_pcie_aux_phy_clk_src.clkr,
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index fec75b1..61c0ae8 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -739,26 +739,22 @@ static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
 
 void free_cluster_node(struct lpm_cluster *cluster)
 {
-	struct list_head *list;
 	struct lpm_cpu *cpu, *n;
-	int i;
+	struct lpm_cluster *cl, *m;
 
-	list_for_each(list, &cluster->child) {
-		struct lpm_cluster *n;
-
-		n = list_entry(list, typeof(*n), list);
-		list_del(list);
-		free_cluster_node(n);
+	list_for_each_entry_safe(cl, m, &cluster->child, list) {
+		list_del(&cl->list);
+		free_cluster_node(cl);
 	};
 
 	list_for_each_entry_safe(cpu, n, &cluster->cpu, list) {
-		struct lpm_cpu *cpu = list_entry(list, typeof(*cpu), list);
+		int i;
 
+		list_del(&cpu->list);
 		for (i = 0; i < cpu->nlevels; i++) {
 			kfree(cpu->levels[i].name);
 			cpu->levels[i].name = NULL;
 		}
-		list_del(list);
 	}
 }
 
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 0bff951..5452ad8 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -45,7 +45,11 @@
 #include <asm/cpuidle.h>
 #include "lpm-levels.h"
 #include <trace/events/power.h>
+#if defined(CONFIG_COMMON_CLK)
 #include "../clk/clk.h"
+#elif defined(CONFIG_COMMON_CLK_MSM)
+#include "../../drivers/clk/msm/clock.h"
+#endif /* CONFIG_COMMON_CLK */
 #define CREATE_TRACE_POINTS
 #include <trace/events/trace_msm_low_power.h>
 
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index b8effac..3a2239c 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -135,6 +135,15 @@
 	  it can conflict with existing profiling tools. This governor is
 	  unlikely to be useful for other devices.
 
+config DEVFREQ_GOV_SPDM_HYP
+	bool "QTI SPDM Hypervisor Governor"
+	depends on ARCH_QCOM
+	help
+	  Hypervisor-based governor for CPU bandwidth voting
+	  on QTI chipsets.
+	  Sets the frequency using an "on-demand" algorithm.
+	  This governor is unlikely to be useful for other devices.
+
 config DEVFREQ_GOV_MEMLAT
 	tristate "HW monitor based governor for device BW"
 	depends on ARM_MEMLAT_MON
@@ -227,6 +236,24 @@
 	  agnostic interface to so that some of the devfreq governors can be
 	  shared across SoCs.
 
+config SPDM_SCM
+	bool "QTI SPDM SCM based call support"
+	depends on DEVFREQ_SPDM
+	help
+	  The SPDM driver supports having its DCVS algorithm logic accessed
+	  via SCM or HVC calls. This adds support for SPDM interaction with
+	  TZ via SCM-based calls. If not selected, hypervisor interaction
+	  will be used instead.
+
+config DEVFREQ_SPDM
+	bool "QTI SPDM based bandwidth voting"
+	depends on ARCH_QCOM
+	select DEVFREQ_GOV_SPDM_HYP
+	help
+	  This adds support for SPDM-based bandwidth voting on QTI chipsets.
+	  This driver allows any SPDM-based client to vote for bandwidth.
+	  Used with the QTI SPDM Hypervisor Governor.
+
 source "drivers/devfreq/event/Kconfig"
 
 endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index f248e02..0202f66 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -15,6 +15,7 @@
 obj-$(CONFIG_QCOM_M4M_HWMON)		+= m4m-hwmon.o
 obj-$(CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON)	+= governor_bw_hwmon.o
 obj-$(CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON)	+= governor_cache_hwmon.o
+obj-$(CONFIG_DEVFREQ_GOV_SPDM_HYP)	+= governor_spdm_bw_hyp.o
 obj-$(CONFIG_DEVFREQ_GOV_MEMLAT)       += governor_memlat.o
 
 # DEVFREQ Drivers
@@ -23,6 +24,7 @@
 obj-$(CONFIG_ARM_TEGRA_DEVFREQ)		+= tegra-devfreq.o
 obj-$(CONFIG_QCOM_DEVFREQ_DEVBW)		+= devfreq_devbw.o
 obj-$(CONFIG_DEVFREQ_SIMPLE_DEV)	+= devfreq_simple_dev.o
+obj-$(CONFIG_DEVFREQ_SPDM)		+= devfreq_spdm.o devfreq_spdm_debugfs.o
 
 # DEVFREQ Event Drivers
 obj-$(CONFIG_PM_DEVFREQ_EVENT)		+= event/
diff --git a/drivers/devfreq/devfreq_spdm.c b/drivers/devfreq/devfreq_spdm.c
new file mode 100644
index 0000000..3290a2a
--- /dev/null
+++ b/drivers/devfreq/devfreq_spdm.c
@@ -0,0 +1,444 @@
+/*
+ *Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ *This program is free software; you can redistribute it and/or modify
+ *it under the terms of the GNU General Public License version 2 and
+ *only version 2 as published by the Free Software Foundation.
+ *
+ *This program is distributed in the hope that it will be useful,
+ *but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/devfreq.h>
+#include <linux/init.h>
+#include <linux/ipc_logging.h>
+#include <linux/gfp.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/msm-bus.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "governor.h"
+#include "devfreq_spdm.h"
+
+static void *spdm_ipc_log_ctxt;
+#define DEVFREQ_SPDM_DEFAULT_WINDOW_MS 100
+#define SPDM_IPC_LOG_PAGES	5
+
+#define SPDM_IPC_LOG(x...)	do { \
+	pr_debug(x); \
+	if (spdm_ipc_log_ctxt) \
+		ipc_log_string(spdm_ipc_log_ctxt, x); \
+} while (0)
+
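+/*
+ * Bounded copy helper: move at most the smaller of the two array sizes
+ * when shuttling arguments between our descriptor and the SCM/HVC
+ * descriptor, whose array lengths differ.
+ */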
+#define COPY_SIZE(x, y) ((x) <= (y) ? (x) : (y))
+
+static int change_bw(struct device *dev, unsigned long *freq, u32 flags)
+{
+	struct spdm_data *data = 0;
+	int i;
+	int next_idx;
+	int ret = 0;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (!dev || !freq)
+		return -EINVAL;
+
+	data = dev_get_drvdata(dev);
+	if (!data)
+		return -EINVAL;
+
+	if (data->devfreq->previous_freq == *freq)
+		goto update_thresholds;
+
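+	/*
+	 * Ping-pong between the two bus usecases so the vectors being
+	 * updated are never the ones backing the currently active request.
+	 */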
+	next_idx = data->cur_idx + 1;
+	next_idx = next_idx % 2;
+
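+	/*
+	 * The devfreq "frequency" here is a bandwidth value scaled down by
+	 * 64 (see get_cur_bw()); shift it back up to form the ab vote.
+	 */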
+	for (i = 0; i < data->pdata->usecase[next_idx].num_paths; i++)
+		data->pdata->usecase[next_idx].vectors[i].ab = (*freq) << 6;
+
+	data->cur_idx = next_idx;
+	ret = msm_bus_scale_client_update_request(data->bus_scale_client_id,
+						  data->cur_idx);
+
+update_thresholds:
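+	/*
+	 * Always re-issue the enable command with the current CCI clock
+	 * rate (in kHz), even when the bandwidth vote itself is unchanged.
+	 */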
+	desc.arg[0] = SPDM_CMD_ENABLE;
+	desc.arg[1] = data->spdm_client;
+	desc.arg[2] = (clk_get_rate(data->cci_clk)) / 1000;
+	ext_status = spdm_ext_call(&desc, 3);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	return ret;
+}
+
+static int get_cur_bw(struct device *dev, unsigned long *freq)
+{
+	struct spdm_data *data = 0;
+
+	if (!dev || !freq)
+		return -EINVAL;
+
+	data = dev_get_drvdata(dev);
+	if (!data)
+		return -EINVAL;
+
+	*freq = data->pdata->usecase[data->cur_idx].vectors[0].ab >> 6;
+
+	return 0;
+}
+
+static int get_dev_status(struct device *dev, struct devfreq_dev_status *status)
+{
+	struct spdm_data *data = 0;
+	int ret;
+
+	if (!dev || !status)
+		return -EINVAL;
+
+	data = dev_get_drvdata(dev);
+	if (!data)
+		return -EINVAL;
+
+	/*
+	 * Report a saturated busy ratio when the last notification asked
+	 * for more bandwidth, and an idle one otherwise, so the governor
+	 * maps the usage directly onto SPDM_UP/SPDM_DOWN.
+	 */
+	if (data->action == SPDM_UP)
+		status->busy_time = 255;
+	else
+		status->busy_time = 0;
+	status->total_time = 255;
+	ret = get_cur_bw(dev, &status->current_frequency);
+	if (ret)
+		return ret;
+
+	return 0;
+
+}
+
+static int populate_config_data(struct spdm_data *data,
+				struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+	struct device_node *node = pdev->dev.of_node;
+	struct property *prop = 0;
+
+	ret = of_property_read_u32(node, "qcom,max-vote",
+				   &data->config_data.max_vote);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(node, "qcom,bw-upstep",
+				   &data->config_data.upstep);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(node, "qcom,bw-dwnstep",
+				   &data->config_data.downstep);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(node, "qcom,alpha-up",
+				   &data->config_data.aup);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(node, "qcom,alpha-down",
+				   &data->config_data.adown);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(node, "qcom,bucket-size",
+				   &data->config_data.bucket_size);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32_array(node, "qcom,pl-freqs",
+					 data->config_data.pl_freqs,
+					 SPDM_PL_COUNT - 1);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32_array(node, "qcom,reject-rate",
+					 data->config_data.reject_rate,
+					 SPDM_PL_COUNT * 2);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32_array(node, "qcom,response-time-us",
+					 data->config_data.response_time_us,
+					 SPDM_PL_COUNT * 2);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32_array(node, "qcom,cci-response-time-us",
+					 data->config_data.cci_response_time_us,
+					 SPDM_PL_COUNT * 2);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(node, "qcom,max-cci-freq",
+				   &data->config_data.max_cci_freq);
+	if (ret)
+		return ret;
+	ret = of_property_read_u32(node, "qcom,up-step-multp",
+				   &data->config_data.up_step_multp);
+	if (ret)
+		return ret;
+
+	prop = of_find_property(node, "qcom,ports", 0);
+	if (!prop)
+		return -EINVAL;
+	data->config_data.num_ports = prop->length / sizeof(u32);
+	data->config_data.ports =
+	    devm_kzalloc(&pdev->dev, prop->length, GFP_KERNEL);
+	if (!data->config_data.ports)
+		return -ENOMEM;
+	ret = of_property_read_u32_array(node, "qcom,ports",
+					 data->config_data.ports,
+					 data->config_data.num_ports);
+	if (ret) {
+		devm_kfree(&pdev->dev, data->config_data.ports);
+		data->config_data.ports = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
+static int populate_spdm_data(struct spdm_data *data,
+			      struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+	struct device_node *node = pdev->dev.of_node;
+
+	ret = populate_config_data(data, pdev);
+	if (ret)
+		return ret;
+
+	ret =
+	    of_property_read_u32(node, "qcom,spdm-client", &data->spdm_client);
+	if (ret)
+		goto no_client;
+
+	ret = of_property_read_u32(node, "qcom,spdm-interval", &data->window);
+	if (ret)
+		data->window = DEVFREQ_SPDM_DEFAULT_WINDOW_MS;
+
+	data->pdata = msm_bus_cl_get_pdata(pdev);
+	if (!data->pdata) {
+		ret = -EINVAL;
+		goto no_pdata;
+	}
+
+	return 0;
+
+no_client:
+no_pdata:
+	devm_kfree(&pdev->dev, data->config_data.ports);
+	data->config_data.ports = NULL;
+	return ret;
+}
+
+#ifdef CONFIG_MSM_HVC
+int __spdm_hyp_call(struct spdm_args *args, int num_args)
+{
+	struct hvc_desc desc = { { 0 } };
+	int status;
+
+	memcpy(desc.arg, args->arg,
+		COPY_SIZE(sizeof(desc.arg), sizeof(args->arg)));
+	SPDM_IPC_LOG("hvc call fn:0x%x, cmd:%llu, num_args:%d\n",
+		HVC_FN_SIP(SPDM_HYP_FNID), desc.arg[0], num_args);
+
+	status = hvc(HVC_FN_SIP(SPDM_HYP_FNID), &desc);
+
+	memcpy(args->ret, desc.ret,
+		COPY_SIZE(sizeof(args->ret), sizeof(desc.ret)));
+	SPDM_IPC_LOG("hvc return fn:0x%x cmd:%llu Ret[0]:%llu Ret[1]:%llu\n",
+			HVC_FN_SIP(SPDM_HYP_FNID), desc.arg[0],
+			desc.ret[0], desc.ret[1]);
+	return status;
+}
+#endif
+
+int __spdm_scm_call(struct spdm_args *args, int num_args)
+{
+	int status = 0;
+
+	SPDM_IPC_LOG("%s:svc_id:%d,cmd_id:%d,cmd:%llu,num_args:%d\n",
+		__func__, SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID,
+		args->arg[0], num_args);
+
+	if (!is_scm_armv8()) {
+		status = scm_call(SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID, args->arg,
+				sizeof(args->arg), args->ret,
+				sizeof(args->ret));
+	} else {
+		struct scm_desc desc = {0};
+		/*
+		 * Need to hard code this, this is a requirement from TZ syscall
+		 * interface.
+		 */
+		desc.arginfo = SCM_ARGS(6);
+		memcpy(desc.args, args->arg,
+			COPY_SIZE(sizeof(desc.args), sizeof(args->arg)));
+
+		status = scm_call2(SCM_SIP_FNID(SPDM_SCM_SVC_ID,
+				SPDM_SCM_CMD_ID), &desc);
+
+		memcpy(args->ret, desc.ret,
+			COPY_SIZE(sizeof(args->ret), sizeof(desc.ret)));
+	}
+	SPDM_IPC_LOG("%s:svc_id:%d,cmd_id:%d,cmd:%llu,Ret[0]:%llu,Ret[1]:%llu\n"
+		, __func__, SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID, args->arg[0],
+		args->ret[0], args->ret[1]);
+	return status;
+}
+
+static int probe(struct platform_device *pdev)
+{
+	struct spdm_data *data = 0;
+	int ret = -EINVAL;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->action = SPDM_DOWN;
+
+	platform_set_drvdata(pdev, data);
+
+	ret = populate_spdm_data(data, pdev);
+	if (ret)
+		goto bad_of;
+
+	desc.arg[0] = SPDM_CMD_GET_VERSION;
+	ext_status = spdm_ext_call(&desc, 1);
+	if (ext_status) {
+		pr_err("%s:External command %u failed with error %u\n",
+			__func__, (int)desc.arg[0], ext_status);
+		goto bad_of;
+	}
+
+	if (desc.ret[0] < SPDM_TZ_VERSION) {
+		pr_err("%s: Version mismatch expected 0x%x got 0x%x", __func__,
+			SPDM_TZ_VERSION, (int)desc.arg[0]);
+		goto bad_of;
+	}
+
+	data->bus_scale_client_id = msm_bus_scale_register_client(data->pdata);
+	if (!data->bus_scale_client_id) {
+		ret = -EINVAL;
+		goto no_bus_scaling;
+	}
+
+	data->cci_clk = clk_get(&pdev->dev, "cci_clk");
+	if (IS_ERR(data->cci_clk)) {
+		ret = PTR_ERR(data->cci_clk);
+		goto no_clock;
+	}
+
+	data->profile =
+	    devm_kzalloc(&pdev->dev, sizeof(*(data->profile)), GFP_KERNEL);
+	if (!data->profile) {
+		ret = -ENOMEM;
+		goto no_profile;
+	}
+	data->profile->target = change_bw;
+	data->profile->get_dev_status = get_dev_status;
+	data->profile->get_cur_freq = get_cur_bw;
+	data->profile->polling_ms = data->window;
+
+	data->devfreq =
+	    devfreq_add_device(&pdev->dev, data->profile, "spdm_bw_hyp", data);
+	if (IS_ERR(data->devfreq)) {
+		ret = PTR_ERR(data->devfreq);
+		goto no_spdm_device;
+	}
+
+	spdm_init_debugfs(&pdev->dev);
+	spdm_ipc_log_ctxt = ipc_log_context_create(SPDM_IPC_LOG_PAGES,
+							"devfreq_spdm", 0);
+
+	if (IS_ERR_OR_NULL(spdm_ipc_log_ctxt)) {
+		pr_err("%s: Failed to create IPC log context\n", __func__);
+		spdm_ipc_log_ctxt = NULL;
+	}
+
+
+	return 0;
+
+no_spdm_device:
+	devm_kfree(&pdev->dev, data->profile);
+no_profile:
+no_clock:
+	msm_bus_scale_unregister_client(data->bus_scale_client_id);
+no_bus_scaling:
+	devm_kfree(&pdev->dev, data->config_data.ports);
+bad_of:
+	devm_kfree(&pdev->dev, data);
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+static int remove(struct platform_device *pdev)
+{
+	struct spdm_data *data = 0;
+
+	data = platform_get_drvdata(pdev);
+
+	spdm_remove_debugfs(data);
+
+	if (data->devfreq)
+		devfreq_remove_device(data->devfreq);
+
+	if (data->profile)
+		devm_kfree(&pdev->dev, data->profile);
+
+	if (data->bus_scale_client_id)
+		msm_bus_scale_unregister_client(data->bus_scale_client_id);
+
+	if (data->config_data.ports)
+		devm_kfree(&pdev->dev, data->config_data.ports);
+
+	devm_kfree(&pdev->dev, data);
+	platform_set_drvdata(pdev, NULL);
+
+	if (spdm_ipc_log_ctxt)
+		ipc_log_context_destroy(spdm_ipc_log_ctxt);
+
+	return 0;
+}
+
+static const struct of_device_id devfreq_spdm_match[] = {
+	{.compatible = "qcom,devfreq_spdm"},
+	{}
+};
+
+static struct platform_driver devfreq_spdm_drvr = {
+	.driver = {
+		   .name = "devfreq_spdm",
+		   .owner = THIS_MODULE,
+		   .of_match_table = devfreq_spdm_match,
+		   },
+	.probe = probe,
+	.remove = remove,
+};
+
+static int __init devfreq_spdm_init(void)
+{
+	return platform_driver_register(&devfreq_spdm_drvr);
+}
+
+module_init(devfreq_spdm_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/devfreq_spdm.h b/drivers/devfreq/devfreq_spdm.h
new file mode 100644
index 0000000..1e5ab03
--- /dev/null
+++ b/drivers/devfreq/devfreq_spdm.h
@@ -0,0 +1,131 @@
+/*
+ *Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ *This program is free software; you can redistribute it and/or modify
+ *it under the terms of the GNU General Public License version 2 and
+ *only version 2 as published by the Free Software Foundation.
+ *
+ *This program is distributed in the hope that it will be useful,
+ *but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *GNU General Public License for more details.
+ */
+
+#ifndef DEVFREQ_SPDM_H
+#define DEVFREQ_SPDM_H
+
+#include <linux/list.h>
+#ifdef CONFIG_MSM_HVC
+#include <soc/qcom/hvc.h>
+#endif
+#include <soc/qcom/scm.h>
+
+enum pl_levels { SPDM_PL1, SPDM_PL2, SPDM_PL3, SPDM_PL_COUNT };
+enum actions { SPDM_UP, SPDM_DOWN };
+enum spdm_client { SPDM_CLIENT_CPU, SPDM_CLIENT_GPU, SPDM_CLIENT_COUNT };
+
+struct spdm_config_data {
+	/* in MB/s */
+	u32 upstep;
+	u32 downstep;
+	u32 up_step_multp;
+
+	u32 num_ports;
+	u32 *ports;
+	u32 aup;
+	u32 adown;
+	u32 bucket_size;
+
+	/*
+	 * If we define n performance levels, we need n-1 frequencies to
+	 * mark where to change from one level to the next.
+	 */
+	/* hz */
+	u32 pl_freqs[SPDM_PL_COUNT - 1];
+	/*
+	 * We have a low threshold and a high threshold for each level to
+	 * support the two-port solution, so we need twice as many entries
+	 * as performance levels.
+	 */
+	/* in 100ths of a percent */
+	u32 reject_rate[SPDM_PL_COUNT * 2];
+	u32 response_time_us[SPDM_PL_COUNT * 2];
+	u32 cci_response_time_us[SPDM_PL_COUNT * 2];
+	/* hz */
+	u32 max_cci_freq;
+	/* in MB/s */
+	u32 max_vote;
+
+};
+
+struct spdm_data {
+	/* bus scaling data */
+	int cur_idx;
+	struct msm_bus_scale_pdata *pdata;
+	u32 bus_scale_client_id;
+	/* in mb/s */
+	u32 new_bw;
+
+	/* devfreq data */
+	struct devfreq *devfreq;
+	struct devfreq_dev_profile *profile;
+	unsigned long action;
+	int window;
+	struct clk *cci_clk;
+
+	/* spdm hw/gov data */
+	struct spdm_config_data config_data;
+
+	enum spdm_client spdm_client;
+	/* list used by governor to keep track of spdm devices */
+	struct list_head list;
+
+	struct dentry *debugfs_dir;
+
+	bool enabled;
+};
+
+extern void spdm_init_debugfs(struct device *dev);
+extern void spdm_remove_debugfs(struct spdm_data *data);
+
+#define SPDM_HYP_FNID 5
+#define SPDM_SCM_SVC_ID 0x9
+#define SPDM_SCM_CMD_ID 0x4
+#define SPDM_TZ_VERSION 0x20000 /* TZ SPDM driver version */
+/* SPDM CMD ID's for hypervisor/SCM */
+#define SPDM_CMD_GET_VERSION 0
+#define SPDM_CMD_GET_BW_ALL 1
+#define SPDM_CMD_GET_BW_SPECIFIC 2
+#define SPDM_CMD_ENABLE 3
+#define SPDM_CMD_DISABLE 4
+#define SPDM_CMD_CFG_PORTS 5
+#define SPDM_CMD_CFG_FLTR 6
+#define SPDM_CMD_CFG_PL 7
+#define SPDM_CMD_CFG_REJRATE_LOW 8
+#define SPDM_CMD_CFG_REJRATE_MED 9
+#define SPDM_CMD_CFG_REJRATE_HIGH 10
+#define SPDM_CMD_CFG_RESPTIME_LOW 11
+#define SPDM_CMD_CFG_RESPTIME_MED 12
+#define SPDM_CMD_CFG_RESPTIME_HIGH 13
+#define SPDM_CMD_CFG_CCIRESPTIME_LOW 14
+#define SPDM_CMD_CFG_CCIRESPTIME_MED 15
+#define SPDM_CMD_CFG_CCIRESPTIME_HIGH 16
+#define SPDM_CMD_CFG_MAXCCI 17
+#define SPDM_CMD_CFG_VOTES 18
+
+#define SPDM_MAX_ARGS 6
+#define SPDM_MAX_RETS 3
+
+struct spdm_args {
+	u64 arg[SPDM_MAX_ARGS];
+	u64 ret[SPDM_MAX_RETS];
+};
+
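+/*
+ * spdm_ext_call() routes SPDM commands to the DCVS logic: through an SCM
+ * call into TZ when CONFIG_SPDM_SCM is set, otherwise through an HVC call
+ * into the hypervisor.
+ */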
+#ifdef CONFIG_SPDM_SCM
+extern int __spdm_scm_call(struct spdm_args *args, int num_args);
+#define spdm_ext_call __spdm_scm_call
+#else
+extern int __spdm_hyp_call(struct spdm_args *args, int num_args);
+#define spdm_ext_call __spdm_hyp_call
+#endif
+#endif
diff --git a/drivers/devfreq/devfreq_spdm_debugfs.c b/drivers/devfreq/devfreq_spdm_debugfs.c
new file mode 100644
index 0000000..4e49d5b
--- /dev/null
+++ b/drivers/devfreq/devfreq_spdm_debugfs.c
@@ -0,0 +1,848 @@
+/*
+ *Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ *This program is free software; you can redistribute it and/or modify
+ *it under the terms of the GNU General Public License version 2 and
+ *only version 2 as published by the Free Software Foundation.
+ *
+ *This program is distributed in the hope that it will be useful,
+ *but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/msm-bus.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "devfreq_spdm.h"
+#include "governor.h"
+
+static int spdm_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
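+/*
+ * Shared scratch buffer for the debugfs handlers below; every handler
+ * clears it before returning. Concurrent accesses are not serialized.
+ */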
+static char buf[PAGE_SIZE];
+
+static ssize_t enable_write(struct file *file, const char __user *data,
+			    size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i;
+	int next_idx;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto err;
+	}
+
+	if (sscanf(buf, "%u\n", &i) != 1) {
+		size = -EINVAL;
+		goto err;
+	}
+	i = !!i;
+
+	if (i == spdm_data->enabled)
+		goto out;
+
+	spdm_data->devfreq->governor->event_handler(spdm_data->devfreq,
+						    i ? DEVFREQ_GOV_START :
+						    DEVFREQ_GOV_STOP, NULL);
+
+	if (!i) {
+		next_idx = spdm_data->cur_idx + 1;
+		next_idx = next_idx % 2;
+
+		for (i = 0; i < spdm_data->pdata->usecase[next_idx].num_paths;
+		     i++)
+			spdm_data->pdata->usecase[next_idx].vectors[i].ab = 0;
+
+		spdm_data->cur_idx = next_idx;
+		msm_bus_scale_client_update_request
+		    (spdm_data->bus_scale_client_id, spdm_data->cur_idx);
+	}
+
+out:
+	*offset += size;
+err:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t enable_read(struct file *file, char __user *data,
+			   size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int len = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	len = scnprintf(buf, size, "%u\n", spdm_data->enabled);
+	len = simple_read_from_buffer(data, size, offset, buf, len);
+
+	memset(buf, 0, sizeof(buf));
+	return len;
+}
+
+static const struct file_operations enable_fops = {
+	.open = spdm_open,
+	.write = enable_write,
+	.read = enable_read,
+};
+
+static ssize_t pl_write(struct file *file, const char __user *data,
+			size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+	int i;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	if (sscanf(buf, "%u %u\n", &spdm_data->config_data.pl_freqs[0],
+	       &spdm_data->config_data.pl_freqs[1]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_PL;
+	desc.arg[1] = spdm_data->spdm_client;
+	for (i = 0; i < SPDM_PL_COUNT - 1; i++)
+		desc.arg[i+2] = spdm_data->config_data.pl_freqs[i];
+	ext_status = spdm_ext_call(&desc, SPDM_PL_COUNT + 1);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+
+}
+
+static ssize_t pl_read(struct file *file, char __user *data,
+		       size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n", spdm_data->config_data.pl_freqs[0],
+		     spdm_data->config_data.pl_freqs[1]);
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations pl_fops = {
+	.open = spdm_open,
+	.write = pl_write,
+	.read = pl_read,
+};
+
+static ssize_t rejrate_low_write(struct file *file, const char __user *data,
+				 size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	if (sscanf(buf, "%u %u\n", &spdm_data->config_data.reject_rate[0],
+	       &spdm_data->config_data.reject_rate[1]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_REJRATE_LOW;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.reject_rate[0];
+	desc.arg[3] = spdm_data->config_data.reject_rate[1];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t rejrate_low_read(struct file *file, char __user *data,
+				size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.reject_rate[0],
+		     spdm_data->config_data.reject_rate[1]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations rrl_fops = {
+	.open = spdm_open,
+	.write = rejrate_low_write,
+	.read = rejrate_low_read,
+};
+
+static ssize_t rejrate_med_write(struct file *file, const char __user *data,
+				 size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+	if (sscanf(buf, "%u %u\n", &spdm_data->config_data.reject_rate[2],
+	       &spdm_data->config_data.reject_rate[3]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_REJRATE_MED;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.reject_rate[2];
+	desc.arg[3] = spdm_data->config_data.reject_rate[3];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t rejrate_med_read(struct file *file, char __user *data,
+				size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.reject_rate[2],
+		     spdm_data->config_data.reject_rate[3]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations rrm_fops = {
+	.open = spdm_open,
+	.write = rejrate_med_write,
+	.read = rejrate_med_read,
+};
+
+static ssize_t rejrate_high_write(struct file *file, const char __user *data,
+				  size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+	if (sscanf(buf, "%u %u\n", &spdm_data->config_data.reject_rate[4],
+	       &spdm_data->config_data.reject_rate[5]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_REJRATE_HIGH;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.reject_rate[4];
+	desc.arg[3] = spdm_data->config_data.reject_rate[5];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t rejrate_high_read(struct file *file, char __user *data,
+				 size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.reject_rate[4],
+		     spdm_data->config_data.reject_rate[5]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations rrh_fops = {
+	.open = spdm_open,
+	.write = rejrate_high_write,
+	.read = rejrate_high_read,
+};
+
+static ssize_t resptime_low_write(struct file *file, const char __user *data,
+				  size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+	if (sscanf(buf, "%u %u\n", &spdm_data->config_data.response_time_us[0],
+	       &spdm_data->config_data.response_time_us[1]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_RESPTIME_LOW;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.response_time_us[0];
+	desc.arg[3] = spdm_data->config_data.response_time_us[1];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t resptime_low_read(struct file *file, char __user *data,
+				 size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.response_time_us[0],
+		     spdm_data->config_data.response_time_us[1]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations rtl_fops = {
+	.open = spdm_open,
+	.write = resptime_low_write,
+	.read = resptime_low_read,
+};
+
+static ssize_t resptime_med_write(struct file *file, const char __user *data,
+				  size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+	if (sscanf(buf, "%u %u\n", &spdm_data->config_data.response_time_us[2],
+	       &spdm_data->config_data.response_time_us[3]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_RESPTIME_MED;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.response_time_us[2];
+	desc.arg[3] = spdm_data->config_data.response_time_us[3];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t resptime_med_read(struct file *file, char __user *data,
+				 size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.response_time_us[2],
+		     spdm_data->config_data.response_time_us[3]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations rtm_fops = {
+	.open = spdm_open,
+	.write = resptime_med_write,
+	.read = resptime_med_read,
+};
+
+static ssize_t resptime_high_write(struct file *file, const char __user *data,
+				   size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+	if (sscanf(buf, "%u %u\n", &spdm_data->config_data.response_time_us[4],
+	       &spdm_data->config_data.response_time_us[5]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_RESPTIME_HIGH;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.response_time_us[4];
+	desc.arg[3] = spdm_data->config_data.response_time_us[5];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t resptime_high_read(struct file *file, char __user *data,
+				  size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.response_time_us[4],
+		     spdm_data->config_data.response_time_us[5]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations rth_fops = {
+	.open = spdm_open,
+	.write = resptime_high_write,
+	.read = resptime_high_read,
+};
+
+static ssize_t cciresptime_low_write(struct file *file,
+				     const char __user *data, size_t size,
+				     loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+	if (sscanf(buf, "%u %u\n",
+		   &spdm_data->config_data.cci_response_time_us[0],
+		   &spdm_data->config_data.cci_response_time_us[1]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_LOW;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.cci_response_time_us[0];
+	desc.arg[3] = spdm_data->config_data.cci_response_time_us[1];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t cciresptime_low_read(struct file *file, char __user *data,
+				    size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.cci_response_time_us[0],
+		     spdm_data->config_data.cci_response_time_us[1]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations ccil_fops = {
+	.open = spdm_open,
+	.write = cciresptime_low_write,
+	.read = cciresptime_low_read,
+};
+
+static ssize_t cciresptime_med_write(struct file *file,
+				     const char __user *data, size_t size,
+				     loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+	if (sscanf(buf, "%u %u\n",
+		   &spdm_data->config_data.cci_response_time_us[2],
+		   &spdm_data->config_data.cci_response_time_us[3]) != 2) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_MED;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.cci_response_time_us[2];
+	desc.arg[3] = spdm_data->config_data.cci_response_time_us[3];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t cciresptime_med_read(struct file *file, char __user *data,
+				    size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.cci_response_time_us[2],
+		     spdm_data->config_data.cci_response_time_us[3]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations ccim_fops = {
+	.open = spdm_open,
+	.write = cciresptime_med_write,
+	.read = cciresptime_med_read,
+};
+
+static ssize_t cciresptime_high_write(struct file *file,
+				      const char __user *data,
+				      size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+	if (sscanf(buf, "%u %u\n",
+		   &spdm_data->config_data.cci_response_time_us[4],
+		   &spdm_data->config_data.cci_response_time_us[5]) != 2){
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_HIGH;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.cci_response_time_us[4];
+	desc.arg[3] = spdm_data->config_data.cci_response_time_us[5];
+	ext_status = spdm_ext_call(&desc, 4);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t cciresptime_high_read(struct file *file, char __user *data,
+				     size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u\n",
+		     spdm_data->config_data.cci_response_time_us[4],
+		     spdm_data->config_data.cci_response_time_us[5]);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations ccih_fops = {
+	.open = spdm_open,
+	.write = cciresptime_high_write,
+	.read = cciresptime_high_read,
+};
+
+static ssize_t cci_max_write(struct file *file, const char __user *data,
+			     size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+	if (sscanf(buf, "%u\n", &spdm_data->config_data.max_cci_freq) != 1) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_MAXCCI;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.max_cci_freq;
+	ext_status = spdm_ext_call(&desc, 3);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t cci_max_read(struct file *file, char __user *data,
+			    size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u\n", spdm_data->config_data.max_cci_freq);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations ccimax_fops = {
+	.open = spdm_open,
+	.write = cci_max_write,
+	.read = cci_max_read,
+};
+
+static ssize_t vote_cfg_write(struct file *file, const char __user *data,
+			      size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto out;
+	}
+	if (sscanf(buf, "%u %u %u %u\n", &spdm_data->config_data.upstep,
+	       &spdm_data->config_data.downstep,
+	       &spdm_data->config_data.max_vote,
+	       &spdm_data->config_data.up_step_multp) != 4) {
+		size = -EINVAL;
+		goto out;
+	}
+
+	desc.arg[0] = SPDM_CMD_CFG_VOTES;
+	desc.arg[1] = spdm_data->spdm_client;
+	desc.arg[2] = spdm_data->config_data.upstep;
+	desc.arg[3] = spdm_data->config_data.downstep;
+	desc.arg[4] = spdm_data->config_data.max_vote;
+	desc.arg[5] = spdm_data->config_data.up_step_multp;
+	ext_status = spdm_ext_call(&desc, 6);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	*offset += size;
+out:
+	memset(buf, 0, sizeof(buf));
+	return size;
+}
+
+static ssize_t vote_cfg_read(struct file *file, char __user *data,
+			     size_t size, loff_t *offset)
+{
+	struct spdm_data *spdm_data = file->private_data;
+	int i = 32;
+
+	if (size > sizeof(buf))
+		return -EINVAL;
+
+	i = scnprintf(buf, size, "%u %u %u %u\n",
+		     spdm_data->config_data.upstep,
+		     spdm_data->config_data.downstep,
+		     spdm_data->config_data.max_vote,
+		     spdm_data->config_data.up_step_multp);
+
+	i = simple_read_from_buffer(data, size, offset, buf, i);
+	memset(buf, 0, sizeof(buf));
+	return i;
+}
+
+static const struct file_operations vote_fops = {
+	.open = spdm_open,
+	.write = vote_cfg_write,
+	.read = vote_cfg_read,
+};
+
+void spdm_init_debugfs(struct device *dev)
+{
+	struct spdm_data *data = 0;
+
+	data = dev_get_drvdata(dev);
+	data->debugfs_dir = debugfs_create_dir(dev_name(dev), NULL);
+
+	debugfs_create_file("enable", 0600, data->debugfs_dir, data,
+			    &enable_fops);
+	debugfs_create_file("pl_freqs", 0600, data->debugfs_dir, data,
+			    &pl_fops);
+	debugfs_create_file("rej_rate_low", 0600, data->debugfs_dir, data,
+			    &rrl_fops);
+	debugfs_create_file("rej_rate_med", 0600, data->debugfs_dir, data,
+			    &rrm_fops);
+	debugfs_create_file("rej_rate_high", 0600, data->debugfs_dir, data,
+			    &rrh_fops);
+	debugfs_create_file("resp_time_low", 0600, data->debugfs_dir, data,
+			    &rtl_fops);
+	debugfs_create_file("resp_time_med", 0600, data->debugfs_dir, data,
+			    &rtm_fops);
+	debugfs_create_file("resp_time_high", 0600, data->debugfs_dir, data,
+			    &rth_fops);
+	debugfs_create_file("cci_resp_time_low", 0600, data->debugfs_dir, data,
+			    &ccil_fops);
+	debugfs_create_file("cci_resp_time_med", 0600, data->debugfs_dir, data,
+			    &ccim_fops);
+	debugfs_create_file("cci_resp_time_high", 0600, data->debugfs_dir,
+			    data, &ccih_fops);
+	debugfs_create_file("cci_max", 0600, data->debugfs_dir, data,
+			    &ccimax_fops);
+	debugfs_create_file("vote_cfg", 0600, data->debugfs_dir, data,
+			    &vote_fops);
+}
+
+void spdm_remove_debugfs(struct spdm_data *data)
+{
+	debugfs_remove_recursive(data->debugfs_dir);
+}
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_spdm_bw_hyp.c b/drivers/devfreq/governor_spdm_bw_hyp.c
new file mode 100644
index 0000000..5751ab6
--- /dev/null
+++ b/drivers/devfreq/governor_spdm_bw_hyp.c
@@ -0,0 +1,419 @@
+/*
+ *Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ *This program is free software; you can redistribute it and/or modify
+ *it under the terms of the GNU General Public License version 2 and
+ *only version 2 as published by the Free Software Foundation.
+ *
+ *This program is distributed in the hope that it will be useful,
+ *but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *GNU General Public License for more details.
+ */
+
+#include <linux/devfreq.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/rpm-smd.h>
+#include "governor.h"
+#include "devfreq_spdm.h"
+
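+/*
+ * RPM resource used to keep the SPDM clocks voted on: the type and key
+ * are four-character codes packed into u32s (they appear to decode to
+ * "bspc" and "en" respectively).
+ */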
+enum msm_spdm_rt_res {
+	SPDM_RES_ID = 1,
+	SPDM_RES_TYPE = 0x63707362,
+	SPDM_KEY = 0x00006e65,
+	SPDM_SIZE = 4,
+};
+
+static LIST_HEAD(devfreqs);
+static DEFINE_MUTEX(devfreqs_lock);
+
+static int enable_clocks(void)
+{
+	struct msm_rpm_request *rpm_req;
+	int id;
+	const int one = 1;
+
+	rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, SPDM_RES_TYPE,
+					 SPDM_RES_ID, 1);
+	if (!rpm_req)
+		return -ENODEV;
+	msm_rpm_add_kvp_data(rpm_req, SPDM_KEY, (const uint8_t *)&one,
+			     sizeof(int));
+	id = msm_rpm_send_request(rpm_req);
+	msm_rpm_wait_for_ack(id);
+	msm_rpm_free_request(rpm_req);
+
+	return 0;
+}
+
+static int disable_clocks(void)
+{
+	struct msm_rpm_request *rpm_req;
+	int id;
+	const int zero = 0;
+
+	rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, SPDM_RES_TYPE,
+					 SPDM_RES_ID, 1);
+	if (!rpm_req)
+		return -ENODEV;
+	msm_rpm_add_kvp_data(rpm_req, SPDM_KEY, (const uint8_t *)&zero,
+			     sizeof(int));
+	id = msm_rpm_send_request(rpm_req);
+	msm_rpm_wait_for_ack(id);
+	msm_rpm_free_request(rpm_req);
+
+	return 0;
+}
+
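+/*
+ * Threaded half of the SPDM interrupt: ask the hypervisor which client
+ * wants more bandwidth, then force an immediate re-evaluation of that
+ * client's devfreq device with the action set to SPDM_UP.
+ */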
+static irqreturn_t threaded_isr(int irq, void *dev_id)
+{
+	struct spdm_data *data;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+
+	/* call hyp to get bw_vote */
+	desc.arg[0] = SPDM_CMD_GET_BW_ALL;
+	ext_status = spdm_ext_call(&desc, 1);
+	if (ext_status)
+		pr_err("External command %u failed with error %u",
+			(int)desc.arg[0], ext_status);
+	mutex_lock(&devfreqs_lock);
+	list_for_each_entry(data, &devfreqs, list) {
+		if (data == NULL || data->devfreq == NULL) {
+			pr_err("Spurious interrupts\n");
+			break;
+		}
+		if (data->spdm_client == desc.ret[0]) {
+			devfreq_monitor_suspend(data->devfreq);
+			mutex_lock(&data->devfreq->lock);
+			data->action = SPDM_UP;
+			data->new_bw =
+				(desc.ret[1] * 1000) >> 6;
+			update_devfreq(data->devfreq);
+			data->action = SPDM_DOWN;
+			mutex_unlock(&data->devfreq->lock);
+			devfreq_monitor_resume(data->devfreq);
+			break;
+		}
+	}
+	mutex_unlock(&devfreqs_lock);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t isr(int irq, void *dev_id)
+{
+	return IRQ_WAKE_THREAD;
+}
+
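+/*
+ * Target callback: a non-zero busy ratio means the interrupt path already
+ * fetched the new vote; otherwise query the hypervisor for this client's
+ * current bandwidth.
+ */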
+static int gov_spdm_hyp_target_bw(struct devfreq *devfreq, unsigned long *freq)
+{
+	struct devfreq_dev_status status;
+	int ret = -EINVAL;
+	int usage;
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+	u64 bw_ret;
+
+	if (!devfreq || !devfreq->profile || !devfreq->profile->get_dev_status)
+		return ret;
+
+	ret = devfreq->profile->get_dev_status(devfreq->dev.parent, &status);
+	if (ret)
+		return ret;
+
+	usage = (status.busy_time * 100) / status.total_time;
+
+	if (usage > 0) {
+		/*
+		 * The up vote was already handled in the interrupt path, so
+		 * just use the value stored there.
+		 */
+		*freq = ((struct spdm_data *)devfreq->data)->new_bw;
+	} else {
+		desc.arg[0] = SPDM_CMD_GET_BW_SPECIFIC;
+		desc.arg[1] = ((struct spdm_data *)devfreq->data)->spdm_client;
+		ext_status = spdm_ext_call(&desc, 2);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		bw_ret = desc.ret[0] * 1000;
+		*freq = bw_ret >> 6;
+	}
+
+	return 0;
+}
+
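+/*
+ * Governor event handler. On START, push the full port/filter/threshold
+ * configuration to the SPDM and enable it; on STOP, disable the SPDM and
+ * drop the device from the tracking list.
+ */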
+static int gov_spdm_hyp_eh(struct devfreq *devfreq, unsigned int event,
+			   void *data)
+{
+	struct spdm_args desc = { { 0 } };
+	int ext_status = 0;
+	struct spdm_data *spdm_data = (struct spdm_data *)devfreq->data;
+	int i;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		mutex_lock(&devfreqs_lock);
+		list_add(&spdm_data->list, &devfreqs);
+		mutex_unlock(&devfreqs_lock);
+		/* call hyp with config data */
+		desc.arg[0] = SPDM_CMD_CFG_PORTS;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.num_ports;
+		for (i = 0; i < spdm_data->config_data.num_ports; i++)
+			desc.arg[i+3] = spdm_data->config_data.ports[i];
+		ext_status = spdm_ext_call(&desc,
+				spdm_data->config_data.num_ports + 3);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		desc.arg[0] = SPDM_CMD_CFG_FLTR;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.aup;
+		desc.arg[3] = spdm_data->config_data.adown;
+		desc.arg[4] = spdm_data->config_data.bucket_size;
+		ext_status = spdm_ext_call(&desc, 5);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		desc.arg[0] = SPDM_CMD_CFG_PL;
+		desc.arg[1] = spdm_data->spdm_client;
+		for (i = 0; i < SPDM_PL_COUNT - 1; i++)
+			desc.arg[i+2] = spdm_data->config_data.pl_freqs[i];
+		ext_status = spdm_ext_call(&desc, SPDM_PL_COUNT + 1);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		desc.arg[0] = SPDM_CMD_CFG_REJRATE_LOW;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.reject_rate[0];
+		desc.arg[3] = spdm_data->config_data.reject_rate[1];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		desc.arg[0] = SPDM_CMD_CFG_REJRATE_MED;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.reject_rate[2];
+		desc.arg[3] = spdm_data->config_data.reject_rate[3];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		desc.arg[0] = SPDM_CMD_CFG_REJRATE_HIGH;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.reject_rate[4];
+		desc.arg[3] = spdm_data->config_data.reject_rate[5];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		desc.arg[0] = SPDM_CMD_CFG_RESPTIME_LOW;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.response_time_us[0];
+		desc.arg[3] = spdm_data->config_data.response_time_us[1];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		desc.arg[0] = SPDM_CMD_CFG_RESPTIME_MED;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.response_time_us[2];
+		desc.arg[3] = spdm_data->config_data.response_time_us[3];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		desc.arg[0] = SPDM_CMD_CFG_RESPTIME_HIGH;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.response_time_us[4];
+		desc.arg[3] = spdm_data->config_data.response_time_us[5];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_LOW;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.cci_response_time_us[0];
+		desc.arg[3] = spdm_data->config_data.cci_response_time_us[1];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_MED;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.cci_response_time_us[2];
+		desc.arg[3] = spdm_data->config_data.cci_response_time_us[3];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_HIGH;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.cci_response_time_us[4];
+		desc.arg[3] = spdm_data->config_data.cci_response_time_us[5];
+		ext_status = spdm_ext_call(&desc, 4);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		desc.arg[0] = SPDM_CMD_CFG_MAXCCI;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.max_cci_freq;
+		ext_status = spdm_ext_call(&desc, 3);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		desc.arg[0] = SPDM_CMD_CFG_VOTES;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = spdm_data->config_data.upstep;
+		desc.arg[3] = spdm_data->config_data.downstep;
+		desc.arg[4] = spdm_data->config_data.max_vote;
+		desc.arg[5] = spdm_data->config_data.up_step_multp;
+		ext_status = spdm_ext_call(&desc, 6);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+
+		/* call hyp enable/commit */
+		desc.arg[0] = SPDM_CMD_ENABLE;
+		desc.arg[1] = spdm_data->spdm_client;
+		desc.arg[2] = 0;
+		ext_status = spdm_ext_call(&desc, 3);
+		if (ext_status) {
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+			mutex_lock(&devfreqs_lock);
+			/*
+			 * The spdm device probe will fail, so remove it from
+			 * the list to prevent accessing a freed pointer
+			 * later.
+			 */
+			list_del(&spdm_data->list);
+			mutex_unlock(&devfreqs_lock);
+			return -EINVAL;
+		}
+		spdm_data->enabled = true;
+		devfreq_monitor_start(devfreq);
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		devfreq_monitor_stop(devfreq);
+		/* find devfreq in list and remove it */
+		mutex_lock(&devfreqs_lock);
+		list_del(&spdm_data->list);
+		mutex_unlock(&devfreqs_lock);
+
+		/* call hypervisor to disable */
+		desc.arg[0] = SPDM_CMD_DISABLE;
+		desc.arg[1] = spdm_data->spdm_client;
+		ext_status = spdm_ext_call(&desc, 2);
+		if (ext_status)
+			pr_err("External command %u failed with error %u",
+				(int)desc.arg[0], ext_status);
+		spdm_data->enabled = false;
+		break;
+
+	case DEVFREQ_GOV_INTERVAL:
+		devfreq_interval_update(devfreq, (unsigned int *)data);
+		break;
+
+	case DEVFREQ_GOV_SUSPEND:
+		devfreq_monitor_suspend(devfreq);
+		break;
+
+	case DEVFREQ_GOV_RESUME:
+		devfreq_monitor_resume(devfreq);
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor spdm_hyp_gov = {
+	.name = "spdm_bw_hyp",
+	.get_target_freq = gov_spdm_hyp_target_bw,
+	.event_handler = gov_spdm_hyp_eh,
+};
+
+static int probe(struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+	int *irq = 0;
+
+	irq = devm_kzalloc(&pdev->dev, sizeof(int), GFP_KERNEL);
+	if (!irq)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, irq);
+
+	ret = devfreq_add_governor(&spdm_hyp_gov);
+	if (ret)
+		goto nogov;
+
+	*irq = platform_get_irq_byname(pdev, "spdm-irq");
+	if (*irq < 0) {
+		ret = *irq;
+		goto no_irq;
+	}
+	ret = request_threaded_irq(*irq, isr, threaded_isr,
+				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+				   spdm_hyp_gov.name, pdev);
+	if (ret)
+		goto no_irq;
+
+	enable_clocks();
+	return 0;
+
+no_irq:
+	devfreq_remove_governor(&spdm_hyp_gov);
+nogov:
+	devm_kfree(&pdev->dev, irq);
+	return ret;
+}
+
+static int remove(struct platform_device *pdev)
+{
+	int *irq = 0;
+
+	disable_clocks();
+	irq = platform_get_drvdata(pdev);
+	free_irq(*irq, pdev);
+	devfreq_remove_governor(&spdm_hyp_gov);
+	devm_kfree(&pdev->dev, irq);
+	return 0;
+}
+
+static const struct of_device_id gov_spdm_match[] = {
+	{.compatible = "qcom,gov_spdm_hyp"},
+	{}
+};
+
+static struct platform_driver gov_spdm_hyp_drvr = {
+	.driver = {
+		   .name = "gov_spdm_hyp",
+		   .owner = THIS_MODULE,
+		   .of_match_table = gov_spdm_match,
+		   },
+	.probe = probe,
+	.remove = remove,
+};
+
+static int __init governor_spdm_bw_hyp(void)
+{
+	return platform_driver_register(&gov_spdm_hyp_drvr);
+}
+
+module_init(governor_spdm_bw_hyp);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 31e5b76..ba71ce8 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,7 +3,7 @@
 	tristate "MSM DRM"
 	depends on DRM
 	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
-	depends on OF && COMMON_CLK
+	depends on OF
 	select REGULATOR
 	select DRM_KMS_HELPER
 	select DRM_PANEL
@@ -39,6 +39,7 @@
 config DRM_MSM_HDMI
 	bool "Enable HDMI support in MSM DRM driver"
 	depends on DRM_MSM
+	depends on COMMON_CLK
 	default n
 	help
 	  Compile in support for HDMI driver in msm drm
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 2d76d13..79f2ec9 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -332,6 +332,7 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux,
 	struct drm_dp_aux_msg helper_msg;
 	u32 const message_size = 0x10;
 	u32 const segment_address = 0x30;
+	u32 const edid_block_length = 0x80;
 	bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT;
 	bool i2c_read = input_msg->request &
 		(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
@@ -339,6 +340,15 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux,
 	if (!i2c_mot || !i2c_read || (input_msg->size == 0))
 		return;
 
+	/*
+	 * Sending the segment value and EDID offset will be performed
+	 * from the DRM upstream EDID driver for each block. Avoid
+	 * duplicate AUX transactions related to this while reading the
+	 * first 16 bytes of each block.
+	 */
+	if (!(aux->offset % edid_block_length))
+		goto end;
+
 	aux->read = false;
 	aux->cmd_busy = true;
 	aux->no_send_addr = true;
@@ -371,6 +381,7 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux,
 	helper_msg.buffer = &aux->offset;
 	helper_msg.size = 1;
 	dp_aux_cmd_fifo_tx(aux, &helper_msg);
+end:
 	aux->offset += message_size;
 
 	if (aux->offset == 0x80 || aux->offset == 0x100)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 5318a5f..bfbcf54 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -2830,6 +2830,27 @@ static void _dsi_ctrl_cache_misr(struct dsi_ctrl *dsi_ctrl)
 		dsi_ctrl->misr_cache);
 
 }
+
+/**
+ * dsi_ctrl_get_host_engine_init_state() - Return host init state
+ * @dsi_ctrl:          DSI controller handle.
+ * @state:             Controller initialization state
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_get_host_engine_init_state(struct dsi_ctrl *dsi_ctrl,
+		bool *state)
+{
+	if (!dsi_ctrl || !state) {
+		pr_err("Invalid Params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	*state = dsi_ctrl->current_state.host_initialized;
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+	return 0;
+}
 
 /**
  * dsi_ctrl_update_host_engine_state_for_cont_splash() -
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index 8850df4..77df585 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -717,4 +717,11 @@ void dsi_ctrl_isr_configure(struct dsi_ctrl *dsi_ctrl, bool enable);
  * @enable:		   variable to control enable/disable irq line
  */
 void dsi_ctrl_irq_update(struct dsi_ctrl *dsi_ctrl, bool enable);
+
+/**
+ * dsi_ctrl_get_host_engine_init_state() - Return host init state
+ * @dsi_ctrl:	DSI controller handle.
+ * @state:	Returned controller initialization state.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_get_host_engine_init_state(struct dsi_ctrl *dsi_ctrl,
+		bool *state);
+
 #endif /* _DSI_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
index d94d6f7b..bb0b603 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
@@ -113,6 +113,7 @@ void dsi_ctrl_hw_kickoff_non_embedded_mode(struct dsi_ctrl_hw *ctrl,
 	reg = DSI_R32(ctrl, DSI_DMA_FIFO_CTRL);
 	reg |= BIT(20);
 	reg |= BIT(16);
+	reg |= 0x33; /* Set READ and WRITE watermark levels to maximum */
 	DSI_W32(ctrl, DSI_DMA_FIFO_CTRL, reg);
 
 	DSI_W32(ctrl, DSI_DMA_CMD_OFFSET, cmd->offset);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index c753c80..594b3f5 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -618,9 +618,8 @@ void dsi_ctrl_hw_cmn_kickoff_command(struct dsi_ctrl_hw *ctrl,
 	DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
 
 	reg = DSI_R32(ctrl, DSI_DMA_FIFO_CTRL);
-	reg &= ~BIT(20);/* Enable write watermark*/
-	reg &= ~BIT(16);/* Enable read watermark */
-
+	reg |= BIT(20); /* Disable write watermark */
+	reg |= BIT(16); /* Disable read watermark */
 
 	DSI_W32(ctrl, DSI_DMA_FIFO_CTRL, reg);
 	DSI_W32(ctrl, DSI_DMA_CMD_OFFSET, cmd->offset);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 985cb51..3d99172 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -564,6 +564,87 @@ int dsi_display_check_status(void *display)
 	return rc;
 }
 
+static int dsi_display_cmd_prepare(const char *cmd_buf, u32 cmd_buf_len,
+		struct dsi_cmd_desc *cmd, u8 *payload, u32 payload_len)
+{
+	int i;
+
+	memset(cmd, 0x00, sizeof(*cmd));
+	cmd->msg.type = cmd_buf[0];
+	cmd->last_command = (cmd_buf[1] == 1);
+	cmd->msg.channel = cmd_buf[2];
+	cmd->msg.flags = cmd_buf[3];
+	cmd->msg.ctrl = 0;
+	cmd->post_wait_ms = cmd_buf[4];
+	cmd->msg.tx_len = ((cmd_buf[5] << 8) | (cmd_buf[6]));
+
+	if (cmd->msg.tx_len > payload_len) {
+		pr_err("Incorrect payload length tx_len %ld, payload_len %d\n",
+				cmd->msg.tx_len, payload_len);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < cmd->msg.tx_len; i++)
+		payload[i] = cmd_buf[7 + i];
+
+	cmd->msg.tx_buf = payload;
+	return 0;
+}
+
+static int dsi_display_ctrl_get_host_init_state(struct dsi_display *dsi_display,
+		bool *state)
+{
+	struct dsi_display_ctrl *ctrl;
+	int i, rc = -EINVAL;
+
+	for (i = 0; i < dsi_display->ctrl_count; i++) {
+		ctrl = &dsi_display->ctrl[i];
+		rc = dsi_ctrl_get_host_engine_init_state(ctrl->ctrl, state);
+		if (rc)
+			break;
+	}
+	return rc;
+}
+
+int dsi_display_cmd_transfer(void *display, const char *cmd_buf,
+		u32 cmd_buf_len)
+{
+	struct dsi_display *dsi_display = display;
+	struct dsi_cmd_desc cmd;
+	u8 cmd_payload[MAX_CMD_PAYLOAD_SIZE];
+	int rc = 0;
+	bool state = false;
+
+	if (!dsi_display || !cmd_buf) {
+		pr_err("[DSI] invalid params\n");
+		return -EINVAL;
+	}
+
+	pr_debug("[DSI] Display command transfer\n");
+
+	rc = dsi_display_cmd_prepare(cmd_buf, cmd_buf_len,
+			&cmd, cmd_payload, MAX_CMD_PAYLOAD_SIZE);
+	if (rc) {
+		pr_err("[DSI] command prepare failed. rc %d\n", rc);
+		return rc;
+	}
+
+	mutex_lock(&dsi_display->display_lock);
+	rc = dsi_display_ctrl_get_host_init_state(dsi_display, &state);
+	if (rc || !state) {
+		pr_err("[DSI] Invalid host state %d rc %d\n",
+				state, rc);
+		rc = -EPERM;
+		goto end;
+	}
+
+	rc = dsi_display->host.ops->transfer(&dsi_display->host,
+			&cmd.msg);
+end:
+	mutex_unlock(&dsi_display->display_lock);
+	return rc;
+}
+
 int dsi_display_soft_reset(void *display)
 {
 	struct dsi_display *dsi_display;
@@ -3257,20 +3338,6 @@ static int dsi_display_set_mode_sub(struct dsi_display *display,
 		}
 	}
 
-	for (i = 0; i < display->ctrl_count; i++) {
-		ctrl = &display->ctrl[i];
-
-		if (!ctrl->phy || !ctrl->ctrl)
-			continue;
-
-		rc = dsi_phy_set_clk_freq(ctrl->phy, &ctrl->ctrl->clk_freq);
-		if (rc) {
-			pr_err("[%s] failed to set phy clk freq, rc=%d\n",
-			       display->name, rc);
-			goto error;
-		}
-	}
-
 	if (priv_info->phy_timing_len) {
 		for (i = 0; i < display->ctrl_count; i++) {
 			ctrl = &display->ctrl[i];
@@ -3615,6 +3682,21 @@ static int dsi_display_bind(struct device *dev,
 	pr_info("Successfully bind display panel '%s'\n", display->name);
 	display->drm_dev = drm;
 
+	for (i = 0; i < display->ctrl_count; i++) {
+		display_ctrl = &display->ctrl[i];
+
+		if (!display_ctrl->phy || !display_ctrl->ctrl)
+			continue;
+
+		rc = dsi_phy_set_clk_freq(display_ctrl->phy,
+				&display_ctrl->ctrl->clk_freq);
+		if (rc) {
+			pr_err("[%s] failed to set phy clk freq, rc=%d\n",
+					display->name, rc);
+			goto error;
+		}
+	}
+
 	/* Initialize resources for continuous splash */
 	rc = dsi_display_splash_res_init(display);
 	if (rc)
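
Note: dsi_display_cmd_prepare() above parses a flat byte buffer into a
dsi_cmd_desc. A sketch of the expected layout with a hypothetical
user-side builder; the function and parameter names are illustrative
only, not part of the driver:

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	/*
	 * buf[0] message type    buf[1] last-command flag
	 * buf[2] virtual channel buf[3] message flags
	 * buf[4] post-wait (ms)  buf[5..6] payload length, big endian
	 * buf[7..] payload (at most MAX_CMD_PAYLOAD_SIZE bytes total)
	 */
	static int build_dsi_cmd(uint8_t *buf, size_t buf_len,
				 uint8_t type, bool last, uint8_t vc,
				 uint8_t flags, uint8_t wait_ms,
				 const uint8_t *payload, uint16_t len)
	{
		if (buf_len < (size_t)7 + len)
			return -1;
		buf[0] = type;
		buf[1] = last ? 1 : 0;
		buf[2] = vc;
		buf[3] = flags;
		buf[4] = wait_ms;
		buf[5] = len >> 8;	/* length MSB */
		buf[6] = len & 0xff;	/* length LSB */
		memcpy(&buf[7], payload, len);
		return 7 + len;
	}
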
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 87b9fd5..4cfd4a9 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -31,6 +31,7 @@
 #define MAX_DSI_CTRLS_PER_DISPLAY             2
 #define DSI_CLIENT_NAME_SIZE		20
 #define MAX_CMDLINE_PARAM_LEN	 512
+#define MAX_CMD_PAYLOAD_SIZE	256
 /*
  * DSI Validate Mode modifiers
  * @DSI_VALIDATE_FLAG_ALLOW_ADJUST:	Allow mode validation to also do fixup
@@ -536,6 +537,15 @@ int dsi_display_set_backlight(void *display, u32 bl_lvl);
 int dsi_display_check_status(void *display);
 
 /**
+ * dsi_display_cmd_transfer() - transfer command to the panel
+ * @display:            Handle to display.
+ * @cmd_buf:            Command buffer
+ * @cmd_buf_len:        Command buffer length in bytes
+ */
+int dsi_display_cmd_transfer(void *display, const char *cmd_buf,
+		u32 cmd_buf_len);
+
+/**
  * dsi_display_soft_reset() - perform a soft reset on DSI controller
  * @display:         Handle to display
  *
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index 2567f04..07b2305 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -663,7 +663,7 @@ int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable)
 			}
 		}
 	} else {
-		if (dsi_phy->dsi_phy_state == DSI_PHY_ENGINE_ON &&
+		if (dsi_phy->dsi_phy_state == DSI_PHY_ENGINE_OFF &&
 				dsi_phy->regulator_required) {
 			rc = dsi_pwr_enable_regulator(
 				&dsi_phy->pwr_info.phy_pwr, false);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 33778f8e..00cf225 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1193,6 +1193,8 @@ static int msm_drm_object_supports_event(struct drm_device *dev,
 		break;
 	}
 
+	drm_mode_object_unreference(arg_obj);
+
 	return ret;
 }
 
@@ -1209,6 +1211,9 @@ static int msm_register_event(struct drm_device *dev,
 		return -ENOENT;
 
 	ret = kms->funcs->register_events(kms, arg_obj, req->event, en);
+
+	drm_mode_object_unreference(arg_obj);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 08e6f79..6859f6e 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -46,6 +46,12 @@ struct msm_mmu_funcs {
 			struct dma_buf *dma_buf, int dir);
 	void (*destroy)(struct msm_mmu *mmu);
 	bool (*is_domain_secure)(struct msm_mmu *mmu);
+	int (*set_attribute)(struct msm_mmu *mmu,
+			enum iommu_attr attr, void *data);
+	int (*one_to_one_map)(struct msm_mmu *mmu, uint32_t iova,
+			uint32_t dest_address, uint32_t size, int prot);
+	int (*one_to_one_unmap)(struct msm_mmu *mmu, uint32_t dest_address,
+					uint32_t size);
 };
 
 struct msm_mmu {
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 7c879651..211acce 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -113,6 +113,74 @@ static void msm_smmu_detach(struct msm_mmu *mmu, const char * const *names,
 	dev_dbg(client->dev, "iommu domain detached\n");
 }
 
+static int msm_smmu_set_attribute(struct msm_mmu *mmu,
+		enum iommu_attr attr, void *data)
+{
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+	struct iommu_domain *domain;
+	int ret = 0;
+
+	if (!client || !client->mmu_mapping)
+		return -ENODEV;
+
+	domain = client->mmu_mapping->domain;
+	if (!domain) {
+		DRM_ERROR("Invalid domain ret:%d\n", ret);
+		return -EINVAL;
+	}
+
+	ret = iommu_domain_set_attr(domain, attr, data);
+	if (ret)
+		DRM_ERROR("set domain attribute failed:%d\n", ret);
+
+	return ret;
+}
+
+static int msm_smmu_one_to_one_unmap(struct msm_mmu *mmu,
+				uint32_t dest_address, uint32_t size)
+{
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+	struct iommu_domain *domain;
+	int ret = 0;
+
+	if (!client || !client->mmu_mapping)
+		return -ENODEV;
+
+	domain = client->mmu_mapping->domain;
+	if (!domain)
+		return -EINVAL;
+
+	ret = iommu_unmap(domain, dest_address, size);
+	if (ret != size)
+		pr_err("smmu unmap failed\n");
+
+	return 0;
+}
+
+static int msm_smmu_one_to_one_map(struct msm_mmu *mmu, uint32_t iova,
+		uint32_t dest_address, uint32_t size, int prot)
+{
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+	struct iommu_domain *domain;
+	int ret = 0;
+
+	if (!client || !client->mmu_mapping)
+		return -ENODEV;
+
+	domain = client->mmu_mapping->domain;
+	if (!domain)
+		return -EINVAL;
+
+	ret = iommu_map(domain, dest_address, dest_address, size, prot);
+	if (ret)
+		pr_err("smmu map failed\n");
+
+	return ret;
+}
+
 static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
 		struct sg_table *sgt, int prot)
 {
@@ -299,6 +367,9 @@ static const struct msm_mmu_funcs funcs = {
 	.unmap_dma_buf = msm_smmu_unmap_dma_buf,
 	.destroy = msm_smmu_destroy,
 	.is_domain_secure = msm_smmu_is_domain_secure,
+	.set_attribute = msm_smmu_set_attribute,
+	.one_to_one_map = msm_smmu_one_to_one_map,
+	.one_to_one_unmap = msm_smmu_one_to_one_unmap,
 };
 
 static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
@@ -444,9 +515,7 @@ static int msm_smmu_fault_handler(struct iommu_domain *domain,
 	DRM_ERROR("SMMU device:%s", client->dev ? client->dev->kobj.name : "");
 
 	/* generate dump, but no panic */
-	SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
-			"dsi1_phy", "vbif", "dbg_bus",
-			"vbif_dbg_bus");
+	SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus");
 
 	/*
 	 * return -ENOSYS to allow smmu driver to dump out useful
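
Note: msm_smmu_one_to_one_map() above programs an identity mapping (the
IOVA equals the physical address) by passing dest_address as both the
iova and paddr arguments of iommu_map(); the wrapper's iova parameter is
effectively unused. A hedged sketch of the intended call pattern for the
splash handoff, with variable names assumed from the hunks above:

	/* identity-map the bootloader splash buffer read-only */
	ret = mmu->funcs->one_to_one_map(mmu, splash_base, splash_base,
					 splash_size,
					 IOMMU_READ | IOMMU_NOEXEC);
	if (ret)
		return ret;

	/* ... later, once the first frame has been committed ... */
	mmu->funcs->one_to_one_unmap(mmu, splash_base, splash_size);
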
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index c2419dc..07b5536 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -635,15 +635,19 @@ static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
 
 	sde_cp_get_hw_payload(prop_node, &hw_cfg, &feature_enabled);
 	hw_cfg.num_of_mixers = sde_crtc->num_mixers;
-	hw_cfg.displayh = sde_crtc->base.mode.hdisplay;
-	hw_cfg.displayv = sde_crtc->base.mode.vdisplay;
 	hw_cfg.last_feature = 0;
 
 	for (i = 0; i < num_mixers && !ret; i++) {
 		hw_lm = sde_crtc->mixers[i].hw_lm;
 		hw_dspp = sde_crtc->mixers[i].hw_dspp;
+		if (!hw_lm) {
+			ret = -EINVAL;
+			continue;
+		}
 		hw_cfg.ctl = sde_crtc->mixers[i].hw_ctl;
 		hw_cfg.mixer_info = hw_lm;
+		hw_cfg.displayh = num_mixers * hw_lm->cfg.out_width;
+		hw_cfg.displayv = hw_lm->cfg.out_height;
 		switch (prop_node->feature) {
 		case SDE_CP_CRTC_DSPP_VLUT:
 			if (!hw_dspp || !hw_dspp->ops.setup_vlut) {
@@ -723,7 +727,7 @@ static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
 			hw_dspp->ops.setup_gamut(hw_dspp, &hw_cfg);
 			break;
 		case SDE_CP_CRTC_LM_GC:
-			if (!hw_lm || !hw_lm->ops.setup_gc) {
+			if (!hw_lm->ops.setup_gc) {
 				ret = -EINVAL;
 				continue;
 			}
@@ -737,7 +741,7 @@ static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
 			hw_dspp->ops.setup_histogram(hw_dspp, &feature_enabled);
 			break;
 		case SDE_CP_CRTC_DSPP_HIST_IRQ:
-			if (!hw_dspp || !hw_lm) {
+			if (!hw_dspp) {
 				ret = -EINVAL;
 				continue;
 			}
@@ -1028,7 +1032,7 @@ int sde_cp_crtc_set_property(struct drm_crtc *crtc,
 	 */
 	if (!sde_crtc->num_mixers ||
 	    sde_crtc->num_mixers > ARRAY_SIZE(sde_crtc->mixers)) {
-		DRM_ERROR("Invalid mixer config act cnt %d max cnt %ld\n",
+		DRM_INFO("Invalid mixer config act cnt %d max cnt %ld\n",
 			sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
 		ret = -EPERM;
 		goto exit;
@@ -1726,8 +1730,6 @@ static void sde_cp_ad_set_prop(struct sde_crtc *sde_crtc,
 	int i = 0, ret = 0;
 
 	hw_cfg.num_of_mixers = sde_crtc->num_mixers;
-	hw_cfg.displayh = sde_crtc->base.mode.hdisplay;
-	hw_cfg.displayv = sde_crtc->base.mode.vdisplay;
 
 	for (i = 0; i < num_mixers && !ret; i++) {
 		hw_lm = sde_crtc->mixers[i].hw_lm;
@@ -1738,6 +1740,8 @@ static void sde_cp_ad_set_prop(struct sde_crtc *sde_crtc,
 			continue;
 		}
 
+		hw_cfg.displayh = num_mixers * hw_lm->cfg.out_width;
+		hw_cfg.displayv = hw_lm->cfg.out_height;
 		hw_cfg.mixer_info = hw_lm;
 		ad_cfg.prop = ad_prop;
 		ad_cfg.hw_cfg = &hw_cfg;
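
Note: with the changes above, the color-processing payload derives the
display size from the mixer configuration rather than the CRTC mode, so
split-LM topologies report the full panel width. A worked example,
assuming a hypothetical dual-LM 4k panel:

	/* dual-LM topology: each mixer drives half the panel width */
	num_mixers = 2;
	hw_lm->cfg.out_width = 1920;	/* per-mixer width */
	hw_lm->cfg.out_height = 2160;

	hw_cfg.displayh = num_mixers * hw_lm->cfg.out_width; /* 3840 */
	hw_cfg.displayv = hw_lm->cfg.out_height;	     /* 2160 */
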
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index e2d937b..08b3657 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -18,6 +18,7 @@
 #include "sde_connector.h"
 #include "sde_encoder.h"
 #include <linux/backlight.h>
+#include <linux/string.h>
 #include "dsi_drm.h"
 #include "dsi_display.h"
 #include "sde_crtc.h"
@@ -1357,6 +1358,135 @@ int sde_connector_helper_reset_custom_properties(
 	return 0;
 }
 
+static int _sde_debugfs_conn_cmd_tx_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->private_data = inode->i_private;
+	return nonseekable_open(inode, file);
+}
+
+static ssize_t _sde_debugfs_conn_cmd_tx_sts_read(struct file *file,
+		char __user *buf, size_t count, loff_t *ppos)
+{
+	struct drm_connector *connector = file->private_data;
+	struct sde_connector *c_conn;
+	char buffer[MAX_CMD_PAYLOAD_SIZE];
+	int blen = 0;
+
+	if (*ppos)
+		return 0;
+
+	if (!connector) {
+		SDE_ERROR("invalid argument, conn is NULL\n");
+		return 0;
+	}
+
+	c_conn = to_sde_connector(connector);
+
+	mutex_lock(&c_conn->lock);
+	blen = snprintf(buffer, MAX_CMD_PAYLOAD_SIZE,
+		"last_cmd_tx_sts:0x%x",
+		c_conn->last_cmd_tx_sts);
+	mutex_unlock(&c_conn->lock);
+
+	SDE_DEBUG("output: %s\n", buffer);
+	if (blen <= 0) {
+		SDE_ERROR("snprintf failed, blen %d\n", blen);
+		return 0;
+	}
+
+	if (copy_to_user(buf, buffer, blen)) {
+		SDE_ERROR("copy to user buffer failed\n");
+		return -EFAULT;
+	}
+
+	*ppos += blen;
+	return blen;
+}
+
+static ssize_t _sde_debugfs_conn_cmd_tx_write(struct file *file,
+			const char __user *p, size_t count, loff_t *ppos)
+{
+	struct drm_connector *connector = file->private_data;
+	struct sde_connector *c_conn;
+	char *input, *token, *input_copy, *input_dup = NULL;
+	const char *delim = " ";
+	u32 buf_size = 0;
+	char buffer[MAX_CMD_PAYLOAD_SIZE];
+	int rc = 0, strtoint;
+
+	if (*ppos || !connector) {
+		SDE_ERROR("invalid argument(s), conn %d\n", connector != NULL);
+		return 0;
+	}
+
+	c_conn = to_sde_connector(connector);
+
+	if (!c_conn->ops.cmd_transfer) {
+		SDE_ERROR("no cmd transfer support for connector name %s\n",
+				c_conn->name);
+		return 0;
+	}
+
+	input = kmalloc(count + 1, GFP_KERNEL);
+	if (!input)
+		return -ENOMEM;
+
+	if (copy_from_user(input, p, count)) {
+		SDE_ERROR("copy from user failed\n");
+		rc = -EFAULT;
+		goto end;
+	}
+	input[count] = '\0';
+
+	SDE_DEBUG("input: %s\n", input);
+
+	input_copy = kstrdup(input, GFP_KERNEL);
+	if (!input_copy) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	input_dup = input_copy;
+	token = strsep(&input_copy, delim);
+	while (token) {
+		rc = kstrtoint(token, 0, &strtoint);
+		if (rc) {
+			SDE_ERROR("input buffer conversion failed\n");
+			goto end;
+		}
+
+		if (buf_size >= MAX_CMD_PAYLOAD_SIZE) {
+			SDE_ERROR("buffer size exceeding the limit %d\n",
+					MAX_CMD_PAYLOAD_SIZE);
+			goto end;
+		}
+		buffer[buf_size++] = (strtoint & 0xff);
+		token = strsep(&input_copy, delim);
+	}
+	SDE_DEBUG("command packet size in bytes: %u\n", buf_size);
+	if (!buf_size)
+		goto end;
+
+	mutex_lock(&c_conn->lock);
+	rc = c_conn->ops.cmd_transfer(c_conn->display, buffer,
+			buf_size);
+	c_conn->last_cmd_tx_sts = !rc;
+	mutex_unlock(&c_conn->lock);
+
+	rc = count;
+end:
+	kfree(input_dup);
+	kfree(input);
+	return rc;
+}
+
+static const struct file_operations conn_cmd_tx_fops = {
+	.open =		_sde_debugfs_conn_cmd_tx_open,
+	.read =		_sde_debugfs_conn_cmd_tx_sts_read,
+	.write =	_sde_debugfs_conn_cmd_tx_write,
+};
+
 #ifdef CONFIG_DEBUG_FS
 /**
  * sde_connector_init_debugfs - initialize connector debugfs
@@ -1387,6 +1517,15 @@ static int sde_connector_init_debugfs(struct drm_connector *connector)
 		return -ENOMEM;
 	}
 
+	if (sde_connector->ops.cmd_transfer) {
+		if (!debugfs_create_file("tx_cmd", 0600,
+			connector->debugfs_entry,
+			connector, &conn_cmd_tx_fops)) {
+			SDE_ERROR("failed to create connector cmd_tx\n");
+			return -ENOMEM;
+		}
+	}
+
 	return 0;
 }
 #else
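
Note: the tx_cmd debugfs node added above accepts space-separated numeric
tokens (parsed with kstrtoint(), base auto-detected), keeps the low byte
of each token, and hands the resulting buffer to the connector's
cmd_transfer op; reading the node back reports last_cmd_tx_sts. A
hypothetical user-space sketch; the debugfs path is an assumption and
depends on the DRM connector naming on the target:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* type=0x15 (DCS short write), last=1, vc=0, flags=0,
		 * wait=0 ms, len=0x0002, payload = 0x51 0xff
		 */
		const char *cmd =
			"0x15 0x01 0x00 0x00 0x00 0x00 0x02 0x51 0xff";
		int fd = open("/sys/kernel/debug/dri/0/DSI-1/tx_cmd",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, cmd, strlen(cmd)) < 0)
			perror("tx_cmd write");
		close(fd);
		return 0;
	}
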
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index b92c342..7cf09b7 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -231,6 +231,16 @@ struct sde_connector_ops {
 	 * Returns: positive value for success, negative or zero for failure
 	 */
 	int (*check_status)(void *display);
+
+	/**
+	 * cmd_transfer - Transfer command to the connected display panel
+	 * @display: Pointer to private display handle
+	 * @cmd_buf: Command buffer
+	 * @cmd_buf_len: Command buffer length in bytes
+	 * Returns: Zero for success, negative for failure
+	 */
+	int (*cmd_transfer)(void *display, const char *cmd_buf,
+			u32 cmd_buf_len);
 };
 
 /**
@@ -290,6 +300,7 @@ struct sde_connector_evt {
  * @bl_scale_dirty: Flag to indicate PP BL scale value(s) is changed
  * @bl_scale: BL scale value for ABA feature
  * @bl_scale_ad: BL scale value for AD feature
+ * @last_cmd_tx_sts: status of the last command transfer
  */
 struct sde_connector {
 	struct drm_connector base;
@@ -330,6 +341,8 @@ struct sde_connector {
 	bool bl_scale_dirty;
 	u32 bl_scale;
 	u32 bl_scale_ad;
+
+	bool last_cmd_tx_sts;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index fa53464..0334ead 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -520,11 +520,6 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
 		return;
 	}
 	priv = kms->dev->dev_private;
-
-	/* wake vote update is not required with display rsc */
-	if (kms->perf.bw_vote_mode == DISP_RSC_MODE && stop_req)
-		return;
-
 	sde_crtc = to_sde_crtc(crtc);
 	sde_cstate = to_sde_crtc_state(crtc->state);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 9cdef88..cc9d220 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -610,6 +610,38 @@ static void _sde_crtc_deinit_events(struct sde_crtc *sde_crtc)
 		return;
 }
 
+static ssize_t vsync_event_show(struct device *device,
+	struct device_attribute *attr, char *buf)
+{
+	struct drm_crtc *crtc;
+	struct sde_crtc *sde_crtc;
+
+	if (!device || !buf) {
+		SDE_ERROR("invalid input param(s)\n");
+		return -EAGAIN;
+	}
+
+	crtc = dev_get_drvdata(device);
+	sde_crtc = to_sde_crtc(crtc);
+	return scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n",
+			ktime_to_ns(sde_crtc->vblank_last_cb_time));
+}
+
+static DEVICE_ATTR_RO(vsync_event);
+static struct attribute *sde_crtc_dev_attrs[] = {
+	&dev_attr_vsync_event.attr,
+	NULL
+};
+
+static const struct attribute_group sde_crtc_attr_group = {
+	.attrs = sde_crtc_dev_attrs,
+};
+
+static const struct attribute_group *sde_crtc_attr_groups[] = {
+	&sde_crtc_attr_group,
+	NULL,
+};
+
 static void sde_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
@@ -619,6 +651,11 @@ static void sde_crtc_destroy(struct drm_crtc *crtc)
 	if (!crtc)
 		return;
 
+	if (sde_crtc->vsync_event_sf)
+		sysfs_put(sde_crtc->vsync_event_sf);
+	if (sde_crtc->sysfs_dev)
+		device_unregister(sde_crtc->sysfs_dev);
+
 	if (sde_crtc->blob_info)
 		drm_property_unreference_blob(sde_crtc->blob_info);
 	msm_property_destroy(&sde_crtc->property_info);
@@ -2306,6 +2343,10 @@ static void sde_crtc_vblank_cb(void *data)
 		sde_crtc->vblank_cb_time = ktime_get();
 	else
 		sde_crtc->vblank_cb_count++;
+
+	sde_crtc->vblank_last_cb_time = ktime_get();
+	sysfs_notify_dirent(sde_crtc->vsync_event_sf);
+
 	_sde_crtc_complete_flip(crtc, NULL);
 	drm_crtc_handle_vblank(crtc);
 	DRM_DEBUG_VBL("crtc%d\n", crtc->base.id);
@@ -4240,13 +4281,15 @@ static void sde_crtc_enable(struct drm_crtc *crtc)
 	struct sde_crtc_irq_info *node = NULL;
 	struct drm_event event;
 	u32 power_on;
-	int ret;
+	int ret, i;
+	struct sde_crtc_state *cstate;
 
 	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 	priv = crtc->dev->dev_private;
+	cstate = to_sde_crtc_state(crtc->state);
 
 	if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
 		SDE_ERROR("power resource is not enabled\n");
@@ -4315,6 +4358,10 @@ static void sde_crtc_enable(struct drm_crtc *crtc)
 		SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE |
 		SDE_POWER_EVENT_PRE_DISABLE,
 		sde_crtc_handle_power_event, crtc, sde_crtc->name);
+
+	/* Enable ESD thread */
+	for (i = 0; i < cstate->num_connectors; i++)
+		sde_connector_schedule_status_work(cstate->connectors[i], true);
 }
 
 struct plane_state {
@@ -5229,12 +5276,19 @@ static int sde_crtc_atomic_set_property(struct drm_crtc *crtc,
 	}
 
 exit:
-	if (ret)
-		SDE_ERROR("%s: failed to set property%d %s: %d\n", crtc->name,
-				DRMID(property), property->name, ret);
-	else
+	if (ret) {
+		if (ret != -EPERM)
+			SDE_ERROR("%s: failed to set property%d %s: %d\n",
+				crtc->name, DRMID(property),
+				property->name, ret);
+		else
+			SDE_DEBUG("%s: failed to set property%d %s: %d\n",
+				crtc->name, DRMID(property),
+				property->name, ret);
+	} else {
 		SDE_DEBUG("%s: %s[%d] <= 0x%llx\n", crtc->name, property->name,
 				property->base.id, val);
+	}
 
 	return ret;
 }
@@ -5892,6 +5946,41 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
 	return crtc;
 }
 
+int sde_crtc_post_init(struct drm_device *dev, struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+	int rc = 0;
+
+	if (!dev || !dev->primary || !dev->primary->kdev || !crtc) {
+		SDE_ERROR("invalid input param(s)\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	sde_crtc->sysfs_dev = device_create_with_groups(
+		dev->primary->kdev->class, dev->primary->kdev, 0, crtc,
+		sde_crtc_attr_groups, "sde-crtc-%d", crtc->index);
+	if (IS_ERR_OR_NULL(sde_crtc->sysfs_dev)) {
+		SDE_ERROR("crtc:%d sysfs create failed rc:%ld\n", crtc->index,
+			PTR_ERR(sde_crtc->sysfs_dev));
+		if (!sde_crtc->sysfs_dev)
+			rc = -EINVAL;
+		else
+			rc = PTR_ERR(sde_crtc->sysfs_dev);
+		goto end;
+	}
+
+	sde_crtc->vsync_event_sf = sysfs_get_dirent(
+		sde_crtc->sysfs_dev->kobj.sd, "vsync_event");
+	if (!sde_crtc->vsync_event_sf)
+		SDE_ERROR("crtc:%d vsync_event sysfs create failed\n",
+						crtc->base.id);
+
+end:
+	return rc;
+}
+
 static int _sde_crtc_event_enable(struct sde_kms *kms,
 		struct drm_crtc *crtc_drm, u32 event)
 {
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 9501d0f..589a667 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -183,6 +183,9 @@ struct sde_crtc_event {
  * @vblank_cb_count : count of vblank callback since last reset
  * @play_count    : frame count between crtc enable and disable
  * @vblank_cb_time  : ktime at vblank count reset
+ * @vblank_last_cb_time  : ktime at last vblank notification
+ * @sysfs_dev  : sysfs device node for crtc
+ * @vsync_event_sf : vsync event notifier sysfs device
  * @vblank_requested : whether the user has requested vblank events
  * @suspend         : whether or not a suspend operation is in progress
  * @enabled       : whether the SDE CRTC is currently enabled. updated in the
@@ -246,6 +249,9 @@ struct sde_crtc {
 	u32 vblank_cb_count;
 	u64 play_count;
 	ktime_t vblank_cb_time;
+	ktime_t vblank_last_cb_time;
+	struct device *sysfs_dev;
+	struct kernfs_node *vsync_event_sf;
 	bool vblank_requested;
 	bool suspend;
 	bool enabled;
@@ -545,6 +551,14 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc,
 struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane);
 
 /**
+ * sde_crtc_post_init - perform post-initialization updates on the crtc
+ *      object, such as creating its debugfs and sysfs entries.
+ * @dev: sde device
+ * @crtc: Pointer to drm crtc structure
+ */
+int sde_crtc_post_init(struct drm_device *dev, struct drm_crtc *crtc);
+
+/**
  * sde_crtc_cancel_pending_flip - complete flip for clients on lastclose
  * @crtc: Pointer to drm crtc object
  * @file: client to cancel's file handle
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 0ed994b..4008115 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -2412,7 +2412,6 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
 	struct msm_compression_info *comp_info = NULL;
 	struct drm_display_mode *cur_mode = NULL;
 	struct msm_mode_info mode_info;
-	struct drm_connector *drm_conn = NULL;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -2503,10 +2502,6 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
 		sde_enc->cur_master->ops.enable(sde_enc->cur_master);
 
 	_sde_encoder_virt_enable_helper(drm_enc);
-
-	/* Enable ESD thread */
-	drm_conn = sde_enc->cur_master->connector;
-	sde_connector_schedule_status_work(drm_conn, true);
 }
 
 static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -3453,16 +3448,21 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 {
 	struct sde_encoder_virt *sde_enc;
 	struct sde_encoder_phys *phys;
+	struct sde_kms *sde_kms = NULL;
+	struct msm_drm_private *priv = NULL;
 	bool needs_hw_reset = false;
 	uint32_t ln_cnt1, ln_cnt2;
 	unsigned int i;
 	int rc, ret = 0;
 
-	if (!drm_enc || !params) {
+	if (!drm_enc || !params || !drm_enc->dev ||
+		!drm_enc->dev->dev_private) {
 		SDE_ERROR("invalid args\n");
 		return -EINVAL;
 	}
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	priv = drm_enc->dev->dev_private;
+	sde_kms = to_sde_kms(priv->kms);
 
 	SDE_DEBUG_ENC(sde_enc, "\n");
 	SDE_EVT32(DRMID(drm_enc));
@@ -3531,7 +3531,8 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 		}
 	}
 
-	if (_sde_encoder_is_dsc_enabled(drm_enc)) {
+	if (_sde_encoder_is_dsc_enabled(drm_enc) &&
+		!sde_kms->splash_data.cont_splash_en) {
 		rc = _sde_encoder_dsc_setup(sde_enc, params);
 		if (rc) {
 			SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n", rc);
@@ -4398,8 +4399,8 @@ int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder)
 		return ret;
 	}
 
-	if (conn->encoder) {
-		conn->state->best_encoder = conn->encoder;
+	if (sde_conn->encoder) {
+		conn->state->best_encoder = sde_conn->encoder;
 		SDE_DEBUG_ENC(sde_enc,
 			"configured cstate->best_encoder to ID = %d\n",
 			conn->state->best_encoder->base.id);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index d7cbfbe..7ba9ec9 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -891,7 +891,8 @@ static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc)
 	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
 
 	if (phys_enc->enable_state == SDE_ENC_ENABLED) {
-		SDE_ERROR("already enabled\n");
+		if (!phys_enc->sde_kms->splash_data.cont_splash_en)
+			SDE_ERROR("already enabled\n");
 		return;
 	}
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index aaf50f6..ad27b7f 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -263,12 +263,13 @@ static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
 	if (!phys_enc || !vid_enc->hw_intf || !phys_enc->hw_ctl ||
 			!phys_enc->hw_ctl->ops.get_bitmask_intf ||
 			!phys_enc->hw_ctl->ops.update_pending_flush ||
-			!vid_enc->hw_intf->ops.setup_rot_start)
+			!vid_enc->hw_intf->ops.setup_rot_start ||
+			!phys_enc->sde_kms)
 		return;
 
 	timing = &vid_enc->timing_params;
 	vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
-	if (vfp_fetch_lines && rot_fetch_lines) {
+	if (rot_fetch_lines) {
 		vert_total = get_vertical_total(timing);
 		horiz_total = get_horizontal_total(timing);
 		if (vert_total >= (vfp_fetch_lines + rot_fetch_lines)) {
@@ -277,6 +278,13 @@ static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
 			    horiz_total + 1;
 			f.enable = 1;
 			f.fetch_start = rot_fetch_start_vsync_counter;
+		} else {
+			SDE_ERROR_VIDENC(vid_enc,
+				"vert_total %u rot_fetch_lines %u vfp_fetch_lines %u\n",
+				vert_total, rot_fetch_lines, vfp_fetch_lines);
+			SDE_EVT32(DRMID(phys_enc->parent), vert_total,
+				rot_fetch_lines, vfp_fetch_lines,
+				SDE_EVTLOG_ERROR);
 		}
 	}
 
@@ -290,14 +298,17 @@ static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
 		rot_fetch_lines, vfp_fetch_lines,
 		rot_fetch_start_vsync_counter);
 
-	phys_enc->hw_ctl->ops.get_bitmask_intf(
-			phys_enc->hw_ctl, &flush_mask, vid_enc->hw_intf->idx);
-	phys_enc->hw_ctl->ops.update_pending_flush(
-			phys_enc->hw_ctl, flush_mask);
+	if (!phys_enc->sde_kms->splash_data.cont_splash_en) {
+		phys_enc->hw_ctl->ops.get_bitmask_intf(
+				phys_enc->hw_ctl, &flush_mask,
+				vid_enc->hw_intf->idx);
+		phys_enc->hw_ctl->ops.update_pending_flush(
+				phys_enc->hw_ctl, flush_mask);
 
-	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-	vid_enc->hw_intf->ops.setup_rot_start(vid_enc->hw_intf, &f);
-	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+		vid_enc->hw_intf->ops.setup_rot_start(vid_enc->hw_intf, &f);
+		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+	}
 
 	vid_enc->rot_fetch = f;
 	vid_enc->rot_fetch_valid = true;
@@ -328,7 +339,8 @@ static void sde_encoder_phys_vid_setup_timing_engine(
 	unsigned long lock_flags;
 	struct sde_hw_intf_cfg intf_cfg = { 0 };
 
-	if (!phys_enc || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+	if (!phys_enc || !phys_enc->sde_kms || !phys_enc->hw_ctl ||
+			!phys_enc->hw_ctl->ops.setup_intf_cfg) {
 		SDE_ERROR("invalid encoder %d\n", phys_enc != 0);
 		return;
 	}
@@ -358,6 +370,14 @@ static void sde_encoder_phys_vid_setup_timing_engine(
 
 	drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
 
+	vid_enc->timing_params = timing_params;
+
+	if (phys_enc->sde_kms->splash_data.cont_splash_en) {
+		SDE_DEBUG_VIDENC(vid_enc,
+			"skipping intf programming since cont splash is enabled\n");
+		return;
+	}
+
 	fmt = sde_get_sde_format(fmt_fourcc);
 	SDE_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
 
@@ -371,10 +391,7 @@ static void sde_encoder_phys_vid_setup_timing_engine(
 			&timing_params, fmt);
 	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-
 	programmable_fetch_config(phys_enc, &timing_params);
-
-	vid_enc->timing_params = timing_params;
 }
 
 static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
@@ -653,7 +670,8 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
 	u32 flush_mask = 0;
 
 	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
-			!phys_enc->parent->dev->dev_private) {
+			!phys_enc->parent->dev->dev_private ||
+			!phys_enc->sde_kms) {
 		SDE_ERROR("invalid encoder/device\n");
 		return;
 	}
@@ -676,7 +694,9 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
 	/* reset state variables until after first update */
 	vid_enc->rot_fetch_valid = false;
 
-	sde_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+	if (!phys_enc->sde_kms->splash_data.cont_splash_en)
+		sde_encoder_helper_split_config(phys_enc,
+						vid_enc->hw_intf->idx);
 
 	sde_encoder_phys_vid_setup_timing_engine(phys_enc);
 
@@ -689,6 +709,17 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
 		!sde_encoder_phys_vid_is_master(phys_enc))
 		goto skip_flush;
 
+	/*
+	 * Skip flushing the interface during continuous splash handoff
+	 * since the bootloader has already enabled the hardware and it
+	 * is single buffered.
+	 */
+	if (phys_enc->sde_kms->splash_data.cont_splash_en) {
+		SDE_DEBUG_VIDENC(vid_enc,
+		"skipping intf flush bit set as cont. splash is enabled\n");
+		goto skip_flush;
+	}
+
 	ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
 	ctl->ops.update_pending_flush(ctl, flush_mask);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index ddff6ee..c23afc5 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -3173,7 +3173,8 @@ static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg,
 	if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300) ||
 	    IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_301) ||
 	    IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_400) ||
-	    IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_401))
+	    IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_401) ||
+	    IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_410))
 		sde_cfg->has_hdr = true;
 
 	index = sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 4437987..b8c790f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -42,47 +42,6 @@
 
 #define SDE_REG_RESET_TIMEOUT_US        2000
 
-#define MDP_CTL_FLUSH(n) ((0x2000) + (0x200*n) + CTL_FLUSH)
-#define CTL_FLUSH_LM_BIT(n) (6 + n)
-#define CTL_TOP_LM_OFFSET(index, lm) (0x2000 + (0x200 * index) + (lm * 0x4))
-
-int sde_unstage_pipe_for_cont_splash(struct sde_splash_data *data,
-		void __iomem *mmio)
-{
-	int i, j;
-	u32 op_mode;
-
-	if (!data) {
-		pr_err("invalid splash data\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < data->ctl_top_cnt; i++) {
-		struct ctl_top *top = &data->top[i];
-		u8 ctl_id = data->ctl_ids[i] - CTL_0;
-		u32 regval = 0;
-
-		op_mode = readl_relaxed(mmio + MDP_CTL_FLUSH(ctl_id));
-
-		/* Set border fill*/
-		regval |= CTL_MIXER_BORDER_OUT;
-
-		for (j = 0; j < top->ctl_lm_cnt; j++) {
-			u8 lm_id = top->lm[j].lm_id - LM_0;
-
-			writel_relaxed(regval,
-			mmio + CTL_TOP_LM_OFFSET(ctl_id, lm_id));
-
-			op_mode |= BIT(CTL_FLUSH_LM_BIT(lm_id));
-		}
-		op_mode |= CTL_FLUSH_MASK_CTL;
-
-		writel_relaxed(op_mode, mmio + MDP_CTL_FLUSH(ctl_id));
-	}
-	return 0;
-
-}
-
 static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
 		struct sde_mdss_cfg *m,
 		void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index a9bd104..435fc21 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -277,15 +277,6 @@ struct sde_hw_ctl {
 };
 
 /**
- * sde_unstage_pipe_for_cont_splash - Unstage pipes for continuous splash
- * @data: pointer to sde splash data
- * @mmio: mapped register io address of MDP
- * @return: error code
- */
-int sde_unstage_pipe_for_cont_splash(struct sde_splash_data *data,
-		void __iomem *mmio);
-
-/**
  * sde_hw_ctl - convert base object sde_hw_base to container
  * @hw: Pointer to base hardware block
  * return: Pointer to hardware block container
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index 0d85c53..9355080 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -846,7 +846,7 @@ static void sde_hw_sspp_setup_ts_prefill(struct sde_hw_pipe *ctx,
 
 	cap = ctx->cap;
 
-	if (index == SDE_SSPP_RECT_0 &&
+	if ((index == SDE_SSPP_RECT_SOLO || index == SDE_SSPP_RECT_0) &&
 			test_bit(SDE_SSPP_TS_PREFILL, &cap->features)) {
 		ts_offset = SSPP_TRAFFIC_SHAPER;
 		ts_prefill_offset = SSPP_TRAFFIC_SHAPER_PREFILL;
@@ -855,6 +855,7 @@ static void sde_hw_sspp_setup_ts_prefill(struct sde_hw_pipe *ctx,
 		ts_offset = SSPP_TRAFFIC_SHAPER_REC1;
 		ts_prefill_offset = SSPP_TRAFFIC_SHAPER_REC1_PREFILL;
 	} else {
+		pr_err("%s: unexpected idx:%d\n", __func__, index);
 		return;
 	}
 
@@ -888,12 +889,14 @@ static void sde_hw_sspp_setup_cdp(struct sde_hw_pipe *ctx,
 	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
 		return;
 
-	if (index == SDE_SSPP_RECT_0)
+	if (index == SDE_SSPP_RECT_SOLO || index == SDE_SSPP_RECT_0) {
 		cdp_cntl_offset = SSPP_CDP_CNTL;
-	else if (index == SDE_SSPP_RECT_1)
+	} else if (index == SDE_SSPP_RECT_1) {
 		cdp_cntl_offset = SSPP_CDP_CNTL_REC1;
-	else
+	} else {
+		pr_err("%s: unexpected idx:%d\n", __func__, index);
 		return;
+	}
 
 	if (cfg->enable)
 		cdp_cntl |= BIT(0);
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index bbd6f45..d39e3a8 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -526,6 +526,56 @@ static int _sde_kms_release_splash_buffer(unsigned int mem_addr,
 
 }
 
+static int _sde_kms_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
+		struct sde_splash_data *data)
+{
+	int ret = 0;
+
+	if (!mmu || !data)
+		return -EINVAL;
+
+	ret = mmu->funcs->one_to_one_map(mmu, data->splash_base,
+				data->splash_base, data->splash_size,
+				IOMMU_READ | IOMMU_NOEXEC);
+	if (ret)
+		SDE_ERROR("Splash smmu map failed: %d\n", ret);
+
+	return ret;
+}
+
+static int _sde_kms_splash_smmu_unmap(struct sde_kms *sde_kms)
+{
+	struct sde_splash_data *data;
+	struct msm_mmu *mmu;
+	int rc = 0;
+
+	if (!sde_kms)
+		return -EINVAL;
+
+	data = &sde_kms->splash_data;
+	if (!data) {
+		SDE_ERROR("Invalid splash data\n");
+		return -EINVAL;
+	}
+
+	if (!sde_kms->aspace[0]) {
+		SDE_ERROR("aspace not found for sde kms node\n");
+		return -EINVAL;
+	}
+
+	mmu = sde_kms->aspace[0]->mmu;
+	if (!mmu) {
+		SDE_ERROR("mmu not found for aspace\n");
+		return -EINVAL;
+	}
+
+	if (mmu->funcs && mmu->funcs->one_to_one_unmap)
+		mmu->funcs->one_to_one_unmap(mmu, data->splash_base,
+				data->splash_size);
+
+	return rc;
+}
+
 static void sde_kms_prepare_commit(struct msm_kms *kms,
 		struct drm_atomic_state *state)
 {
@@ -536,8 +586,6 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
 	int i, rc = 0;
-	struct drm_plane *plane;
-	bool commit_no_planes = true;
 
 	if (!kms)
 		return;
@@ -566,28 +614,8 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
 		}
 	}
 
-	if (sde_kms->splash_data.smmu_handoff_pending) {
-		list_for_each_entry(plane, &dev->mode_config.plane_list, head)
-			if (plane->state != NULL &&
-					plane->state->crtc != NULL)
-				commit_no_planes = false;
-	}
-
-	if (sde_kms->splash_data.smmu_handoff_pending && commit_no_planes) {
-
-		rc = sde_unstage_pipe_for_cont_splash(&sde_kms->splash_data,
-						sde_kms->mmio);
-		if (rc)
-			SDE_ERROR("pipe staging failed: %d\n", rc);
-
-		rc = _sde_kms_release_splash_buffer(
-				sde_kms->splash_data.splash_base,
-				sde_kms->splash_data.splash_size);
-		if (rc)
-			SDE_ERROR("release of splash memory failed %d\n", rc);
-
+	if (sde_kms->splash_data.smmu_handoff_pending)
 		sde_kms->splash_data.smmu_handoff_pending = false;
-	}
 
 	/*
 	 * NOTE: for secure use cases we want to apply the new HW
@@ -667,12 +695,36 @@ static void sde_kms_complete_commit(struct msm_kms *kms,
 	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
 
 	if (sde_kms->splash_data.cont_splash_en) {
+		/* Release splash resources after the first frame update */
+		rc = _sde_kms_splash_smmu_unmap(sde_kms);
 		SDE_DEBUG("Disabling cont_splash feature\n");
 		sde_kms->splash_data.cont_splash_en = false;
+
+		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
+			sde_power_data_bus_set_quota(&priv->phandle,
+				sde_kms->core_client,
+				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i,
+				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
+				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
+
 		sde_power_resource_enable(&priv->phandle,
 				sde_kms->core_client, false);
 		SDE_DEBUG("removing Vote for MDP Resources\n");
 	}
+
+	/*
+	 * Even when continuous splash is disabled, the splash memory
+	 * reservation must be released back to the system after the
+	 * first frame update.
+	 */
+	if (sde_kms->splash_data.splash_base) {
+		rc = _sde_kms_release_splash_buffer(
+				sde_kms->splash_data.splash_base,
+				sde_kms->splash_data.splash_size);
+		if (rc)
+			pr_err("Failed to release splash memory\n");
+		sde_kms->splash_data.splash_base = 0;
+		sde_kms->splash_data.splash_size = 0;
+	}
 }
 
 static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
@@ -872,7 +924,8 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
 		.get_dst_format = dsi_display_get_dst_format,
 		.post_kickoff = dsi_conn_post_kickoff,
 		.check_status = dsi_display_check_status,
-		.enable_event = dsi_conn_enable_event
+		.enable_event = dsi_conn_enable_event,
+		.cmd_transfer = dsi_display_cmd_transfer,
 	};
 	static const struct sde_connector_ops wb_ops = {
 		.post_init =    sde_wb_connector_post_init,
@@ -885,6 +938,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
 		.get_mode_info = sde_wb_get_mode_info,
 		.get_dst_format = NULL,
 		.check_status = NULL,
+		.cmd_transfer = NULL,
 	};
 	static const struct sde_connector_ops dp_ops = {
 		.post_init  = dp_connector_post_init,
@@ -896,6 +950,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
 		.post_open  = dp_connector_post_open,
 		.check_status = NULL,
 		.pre_kickoff  = dp_connector_pre_kickoff,
+		.cmd_transfer = NULL,
 	};
 	struct msm_display_info info;
 	struct drm_encoder *encoder;
@@ -1507,6 +1562,7 @@ static int sde_kms_postinit(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms = to_sde_kms(kms);
 	struct drm_device *dev;
+	struct drm_crtc *crtc;
 	int rc;
 
 	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
@@ -1520,6 +1576,9 @@ static int sde_kms_postinit(struct msm_kms *kms)
 	if (rc)
 		SDE_ERROR("sde_debugfs init failed: %d\n", rc);
 
+	drm_for_each_crtc(crtc, dev)
+		sde_crtc_post_init(dev, crtc);
+
 	return rc;
 }
 
@@ -1562,6 +1621,9 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
 				&priv->phandle, sde_kms->power_event);
 
 	_sde_kms_release_displays(sde_kms);
+	(void)_sde_kms_release_splash_buffer(
+				sde_kms->splash_data.splash_base,
+				sde_kms->splash_data.splash_size);
 
 	/* safe to call these more than once during shutdown */
 	_sde_debugfs_destroy(sde_kms);
@@ -2107,39 +2169,6 @@ static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file)
 
 }
 
-static int _sde_kms_gen_drm_mode(struct sde_kms *sde_kms,
-				void *display,
-				struct drm_display_mode *drm_mode)
-{
-	struct dsi_display_mode *modes = NULL;
-	u32 count = 0;
-	int rc = 0;
-
-	rc = dsi_display_get_mode_count(display, &count);
-	if (rc) {
-		SDE_ERROR("failed to get num of modes, rc=%d\n", rc);
-		return rc;
-	}
-
-	SDE_DEBUG("num of modes = %d\n", count);
-
-	rc = dsi_display_get_modes(display, &modes);
-	if (rc) {
-		SDE_ERROR("failed to get modes, rc=%d\n", rc);
-		count = 0;
-		return rc;
-	}
-
-	/* TODO; currently consider modes[0] as the preferred mode */
-	dsi_convert_to_drm_mode(&modes[0], drm_mode);
-
-	SDE_DEBUG("hdisplay = %d, vdisplay = %d\n",
-		drm_mode->hdisplay, drm_mode->vdisplay);
-	drm_mode_set_name(drm_mode);
-	drm_mode_set_crtcinfo(drm_mode, 0);
-	return rc;
-}
-
 static int sde_kms_cont_splash_config(struct msm_kms *kms)
 {
 	void *display;
@@ -2152,6 +2181,9 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms)
 	struct drm_device *dev;
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
+	struct list_head *connector_list = NULL;
+	struct drm_connector *conn_iter = NULL;
+	struct drm_connector *connector = NULL;
 
 	if (!kms) {
 		SDE_ERROR("invalid kms\n");
@@ -2204,13 +2236,46 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms)
 	crtc = encoder->crtc;
 	SDE_DEBUG("crtc id = %d\n", crtc->base.id);
 
-	crtc->state->encoder_mask = (1 << drm_encoder_index(encoder));
-	drm_mode = drm_mode_create(encoder->dev);
-	if (!drm_mode) {
-		SDE_ERROR("drm_mode create failed\n");
+	mutex_lock(&dev->mode_config.mutex);
+	connector_list = &dev->mode_config.connector_list;
+	list_for_each_entry(conn_iter, connector_list, head) {
+		/*
+		 * SDE_KMS doesn't attach more than one encoder to
+		 * a DSI connector. So it is safe to check only with the
+		 * first encoder entry. Revisit this logic if we ever have
+		 * to support continuous splash for external displays in MST
+		 * configuration.
+		 */
+		if (conn_iter &&
+			conn_iter->encoder_ids[0] == encoder->base.id) {
+			connector = conn_iter;
+			break;
+		}
+	}
+
+	if (!connector) {
+		SDE_ERROR("connector not initialized\n");
+		mutex_unlock(&dev->mode_config.mutex);
 		return -EINVAL;
 	}
-	_sde_kms_gen_drm_mode(sde_kms, display, drm_mode);
+
+	if (connector->funcs->fill_modes) {
+		connector->funcs->fill_modes(connector,
+			dev->mode_config.max_width,
+			dev->mode_config.max_height);
+	} else {
+		SDE_ERROR("fill_modes api not defined\n");
+		mutex_unlock(&dev->mode_config.mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+
+	crtc->state->encoder_mask = (1 << drm_encoder_index(encoder));
+
+	/* currently consider modes[0] as the preferred mode */
+	drm_mode = list_first_entry(&connector->modes,
+					struct drm_display_mode, head);
 	SDE_DEBUG("drm_mode->name = %s, id=%d, type=0x%x, flags=0x%x\n",
 			drm_mode->name, drm_mode->base.id,
 			drm_mode->type, drm_mode->flags);
@@ -2466,6 +2531,7 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
 {
 	struct msm_mmu *mmu;
 	int i, ret;
+	int early_map = 1;
 
 	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
 		struct msm_gem_address_space *aspace;
@@ -2478,6 +2544,23 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
 			continue;
 		}
 
+		/*
+		 * Before attaching the SMMU, honor the continuous splash
+		 * use case, where the hardware fetches the frame buffer
+		 * directly from its physical address. This requires a
+		 * one-to-one SMMU mapping until the first frame arrives.
+		 */
+		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
+			sde_kms->splash_data.smmu_handoff_pending) {
+			ret = mmu->funcs->set_attribute(mmu,
+				DOMAIN_ATTR_EARLY_MAP,
+				&early_map);
+			if (ret) {
+				SDE_ERROR("failed to set map att: %d\n", ret);
+				goto fail;
+			}
+		}
+
 		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
 			mmu, "sde");
 		if (IS_ERR(aspace)) {
@@ -2496,10 +2579,37 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
 			goto fail;
 		}
 		aspace->domain_attached = true;
+		early_map = 0;
+		/* Mapping splash memory block */
+		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
+			sde_kms->splash_data.smmu_handoff_pending) {
+			ret = _sde_kms_splash_smmu_map(sde_kms->dev, mmu,
+					&sde_kms->splash_data);
+			if (ret) {
+				SDE_ERROR("failed to map ret:%d\n", ret);
+				goto fail;
+			}
+			/*
+			 * Turn off early map once the one-to-one mapping
+			 * for the splash region is in place.
+			 */
+			ret = mmu->funcs->set_attribute(mmu,
+				DOMAIN_ATTR_EARLY_MAP,
+				&early_map);
+			if (ret) {
+				SDE_ERROR("failed to set map att ret:%d\n",
+									ret);
+				goto early_map_fail;
+			}
+		}
 	}
 
 	return 0;
+early_map_fail:
+	mmu->funcs->one_to_one_unmap(mmu, sde_kms->splash_data.splash_base,
+					sde_kms->splash_data.splash_size);
 fail:
+	mmu->funcs->destroy(mmu);
 	_sde_kms_mmu_destroy(sde_kms);
 
 	return ret;
@@ -2607,8 +2717,6 @@ static int _sde_kms_get_splash_data(struct sde_splash_data *data)
 	pr_info("found continuous splash base address:%lx size:%x\n",
 						data->splash_base,
 						data->splash_size);
-	data->smmu_handoff_pending = true;
-
 	return ret;
 }
 
@@ -2618,7 +2726,6 @@ static int sde_kms_hw_init(struct msm_kms *kms)
 	struct drm_device *dev;
 	struct msm_drm_private *priv;
 	struct sde_rm *rm = NULL;
-	bool splash_mem_found = false;
 	int i, rc = -EINVAL;
 
 	if (!kms) {
@@ -2712,12 +2819,8 @@ static int sde_kms_hw_init(struct msm_kms *kms)
 	}
 
 	rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
-	if (rc) {
+	if (rc)
 		SDE_DEBUG("sde splash data fetch failed: %d\n", rc);
-		splash_mem_found = false;
-	} else {
-		splash_mem_found = true;
-	}
 
 	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
 		true);
@@ -2726,6 +2829,13 @@ static int sde_kms_hw_init(struct msm_kms *kms)
 		goto error;
 	}
 
+	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
+		sde_power_data_bus_set_quota(&priv->phandle,
+			sde_kms->core_client,
+			SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i,
+			SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA,
+			SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA);
+
 	_sde_kms_core_hw_rev_init(sde_kms);
 
 	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);
@@ -2756,11 +2866,18 @@ static int sde_kms_hw_init(struct msm_kms *kms)
 	 * Attempt continuous splash handoff only if reserved
 	 * splash memory is found.
 	 */
-	if (splash_mem_found)
+	if (sde_kms->splash_data.splash_base)
 		sde_rm_cont_splash_res_init(&sde_kms->rm,
 					&sde_kms->splash_data,
 					sde_kms->catalog);
 
+	/*
+	 * SMMU handoff is necessary for continuous splash enabled
+	 * scenario.
+	 */
+	if (sde_kms->splash_data.cont_splash_en)
+		sde_kms->splash_data.smmu_handoff_pending = true;
+
 	/* Initialize reg dma block which is a singleton */
 	rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
 			sde_kms->dev);
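
Note: taken together, the sde_kms hunks above order the SMMU handoff as
follows, assuming the unsecure domain carries the splash buffer. A
condensed sketch of the sequence, using the function names from the
hunks, with error handling elided:

	/*
	 * 1. early_map = 1; set DOMAIN_ATTR_EARLY_MAP on the unsecure
	 *    domain so mappings may be created around attach time.
	 * 2. Attach the domain (aspace->mmu->funcs->attach).
	 * 3. _sde_kms_splash_smmu_map(): identity-map the splash buffer
	 *    (IOMMU_READ | IOMMU_NOEXEC) so scanout continues untouched.
	 * 4. early_map = 0; clear DOMAIN_ATTR_EARLY_MAP again.
	 * 5. After the first frame commit: _sde_kms_splash_smmu_unmap()
	 *    and _sde_kms_release_splash_buffer() return the memory to
	 *    the system.
	 */
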
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 5c72efa..3a67a22 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -2364,7 +2364,7 @@ static void _sde_dbg_dump_sde_dbg_bus(struct sde_dbg_sde_debug_bus *bus)
 			offset = DBGBUS_DSPP_STATUS;
 			/* keep DSPP test point enabled */
 			if (head->wr_addr != DBGBUS_DSPP)
-				writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
+				writel_relaxed(0x7001, mem_base + DBGBUS_DSPP);
 		} else {
 			offset = head->wr_addr + 0x4;
 		}
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 72975e7..fb7322e 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -22,6 +22,9 @@
 #define SDE_POWER_HANDLE_ENABLE_NRT_BUS_IB_QUOTA	0
 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
 
+#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA	1800000000
+#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA	1800000000
+
 #include <linux/sde_io_util.h>
 
 /* event will be triggered before power handler disable */
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index c1af0e5..5bfed6f 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -406,6 +406,7 @@
 #define A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL      0xF810
 
 #define A6XX_RBBM_VBIF_CLIENT_QOS_CNTL   0x00010
+#define A6XX_RBBM_GPR0_CNTL              0x00018
 #define A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0001f
 #define A6XX_RBBM_INT_CLEAR_CMD          0x00037
 #define A6XX_RBBM_INT_0_MASK             0x00038
@@ -810,7 +811,7 @@
 /* GBIF registers */
 #define A6XX_GBIF_HALT                    0x3c45
 #define A6XX_GBIF_HALT_ACK                0x3c46
-#define A6XX_GBIF_HALT_MASK               0x1
+#define A6XX_GBIF_HALT_MASK               0x2
 
 #define A6XX_GBIF_PERF_PWR_CNT_EN         0x3cc0
 #define A6XX_GBIF_PERF_CNT_SEL            0x3cc2
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 08cd06b..26c1c39 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -348,7 +348,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
 		.patchid = ANY_ID,
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_IFPC |
 			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION |
-			ADRENO_IOCOHERENT,
+			ADRENO_IOCOHERENT | ADRENO_PREEMPTION,
 		.sqefw_name = "a630_sqe.fw",
 		.zap_name = "a630_zap",
 		.gpudev = &adreno_a6xx_gpudev,
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index b8006b7..8d18fc2 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -119,7 +119,6 @@ static struct adreno_device device_3d0 = {
 		.skipsaverestore = 1,
 		.usesgmem = 1,
 	},
-	.priv = BIT(ADRENO_DEVICE_PREEMPTION_EXECUTION),
 };
 
 /* Ptr to array for the current set of fault detect registers */
@@ -1431,7 +1430,8 @@ static int adreno_init(struct kgsl_device *device)
 
 	}
 
-	if (nopreempt == false) {
+	if (nopreempt == false &&
+		ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION)) {
 		int r = 0;
 
 		if (gpudev->preemption_init)
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index ca6276e..bb173421 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -226,6 +226,10 @@ enum adreno_gpurev {
 #define ADRENO_HWCG_CTRL    3
 #define ADRENO_THROTTLING_CTRL 4
 
+/* VBIF and GBIF halt request and ack masks */
+#define GBIF_HALT_REQUEST       0x1E0
+#define VBIF_RESET_ACK_MASK     0x00f0
+#define VBIF_RESET_ACK_TIMEOUT  100
 
 /* number of throttle counters for DCVS adjustment */
 #define ADRENO_GPMU_THROTTLE_COUNTERS 4
@@ -571,7 +575,6 @@ enum adreno_device_flags {
 	ADRENO_DEVICE_ISDB_ENABLED = 12,
 	ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED = 13,
 	ADRENO_DEVICE_HARD_RESET = 14,
-	ADRENO_DEVICE_PREEMPTION_EXECUTION = 15,
 	ADRENO_DEVICE_CORESIGHT_CX = 16,
 };
 
@@ -684,6 +687,8 @@ enum adreno_regs {
 	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
 	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
 	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
+	ADRENO_REG_RBBM_GPR0_CNTL,
+	ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
 	ADRENO_REG_VBIF_XIN_HALT_CTRL0,
 	ADRENO_REG_VBIF_XIN_HALT_CTRL1,
 	ADRENO_REG_VBIF_VERSION,
@@ -1642,22 +1647,10 @@ static inline void adreno_set_preempt_state(struct adreno_device *adreno_dev,
 	smp_wmb();
 }
 
-static inline bool adreno_is_preemption_execution_enabled(
-				struct adreno_device *adreno_dev)
-{
-	return test_bit(ADRENO_DEVICE_PREEMPTION_EXECUTION, &adreno_dev->priv);
-}
-
-static inline bool adreno_is_preemption_setup_enabled(
-				struct adreno_device *adreno_dev)
-{
-	return test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
-}
-
 static inline bool adreno_is_preemption_enabled(
 				struct adreno_device *adreno_dev)
 {
-	return 0;
+	return test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
 }
 /**
  * adreno_ctx_get_rb() - Return the ringbuffer that a context should
@@ -1682,7 +1675,7 @@ static inline struct adreno_ringbuffer *adreno_ctx_get_rb(
 	 * ringbuffer
 	 */
 
-	if (!adreno_is_preemption_execution_enabled(adreno_dev))
+	if (!adreno_is_preemption_enabled(adreno_dev))
 		return &(adreno_dev->ringbuffers[0]);
 
 	/*
@@ -1890,17 +1883,15 @@ static inline bool adreno_has_gbif(struct adreno_device *adreno_dev)
  * @ack_reg: register offset to wait for acknowledge
  */
 static inline int adreno_wait_for_vbif_halt_ack(struct kgsl_device *device,
-	int ack_reg)
+	int ack_reg, unsigned int mask)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	unsigned long wait_for_vbif;
-	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
 	unsigned int val;
 	int ret = 0;
 
 	/* wait for the transactions to clear */
-	wait_for_vbif = jiffies + msecs_to_jiffies(100);
+	wait_for_vbif = jiffies + msecs_to_jiffies(VBIF_RESET_ACK_TIMEOUT);
 	while (1) {
 		adreno_readreg(adreno_dev, ack_reg,
 			&val);
@@ -1930,15 +1921,27 @@ static inline int adreno_vbif_clear_pending_transactions(
 	int ret = 0;
 
 	if (adreno_has_gbif(adreno_dev)) {
+		/*
+		 * Halt the GBIF GX side first and then the CX side.
+		 * The CX halt must be released explicitly in the SW_RESET
+		 * case; the GX halt release is handled by SW_RESET internally.
+		 */
+		adreno_writereg(adreno_dev, ADRENO_REG_RBBM_GPR0_CNTL,
+				GBIF_HALT_REQUEST);
+		ret = adreno_wait_for_vbif_halt_ack(device,
+				ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
+				VBIF_RESET_ACK_MASK);
+		if (ret)
+			return ret;
+
 		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, mask);
 		ret = adreno_wait_for_vbif_halt_ack(device,
-				ADRENO_REG_GBIF_HALT_ACK);
-		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, 0);
+				ADRENO_REG_GBIF_HALT_ACK, mask);
 	} else {
 		adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0,
 			mask);
 		ret = adreno_wait_for_vbif_halt_ack(device,
-				ADRENO_REG_VBIF_XIN_HALT_CTRL1);
+				ADRENO_REG_VBIF_XIN_HALT_CTRL1, mask);
 		adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
 	}
 	return ret;
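
For reference, the halt/ack flow added above follows a request-then-poll-with-timeout shape: write the halt request, then spin on the ack register until every mask bit is set or the deadline passes. A minimal user-space sketch of that pattern, where the stub register accessors and tick counter are stand-ins for adreno_readreg()/adreno_writereg() and jiffies, not driver API:

#include <stdio.h>

#define GBIF_HALT_REQUEST      0x1E0
#define VBIF_RESET_ACK_MASK    0x00f0
#define VBIF_RESET_ACK_TIMEOUT 100	/* ticks here; msecs in the driver */

static unsigned int fake_ack;	/* stands in for the ack register */
static unsigned int now;	/* stands in for jiffies */

static void writereg(unsigned int val)
{
	(void)val;
	fake_ack = VBIF_RESET_ACK_MASK;	/* pretend the halt is acked */
}

static unsigned int readreg(void)
{
	now++;	/* each poll costs one tick */
	return fake_ack;
}

/*
 * Request the halt, then poll the ack register until every bit in
 * mask is set or the deadline passes -- the same shape as the
 * adreno_wait_for_vbif_halt_ack() loop above.
 */
static int halt_and_wait(unsigned int mask)
{
	unsigned int deadline = now + VBIF_RESET_ACK_TIMEOUT;

	writereg(GBIF_HALT_REQUEST);
	while ((readreg() & mask) != mask)
		if (now >= deadline)
			return -1;	/* -ETIMEDOUT in the driver */
	return 0;
}

int main(void)
{
	printf("GX halt %s\n",
	       halt_and_wait(VBIF_RESET_ACK_MASK) ? "timed out" : "acked");
	return 0;
}
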
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index d8dc87e..b6b9ba9 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -749,7 +749,7 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
 
 	/* Enable the GMEM save/restore feature for preemption */
-	if (adreno_is_preemption_setup_enabled(adreno_dev))
+	if (adreno_is_preemption_enabled(adreno_dev))
 		kgsl_regwrite(device, A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
 			0x1);
 
@@ -999,7 +999,7 @@ static int a6xx_post_start(struct adreno_device *adreno_dev)
 	struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 
-	if (!adreno_is_preemption_execution_enabled(adreno_dev))
+	if (!adreno_is_preemption_enabled(adreno_dev))
 		return 0;
 
 	cmds = adreno_ringbuffer_allocspace(rb, 42);
@@ -2049,8 +2049,7 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
 	return _load_gmu_firmware(device);
 }
 
-#define VBIF_RESET_ACK_TIMEOUT	100
-#define VBIF_RESET_ACK_MASK	0x00f0
+#define GBIF_CX_HALT_MASK BIT(1)
 
 static int a6xx_soft_reset(struct adreno_device *adreno_dev)
 {
@@ -2091,6 +2090,13 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev)
 	if (!vbif_acked)
 		return -ETIMEDOUT;
 
+	/*
+	 * The GBIF GX halt is released automatically by sw_reset;
+	 * release the GBIF CX halt explicitly after sw_reset.
+	 */
+	if (adreno_has_gbif(adreno_dev))
+		kgsl_regrmw(device, A6XX_GBIF_HALT, GBIF_CX_HALT_MASK, 0);
+
 	a6xx_sptprac_enable(adreno_dev);
 
 	return 0;
@@ -2307,8 +2313,14 @@ static int a6xx_reset(struct kgsl_device *device, int fault)
 			udelay(100);
 		}
 
-		if (acked)
-			ret = adreno_soft_reset(device);
+		if (acked) {
+			/* Make sure VBIF/GBIF is cleared before resetting */
+			ret = adreno_vbif_clear_pending_transactions(device);
+
+			if (ret == 0)
+				ret = adreno_soft_reset(device);
+		}
+
 		if (ret)
 			KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
 	}
@@ -2543,7 +2555,7 @@ static void a6xx_cp_callback(struct adreno_device *adreno_dev, int bit)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 
-	if (adreno_is_preemption_execution_enabled(adreno_dev))
+	if (adreno_is_preemption_enabled(adreno_dev))
 		a6xx_preemption_trigger(adreno_dev);
 
 	adreno_dispatcher_schedule(device);
@@ -3647,6 +3659,9 @@ static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 				A6XX_VBIF_XIN_HALT_CTRL0),
 	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
 				A6XX_VBIF_XIN_HALT_CTRL1),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_GPR0_CNTL, A6XX_RBBM_GPR0_CNTL),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
+				A6XX_RBBM_VBIF_GX_RESET_STATUS),
 	ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT, A6XX_GBIF_HALT),
 	ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT_ACK, A6XX_GBIF_HALT_ACK),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
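
The CX-halt release after soft reset is a plain read-modify-write that clears one bit and leaves the rest of the register intact. A small sketch of that shape, with a plain variable standing in for the A6XX_GBIF_HALT register and regrmw() mirroring the kgsl_regrmw(reg, mask, bits) calling convention:

#include <stdio.h>

#define GBIF_CX_HALT_MASK (1u << 1)

static unsigned int gbif_halt = 0x3;	/* pretend GX|CX halts are set */

static void regrmw(unsigned int *reg, unsigned int mask, unsigned int bits)
{
	*reg = (*reg & ~mask) | bits;	/* clear mask bits, then OR in bits */
}

int main(void)
{
	/* mirrors kgsl_regrmw(device, A6XX_GBIF_HALT, GBIF_CX_HALT_MASK, 0) */
	regrmw(&gbif_halt, GBIF_CX_HALT_MASK, 0);
	printf("GBIF_HALT now 0x%x (CX bit cleared, GX untouched)\n", gbif_halt);
	return 0;
}
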
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
index 3e9ba55..b9dd5f4 100644
--- a/drivers/gpu/msm/adreno_a6xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -432,7 +432,7 @@ void a6xx_preemption_schedule(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 
-	if (!adreno_is_preemption_execution_enabled(adreno_dev))
+	if (!adreno_is_preemption_enabled(adreno_dev))
 		return;
 
 	mutex_lock(&device->mutex);
@@ -537,7 +537,7 @@ void a6xx_preemption_start(struct adreno_device *adreno_dev)
 	struct adreno_ringbuffer *rb;
 	unsigned int i;
 
-	if (!adreno_is_preemption_execution_enabled(adreno_dev))
+	if (!adreno_is_preemption_enabled(adreno_dev))
 		return;
 
 	/* Force the state to be clear */
@@ -729,7 +729,7 @@ void a6xx_preemption_context_destroy(struct kgsl_context *context)
 	struct kgsl_device *device = context->device;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 
-	if (!adreno_is_preemption_setup_enabled(adreno_dev))
+	if (!adreno_is_preemption_enabled(adreno_dev))
 		return;
 
 	gpumem_free_entry(context->user_ctxt_record);
@@ -744,7 +744,7 @@ int a6xx_preemption_context_init(struct kgsl_context *context)
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	uint64_t flags = 0;
 
-	if (!adreno_is_preemption_setup_enabled(adreno_dev))
+	if (!adreno_is_preemption_enabled(adreno_dev))
 		return 0;
 
 	if (context->flags & KGSL_CONTEXT_SECURE)
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 0caf55b..472f78e 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2141,7 +2141,7 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 	 * Deleting an uninitialized timer will block forever on builds with
 	 * kernel debug disabled, so skip deleting it if it is uninitialized.
 	 */
-	if (adreno_is_preemption_execution_enabled(adreno_dev))
+	if (adreno_is_preemption_enabled(adreno_dev))
 		del_timer_sync(&adreno_dev->preempt.timer);
 
 	mutex_lock(&device->mutex);
diff --git a/drivers/gpu/msm/adreno_ioctl.c b/drivers/gpu/msm/adreno_ioctl.c
index 13d71982..aa8c2bf 100644
--- a/drivers/gpu/msm/adreno_ioctl.c
+++ b/drivers/gpu/msm/adreno_ioctl.c
@@ -96,7 +96,7 @@ static long adreno_ioctl_preemption_counters_query(
 	int levels_to_copy;
 
 	if (!adreno_is_a5xx(adreno_dev) ||
-		!adreno_is_preemption_execution_enabled(adreno_dev))
+		!adreno_is_preemption_enabled(adreno_dev))
 		return -EOPNOTSUPP;
 
 	if (read->size_user < size_level)
diff --git a/drivers/gpu/msm/adreno_iommu.c b/drivers/gpu/msm/adreno_iommu.c
index 1a2f8ff..db6dff2 100644
--- a/drivers/gpu/msm/adreno_iommu.c
+++ b/drivers/gpu/msm/adreno_iommu.c
@@ -761,7 +761,7 @@ static int _set_ctxt_gpu(struct adreno_ringbuffer *rb,
 
 	cmds = &link[0];
 	cmds += __add_curr_ctxt_cmds(rb, cmds, drawctxt);
-	result = adreno_ringbuffer_issuecmds(rb, 0, link,
+	result = adreno_ringbuffer_issue_internal_cmds(rb, 0, link,
 			(unsigned int)(cmds - link));
 	return result;
 }
@@ -834,7 +834,7 @@ static int _set_pagetable_gpu(struct adreno_ringbuffer *rb,
 	 * This returns the per context timestamp but we need to
 	 * use the global timestamp for iommu clock disablement
 	 */
-	result = adreno_ringbuffer_issuecmds(rb,
+	result = adreno_ringbuffer_issue_internal_cmds(rb,
 			KGSL_CMD_FLAGS_PMODE, link,
 			(unsigned int)(cmds - link));
 
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 94fdbc2..5020750 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -823,8 +823,8 @@ static int _perfcounter_enable_default(struct adreno_device *adreno_dev,
 		*cmds++ = cp_register(adreno_dev, reg->select, 1);
 		*cmds++ = countable;
 		/* submit to highest priority RB always */
-		ret = adreno_ringbuffer_issuecmds(rb, KGSL_CMD_FLAGS_PMODE,
-						buf, cmds-buf);
+		ret = adreno_ringbuffer_issue_internal_cmds(rb,
+				KGSL_CMD_FLAGS_PMODE, buf, cmds-buf);
 		if (ret)
 			return ret;
 		/*
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 01d9f71..52a35c4 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -37,6 +37,11 @@
 #define RB_GPUADDR(_rb, _pos) \
 	((_rb)->buffer_desc.gpuaddr + ((_pos) * sizeof(unsigned int)))
 
+static inline bool is_internal_cmds(unsigned int flags)
+{
+	return (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE);
+}
+
 static void adreno_get_submit_time(struct adreno_device *adreno_dev,
 		struct adreno_ringbuffer *rb,
 		struct adreno_submit_time *time)
@@ -260,7 +265,7 @@ int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt)
 			return status;
 	}
 
-	if (nopreempt == false)
+	if (nopreempt == false && ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION))
 		adreno_dev->num_ringbuffers = gpudev->num_prio_levels;
 	else
 		adreno_dev->num_ringbuffers = 1;
@@ -389,7 +394,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
 
 	if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base) &&
-		!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
+		!is_internal_cmds(flags))
 		return -ENOENT;
 
 	/* On fault return error so that we don't keep submitting */
@@ -399,7 +404,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 	rb->timestamp++;
 
 	/* If this is an internal IB, use the global timestamp for it */
-	if (!drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
+	if (!drawctxt || is_internal_cmds(flags))
 		timestamp = rb->timestamp;
 	else {
 		context_id = drawctxt->base.id;
@@ -428,7 +433,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 	 */
 	profile_ready = drawctxt &&
 		adreno_profile_assignments_ready(&adreno_dev->profile) &&
-		!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE);
+		!is_internal_cmds(flags);
 
 	/*
 	 * reserve space to temporarily turn off protected mode
@@ -438,7 +443,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 	/* 2 dwords to store the start of command sequence */
 	total_sizedwords += 2;
 	/* internal ib command identifier for the ringbuffer */
-	total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
+	total_sizedwords += is_internal_cmds(flags) ? 2 : 0;
 
 	total_sizedwords += (secured_ctxt) ? 26 : 0;
 
@@ -455,11 +460,11 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 		total_sizedwords += 4;
 
 	if (gpudev->preemption_pre_ibsubmit &&
-			adreno_is_preemption_execution_enabled(adreno_dev))
+			adreno_is_preemption_enabled(adreno_dev))
 		total_sizedwords += 27;
 
 	if (gpudev->preemption_post_ibsubmit &&
-			adreno_is_preemption_execution_enabled(adreno_dev))
+			adreno_is_preemption_enabled(adreno_dev))
 		total_sizedwords += 10;
 
 	/*
@@ -472,7 +477,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 	total_sizedwords += 8; /* sop timestamp */
 	total_sizedwords += 5; /* eop timestamp */
 
-	if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
+	if (drawctxt && !is_internal_cmds(flags)) {
 		/* global timestamp without cache flush for non-zero context */
 		total_sizedwords += 4;
 	}
@@ -511,12 +516,12 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 	*ringcmds++ = cp_packet(adreno_dev, CP_NOP, 1);
 	*ringcmds++ = KGSL_CMD_IDENTIFIER;
 
-	if (adreno_is_preemption_execution_enabled(adreno_dev) &&
+	if (adreno_is_preemption_enabled(adreno_dev) &&
 				gpudev->preemption_pre_ibsubmit)
 		ringcmds += gpudev->preemption_pre_ibsubmit(
 					adreno_dev, rb, ringcmds, context);
 
-	if (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) {
+	if (is_internal_cmds(flags)) {
 		*ringcmds++ = cp_packet(adreno_dev, CP_NOP, 1);
 		*ringcmds++ = KGSL_CMD_INTERNAL_IDENTIFIER;
 	}
@@ -553,7 +558,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 				&flags, &ringcmds);
 
 	/* start-of-pipeline timestamp for the context */
-	if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
+	if (drawctxt && !is_internal_cmds(flags))
 		ringcmds += cp_mem_write(adreno_dev, ringcmds,
 			MEMSTORE_ID_GPU_ADDR(device, context_id, soptimestamp),
 			timestamp);
@@ -627,12 +632,12 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 	 * set and hence the rb timestamp will be used in else statement below.
 	 */
 	*ringcmds++ = cp_mem_packet(adreno_dev, CP_EVENT_WRITE, 3, 1);
-	if (drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
+	if (drawctxt || is_internal_cmds(flags))
 		*ringcmds++ = CACHE_FLUSH_TS | (1 << 31);
 	else
 		*ringcmds++ = CACHE_FLUSH_TS;
 
-	if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
+	if (drawctxt && !is_internal_cmds(flags)) {
 		ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
 			MEMSTORE_ID_GPU_ADDR(device, context_id, eoptimestamp));
 		*ringcmds++ = timestamp;
@@ -669,7 +674,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 		ringcmds += cp_secure_mode(adreno_dev, ringcmds, 0);
 
 	if (gpudev->preemption_post_ibsubmit &&
-			adreno_is_preemption_execution_enabled(adreno_dev))
+				adreno_is_preemption_enabled(adreno_dev))
 		ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev,
 			ringcmds);
 
@@ -693,7 +698,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 }
 
 int
-adreno_ringbuffer_issuecmds(struct adreno_ringbuffer *rb,
+adreno_ringbuffer_issue_internal_cmds(struct adreno_ringbuffer *rb,
 				unsigned int flags,
 				unsigned int *cmds,
 				int sizedwords)
@@ -874,10 +879,9 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
 			dwords += 2;
 	}
 
-	if (adreno_is_preemption_execution_enabled(adreno_dev)) {
+	if (adreno_is_preemption_enabled(adreno_dev))
 		if (gpudev->preemption_yield_enable)
 			dwords += 8;
-	}
 
 	/*
 	 * Prior to SQE FW version 1.49, there was only one marker for
@@ -952,10 +956,9 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
 	if (gpudev->ccu_invalidate)
 		cmds += gpudev->ccu_invalidate(adreno_dev, cmds);
 
-	if (adreno_is_preemption_execution_enabled(adreno_dev)) {
+	if (adreno_is_preemption_enabled(adreno_dev))
 		if (gpudev->preemption_yield_enable)
 			cmds += gpudev->preemption_yield_enable(cmds);
-	}
 
 	if (kernel_profiling) {
 		cmds += _get_alwayson_counter(adreno_dev, cmds,
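
The is_internal_cmds() helper introduced above replaces the repeated open-coded KGSL_CMD_FLAGS_INTERNAL_ISSUE tests used while budgeting ringbuffer dwords. A minimal sketch of the pattern; the flag value and dword counts here are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

#define CMD_FLAGS_INTERNAL_ISSUE (1u << 0)	/* hypothetical flag value */

static inline bool is_internal_cmds(unsigned int flags)
{
	return flags & CMD_FLAGS_INTERNAL_ISSUE;
}

static unsigned int budget_dwords(unsigned int flags, bool have_ctx)
{
	unsigned int total = 2;			/* start-of-command marker */

	total += is_internal_cmds(flags) ? 2 : 0; /* internal IB identifier */
	if (have_ctx && !is_internal_cmds(flags))
		total += 4;			/* global ts, no cache flush */
	return total;
}

int main(void)
{
	printf("internal: %u dwords, user: %u dwords\n",
	       budget_dwords(CMD_FLAGS_INTERNAL_ISSUE, true),
	       budget_dwords(0, true));
	return 0;
}
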
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index fbee627..1dfdb5b 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -158,7 +158,7 @@ void adreno_ringbuffer_stop(struct adreno_device *adreno_dev);
 
 void adreno_ringbuffer_close(struct adreno_device *adreno_dev);
 
-int adreno_ringbuffer_issuecmds(struct adreno_ringbuffer *rb,
+int adreno_ringbuffer_issue_internal_cmds(struct adreno_ringbuffer *rb,
 					unsigned int flags,
 					unsigned int *cmdaddr,
 					int sizedwords);
diff --git a/drivers/gpu/msm/adreno_sysfs.c b/drivers/gpu/msm/adreno_sysfs.c
index e309ab0..2d2c9e5 100644
--- a/drivers/gpu/msm/adreno_sysfs.c
+++ b/drivers/gpu/msm/adreno_sysfs.c
@@ -223,14 +223,17 @@ static int _preemption_store(struct adreno_device *adreno_dev,
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 
-	if (test_bit(ADRENO_DEVICE_PREEMPTION_EXECUTION,
-		&adreno_dev->priv) == val)
-		return 0;
-
 	mutex_lock(&device->mutex);
 
+	if (!(ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION)) ||
+		(test_bit(ADRENO_DEVICE_PREEMPTION,
+		&adreno_dev->priv) == val)) {
+		mutex_unlock(&device->mutex);
+		return 0;
+	}
+
 	kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
-	change_bit(ADRENO_DEVICE_PREEMPTION_EXECUTION, &adreno_dev->priv);
+	change_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
 	adreno_dev->cur_rb = &(adreno_dev->ringbuffers[0]);
 	kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
 
@@ -258,7 +261,7 @@ static int _gmu_idle_level_store(struct adreno_device *adreno_dev,
 
 static unsigned int _preemption_show(struct adreno_device *adreno_dev)
 {
-	return adreno_is_preemption_execution_enabled(adreno_dev);
+	return adreno_is_preemption_enabled(adreno_dev);
 }
 
 static int _hwcg_store(struct adreno_device *adreno_dev,
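
The reworked _preemption_store() takes the device mutex before checking both the feature gate and the current bit value, so the no-op path and the toggle path are serialized the same way. A user-space sketch of that flow, with a pthread mutex standing in for the device mutex and the power-state transitions elided:

#include <pthread.h>
#include <stdio.h>

#define FEATURE_PREEMPTION 1
#define BIT_PREEMPTION     0

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long priv;
static unsigned int features = FEATURE_PREEMPTION;

static int preemption_store(unsigned int val)
{
	pthread_mutex_lock(&dev_mutex);

	/* bail if the feature is absent or the value is unchanged */
	if (!(features & FEATURE_PREEMPTION) ||
	    !!(priv & (1ul << BIT_PREEMPTION)) == !!val) {
		pthread_mutex_unlock(&dev_mutex);
		return 0;
	}

	/* suspend, flip, resume -- stands in for the pwrctrl transitions */
	priv ^= 1ul << BIT_PREEMPTION;

	pthread_mutex_unlock(&dev_mutex);
	return 0;
}

int main(void)
{
	preemption_store(1);
	printf("preemption bit: %lu\n", (priv >> BIT_PREEMPTION) & 1);
	return 0;
}
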
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 5d07380..f57fbb6 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -529,6 +529,16 @@ int kgsl_context_init(struct kgsl_device_private *dev_priv,
 	struct kgsl_device *device = dev_priv->device;
 	char name[64];
 	int ret = 0, id;
+	struct kgsl_process_private  *proc_priv = dev_priv->process_priv;
+
+	if (atomic_read(&proc_priv->ctxt_count) > KGSL_MAX_CONTEXTS_PER_PROC) {
+		KGSL_DRV_ERR(device,
+			"Per process context limit reached for pid %u",
+			dev_priv->process_priv->pid);
+		return -ENOSPC;
+	}
+
+	atomic_inc(&proc_priv->ctxt_count);
 
 	id = _kgsl_get_context_id(device);
 	if (id == -ENOSPC) {
@@ -547,7 +557,7 @@ int kgsl_context_init(struct kgsl_device_private *dev_priv,
 			KGSL_DRV_INFO(device,
 				"cannot have more than %zu contexts due to memstore limitation\n",
 				KGSL_MEMSTORE_MAX);
-
+		atomic_dec(&proc_priv->ctxt_count);
 		return id;
 	}
 
@@ -580,6 +590,7 @@ int kgsl_context_init(struct kgsl_device_private *dev_priv,
 
 out:
 	if (ret) {
+		atomic_dec(&proc_priv->ctxt_count);
 		write_lock(&device->context_lock);
 		idr_remove(&dev_priv->device->context_idr, id);
 		write_unlock(&device->context_lock);
@@ -669,6 +680,7 @@ kgsl_context_destroy(struct kref *kref)
 			device->pwrctrl.constraint.type = KGSL_CONSTRAINT_NONE;
 		}
 
+		atomic_dec(&context->proc_priv->ctxt_count);
 		idr_remove(&device->context_idr, context->id);
 		context->id = KGSL_CONTEXT_INVALID;
 	}
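
The per-process limit above uses an atomic counter checked before the increment. A user-space sketch with C11 atomics; this variant increments first and rolls back on failure, which also closes the small window in which concurrent creators could both pass a read-then-increment check:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_CONTEXTS_PER_PROC 200

static atomic_int ctxt_count;

static int context_create(void)
{
	if (atomic_fetch_add(&ctxt_count, 1) >= MAX_CONTEXTS_PER_PROC) {
		atomic_fetch_sub(&ctxt_count, 1);	/* roll back */
		return -1;				/* -ENOSPC */
	}
	return 0;
}

static void context_destroy(void)
{
	atomic_fetch_sub(&ctxt_count, 1);	/* mirrors kgsl_context_destroy() */
}

int main(void)
{
	int i, ok = 0;

	for (i = 0; i < 205; i++)
		if (context_create() == 0)
			ok++;
	printf("created %d contexts (limit %d)\n", ok, MAX_CONTEXTS_PER_PROC);
	context_destroy();
	return 0;
}
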
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 023e63e..f4a2de5 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -50,11 +50,12 @@
 /* The number of memstore arrays limits the number of contexts allowed.
  * If more contexts are needed, update multiple for MEMSTORE_SIZE
  */
-#define KGSL_MEMSTORE_SIZE	((int)(PAGE_SIZE * 2))
+#define KGSL_MEMSTORE_SIZE	((int)(PAGE_SIZE * 8))
 #define KGSL_MEMSTORE_GLOBAL	(0)
 #define KGSL_PRIORITY_MAX_RB_LEVELS 4
 #define KGSL_MEMSTORE_MAX	(KGSL_MEMSTORE_SIZE / \
 	sizeof(struct kgsl_devmemstore) - 1 - KGSL_PRIORITY_MAX_RB_LEVELS)
+#define KGSL_MAX_CONTEXTS_PER_PROC 200
 
 #define MEMSTORE_RB_OFFSET(rb, field)	\
 	KGSL_MEMSTORE_OFFSET(((rb)->id + KGSL_MEMSTORE_MAX), field)
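
Growing the memstore region from 2 to 8 pages scales KGSL_MEMSTORE_MAX proportionally, which is what makes the new 200-contexts-per-process cap reachable. A worked sketch of the arithmetic; the 64-byte sizeof(struct kgsl_devmemstore) is an assumption for illustration only:

#include <stdio.h>

#define PAGE_SIZE 4096
#define DEVMEMSTORE_SIZE 64	/* assumed sizeof(struct kgsl_devmemstore) */
#define PRIORITY_MAX_RB_LEVELS 4

int main(void)
{
	/* one slot is reserved for the global entry, plus one per RB level */
	int old_max = (PAGE_SIZE * 2) / DEVMEMSTORE_SIZE - 1 - PRIORITY_MAX_RB_LEVELS;
	int new_max = (PAGE_SIZE * 8) / DEVMEMSTORE_SIZE - 1 - PRIORITY_MAX_RB_LEVELS;

	printf("max contexts: %d -> %d\n", old_max, new_max);
	return 0;
}
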
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 6fca1e15..7c3bff7 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -441,6 +441,7 @@ struct kgsl_context {
  * @syncsource_idr: sync sources created by this process
  * @syncsource_lock: Spinlock to protect the syncsource idr
  * @fd_count: Counter for the number of FDs for this process
+ * @ctxt_count: Count for the number of contexts for this process
  */
 struct kgsl_process_private {
 	unsigned long priv;
@@ -460,6 +461,7 @@ struct kgsl_process_private {
 	struct idr syncsource_idr;
 	spinlock_t syncsource_lock;
 	int fd_count;
+	atomic_t ctxt_count;
 };
 
 /**
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index b354ef2..0338c5f 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -363,58 +363,19 @@ static int _attach_pt(struct kgsl_iommu_pt *iommu_pt,
 	return ret;
 }
 
-static int _lock_if_secure_mmu(struct kgsl_memdesc *memdesc,
-		struct kgsl_mmu *mmu)
-{
-	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
-
-	if (!kgsl_memdesc_is_secured(memdesc))
-		return 0;
-
-	if (!kgsl_mmu_is_secured(mmu))
-		return -EINVAL;
-
-	mutex_lock(&device->mutex);
-	if (kgsl_active_count_get(device)) {
-		mutex_unlock(&device->mutex);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static void _unlock_if_secure_mmu(struct kgsl_memdesc *memdesc,
-		struct kgsl_mmu *mmu)
-{
-	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
-
-	if (!kgsl_memdesc_is_secured(memdesc) || !kgsl_mmu_is_secured(mmu))
-		return;
-
-	kgsl_active_count_put(device);
-	mutex_unlock(&device->mutex);
-}
-
 static int _iommu_map_sync_pc(struct kgsl_pagetable *pt,
-		struct kgsl_memdesc *memdesc,
 		uint64_t gpuaddr, phys_addr_t physaddr,
 		uint64_t size, unsigned int flags)
 {
 	struct kgsl_iommu_pt *iommu_pt = pt->priv;
 	int ret;
 
-	ret = _lock_if_secure_mmu(memdesc, pt->mmu);
-	if (ret)
-		return ret;
-
 	_iommu_sync_mmu_pc(true);
 
 	ret = iommu_map(iommu_pt->domain, gpuaddr, physaddr, size, flags);
 
 	_iommu_sync_mmu_pc(false);
 
-	_unlock_if_secure_mmu(memdesc, pt->mmu);
-
 	if (ret) {
 		KGSL_CORE_ERR("map err: 0x%016llX, 0x%llx, 0x%x, %d\n",
 			gpuaddr, size, flags, ret);
@@ -425,15 +386,10 @@ static int _iommu_map_sync_pc(struct kgsl_pagetable *pt,
 }
 
 static int _iommu_unmap_sync_pc(struct kgsl_pagetable *pt,
-		struct kgsl_memdesc *memdesc, uint64_t addr, uint64_t size)
+		uint64_t addr, uint64_t size)
 {
 	struct kgsl_iommu_pt *iommu_pt = pt->priv;
 	size_t unmapped = 0;
-	int ret;
-
-	ret = _lock_if_secure_mmu(memdesc, pt->mmu);
-	if (ret)
-		return ret;
 
 	_iommu_sync_mmu_pc(true);
 
@@ -441,8 +397,6 @@ static int _iommu_unmap_sync_pc(struct kgsl_pagetable *pt,
 
 	_iommu_sync_mmu_pc(false);
 
-	_unlock_if_secure_mmu(memdesc, pt->mmu);
-
 	if (unmapped != size) {
 		KGSL_CORE_ERR("unmap err: 0x%016llx, 0x%llx, %zd\n",
 			addr, size, unmapped);
@@ -453,8 +407,7 @@ static int _iommu_unmap_sync_pc(struct kgsl_pagetable *pt,
 }
 
 static int _iommu_map_sg_offset_sync_pc(struct kgsl_pagetable *pt,
-		uint64_t addr, struct kgsl_memdesc *memdesc,
-		struct scatterlist *sg, int nents,
+		uint64_t addr, struct scatterlist *sg, int nents,
 		uint64_t offset, uint64_t size, unsigned int flags)
 {
 	struct kgsl_iommu_pt *iommu_pt = pt->priv;
@@ -466,10 +419,6 @@ static int _iommu_map_sg_offset_sync_pc(struct kgsl_pagetable *pt,
 	phys_addr_t physaddr;
 	int ret;
 
-	ret = _lock_if_secure_mmu(memdesc, pt->mmu);
-	if (ret)
-		return ret;
-
 	_iommu_sync_mmu_pc(true);
 
 	for_each_sg(sg, s, nents, i) {
@@ -509,11 +458,9 @@ static int _iommu_map_sg_offset_sync_pc(struct kgsl_pagetable *pt,
 
 	_iommu_sync_mmu_pc(false);
 
-	_unlock_if_secure_mmu(memdesc, pt->mmu);
-
 	if (size != 0) {
 		/* Cleanup on error */
-		_iommu_unmap_sync_pc(pt, memdesc, addr, mapped);
+		_iommu_unmap_sync_pc(pt, addr, mapped);
 		KGSL_CORE_ERR(
 			"map sg offset err: 0x%016llX, %d, %x, %zd\n",
 			addr, nents, flags, mapped);
@@ -524,17 +471,11 @@ static int _iommu_map_sg_offset_sync_pc(struct kgsl_pagetable *pt,
 }
 
 static int _iommu_map_sg_sync_pc(struct kgsl_pagetable *pt,
-		uint64_t addr, struct kgsl_memdesc *memdesc,
-		struct scatterlist *sg, int nents,
+		uint64_t addr, struct scatterlist *sg, int nents,
 		unsigned int flags)
 {
 	struct kgsl_iommu_pt *iommu_pt = pt->priv;
 	size_t mapped;
-	int ret;
-
-	ret = _lock_if_secure_mmu(memdesc, pt->mmu);
-	if (ret)
-		return ret;
 
 	_iommu_sync_mmu_pc(true);
 
@@ -542,8 +483,6 @@ static int _iommu_map_sg_sync_pc(struct kgsl_pagetable *pt,
 
 	_iommu_sync_mmu_pc(false);
 
-	_unlock_if_secure_mmu(memdesc, pt->mmu);
-
 	if (mapped == 0) {
 		KGSL_CORE_ERR("map sg err: 0x%016llX, %d, %x, %zd\n",
 			addr, nents, flags, mapped);
@@ -1754,7 +1693,7 @@ kgsl_iommu_unmap_offset(struct kgsl_pagetable *pt,
 	if (addr == 0)
 		return -EINVAL;
 
-	return _iommu_unmap_sync_pc(pt, memdesc, addr + offset, size);
+	return _iommu_unmap_sync_pc(pt, addr + offset, size);
 }
 
 static int
@@ -1819,7 +1758,7 @@ static int _iommu_map_guard_page(struct kgsl_pagetable *pt,
 		physaddr = page_to_phys(kgsl_guard_page);
 	}
 
-	return _iommu_map_sync_pc(pt, memdesc, gpuaddr, physaddr,
+	return _iommu_map_sync_pc(pt, gpuaddr, physaddr,
 			kgsl_memdesc_guard_page_size(memdesc),
 			protflags & ~IOMMU_WRITE);
 }
@@ -1864,14 +1803,13 @@ kgsl_iommu_map(struct kgsl_pagetable *pt,
 	if (IS_ERR(sgt))
 		return PTR_ERR(sgt);
 
-	ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, sgt->sgl,
-				sgt->nents, flags);
+	ret = _iommu_map_sg_sync_pc(pt, addr, sgt->sgl, sgt->nents, flags);
 	if (ret)
 		goto done;
 
 	ret = _iommu_map_guard_page(pt, memdesc, addr + size, flags);
 	if (ret)
-		_iommu_unmap_sync_pc(pt, memdesc, addr, size);
+		_iommu_unmap_sync_pc(pt, addr, size);
 
 done:
 	if (memdesc->pages != NULL)
@@ -1910,8 +1848,7 @@ static int kgsl_iommu_sparse_dummy_map(struct kgsl_pagetable *pt,
 			0, size, GFP_KERNEL);
 	if (ret == 0) {
 		ret = _iommu_map_sg_sync_pc(pt, memdesc->gpuaddr + offset,
-				memdesc, sgt.sgl, sgt.nents,
-				IOMMU_READ | IOMMU_NOEXEC);
+				sgt.sgl, sgt.nents, IOMMU_READ | IOMMU_NOEXEC);
 		sg_free_table(&sgt);
 	}
 
@@ -1964,7 +1901,7 @@ static int _map_to_one_page(struct kgsl_pagetable *pt, uint64_t addr,
 	ret = sg_alloc_table_from_pages(&sgt, pages, count,
 			0, size, GFP_KERNEL);
 	if (ret == 0) {
-		ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, sgt.sgl,
+		ret = _iommu_map_sg_sync_pc(pt, addr, sgt.sgl,
 				sgt.nents, map_flags);
 		sg_free_table(&sgt);
 	}
@@ -2013,7 +1950,7 @@ static int kgsl_iommu_map_offset(struct kgsl_pagetable *pt,
 				memdesc, physoffset, size, protflags);
 	else
 		ret = _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
-				memdesc, sgt->sgl, sgt->nents,
+				sgt->sgl, sgt->nents,
 				physoffset, size, protflags);
 
 	if (memdesc->pages != NULL)
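
With the secure-mmu locking dropped, the scatter-gather map path keeps only its unmap-on-partial-failure cleanup. A sketch of that shape; map_one()/unmap_range() are hypothetical stand-ins for iommu_map()/iommu_unmap(), not real API:

#include <stdio.h>

/* fail on the 4th chunk to exercise the error path */
static int map_one(int i)
{
	return i == 3 ? -1 : 0;
}

static void unmap_range(unsigned long addr, unsigned long len)
{
	printf("unmap 0x%lx (0x%lx bytes)\n", addr, len);
}

/*
 * Map nents chunks starting at addr; on a partial failure unmap
 * exactly the bytes mapped so far, mirroring the
 * _iommu_unmap_sync_pc(pt, addr, mapped) cleanup above.
 */
static int map_chunks(unsigned long addr, int nents, unsigned long chunk)
{
	unsigned long mapped = 0;
	int i;

	for (i = 0; i < nents; i++) {
		if (map_one(i)) {
			unmap_range(addr, mapped);	/* cleanup on error */
			return -1;
		}
		mapped += chunk;
	}
	return 0;
}

int main(void)
{
	printf("map %s\n",
	       map_chunks(0x1000, 8, 0x1000) ? "failed (cleaned up)" : "ok");
	return 0;
}
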
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 8d242f8..95c887c 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -95,6 +95,14 @@
 	  programmable ATB replicator sends the ATB trace stream from the
 	  ETB/ETF to the TPIUi and ETR.
 
+config CORESIGHT_DBGUI
+	bool "CoreSight DebugUI driver"
+	help
+	  This driver provides support for DebugUI, which helps capture
+	  the value at a specified address. It allows configuring DebugUI
+	  to select the specified address and trigger mode based on user
+	  input.
+
 config CORESIGHT_STM
 	bool "CoreSight System Trace Macrocell driver"
 	depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index cb47ecd..157ec9c 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -22,6 +22,7 @@
 obj-$(CONFIG_CORESIGHT_TPDM) += coresight-tpdm.o
 obj-$(CONFIG_CORESIGHT_EVENT) += coresight-event.o
 obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
+obj-$(CONFIG_CORESIGHT_DBGUI) += coresight-dbgui.o
 obj-$(CONFIG_CORESIGHT_TGU) += coresight-tgu.o
 obj-$(CONFIG_CORESIGHT_CSR) += coresight-csr.o
 obj-$(CONFIG_CORESIGHT_HWEVENT) += coresight-hwevent.o
diff --git a/drivers/hwtracing/coresight/coresight-dbgui.c b/drivers/hwtracing/coresight/coresight-dbgui.c
new file mode 100644
index 0000000..e4feea2
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-dbgui.c
@@ -0,0 +1,965 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/mutex.h>
+#include <linux/coresight.h>
+#include <soc/qcom/memory_dump.h>
+
+#include "coresight-priv.h"
+
+#define dbgui_writel(drvdata, val, off) \
+			 __raw_writel((val), drvdata->base + off)
+#define dbgui_readl(drvdata, off)	__raw_readl(drvdata->base + off)
+
+#define DBGUI_LOCK(drvdata)						\
+do {									\
+	mb(); /* ensure configuration takes effect before we lock it */	\
+	dbgui_writel(drvdata, 0x0, CORESIGHT_LAR);			\
+} while (0)
+
+#define DBGUI_UNLOCK(drvdata)						\
+do {									\
+	dbgui_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR);	\
+	mb(); /* ensure unlock takes effect before we configure */	\
+} while (0)
+
+/* DBGUI registers */
+#define DBGUI_SECURE		(0x000)
+#define DBGUI_CTL		(0x004)
+#define DBGUI_CTL_MASK		(0x008)
+#define DBGUI_SWTRIG		(0x00C)
+#define DBGUI_STATUS		(0x010)
+#define DBGUI_HWE_MASK		(0x014)
+#define DBGUI_CTR_VAL		(0x018)
+#define DBGUI_CTR_EN		(0x01C)
+#define DBGUI_NUM_REGS_RD	(0x020)
+#define DBGUI_ATB_REG		(0x024)
+
+#define DBGUI_ADDRn(drvdata, n)	((drvdata)->addr_offset + 4 * (n))
+#define DBGUI_DATAn(drvdata, n)	((drvdata)->data_offset + 4 * (n))
+
+#define DBGUI_TRIG_MASK		0xF0001
+#define DBGUI_MAX_ADDR_VAL		64
+#define DBGUI_TS_VALID			BIT(15)
+#define DBGUI_ATB_TRACE_EN		BIT(0)
+#define DBGUI_TIMER_CTR_OVERRIDE	BIT(1)
+#define DBGUI_TIMER_CTR_EN		BIT(0)
+
+/* ATID for DBGUI */
+#define APB_ATID		50
+#define AHB_ATID		52
+
+enum dbgui_trig_type {
+	DBGUI_TRIG_SW =	BIT(0),
+	DBGUI_TRIG_TIMER =	BIT(16),
+	DBGUI_TRIG_HWE =	BIT(17),
+	DBGUI_TRIG_WDOG =	BIT(18),
+	DBGUI_TRIG_CTI =	BIT(19),
+};
+
+struct dbgui_drvdata {
+	void __iomem		*base;
+	bool			enable;
+	uint32_t		addr_offset;
+	uint32_t		data_offset;
+	uint32_t		size;
+	struct device		*dev;
+	struct coresight_device	*csdev;
+	struct clk		*clk;
+	struct mutex		mutex;
+	uint32_t		trig_mask;
+	bool			capture_enable;
+	bool			ts_enable;
+	bool			timer_override_enable;
+	bool			handoff_enable;
+	uint32_t		nr_apb_regs;
+	uint32_t		nr_ahb_regs;
+	uint32_t		hwe_mask;
+	uint32_t		addr_idx;
+	uint32_t		timeout_val;
+	uint32_t		addr_val[DBGUI_MAX_ADDR_VAL];
+	uint32_t		data_val[DBGUI_MAX_ADDR_VAL];
+	struct  msm_dump_data	reg_data;
+};
+
+static struct dbgui_drvdata *dbgui_drvdata;
+
+static void dbgui_enable_atb_trace(struct dbgui_drvdata *drvdata)
+{
+	uint32_t reg;
+
+	reg = dbgui_readl(drvdata, DBGUI_ATB_REG);
+	reg |= DBGUI_ATB_TRACE_EN | APB_ATID << 8 | AHB_ATID << 1;
+	dbgui_writel(drvdata, reg, DBGUI_ATB_REG);
+}
+
+static void dbgui_disable_atb_trace(struct dbgui_drvdata *drvdata)
+{
+	uint32_t reg;
+
+	reg = dbgui_readl(drvdata, DBGUI_ATB_REG);
+	reg &= ~DBGUI_ATB_TRACE_EN;
+	dbgui_writel(drvdata, reg, DBGUI_ATB_REG);
+}
+
+static void dbgui_enable_timestamp(struct dbgui_drvdata *drvdata)
+{
+	uint32_t reg;
+
+	reg = dbgui_readl(drvdata, DBGUI_ATB_REG);
+	reg |= DBGUI_TS_VALID;
+	dbgui_writel(drvdata, reg, DBGUI_ATB_REG);
+}
+
+static void dbgui_disable_timestamp(struct dbgui_drvdata *drvdata)
+{
+	uint32_t reg;
+
+	reg = dbgui_readl(drvdata, DBGUI_ATB_REG);
+	reg &= ~DBGUI_TS_VALID;
+	dbgui_writel(drvdata, reg, DBGUI_ATB_REG);
+}
+
+static void dbgui_wait_for_pending_actions(struct dbgui_drvdata *drvdata)
+{
+	int count;
+	uint32_t reg_val;
+
+	for (count = TIMEOUT_US; reg_val =
+			dbgui_readl(drvdata, DBGUI_STATUS),
+			BMVAL(reg_val, 4, 7) != 0
+			&& BVAL(reg_val, 0) != 0 && count > 0; count--)
+		udelay(1);
+
+	WARN(count == 0,
+		"timeout while waiting for pending action: STATUS %#x\n",
+		dbgui_readl(drvdata, DBGUI_STATUS));
+}
+
+static void __dbgui_capture_enable(struct dbgui_drvdata *drvdata)
+{
+	int i;
+	uint32_t reg_val;
+
+	DBGUI_UNLOCK(drvdata);
+
+	dbgui_wait_for_pending_actions(drvdata);
+	dbgui_writel(drvdata, 0x1, DBGUI_SECURE);
+	dbgui_writel(drvdata, 0x1, DBGUI_CTL);
+
+	reg_val = dbgui_readl(drvdata, DBGUI_NUM_REGS_RD);
+	reg_val &= ~0xFF;
+	reg_val |= (drvdata->nr_apb_regs | drvdata->nr_ahb_regs << 8);
+	dbgui_writel(drvdata, reg_val, DBGUI_NUM_REGS_RD);
+
+	for (i = 0; i < drvdata->size; i++) {
+		if (drvdata->addr_val[i])
+			dbgui_writel(drvdata, drvdata->addr_val[i],
+				     DBGUI_ADDRn(drvdata, i));
+	}
+
+	if (!(drvdata->trig_mask & DBGUI_TRIG_TIMER) && drvdata->timeout_val) {
+		dbgui_writel(drvdata, drvdata->timeout_val, DBGUI_CTR_VAL);
+
+		reg_val = dbgui_readl(drvdata, DBGUI_CTR_EN);
+		if (drvdata->timer_override_enable)
+			reg_val |= DBGUI_TIMER_CTR_OVERRIDE;
+
+		reg_val |= DBGUI_TIMER_CTR_EN;
+		dbgui_writel(drvdata, reg_val, DBGUI_CTR_EN);
+	}
+
+	if (!(drvdata->trig_mask & DBGUI_TRIG_HWE))
+		dbgui_writel(drvdata, drvdata->hwe_mask, DBGUI_HWE_MASK);
+
+	dbgui_writel(drvdata, drvdata->trig_mask, DBGUI_CTL_MASK);
+
+	DBGUI_LOCK(drvdata);
+};
+
+static int dbgui_capture_enable(struct dbgui_drvdata *drvdata)
+{
+	int ret = 0;
+
+	mutex_lock(&drvdata->mutex);
+	if (drvdata->capture_enable)
+		goto out;
+
+	if (drvdata->trig_mask == DBGUI_TRIG_MASK) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		goto out;
+
+	if (!drvdata->handoff_enable)
+		__dbgui_capture_enable(drvdata);
+	drvdata->capture_enable = true;
+	mutex_unlock(&drvdata->mutex);
+
+	dev_info(drvdata->dev, "DebugUI capture enabled\n");
+	return 0;
+out:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+static void __dbgui_capture_disable(struct dbgui_drvdata *drvdata)
+{
+	DBGUI_UNLOCK(drvdata);
+
+	dbgui_wait_for_pending_actions(drvdata);
+
+	/* mask all the triggers */
+	dbgui_writel(drvdata, DBGUI_TRIG_MASK, DBGUI_CTL_MASK);
+
+	DBGUI_LOCK(drvdata);
+}
+
+static int dbgui_capture_disable(struct dbgui_drvdata *drvdata)
+{
+	int ret = 0;
+
+	mutex_lock(&drvdata->mutex);
+	if (!drvdata->capture_enable)
+		goto out;
+
+	/* don't allow capture disable while it is enabled as a trace source */
+	if (drvdata->enable) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	__dbgui_capture_disable(drvdata);
+	clk_disable_unprepare(drvdata->clk);
+	drvdata->capture_enable = false;
+	mutex_unlock(&drvdata->mutex);
+
+	dev_info(drvdata->dev, "DebugUI capture disabled\n");
+	return 0;
+out:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+static int __dbgui_enable(struct dbgui_drvdata *drvdata)
+{
+	DBGUI_UNLOCK(drvdata);
+
+	dbgui_enable_atb_trace(drvdata);
+	if (drvdata->ts_enable)
+		dbgui_enable_timestamp(drvdata);
+
+	DBGUI_LOCK(drvdata);
+	return 0;
+}
+
+static int dbgui_enable(struct coresight_device *csdev,
+			struct perf_event *event, u32 mode)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	mutex_lock(&drvdata->mutex);
+
+	if (!drvdata->capture_enable) {
+		mutex_unlock(&drvdata->mutex);
+		return -EPERM;
+	}
+
+	__dbgui_enable(drvdata);
+	drvdata->enable = true;
+	mutex_unlock(&drvdata->mutex);
+
+	dev_info(drvdata->dev, "DebugUI tracing enabled\n");
+	return 0;
+}
+
+static void __dbgui_disable(struct dbgui_drvdata *drvdata)
+{
+	DBGUI_UNLOCK(drvdata);
+
+	dbgui_disable_atb_trace(drvdata);
+	if (drvdata->ts_enable)
+		dbgui_disable_timestamp(drvdata);
+
+	DBGUI_LOCK(drvdata);
+}
+
+static void dbgui_disable(struct coresight_device *csdev,
+			  struct perf_event *event)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	mutex_lock(&drvdata->mutex);
+	__dbgui_disable(drvdata);
+	drvdata->enable = false;
+	mutex_unlock(&drvdata->mutex);
+
+	dev_info(drvdata->dev, "DebugUI tracing disabled\n");
+}
+
+static int dbgui_trace_id(struct coresight_device *csdev)
+{
+	return 0;
+}
+
+static const struct coresight_ops_source dbgui_source_ops = {
+	.trace_id = dbgui_trace_id,
+	.enable = dbgui_enable,
+	.disable = dbgui_disable,
+};
+
+/* DebugUI may already be configured for capture, so retrieve current state */
+static void dbgui_handoff(struct dbgui_drvdata *drvdata)
+{
+	uint32_t val;
+	int i;
+
+	drvdata->handoff_enable = true;
+
+	drvdata->trig_mask = dbgui_readl(drvdata, DBGUI_CTL_MASK);
+	drvdata->hwe_mask = dbgui_readl(drvdata, DBGUI_HWE_MASK);
+	drvdata->timeout_val = dbgui_readl(drvdata, DBGUI_CTR_VAL);
+
+	val = dbgui_readl(drvdata, DBGUI_NUM_REGS_RD);
+	drvdata->nr_ahb_regs = (val >> 8) & 0xF;
+	drvdata->nr_apb_regs = val & 0xF;
+
+	val = dbgui_readl(drvdata, DBGUI_ATB_REG);
+	if (val & DBGUI_TS_VALID)
+		drvdata->ts_enable = true;
+
+	val = dbgui_readl(drvdata, DBGUI_CTR_EN);
+	if (val & DBGUI_TIMER_CTR_OVERRIDE)
+		drvdata->timer_override_enable = true;
+
+	for (i = 0; i < drvdata->size; i++)
+		drvdata->addr_val[i] = dbgui_readl(drvdata,
+						   DBGUI_ADDRn(drvdata, i));
+
+	if (drvdata->trig_mask != DBGUI_TRIG_MASK)
+		dbgui_capture_enable(drvdata);
+
+	drvdata->handoff_enable = false;
+}
+
+static const struct coresight_ops dbgui_cs_ops = {
+	.source_ops = &dbgui_source_ops,
+};
+
+static ssize_t dbgui_store_trig_mask(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf,
+				     size_t size)
+{
+	uint32_t val;
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtou32(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->trig_mask = val & DBGUI_TRIG_MASK;
+	mutex_unlock(&drvdata->mutex);
+
+	return size;
+}
+
+static ssize_t dbgui_show_trig_mask(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n", drvdata->trig_mask);
+};
+static DEVICE_ATTR(trig_mask, 0644,
+		   dbgui_show_trig_mask, dbgui_store_trig_mask);
+
+static ssize_t dbgui_store_timer_override_enable(struct device *dev,
+						 struct device_attribute *attr,
+						 const char *buf,
+						 size_t size)
+{
+	uint32_t val;
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtou32(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->timer_override_enable = val ? true : false;
+	mutex_unlock(&drvdata->mutex);
+
+	return size;
+}
+
+static ssize_t dbgui_show_timer_override_enable(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n",
+			 drvdata->timer_override_enable);
+};
+static DEVICE_ATTR(timer_override_enable, 0644,
+		   dbgui_show_timer_override_enable,
+		   dbgui_store_timer_override_enable);
+
+static ssize_t dbgui_store_ts_enable(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf,
+				     size_t size)
+{
+	uint32_t val;
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtou32(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->ts_enable = val ? true : false;
+	mutex_unlock(&drvdata->mutex);
+
+	return size;
+}
+
+static ssize_t dbgui_show_ts_enable(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n", drvdata->ts_enable);
+};
+static DEVICE_ATTR(ts_enable, 0644,
+		   dbgui_show_ts_enable, dbgui_store_ts_enable);
+
+static ssize_t dbgui_store_hwe_mask(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t size)
+{
+	uint32_t val;
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtou32(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->hwe_mask = val;
+	mutex_unlock(&drvdata->mutex);
+
+	return size;
+}
+
+static ssize_t dbgui_show_hwe_mask(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n", drvdata->hwe_mask);
+};
+static DEVICE_ATTR(hwe_mask, 0644,
+		   dbgui_show_hwe_mask, dbgui_store_hwe_mask);
+
+static ssize_t dbgui_store_sw_trig(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf,
+				   size_t size)
+{
+	uint32_t val;
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtou32(buf, 16, &val))
+		return -EINVAL;
+
+	if (!val)
+		return 0;
+
+	mutex_lock(&drvdata->mutex);
+	if (!drvdata->capture_enable) {
+		mutex_unlock(&drvdata->mutex);
+		return -EINVAL;
+	}
+
+	dbgui_wait_for_pending_actions(drvdata);
+	DBGUI_UNLOCK(drvdata);
+
+	/* clear status register and free the sequencer */
+	dbgui_writel(drvdata, 0x1, DBGUI_CTL);
+
+	/* fire a software trigger */
+	dbgui_writel(drvdata, 0x1, DBGUI_SWTRIG);
+
+	DBGUI_LOCK(drvdata);
+	mutex_unlock(&drvdata->mutex);
+
+	return size;
+}
+static DEVICE_ATTR(sw_trig, 0200, NULL, dbgui_store_sw_trig);
+
+static ssize_t dbgui_store_nr_ahb_regs(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t size)
+{
+	uint32_t val;
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtou32(buf, 16, &val))
+		return -EINVAL;
+
+	if (val > drvdata->size)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->nr_ahb_regs = val;
+
+	/*
+	 * nr_ahb_regs + nr_apb_regs must not exceed drvdata->size. If
+	 * the sum would exceed it, the most recent setting takes
+	 * precedence and the other count is trimmed to fit.
+	 */
+	if (drvdata->nr_apb_regs + drvdata->nr_ahb_regs > drvdata->size)
+		drvdata->nr_apb_regs = drvdata->size -
+					drvdata->nr_ahb_regs;
+
+	mutex_unlock(&drvdata->mutex);
+
+	return size;
+}
+
+static ssize_t dbgui_show_nr_ahb_regs(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n", drvdata->nr_ahb_regs);
+};
+static DEVICE_ATTR(nr_ahb_regs, 0644, dbgui_show_nr_ahb_regs,
+		   dbgui_store_nr_ahb_regs);
+
+static ssize_t dbgui_store_nr_apb_regs(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t size)
+{
+	uint32_t val;
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtou32(buf, 16, &val))
+		return -EINVAL;
+
+	if (val > drvdata->size)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->nr_apb_regs = val;
+
+	if (drvdata->nr_apb_regs + drvdata->nr_ahb_regs > drvdata->size)
+		drvdata->nr_ahb_regs = drvdata->size -
+					drvdata->nr_apb_regs;
+
+	mutex_unlock(&drvdata->mutex);
+
+	return size;
+}
+
+static ssize_t dbgui_show_nr_apb_regs(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n", drvdata->nr_apb_regs);
+};
+static DEVICE_ATTR(nr_apb_regs, 0644, dbgui_show_nr_apb_regs,
+		   dbgui_store_nr_apb_regs);
+
+static ssize_t dbgui_show_size(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n", drvdata->size);
+};
+static DEVICE_ATTR(size, 0444, dbgui_show_size, NULL);
+
+static ssize_t dbgui_store_timeout_val(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t size)
+{
+	uint32_t val;
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtou32(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->timeout_val = val;
+	mutex_unlock(&drvdata->mutex);
+
+	return size;
+}
+
+static ssize_t dbgui_show_timeout_val(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n", drvdata->timeout_val);
+};
+static DEVICE_ATTR(timeout_val, 0644, dbgui_show_timeout_val,
+		   dbgui_store_timeout_val);
+
+static ssize_t dbgui_store_addr_idx(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t size)
+{
+	uint32_t val;
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtou32(buf, 16, &val))
+		return -EINVAL;
+
+	if (val >= drvdata->size)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->addr_idx = val;
+	mutex_unlock(&drvdata->mutex);
+
+	return size;
+}
+
+static ssize_t dbgui_show_addr_idx(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n", drvdata->addr_idx);
+};
+static DEVICE_ATTR(addr_idx, 0644, dbgui_show_addr_idx,
+		   dbgui_store_addr_idx);
+
+static ssize_t dbgui_store_addr_val(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t size)
+{
+	uint32_t val;
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (kstrtou32(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->addr_val[drvdata->addr_idx] = val;
+	mutex_unlock(&drvdata->mutex);
+
+	return size;
+}
+
+static ssize_t dbgui_show_addr_val(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t len = 0;
+	int i;
+
+	mutex_lock(&drvdata->mutex);
+	for (i = 0; i < drvdata->size; i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				 "[%02d]:0x%08x%s\n",
+				 i, drvdata->addr_val[i],
+				 drvdata->addr_idx == i ?
+				 " *" : "");
+	mutex_unlock(&drvdata->mutex);
+
+	return len;
+};
+static DEVICE_ATTR(addr_val, 0644, dbgui_show_addr_val,
+		   dbgui_store_addr_val);
+
+static ssize_t dbgui_show_data_val(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t len = 0;
+	int i;
+	uint32_t val, trig_mask;
+
+	if (!drvdata->capture_enable)
+		return 0;
+
+	dbgui_wait_for_pending_actions(drvdata);
+
+	mutex_lock(&drvdata->mutex);
+
+	DBGUI_UNLOCK(drvdata);
+
+	/*
+	 * If the timer trigger is enabled, data might change while we read it.
+	 * We mask all the triggers here to avoid this.
+	 */
+	trig_mask = dbgui_readl(drvdata, DBGUI_CTL_MASK);
+	dbgui_writel(drvdata, DBGUI_TRIG_MASK, DBGUI_CTL_MASK);
+
+	for (i = 0; i < drvdata->size; i++) {
+		val = dbgui_readl(drvdata, DBGUI_DATAn(drvdata, i));
+		drvdata->data_val[i] = val;
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				 "[%02d]:0x%08x\n",
+				 i, drvdata->data_val[i]);
+	}
+	dbgui_writel(drvdata, trig_mask, DBGUI_CTL_MASK);
+
+	DBGUI_LOCK(drvdata);
+
+	mutex_unlock(&drvdata->mutex);
+
+	return len;
+};
+static DEVICE_ATTR(data_val, 0444, dbgui_show_data_val, NULL);
+
+static ssize_t dbgui_store_capture_enable(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t size)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	uint32_t val;
+	int ret;
+
+	if (kstrtou32(buf, 16, &val))
+		return -EINVAL;
+
+	if (val)
+		ret = dbgui_capture_enable(drvdata);
+	else
+		ret = dbgui_capture_disable(drvdata);
+
+	if (ret)
+		return ret;
+	return size;
+}
+
+static ssize_t dbgui_show_capture_enable(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct dbgui_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n", drvdata->capture_enable);
+};
+static DEVICE_ATTR(capture_enable, 0644,
+		   dbgui_show_capture_enable,
+		   dbgui_store_capture_enable);
+
+static struct attribute *dbgui_attrs[] = {
+	&dev_attr_sw_trig.attr,
+	&dev_attr_trig_mask.attr,
+	&dev_attr_capture_enable.attr,
+	&dev_attr_ts_enable.attr,
+	&dev_attr_hwe_mask.attr,
+	&dev_attr_timer_override_enable.attr,
+	&dev_attr_size.attr,
+	&dev_attr_nr_ahb_regs.attr,
+	&dev_attr_nr_apb_regs.attr,
+	&dev_attr_timeout_val.attr,
+	&dev_attr_addr_idx.attr,
+	&dev_attr_addr_val.attr,
+	&dev_attr_data_val.attr,
+	NULL,
+};
+
+static struct attribute_group dbgui_attr_grp = {
+	.attrs = dbgui_attrs,
+};
+
+static const struct attribute_group *dbgui_attr_grps[] = {
+	&dbgui_attr_grp,
+	NULL,
+};
+
+static int dbgui_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct dbgui_drvdata *drvdata;
+	struct resource *res;
+	struct coresight_desc *desc;
+	struct msm_dump_entry dump_entry;
+	void *baddr;
+
+	pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+	pdev->dev.platform_data = pdata;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+	mutex_init(&drvdata->mutex);
+
+	drvdata->clk = devm_clk_get(dev, "apb_pclk");
+	if (IS_ERR(drvdata->clk))
+		return PTR_ERR(drvdata->clk);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbgui-base");
+	if (!res) {
+		dev_info(dev, "DBGUI base not specified\n");
+		return -ENODEV;
+	}
+
+	drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drvdata->base)
+		return -ENOMEM;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		return ret;
+
+	if (!coresight_authstatus_enabled(drvdata->base))
+		goto err;
+
+	clk_disable_unprepare(drvdata->clk);
+
+	baddr = devm_kzalloc(dev, resource_size(res), GFP_KERNEL);
+	if (baddr) {
+		drvdata->reg_data.addr = virt_to_phys(baddr);
+		drvdata->reg_data.len = resource_size(res);
+		dump_entry.id = MSM_DUMP_DATA_DBGUI_REG;
+		dump_entry.addr = virt_to_phys(&drvdata->reg_data);
+		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
+					     &dump_entry);
+		if (ret) {
+			devm_kfree(dev, baddr);
+			dev_err(dev, "DBGUI REG dump setup failed\n");
+		}
+	} else {
+		dev_err(dev, "DBGUI REG dump allocation failed\n");
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+			"qcom,dbgui-addr-offset",
+			&drvdata->addr_offset);
+	if (ret)
+		return -EINVAL;
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+			"qcom,dbgui-data-offset",
+			&drvdata->data_offset);
+	if (ret)
+		return -EINVAL;
+
+	if (drvdata->addr_offset >= resource_size(res)
+			|| drvdata->data_offset >= resource_size(res)) {
+		dev_err(dev, "Invalid address or data offset\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+			"qcom,dbgui-size",
+			&drvdata->size);
+	if (ret || drvdata->size > DBGUI_MAX_ADDR_VAL)
+		return -EINVAL;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		return ret;
+
+	dbgui_handoff(drvdata);
+	clk_disable_unprepare(drvdata->clk);
+	/*
+	 * Provide addr_offset, data_offset and size via a global variable.
+	 * NOTE: only a single dbgui device is supported for now.
+	 */
+	dbgui_drvdata = drvdata;
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
+	desc->ops = &dbgui_cs_ops;
+	desc->pdata = pdev->dev.platform_data;
+	desc->dev = &pdev->dev;
+	desc->groups = dbgui_attr_grps;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev))
+		return PTR_ERR(drvdata->csdev);
+
+	dev_info(dev, "DebugUI initializaed\n");
+	return 0;
+err:
+	clk_disable_unprepare(drvdata->clk);
+	return -EPERM;
+}
+
+static int dbgui_remove(struct platform_device *pdev)
+{
+	struct dbgui_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	coresight_unregister(drvdata->csdev);
+	return 0;
+}
+
+static const struct of_device_id dbgui_match[] = {
+	{.compatible = "qcom,coresight-dbgui"},
+	{}
+};
+
+static struct platform_driver dbgui_driver = {
+	.probe          = dbgui_probe,
+	.remove         = dbgui_remove,
+	.driver = {
+		.name   = "coresight-dbgui",
+		.owner  = THIS_MODULE,
+		.of_match_table = dbgui_match,
+	},
+};
+
+static int __init dbgui_init(void)
+{
+	return platform_driver_register(&dbgui_driver);
+}
+module_init(dbgui_init);
+
+static void __exit dbgui_exit(void)
+{
+	return platform_driver_unregister(&dbgui_driver);
+}
+module_exit(dbgui_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight DebugUI driver");
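
dbgui_show_data_val() above saves the current trigger mask, masks every trigger source while reading the captured data, then restores the caller's mask so an armed timer trigger cannot change the capture mid-read. A user-space sketch of that save/mask/restore bracket, with plain variables in place of the DBGUI registers:

#include <stdio.h>

#define TRIG_MASK_ALL 0xF0001

static unsigned int ctl_mask;		/* stands in for DBGUI_CTL_MASK */
static unsigned int data[4] = {1, 2, 3, 4};

static void read_capture(void)
{
	unsigned int saved = ctl_mask;	/* save the current trigger mask */
	int i;

	ctl_mask = TRIG_MASK_ALL;	/* mask every trigger source */
	for (i = 0; i < 4; i++)
		printf("[%02d]:0x%08x\n", i, data[i]);
	ctl_mask = saved;		/* restore the caller's mask */
}

int main(void)
{
	ctl_mask = 0x1;			/* SW trigger enabled */
	read_capture();
	return 0;
}
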
diff --git a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c
index e174102..108ed032 100644
--- a/drivers/input/misc/hbtp_input.c
+++ b/drivers/input/misc/hbtp_input.c
@@ -82,6 +82,7 @@ struct hbtp_data {
 	bool override_disp_coords;
 	bool manage_afe_power_ana;
 	bool manage_power_dig;
+	bool regulator_enabled;
 	u32 power_on_delay;
 	u32 power_off_delay;
 	bool manage_pin_ctrl;
@@ -360,6 +361,11 @@ static int hbtp_pdev_power_on(struct hbtp_data *hbtp, bool on)
 	if (!on)
 		goto reg_off;
 
+	if (hbtp->regulator_enabled) {
+		pr_debug("%s: regulator already enabled\n", __func__);
+		return 0;
+	}
+
 	if (hbtp->vcc_ana) {
 		ret = reg_set_load_check(hbtp->vcc_ana,
 			hbtp->afe_load_ua);
@@ -403,9 +409,16 @@ static int hbtp_pdev_power_on(struct hbtp_data *hbtp, bool on)
 		}
 	}
 
+	hbtp->regulator_enabled = true;
+
 	return 0;
 
 reg_off:
+	if (!hbtp->regulator_enabled) {
+		pr_debug("%s: regulator not enabled\n", __func__);
+		return 0;
+	}
+
 	if (hbtp->vcc_dig) {
 		reg_set_load_check(hbtp->vcc_dig, 0);
 		regulator_disable(hbtp->vcc_dig);
@@ -422,6 +435,9 @@ static int hbtp_pdev_power_on(struct hbtp_data *hbtp, bool on)
 		reg_set_load_check(hbtp->vcc_ana, 0);
 		regulator_disable(hbtp->vcc_ana);
 	}
+
+	hbtp->regulator_enabled = false;
+
 	return 0;
 }
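
The regulator_enabled flag makes power-on and power-off idempotent, so repeated calls can no longer unbalance the regulator enable count. A minimal sketch of the guard; the stub counter stands in for the regulator framework:

#include <stdbool.h>
#include <stdio.h>

static bool regulator_enabled;
static int enable_count;

static int power(bool on)
{
	if (on) {
		if (regulator_enabled)
			return 0;	/* already on: nothing to do */
		enable_count++;		/* regulator_enable() */
		regulator_enabled = true;
	} else {
		if (!regulator_enabled)
			return 0;	/* already off: nothing to do */
		enable_count--;		/* regulator_disable() */
		regulator_enabled = false;
	}
	return 0;
}

int main(void)
{
	power(true); power(true);	/* double-on is now harmless */
	power(false); power(false);	/* double-off too */
	printf("enable_count balanced: %d\n", enable_count);
	return 0;
}
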
 
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index 84402e4..7e6d999 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -254,6 +254,35 @@ int cam_context_handle_release_dev(struct cam_context *ctx,
 	return rc;
 }
 
+int cam_context_handle_flush_dev(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "context is not ready");
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		CAM_ERR(CAM_CORE, "Invalid flush device command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.flush_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.flush_dev(
+			ctx, cmd);
+	} else {
+		CAM_ERR(CAM_CORE, "No flush device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
 int cam_context_handle_config_dev(struct cam_context *ctx,
 	struct cam_config_dev_cmd *cmd)
 {
@@ -409,8 +438,10 @@ int cam_context_deinit(struct cam_context *ctx)
 void cam_context_putref(struct cam_context *ctx)
 {
 	kref_put(&ctx->refcount, cam_node_put_ctxt_to_free_list);
-	CAM_DBG(CAM_CORE, "ctx device hdl %ld, ref count %d",
-		ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)));
+	CAM_DBG(CAM_CORE,
+		"ctx device hdl %ld, ref count %d, dev_name %s",
+		ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)),
+		ctx->dev_name);
 }
 
 void cam_context_getref(struct cam_context *ctx)
@@ -419,6 +450,8 @@ void cam_context_getref(struct cam_context *ctx)
 		/* should never happen */
 		WARN(1, "cam_context_getref fail\n");
 	}
-	CAM_DBG(CAM_CORE, "ctx device hdl %ld, ref count %d",
-		ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)));
+	CAM_DBG(CAM_CORE,
+		"ctx device hdl %ld, ref count %d, dev_name %s",
+		ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)),
+		ctx->dev_name);
 }
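
cam_context_handle_flush_dev() dispatches through the per-state ioctl table and fails with -EPROTO when the current state has no flush_dev op. A compact sketch of that dispatch; the state names and handler are simplified stand-ins for the cam_context structures:

#include <errno.h>
#include <stdio.h>

enum state { STATE_AVAILABLE, STATE_ACQUIRED, STATE_MAX };

struct ctx { enum state state; };

struct state_ops {
	int (*flush_dev)(struct ctx *c);
};

static int flush_acquired(struct ctx *c)
{
	(void)c;
	return 0;	/* would forward to the flush-to-hw helpers */
}

/* only the acquired state provides a flush handler */
static const struct state_ops state_machine[STATE_MAX] = {
	[STATE_ACQUIRED] = { .flush_dev = flush_acquired },
};

static int handle_flush(struct ctx *c)
{
	if (!state_machine[c->state].flush_dev)
		return -EPROTO;	/* no flush op in this state */
	return state_machine[c->state].flush_dev(c);
}

int main(void)
{
	struct ctx c = { .state = STATE_ACQUIRED };

	printf("acquired: rc=%d\n", handle_flush(&c));
	c.state = STATE_AVAILABLE;
	printf("available: rc=%d\n", handle_flush(&c));
	return 0;
}
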
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index 6d1589e..c823b7a 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -84,6 +84,7 @@ struct cam_ctx_request {
  * @config_dev:            Function pointer for config device
  * @start_dev:             Function pointer for start device
  * @stop_dev:              Function pointer for stop device
+ * @flush_dev:             Function pointer for flush device
  *
  */
 struct cam_ctx_ioctl_ops {
@@ -97,6 +98,8 @@ struct cam_ctx_ioctl_ops {
 			struct cam_start_stop_dev_cmd *cmd);
 	int (*stop_dev)(struct cam_context *ctx,
 			struct cam_start_stop_dev_cmd *cmd);
+	int (*flush_dev)(struct cam_context *ctx,
+			struct cam_flush_dev_cmd *cmd);
 };
 
 /**
@@ -306,6 +309,18 @@ int cam_context_handle_config_dev(struct cam_context *ctx,
 		struct cam_config_dev_cmd *cmd);
 
 /**
+ * cam_context_handle_flush_dev()
+ *
+ * @brief:        Handle flush device command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Flush device command payload
+ *
+ */
+int cam_context_handle_flush_dev(struct cam_context *ctx,
+		struct cam_flush_dev_cmd *cmd);
+
+/**
  * cam_context_handle_start_dev()
  *
  * @brief:        Handle start device command
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index 6b872b9..aab1a1a 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -146,6 +146,7 @@ static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
 {
 	struct cam_ctx_request *req = data;
 	struct cam_context *ctx = NULL;
+	struct cam_flush_dev_cmd flush_cmd;
 	struct cam_req_mgr_apply_request apply;
 	int rc;
 
@@ -169,14 +170,22 @@ static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
 		 * in a critical section which is provided by this
 		 * mutex.
 		 */
+		if (status == CAM_SYNC_STATE_SIGNALED_ERROR) {
+			CAM_DBG(CAM_CTXT, "fence error: %d", sync_obj);
+			flush_cmd.req_id = req->request_id;
+			cam_context_flush_req_to_hw(ctx, &flush_cmd);
+			cam_context_putref(ctx);
+			return;
+		}
+
 		mutex_lock(&ctx->sync_mutex);
 		if (!req->flushed) {
 			cam_context_apply_req_to_hw(req, &apply);
 			mutex_unlock(&ctx->sync_mutex);
 		} else {
-			mutex_unlock(&ctx->sync_mutex);
-			req->ctx = NULL;
 			req->flushed = 0;
+			req->ctx = NULL;
+			mutex_unlock(&ctx->sync_mutex);
 			spin_lock(&ctx->lock);
 			list_del_init(&req->list);
 			list_add_tail(&req->list, &ctx->free_req_list);
@@ -413,6 +422,174 @@ int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
 	return rc;
 }
 
+int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
+{
+	struct cam_hw_flush_args flush_args;
+	struct list_head temp_list;
+	struct cam_ctx_request *req;
+	uint32_t i;
+	int rc = 0;
+
+	/*
+	 * flush pending requests, take the sync lock to synchronize with the
+	 * sync callback thread so that the sync cb thread does not try to
+	 * submit request to h/w while the request is being flushed
+	 */
+	mutex_lock(&ctx->sync_mutex);
+	INIT_LIST_HEAD(&temp_list);
+	spin_lock(&ctx->lock);
+	list_splice_init(&ctx->pending_req_list, &temp_list);
+	spin_unlock(&ctx->lock);
+	flush_args.num_req_pending = 0;
+	while (!list_empty(&temp_list)) {
+		req = list_first_entry(&temp_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req->flushed = 1;
+		flush_args.flush_req_pending[flush_args.num_req_pending++] =
+			req->req_priv;
+		for (i = 0; i < req->num_out_map_entries; i++)
+			if (req->out_map_entries[i].sync_id != -1)
+				cam_sync_signal(req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+	}
+	mutex_unlock(&ctx->sync_mutex);
+
+	if (ctx->hw_mgr_intf->hw_flush) {
+		flush_args.num_req_active = 0;
+		spin_lock(&ctx->lock);
+		INIT_LIST_HEAD(&temp_list);
+		list_splice_init(&ctx->active_req_list, &temp_list);
+		list_for_each_entry(req, &temp_list, list) {
+			flush_args.flush_req_active[flush_args.num_req_active++]
+				= req->req_priv;
+		}
+		spin_unlock(&ctx->lock);
+
+		if (flush_args.num_req_pending || flush_args.num_req_active) {
+			flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+			flush_args.flush_type = CAM_FLUSH_TYPE_ALL;
+			ctx->hw_mgr_intf->hw_flush(
+				ctx->hw_mgr_intf->hw_mgr_priv, &flush_args);
+		}
+	}
+
+	while (!list_empty(&temp_list)) {
+		req = list_first_entry(&temp_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		for (i = 0; i < req->num_out_map_entries; i++)
+			if (req->out_map_entries[i].sync_id != -1) {
+				cam_sync_signal(req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+
+		spin_lock(&ctx->lock);
+		list_add_tail(&req->list, &ctx->free_req_list);
+		spin_unlock(&ctx->lock);
+		req->ctx = NULL;
+	}
+	INIT_LIST_HEAD(&ctx->active_req_list);
+
+	return rc;
+}
+
+int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	struct cam_ctx_request *req = NULL;
+	struct cam_hw_flush_args flush_args;
+	uint32_t i;
+	int rc = 0;
+
+	flush_args.num_req_pending = 0;
+	flush_args.num_req_active = 0;
+	mutex_lock(&ctx->sync_mutex);
+	spin_lock(&ctx->lock);
+	list_for_each_entry(req, &ctx->pending_req_list, list) {
+		if (req->request_id != cmd->req_id)
+			continue;
+
+		req->flushed = 1;
+		flush_args.flush_req_pending[flush_args.num_req_pending++] =
+			req->req_priv;
+		break;
+	}
+	spin_unlock(&ctx->lock);
+	mutex_unlock(&ctx->sync_mutex);
+
+	if (ctx->hw_mgr_intf->hw_flush) {
+		if (!flush_args.num_req_pending) {
+			spin_lock(&ctx->lock);
+			list_for_each_entry(req, &ctx->active_req_list, list) {
+				if (req->request_id != cmd->req_id)
+					continue;
+
+				flush_args.flush_req_active[
+					flush_args.num_req_active++] =
+					req->req_priv;
+				break;
+			}
+			spin_unlock(&ctx->lock);
+		}
+
+		if (flush_args.num_req_pending || flush_args.num_req_active) {
+			flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+			flush_args.flush_type = CAM_FLUSH_TYPE_REQ;
+			ctx->hw_mgr_intf->hw_flush(
+				ctx->hw_mgr_intf->hw_mgr_priv, &flush_args);
+		}
+	}
+
+	if (req) {
+		if (flush_args.num_req_pending || flush_args.num_req_active) {
+			list_del_init(&req->list);
+			for (i = 0; i < req->num_out_map_entries; i++)
+				if (req->out_map_entries[i].sync_id != -1)
+					cam_sync_signal(
+						req->out_map_entries[i].sync_id,
+						CAM_SYNC_STATE_SIGNALED_ERROR);
+			spin_lock(&ctx->lock);
+			list_add_tail(&req->list, &ctx->free_req_list);
+			spin_unlock(&ctx->lock);
+			req->ctx = NULL;
+		}
+	}
+
+	return rc;
+}
+
+int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	if (!ctx || !cmd) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "HW interface is not ready");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	if (cmd->flush_type == CAM_FLUSH_TYPE_ALL)
+		rc = cam_context_flush_ctx_to_hw(ctx);
+	else if (cmd->flush_type == CAM_FLUSH_TYPE_REQ)
+		rc = cam_context_flush_req_to_hw(ctx, cmd);
+	else {
+		rc = -EINVAL;
+		CAM_ERR(CAM_CTXT, "Invalid flush type %d", cmd->flush_type);
+	}
+
+end:
+	return rc;
+}
+
 int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
 	struct cam_start_stop_dev_cmd *cmd)
 {
@@ -457,10 +634,7 @@ int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
 int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
 {
 	int rc = 0;
-	uint32_t i;
 	struct cam_hw_stop_args stop;
-	struct cam_ctx_request *req;
-	struct list_head temp_list;
 
 	if (!ctx) {
 		CAM_ERR(CAM_CTXT, "Invalid input param");
@@ -478,27 +652,11 @@ int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
 	if (rc)
 		goto end;
 
-	/*
-	 * flush pending requests, take the sync lock to synchronize with the
-	 * sync callback thread so that the sync cb thread does not try to
-	 * submit request to h/w while the request is being flushed
-	 */
-	mutex_lock(&ctx->sync_mutex);
-	INIT_LIST_HEAD(&temp_list);
-	spin_lock(&ctx->lock);
-	list_splice_init(&ctx->pending_req_list, &temp_list);
-	spin_unlock(&ctx->lock);
-	while (!list_empty(&temp_list)) {
-		req = list_first_entry(&temp_list,
-				struct cam_ctx_request, list);
-		list_del_init(&req->list);
-		req->flushed = 1;
-		for (i = 0; i < req->num_out_map_entries; i++)
-			if (req->out_map_entries[i].sync_id != -1)
-				cam_sync_signal(req->out_map_entries[i].sync_id,
-					CAM_SYNC_STATE_SIGNALED_ERROR);
+	if (ctx->ctxt_to_hw_map) {
+		rc = cam_context_flush_ctx_to_hw(ctx);
+		if (rc)
+			goto end;
 	}
-	mutex_unlock(&ctx->sync_mutex);
 
 	/* stop hw first */
 	if (ctx->hw_mgr_intf->hw_stop) {
@@ -507,36 +665,6 @@ int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
 			&stop);
 	}
 
-	/*
-	 * flush active queue, at this point h/w layer below does not have any
-	 * reference to requests in active queue.
-	 */
-	INIT_LIST_HEAD(&temp_list);
-	spin_lock(&ctx->lock);
-	list_splice_init(&ctx->active_req_list, &temp_list);
-	spin_unlock(&ctx->lock);
-
-	while (!list_empty(&temp_list)) {
-		req = list_first_entry(&temp_list,
-				struct cam_ctx_request, list);
-		list_del_init(&req->list);
-		CAM_DBG(CAM_CTXT, "signal fence in active list. fence num %d",
-			req->num_out_map_entries);
-		for (i = 0; i < req->num_out_map_entries; i++)
-			if (req->out_map_entries[i].sync_id != -1)
-				cam_sync_signal(req->out_map_entries[i].sync_id,
-					CAM_SYNC_STATE_SIGNALED_ERROR);
-		/*
-		 * The spin lock should be taken here to guard the free list,
-		 * as sync cb thread could be adding a pending req to free list
-		 */
-		spin_lock(&ctx->lock);
-		list_add_tail(&req->list, &ctx->free_req_list);
-		req->ctx = NULL;
-		spin_unlock(&ctx->lock);
-	}
-
 end:
 	return rc;
 }
-
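Taken together, the three helpers above give one funnel for every flush trigger: the new CAM_FLUSH_REQ ioctl, a fence signalled with error in the sync callback, and the stop_dev teardown all converge on hw_mgr_intf->hw_flush(). A condensed outline of the routing (function names from this patch, flow paraphrased):

/*
 * CAM_FLUSH_REQ ioctl ----> cam_context_flush_dev_to_hw()
 *     CAM_FLUSH_TYPE_ALL  -> cam_context_flush_ctx_to_hw()
 *     CAM_FLUSH_TYPE_REQ  -> cam_context_flush_req_to_hw()
 *
 * fence error (sync cb) --> cam_context_flush_req_to_hw() for that req_id
 * stop_dev --------------> cam_context_flush_ctx_to_hw() before hw_stop
 *
 * Both the per-ctx and per-req paths signal the affected output fences
 * with CAM_SYNC_STATE_SIGNALED_ERROR, recycle the requests onto
 * free_req_list, and hand the surviving req_priv pointers down to
 * hw_mgr_intf->hw_flush().
 */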
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
index 45d9e56..9b95ead 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
@@ -26,5 +26,10 @@ int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
 int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
 	struct cam_start_stop_dev_cmd *cmd);
 int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx);
+int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd);
+int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx);
+int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd);
 
 #endif /* _CAM_CONTEXT_UTILS_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
index 3a997ae..bd2b789 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
@@ -34,6 +34,7 @@
  * @read:                  Function pointer for read hardware registers
  * @write:                 Function pointer for Write hardware registers
  * @process_cmd:           Function pointer for additional hardware controls
+ * @flush_cmd:             Function pointer for flush requests
  *
  */
 struct cam_hw_ops {
@@ -59,6 +60,8 @@ struct cam_hw_ops {
 		void *write_args, uint32_t arg_size);
 	int (*process_cmd)(void *hw_priv,
 		uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+	int (*flush)(void *hw_priv,
+		void *flush_args, uint32_t arg_size);
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
index 4746152..a90b3d9 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
@@ -182,6 +182,26 @@ struct cam_hw_config_args {
 };
 
 /**
+ * struct cam_hw_flush_args - Flush arguments
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @num_req_pending:       Number of pending requests to flush
+ * @flush_req_pending:     Pointers to the pending requests being flushed
+ * @num_req_active:        Number of active requests to flush
+ * @flush_req_active:      Pointers to the active requests being flushed
+ * @flush_type:            The flush type
+ *
+ */
+struct cam_hw_flush_args {
+	void                           *ctxt_to_hw_map;
+	uint32_t                        num_req_pending;
+	void                           *flush_req_pending[20];
+	uint32_t                        num_req_active;
+	void                           *flush_req_active[20];
+	enum flush_type_t               flush_type;
+};
+
+/**
  * cam_hw_mgr_intf - HW manager interface
  *
  * @hw_mgr_priv:           HW manager object
@@ -205,6 +225,7 @@ struct cam_hw_config_args {
  *                         hardware manager
  * @hw_open:               Function pointer for HW init
  * @hw_close:              Function pointer for HW deinit
+ * @hw_flush:              Function pointer for HW flush
  *
  */
 struct cam_hw_mgr_intf {
@@ -222,6 +243,7 @@ struct cam_hw_mgr_intf {
 	int (*hw_cmd)(void *hw_priv, void *write_args);
 	int (*hw_open)(void *hw_priv, void *fw_download_args);
 	int (*hw_close)(void *hw_priv, void *hw_close_args);
+	int (*hw_flush)(void *hw_priv, void *hw_flush_args);
 };
 
 #endif /* _CAM_HW_MGR_INTF_H_ */
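One thing the new struct makes worth flagging: flush_req_pending and flush_req_active are fixed 20-slot arrays, and the fill loops in cam_context_flush_ctx_to_hw() increment the counters without a bound check, so a context must never have more than 20 requests in flight per list. A defensive variant of the pending-list drain, as a sketch (CAM_HW_FLUSH_MAX_REQ is a made-up name for the hard-coded 20):

#define CAM_HW_FLUSH_MAX_REQ 20	/* hypothetical; the patch hard-codes 20 */

	while (!list_empty(&temp_list) &&
		flush_args.num_req_pending < CAM_HW_FLUSH_MAX_REQ) {
		req = list_first_entry(&temp_list,
				struct cam_ctx_request, list);
		list_del_init(&req->list);
		req->flushed = 1;
		flush_args.flush_req_pending[flush_args.num_req_pending++] =
			req->req_priv;
	}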
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index 1f0213e..a5977b3 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -192,6 +192,39 @@ static int __cam_node_handle_config_dev(struct cam_node *node,
 	return rc;
 }
 
+static int __cam_node_handle_flush_dev(struct cam_node *node,
+	struct cam_flush_dev_cmd *flush)
+{
+	struct cam_context *ctx = NULL;
+	int rc;
+
+	if (!flush)
+		return -EINVAL;
+
+	if (flush->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (flush->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(flush->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			flush->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_flush_dev(ctx, flush);
+	if (rc)
+		CAM_ERR(CAM_CORE, "Flush failure for node %s", node->name);
+
+	return rc;
+}
+
 static int __cam_node_handle_release_dev(struct cam_node *node,
 	struct cam_release_dev_cmd *release)
 {
@@ -491,6 +524,20 @@ int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
 		}
 		break;
 	}
+	case CAM_FLUSH_REQ: {
+		struct cam_flush_dev_cmd flush;
+
+		if (copy_from_user(&flush, (void __user *)cmd->handle,
+			sizeof(flush)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_flush_dev(node, &flush);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"flush device failed(rc = %d)", rc);
+		}
+		break;
+	}
 	default:
 		CAM_ERR(CAM_CORE, "Unknown op code %d", cmd->op_code);
 		rc = -EINVAL;
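On the user-space side, CAM_FLUSH_REQ rides the same cam_control envelope as every other op code handled above. A hedged example of flushing a single request (the VIDIOC_CAM_CONTROL ioctl, CAM_HANDLE_USER_POINTER, and the cam_flush_dev_cmd layout follow the UAPI in cam_defs.h; treat any mismatch with your tree as an assumption of this sketch):

#include <stdint.h>
#include <sys/ioctl.h>
#include <media/cam_defs.h>

/* Ask the driver to flush one outstanding request on an acquired device. */
static int cam_flush_one_req(int dev_fd, int32_t session_hdl,
	int32_t dev_hdl, int64_t req_id)
{
	struct cam_flush_dev_cmd flush = {
		.session_handle = session_hdl,
		.dev_handle     = dev_hdl,
		.flush_type     = CAM_FLUSH_TYPE_REQ,
		.req_id         = req_id,
	};
	struct cam_control ctrl = {
		.op_code     = CAM_FLUSH_REQ,
		.size        = sizeof(flush),
		.handle_type = CAM_HANDLE_USER_POINTER,
		.handle      = (uint64_t)(uintptr_t)&flush,
	};

	/* cam_node_handle_ioctl() copies the payload in and dispatches it */
	return ioctl(dev_fd, VIDIOC_CAM_CONTROL, &ctrl);
}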
diff --git a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
index 78c1dd3..04d65dd 100644
--- a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
+++ b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
@@ -124,6 +124,17 @@ static int __cam_fd_ctx_release_dev_in_activated(struct cam_context *ctx,
 	return rc;
 }
 
+static int __cam_fd_ctx_flush_dev_in_activated(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_flush_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed to flush device, rc=%d", rc);
+
+	return rc;
+}
+
 static int __cam_fd_ctx_config_dev_in_activated(
 	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
 {
@@ -191,6 +202,7 @@ static struct cam_ctx_ops
 			.stop_dev = __cam_fd_ctx_stop_dev_in_activated,
 			.release_dev = __cam_fd_ctx_release_dev_in_activated,
 			.config_dev = __cam_fd_ctx_config_dev_in_activated,
+			.flush_dev = __cam_fd_ctx_flush_dev_in_activated,
 		},
 		.crm_ops = {},
 		.irq_ops = __cam_fd_ctx_handle_irq_in_activated,
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
index e57066d..a15ccdc 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -207,7 +207,7 @@ static int cam_fd_mgr_util_get_device(struct cam_fd_hw_mgr *hw_mgr,
 		return -EINVAL;
 	}
 
-	CAM_DBG(CAM_FD, "ctx index=%d, hw_ctx=%d", hw_ctx->ctx_index,
+	CAM_DBG(CAM_FD, "ctx_index=%u, hw_ctx=%d", hw_ctx->ctx_index,
 		hw_ctx->device_index);
 
 	*hw_device = &hw_mgr->hw_device[hw_ctx->device_index];
@@ -335,7 +335,7 @@ static int cam_fd_mgr_util_select_device(struct cam_fd_hw_mgr *hw_mgr,
 	/* Update required info in hw context */
 	hw_ctx->device_index = i;
 
-	CAM_DBG(CAM_FD, "ctx index=%d, device_index=%d", hw_ctx->ctx_index,
+	CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index,
 		hw_ctx->device_index);
 
 	return 0;
@@ -1239,7 +1239,7 @@ static int cam_fd_mgr_hw_start(void *hw_mgr_priv, void *mgr_start_args)
 		return -EPERM;
 	}
 
-	CAM_DBG(CAM_FD, "ctx index=%d, device_index=%d", hw_ctx->ctx_index,
+	CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index,
 		hw_ctx->device_index);
 
 	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
@@ -1266,26 +1266,125 @@ static int cam_fd_mgr_hw_start(void *hw_mgr_priv, void *mgr_start_args)
 	return rc;
 }
 
-static int cam_fd_mgr_hw_flush(void *hw_mgr_priv,
-	struct cam_fd_hw_mgr_ctx *hw_ctx)
+static int cam_fd_mgr_hw_flush_req(void *hw_mgr_priv,
+	struct cam_hw_flush_args *flush_args)
 {
 	int rc = 0;
-	struct cam_fd_mgr_frame_request *frame_req, *req_temp;
-	struct cam_fd_hw_stop_args hw_stop_args;
+	struct cam_fd_mgr_frame_request *frame_req, *req_temp, *flush_req;
 	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
 	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_stop_args hw_stop_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	uint32_t i = 0;
 
-	if (!hw_mgr_priv || !hw_ctx) {
-		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
-			hw_mgr_priv, hw_ctx);
-		return -EINVAL;
-	}
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)flush_args->ctxt_to_hw_map;
 
-	if (!hw_ctx->ctx_in_use) {
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
 		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
 		return -EPERM;
 	}
-	CAM_DBG(CAM_FD, "ctx index=%d, hw_ctx=%d", hw_ctx->ctx_index,
+	CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	mutex_lock(&hw_mgr->frame_req_mutex);
+	for (i = 0; i < flush_args->num_req_active; i++) {
+		flush_req = (struct cam_fd_mgr_frame_request *)
+			flush_args->flush_req_active[i];
+
+		list_for_each_entry_safe(frame_req, req_temp,
+			&hw_mgr->frame_pending_list_high, list) {
+			if (frame_req->hw_ctx != hw_ctx)
+				continue;
+
+			if (frame_req->request_id != flush_req->request_id)
+				continue;
+
+			list_del_init(&frame_req->list);
+			break;
+		}
+
+		list_for_each_entry_safe(frame_req, req_temp,
+			&hw_mgr->frame_pending_list_normal, list) {
+			if (frame_req->hw_ctx != hw_ctx)
+				continue;
+
+			if (frame_req->request_id != flush_req->request_id)
+				continue;
+
+			list_del_init(&frame_req->list);
+			break;
+		}
+
+		list_for_each_entry_safe(frame_req, req_temp,
+			&hw_mgr->frame_processing_list, list) {
+			if (frame_req->hw_ctx != hw_ctx)
+				continue;
+
+			if (frame_req->request_id != flush_req->request_id)
+				continue;
+
+			list_del_init(&frame_req->list);
+
+			mutex_lock(&hw_device->lock);
+			if ((hw_device->ready_to_process == true) ||
+				(hw_device->cur_hw_ctx != hw_ctx))
+				goto unlock_dev_flush_req;
+
+			if (hw_device->hw_intf->hw_ops.stop) {
+				hw_stop_args.hw_ctx = hw_ctx;
+				rc = hw_device->hw_intf->hw_ops.stop(
+					hw_device->hw_intf->hw_priv,
+					&hw_stop_args,
+					sizeof(hw_stop_args));
+				if (rc) {
+					CAM_ERR(CAM_FD,
+						"Failed in HW Stop %d", rc);
+					goto unlock_dev_flush_req;
+				}
+				hw_device->ready_to_process = true;
+			}
+
+unlock_dev_flush_req:
+			mutex_unlock(&hw_device->lock);
+			break;
+		}
+	}
+	mutex_unlock(&hw_mgr->frame_req_mutex);
+
+	for (i = 0; i < flush_args->num_req_pending; i++) {
+		flush_req = (struct cam_fd_mgr_frame_request *)
+			flush_args->flush_req_pending[i];
+		cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+			&flush_req);
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_flush_ctx(void *hw_mgr_priv,
+	struct cam_hw_flush_args *flush_args)
+{
+	int rc = 0;
+	struct cam_fd_mgr_frame_request *frame_req, *req_temp, *flush_req;
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_stop_args hw_stop_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	uint32_t i = 0;
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)flush_args->ctxt_to_hw_map;
+
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+	CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index,
 		hw_ctx->device_index);
 
 	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
@@ -1317,28 +1416,64 @@ static int cam_fd_mgr_hw_flush(void *hw_mgr_priv,
 			continue;
 
 		list_del_init(&frame_req->list);
+		mutex_lock(&hw_device->lock);
+		if ((hw_device->ready_to_process == true) ||
+			(hw_device->cur_hw_ctx != hw_ctx))
+			goto unlock_dev_flush_ctx;
+
+		if (hw_device->hw_intf->hw_ops.stop) {
+			hw_stop_args.hw_ctx = hw_ctx;
+			rc = hw_device->hw_intf->hw_ops.stop(
+				hw_device->hw_intf->hw_priv, &hw_stop_args,
+				sizeof(hw_stop_args));
+			if (rc) {
+				CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
+				goto unlock_dev_flush_ctx;
+			}
+			hw_device->ready_to_process = true;
+		}
+
+unlock_dev_flush_ctx:
+		mutex_unlock(&hw_device->lock);
 	}
 	mutex_unlock(&hw_mgr->frame_req_mutex);
 
-	mutex_lock(&hw_device->lock);
-	if ((hw_device->ready_to_process == true) ||
-		(hw_device->cur_hw_ctx != hw_ctx))
-		goto end;
-
-	if (hw_device->hw_intf->hw_ops.stop) {
-		hw_stop_args.hw_ctx = hw_ctx;
-		rc = hw_device->hw_intf->hw_ops.stop(
-			hw_device->hw_intf->hw_priv, &hw_stop_args,
-			sizeof(hw_stop_args));
-		if (rc) {
-			CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
-			goto end;
-		}
-		hw_device->ready_to_process = true;
+	for (i = 0; i < flush_args->num_req_pending; i++) {
+		flush_req = (struct cam_fd_mgr_frame_request *)
+			flush_args->flush_req_pending[i];
+		cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+			&flush_req);
 	}
 
-end:
-	mutex_unlock(&hw_device->lock);
+	return rc;
+}
+
+static int cam_fd_mgr_hw_flush(void *hw_mgr_priv,
+	void *hw_flush_args)
+{
+	int rc = 0;
+	struct cam_hw_flush_args *flush_args =
+		(struct cam_hw_flush_args *)hw_flush_args;
+
+	if (!hw_mgr_priv || !hw_flush_args) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+			hw_mgr_priv, hw_flush_args);
+		return -EINVAL;
+	}
+
+	switch (flush_args->flush_type) {
+	case CAM_FLUSH_TYPE_REQ:
+		rc = cam_fd_mgr_hw_flush_req(hw_mgr_priv, flush_args);
+		break;
+	case CAM_FLUSH_TYPE_ALL:
+		rc = cam_fd_mgr_hw_flush_ctx(hw_mgr_priv, flush_args);
+		break;
+	default:
+		rc = -EINVAL;
+		CAM_ERR(CAM_FD, "Invalid flush type %d",
+			flush_args->flush_type);
+		break;
+	}
 	return rc;
 }
 
@@ -1363,7 +1498,7 @@ static int cam_fd_mgr_hw_stop(void *hw_mgr_priv, void *mgr_stop_args)
 		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
 		return -EPERM;
 	}
-	CAM_DBG(CAM_FD, "ctx index=%d, hw_ctx=%d", hw_ctx->ctx_index,
+	CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index,
 		hw_ctx->device_index);
 
 	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
@@ -1375,10 +1510,6 @@ static int cam_fd_mgr_hw_stop(void *hw_mgr_priv, void *mgr_stop_args)
 	CAM_DBG(CAM_FD, "FD Device ready_to_process = %d",
 		hw_device->ready_to_process);
 
-	rc = cam_fd_mgr_hw_flush(hw_mgr, hw_ctx);
-	if (rc)
-		CAM_ERR(CAM_FD, "FD failed to flush");
-
 	if (hw_device->hw_intf->hw_ops.deinit) {
 		hw_deinit_args.hw_ctx = hw_ctx;
 		hw_deinit_args.ctx_hw_private = hw_ctx->ctx_hw_private;
@@ -1791,6 +1922,7 @@ int cam_fd_hw_mgr_init(struct device_node *of_node,
 	hw_mgr_intf->hw_read = NULL;
 	hw_mgr_intf->hw_write = NULL;
 	hw_mgr_intf->hw_close = NULL;
+	hw_mgr_intf->hw_flush = cam_fd_mgr_hw_flush;
 
 	return rc;
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
index 0c37994..d47350c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -70,6 +70,18 @@ static int __cam_icp_start_dev_in_acquired(struct cam_context *ctx,
 	return rc;
 }
 
+static int __cam_icp_flush_dev_in_ready(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_flush_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to flush device");
+
+	return rc;
+}
+
 static int __cam_icp_config_dev_in_ready(struct cam_context *ctx,
 	struct cam_config_dev_cmd *cmd)
 {
@@ -140,6 +152,7 @@ static struct cam_ctx_ops
 			.release_dev = __cam_icp_release_dev_in_acquired,
 			.start_dev = __cam_icp_start_dev_in_acquired,
 			.config_dev = __cam_icp_config_dev_in_ready,
+			.flush_dev = __cam_icp_flush_dev_in_ready,
 		},
 		.crm_ops = {},
 		.irq_ops = __cam_icp_handle_buf_done_in_ready,
@@ -150,6 +163,7 @@ static struct cam_ctx_ops
 			.stop_dev = __cam_icp_stop_dev_in_ready,
 			.release_dev = __cam_icp_release_dev_in_ready,
 			.config_dev = __cam_icp_config_dev_in_ready,
+			.flush_dev = __cam_icp_flush_dev_in_ready,
 		},
 		.crm_ops = {},
 		.irq_ops = __cam_icp_handle_buf_done_in_ready,
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
index c18a5e4..25e1ce7 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -93,6 +93,8 @@ int cam_bps_init_hw(void *device_priv,
 			CAM_ERR(CAM_ICP, "cpas stop is failed");
 		else
 			core_info->cpas_start = false;
+	} else {
+		core_info->clk_enable = true;
 	}
 
 	return rc;
@@ -119,9 +121,10 @@ int cam_bps_deinit_hw(void *device_priv,
 		return -EINVAL;
 	}
 
-	rc = cam_bps_disable_soc_resources(soc_info);
+	rc = cam_bps_disable_soc_resources(soc_info, core_info->clk_enable);
 	if (rc)
 		CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc);
+	core_info->clk_enable = false;
 
 	if (core_info->cpas_start) {
 		if (cam_cpas_stop(core_info->cpas_handle))
@@ -276,8 +279,30 @@ int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type,
 		uint32_t clk_rate = *(uint32_t *)cmd_args;
 
 		CAM_DBG(CAM_ICP, "bps_src_clk rate = %d", (int)clk_rate);
+		if (!core_info->clk_enable) {
+			cam_bps_handle_pc(bps_dev);
+			cam_cpas_reg_write(core_info->cpas_handle,
+				CAM_CPAS_REG_CPASTOP,
+				hw_info->pwr_ctrl, true, 0x0);
+			rc = cam_bps_toggle_clk(soc_info, true);
+			if (rc)
+				CAM_ERR(CAM_ICP, "Enable failed");
+			else
+				core_info->clk_enable = true;
+			rc = cam_bps_handle_resume(bps_dev);
+			if (rc)
+				CAM_ERR(CAM_ICP, "handle resume failed");
+		}
+		CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
 		rc = cam_bps_update_clk_rate(soc_info, clk_rate);
-	}
+		if (rc)
+			CAM_ERR(CAM_ICP, "Failed to update clk");
+		}
+		break;
+	case CAM_ICP_BPS_CMD_DISABLE_CLK:
+		if (core_info->clk_enable == true)
+			cam_bps_toggle_clk(soc_info, false);
+		core_info->clk_enable = false;
 		break;
 	default:
 		break;
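Because the hunk above splices new lines into an existing case-scoped block, the braces are hard to follow in diff form. With the patch applied, the CAM_ICP_BPS_CMD_UPDATE_CLK case reads roughly as below, reconstructed from the context lines for readability (the IPE hunk later in this series is symmetric):

	case CAM_ICP_BPS_CMD_UPDATE_CLK: {
		uint32_t clk_rate = *(uint32_t *)cmd_args;

		CAM_DBG(CAM_ICP, "bps_src_clk rate = %d", (int)clk_rate);
		if (!core_info->clk_enable) {
			/* The idle watchdog gated our clocks: power the
			 * core back up before programming the new rate. */
			cam_bps_handle_pc(bps_dev);
			cam_cpas_reg_write(core_info->cpas_handle,
				CAM_CPAS_REG_CPASTOP,
				hw_info->pwr_ctrl, true, 0x0);
			rc = cam_bps_toggle_clk(soc_info, true);
			if (rc)
				CAM_ERR(CAM_ICP, "Enable failed");
			else
				core_info->clk_enable = true;
			rc = cam_bps_handle_resume(bps_dev);
			if (rc)
				CAM_ERR(CAM_ICP, "handle resume failed");
		}
		rc = cam_bps_update_clk_rate(soc_info, clk_rate);
		if (rc)
			CAM_ERR(CAM_ICP, "Failed to update clk");
		break;
	}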
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h
index 0a28bb4f..d979321 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h
@@ -33,6 +33,7 @@ struct cam_bps_device_core_info {
 	struct cam_bps_device_hw_info *bps_hw_info;
 	uint32_t cpas_handle;
 	bool cpas_start;
+	bool clk_enable;
 };
 
 int cam_bps_init_hw(void *device_priv,
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
index 400e1e7..b7b636c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
@@ -72,11 +72,13 @@ int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info)
 	return rc;
 }
 
-int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clk)
 {
 	int rc = 0;
 
-	rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
+	rc = cam_soc_util_disable_platform_resource(soc_info, disable_clk,
+		false);
 	if (rc)
 		CAM_ERR(CAM_ICP, "disable platform failed");
 
@@ -142,3 +144,15 @@ int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
 	return cam_soc_util_set_clk_rate(soc_info->clk[soc_info->src_clk_idx],
 		soc_info->clk_name[soc_info->src_clk_idx], clk_rate);
 }
+
+int cam_bps_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable)
+{
+	int rc = 0;
+
+	if (clk_enable)
+		rc = cam_soc_util_clk_enable_default(soc_info, CAM_SVS_VOTE);
+	else
+		cam_soc_util_clk_disable_default(soc_info);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h
index 2dd2c08..18f3015 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h
@@ -20,7 +20,8 @@ int cam_bps_init_soc_resources(struct cam_hw_soc_info *soc_info,
 
 int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info);
 
-int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clk);
 
 int cam_bps_get_gdsc_control(struct cam_hw_soc_info *soc_info);
 
@@ -28,4 +29,5 @@ int cam_bps_transfer_gdsc_control(struct cam_hw_soc_info *soc_info);
 
 int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
 	uint32_t clk_rate);
+int cam_bps_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable);
 #endif /* _CAM_BPS_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 29a1b9a..f44fcc0 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -53,6 +53,9 @@
 #define ICP_WORKQ_TASK_CMD_TYPE 1
 #define ICP_WORKQ_TASK_MSG_TYPE 2
 
+#define ICP_DEV_TYPE_TO_CLK_TYPE(dev_type) \
+	((dev_type == CAM_ICP_RES_TYPE_BPS) ? ICP_CLK_HW_BPS : ICP_CLK_HW_IPE)
+
 static struct cam_icp_hw_mgr icp_hw_mgr;
 
 static int cam_icp_send_ubwc_cfg(struct cam_icp_hw_mgr *hw_mgr)
@@ -60,7 +63,7 @@ static int cam_icp_send_ubwc_cfg(struct cam_icp_hw_mgr *hw_mgr)
 	struct cam_hw_intf *a5_dev_intf = NULL;
 	int rc;
 
-	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	a5_dev_intf = hw_mgr->a5_dev_intf;
 	if (!a5_dev_intf) {
 		CAM_ERR(CAM_ICP, "a5_dev_intf is NULL");
 		return -EINVAL;
@@ -225,6 +228,104 @@ static int cam_icp_clk_idx_from_req_id(struct cam_icp_hw_ctx_data *ctx_data,
 	return 0;
 }
 
+static int cam_icp_ctx_clk_info_init(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	ctx_data->clk_info.curr_fc = 0;
+	ctx_data->clk_info.base_clk = 0;
+	ctx_data->clk_info.uncompressed_bw = 0;
+	ctx_data->clk_info.compressed_bw = 0;
+	cam_icp_supported_clk_rates(&icp_hw_mgr, ctx_data);
+
+	return 0;
+}
+
+static int32_t cam_icp_deinit_idle_clk(void *priv, void *data)
+{
+	struct cam_icp_hw_mgr *hw_mgr = (struct cam_icp_hw_mgr *)priv;
+	struct clk_work_data *task_data = (struct clk_work_data *)data;
+	struct cam_icp_clk_info *clk_info =
+		(struct cam_icp_clk_info *)task_data->data;
+	uint32_t id;
+	uint32_t i;
+	uint32_t curr_clk_rate;
+	struct cam_icp_hw_ctx_data *ctx_data;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	struct cam_hw_intf *dev_intf = NULL;
+
+	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
+	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
+	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+
+	clk_info->base_clk = 0;
+	clk_info->curr_clk = 0;
+	clk_info->over_clked = 0;
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+		ctx_data = &hw_mgr->ctx_data[i];
+		mutex_lock(&ctx_data->ctx_mutex);
+		if ((ctx_data->state != CAM_ICP_CTX_STATE_FREE) &&
+			(ICP_DEV_TYPE_TO_CLK_TYPE(ctx_data->
+			icp_dev_acquire_info->dev_type) == clk_info->hw_type))
+			cam_icp_ctx_clk_info_init(ctx_data);
+		mutex_unlock(&ctx_data->ctx_mutex);
+	}
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+		return -EINVAL;
+	}
+
+	if (clk_info->hw_type == ICP_CLK_HW_BPS) {
+		dev_intf = bps_dev_intf;
+		id = CAM_ICP_BPS_CMD_DISABLE_CLK;
+	} else if (clk_info->hw_type == ICP_CLK_HW_IPE) {
+		dev_intf = ipe0_dev_intf;
+		id = CAM_ICP_IPE_CMD_DISABLE_CLK;
+	} else {
+		CAM_ERR(CAM_ICP, "Invalid clk hw type %d", clk_info->hw_type);
+		return 0;
+	}
+
+	CAM_DBG(CAM_ICP, "Disable %d", clk_info->hw_type);
+
+	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+		&curr_clk_rate, sizeof(curr_clk_rate));
+
+	if (clk_info->hw_type != ICP_CLK_HW_BPS)
+		if (ipe1_dev_intf)
+			ipe1_dev_intf->hw_ops.process_cmd(
+				ipe1_dev_intf->hw_priv, id,
+				&curr_clk_rate, sizeof(curr_clk_rate));
+
+	return 0;
+}
+
+static void cam_icp_timer_cb(unsigned long data)
+{
+	unsigned long flags;
+	struct crm_workq_task *task;
+	struct clk_work_data *task_data;
+	struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
+
+	spin_lock_irqsave(&icp_hw_mgr.hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.msg_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "no empty task");
+		spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+		return;
+	}
+
+	task_data = (struct clk_work_data *)task->payload;
+	task_data->data = timer->parent;
+	task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_icp_deinit_idle_clk;
+	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+}
+
 static int cam_icp_clk_info_init(struct cam_icp_hw_mgr *hw_mgr,
 	struct cam_icp_hw_ctx_data *ctx_data)
 {
@@ -237,21 +338,36 @@ static int cam_icp_clk_info_init(struct cam_icp_hw_mgr *hw_mgr,
 		hw_mgr->clk_info[i].over_clked = 0;
 		hw_mgr->clk_info[i].uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
 		hw_mgr->clk_info[i].compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		hw_mgr->clk_info[i].hw_type = i;
 	}
 	hw_mgr->icp_default_clk = ICP_CLK_SVS_HZ;
 
 	return 0;
 }
 
-static int cam_icp_ctx_clk_info_init(struct cam_icp_hw_ctx_data *ctx_data)
+static int cam_icp_timer_start(struct cam_icp_hw_mgr *hw_mgr)
 {
-	ctx_data->clk_info.curr_fc = 0;
-	ctx_data->clk_info.base_clk = 0;
-	ctx_data->clk_info.uncompressed_bw = 0;
-	ctx_data->clk_info.compressed_bw = 0;
-	cam_icp_supported_clk_rates(&icp_hw_mgr, ctx_data);
+	int rc = 0;
+	int i;
 
-	return 0;
+	for (i = 0; i < ICP_CLK_HW_MAX; i++)  {
+		if (!hw_mgr->clk_info[i].watch_dog) {
+			rc = crm_timer_init(&hw_mgr->clk_info[i].watch_dog,
+				3000, &hw_mgr->clk_info[i], &cam_icp_timer_cb);
+			if (rc)
+				CAM_ERR(CAM_ICP, "Failed to start timer %d", i);
+		}
+	}
+
+	return rc;
+}
+
+static void cam_icp_timer_stop(struct cam_icp_hw_mgr *hw_mgr)
+{
+	if (!hw_mgr->bps_ctxt_cnt)
+		crm_timer_exit(&hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog);
+	else if (!hw_mgr->ipe_ctxt_cnt)
+		crm_timer_exit(&hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog);
 }
 
 static uint32_t cam_icp_mgr_calc_base_clk(uint32_t frame_cycles,
@@ -335,7 +451,6 @@ static bool cam_icp_update_clk_busy(struct cam_icp_hw_mgr *hw_mgr,
 	 *      no need to update the clock
 	 */
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
-	ctx_data->clk_info.curr_fc = clk_info->frame_cycles;
 	ctx_data->clk_info.base_clk = base_clk;
 	hw_mgr_clk_info->over_clked = 0;
 	if (clk_info->frame_cycles > ctx_data->clk_info.curr_fc) {
@@ -360,6 +475,7 @@ static bool cam_icp_update_clk_busy(struct cam_icp_hw_mgr *hw_mgr,
 			rc = true;
 		}
 	}
+	ctx_data->clk_info.curr_fc = clk_info->frame_cycles;
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	return rc;
@@ -552,10 +668,15 @@ static bool cam_icp_check_clk_update(struct cam_icp_hw_mgr *hw_mgr,
 	uint64_t req_id;
 	struct cam_icp_clk_info *hw_mgr_clk_info;
 
-	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+		crm_timer_reset(hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog);
 		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
-	else
+		CAM_DBG(CAM_ICP, "Reset bps timer");
+	} else {
+		crm_timer_reset(hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog);
 		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+		CAM_DBG(CAM_ICP, "Reset ipe timer");
+	}
 
 	if (icp_hw_mgr.icp_debug_clk)
 		return cam_icp_debug_clk_update(hw_mgr_clk_info);
@@ -627,9 +748,9 @@ static int cam_icp_update_clk_rate(struct cam_icp_hw_mgr *hw_mgr,
 	struct cam_hw_intf *bps_dev_intf = NULL;
 	struct cam_hw_intf *dev_intf = NULL;
 
-	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
-	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
-	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
 
 
 	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
@@ -670,9 +791,9 @@ static int cam_icp_update_cpas_vote(struct cam_icp_hw_mgr *hw_mgr,
 	struct cam_icp_clk_info *clk_info;
 	struct cam_icp_cpas_vote clk_update;
 
-	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
-	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
-	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
 
 	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
 		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
@@ -729,9 +850,9 @@ static int cam_icp_mgr_ipe_bps_resume(struct cam_icp_hw_mgr *hw_mgr,
 	struct cam_hw_intf *bps_dev_intf = NULL;
 	int rc = 0;
 
-	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
-	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
-	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
 
 	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
 		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
@@ -793,9 +914,9 @@ static int cam_icp_mgr_ipe_bps_power_collapse(struct cam_icp_hw_mgr *hw_mgr,
 	struct cam_hw_intf *ipe1_dev_intf = NULL;
 	struct cam_hw_intf *bps_dev_intf = NULL;
 
-	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
-	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
-	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
 
 	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
 		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
@@ -997,6 +1118,17 @@ static int cam_icp_mgr_cleanup_ctx(struct cam_icp_hw_ctx_data *ctx_data)
 		clear_bit(i, ctx_data->hfi_frame_process.bitmap);
 	}
 
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++) {
+		if (!hfi_frame_process->in_free_resource[i])
+			continue;
+
+		CAM_INFO(CAM_ICP, "Delete merged sync in object: %d",
+			ctx_data->hfi_frame_process.in_free_resource[i]);
+		cam_sync_destroy(
+			ctx_data->hfi_frame_process.in_free_resource[i]);
+		ctx_data->hfi_frame_process.in_resource[i] = 0;
+	}
+
 	return 0;
 }
 
@@ -1009,6 +1141,7 @@ static int cam_icp_mgr_handle_frame_process(uint32_t *msg_ptr, int flag)
 	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
 	struct hfi_frame_process_info *hfi_frame_process;
 	struct cam_hw_done_event_data buf_data;
+	uint32_t clk_type;
 
 	ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
 	request_id = ioconfig_ack->user_data2;
@@ -1020,6 +1153,10 @@ static int cam_icp_mgr_handle_frame_process(uint32_t *msg_ptr, int flag)
 	CAM_DBG(CAM_ICP, "ctx : %pK, request_id :%lld",
 		(void *)ctx_data->context_priv, request_id);
 
+	clk_type = ICP_DEV_TYPE_TO_CLK_TYPE(ctx_data->icp_dev_acquire_info->
+		dev_type);
+	crm_timer_reset(icp_hw_mgr.clk_info[clk_type].watch_dog);
+
 	mutex_lock(&ctx_data->ctx_mutex);
 	if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
 		mutex_unlock(&ctx_data->ctx_mutex);
@@ -1615,7 +1752,7 @@ static int cam_icp_mgr_send_pc_prep(struct cam_icp_hw_mgr *hw_mgr)
 	unsigned long rem_jiffies;
 	int timeout = 5000;
 
-	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	a5_dev_intf = hw_mgr->a5_dev_intf;
 	if (!a5_dev_intf) {
 		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
 		return -EINVAL;
@@ -1646,9 +1783,9 @@ static int cam_ipe_bps_deint(struct cam_icp_hw_mgr *hw_mgr)
 	struct cam_hw_intf *ipe1_dev_intf = NULL;
 	struct cam_hw_intf *bps_dev_intf = NULL;
 
-	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
-	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
-	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
 	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
 		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
 		return 0;
@@ -1670,7 +1807,7 @@ static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr)
 
 	CAM_DBG(CAM_ICP, "ENTER");
 
-	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	a5_dev_intf = hw_mgr->a5_dev_intf;
 	if (!a5_dev_intf) {
 		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
 		return -EINVAL;
@@ -1692,7 +1829,7 @@ static int cam_icp_mgr_hfi_resume(struct cam_icp_hw_mgr *hw_mgr)
 	struct cam_hw_info *a5_dev = NULL;
 	struct hfi_mem_info hfi_mem;
 
-	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	a5_dev_intf = hw_mgr->a5_dev_intf;
 	if (!a5_dev_intf) {
 		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
 		return -EINVAL;
@@ -1740,7 +1877,7 @@ static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr)
 	struct cam_hw_intf *a5_dev_intf = NULL;
 
 	CAM_DBG(CAM_ICP, "Enter");
-	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	a5_dev_intf = hw_mgr->a5_dev_intf;
 
 	if (!a5_dev_intf) {
 		CAM_ERR(CAM_ICP, "a5 dev intf is wrong");
@@ -1819,7 +1956,7 @@ static int cam_icp_mgr_abort_handle(
 			msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		CAM_DBG(CAM_ICP, "FW timeout/err in abort handle command");
+		CAM_ERR(CAM_ICP, "FW timeout/err in abort handle command");
 	}
 
 	kfree(abort_cmd);
@@ -1941,10 +2078,10 @@ static void cam_icp_mgr_device_deinit(struct cam_icp_hw_mgr *hw_mgr)
 	struct cam_hw_intf *ipe1_dev_intf = NULL;
 	struct cam_hw_intf *bps_dev_intf = NULL;
 
-	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
-	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
-	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
-	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
 
 	if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
 		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
@@ -1974,7 +2111,7 @@ static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
 		return 0;
 	}
 
-	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	a5_dev_intf = hw_mgr->a5_dev_intf;
 	if (!a5_dev_intf) {
 		CAM_DBG(CAM_ICP, "a5_dev_intf is NULL");
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -2019,10 +2156,10 @@ static int cam_icp_mgr_device_init(struct cam_icp_hw_mgr *hw_mgr)
 	struct cam_hw_intf *ipe1_dev_intf = NULL;
 	struct cam_hw_intf *bps_dev_intf = NULL;
 
-	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
-	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
-	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
-	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
 
 	if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
 		CAM_ERR(CAM_ICP, "dev intfs are wrong");
@@ -2067,7 +2204,7 @@ static int cam_icp_mgr_fw_download(struct cam_icp_hw_mgr *hw_mgr)
 	struct cam_icp_a5_set_irq_cb irq_cb;
 	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
 
-	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	a5_dev_intf = hw_mgr->a5_dev_intf;
 	if (!a5_dev_intf) {
 		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
 		return -EINVAL;
@@ -2116,7 +2253,7 @@ static int cam_icp_mgr_hfi_init(struct cam_icp_hw_mgr *hw_mgr)
 	struct cam_hw_info *a5_dev = NULL;
 	struct hfi_mem_info hfi_mem;
 
-	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	a5_dev_intf = hw_mgr->a5_dev_intf;
 	if (!a5_dev_intf) {
 		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
 		return -EINVAL;
@@ -2158,7 +2295,7 @@ static int cam_icp_mgr_send_fw_init(struct cam_icp_hw_mgr *hw_mgr)
 	unsigned long rem_jiffies;
 	int timeout = 5000;
 
-	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	a5_dev_intf = hw_mgr->a5_dev_intf;
 	if (!a5_dev_intf) {
 		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
 		return -EINVAL;
@@ -2203,7 +2340,7 @@ static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args)
 		return rc;
 	}
 
-	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	a5_dev_intf = hw_mgr->a5_dev_intf;
 	if (!a5_dev_intf) {
 		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -2702,6 +2839,175 @@ static int cam_icp_mgr_send_abort_status(struct cam_icp_hw_ctx_data *ctx_data)
 		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
 	}
 	mutex_unlock(&ctx_data->ctx_mutex);
+	return 0;
+}
+
+static int cam_icp_mgr_delete_sync(void *priv, void *data)
+{
+	struct hfi_cmd_work_data *task_data = NULL;
+	struct cam_icp_hw_ctx_data *ctx_data;
+	struct hfi_frame_process_info *hfi_frame_process;
+	int idx;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_ICP, "Invalid params %pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	task_data = (struct hfi_cmd_work_data *)data;
+	ctx_data = task_data->data;
+
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "Null Context");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+		if (!hfi_frame_process->in_free_resource[idx])
+			continue;
+		//cam_sync_destroy(
+			//ctx_data->hfi_frame_process.in_free_resource[idx]);
+		ctx_data->hfi_frame_process.in_resource[idx] = 0;
+	}
+	mutex_unlock(&ctx_data->ctx_mutex);
+	return 0;
+}
+
+static int cam_icp_mgr_delete_sync_obj(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int rc = 0;
+	struct crm_workq_task *task;
+	struct hfi_cmd_work_data *task_data;
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "no empty task");
+		return -ENOMEM;
+	}
+
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)ctx_data;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_delete_sync;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+static int cam_icp_mgr_flush_all(struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_hw_flush_args *flush_args)
+{
+	struct hfi_frame_process_info *hfi_frame_process;
+	int idx;
+	bool clear_in_resource = false;
+
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+		if (!hfi_frame_process->request_id[idx])
+			continue;
+
+		/* now release memory for hfi frame process command */
+		hfi_frame_process->request_id[idx] = 0;
+		if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+			ctx_data->hfi_frame_process.in_free_resource[idx] =
+				ctx_data->hfi_frame_process.in_resource[idx];
+			ctx_data->hfi_frame_process.in_resource[idx] = 0;
+		}
+		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+		clear_in_resource = true;
+	}
+
+	if (clear_in_resource)
+		cam_icp_mgr_delete_sync_obj(ctx_data);
+
+	return 0;
+}
+
+static int cam_icp_mgr_flush_req(struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_hw_flush_args *flush_args)
+{
+	int64_t request_id;
+	struct hfi_frame_process_info *hfi_frame_process;
+	int idx;
+	bool clear_in_resource = false;
+
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	request_id = *(int64_t *)flush_args->flush_req_pending[0];
+	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+		if (!hfi_frame_process->request_id[idx])
+			continue;
+
+		if (hfi_frame_process->request_id[idx] != request_id)
+			continue;
+
+		/* now release memory for hfi frame process command */
+		hfi_frame_process->request_id[idx] = 0;
+		if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+			ctx_data->hfi_frame_process.in_free_resource[idx] =
+				ctx_data->hfi_frame_process.in_resource[idx];
+			ctx_data->hfi_frame_process.in_resource[idx] = 0;
+		}
+		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+		clear_in_resource = true;
+	}
+
+	if (clear_in_resource)
+		cam_icp_mgr_delete_sync_obj(ctx_data);
+
+	return 0;
+}
+
+static int cam_icp_mgr_hw_flush(void *hw_priv, void *hw_flush_args)
+{
+	struct cam_hw_flush_args *flush_args = hw_flush_args;
+	struct cam_icp_hw_ctx_data *ctx_data;
+
+	if ((!hw_priv) || (!hw_flush_args)) {
+		CAM_ERR(CAM_ICP, "Input params are NULL");
+		return -EINVAL;
+	}
+
+	ctx_data = flush_args->ctxt_to_hw_map;
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "Ctx data is NULL");
+		return -EINVAL;
+	}
+
+	if ((flush_args->flush_type >= CAM_FLUSH_TYPE_MAX) ||
+		(flush_args->flush_type < CAM_FLUSH_TYPE_REQ)) {
+		CAM_ERR(CAM_ICP, "Invalid flush type: %d",
+			flush_args->flush_type);
+		return -EINVAL;
+	}
+
+	switch (flush_args->flush_type) {
+	case CAM_FLUSH_TYPE_ALL:
+		if (flush_args->num_req_active)
+			cam_icp_mgr_abort_handle(ctx_data);
+		mutex_lock(&ctx_data->ctx_mutex);
+		cam_icp_mgr_flush_all(ctx_data, flush_args);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		break;
+	case CAM_FLUSH_TYPE_REQ:
+		mutex_lock(&ctx_data->ctx_mutex);
+		if (flush_args->num_req_active) {
+			CAM_ERR(CAM_ICP, "Flush request is not supported");
+			mutex_unlock(&ctx_data->ctx_mutex);
+			return -EINVAL;
+		}
+		if (flush_args->num_req_pending)
+			cam_icp_mgr_flush_req(ctx_data, flush_args);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid flush type: %d",
+			flush_args->flush_type);
+		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -2754,6 +3060,9 @@ static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
 	}
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
+	if (!hw_mgr->bps_ctxt_cnt || !hw_mgr->ipe_ctxt_cnt)
+		cam_icp_timer_stop(hw_mgr);
+
 	return rc;
 }
 
@@ -3043,6 +3352,10 @@ static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 			goto ubwc_cfg_failed;
 		}
 	}
+
+	if (!hw_mgr->bps_ctxt_cnt || !hw_mgr->ipe_ctxt_cnt)
+		cam_icp_timer_start(hw_mgr);
+
 	rc = cam_icp_mgr_ipe_bps_resume(hw_mgr, ctx_data);
 	if (rc) {
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -3276,6 +3589,13 @@ static int cam_icp_mgr_init_devs(struct device_node *of_node)
 		of_node_put(child_node);
 	}
 
+	icp_hw_mgr.a5_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_A5][0];
+	icp_hw_mgr.bps_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_BPS][0];
+	icp_hw_mgr.ipe0_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_IPE][0];
+	if (icp_hw_mgr.ipe1_enable)
+		icp_hw_mgr.ipe1_dev_intf =
+			icp_hw_mgr.devices[CAM_ICP_DEV_IPE][1];
+
 	return 0;
 compat_hw_name_failed:
 	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_BPS]);
@@ -3360,6 +3680,7 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
 	hw_mgr_intf->hw_config = cam_icp_mgr_config_hw;
 	hw_mgr_intf->hw_open = cam_icp_mgr_hw_open;
 	hw_mgr_intf->hw_close = cam_icp_mgr_hw_close;
+	hw_mgr_intf->hw_flush = cam_icp_mgr_hw_flush;
 
 	icp_hw_mgr.secure_mode = CAM_SECURE_MODE_NON_SECURE;
 	mutex_init(&icp_hw_mgr.hw_mgr_mutex);
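Stepping back, the hw-mgr changes above implement a per-domain idle clock gate: each of BPS and IPE gets a watchdog that is armed when the first context of that type is acquired, re-armed on every clock update and frame-done, and torn down when the last context goes away. The 3000 ms period is the literal value handed to crm_timer_init(). An outline of the lifecycle (names from this patch):

/*
 * acquire_hw (first BPS/IPE ctx) -> cam_icp_timer_start()
 *                                     crm_timer_init(&clk_info->watch_dog,
 *                                                    3000, clk_info, cb)
 *
 * cam_icp_check_clk_update() /
 * cam_icp_mgr_handle_frame_process() -> crm_timer_reset(watch_dog)
 *
 * 3 s idle -> cam_icp_timer_cb() (timer context, so it only enqueues)
 *          -> workq runs cam_icp_deinit_idle_clk()
 *          -> process_cmd(CAM_ICP_BPS_CMD_DISABLE_CLK or
 *                         CAM_ICP_IPE_CMD_DISABLE_CLK)
 *             and resets each attached ctx via cam_icp_ctx_clk_info_init()
 *
 * release_hw (last ctx of a type) -> cam_icp_timer_stop()
 *                                      crm_timer_exit(&watch_dog)
 *
 * The next UPDATE_CLK after a gate re-powers the core (see the BPS/IPE
 * process_cmd hunks) before setting the new rate.
 */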
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index e8919e8..43d7a4a 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -25,6 +25,7 @@
 #include "cam_mem_mgr.h"
 #include "cam_smmu_api.h"
 #include "cam_soc_util.h"
+#include "cam_req_mgr_timer.h"
 
 #define CAM_ICP_ROLE_PARENT     1
 #define CAM_ICP_ROLE_CHILD      2
@@ -111,6 +112,16 @@ struct hfi_msg_work_data {
 };
 
 /**
+ * struct clk_work_data
+ * @type: Task type
+ * @data: Pointer to clock info
+ */
+struct clk_work_data {
+	uint32_t type;
+	void *data;
+};
+
+/**
  * struct hfi_frame_process_info
  * @hfi_frame_cmd: Frame process command info
  * @bitmap: Bitmap for hfi_frame_cmd
@@ -131,6 +142,7 @@ struct hfi_frame_process_info {
 	uint32_t num_out_resources[CAM_FRAME_CMD_MAX];
 	uint32_t out_resource[CAM_FRAME_CMD_MAX][CAM_MAX_OUT_RES];
 	uint32_t in_resource[CAM_FRAME_CMD_MAX];
+	uint32_t in_free_resource[CAM_FRAME_CMD_MAX];
 	uint32_t fw_process_flag[CAM_FRAME_CMD_MAX];
 	struct cam_icp_clk_bw_request clk_info[CAM_FRAME_CMD_MAX];
 };
@@ -206,8 +218,10 @@ struct icp_cmd_generic_blob {
 * @curr_clk: Current clock of hardware
  * @threshold: Threshold for overclk count
  * @over_clked: Over clock count
- * #uncompressed_bw: Current bandwidth voting
+ * @uncompressed_bw: Current bandwidth voting
  * @compressed_bw: Current compressed bandwidth voting
+ * @hw_type: IPE/BPS device type
+ * @watch_dog: watchdog timer handle
  */
 struct cam_icp_clk_info {
 	uint32_t base_clk;
@@ -216,6 +230,8 @@ struct cam_icp_clk_info {
 	uint32_t over_clked;
 	uint64_t uncompressed_bw;
 	uint64_t compressed_bw;
+	uint32_t hw_type;
+	struct cam_req_mgr_timer *watch_dog;
 };
 
 /**
@@ -290,6 +306,10 @@ struct cam_icp_hw_mgr {
 	bool ipe1_enable;
 	bool bps_enable;
 	uint32_t core_info;
+	struct cam_hw_intf *a5_dev_intf;
+	struct cam_hw_intf *ipe0_dev_intf;
+	struct cam_hw_intf *ipe1_dev_intf;
+	struct cam_hw_intf *bps_dev_intf;
 };
 
 static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
index d79187f..4f07172 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
@@ -27,6 +27,7 @@ enum cam_icp_bps_cmd_type {
 	CAM_ICP_BPS_CMD_CPAS_START,
 	CAM_ICP_BPS_CMD_CPAS_STOP,
 	CAM_ICP_BPS_CMD_UPDATE_CLK,
+	CAM_ICP_BPS_CMD_DISABLE_CLK,
 	CAM_ICP_BPS_CMD_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
index 697757e..0943bef 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
@@ -27,6 +27,7 @@ enum cam_icp_ipe_cmd_type {
 	CAM_ICP_IPE_CMD_CPAS_START,
 	CAM_ICP_IPE_CMD_CPAS_STOP,
 	CAM_ICP_IPE_CMD_UPDATE_CLK,
+	CAM_ICP_IPE_CMD_DISABLE_CLK,
 	CAM_ICP_IPE_CMD_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
index 8630e34..5b4156a 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
@@ -91,6 +91,8 @@ int cam_ipe_init_hw(void *device_priv,
 			CAM_ERR(CAM_ICP, "cpas stop is failed");
 		else
 			core_info->cpas_start = false;
+	} else {
+		core_info->clk_enable = true;
 	}
 
 	return rc;
@@ -117,9 +119,10 @@ int cam_ipe_deinit_hw(void *device_priv,
 		return -EINVAL;
 	}
 
-	rc = cam_ipe_disable_soc_resources(soc_info);
+	rc = cam_ipe_disable_soc_resources(soc_info, core_info->clk_enable);
 	if (rc)
 		CAM_ERR(CAM_ICP, "soc disable is failed : %d", rc);
+	core_info->clk_enable = false;
 
 	if (core_info->cpas_start) {
 		if (cam_cpas_stop(core_info->cpas_handle))
@@ -267,8 +270,31 @@ int cam_ipe_process_cmd(void *device_priv, uint32_t cmd_type,
 		uint32_t clk_rate = *(uint32_t *)cmd_args;
 
 		CAM_DBG(CAM_ICP, "ipe_src_clk rate = %d", (int)clk_rate);
-		rc = cam_ipe_update_clk_rate(soc_info, clk_rate);
+		if (!core_info->clk_enable) {
+			cam_ipe_handle_pc(ipe_dev);
+			cam_cpas_reg_write(core_info->cpas_handle,
+				CAM_CPAS_REG_CPASTOP,
+				hw_info->pwr_ctrl, true, 0x0);
+			rc = cam_ipe_toggle_clk(soc_info, true);
+			if (rc)
+				CAM_ERR(CAM_ICP, "Enable failed");
+			else
+				core_info->clk_enable = true;
+			rc = cam_ipe_handle_resume(ipe_dev);
+			if (rc)
+				CAM_ERR(CAM_ICP, "handle resume failed");
 		}
+		CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
+
+		rc = cam_ipe_update_clk_rate(soc_info, clk_rate);
+		if (rc)
+			CAM_ERR(CAM_ICP, "Failed to update clk");
+		}
+		break;
+	case CAM_ICP_IPE_CMD_DISABLE_CLK:
+		if (core_info->clk_enable == true)
+			cam_ipe_toggle_clk(soc_info, false);
+		core_info->clk_enable = false;
 		break;
 	default:
 		break;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h
index bd83972..65d3490 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h
@@ -33,6 +33,7 @@ struct cam_ipe_device_core_info {
 	struct cam_ipe_device_hw_info *ipe_hw_info;
 	uint32_t cpas_handle;
 	bool cpas_start;
+	bool clk_enable;
 };
 
 int cam_ipe_init_hw(void *device_priv,
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
index 71af1a2..289d7d4 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
@@ -125,11 +125,13 @@ int cam_ipe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
 	return rc;
 }
 
-int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clk)
 {
 	int rc = 0;
 
-	rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
+	rc = cam_soc_util_disable_platform_resource(soc_info, disable_clk,
+		false);
 	if (rc)
 		CAM_ERR(CAM_ICP, "enable platform failed");
 
@@ -145,3 +147,15 @@ int cam_ipe_update_clk_rate(struct cam_hw_soc_info *soc_info,
 	return cam_soc_util_set_clk_rate(soc_info->clk[soc_info->src_clk_idx],
 		soc_info->clk_name[soc_info->src_clk_idx], clk_rate);
 }
+
+int cam_ipe_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable)
+{
+	int rc = 0;
+
+	if (clk_enable)
+		rc = cam_soc_util_clk_enable_default(soc_info, CAM_SVS_VOTE);
+	else
+		cam_soc_util_clk_disable_default(soc_info);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h
index 8e5a38a..5385bde 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h
@@ -20,7 +20,8 @@ int cam_ipe_init_soc_resources(struct cam_hw_soc_info *soc_info,
 
 int cam_ipe_enable_soc_resources(struct cam_hw_soc_info *soc_info);
 
-int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clk);
 
 int cam_ipe_get_gdsc_control(struct cam_hw_soc_info *soc_info);
 
@@ -28,4 +29,5 @@ int cam_ipe_transfer_gdsc_control(struct cam_hw_soc_info *soc_info);
 
 int cam_ipe_update_clk_rate(struct cam_hw_soc_info *soc_info,
 	uint32_t clk_rate);
+int cam_ipe_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable);
 #endif /* CAM_IPE_SOC_H */
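Taken together, the IPE changes above implement lazy clock management across power collapse: cam_ipe_init_hw() marks the clocks on, CAM_ICP_IPE_CMD_UPDATE_CLK re-enables them (and restores power-control state) if they were gated, and the new CAM_ICP_IPE_CMD_DISABLE_CLK gates only the clocks without tearing down the other SoC resources. A minimal sketch of issuing the new command through the generic hw interface; the wrapper and its error handling are illustrative, only the enum value and the process_cmd signature come from the patch:

/* Sketch: gate IPE clocks when the device goes idle. ipe_dev_intf is
 * assumed to be the struct cam_hw_intf obtained for the IPE device. */
static int sketch_ipe_gate_clocks(struct cam_hw_intf *ipe_dev_intf)
{
	if (!ipe_dev_intf || !ipe_dev_intf->hw_ops.process_cmd)
		return -EINVAL;

	/* no payload: the command only toggles clk_enable off */
	return ipe_dev_intf->hw_ops.process_cmd(ipe_dev_intf->hw_priv,
		CAM_ICP_IPE_CMD_DISABLE_CLK, NULL, 0);
}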
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index 187aeaf..4a7a4f2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -177,7 +177,10 @@ static int cam_vfe_irq_err_top_half(uint32_t    evt_id,
 	rc  = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
 	if (rc) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"No tasklet_cmd is free in queue\n");
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
 		return rc;
 	}
 
@@ -431,7 +434,10 @@ static int cam_vfe_irq_top_half(uint32_t    evt_id,
 	rc  = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
 	if (rc) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"No tasklet_cmd is free in queue\n");
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
 		return rc;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index a2fbbd7..c166113 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -1169,13 +1169,19 @@ static int cam_vfe_bus_handle_wm_done_top_half(uint32_t evt_id,
 
 	rsrc_data = wm_res->res_priv;
 
-	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
-	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
 
 	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
 	if (rc) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP,
 			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1],
+			th_payload->evt_status_arr[2]);
+
 		return rc;
 	}
 
@@ -1665,14 +1671,20 @@ static int cam_vfe_bus_handle_comp_done_top_half(uint32_t evt_id,
 
 	rsrc_data = comp_grp->res_priv;
 
-	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
-	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
-	CAM_DBG(CAM_ISP, "IRQ status_2 = %x", th_payload->evt_status_arr[2]);
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+	CAM_DBG(CAM_ISP, "IRQ status_2 = 0x%x", th_payload->evt_status_arr[2]);
 
 	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
 	if (rc) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP,
 			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1],
+			th_payload->evt_status_arr[2]);
+
 		return rc;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
index 4589a22..1ccef0d 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
@@ -51,6 +51,18 @@ static int __cam_jpeg_ctx_release_dev_in_acquired(struct cam_context *ctx,
 	return rc;
 }
 
+static int __cam_jpeg_ctx_flush_dev_in_acquired(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_flush_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to flush device");
+
+	return rc;
+}
+
 static int __cam_jpeg_ctx_config_dev_in_acquired(struct cam_context *ctx,
 	struct cam_config_dev_cmd *cmd)
 {
@@ -100,6 +112,7 @@ static struct cam_ctx_ops
 			.release_dev = __cam_jpeg_ctx_release_dev_in_acquired,
 			.config_dev = __cam_jpeg_ctx_config_dev_in_acquired,
 			.stop_dev = __cam_jpeg_ctx_stop_dev_in_acquired,
+			.flush_dev = __cam_jpeg_ctx_flush_dev_in_acquired,
 		},
 		.crm_ops = { },
 		.irq_ops = __cam_jpeg_ctx_handle_buf_done_in_acquired,
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index e401549..65922dd 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -556,6 +556,7 @@ static int cam_jpeg_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
 	p_cfg_req->dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
 
 	request_id = (uint64_t)config_args->priv;
+	p_cfg_req->req_id = request_id;
 	hw_update_entries = config_args->hw_update_entries;
 	CAM_DBG(CAM_JPEG, "ctx_data = %pK req_id = %lld %lld",
 		ctx_data, request_id, (uint64_t)config_args->priv);
@@ -779,13 +780,91 @@ static int cam_jpeg_mgr_flush(void *hw_mgr_priv,
 			hw_cfg_args.ctxt_to_hw_map != ctx_data)
 			continue;
 
-		CAM_INFO(CAM_JPEG, "deleting req %pK", cfg_req);
 		list_del_init(&cfg_req->list);
 	}
 
 	return rc;
 }
 
+static int cam_jpeg_mgr_flush_req(void *hw_mgr_priv,
+	struct cam_jpeg_hw_ctx_data *ctx_data,
+	struct cam_hw_flush_args *flush_args)
+{
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_cfg_req *cfg_req, *req_temp;
+	int64_t request_id;
+
+	if (!hw_mgr || !ctx_data || !flush_args) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+
+	request_id = *(int64_t *)flush_args->flush_req_pending[0];
+	list_for_each_entry_safe(cfg_req, req_temp,
+		&hw_mgr->hw_config_req_list, list) {
+		if (cfg_req->hw_cfg_args.ctxt_to_hw_map
+			!= ctx_data)
+			continue;
+
+		if (cfg_req->req_id != request_id)
+			continue;
+
+		list_del_init(&cfg_req->list);
+	}
+
+	return 0;
+}
+
+static int cam_jpeg_mgr_hw_flush(void *hw_mgr_priv, void *flush_hw_args)
+{
+	int rc = 0;
+	struct cam_hw_flush_args *flush_args = flush_hw_args;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+
+	if (!hw_mgr || !flush_args || !flush_args->ctxt_to_hw_map) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)flush_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	if ((flush_args->flush_type >= CAM_FLUSH_TYPE_MAX) ||
+		(flush_args->flush_type < CAM_FLUSH_TYPE_REQ)) {
+		CAM_ERR(CAM_JPEG, "Invalid flush type: %d",
+			flush_args->flush_type);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	switch (flush_args->flush_type) {
+	case CAM_FLUSH_TYPE_ALL:
+		rc = cam_jpeg_mgr_flush(hw_mgr_priv, ctx_data);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "Flush failed %d", rc);
+		break;
+	case CAM_FLUSH_TYPE_REQ:
+		rc = cam_jpeg_mgr_flush_req(hw_mgr_priv, ctx_data, flush_args);
+		CAM_ERR(CAM_JPEG, "Flush per request is not supported at HW level");
+		break;
+	default:
+		CAM_ERR(CAM_JPEG, "Invalid flush type: %d",
+			flush_args->flush_type);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
 static int cam_jpeg_mgr_hw_stop(void *hw_mgr_priv, void *stop_hw_args)
 {
 	int rc;
@@ -1281,6 +1361,7 @@ int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
 	hw_mgr_intf->hw_release = cam_jpeg_mgr_release_hw;
 	hw_mgr_intf->hw_prepare_update = cam_jpeg_mgr_prepare_hw_update;
 	hw_mgr_intf->hw_config = cam_jpeg_mgr_config_hw;
+	hw_mgr_intf->hw_flush = cam_jpeg_mgr_hw_flush;
 	hw_mgr_intf->hw_stop = cam_jpeg_mgr_hw_stop;
 
 	mutex_init(&g_jpeg_hw_mgr.hw_mgr_mutex);
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
index 9e3418d..dce47d2 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
@@ -75,11 +75,13 @@ struct cam_jpeg_hw_cdm_info_t {
  * @list_head: List head
  * @hw_cfg_args: Hw config args
  * @dev_type: Dev type for cfg request
+ * @req_id: Request Id
  */
 struct cam_jpeg_hw_cfg_req {
 	struct list_head list;
 	struct cam_hw_config_args hw_cfg_args;
 	uint32_t dev_type;
+	int64_t req_id;
 };
 
 /**
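The JPEG flush support spans three layers: the context op added in cam_jpeg_context.c forwards to cam_context_flush_dev_to_hw(), which invokes the hw_flush callback registered above, and cam_jpeg_mgr_hw_flush() then drops either all of a context's queued config requests or only the one whose new req_id field matches. A rough sketch of the hw-mgr call for the per-request case — the field names follow the cam_hw_flush_args usage visible in cam_jpeg_mgr_flush_req(), but the wrapper itself is hypothetical:

/* Sketch: flush a single pending request through the JPEG hw mgr. */
static int sketch_jpeg_flush_one_req(struct cam_hw_mgr_intf *hw_mgr_intf,
	void *ctxt_to_hw_map, int64_t *req_id)
{
	struct cam_hw_flush_args flush_args = {0};

	flush_args.ctxt_to_hw_map = ctxt_to_hw_map;
	flush_args.flush_type = CAM_FLUSH_TYPE_REQ;
	flush_args.num_req_pending = 1;
	/* cam_jpeg_mgr_flush_req() dereferences this as an int64_t */
	flush_args.flush_req_pending[0] = req_id;

	if (!hw_mgr_intf->hw_flush)
		return -ENODEV;
	return hw_mgr_intf->hw_flush(hw_mgr_intf->hw_mgr_priv, &flush_args);
}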
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
index 0aa5ade..1ab3143 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
@@ -91,6 +91,17 @@ static int __cam_lrme_ctx_config_dev_in_activated(struct cam_context *ctx,
 	return rc;
 }
 
+static int __cam_lrme_ctx_flush_dev_in_activated(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_flush_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Failed to flush device");
+
+	return rc;
+}
 static int __cam_lrme_ctx_stop_dev_in_activated(struct cam_context *ctx,
 	struct cam_start_stop_dev_cmd *cmd)
 {
@@ -187,6 +198,7 @@ static struct cam_ctx_ops
 			.config_dev = __cam_lrme_ctx_config_dev_in_activated,
 			.release_dev = __cam_lrme_ctx_release_dev_in_activated,
 			.stop_dev = __cam_lrme_ctx_stop_dev_in_activated,
+			.flush_dev = __cam_lrme_ctx_flush_dev_in_activated,
 		},
 		.crm_ops = {},
 		.irq_ops = __cam_lrme_ctx_handle_irq_in_activated,
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
index 448086d..20b8586 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
@@ -648,6 +648,86 @@ static int cam_lrme_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args)
 	return rc;
 }
 
+static int cam_lrme_mgr_hw_flush(void *hw_mgr_priv, void *hw_flush_args)
+{
+	int rc = 0, i;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_flush_args *args;
+	struct cam_lrme_device *hw_device;
+	struct cam_lrme_frame_request *frame_req = NULL, *req_to_flush = NULL;
+	struct cam_lrme_frame_request **req_list = NULL;
+	uint32_t device_index;
+	struct cam_lrme_hw_flush_args lrme_flush_args;
+	uint32_t priority;
+
+	if (!hw_mgr_priv || !hw_flush_args) {
+		CAM_ERR(CAM_LRME, "Invalid args %pK %pK",
+			hw_mgr_priv, hw_flush_args);
+		return -EINVAL;
+	}
+
+	args = (struct cam_hw_flush_args *)hw_flush_args;
+	device_index = ((uint64_t)args->ctxt_to_hw_map & 0xF);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in getting device %d", rc);
+		goto end;
+	}
+
+	req_list = (struct cam_lrme_frame_request **)args->flush_req_pending;
+	for (i = 0; i < args->num_req_pending; i++) {
+		frame_req = req_list[i];
+		memset(frame_req, 0x0, sizeof(*frame_req));
+		cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+			&frame_req->frame_list, &hw_mgr->free_req_lock);
+	}
+
+	req_list = (struct cam_lrme_frame_request **)args->flush_req_active;
+	for (i = 0; i < args->num_req_active; i++) {
+		frame_req = req_list[i];
+		priority = CAM_LRME_DECODE_PRIORITY(args->ctxt_to_hw_map);
+		spin_lock((priority == CAM_LRME_PRIORITY_HIGH) ?
+			&hw_device->high_req_lock :
+			&hw_device->normal_req_lock);
+		if (!list_empty(&frame_req->frame_list)) {
+			list_del_init(&frame_req->frame_list);
+			cam_lrme_mgr_util_put_frame_req(
+				&hw_mgr->frame_free_list,
+				&frame_req->frame_list,
+				&hw_mgr->free_req_lock);
+		} else
+			req_to_flush = frame_req;
+		spin_unlock((priority == CAM_LRME_PRIORITY_HIGH) ?
+			&hw_device->high_req_lock :
+			&hw_device->normal_req_lock);
+	}
+	if (!req_to_flush)
+		goto end;
+	if (hw_device->hw_intf.hw_ops.flush) {
+		lrme_flush_args.ctxt_to_hw_map = req_to_flush->ctxt_to_hw_map;
+		lrme_flush_args.flush_type = args->flush_type;
+		lrme_flush_args.req_to_flush = req_to_flush;
+		rc = hw_device->hw_intf.hw_ops.flush(hw_device->hw_intf.hw_priv,
+			&lrme_flush_args,
+			sizeof(lrme_flush_args));
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Failed in HW flush %d", rc);
+			goto end;
+		}
+	} else {
+		CAM_ERR(CAM_LRME, "No flush ops");
+		goto end;
+	}
+
+end:
+	return rc;
+}
+
 static int cam_lrme_mgr_hw_start(void *hw_mgr_priv, void *hw_start_args)
 {
 	int rc = 0;
@@ -1026,6 +1106,7 @@ int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf,
 	hw_mgr_intf->hw_read = NULL;
 	hw_mgr_intf->hw_write = NULL;
 	hw_mgr_intf->hw_close = NULL;
+	hw_mgr_intf->hw_flush = cam_lrme_mgr_hw_flush;
 
 	g_lrme_hw_mgr.event_cb = cam_lrme_dev_buf_done_cb;
 
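cam_lrme_mgr_hw_flush() recycles every pending request outright, unlinks active requests that are still sitting on a submission queue, and only hands the request actually owned by the hardware down to hw_ops.flush. The high/normal queue lock is chosen twice with the same ternary; a small helper shows the pattern in isolation (a readability sketch, not part of the patch — the field names come from the loop above):

/* Sketch: pick the submission-queue lock matching a decoded priority. */
static inline spinlock_t *sketch_lrme_req_lock(
	struct cam_lrme_device *hw_device, uint32_t priority)
{
	return (priority == CAM_LRME_PRIORITY_HIGH) ?
		&hw_device->high_req_lock : &hw_device->normal_req_lock;
}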
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
index dbd969c..3fc9032 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
@@ -411,6 +411,130 @@ static int cam_lrme_hw_util_submit_req(struct cam_lrme_core *lrme_core,
 	return rc;
 }
 
+static int cam_lrme_hw_util_flush_ctx(struct cam_hw_info *lrme_hw,
+	void *ctxt_to_hw_map)
+{
+	int rc = -ENODEV;
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	struct cam_lrme_hw_cb_args cb_args;
+	struct cam_lrme_frame_request *req_proc, *req_submit;
+	struct cam_lrme_hw_submit_args submit_args;
+
+	rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "reset failed");
+		return rc;
+	}
+
+	lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+	req_proc = lrme_core->req_proc;
+	req_submit = lrme_core->req_submit;
+	lrme_core->req_proc = NULL;
+	lrme_core->req_submit = NULL;
+
+	if (req_submit && req_submit->ctxt_to_hw_map == ctxt_to_hw_map) {
+		cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+		cb_args.frame_req = req_submit;
+		if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+			lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->
+				hw_mgr_cb.data, &cb_args);
+	} else if (req_submit) {
+		submit_args.frame_req = req_submit;
+		submit_args.hw_update_entries = req_submit->hw_update_entries;
+		submit_args.num_hw_update_entries =
+			req_submit->num_hw_update_entries;
+		rc = cam_lrme_hw_util_submit_req(lrme_core, req_submit);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Submit failed");
+		lrme_core->req_submit = req_submit;
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+	}
+
+	if (req_proc && req_proc->ctxt_to_hw_map == ctxt_to_hw_map) {
+		cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+		cb_args.frame_req = req_proc;
+		if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+			lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->
+				hw_mgr_cb.data, &cb_args);
+	} else if (req_proc) {
+		submit_args.frame_req = req_proc;
+		submit_args.hw_update_entries = req_proc->hw_update_entries;
+		submit_args.num_hw_update_entries =
+			req_proc->num_hw_update_entries;
+		rc = cam_lrme_hw_util_submit_req(lrme_core, req_proc);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Submit failed");
+		lrme_core->req_submit = req_proc;
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+	}
+
+	return rc;
+}
+
+static int cam_lrme_hw_util_flush_req(struct cam_hw_info *lrme_hw,
+	struct cam_lrme_frame_request *req_to_flush)
+{
+	int rc = -ENODEV;
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	struct cam_lrme_hw_cb_args cb_args;
+	struct cam_lrme_frame_request *req_proc, *req_submit;
+	struct cam_lrme_hw_submit_args submit_args;
+
+	rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "reset failed");
+		return rc;
+	}
+
+	lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+	req_proc = lrme_core->req_proc;
+	req_submit = lrme_core->req_submit;
+	lrme_core->req_proc = NULL;
+	lrme_core->req_submit = NULL;
+
+	if (req_submit && req_submit == req_to_flush) {
+		cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+		cb_args.frame_req = req_submit;
+		if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+			lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->
+				hw_mgr_cb.data, &cb_args);
+	} else if (req_submit) {
+		submit_args.frame_req = req_submit;
+		submit_args.hw_update_entries = req_submit->hw_update_entries;
+		submit_args.num_hw_update_entries =
+			req_submit->num_hw_update_entries;
+		rc = cam_lrme_hw_util_submit_req(lrme_core, req_submit);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Submit failed");
+		lrme_core->req_submit = req_submit;
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+	}
+
+	if (req_proc && req_proc == req_to_flush) {
+		cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+		cb_args.frame_req = req_proc;
+		if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+			lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->
+				hw_mgr_cb.data, &cb_args);
+	} else if (req_proc) {
+		submit_args.frame_req = req_proc;
+		submit_args.hw_update_entries = req_proc->hw_update_entries;
+		submit_args.num_hw_update_entries =
+			req_proc->num_hw_update_entries;
+		rc = cam_lrme_hw_util_submit_req(lrme_core, req_proc);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Submit failed");
+		lrme_core->req_submit = req_proc;
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+	}
+
+	return rc;
+}
+
 static int cam_lrme_hw_util_process_err(struct cam_hw_info *lrme_hw)
 {
 	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
@@ -595,7 +720,10 @@ int cam_lrme_hw_process_irq(void *priv, void *data)
 
 	if (top_irq_status & (1 << 4)) {
 		CAM_DBG(CAM_LRME, "IDLE");
-
+		if (!lrme_core->req_proc) {
+			CAM_DBG(CAM_LRME, "No frame request to process, skip idle");
+			goto end;
+		}
 		rc = cam_lrme_hw_util_process_idle(lrme_hw, &cb_args);
 		if (rc) {
 			CAM_ERR(CAM_LRME, "Process idle failed");
@@ -868,6 +996,81 @@ int cam_lrme_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
 	return 0;
 }
 
+int cam_lrme_hw_flush(void *hw_priv, void *hw_flush_args, uint32_t arg_size)
+{
+	struct cam_lrme_core         *lrme_core = NULL;
+	struct cam_hw_info           *lrme_hw = hw_priv;
+	struct cam_lrme_hw_flush_args *flush_args =
+		(struct cam_lrme_hw_flush_args *)hw_flush_args;
+	int rc = -ENODEV;
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_LRME, "Invalid arguments %pK", hw_priv);
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING &&
+		lrme_core->state != CAM_LRME_CORE_STATE_REQ_PENDING &&
+		lrme_core->state != CAM_LRME_CORE_STATE_REQ_PROC_PEND) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_DBG(CAM_LRME, "Flush not needed in %d state",
+			lrme_core->state);
+		return 0;
+	}
+
+	if (!lrme_core->req_proc && !lrme_core->req_submit) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_DBG(CAM_LRME, "no req in device");
+		return 0;
+	}
+
+	switch (flush_args->flush_type) {
+	case CAM_FLUSH_TYPE_ALL:
+		if ((!lrme_core->req_submit ||
+			lrme_core->req_submit->ctxt_to_hw_map !=
+			flush_args->ctxt_to_hw_map) &&
+			(!lrme_core->req_proc ||
+			lrme_core->req_proc->ctxt_to_hw_map !=
+			flush_args->ctxt_to_hw_map)) {
+			mutex_unlock(&lrme_hw->hw_mutex);
+			CAM_DBG(CAM_LRME, "hw running on different ctx");
+			return 0;
+		}
+		rc = cam_lrme_hw_util_flush_ctx(lrme_hw,
+			flush_args->ctxt_to_hw_map);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Flush all failed");
+		break;
+
+	case CAM_FLUSH_TYPE_REQ:
+		if ((!lrme_core->req_submit ||
+			lrme_core->req_submit != flush_args->req_to_flush) &&
+			(!lrme_core->req_proc ||
+			lrme_core->req_proc != flush_args->req_to_flush)) {
+			mutex_unlock(&lrme_hw->hw_mutex);
+			CAM_DBG(CAM_LRME, "hw running on different req");
+			return 0;
+		}
+		rc = cam_lrme_hw_util_flush_req(lrme_hw,
+			flush_args->req_to_flush);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Flush req failed");
+		break;
+
+	default:
+		CAM_ERR(CAM_LRME, "Unsupported flush type");
+		break;
+	}
+
+	mutex_unlock(&lrme_hw->hw_mutex);
+
+	return rc;
+}
+
 int cam_lrme_hw_get_caps(void *hw_priv, void *get_hw_cap_args,
 	uint32_t arg_size)
 {
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
index 2e63752..da42c84 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
@@ -201,6 +201,7 @@ static int cam_lrme_hw_dev_probe(struct platform_device *pdev)
 	lrme_hw_intf.hw_ops.read = NULL;
 	lrme_hw_intf.hw_ops.write = NULL;
 	lrme_hw_intf.hw_ops.process_cmd = cam_lrme_hw_process_cmd;
+	lrme_hw_intf.hw_ops.flush = cam_lrme_hw_flush;
 	lrme_hw_intf.hw_type = CAM_HW_LRME;
 
 	rc = cam_cdm_get_iommu_handle("lrmecdm", &lrme_core->cdm_iommu);
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index 46e9d5d..3d230af 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -31,6 +31,7 @@ int cam_sync_create(int32_t *sync_obj, const char *name)
 		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
 		if (idx >= CAM_SYNC_MAX_OBJS)
 			return -ENOMEM;
+		CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
 		bit = test_and_set_bit(idx, sync_dev->bitmap);
 	} while (bit);
 
@@ -97,6 +98,8 @@ int cam_sync_register_callback(sync_callback cb_func,
 		INIT_WORK(&sync_cb->cb_dispatch_work,
 			cam_sync_util_cb_dispatch);
 		sync_cb->status = row->state;
+		CAM_DBG(CAM_SYNC, "Callback trigger for sync object:%d",
+			sync_cb->sync_obj);
 		queue_work(sync_dev->work_queue,
 			&sync_cb->cb_dispatch_work);
 
@@ -134,6 +137,8 @@ int cam_sync_deregister_callback(sync_callback cb_func,
 		return -EINVAL;
 	}
 
+	CAM_DBG(CAM_SYNC, "Deregistering callback for sync object: %d",
+		sync_obj);
 	list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
 		if (sync_cb->callback_func == cb_func &&
 			sync_cb->cb_data == userdata) {
@@ -202,6 +207,9 @@ int cam_sync_signal(int32_t sync_obj, uint32_t status)
 	rc = cam_sync_util_add_to_signalable_list(sync_obj, status, &sync_list);
 	if (rc < 0) {
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		CAM_ERR(CAM_SYNC,
+			"Error: Unable to add sync object :%d to signalable list",
+			sync_obj);
 		return rc;
 	}
 
@@ -261,6 +269,7 @@ int cam_sync_signal(int32_t sync_obj, uint32_t status)
 		}
 
 		/* Dispatch kernel callbacks if any were registered earlier */
+
 		list_for_each_entry_safe(sync_cb,
 			temp_sync_cb, &signalable_row->callback_list, list) {
 			sync_cb->status = list_info->status;
@@ -347,7 +356,7 @@ int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
 		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 		return -EINVAL;
 	}
-
+	CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
 	*merged_obj = idx;
 	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index afac68d..ed69829 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -51,6 +51,8 @@ int cam_sync_init_object(struct sync_table_row *table,
 	init_completion(&row->signaled);
 	INIT_LIST_HEAD(&row->callback_list);
 	INIT_LIST_HEAD(&row->user_payload_list);
+	CAM_DBG(CAM_SYNC, "Sync object Initialised: sync_id:%u row_state:%u",
+		row->sync_id, row->state);
 
 	return 0;
 }
@@ -215,6 +217,7 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
 		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 		return -EINVAL;
 	}
+	row->state = CAM_SYNC_STATE_INVALID;
 
 	/* Object's child and parent objects will be added into this list */
 	INIT_LIST_HEAD(&temp_child_list);
@@ -303,7 +306,6 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
 		kfree(sync_cb);
 	}
 
-	row->state = CAM_SYNC_STATE_INVALID;
 	memset(row, 0, sizeof(*row));
 	clear_bit(idx, sync_dev->bitmap);
 	INIT_LIST_HEAD(&row->callback_list);
@@ -312,6 +314,7 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
 	INIT_LIST_HEAD(&row->user_payload_list);
 	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 
+	CAM_DBG(CAM_SYNC, "Destroying sync obj:%d successful", idx);
 	return 0;
 }
 
@@ -349,6 +352,8 @@ void cam_sync_util_send_v4l2_event(uint32_t id,
 	memcpy(payload_data, payload, len);
 
 	v4l2_event_queue(sync_dev->vdev, &event);
+	CAM_DBG(CAM_SYNC, "send v4l2 event for sync_obj: %d",
+		sync_obj);
 }
 
 int cam_sync_util_validate_merge(uint32_t *sync_obj, uint32_t num_objs)
@@ -391,6 +396,8 @@ int cam_sync_util_add_to_signalable_list(int32_t sync_obj,
 	signalable_info->status = status;
 
 	list_add_tail(&signalable_info->list, sync_list);
+	CAM_DBG(CAM_SYNC, "Add sync_obj: %d with status: %d to signalable list",
+		sync_obj, status);
 
 	return 0;
 }
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index 786107b..bd56310 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -325,7 +325,7 @@ int cam_soc_util_clk_disable(struct clk *clk, const char *clk_name)
  *
  * @return:             success or failure
  */
-static int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
+int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
 	enum cam_vote_level clk_level)
 {
 	int i, rc = 0;
@@ -372,7 +372,7 @@ static int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
  *
  * @return:             success or failure
  */
-static void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info)
+void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info)
 {
 	int i;
 
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
index 4a87d50..4b57d54 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -611,4 +611,9 @@ static inline uint32_t cam_soc_util_r_mb(struct cam_hw_soc_info *soc_info,
 int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info,
 	uint32_t base_index, uint32_t offset, int size);
 
+void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info);
+
+int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level clk_level);
+
 #endif /* _CAM_SOC_UTIL_H_ */
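Promoting the two default-clock helpers from static to extern is what lets ipe_core.c gate only the clocks while regulators and IRQs stay up. The expected pairing — enable at a vote level, disable unconditionally — in a minimal sketch (the block-programming step is a placeholder):

/* Sketch: bounce only the default clocks of a camera block. */
static int sketch_clk_bounce(struct cam_hw_soc_info *soc_info)
{
	int rc;

	rc = cam_soc_util_clk_enable_default(soc_info, CAM_SVS_VOTE);
	if (rc)
		return rc;

	/* ... access the hardware block here ... */

	cam_soc_util_clk_disable_default(soc_info);
	return 0;
}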
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index a455357..fd031d7 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -3190,15 +3190,12 @@ void sde_rotator_core_dump(struct sde_rot_mgr *mgr)
 	}
 
 	sde_rotator_resource_ctrl(mgr, true);
-	/* dump first snapshot */
+
 	if (mgr->ops_hw_dump_status)
-		mgr->ops_hw_dump_status(mgr->hw_data);
+		mgr->ops_hw_dump_status(mgr);
 
 	SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus", "vbif_dbg_bus");
 
-	/* dump second snapshot for comparison */
-	if (mgr->ops_hw_dump_status)
-		mgr->ops_hw_dump_status(mgr->hw_data);
 	sde_rotator_resource_ctrl(mgr, false);
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 1c9c91d..faa47a6 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -28,6 +28,7 @@
 #define DEFAULT_VIDEO_CONCEAL_COLOR_BLACK 0x8020010
 #define MB_SIZE_IN_PIXEL (16 * 16)
 #define OPERATING_FRAME_RATE_STEP (1 << 16)
+#define MAX_VP9D_INST_COUNT 6
 
 static const char *const mpeg_video_stream_format[] = {
 	"NAL Format Start Codes",
@@ -551,6 +552,24 @@ struct msm_vidc_format vdec_formats[] = {
 	},
 };
 
+static bool msm_vidc_check_for_vp9d_overload(struct msm_vidc_core *core)
+{
+	u32 vp9d_instance_count = 0;
+	struct msm_vidc_inst *inst = NULL;
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list) {
+		if (inst->session_type == MSM_VIDC_DECODER &&
+			inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9)
+			vp9d_instance_count++;
+	}
+	mutex_unlock(&core->lock);
+
+	if (vp9d_instance_count > MAX_VP9D_INST_COUNT)
+		return true;
+	return false;
+}
+
 int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
 {
 	struct msm_vidc_format *fmt = NULL;
@@ -649,6 +668,14 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
 		memcpy(&inst->fmts[fmt->type], fmt,
 				sizeof(struct msm_vidc_format));
 
+		if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9) {
+			if (msm_vidc_check_for_vp9d_overload(inst->core)) {
+				dprintk(VIDC_ERR, "VP9 Decode overload\n");
+				rc = -ENOTSUPP;
+				goto err_invalid_fmt;
+			}
+		}
+
 		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
 		if (rc) {
 			dprintk(VIDC_ERR, "Failed to open instance\n");
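One subtlety in the overload check: by the time it runs, the current instance is already on core->instances and its OUTPUT_PORT fourcc has just been set by the memcpy above, so the count includes the session being configured. With the strict `>` comparison, up to MAX_VP9D_INST_COUNT concurrent VP9 decode sessions are admitted and the next s_fmt fails with -ENOTSUPP. The rule in isolation, as a sketch:

/* Sketch: 'active' counts the instance being configured, so six
 * concurrent VP9 decoders are allowed and the seventh is refused. */
static bool sketch_vp9d_overloaded(u32 active_vp9d_instances)
{
	return active_vp9d_instances > MAX_VP9D_INST_COUNT; /* 6 here */
}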
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 7bb6d89..dd749d6 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -21,7 +21,7 @@
 #define MIN_BIT_RATE 32000
 #define MAX_BIT_RATE 300000000
 #define DEFAULT_BIT_RATE 64000
-#define BIT_RATE_STEP 100
+#define BIT_RATE_STEP 1
 #define DEFAULT_FRAME_RATE 15
 #define OPERATING_FRAME_RATE_STEP (1 << 16)
 #define MAX_SLICE_BYTE_SIZE ((MAX_BIT_RATE)>>3)
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 5020ae5..0c914ac 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -44,6 +44,7 @@ static const struct pin_config_item conf_items[] = {
 	PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL, false),
 	PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false),
 	PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode", true),
+	PCONFDUMP(PIN_CONFIG_OUTPUT_ENABLE, "output enabled", NULL, false),
 	PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true),
 	PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
 	PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL, true),
@@ -172,6 +173,8 @@ static const struct pinconf_generic_params dt_params[] = {
 	{ "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
 	{ "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 },
 	{ "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
+	{ "output-disable", PIN_CONFIG_OUTPUT_ENABLE, 0 },
+	{ "output-enable", PIN_CONFIG_OUTPUT_ENABLE, 1 },
 	{ "output-high", PIN_CONFIG_OUTPUT, 1, },
 	{ "output-low", PIN_CONFIG_OUTPUT, 0, },
 	{ "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 2ecbd22..0991a99 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -437,6 +437,9 @@ static int pmic_gpio_config_get(struct pinctrl_dev *pctldev,
 	case PIN_CONFIG_INPUT_ENABLE:
 		arg = pad->input_enabled;
 		break;
+	case PIN_CONFIG_OUTPUT_ENABLE:
+		arg = pad->output_enabled;
+		break;
 	case PIN_CONFIG_OUTPUT:
 		arg = pad->out_value;
 		break;
@@ -513,6 +516,9 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
 		case PIN_CONFIG_INPUT_ENABLE:
 			pad->input_enabled = arg ? true : false;
 			break;
+		case PIN_CONFIG_OUTPUT_ENABLE:
+			pad->output_enabled = arg ? true : false;
+			break;
 		case PIN_CONFIG_OUTPUT:
 			pad->output_enabled = true;
 			pad->out_value = arg;
@@ -563,14 +569,6 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
 	if (ret < 0)
 		return ret;
 
-	val = PMIC_GPIO_MODE_DIGITAL_INPUT;
-	if (pad->output_enabled) {
-		if (pad->input_enabled)
-			val = PMIC_GPIO_MODE_DIGITAL_INPUT_OUTPUT;
-		else
-			val = PMIC_GPIO_MODE_DIGITAL_OUTPUT;
-	}
-
 	if (pad->dtest_buffer != INT_MAX) {
 		val = pad->dtest_buffer;
 		if (pad->lv_mv_type)
@@ -582,6 +580,14 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
 			return ret;
 	}
 
+	val = PMIC_GPIO_MODE_DIGITAL_INPUT;
+	if (pad->output_enabled) {
+		if (pad->input_enabled)
+			val = PMIC_GPIO_MODE_DIGITAL_INPUT_OUTPUT;
+		else
+			val = PMIC_GPIO_MODE_DIGITAL_OUTPUT;
+	}
+
 	if (pad->lv_mv_type) {
 		if (pad->function == PMIC_GPIO_FUNC_INDEX_ANALOG) {
 			val = PMIC_GPIO_MODE_ANALOG_PASS_THRU;
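With the new dt_params entries, the generic parser turns an `output-enable;`/`output-disable;` node property into a packed PIN_CONFIG_OUTPUT_ENABLE config, which pmic_gpio_config_set() now maps onto the pad's output_enabled flag (note the digital-mode computation also moves below the dtest-buffer block, so `val` is no longer overwritten before use). A sketch of the packed value the parser produces — it assumes PIN_CONFIG_OUTPUT_ENABLE is added to enum pin_config_param elsewhere in this change:

#include <linux/pinctrl/pinconf-generic.h>

/* Sketch: the packed config word that "output-enable;" (arg 1) or
 * "output-disable;" (arg 0) resolves to via dt_params[]. */
static unsigned long sketch_output_enable_cfg(bool enable)
{
	return pinconf_to_config_packed(PIN_CONFIG_OUTPUT_ENABLE,
					enable ? 1 : 0);
}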
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index 154ac26..66a68cd 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -261,133 +261,6 @@ static ssize_t gsi_dump_ch(struct file *file,
 	return count;
 }
 
-static ssize_t gsi_dump_ee(struct file *file,
-		const char __user *buf, size_t count, loff_t *ppos)
-{
-	uint32_t val;
-
-	val = gsi_readl(gsi_ctx->base +
-		GSI_GSI_MANAGER_EE_QOS_n_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d QOS 0x%x\n", gsi_ctx->per.ee, val);
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d STATUS 0x%x\n", gsi_ctx->per.ee, val);
-	if (gsi_ctx->per.ver == GSI_VER_1_0) {
-		val = gsi_readl(gsi_ctx->base +
-			GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
-		TERR("EE%2d HW_PARAM 0x%x\n", gsi_ctx->per.ee, val);
-	} else if (gsi_ctx->per.ver == GSI_VER_1_2) {
-		val = gsi_readl(gsi_ctx->base +
-			GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
-		TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
-		val = gsi_readl(gsi_ctx->base +
-			GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
-		TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
-	} else if (gsi_ctx->per.ver == GSI_VER_1_3) {
-		val = gsi_readl(gsi_ctx->base +
-			GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
-		TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
-		val = gsi_readl(gsi_ctx->base +
-			GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
-		TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
-		val = gsi_readl(gsi_ctx->base +
-			GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
-		TERR("EE%2d HW_PARAM_2 0x%x\n", gsi_ctx->per.ee, val);
-	} else if (gsi_ctx->per.ver == GSI_VER_2_0) {
-		val = gsi_readl(gsi_ctx->base +
-			GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
-		TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
-		val = gsi_readl(gsi_ctx->base +
-			GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
-		TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
-		val = gsi_readl(gsi_ctx->base +
-			GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
-		TERR("EE%2d HW_PARAM_2 0x%x\n", gsi_ctx->per.ee, val);
-	} else {
-		WARN_ON(1);
-	}
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_SW_VERSION_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d SW_VERSION 0x%x\n", gsi_ctx->per.ee, val);
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_MCS_CODE_VER_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d MCS_CODE_VER 0x%x\n", gsi_ctx->per.ee, val);
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d TYPE_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val);
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d CH_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val);
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d EV_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val);
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d IEOB_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val);
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d GLOB_IRQ_EN 0x%x\n", gsi_ctx->per.ee, val);
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d GSI_IRQ_EN 0x%x\n", gsi_ctx->per.ee, val);
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d INTSET 0x%x\n", gsi_ctx->per.ee, val);
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_CNTXT_MSI_BASE_LSB_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d MSI_BASE_LSB 0x%x\n", gsi_ctx->per.ee, val);
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_CNTXT_MSI_BASE_MSB_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d MSI_BASE_MSB 0x%x\n", gsi_ctx->per.ee, val);
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_CNTXT_INT_VEC_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d INT_VEC 0x%x\n", gsi_ctx->per.ee, val);
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d SCR0 0x%x\n", gsi_ctx->per.ee, val);
-	val = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_CNTXT_SCRATCH_1_OFFS(gsi_ctx->per.ee));
-	TERR("EE%2d SCR1 0x%x\n", gsi_ctx->per.ee, val);
-
-	return count;
-}
-
-static ssize_t gsi_dump_map(struct file *file,
-		const char __user *buf, size_t count, loff_t *ppos)
-{
-	struct gsi_chan_ctx *ctx;
-	uint32_t val1;
-	uint32_t val2;
-	int i;
-
-	TERR("EVT bitmap 0x%lx\n", gsi_ctx->evt_bmap);
-	for (i = 0; i < gsi_ctx->max_ch; i++) {
-		ctx = &gsi_ctx->chan[i];
-
-		if (ctx->allocated) {
-			TERR("VIRT CH%2d -> VIRT EV%2d\n", ctx->props.ch_id,
-				ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
-			val1 = gsi_readl(gsi_ctx->base +
-				GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_OFFS(i,
-					gsi_ctx->per.ee));
-			TERR("VIRT CH%2d -> PHYS CH%2d\n", ctx->props.ch_id,
-				val1 &
-				GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK);
-			if (ctx->evtr) {
-				val2 = gsi_readl(gsi_ctx->base +
-				GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_OFFS(
-					ctx->evtr->id, gsi_ctx->per.ee));
-				TERR("VRT EV%2d -> PHYS EV%2d\n", ctx->evtr->id,
-				val2 &
-				GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK);
-			}
-			TERR("\n");
-		}
-	}
-
-	return count;
-}
-
 static void gsi_dump_ch_stats(struct gsi_chan_ctx *ctx)
 {
 	if (!ctx->allocated)
@@ -797,14 +670,6 @@ const struct file_operations gsi_ch_dump_ops = {
 	.write = gsi_dump_ch,
 };
 
-const struct file_operations gsi_ee_dump_ops = {
-	.write = gsi_dump_ee,
-};
-
-const struct file_operations gsi_map_ops = {
-	.write = gsi_dump_map,
-};
-
 const struct file_operations gsi_stats_ops = {
 	.write = gsi_dump_stats,
 };
@@ -832,7 +697,6 @@ const struct file_operations gsi_ipc_low_ops = {
 void gsi_debugfs_init(void)
 {
 	static struct dentry *dfile;
-	const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
 	const mode_t write_only_mode = S_IWUSR | S_IWGRP;
 
 	dent = debugfs_create_dir("gsi", 0);
@@ -855,20 +719,6 @@ void gsi_debugfs_init(void)
 		goto fail;
 	}
 
-	dfile = debugfs_create_file("ee_dump", read_only_mode, dent,
-			0, &gsi_ee_dump_ops);
-	if (!dfile || IS_ERR(dfile)) {
-		TERR("fail to create ee_dump file\n");
-		goto fail;
-	}
-
-	dfile = debugfs_create_file("map", read_only_mode, dent,
-			0, &gsi_map_ops);
-	if (!dfile || IS_ERR(dfile)) {
-		TERR("fail to create map file\n");
-		goto fail;
-	}
-
 	dfile = debugfs_create_file("stats", write_only_mode, dent,
 			0, &gsi_stats_ops);
 	if (!dfile || IS_ERR(dfile)) {
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 90920d9..d274490 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -741,6 +741,10 @@ static int ipa3_usb_register_pm(enum ipa3_usb_transport_type ttype)
 		&ipa3_usb_ctx->ttype_ctx[ttype];
 	int result;
 
+	/* create PM resources for the first tethering protocol only */
+	if (ipa3_usb_ctx->num_init_prot > 0)
+		return 0;
+
 	memset(&ttype_ctx->pm_ctx.reg_params, 0,
 		sizeof(ttype_ctx->pm_ctx.reg_params));
 	ttype_ctx->pm_ctx.reg_params.name = (ttype == IPA_USB_TRANSPORT_DPL) ?
diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
index 4958c69..583c0ac8 100644
--- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
@@ -2113,6 +2113,7 @@ static int rndis_ipa_ep_registers_cfg(
 {
 	int result;
 	struct ipa_ep_cfg *usb_to_ipa_ep_cfg;
+	int add = 0;
 
 	if (deaggr_enable) {
 		usb_to_ipa_ep_cfg = &usb_to_ipa_ep_cfg_deaggr_en;
@@ -2120,17 +2121,18 @@ static int rndis_ipa_ep_registers_cfg(
 	} else {
 		usb_to_ipa_ep_cfg = &usb_to_ipa_ep_cfg_deaggr_dis;
 		RNDIS_IPA_DEBUG("deaggregation disabled\n");
+		add = sizeof(struct rndis_pkt_hdr);
 	}
 
 	if (is_vlan_mode) {
 		usb_to_ipa_ep_cfg->hdr.hdr_len =
-			VLAN_ETH_HLEN + sizeof(struct rndis_pkt_hdr);
+			VLAN_ETH_HLEN + add;
 		ipa_to_usb_ep_cfg.hdr.hdr_len =
 			VLAN_ETH_HLEN + sizeof(struct rndis_pkt_hdr);
 		ipa_to_usb_ep_cfg.hdr.hdr_additional_const_len = VLAN_ETH_HLEN;
 	} else {
 		usb_to_ipa_ep_cfg->hdr.hdr_len =
-			ETH_HLEN + sizeof(struct rndis_pkt_hdr);
+			ETH_HLEN + add;
 		ipa_to_usb_ep_cfg.hdr.hdr_len =
 			ETH_HLEN + sizeof(struct rndis_pkt_hdr);
 		ipa_to_usb_ep_cfg.hdr.hdr_additional_const_len = ETH_HLEN;
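The new `add` term encodes the deaggregation trade-off: with deaggregation enabled, the IPA deaggr engine itself consumes the RNDIS packet header on the USB-to-IPA direction, so the endpoint's header length must cover only the Ethernet (or VLAN) header; with it disabled, the RNDIS header still precedes every frame and must stay in hdr_len. The downlink (IPA-to-USB) length is unchanged since IPA always prepends a full RNDIS header there. The resulting uplink lengths, as a sketch:

/* Sketch: uplink (USB->IPA) hdr_len after this change. */
static u32 sketch_ul_hdr_len(bool deaggr_enable, bool is_vlan_mode)
{
	u32 l2_len = is_vlan_mode ? VLAN_ETH_HLEN : ETH_HLEN;

	/* the deaggr engine strips the RNDIS header before parsing */
	return deaggr_enable ? l2_len : l2_len + sizeof(struct rndis_pkt_hdr);
}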
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 07dc7b0..3faf204 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -1999,6 +1999,7 @@ static int ipa_q6_set_ex_path_dis_agg(void)
 	int index;
 	struct ipa_register_write *reg_write;
 	int retval;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	desc = kcalloc(ipa_ctx->ipa_num_pipes, sizeof(struct ipa_desc),
 			GFP_KERNEL);
@@ -2016,7 +2017,7 @@ static int ipa_q6_set_ex_path_dis_agg(void)
 		if (ipa_ctx->ep[ep_idx].valid &&
 			ipa_ctx->ep[ep_idx].skip_ep_cfg) {
 			BUG_ON(num_descs >= ipa_ctx->ipa_num_pipes);
-			reg_write = kzalloc(sizeof(*reg_write), GFP_KERNEL);
+			reg_write = kzalloc(sizeof(*reg_write), flag);
 
 			if (!reg_write) {
 				IPAERR("failed to allocate memory\n");
@@ -2049,7 +2050,7 @@ static int ipa_q6_set_ex_path_dis_agg(void)
 			continue;
 		if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) ||
 			IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx)) {
-			reg_write = kzalloc(sizeof(*reg_write), GFP_KERNEL);
+			reg_write = kzalloc(sizeof(*reg_write), flag);
 
 			if (!reg_write) {
 				IPAERR("failed to allocate memory\n");
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index c018fc9..a297f24 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -132,6 +132,7 @@ static struct dentry *dfile_ipa_poll_iteration;
 static char dbg_buff[IPA_MAX_MSG_LEN];
 static char *active_clients_buf;
 static s8 ep_reg_idx;
+static void *ipa_ipc_low_buff;
 
 int _ipa_read_gen_reg_v1_1(char *buff, int max_len)
 {
@@ -1834,23 +1835,20 @@ static ssize_t ipa_enable_ipc_low(struct file *file,
 	if (kstrtos8(dbg_buff, 0, &option))
 		return -EFAULT;
 
+	mutex_lock(&ipa_ctx->lock);
 	if (option) {
-		if (!ipa_ctx->logbuf_low) {
-			ipa_ctx->logbuf_low =
+		if (!ipa_ipc_low_buff) {
+			ipa_ipc_low_buff =
 				ipc_log_context_create(IPA_IPC_LOG_PAGES,
 				"ipa_low", 0);
+			if (ipa_ipc_low_buff == NULL)
+				IPAERR("failed to get logbuf_low\n");
 		}
-
-		if (ipa_ctx->logbuf_low == NULL) {
-			IPAERR("failed to get logbuf_low\n");
-			return -EFAULT;
-		}
-
+		ipa_ctx->logbuf_low = ipa_ipc_low_buff;
 	} else {
-		if (ipa_ctx->logbuf_low)
-			ipc_log_context_destroy(ipa_ctx->logbuf_low);
-			ipa_ctx->logbuf_low = NULL;
+		ipa_ctx->logbuf_low = NULL;
 	}
+	mutex_unlock(&ipa_ctx->lock);
 
 	return count;
 }
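The rewritten debugfs toggle fixes two problems at once: the old path could destroy a log context other threads were still writing to (and its misleading indentation hid that the NULL assignment ran unconditionally), and a later re-enable recreated the buffer and lost history. The new scheme allocates the IPC log context once, keeps it in ipa_ipc_low_buff for the driver's lifetime, and merely publishes or hides the pointer under ipa_ctx->lock. The pattern in isolation, a sketch assuming the ipc_logging API used above:

/* Sketch: create-once / toggle-visibility for a debug log buffer. */
static void *sketch_low_buf;

static void sketch_toggle_low_log(bool enable, void **published)
{
	if (enable) {
		if (!sketch_low_buf)
			sketch_low_buf = ipc_log_context_create(
				IPA_IPC_LOG_PAGES, "ipa_low", 0);
		*published = sketch_low_buf;	/* may be NULL on failure */
	} else {
		*published = NULL;	/* context kept for later re-enable */
	}
}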
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 980b1f3..ffca1f5 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -4472,7 +4472,7 @@ int ipa_tag_process(struct ipa_desc desc[],
 	}
 
 	/* IP_PACKET_INIT IC for tag status to be sent to apps */
-	pkt_init = kzalloc(sizeof(*pkt_init), GFP_KERNEL);
+	pkt_init = kzalloc(sizeof(*pkt_init), flag);
 	if (!pkt_init) {
 		IPAERR("failed to allocate memory\n");
 		res = -ENOMEM;
@@ -4491,7 +4491,7 @@ int ipa_tag_process(struct ipa_desc desc[],
 	desc_idx++;
 
 	/* NO-OP IC for ensuring that IPA pipeline is empty */
-	reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+	reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
 	if (!reg_write_nop) {
 		IPAERR("no mem\n");
 		res = -ENOMEM;
@@ -4510,7 +4510,7 @@ int ipa_tag_process(struct ipa_desc desc[],
 	desc_idx++;
 
 	/* status IC */
-	status = kzalloc(sizeof(*status), GFP_KERNEL);
+	status = kzalloc(sizeof(*status), flag);
 	if (!status) {
 		IPAERR("no mem\n");
 		res = -ENOMEM;
@@ -4546,7 +4546,7 @@ int ipa_tag_process(struct ipa_desc desc[],
 	atomic_set(&comp->cnt, 2);
 
 	/* dummy packet to send to IPA. packet payload is a completion object */
-	dummy_skb = alloc_skb(sizeof(comp), GFP_KERNEL);
+	dummy_skb = alloc_skb(sizeof(comp), flag);
 	if (!dummy_skb) {
 		IPAERR("failed to allocate memory\n");
 		res = -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 92177f1..7e55024 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -2648,7 +2648,7 @@ static int rmnet_ipa_set_data_quota_modem(struct wan_ioctl_set_data_quota *data)
 	if (index == MAX_NUM_OF_MUX_CHANNEL) {
 		IPAWANERR("%s is an invalid iface name\n",
 			  data->interface_name);
-		return -EFAULT;
+		return -ENODEV;
 	}
 
 	mux_id = mux_channel[index].mux_id;
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
index 5ef3063..0c1cabf 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
@@ -61,7 +61,7 @@ static dev_t device;
 
 static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-	int retval = 0;
+	int retval = 0, rc = 0;
 	u32 pyld_sz;
 	u8 *param = NULL;
 
@@ -184,10 +184,14 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 			retval = -EFAULT;
 			break;
 		}
-		if (rmnet_ipa_set_data_quota(
-		(struct wan_ioctl_set_data_quota *)param)) {
+		rc = rmnet_ipa_set_data_quota(
+			(struct wan_ioctl_set_data_quota *)param);
+		if (rc != 0) {
 			IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n");
-			retval = -EFAULT;
+			if (rc == -ENODEV)
+				retval = -ENODEV;
+			else
+				retval = -EFAULT;
 			break;
 		}
 		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 57b988b..f994db5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -4650,8 +4650,7 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
 		return -EFAULT;
 	}
 
-	if (count > 0)
-		dbg_buff[count] = '\0';
+	dbg_buff[count] = '\0';
 
 	IPADBG("user input string %s\n", dbg_buff);
 
@@ -4680,11 +4679,15 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
 			return count;
 		}
 
+		/* trim ending newline character if any */
+		if (count && (dbg_buff[count - 1] == '\n'))
+			dbg_buff[count - 1] = '\0';
+
 		if (!strcasecmp(dbg_buff, "MHI")) {
 			ipa3_ctx->ipa_config_is_mhi = true;
 			pr_info(
 				"IPA is loading with MHI configuration\n");
-		} else if (!strcmp(dbg_buff, "1\n")) {
+		} else if (!strcmp(dbg_buff, "1")) {
 			pr_info(
 				"IPA is loading with non MHI configuration\n");
 		} else {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
index be342cb..bafc3ca 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
@@ -352,7 +352,7 @@ static int do_clk_scaling(void)
 	clk_scaling = &ipa_pm_ctx->clk_scaling;
 
 	mutex_lock(&ipa_pm_ctx->client_mutex);
-	IPA_PM_DBG("clock scaling started\n");
+	IPA_PM_DBG_LOW("clock scaling started\n");
 	tput = calculate_throughput();
 	ipa_pm_ctx->aggregated_tput = tput;
 	set_current_threshold();
@@ -364,7 +364,7 @@ static int do_clk_scaling(void)
 			new_th_idx++;
 	}
 
-	IPA_PM_DBG("old idx was at %d\n", ipa_pm_ctx->clk_scaling.cur_vote);
+	IPA_PM_DBG_LOW("old idx was at %d\n", ipa_pm_ctx->clk_scaling.cur_vote);
 
 
 	if (ipa_pm_ctx->clk_scaling.cur_vote != new_th_idx) {
@@ -372,7 +372,7 @@ static int do_clk_scaling(void)
 		ipa3_set_clock_plan_from_pm(ipa_pm_ctx->clk_scaling.cur_vote);
 	}
 
-	IPA_PM_DBG("new idx is at %d\n", ipa_pm_ctx->clk_scaling.cur_vote);
+	IPA_PM_DBG_LOW("new idx is at %d\n", ipa_pm_ctx->clk_scaling.cur_vote);
 
 	return 0;
 }
@@ -683,6 +683,11 @@ int ipa_pm_register(struct ipa_pm_register_params *params, u32 *hdl)
 {
 	struct ipa_pm_client *client;
 
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
 	if (params == NULL || hdl == NULL || params->name == NULL) {
 		IPA_PM_ERR("Invalid Params\n");
 		return -EINVAL;
@@ -749,6 +754,11 @@ int ipa_pm_deregister(u32 hdl)
 	int i;
 	unsigned long flags;
 
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
 	if (hdl >= IPA_PM_MAX_CLIENTS) {
 		IPA_PM_ERR("Invalid Param\n");
 		return -EINVAL;
@@ -806,6 +816,11 @@ int ipa_pm_associate_ipa_cons_to_client(u32 hdl, enum ipa_client_type consumer)
 {
 	int idx;
 
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
 	if (hdl >= IPA_PM_MAX_CLIENTS || consumer < 0 ||
 		consumer >= IPA_CLIENT_MAX) {
 		IPA_PM_ERR("invalid params\n");
@@ -924,6 +939,11 @@ static int ipa_pm_activate_helper(struct ipa_pm_client *client, bool sync)
  */
 int ipa_pm_activate(u32 hdl)
 {
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
 	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
 		IPA_PM_ERR("Invalid Param\n");
 		return -EINVAL;
@@ -941,6 +961,11 @@ int ipa_pm_activate(u32 hdl)
  */
 int ipa_pm_activate_sync(u32 hdl)
 {
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
 	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
 		IPA_PM_ERR("Invalid Param\n");
 		return -EINVAL;
@@ -961,6 +986,11 @@ int ipa_pm_deferred_deactivate(u32 hdl)
 	struct ipa_pm_client *client;
 	unsigned long flags;
 
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
 	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
 		IPA_PM_ERR("Invalid Param\n");
 		return -EINVAL;
@@ -1008,6 +1038,11 @@ int ipa_pm_deactivate_all_deferred(void)
 	struct ipa_pm_client *client;
 	unsigned long flags;
 
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
 	for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) {
 		client = ipa_pm_ctx->clients[i];
 
@@ -1061,13 +1096,19 @@ int ipa_pm_deactivate_all_deferred(void)
  */
 int ipa_pm_deactivate_sync(u32 hdl)
 {
-	struct ipa_pm_client *client = ipa_pm_ctx->clients[hdl];
+	struct ipa_pm_client *client;
 	unsigned long flags;
 
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
 	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
 		IPA_PM_ERR("Invalid Param\n");
 		return -EINVAL;
 	}
+	client = ipa_pm_ctx->clients[hdl];
 
 	cancel_delayed_work_sync(&client->deactivate_work);
 
@@ -1111,6 +1152,11 @@ int ipa_pm_handle_suspend(u32 pipe_bitmask)
 	struct ipa_pm_client *client;
 	bool client_notified[IPA_PM_MAX_CLIENTS] = { false };
 
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
 	IPA_PM_DBG_LOW("bitmask: %d",  pipe_bitmask);
 
 	if (pipe_bitmask == 0)
@@ -1146,14 +1192,20 @@ int ipa_pm_handle_suspend(u32 pipe_bitmask)
  */
 int ipa_pm_set_perf_profile(u32 hdl, int throughput)
 {
-	struct ipa_pm_client *client = ipa_pm_ctx->clients[hdl];
+	struct ipa_pm_client *client;
 	unsigned long flags;
 
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
 	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL
 		|| throughput < 0) {
 		IPA_PM_ERR("Invalid Params\n");
 		return -EINVAL;
 	}
+	client = ipa_pm_ctx->clients[hdl];
 
 	mutex_lock(&ipa_pm_ctx->client_mutex);
 	if (client->group == IPA_PM_GROUP_DEFAULT)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 6a5e85b..7421eb8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1583,7 +1583,6 @@ int ipa3_get_clients_from_rm_resource(
 		clients->names[i++] = IPA_CLIENT_WLAN1_CONS;
 		clients->names[i++] = IPA_CLIENT_WLAN2_CONS;
 		clients->names[i++] = IPA_CLIENT_WLAN3_CONS;
-		clients->names[i++] = IPA_CLIENT_WLAN4_CONS;
 		break;
 	case IPA_RM_RESOURCE_MHI_CONS:
 		clients->names[i++] = IPA_CLIENT_MHI_CONS;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 0444b67..cee0989 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -2950,7 +2950,7 @@ static int rmnet_ipa3_set_data_quota_modem(
 	if (index == MAX_NUM_OF_MUX_CHANNEL) {
 		IPAWANERR("%s is an invalid iface name\n",
 			  data->interface_name);
-		return -EFAULT;
+		return -ENODEV;
 	}
 
 	mux_id = rmnet_ipa3_ctx->mux_channel[index].mux_id;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 0f85e12..246f32e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -74,7 +74,7 @@ static long ipa3_wan_ioctl(struct file *filp,
 		unsigned int cmd,
 		unsigned long arg)
 {
-	int retval = 0;
+	int retval = 0, rc = 0;
 	u32 pyld_sz;
 	u8 *param = NULL;
 
@@ -249,10 +249,14 @@ static long ipa3_wan_ioctl(struct file *filp,
 			retval = -EFAULT;
 			break;
 		}
-		if (rmnet_ipa3_set_data_quota(
-		(struct wan_ioctl_set_data_quota *)param)) {
+		rc = rmnet_ipa3_set_data_quota(
+			(struct wan_ioctl_set_data_quota *)param);
+		if (rc != 0) {
 			IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n");
-			retval = -EFAULT;
+			if (rc == -ENODEV)
+				retval = -ENODEV;
+			else
+				retval = -EFAULT;
 			break;
 		}
 		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
index 7496f28..0f5c61e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
@@ -128,7 +128,8 @@ int ipa3_teth_bridge_disconnect(enum ipa_client_type client)
 			TETH_ERR("fail to deactivate modem %d\n", res);
 			return res;
 		}
-		res = ipa_pm_destroy();
+		res = ipa_pm_deregister(ipa3_teth_ctx->modem_pm_hdl);
+		ipa3_teth_ctx->modem_pm_hdl = ~0;
 	} else {
 		ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
 					IPA_RM_RESOURCE_Q6_CONS);
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index bfc401a..5b31889 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -301,12 +301,29 @@ static void msm_restart_prepare(const char *cmd)
 			__raw_writel(0x7766550a, restart_reason);
 		} else if (!strncmp(cmd, "oem-", 4)) {
 			unsigned long code;
+			unsigned long reset_reason;
 			int ret;
 
 			ret = kstrtoul(cmd + 4, 16, &code);
-			if (!ret)
+			if (!ret) {
+				/* Bit-2 to bit-7 of SOFT_RB_SPARE for hard
+				 * reset reason:
+				 * Value 0 to 31 for common defined features
+				 * Value 32 to 63 for oem specific features
+				 */
+				reset_reason = code +
+						PON_RESTART_REASON_OEM_MIN;
+				if (reset_reason > PON_RESTART_REASON_OEM_MAX ||
+				   reset_reason < PON_RESTART_REASON_OEM_MIN) {
+					pr_err("Invalid oem reset reason: %lx\n",
+						reset_reason);
+				} else {
+					qpnp_pon_set_restart_reason(
+						reset_reason);
+				}
 				__raw_writel(0x6f656d00 | (code & 0xff),
 					     restart_reason);
+			}
 		} else if (!strncmp(cmd, "edl", 3)) {
 			enable_emergency_dload_mode();
 		} else {
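
For "oem-XX" reboot commands the parsed hex code is offset into the OEM half
of the 6-bit SOFT_RB_SPARE field (bits 2 to 7), and the same code is
independently written to the restart-reason register under the ASCII tag
0x6f656d ("oem"). A standalone sketch of the encoding, assuming
PON_RESTART_REASON_OEM_MIN/MAX are 32 and 63 as the comment above implies:

#include <stdint.h>
#include <stdio.h>

#define PON_RESTART_REASON_OEM_MIN	32UL	/* assumed values */
#define PON_RESTART_REASON_OEM_MAX	63UL

int main(void)
{
	unsigned long code = 0x0a;		/* parsed from "oem-0a" */
	unsigned long reason = code + PON_RESTART_REASON_OEM_MIN;
	uint32_t reg = 0x6f656d00 | (code & 0xff);	/* "oem" | code */

	if (reason < PON_RESTART_REASON_OEM_MIN ||
	    reason > PON_RESTART_REASON_OEM_MAX)
		printf("invalid oem reset reason: %lx\n", reason);
	else
		printf("SOFT_RB_SPARE reason=%lu restart reg=0x%08x\n",
		       reason, (unsigned int)reg);
	return 0;
}
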
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 785cf23..ee54efc 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -320,6 +320,8 @@ static struct device_attribute power_supply_attrs[] = {
 	POWER_SUPPLY_ATTR(pd_voltage_min),
 	POWER_SUPPLY_ATTR(sdp_current_max),
 	POWER_SUPPLY_ATTR(connector_type),
+	POWER_SUPPLY_ATTR(parallel_batfet_mode),
+	POWER_SUPPLY_ATTR(min_icl),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/Kconfig b/drivers/power/supply/qcom/Kconfig
index 25ad740..35aa6cc 100644
--- a/drivers/power/supply/qcom/Kconfig
+++ b/drivers/power/supply/qcom/Kconfig
@@ -91,4 +91,15 @@
 	  module. It also allows userspace code to read diagnostics of voltage
 	  and current measured during certain phases of the pulses.
 
+config QPNP_TYPEC
+	tristate "QPNP Type-C driver"
+	depends on MFD_SPMI_PMIC
+	help
+	  Say Y here to enable QPNP Type-C driver.
+	  The QPNP Type-C module supports the USB type-C protocol. It supports
+	  type-C cable detection and other type-C parameters such as
+	  current-capability and CC-orientation. The module does not support
+	  USB power-delivery. The driver adds support to report these type-C
+	  parameters via the power-supply framework.
+
 endmenu
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
index 21f63ee..7350c30 100644
--- a/drivers/power/supply/qcom/Makefile
+++ b/drivers/power/supply/qcom/Makefile
@@ -7,3 +7,4 @@
 obj-$(CONFIG_QPNP_SMB2)		+= step-chg-jeita.o battery.o qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o
 obj-$(CONFIG_SMB138X_CHARGER)	+= step-chg-jeita.o smb138x-charger.o smb-lib.o pmic-voter.o storm-watch.o battery.o
 obj-$(CONFIG_QPNP_QNOVO)	+= qpnp-qnovo.o battery.o
+obj-$(CONFIG_QPNP_TYPEC)	+= qpnp-typec.o
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index aa5b1b0..3f8c727 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -44,9 +44,12 @@
 #define PL_INDIRECT_VOTER		"PL_INDIRECT_VOTER"
 #define USBIN_I_VOTER			"USBIN_I_VOTER"
 #define PL_FCC_LOW_VOTER		"PL_FCC_LOW_VOTER"
+#define ICL_LIMIT_VOTER			"ICL_LIMIT_VOTER"
 
 struct pl_data {
 	int			pl_mode;
+	int			pl_batfet_mode;
+	int			pl_min_icl_ua;
 	int			slave_pct;
 	int			slave_fcc_ua;
 	int			restricted_current;
@@ -92,6 +95,8 @@ module_param_named(debug_mask, debug_mask, int, 0600);
 			pr_debug(fmt, ##__VA_ARGS__);		\
 	} while (0)
 
+#define IS_USBIN(mode)	((mode == POWER_SUPPLY_PL_USBIN_USBIN) \
+			|| (mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
 enum {
 	VER = 0,
 	SLAVE_PCT,
@@ -102,19 +107,19 @@ enum {
 /*******
  * ICL *
  ********/
-static void split_settled(struct pl_data *chip)
+static int get_settled_split(struct pl_data *chip, int *main_icl_ua,
+				int *slave_icl_ua, int *total_settled_icl_ua)
 {
 	int slave_icl_pct, total_current_ua;
 	int slave_ua = 0, main_settled_ua = 0;
 	union power_supply_propval pval = {0, };
 	int rc, total_settled_ua = 0;
 
-	if ((chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN)
-		&& (chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN_EXT))
-		return;
+	if (!IS_USBIN(chip->pl_mode))
+		return -EINVAL;
 
 	if (!chip->main_psy)
-		return;
+		return -EINVAL;
 
 	if (!get_effective_result_locked(chip->pl_disable_votable)) {
 		/* read the aicl settled value */
@@ -122,11 +127,10 @@ static void split_settled(struct pl_data *chip)
 			       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, &pval);
 		if (rc < 0) {
 			pr_err("Couldn't get aicl settled value rc=%d\n", rc);
-			return;
+			return rc;
 		}
 		main_settled_ua = pval.intval;
-		/* slave gets 10 percent points less for ICL */
-		slave_icl_pct = max(0, chip->slave_pct - 10);
+		slave_icl_pct = max(0, chip->slave_pct);
 		slave_ua = ((main_settled_ua + chip->pl_settled_ua)
 						* slave_icl_pct) / 100;
 		total_settled_ua = main_settled_ua + chip->pl_settled_ua;
@@ -138,18 +142,63 @@ static void split_settled(struct pl_data *chip)
 			chip->usb_psy = power_supply_get_by_name("usb");
 		if (!chip->usb_psy) {
 			pr_err("Couldn't get usbpsy while splitting settled\n");
-			return;
+			return -ENOENT;
 		}
 		/* no client is voting, so get the total current from charger */
 		rc = power_supply_get_property(chip->usb_psy,
 			POWER_SUPPLY_PROP_HW_CURRENT_MAX, &pval);
 		if (rc < 0) {
 			pr_err("Couldn't get max current rc=%d\n", rc);
-			return;
+			return rc;
 		}
 		total_current_ua = pval.intval;
 	}
 
+	*main_icl_ua = total_current_ua - slave_ua;
+	*slave_icl_ua = slave_ua;
+	*total_settled_icl_ua = total_settled_ua;
+
+	pl_dbg(chip, PR_PARALLEL,
+		"Split total_current_ua=%d total_settled_ua=%d main_settled_ua=%d slave_ua=%d\n",
+		total_current_ua, total_settled_ua, main_settled_ua, slave_ua);
+
+	return 0;
+}
+
+static int validate_parallel_icl(struct pl_data *chip, bool *disable)
+{
+	int rc = 0;
+	int main_ua = 0, slave_ua = 0, total_settled_ua = 0;
+
+	if (!IS_USBIN(chip->pl_mode)
+		|| get_effective_result_locked(chip->pl_disable_votable))
+		return 0;
+
+	rc = get_settled_split(chip, &main_ua, &slave_ua, &total_settled_ua);
+	if (rc < 0) {
+		pr_err("Couldn't get split current rc=%d\n", rc);
+		return rc;
+	}
+
+	if (slave_ua < chip->pl_min_icl_ua)
+		*disable = true;
+	else
+		*disable = false;
+
+	return 0;
+}
+
+static void split_settled(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc, main_ua, slave_ua, total_settled_ua;
+
+	rc = get_settled_split(chip, &main_ua, &slave_ua, &total_settled_ua);
+	if (rc < 0) {
+		pr_err("Couldn't get split current rc=%d\n", rc);
+		return;
+	}
+
 	/*
 	 * If there is an increase in slave share
 	 * (Also handles parallel enable case)
@@ -159,7 +208,7 @@ static void split_settled(struct pl_data *chip)
 	 *	Set slave ICL then main ICL.
 	 */
 	if (slave_ua > chip->pl_settled_ua) {
-		pval.intval = total_current_ua - slave_ua;
+		pval.intval = main_ua;
 		/* Set ICL on main charger */
 		rc = power_supply_set_property(chip->main_psy,
 				POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
@@ -187,7 +236,7 @@ static void split_settled(struct pl_data *chip)
 			return;
 		}
 
-		pval.intval = total_current_ua - slave_ua;
+		pval.intval = main_ua;
 		/* Set ICL on main charger */
 		rc = power_supply_set_property(chip->main_psy,
 				POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
@@ -201,9 +250,6 @@ static void split_settled(struct pl_data *chip)
 	chip->total_settled_ua = total_settled_ua;
 	chip->pl_settled_ua = slave_ua;
 
-	pl_dbg(chip, PR_PARALLEL,
-		"Split total_current_ua=%d main_settled_ua=%d slave_ua=%d\n",
-		total_current_ua, main_settled_ua, slave_ua);
 }
 
 static ssize_t version_show(struct class *c, struct class_attribute *attr,
@@ -228,14 +274,21 @@ static ssize_t slave_pct_show(struct class *c, struct class_attribute *attr,
 static ssize_t slave_pct_store(struct class *c, struct class_attribute *attr,
 			const char *ubuf, size_t count)
 {
-	struct pl_data *chip = container_of(c, struct pl_data,
-			qcom_batt_class);
+	struct pl_data *chip = container_of(c, struct pl_data, qcom_batt_class);
+	int rc;
 	unsigned long val;
+	bool disable = false;
 
 	if (kstrtoul(ubuf, 10, &val))
 		return -EINVAL;
 
 	chip->slave_pct = val;
+
+	rc = validate_parallel_icl(chip, &disable);
+	if (rc < 0)
+		return rc;
+
+	vote(chip->pl_disable_votable, ICL_LIMIT_VOTER, disable, 0);
 	rerun_election(chip->fcc_votable);
 	rerun_election(chip->fv_votable);
 	split_settled(chip);
@@ -247,7 +300,7 @@ static ssize_t slave_pct_store(struct class *c, struct class_attribute *attr,
  * RESTRICTED CHARGING *
  ************************/
 static ssize_t restrict_chg_show(struct class *c, struct class_attribute *attr,
-			char *ubuf)
+		char *ubuf)
 {
 	struct pl_data *chip = container_of(c, struct pl_data,
 			qcom_batt_class);
@@ -367,11 +420,11 @@ static void get_fcc_split(struct pl_data *chip, int total_ua,
 	*slave_ua = (slave_limited_ua * chip->slave_pct) / 100;
 
 	/*
-	 * In USBIN_USBIN configuration with internal rsense parallel
-	 * charger's current goes through main charger's BATFET, keep
-	 * the main charger's FCC to the votable result.
+	 * In stacked BATFET configuration charger's current goes
+	 * through main charger's BATFET, keep the main charger's FCC
+	 * to the votable result.
 	 */
-	if (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+	if (chip->pl_batfet_mode == POWER_SUPPLY_PL_STACKED_BATFET)
 		*master_ua = max(0, total_ua);
 	else
 		*master_ua = max(0, total_ua - *slave_ua);
@@ -464,11 +517,9 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
 				&slave_fcc_ua);
 
 		if (slave_fcc_ua > MINIMUM_PARALLEL_FCC_UA) {
-			chip->slave_fcc_ua = slave_fcc_ua;
 			vote(chip->pl_disable_votable, PL_FCC_LOW_VOTER,
 							false, 0);
 		} else {
-			chip->slave_fcc_ua = 0;
 			vote(chip->pl_disable_votable, PL_FCC_LOW_VOTER,
 							true, 0);
 		}
@@ -622,11 +673,9 @@ static int pl_disable_vote_callback(struct votable *votable,
 {
 	struct pl_data *chip = data;
 	union power_supply_propval pval = {0, };
-	int master_fcc_ua, total_fcc_ua, slave_fcc_ua;
-	int rc;
-
-	chip->total_settled_ua = 0;
-	chip->pl_settled_ua = 0;
+	int master_fcc_ua = 0, total_fcc_ua = 0, slave_fcc_ua = 0;
+	int rc = 0;
+	bool disable = false;
 
 	if (!is_main_available(chip))
 		return -ENODEV;
@@ -638,6 +687,16 @@ static int pl_disable_vote_callback(struct votable *votable,
 		cancel_delayed_work_sync(&chip->pl_awake_work);
 		vote(chip->pl_awake_votable, PL_VOTER, true, 0);
 
+		rc = validate_parallel_icl(chip, &disable);
+		if (rc < 0)
+			return rc;
+
+		if (disable) {
+			pr_info("Parallel ICL is less than min ICL(%d), skipping parallel enable\n",
+					chip->pl_min_icl_ua);
+			return 0;
+		}
+
 		 /* enable parallel charging */
 		rc = power_supply_get_property(chip->pl_psy,
 				POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
@@ -720,8 +779,7 @@ static int pl_disable_vote_callback(struct votable *votable,
 			pr_err("Couldn't change slave suspend state rc=%d\n",
 				rc);
 
-		if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
-			|| (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+		if (IS_USBIN(chip->pl_mode))
 			split_settled(chip);
 		/*
 		 * we could have been enabled while in taper mode,
@@ -748,8 +806,7 @@ static int pl_disable_vote_callback(struct votable *votable,
 			(master_fcc_ua * 100) / total_fcc_ua,
 			(slave_fcc_ua * 100) / total_fcc_ua);
 	} else {
-		if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
-			|| (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+		if (IS_USBIN(chip->pl_mode))
 			split_settled(chip);
 
 		/* pl_psy may be NULL while in the disable branch */
@@ -772,11 +829,16 @@ static int pl_disable_vote_callback(struct votable *votable,
 			return rc;
 		}
 
+		/* reset parallel FCC */
+		chip->slave_fcc_ua = 0;
 		rerun_election(chip->fv_votable);
 
 		cancel_delayed_work_sync(&chip->pl_awake_work);
 		schedule_delayed_work(&chip->pl_awake_work,
 						msecs_to_jiffies(5000));
+
+		chip->total_settled_ua = 0;
+		chip->pl_settled_ua = 0;
 	}
 
 	pl_dbg(chip, PR_PARALLEL, "parallel charging %s\n",
@@ -847,8 +909,7 @@ static bool is_parallel_available(struct pl_data *chip)
 	chip->pl_mode = pval.intval;
 
 	/* Disable autonomous voltage increments for USBIN-USBIN */
-	if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
-		|| (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) {
+	if (IS_USBIN(chip->pl_mode)) {
 		if (!chip->hvdcp_hw_inov_dis_votable)
 			chip->hvdcp_hw_inov_dis_votable =
 					find_votable("HVDCP_HW_INOV_DIS");
@@ -860,6 +921,20 @@ static bool is_parallel_available(struct pl_data *chip)
 			return false;
 	}
 
+	rc = power_supply_get_property(chip->pl_psy,
+		       POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get parallel batfet mode rc=%d\n",
+				rc);
+		return false;
+	}
+	chip->pl_batfet_mode = pval.intval;
+
+	pval.intval = 0;
+	power_supply_get_property(chip->pl_psy, POWER_SUPPLY_PROP_MIN_ICL,
+					&pval);
+	chip->pl_min_icl_ua = pval.intval;
+
 	vote(chip->pl_disable_votable, PARALLEL_PSY_VOTER, false, 0);
 
 	return true;
@@ -881,9 +956,6 @@ static void handle_main_charge_type(struct pl_data *chip)
 	if ((pval.intval != POWER_SUPPLY_CHARGE_TYPE_FAST)
 		&& (pval.intval != POWER_SUPPLY_CHARGE_TYPE_TAPER)) {
 		vote(chip->pl_disable_votable, CHG_STATE_VOTER, true, 0);
-		vote(chip->pl_disable_votable, TAPER_END_VOTER, false, 0);
-		vote(chip->pl_disable_votable, PL_TAPER_EARLY_BAD_VOTER,
-				false, 0);
 		chip->charge_type = pval.intval;
 		return;
 	}
@@ -922,6 +994,7 @@ static void handle_settled_icl_change(struct pl_data *chip)
 	int main_settled_ua;
 	int main_limited;
 	int total_current_ua;
+	bool disable = false;
 
 	total_current_ua = get_effective_result_locked(chip->usb_icl_votable);
 
@@ -957,11 +1030,7 @@ static void handle_settled_icl_change(struct pl_data *chip)
 
 	rerun_election(chip->fcc_votable);
 
-	if (get_effective_result(chip->pl_disable_votable))
-		return;
-
-	if (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN
-			|| chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT) {
+	if (IS_USBIN(chip->pl_mode)) {
 		/*
 		 * call aicl split only when USBIN_USBIN and enabled
 		 * and if settled current has changed by more than 300mA
@@ -975,8 +1044,17 @@ static void handle_settled_icl_change(struct pl_data *chip)
 
 		/* If ICL change is small skip splitting */
 		if (abs(new_total_settled_ua - chip->total_settled_ua)
-						> MIN_ICL_CHANGE_DELTA_UA)
-			split_settled(chip);
+						> MIN_ICL_CHANGE_DELTA_UA) {
+			rc = validate_parallel_icl(chip, &disable);
+			if (rc < 0)
+				return;
+
+			vote(chip->pl_disable_votable, ICL_LIMIT_VOTER,
+						disable, 0);
+			if (!get_effective_result_locked(
+						chip->pl_disable_votable))
+				split_settled(chip);
+		}
 	}
 }
 
@@ -1009,6 +1087,34 @@ static void handle_parallel_in_taper(struct pl_data *chip)
 	}
 }
 
+static void handle_usb_change(struct pl_data *chip)
+{
+	int rc;
+	union power_supply_propval pval = {0, };
+
+	if (!chip->usb_psy)
+		chip->usb_psy = power_supply_get_by_name("usb");
+	if (!chip->usb_psy) {
+		pr_err("Couldn't get usbpsy\n");
+		return;
+	}
+
+	rc = power_supply_get_property(chip->usb_psy,
+			POWER_SUPPLY_PROP_PRESENT, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get present from USB rc=%d\n", rc);
+		return;
+	}
+
+	if (!pval.intval) {
+		/* USB removed: remove all stale votes */
+		vote(chip->pl_disable_votable, TAPER_END_VOTER, false, 0);
+		vote(chip->pl_disable_votable, PL_TAPER_EARLY_BAD_VOTER,
+				false, 0);
+		vote(chip->pl_disable_votable, ICL_LIMIT_VOTER, false, 0);
+	}
+}
+
 static void status_change_work(struct work_struct *work)
 {
 	struct pl_data *chip = container_of(work,
@@ -1033,6 +1139,7 @@ static void status_change_work(struct work_struct *work)
 
 	is_parallel_available(chip);
 
+	handle_usb_change(chip);
 	handle_main_charge_type(chip);
 	handle_settled_icl_change(chip);
 	handle_parallel_in_taper(chip);
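
The battery.c rework above centralizes the settled-ICL arithmetic in
get_settled_split(): the slave's share is a percentage of the settled total
(main + parallel), the main charger gets the remainder of the requested
total, and validate_parallel_icl() disables parallel charging via
ICL_LIMIT_VOTER whenever the slave's share would fall below the parallel
charger's advertised minimum ICL. A worked sketch with illustrative numbers:

#include <stdio.h>

int main(void)
{
	int main_settled_ua = 1500000, pl_settled_ua = 500000;
	int total_request_ua = 2000000;	/* effective USB ICL vote */
	int slave_pct = 50, pl_min_icl_ua = 250000;

	int total_settled_ua = main_settled_ua + pl_settled_ua;
	int slave_ua = (total_settled_ua * slave_pct) / 100;
	int main_ua = total_request_ua - slave_ua;

	/* slave_ua below the parallel charger's MIN_ICL disables parallel */
	printf("main=%d uA slave=%d uA disable=%d\n",
	       main_ua, slave_ua, slave_ua < pl_min_icl_ua);
	return 0;
}
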
diff --git a/drivers/power/supply/qcom/qpnp-typec.c b/drivers/power/supply/qcom/qpnp-typec.c
new file mode 100644
index 0000000..3c74be0
--- /dev/null
+++ b/drivers/power/supply/qcom/qpnp-typec.c
@@ -0,0 +1,1023 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#define pr_fmt(fmt)	"TYPEC: %s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/usb/class-dual-role.h>
+
+#define CREATE_MASK(NUM_BITS, POS) \
+	((unsigned char) (((1 << (NUM_BITS)) - 1) << (POS)))
+#define TYPEC_MASK(MSB_BIT, LSB_BIT) \
+	CREATE_MASK(MSB_BIT - LSB_BIT + 1, LSB_BIT)
+
+/* Interrupt offsets */
+#define INT_RT_STS_REG(base)		(base + 0x10)
+#define DFP_DETECT_BIT			BIT(3)
+#define UFP_DETECT_BIT			BIT(1)
+
+#define TYPEC_UFP_STATUS_REG(base)	(base + 0x08)
+#define TYPEC_CCOUT_BIT			BIT(7)
+#define TYPEC_CCOUT_OPEN_BIT		BIT(6)
+#define TYPEC_CURRENT_MASK		TYPEC_MASK(2, 0)
+#define TYPEC_RDSTD_BIT			BIT(2)
+#define TYPEC_RD1P5_BIT			BIT(1)
+
+#define TYPEC_DFP_STATUS_REG(base)	(base + 0x09)
+#define VALID_DFP_MASK			TYPEC_MASK(6, 4)
+
+#define TYPEC_SW_CTL_REG(base)		(base + 0x52)
+
+#define TYPEC_STD_MA			900
+#define TYPEC_MED_MA			1500
+#define TYPEC_HIGH_MA			3000
+
+#define QPNP_TYPEC_DEV_NAME	"qcom,qpnp-typec"
+#define TYPEC_PSY_NAME		"typec"
+#define DUAL_ROLE_DESC_NAME	"otg_default"
+
+enum cc_line_state {
+	CC_1,
+	CC_2,
+	OPEN,
+};
+
+struct typec_wakeup_source {
+	struct wakeup_source	source;
+	unsigned long		enabled;
+};
+
+static void typec_stay_awake(struct typec_wakeup_source *source)
+{
+	if (!__test_and_set_bit(0, &source->enabled)) {
+		__pm_stay_awake(&source->source);
+		pr_debug("enabled source %s\n", source->source.name);
+	}
+}
+
+static void typec_relax(struct typec_wakeup_source *source)
+{
+	if (__test_and_clear_bit(0, &source->enabled)) {
+		__pm_relax(&source->source);
+		pr_debug("disabled source %s\n", source->source.name);
+	}
+}
+
+struct qpnp_typec_chip {
+	struct device		*dev;
+	struct spmi_device	*spmi;
+	struct power_supply	*batt_psy;
+	struct power_supply	type_c_psy;
+	struct regulator	*ss_mux_vreg;
+	struct mutex		typec_lock;
+	spinlock_t		rw_lock;
+
+	u16			base;
+
+	/* IRQs */
+	int			vrd_changed;
+	int			ufp_detach;
+	int			ufp_detect;
+	int			dfp_detach;
+	int			dfp_detect;
+	int			vbus_err;
+	int			vconn_oc;
+
+	/* Configurations */
+	int			cc_line_state;
+	int			current_ma;
+	int			ssmux_gpio;
+	enum of_gpio_flags	gpio_flag;
+	int			typec_state;
+
+	/* Dual role support */
+	bool				role_reversal_supported;
+	bool				in_force_mode;
+	int				force_mode;
+	struct dual_role_phy_instance	*dr_inst;
+	struct dual_role_phy_desc	dr_desc;
+	struct delayed_work		role_reversal_check;
+	struct typec_wakeup_source	role_reversal_wakeup_source;
+};
+
+/* current mode */
+static char *mode_text[] = {
+	"ufp", "dfp", "none"
+};
+
+/* SPMI operations */
+static int __qpnp_typec_read(struct spmi_device *spmi, u8 *val, u16 addr,
+			int count)
+{
+	int rc;
+
+	rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, addr, val, count);
+	if (rc)
+		pr_err("spmi read failed addr=0x%02x sid=0x%02x rc=%d\n",
+				addr, spmi->sid, rc);
+
+	return rc;
+}
+
+static int __qpnp_typec_write(struct spmi_device *spmi, u8 *val, u16 addr,
+			int count)
+{
+	int rc;
+
+	rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, addr, val, count);
+	if (rc)
+		pr_err("spmi write failed addr=0x%02x sid=0x%02x rc=%d\n",
+				addr, spmi->sid, rc);
+	return rc;
+}
+
+static int qpnp_typec_read(struct qpnp_typec_chip *chip, u8 *val, u16 addr,
+			int count)
+{
+	int rc;
+	unsigned long flags;
+	struct spmi_device *spmi = chip->spmi;
+
+	if (addr == 0) {
+		pr_err("addr cannot be zero addr=0x%02x sid=0x%02x\n",
+				addr, spmi->sid);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&chip->rw_lock, flags);
+	rc = __qpnp_typec_read(spmi, val, addr, count);
+	spin_unlock_irqrestore(&chip->rw_lock, flags);
+
+	return rc;
+}
+
+static int qpnp_typec_masked_write(struct qpnp_typec_chip *chip, u16 base,
+			u8 mask, u8 val)
+{
+	u8 reg;
+	int rc;
+	unsigned long flags;
+	struct spmi_device *spmi = chip->spmi;
+
+	spin_lock_irqsave(&chip->rw_lock, flags);
+	rc = __qpnp_typec_read(spmi, &reg, base, 1);
+	if (rc) {
+		pr_err("spmi read failed: addr=%03X, rc=%d\n", base, rc);
+		goto out;
+	}
+
+	reg &= ~mask;
+	reg |= val & mask;
+
+	pr_debug("addr = 0x%x writing 0x%x\n", base, reg);
+
+	rc = __qpnp_typec_write(spmi, &reg, base, 1);
+	if (rc) {
+		pr_err("spmi write failed: addr=%03X, rc=%d\n", base, rc);
+		goto out;
+	}
+
+out:
+	spin_unlock_irqrestore(&chip->rw_lock, flags);
+	return rc;
+}
+
+static int set_property_on_battery(struct qpnp_typec_chip *chip,
+				enum power_supply_property prop)
+{
+	int rc = 0;
+	union power_supply_propval ret = {0, };
+
+	if (!chip->batt_psy) {
+		chip->batt_psy = power_supply_get_by_name("battery");
+		if (!chip->batt_psy) {
+			pr_err("no batt psy found\n");
+			return -ENODEV;
+		}
+	}
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CURRENT_CAPABILITY:
+		ret.intval = chip->current_ma;
+		rc = chip->batt_psy->set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CURRENT_CAPABILITY, &ret);
+		if (rc)
+			pr_err("failed to set current max rc=%d\n", rc);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_MODE:
+		/*
+		 * Notify the typec mode to charger. This is useful in the DFP
+		 * case where there is no notification of OTG insertion to the
+		 * charger driver.
+		 */
+		ret.intval = chip->typec_state;
+		rc = chip->batt_psy->set_property(chip->batt_psy,
+				POWER_SUPPLY_PROP_TYPEC_MODE, &ret);
+		if (rc)
+			pr_err("failed to set typec mode rc=%d\n", rc);
+		break;
+	default:
+		pr_err("invalid request\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int get_max_current(u8 reg)
+{
+	if (!reg)
+		return 0;
+
+	return (reg & TYPEC_RDSTD_BIT) ? TYPEC_STD_MA :
+		(reg & TYPEC_RD1P5_BIT) ? TYPEC_MED_MA : TYPEC_HIGH_MA;
+}
+
+static int qpnp_typec_configure_ssmux(struct qpnp_typec_chip *chip,
+				enum cc_line_state cc_line)
+{
+	int rc = 0;
+
+	if (cc_line != chip->cc_line_state) {
+		switch (cc_line) {
+		case OPEN:
+			if (chip->ss_mux_vreg) {
+				rc = regulator_disable(chip->ss_mux_vreg);
+				if (rc) {
+					pr_err("failed to disable ssmux regulator rc=%d\n",
+							rc);
+					return rc;
+				}
+			}
+
+			if (chip->ssmux_gpio) {
+				rc = gpio_direction_input(chip->ssmux_gpio);
+				if (rc) {
+					pr_err("failed to configure ssmux gpio rc=%d\n",
+							rc);
+					return rc;
+				}
+			}
+			break;
+		case CC_1:
+		case CC_2:
+			if (chip->ss_mux_vreg) {
+				rc = regulator_enable(chip->ss_mux_vreg);
+				if (rc) {
+					pr_err("failed to enable ssmux regulator rc=%d\n",
+							rc);
+					return rc;
+				}
+			}
+
+			if (chip->ssmux_gpio) {
+				rc = gpio_direction_output(chip->ssmux_gpio,
+					(chip->gpio_flag == OF_GPIO_ACTIVE_LOW)
+						? !cc_line : cc_line);
+				if (rc) {
+					pr_err("failed to configure ssmux gpio rc=%d\n",
+							rc);
+					return rc;
+				}
+			}
+			break;
+		}
+	}
+
+	return 0;
+}
+
+#define UFP_EN_BIT			BIT(5)
+#define DFP_EN_BIT			BIT(4)
+#define FORCE_MODE_MASK			TYPEC_MASK(5, 4)
+static int qpnp_typec_force_mode(struct qpnp_typec_chip *chip, int mode)
+{
+	int rc = 0;
+	u8 reg = (mode == DUAL_ROLE_PROP_MODE_UFP) ? UFP_EN_BIT
+			: (mode == DUAL_ROLE_PROP_MODE_DFP) ? DFP_EN_BIT : 0x0;
+
+	if (chip->force_mode != mode) {
+		rc = qpnp_typec_masked_write(chip,
+			TYPEC_SW_CTL_REG(chip->base), FORCE_MODE_MASK, reg);
+		if (rc) {
+			pr_err("Failed to force typeC mode rc=%d\n", rc);
+		} else {
+			chip->force_mode = mode;
+			pr_debug("Forced mode: %s\n",
+					mode_text[chip->force_mode]);
+		}
+	}
+
+	return rc;
+}
+
+static int qpnp_typec_handle_usb_insertion(struct qpnp_typec_chip *chip, u8 reg)
+{
+	int rc;
+	enum cc_line_state cc_line_state;
+
+	cc_line_state = (reg & TYPEC_CCOUT_OPEN_BIT) ?
+		OPEN : (reg & TYPEC_CCOUT_BIT) ? CC_2 : CC_1;
+	rc = qpnp_typec_configure_ssmux(chip, cc_line_state);
+	if (rc) {
+		pr_err("failed to configure ss-mux rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->cc_line_state = cc_line_state;
+
+	pr_debug("CC_line state = %d\n", cc_line_state);
+
+	return 0;
+}
+
+static int qpnp_typec_handle_detach(struct qpnp_typec_chip *chip)
+{
+	int rc;
+
+	rc = qpnp_typec_configure_ssmux(chip, OPEN);
+	if (rc) {
+		pr_err("failed to configure SSMUX rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->cc_line_state = OPEN;
+	chip->current_ma = 0;
+	chip->typec_state = POWER_SUPPLY_TYPE_UNKNOWN;
+	chip->type_c_psy.type = POWER_SUPPLY_TYPE_UNKNOWN;
+	rc = set_property_on_battery(chip, POWER_SUPPLY_PROP_TYPEC_MODE);
+	if (rc)
+		pr_err("failed to set TYPEC MODE on battery psy rc=%d\n", rc);
+
+	pr_debug("CC_line state = %d current_ma = %d in_force_mode = %d\n",
+			chip->cc_line_state, chip->current_ma,
+			chip->in_force_mode);
+
+	/* handle role reversal */
+	if (chip->role_reversal_supported && !chip->in_force_mode) {
+		rc = qpnp_typec_force_mode(chip, DUAL_ROLE_PROP_MODE_NONE);
+		if (rc)
+			pr_err("Failed to set DRP mode rc=%d\n", rc);
+	}
+
+	if (chip->dr_inst)
+		dual_role_instance_changed(chip->dr_inst);
+
+	return rc;
+}
+
+/* Interrupt handlers */
+static irqreturn_t vrd_changed_handler(int irq, void *_chip)
+{
+	int rc, old_current;
+	u8 reg;
+	struct qpnp_typec_chip *chip = _chip;
+
+	pr_debug("vrd changed triggered\n");
+
+	mutex_lock(&chip->typec_lock);
+	rc = qpnp_typec_read(chip, &reg, TYPEC_UFP_STATUS_REG(chip->base), 1);
+	if (rc) {
+		pr_err("failed to read status reg rc=%d\n", rc);
+		goto out;
+	}
+
+	old_current = chip->current_ma;
+	chip->current_ma = get_max_current(reg & TYPEC_CURRENT_MASK);
+
+	/* only notify if current is valid and changed at runtime */
+	if (chip->current_ma && (old_current != chip->current_ma)) {
+		rc = set_property_on_battery(chip,
+				POWER_SUPPLY_PROP_CURRENT_CAPABILITY);
+		if (rc)
+			pr_err("failed to set INPUT CURRENT MAX on battery psy rc=%d\n",
+					rc);
+	}
+
+	pr_debug("UFP status reg = 0x%x old current = %dma new current = %dma\n",
+			reg, old_current, chip->current_ma);
+
+out:
+	mutex_unlock(&chip->typec_lock);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t vconn_oc_handler(int irq, void *_chip)
+{
+	pr_warn("vconn oc triggered\n");
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ufp_detect_handler(int irq, void *_chip)
+{
+	int rc;
+	u8 reg;
+	struct qpnp_typec_chip *chip = _chip;
+
+	pr_debug("ufp detect triggered\n");
+
+	mutex_lock(&chip->typec_lock);
+	rc = qpnp_typec_read(chip, &reg, TYPEC_UFP_STATUS_REG(chip->base), 1);
+	if (rc) {
+		pr_err("failed to read status reg rc=%d\n", rc);
+		goto out;
+	}
+
+	rc = qpnp_typec_handle_usb_insertion(chip, reg);
+	if (rc) {
+		pr_err("failed to handle USB insertion rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->current_ma = get_max_current(reg & TYPEC_CURRENT_MASK);
+	/* device in UFP state */
+	chip->typec_state = POWER_SUPPLY_TYPE_UFP;
+	chip->type_c_psy.type = POWER_SUPPLY_TYPE_UFP;
+	rc = set_property_on_battery(chip, POWER_SUPPLY_PROP_TYPEC_MODE);
+	if (rc)
+		pr_err("failed to set TYPEC MODE on battery psy rc=%d\n", rc);
+
+	if (chip->dr_inst)
+		dual_role_instance_changed(chip->dr_inst);
+
+	pr_debug("UFP status reg = 0x%x current = %dma\n",
+			reg, chip->current_ma);
+
+out:
+	mutex_unlock(&chip->typec_lock);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ufp_detach_handler(int irq, void *_chip)
+{
+	int rc;
+	struct qpnp_typec_chip *chip = _chip;
+
+	pr_debug("ufp detach triggered\n");
+
+	mutex_lock(&chip->typec_lock);
+	rc = qpnp_typec_handle_detach(chip);
+	if (rc)
+		pr_err("failed to handle UFP detach rc=%d\n", rc);
+
+	mutex_unlock(&chip->typec_lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t dfp_detect_handler(int irq, void *_chip)
+{
+	int rc;
+	u8 reg[2];
+	struct qpnp_typec_chip *chip = _chip;
+
+	pr_debug("dfp detect triggered\n");
+
+	mutex_lock(&chip->typec_lock);
+	rc = qpnp_typec_read(chip, reg, TYPEC_UFP_STATUS_REG(chip->base), 2);
+	if (rc) {
+		pr_err("failed to read status reg rc=%d\n", rc);
+		goto out;
+	}
+
+	if (reg[1] & VALID_DFP_MASK) {
+		rc = qpnp_typec_handle_usb_insertion(chip, reg[0]);
+		if (rc) {
+			pr_err("failed to handle USB insertion rc=%d\n", rc);
+			goto out;
+		}
+
+		chip->typec_state = POWER_SUPPLY_TYPE_DFP;
+		chip->type_c_psy.type = POWER_SUPPLY_TYPE_DFP;
+		chip->current_ma = 0;
+		rc = set_property_on_battery(chip,
+				POWER_SUPPLY_PROP_TYPEC_MODE);
+		if (rc)
+			pr_err("failed to set TYPEC MODE on battery psy rc=%d\n",
+					rc);
+	}
+
+	if (chip->dr_inst)
+		dual_role_instance_changed(chip->dr_inst);
+
+	pr_debug("UFP status reg = 0x%x DFP status reg = 0x%x\n",
+			reg[0], reg[1]);
+
+out:
+	mutex_unlock(&chip->typec_lock);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t dfp_detach_handler(int irq, void *_chip)
+{
+	int rc;
+	struct qpnp_typec_chip *chip = _chip;
+
+	pr_debug("dfp detach triggered\n");
+
+	mutex_lock(&chip->typec_lock);
+	rc = qpnp_typec_handle_detach(chip);
+	if (rc)
+		pr_err("failed to handle DFP detach rc=%d\n", rc);
+
+	mutex_unlock(&chip->typec_lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t vbus_err_handler(int irq, void *_chip)
+{
+	int rc;
+	struct qpnp_typec_chip *chip = _chip;
+
+	pr_debug("vbus_err triggered\n");
+
+	mutex_lock(&chip->typec_lock);
+	rc = qpnp_typec_handle_detach(chip);
+	if (rc)
+		pr_err("failed to handle VBUS_ERR rc=%d\n", rc);
+
+	mutex_unlock(&chip->typec_lock);
+
+	return IRQ_HANDLED;
+}
+
+static int qpnp_typec_parse_dt(struct qpnp_typec_chip *chip)
+{
+	int rc;
+	struct device_node *node = chip->dev->of_node;
+
+	/* SS-Mux configuration gpio */
+	if (of_find_property(node, "qcom,ssmux-gpio", NULL)) {
+		chip->ssmux_gpio = of_get_named_gpio_flags(node,
+				"qcom,ssmux-gpio", 0, &chip->gpio_flag);
+		if (!gpio_is_valid(chip->ssmux_gpio)) {
+			if (chip->ssmux_gpio != -EPROBE_DEFER)
+				pr_err("failed to get ss-mux config gpio=%d\n",
+						chip->ssmux_gpio);
+			return chip->ssmux_gpio;
+		}
+
+		rc = devm_gpio_request(chip->dev, chip->ssmux_gpio,
+				"typec_mux_config_gpio");
+		if (rc) {
+			pr_err("failed to request ss-mux gpio rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/* SS-Mux regulator */
+	if (of_find_property(node, "ss-mux-supply", NULL)) {
+		chip->ss_mux_vreg = devm_regulator_get(chip->dev, "ss-mux");
+		if (IS_ERR(chip->ss_mux_vreg))
+			return PTR_ERR(chip->ss_mux_vreg);
+	}
+
+	chip->role_reversal_supported = of_property_read_bool(node,
+					"qcom,role-reversal-supported");
+	return 0;
+}
+
+static int qpnp_typec_determine_initial_status(struct qpnp_typec_chip *chip)
+{
+	int rc;
+	u8 rt_reg;
+
+	rc = qpnp_typec_read(chip, &rt_reg, INT_RT_STS_REG(chip->base), 1);
+	if (rc) {
+		pr_err("failed to read RT status reg rc=%d\n", rc);
+		return rc;
+	}
+	pr_debug("RT status reg = 0x%x\n", rt_reg);
+
+	chip->cc_line_state = OPEN;
+	chip->typec_state = POWER_SUPPLY_TYPE_UNKNOWN;
+	chip->type_c_psy.type = POWER_SUPPLY_TYPE_UNKNOWN;
+
+	if (rt_reg & DFP_DETECT_BIT) {
+		/* we are in DFP state */
+		dfp_detect_handler(0, chip);
+	} else if (rt_reg & UFP_DETECT_BIT) {
+		/* we are in UFP state */
+		ufp_detect_handler(0, chip);
+	}
+
+	return 0;
+}
+
+#define REQUEST_IRQ(chip, irq, irq_name, irq_handler, flags, wake, rc)	\
+do {									\
+	irq = spmi_get_irq_byname(chip->spmi, NULL, irq_name);		\
+	if (irq < 0) {							\
+		pr_err("Unable to get " irq_name " irq\n");		\
+		rc |= -ENXIO;						\
+	}								\
+	rc = devm_request_threaded_irq(chip->dev,			\
+			irq, NULL, irq_handler, flags, irq_name,	\
+			chip);						\
+	if (rc < 0) {							\
+		pr_err("Unable to request " irq_name " irq: %d\n", rc);	\
+		rc |= -ENXIO;						\
+	}								\
+									\
+	if (wake)							\
+		enable_irq_wake(irq);					\
+} while (0)
+
+static int qpnp_typec_request_irqs(struct qpnp_typec_chip *chip)
+{
+	int rc = 0;
+	unsigned long flags = IRQF_TRIGGER_RISING | IRQF_ONESHOT;
+
+	REQUEST_IRQ(chip, chip->vrd_changed, "vrd-change", vrd_changed_handler,
+			flags, true, rc);
+	REQUEST_IRQ(chip, chip->ufp_detach, "ufp-detach", ufp_detach_handler,
+			flags, true, rc);
+	REQUEST_IRQ(chip, chip->ufp_detect, "ufp-detect", ufp_detect_handler,
+			flags, true, rc);
+	REQUEST_IRQ(chip, chip->dfp_detach, "dfp-detach", dfp_detach_handler,
+			flags, true, rc);
+	REQUEST_IRQ(chip, chip->dfp_detect, "dfp-detect", dfp_detect_handler,
+			flags, true, rc);
+	REQUEST_IRQ(chip, chip->vbus_err, "vbus-err", vbus_err_handler,
+			flags, true, rc);
+	REQUEST_IRQ(chip, chip->vconn_oc, "vconn-oc", vconn_oc_handler,
+			flags, true, rc);
+
+	return rc;
+}
+
+static enum power_supply_property qpnp_typec_properties[] = {
+	POWER_SUPPLY_PROP_CURRENT_CAPABILITY,
+	POWER_SUPPLY_PROP_TYPE,
+};
+
+static int qpnp_typec_get_property(struct power_supply *psy,
+				enum power_supply_property prop,
+				union power_supply_propval *val)
+{
+	struct qpnp_typec_chip *chip = container_of(psy,
+					struct qpnp_typec_chip, type_c_psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_TYPE:
+		val->intval = chip->typec_state;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_CAPABILITY:
+		val->intval = chip->current_ma;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#define ROLE_REVERSAL_DELAY_MS		500
+static void qpnp_typec_role_check_work(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct qpnp_typec_chip *chip = container_of(dwork,
+				struct qpnp_typec_chip, role_reversal_check);
+	int rc;
+
+	mutex_lock(&chip->typec_lock);
+	switch (chip->force_mode) {
+	case DUAL_ROLE_PROP_MODE_UFP:
+		if (chip->typec_state != POWER_SUPPLY_TYPE_UFP) {
+			pr_debug("Role-reversal not latched to UFP in %d msec, resetting to DRP mode\n",
+					ROLE_REVERSAL_DELAY_MS);
+			rc = qpnp_typec_force_mode(chip,
+						DUAL_ROLE_PROP_MODE_NONE);
+			if (rc)
+				pr_err("Failed to set DRP mode rc=%d\n", rc);
+		}
+		chip->in_force_mode = false;
+		break;
+	case DUAL_ROLE_PROP_MODE_DFP:
+		if (chip->typec_state != POWER_SUPPLY_TYPE_DFP) {
+			pr_debug("Role-reversal not latched to DFP in %d msec, resetting to DRP mode\n",
+					ROLE_REVERSAL_DELAY_MS);
+			rc = qpnp_typec_force_mode(chip,
+						DUAL_ROLE_PROP_MODE_NONE);
+			if (rc)
+				pr_err("Failed to set DRP mode rc=%d\n", rc);
+		}
+		chip->in_force_mode = false;
+		break;
+	default:
+		pr_debug("Already in DRP mode\n");
+		break;
+	}
+	mutex_unlock(&chip->typec_lock);
+	typec_relax(&chip->role_reversal_wakeup_source);
+}
+
+enum dual_role_property qpnp_typec_dr_properties[] = {
+	DUAL_ROLE_PROP_SUPPORTED_MODES,
+	DUAL_ROLE_PROP_MODE,
+	DUAL_ROLE_PROP_PR,
+	DUAL_ROLE_PROP_DR,
+};
+
+static int qpnp_typec_dr_is_writeable(struct dual_role_phy_instance *dual_role,
+					enum dual_role_property prop)
+{
+	int rc;
+
+	switch (prop) {
+	case DUAL_ROLE_PROP_MODE:
+		rc = 1;
+		break;
+	default:
+		rc = 0;
+	}
+	return rc;
+}
+
+static int qpnp_typec_dr_set_property(struct dual_role_phy_instance *dual_role,
+					enum dual_role_property prop,
+					const unsigned int *val)
+{
+	int rc = 0;
+	struct qpnp_typec_chip *chip = dual_role_get_drvdata(dual_role);
+
+	if (!chip || (chip->typec_state == POWER_SUPPLY_TYPE_UNKNOWN))
+		return -EINVAL;
+
+	switch (prop) {
+	case DUAL_ROLE_PROP_MODE:
+		/* Force role */
+		mutex_lock(&chip->typec_lock);
+		if (chip->in_force_mode) {
+			pr_debug("Already in mode transition, skipping request\n");
+			mutex_unlock(&chip->typec_lock);
+			return 0;
+		}
+		switch (*val) {
+		case DUAL_ROLE_PROP_MODE_UFP:
+			rc = qpnp_typec_force_mode(chip,
+						DUAL_ROLE_PROP_MODE_UFP);
+			if (rc)
+				pr_err("Failed to force UFP mode rc=%d\n", rc);
+			else
+				chip->in_force_mode = true;
+			break;
+		case DUAL_ROLE_PROP_MODE_DFP:
+			rc = qpnp_typec_force_mode(chip,
+						DUAL_ROLE_PROP_MODE_DFP);
+			if (rc)
+				pr_err("Failed to force DFP mode rc=%d\n", rc);
+			else
+				chip->in_force_mode = true;
+			break;
+		default:
+			pr_debug("Invalid role(not DFP/UFP) %d\n", *val);
+			rc = -EINVAL;
+		}
+		mutex_unlock(&chip->typec_lock);
+
+		/*
+		 * schedule delayed work to check if device latched to the
+		 * requested mode.
+		 */
+		if (!rc && chip->in_force_mode) {
+			cancel_delayed_work_sync(&chip->role_reversal_check);
+			typec_stay_awake(&chip->role_reversal_wakeup_source);
+			schedule_delayed_work(&chip->role_reversal_check,
+				msecs_to_jiffies(ROLE_REVERSAL_DELAY_MS));
+		}
+		break;
+	default:
+		pr_debug("Invalid DUAL ROLE request %d\n", prop);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int qpnp_typec_dr_get_property(struct dual_role_phy_instance *dual_role,
+					enum dual_role_property prop,
+					unsigned int *val)
+{
+	struct qpnp_typec_chip *chip = dual_role_get_drvdata(dual_role);
+	unsigned int mode, power_role, data_role;
+
+	if (!chip)
+		return -EINVAL;
+
+	switch (chip->typec_state) {
+	case POWER_SUPPLY_TYPE_UFP:
+		mode = DUAL_ROLE_PROP_MODE_UFP;
+		power_role = DUAL_ROLE_PROP_PR_SNK;
+		data_role = DUAL_ROLE_PROP_DR_DEVICE;
+		break;
+	case POWER_SUPPLY_TYPE_DFP:
+		mode = DUAL_ROLE_PROP_MODE_DFP;
+		power_role = DUAL_ROLE_PROP_PR_SRC;
+		data_role = DUAL_ROLE_PROP_DR_HOST;
+		break;
+	default:
+		mode = DUAL_ROLE_PROP_MODE_NONE;
+		power_role = DUAL_ROLE_PROP_PR_NONE;
+		data_role = DUAL_ROLE_PROP_DR_NONE;
+	}
+
+	switch (prop) {
+	case DUAL_ROLE_PROP_SUPPORTED_MODES:
+		*val = chip->dr_desc.supported_modes;
+		break;
+	case DUAL_ROLE_PROP_MODE:
+		*val = mode;
+		break;
+	case DUAL_ROLE_PROP_PR:
+		*val = power_role;
+		break;
+	case DUAL_ROLE_PROP_DR:
+		*val = data_role;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qpnp_typec_probe(struct spmi_device *spmi)
+{
+	int rc;
+	struct resource *resource;
+	struct qpnp_typec_chip *chip;
+
+	resource = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+	if (!resource) {
+		pr_err("Unable to get spmi resource for TYPEC\n");
+		return -EINVAL;
+	}
+
+	chip = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_typec_chip),
+			GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->dev = &spmi->dev;
+	chip->spmi = spmi;
+
+	/* parse DT */
+	rc = qpnp_typec_parse_dt(chip);
+	if (rc) {
+		pr_err("failed to parse DT rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->base = resource->start;
+	dev_set_drvdata(&spmi->dev, chip);
+	device_init_wakeup(&spmi->dev, 1);
+	mutex_init(&chip->typec_lock);
+	spin_lock_init(&chip->rw_lock);
+
+	/* determine initial status */
+	rc = qpnp_typec_determine_initial_status(chip);
+	if (rc) {
+		pr_err("failed to determine initial state rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->type_c_psy.name		= TYPEC_PSY_NAME;
+	chip->type_c_psy.get_property	= qpnp_typec_get_property;
+	chip->type_c_psy.properties	= qpnp_typec_properties;
+	chip->type_c_psy.num_properties	= ARRAY_SIZE(qpnp_typec_properties);
+
+	rc = power_supply_register(chip->dev, &chip->type_c_psy);
+	if (rc < 0) {
+		pr_err("Unable to register type_c_psy rc=%d\n", rc);
+		goto out;
+	}
+
+	if (chip->role_reversal_supported) {
+		chip->force_mode = DUAL_ROLE_PROP_MODE_NONE;
+		wakeup_source_init(&chip->role_reversal_wakeup_source.source,
+					"typec_role_reversal_wake");
+		INIT_DELAYED_WORK(&chip->role_reversal_check,
+					qpnp_typec_role_check_work);
+		/* Register for android TypeC dual role framework */
+		chip->dr_desc.name		= DUAL_ROLE_DESC_NAME;
+		chip->dr_desc.properties	= qpnp_typec_dr_properties;
+		chip->dr_desc.get_property	= qpnp_typec_dr_get_property;
+		chip->dr_desc.set_property	= qpnp_typec_dr_set_property;
+		chip->dr_desc.property_is_writeable =
+					qpnp_typec_dr_is_writeable;
+		chip->dr_desc.supported_modes	=
+					DUAL_ROLE_SUPPORTED_MODES_DFP_AND_UFP;
+		chip->dr_desc.num_properties	=
+					ARRAY_SIZE(qpnp_typec_dr_properties);
+
+		chip->dr_inst = devm_dual_role_instance_register(chip->dev,
+					&chip->dr_desc);
+		if (IS_ERR(chip->dr_inst)) {
+			pr_err("Failed to initialize dual role\n");
+			rc = PTR_ERR(chip->dr_inst);
+			goto unregister_psy;
+		} else {
+			chip->dr_inst->drv_data = chip;
+		}
+	}
+
+	/* All irqs */
+	rc = qpnp_typec_request_irqs(chip);
+	if (rc) {
+		pr_err("failed to request irqs rc=%d\n", rc);
+		goto unregister_psy;
+	}
+
+	pr_info("TypeC successfully probed state=%d CC-line-state=%d\n",
+			chip->typec_state, chip->cc_line_state);
+	return 0;
+
+unregister_psy:
+	power_supply_unregister(&chip->type_c_psy);
+out:
+	mutex_destroy(&chip->typec_lock);
+	if (chip->role_reversal_supported)
+		wakeup_source_trash(&chip->role_reversal_wakeup_source.source);
+	return rc;
+}
+
+static int qpnp_typec_remove(struct spmi_device *spmi)
+{
+	int rc;
+	struct qpnp_typec_chip *chip = dev_get_drvdata(&spmi->dev);
+
+	if (chip->role_reversal_supported) {
+		cancel_delayed_work_sync(&chip->role_reversal_check);
+		wakeup_source_trash(&chip->role_reversal_wakeup_source.source);
+	}
+	rc = qpnp_typec_configure_ssmux(chip, OPEN);
+	if (rc)
+		pr_err("failed to configure SSMUX rc=%d\n", rc);
+
+	mutex_destroy(&chip->typec_lock);
+	dev_set_drvdata(chip->dev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id qpnp_typec_match_table[] = {
+	{ .compatible = QPNP_TYPEC_DEV_NAME, },
+	{}
+};
+
+static struct spmi_driver qpnp_typec_driver = {
+	.probe		= qpnp_typec_probe,
+	.remove		= qpnp_typec_remove,
+	.driver		= {
+		.name		= QPNP_TYPEC_DEV_NAME,
+		.owner		= THIS_MODULE,
+		.of_match_table	= qpnp_typec_match_table,
+	},
+};
+
+/*
+ * qpnp_typec_init() - register spmi driver for qpnp-typec
+ */
+static int __init qpnp_typec_init(void)
+{
+	return spmi_driver_register(&qpnp_typec_driver);
+}
+module_init(qpnp_typec_init);
+
+static void __exit qpnp_typec_exit(void)
+{
+	spmi_driver_unregister(&qpnp_typec_driver);
+}
+module_exit(qpnp_typec_exit);
+
+MODULE_DESCRIPTION("QPNP type-C driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_TYPEC_DEV_NAME);
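
The new driver derives the allowed sink current from the Type-C advertisement
latched in the UFP status register: the Rd/standard bit means default USB
power (900 mA), the Rd/1.5 bit means 1.5 A, and any other non-zero match
means 3 A, exactly as get_max_current() decodes above. A standalone sketch
using the bit positions from this file:

#include <stdio.h>

#define TYPEC_RDSTD_BIT	(1 << 2)	/* default USB power: 900 mA */
#define TYPEC_RD1P5_BIT	(1 << 1)	/* 1.5 A advertisement */

static int decode_current_ma(unsigned char reg)
{
	if (!reg)
		return 0;
	return (reg & TYPEC_RDSTD_BIT) ? 900 :
	       (reg & TYPEC_RD1P5_BIT) ? 1500 : 3000;
}

int main(void)
{
	printf("%d %d %d\n", decode_current_ma(TYPEC_RDSTD_BIT),
	       decode_current_ma(TYPEC_RD1P5_BIT),
	       decode_current_ma(1));	/* prints 900 1500 3000 */
	return 0;
}
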
diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c
index 4b2e9c8..dfedece 100644
--- a/drivers/power/supply/qcom/smb1351-charger.c
+++ b/drivers/power/supply/qcom/smb1351-charger.c
@@ -461,6 +461,7 @@ struct smb1351_charger {
 
 	int			parallel_pin_polarity_setting;
 	int			parallel_mode;
+	int			pl_batfet_mode;
 	bool			parallel_charger;
 	bool			parallel_charger_suspended;
 	bool			bms_controlled_charging;
@@ -1417,6 +1418,7 @@ static enum power_supply_property smb1351_parallel_properties[] = {
 	POWER_SUPPLY_PROP_CHARGE_TYPE,
 	POWER_SUPPLY_PROP_PARALLEL_MODE,
 	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE,
 };
 
 static int smb1351_parallel_set_chg_suspend(struct smb1351_charger *chip,
@@ -1706,6 +1708,9 @@ static int smb1351_parallel_get_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
 		val->intval = chip->parallel_charger_suspended;
 		break;
+	case POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE:
+		val->intval = chip->pl_batfet_mode;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -2212,7 +2217,7 @@ static int smb1351_usbin_uv_handler(struct smb1351_charger *chip, u8 status)
 static int smb1351_usbin_ov_handler(struct smb1351_charger *chip, u8 status)
 {
 	int rc;
-	u8 reg;
+	u8 reg = 0;
 	union power_supply_propval pval = {0, };
 
 	rc = smb1351_read_reg(chip, IRQ_E_REG, &reg);
@@ -3197,6 +3202,10 @@ static int smb1351_parallel_charger_probe(struct i2c_client *client,
 	else
 		chip->parallel_mode = POWER_SUPPLY_PL_USBIN_USBIN;
 
+	chip->pl_batfet_mode = POWER_SUPPLY_PL_NON_STACKED_BATFET;
+	if (of_property_read_bool(node, "qcom,stacked-batfet"))
+		chip->pl_batfet_mode = POWER_SUPPLY_PL_STACKED_BATFET;
+
 	i2c_set_clientdata(client, chip);
 
 	chip->parallel_psy_d.name = "parallel";
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index a279e98..833a8da 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -32,6 +32,7 @@
 /* SMB1355 registers, different than mentioned in smb-reg.h */
 
 #define CHGR_BASE	0x1000
+#define ANA2_BASE	0x1100
 #define BATIF_BASE	0x1200
 #define USBIN_BASE	0x1300
 #define MISC_BASE	0x1600
@@ -64,6 +65,9 @@
 #define CHGR_PRE_TO_FAST_THRESHOLD_CFG_REG	(CHGR_BASE + 0x74)
 #define PRE_TO_FAST_CHARGE_THRESHOLD_MASK	GENMASK(2, 0)
 
+#define ANA2_TR_SBQ_ICL_1X_REF_OFFSET_REG	(ANA2_BASE + 0xF5)
+#define TR_SBQ_ICL_1X_REF_OFFSET		GENMASK(4, 0)
+
 #define POWER_MODE_HICCUP_CFG			(BATIF_BASE + 0x72)
 #define MAX_HICCUP_DUETO_BATDIS_MASK		GENMASK(5, 2)
 #define HICCUP_TIMEOUT_CFG_MASK			GENMASK(1, 0)
@@ -87,6 +91,9 @@
 #define DIE_TEMP_UB_HOT_BIT			BIT(1)
 #define DIE_TEMP_LB_HOT_BIT			BIT(0)
 
+#define MISC_RT_STS_REG				(MISC_BASE + 0x10)
+#define HARD_ILIMIT_RT_STS_BIT			BIT(5)
+
 #define BARK_BITE_WDOG_PET_REG			(MISC_BASE + 0x43)
 #define BARK_BITE_WDOG_PET_BIT			BIT(0)
 
@@ -98,6 +105,9 @@
 #define WDOG_TIMER_EN_ON_PLUGIN_BIT		BIT(1)
 #define WDOG_TIMER_EN_BIT			BIT(0)
 
+#define MISC_CUST_SDCDC_CLK_CFG_REG		(MISC_BASE + 0xA0)
+#define SWITCHER_CLK_FREQ_MASK			GENMASK(3, 0)
+
 #define SNARL_BARK_BITE_WD_CFG_REG		(MISC_BASE + 0x53)
 #define BITE_WDOG_DISABLE_CHARGING_CFG_BIT	BIT(7)
 #define SNARL_WDOG_TIMEOUT_MASK			GENMASK(6, 4)
@@ -112,6 +122,34 @@
 #define MISC_CHGR_TRIM_OPTIONS_REG		(MISC_BASE + 0x55)
 #define CMD_RBIAS_EN_BIT			BIT(2)
 
+#define MISC_ENG_SDCDC_INPUT_CURRENT_CFG1_REG	(MISC_BASE + 0xC8)
+#define PROLONG_ISENSE_MASK			GENMASK(7, 6)
+#define PROLONG_ISENSEM_SHIFT			6
+#define SAMPLE_HOLD_DELAY_MASK			GENMASK(5, 2)
+#define SAMPLE_HOLD_DELAY_SHIFT			2
+#define DISABLE_ILIMIT_BIT			BIT(0)
+
+#define MISC_ENG_SDCDC_INPUT_CURRENT_CFG2_REG	(MISC_BASE + 0xC9)
+#define INPUT_CURRENT_LIMIT_SOURCE_BIT		BIT(7)
+#define TC_ISENSE_AMPLIFIER_MASK		GENMASK(6, 4)
+#define TC_ISENSE_AMPLIFIER_SHIFT		4
+#define HS_II_CORRECTION_MASK			GENMASK(3, 0)
+
+#define MISC_ENG_SDCDC_RESERVE3_REG		(MISC_BASE + 0xCB)
+#define VDDCAP_SHORT_DISABLE_TRISTATE_BIT	BIT(7)
+#define PCL_SHUTDOWN_BUCK_BIT			BIT(6)
+#define ISENSE_TC_CORRECTION_BIT		BIT(5)
+#define II_SOURCE_BIT				BIT(4)
+#define SCALE_SLOPE_COMP_MASK			GENMASK(3, 0)
+
+#define USBIN_CURRENT_LIMIT_CFG_REG		(USBIN_BASE + 0x70)
+#define USB_TR_SCPATH_ICL_1X_GAIN_REG		(USBIN_BASE + 0xF2)
+#define TR_SCPATH_ICL_1X_GAIN_MASK		GENMASK(5, 0)
+
+#define IS_USBIN(mode)				\
+	((mode == POWER_SUPPLY_PL_USBIN_USBIN) \
+	 || (mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+
 struct smb_chg_param {
 	const char	*name;
 	u16		reg;
@@ -123,6 +161,7 @@ struct smb_chg_param {
 struct smb_params {
 	struct smb_chg_param	fcc;
 	struct smb_chg_param	ov;
+	struct smb_chg_param	usb_icl;
 };
 
 static struct smb_params v1_params = {
@@ -140,6 +179,13 @@ static struct smb_params v1_params = {
 		.max_u	= 5000000,
 		.step_u	= 10000,
 	},
+	.usb_icl	= {
+		.name   = "usb input current limit",
+		.reg    = USBIN_CURRENT_LIMIT_CFG_REG,
+		.min_u  = 100000,
+		.max_u  = 5000000,
+		.step_u = 30000,
+	},
 };
 
 struct smb_irq_info {
@@ -155,6 +201,8 @@ struct smb_iio {
 
 struct smb_dt_props {
 	bool	disable_ctm;
+	int	pl_mode;
+	int	pl_batfet_mode;
 };
 
 struct smb1355 {
@@ -317,6 +365,21 @@ static void die_temp_work(struct work_struct *work)
 			msecs_to_jiffies(DIE_TEMP_MEAS_PERIOD_MS));
 }
 
+static int smb1355_get_prop_input_current_limited(struct smb1355 *chip,
+					union power_supply_propval *pval)
+{
+	int rc;
+	u8 stat = 0;
+
+	rc = smb1355_read(chip, MISC_RT_STS_REG, &stat);
+	if (rc < 0)
+		pr_err("Couldn't read MISC_RT_STS_REG rc=%d\n", rc);
+
+	pval->intval = !!(stat & HARD_ILIMIT_RT_STS_BIT);
+
+	return 0;
+}
+
 static irqreturn_t smb1355_handle_chg_state_change(int irq, void *data)
 {
 	struct smb1355 *chip = data;
@@ -369,6 +432,23 @@ static int smb1355_parse_dt(struct smb1355 *chip)
 	chip->dt.disable_ctm =
 		of_property_read_bool(node, "qcom,disable-ctm");
 
+	/*
+	 * If the parallel-mode property is not present, the default
+	 * parallel configuration is USBMID-USBMID.
+	 */
+	rc = of_property_read_u32(node,
+		"qcom,parallel-mode", &chip->dt.pl_mode);
+	if (rc < 0)
+		chip->dt.pl_mode = POWER_SUPPLY_PL_USBMID_USBMID;
+
+	/*
+	 * If the stacked-batfet property is not present, the default
+	 * configuration is NON-STACKED-BATFET.
+	 */
+	chip->dt.pl_batfet_mode = POWER_SUPPLY_PL_NON_STACKED_BATFET;
+	if (of_property_read_bool(node, "qcom,stacked-batfet"))
+		chip->dt.pl_batfet_mode = POWER_SUPPLY_PL_STACKED_BATFET;
+
 	return rc;
 }
 
@@ -388,6 +468,10 @@ static enum power_supply_property smb1355_parallel_props[] = {
 	POWER_SUPPLY_PROP_MODEL_NAME,
 	POWER_SUPPLY_PROP_PARALLEL_MODE,
 	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+	POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+	POWER_SUPPLY_PROP_MIN_ICL,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
 };
 
 static int smb1355_get_prop_batt_charge_type(struct smb1355 *chip,
@@ -455,6 +539,7 @@ static int smb1355_get_prop_charger_temp_max(struct smb1355 *chip,
 	return rc;
 }
 
+#define MIN_PARALLEL_ICL_UA		250000
 static int smb1355_parallel_get_prop(struct power_supply *psy,
 				     enum power_supply_property prop,
 				     union power_supply_propval *val)
@@ -498,7 +583,7 @@ static int smb1355_parallel_get_prop(struct power_supply *psy,
 		val->strval = chip->name;
 		break;
 	case POWER_SUPPLY_PROP_PARALLEL_MODE:
-		val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
+		val->intval = chip->dt.pl_mode;
 		break;
 	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
 		if (chip->c_health == -EINVAL)
@@ -506,6 +591,25 @@ static int smb1355_parallel_get_prop(struct power_supply *psy,
 		else
 			val->intval = chip->c_health;
 		break;
+	case POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE:
+		val->intval = chip->dt.pl_batfet_mode;
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+		if (IS_USBIN(chip->dt.pl_mode))
+			rc = smb1355_get_prop_input_current_limited(chip, val);
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		if (IS_USBIN(chip->dt.pl_mode))
+			rc = smb1355_get_charge_param(chip,
+					&chip->param.usb_icl, &val->intval);
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_MIN_ICL:
+		val->intval = MIN_PARALLEL_ICL_UA;
+		break;
 	default:
 		pr_err_ratelimited("parallel psy get prop %d not supported\n",
 			prop);
@@ -563,6 +667,28 @@ static int smb1355_set_parallel_charging(struct smb1355 *chip, bool disable)
 	return 0;
 }
 
+static int smb1355_set_current_max(struct smb1355 *chip, int curr)
+{
+	int rc = 0;
+
+	if (!IS_USBIN(chip->dt.pl_mode))
+		return 0;
+
+	if ((curr / 1000) < 100) {
+		/* disable parallel path (ICL < 100mA) */
+		rc = smb1355_set_parallel_charging(chip, true);
+	} else {
+		rc = smb1355_set_parallel_charging(chip, false);
+		if (rc < 0)
+			return rc;
+
+		rc = smb1355_set_charge_param(chip,
+				&chip->param.usb_icl, curr);
+	}
+
+	return rc;
+}
+
 static int smb1355_parallel_set_prop(struct power_supply *psy,
 				     enum power_supply_property prop,
 				     const union power_supply_propval *val)
@@ -574,6 +700,9 @@ static int smb1355_parallel_set_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
 		rc = smb1355_set_parallel_charging(chip, (bool)val->intval);
 		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smb1355_set_current_max(chip, val->intval);
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smb1355_set_charge_param(chip, &chip->param.ov,
 						val->intval);
@@ -824,6 +953,73 @@ static int smb1355_init_hw(struct smb1355 *chip)
 		return rc;
 	}
 
+	/* USBIN-USBIN configuration */
+	if (IS_USBIN(chip->dt.pl_mode)) {
+		/* set switcher clock frequency to 700kHz */
+		rc = smb1355_masked_write(chip, MISC_CUST_SDCDC_CLK_CFG_REG,
+				SWITCHER_CLK_FREQ_MASK, 0x03);
+		if (rc < 0) {
+			pr_err("Couldn't set MISC_CUST_SDCDC_CLK_CFG rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/*
+		 * configure compensation for input current limit (ICL) loop
+		 * accuracy, scale slope compensation using 30k resistor.
+		 */
+		rc = smb1355_masked_write(chip, MISC_ENG_SDCDC_RESERVE3_REG,
+				II_SOURCE_BIT | SCALE_SLOPE_COMP_MASK,
+				II_SOURCE_BIT);
+		if (rc < 0) {
+			pr_err("Couldn't set MISC_ENG_SDCDC_RESERVE3_REG rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* configuration to improve ICL accuracy */
+		rc = smb1355_masked_write(chip,
+				MISC_ENG_SDCDC_INPUT_CURRENT_CFG1_REG,
+				PROLONG_ISENSE_MASK | SAMPLE_HOLD_DELAY_MASK,
+				((uint8_t)0x0C << SAMPLE_HOLD_DELAY_SHIFT));
+		if (rc < 0) {
+			pr_err("Couldn't set MISC_ENG_SDCDC_INPUT_CURRENT_CFG1_REG rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		rc = smb1355_masked_write(chip,
+				MISC_ENG_SDCDC_INPUT_CURRENT_CFG2_REG,
+				INPUT_CURRENT_LIMIT_SOURCE_BIT
+				| HS_II_CORRECTION_MASK,
+			       INPUT_CURRENT_LIMIT_SOURCE_BIT | 0xC);
+
+		if (rc < 0) {
+			pr_err("Couldn't set MISC_ENG_SDCDC_INPUT_CURRENT_CFG2_REG rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* configure DAC offset */
+		rc = smb1355_masked_write(chip,
+				ANA2_TR_SBQ_ICL_1X_REF_OFFSET_REG,
+				TR_SBQ_ICL_1X_REF_OFFSET, 0x00);
+		if (rc < 0) {
+			pr_err("Couldn't set ANA2_TR_SBQ_ICL_1X_REF_OFFSET_REG rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* configure DAC gain */
+		rc = smb1355_masked_write(chip, USB_TR_SCPATH_ICL_1X_GAIN_REG,
+				TR_SCPATH_ICL_1X_GAIN_MASK, 0x22);
+		if (rc < 0) {
+			pr_err("Couldn't set USB_TR_SCPATH_ICL_1X_GAIN_REG rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	return 0;
 }
 
@@ -991,7 +1187,11 @@ static int smb1355_probe(struct platform_device *pdev)
 		goto cleanup;
 	}
 
-	pr_info("%s probed successfully\n", chip->name);
+	pr_info("%s probed successfully pl_mode=%s batfet_mode=%s\n",
+		chip->name,
+		IS_USBIN(chip->dt.pl_mode) ? "USBIN-USBIN" : "USBMID-USBMID",
+		(chip->dt.pl_batfet_mode == POWER_SUPPLY_PL_STACKED_BATFET)
+			? "STACKED_BATFET" : "NON-STACKED_BATFET");
 	return rc;
 
 cleanup:
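
smb1355_set_current_max() suspends the parallel path below 100 mA because
that is the floor of the new usb_icl parameter (min 100 mA, 30 mA steps, max
5 A). Assuming the usual linear encoding behind smb1355_set_charge_param()
(its body is not part of this diff), the register code would be derived as:

#include <stdio.h>

struct smb_chg_param { int min_u, max_u, step_u; };

static int param_to_code(const struct smb_chg_param *p, int val_u)
{
	if (val_u < p->min_u || val_u > p->max_u)
		return -1;			/* out of programmable range */
	return (val_u - p->min_u) / p->step_u;
}

int main(void)
{
	struct smb_chg_param usb_icl = { 100000, 5000000, 30000 };

	/* 1 A -> (1000000 - 100000) / 30000 = code 30 */
	printf("code=%d\n", param_to_code(&usb_icl, 1000000));
	return 0;
}
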
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 28c3512..f1df8f0 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -97,6 +97,7 @@ struct smb_dt_props {
 	int	chg_temp_max_mdegc;
 	int	connector_temp_max_mdegc;
 	int	pl_mode;
+	int	pl_batfet_mode;
 };
 
 struct smb138x {
@@ -111,6 +112,11 @@ module_param_named(
 	debug_mask, __debug_mask, int, 0600
 );
 
+static int __try_sink_enabled;
+module_param_named(
+	try_sink_enabled, __try_sink_enabled, int, 0600
+);
+
 static irqreturn_t smb138x_handle_slave_chg_state_change(int irq, void *data)
 {
 	struct smb_irq_data *irq_data = data;
@@ -200,6 +206,10 @@ static int smb138x_parse_dt(struct smb138x *chip)
 	if (rc < 0)
 		chip->dt.connector_temp_max_mdegc = 105000;
 
+	chip->dt.pl_batfet_mode = POWER_SUPPLY_PL_NON_STACKED_BATFET;
+	if (of_property_read_bool(node, "qcom,stacked-batfet"))
+		chip->dt.pl_batfet_mode = POWER_SUPPLY_PL_STACKED_BATFET;
+
 	return 0;
 }
 
@@ -215,6 +225,7 @@ static enum power_supply_property smb138x_usb_props[] = {
 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_CURRENT_MAX,
 	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_REAL_TYPE,
 	POWER_SUPPLY_PROP_TYPEC_MODE,
 	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
 	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION,
@@ -251,6 +262,9 @@ static int smb138x_usb_get_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_TYPE:
 		val->intval = chg->usb_psy_desc.type;
 		break;
+	case POWER_SUPPLY_PROP_REAL_TYPE:
+		val->intval = chg->real_charger_type;
+		break;
 	case POWER_SUPPLY_PROP_TYPEC_MODE:
 		val->intval = chg->typec_mode;
 		break;
@@ -332,6 +346,120 @@ static int smb138x_init_usb_psy(struct smb138x *chip)
 	return 0;
 }
 
+/*****************************
+ * USB MAIN PSY REGISTRATION *
+ *****************************/
+
+static enum power_supply_property smb138x_usb_main_props[] = {
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+	POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED,
+	POWER_SUPPLY_PROP_FCC_DELTA,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static int smb138x_usb_main_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smblib_get_charge_param(chg, &chg->param.fcc,
+							&val->intval);
+		break;
+	case POWER_SUPPLY_PROP_TYPE:
+		val->intval = POWER_SUPPLY_TYPE_MAIN;
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
+		rc = smblib_get_prop_input_current_settled(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED:
+		rc = smblib_get_prop_input_voltage_settled(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_FCC_DELTA:
+		rc = smblib_get_prop_fcc_delta(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_get_icl_current(chg, &val->intval);
+		break;
+	default:
+		pr_debug("get prop %d is not supported in usb-main\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+	return 0;
+}
+
+static int smb138x_usb_main_set_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_set_icl_current(chg, val->intval);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc < 0)
+		pr_err("Couldn't set prop %d, rc=%d\n", psp, rc);
+
+	return rc;
+}
+
+static const struct power_supply_desc usb_main_psy_desc = {
+	.name           = "main",
+	.type           = POWER_SUPPLY_TYPE_MAIN,
+	.properties     = smb138x_usb_main_props,
+	.num_properties = ARRAY_SIZE(smb138x_usb_main_props),
+	.get_property   = smb138x_usb_main_get_prop,
+	.set_property   = smb138x_usb_main_set_prop,
+};
+
+static int smb138x_init_usb_main_psy(struct smb138x *chip)
+{
+	struct power_supply_config usb_main_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	usb_main_cfg.drv_data = chip;
+	usb_main_cfg.of_node = chg->dev->of_node;
+	chg->usb_main_psy = devm_power_supply_register(chg->dev,
+						  &usb_main_psy_desc,
+						  &usb_main_cfg);
+	if (IS_ERR(chg->usb_main_psy)) {
+		pr_err("Couldn't register USB main power supply\n");
+		return PTR_ERR(chg->usb_main_psy);
+	}
+
+	return 0;
+}
+
 /*************************
  * BATT PSY REGISTRATION *
  *************************/
@@ -541,6 +669,7 @@ static enum power_supply_property smb138x_parallel_props[] = {
 	POWER_SUPPLY_PROP_PARALLEL_MODE,
 	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
 	POWER_SUPPLY_PROP_SET_SHIP_MODE,
+	POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE,
 };
 
 static int smb138x_parallel_get_prop(struct power_supply *psy,
@@ -615,6 +744,9 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
 		/* Not in ship mode as long as device is active */
 		val->intval = 0;
 		break;
+	case POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE:
+		val->intval = chip->dt.pl_batfet_mode;
+		break;
 	default:
 		pr_err("parallel power supply get prop %d not supported\n",
 			prop);
@@ -992,6 +1124,17 @@ static int smb138x_init_hw(struct smb138x *chip)
 		return rc;
 	}
 
+	/* enable usb-src-change interrupt sources */
+	rc = smblib_masked_write(chg, USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+				APSD_IRQ_EN_CFG_BIT | HVDCP_IRQ_EN_CFG_BIT
+			      | AUTH_IRQ_EN_CFG_BIT | VADP_IRQ_EN_CFG_BIT,
+				APSD_IRQ_EN_CFG_BIT | HVDCP_IRQ_EN_CFG_BIT
+			      | AUTH_IRQ_EN_CFG_BIT | VADP_IRQ_EN_CFG_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure usb-src-change interrupts rc=%d\n",
+			rc);
+		return rc;
+	}
+
 	/* configure to a fixed 700khz freq to avoid tdie errors */
 	rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 700);
 	if (rc < 0) {
@@ -1173,7 +1316,7 @@ static struct smb_irq_info smb138x_irqs[] = {
 	},
 	[OTG_OVERCURRENT_IRQ] = {
 		.name		= "otg-overcurrent",
-		.handler	= smblib_handle_debug,
+		.handler	= smblib_handle_otg_overcurrent,
 	},
 	[OTG_OC_DIS_SW_STS_IRQ] = {
 		.name		= "otg-oc-dis-sw-sts",
@@ -1388,6 +1531,21 @@ static int smb138x_request_interrupts(struct smb138x *chip)
 	return rc;
 }
 
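+/* undo smb138x_request_interrupts(): drop wake votes and free the IRQs */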
+static void smb138x_free_interrupts(struct smb_charger *chg)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(smb138x_irqs); i++) {
+		if (smb138x_irqs[i].irq > 0) {
+			if (smb138x_irqs[i].wake)
+				disable_irq_wake(smb138x_irqs[i].irq);
+
+			devm_free_irq(chg->dev, smb138x_irqs[i].irq,
+					smb138x_irqs[i].irq_data);
+		}
+	}
+}
+
 /*********
  * PROBE *
  *********/
@@ -1467,6 +1625,12 @@ static int smb138x_master_probe(struct smb138x *chip)
 		goto cleanup;
 	}
 
+	rc = smb138x_init_usb_main_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize main usb psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
 	rc = smb138x_init_batt_psy(chip);
 	if (rc < 0) {
 		pr_err("Couldn't initialize batt psy rc=%d\n", rc);
@@ -1495,7 +1659,9 @@ static int smb138x_master_probe(struct smb138x *chip)
 	return rc;
 
 cleanup:
+	smb138x_free_interrupts(chg);
 	smblib_deinit(chg);
+
 	return rc;
 }
 
@@ -1614,6 +1780,7 @@ static int smb138x_probe(struct platform_device *pdev)
 
 	chip->chg.dev = &pdev->dev;
 	chip->chg.debug_mask = &__debug_mask;
+	chip->chg.try_sink_enabled = &__try_sink_enabled;
 	chip->chg.irq_info = smb138x_irqs;
 	chip->chg.name = "SMB";
 
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 8060142..1a93164 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -658,6 +658,7 @@ static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
 
 	entry.str = str;
 	entry.lba = lba;
+	entry.cmd_id = cmd_id;
 	entry.transfer_len = transfer_len;
 	entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	entry.tag = tag;
@@ -5561,8 +5562,15 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 			 * UFS device needs urgent BKOPs.
 			 */
 			if (!hba->pm_op_in_progress &&
-			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
-				schedule_work(&hba->eeh_work);
+			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) {
+				/*
+				 * Prevent suspend once eeh_work is scheduled
+				 * to avoid deadlock between ufshcd_suspend
+				 * and exception event handler.
+				 */
+				if (schedule_work(&hba->eeh_work))
+					pm_runtime_get_noresume(hba->dev);
+			}
 			break;
 		case UPIU_TRANSACTION_REJECT_UPIU:
 			/* TODO: handle Reject UPIU Response */
@@ -6146,6 +6154,13 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 
 out:
 	ufshcd_scsi_unblock_requests(hba);
+	/*
+	 * pm_runtime_get_noresume is called while scheduling
+	 * eeh_work to avoid suspend racing with exception work.
+	 * Hence decrement usage counter using pm_runtime_put_noidle
+	 * to allow suspend on completion of exception event handler.
+	 */
+	pm_runtime_put_noidle(hba->dev);
 	pm_runtime_put(hba->dev);
 	return;
 }
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 22b7236..3b37a37 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -689,7 +689,14 @@
 endif # MSM_PM
 
 config QCOM_DCC_V2
-	bool "Qualcomm Technologies Data Capture and Compare enigne support for V2"
+	bool "Qualcomm Technologies Data Capture and Compare engine support for V2"
+	help
+	  This option enables the driver for the Data Capture and Compare
+	  engine. The DCC driver provides an interface to configure the DCC
+	  block and read back captured data from DCC's internal SRAM.
+
+config QCOM_DCC
+	bool "QCOM Data Capture and Compare engine support"
 	help
 	  This option enables driver for Data Capture and Compare engine. DCC
 	  driver provides interface to configure DCC block and read back
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 6deadc0..efa702f 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -80,6 +80,7 @@
 obj-$(CONFIG_MSM_QBT1000) += qbt1000.o
 obj-$(CONFIG_MSM_EVENT_TIMER) += event_timer.o
 obj-$(CONFIG_MSM_IDLE_STATS)	+= lpm-stats.o
+obj-$(CONFIG_QCOM_DCC) += dcc.o
 obj-$(CONFIG_QCOM_DCC_V2) += dcc_v2.o
 obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_stats.o
 obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
diff --git a/drivers/soc/qcom/dcc.c b/drivers/soc/qcom/dcc.c
new file mode 100644
index 0000000..43dddb4
--- /dev/null
+++ b/drivers/soc/qcom/dcc.c
@@ -0,0 +1,1361 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
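+
+/*
+ * Data Capture and Compare (DCC) driver. DCC reads back a programmed
+ * list of register/memory addresses and either stores the values in its
+ * internal SRAM ("cap" mode) or computes a CRC over them ("crc" mode),
+ * on a software- or RPM-initiated trigger.
+ */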
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/fs.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <soc/qcom/memory_dump.h>
+#include <soc/qcom/rpm-smd.h>
+#include <soc/qcom/scm.h>
+
+#define RPM_MISC_REQ_TYPE	0x6373696d
+#define RPM_MISC_DDR_DCC_ENABLE 0x32726464
+
+#define TIMEOUT_US		(100)
+
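+/*
+ * BM(lsb, msb) builds a contiguous mask covering bits [msb:lsb]
+ * (e.g. BM(0, 7) == 0xFF); BMVAL() extracts that field from val and
+ * BVAL() reads single bit n.
+ */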
+#define BM(lsb, msb)		((BIT(msb) - BIT(lsb)) + BIT(msb))
+#define BMVAL(val, lsb, msb)	((val & BM(lsb, msb)) >> lsb)
+#define BVAL(val, n)		((val & BIT(n)) >> n)
+
+#define dcc_writel(drvdata, val, off)					\
+	__raw_writel((val), drvdata->base + off)
+#define dcc_readl(drvdata, off)						\
+	__raw_readl(drvdata->base + off)
+
+#define dcc_sram_writel(drvdata, val, off)				\
+	__raw_writel((val), drvdata->ram_base + off)
+#define dcc_sram_readl(drvdata, off)					\
+	__raw_readl(drvdata->ram_base + off)
+
+/* DCC registers */
+#define DCC_HW_VERSION		(0x00)
+#define DCC_HW_INFO		(0x04)
+#define DCC_CGC_CFG		(0x10)
+#define DCC_LL			(0x14)
+#define DCC_RAM_CFG		(0x18)
+#define DCC_CFG			(0x1C)
+#define DCC_SW_CTL		(0x20)
+#define DCC_STATUS		(0x24)
+#define DCC_FETCH_ADDR		(0x28)
+#define DCC_SRAM_ADDR		(0x2C)
+#define DCC_INT_ENABLE		(0x30)
+#define DCC_INT_STATUS		(0x34)
+#define DCC_QSB_CFG		(0x38)
+
+#define DCC_REG_DUMP_MAGIC_V2		(0x42445953)
+#define DCC_REG_DUMP_VER		(1)
+
+#define MAX_DCC_OFFSET		(0xFF * 4)
+#define MAX_DCC_LEN		0x7F
+
+#define SCM_SVC_DISABLE_XPU	0x23
+
+enum dcc_func_type {
+	DCC_FUNC_TYPE_CAPTURE,
+	DCC_FUNC_TYPE_CRC,
+};
+
+static const char * const str_dcc_func_type[] = {
+	[DCC_FUNC_TYPE_CAPTURE]		= "cap",
+	[DCC_FUNC_TYPE_CRC]		= "crc",
+};
+
+enum dcc_data_sink {
+	DCC_DATA_SINK_ATB,
+	DCC_DATA_SINK_SRAM
+};
+
+static const char * const str_dcc_data_sink[] = {
+	[DCC_DATA_SINK_ATB]		= "atb",
+	[DCC_DATA_SINK_SRAM]		= "sram",
+};
+
+struct rpm_trig_req {
+	uint32_t    enable;
+	uint32_t    reserved;
+};
+
+struct dcc_config_entry {
+	uint32_t		base;
+	uint32_t		offset;
+	uint32_t		len;
+	uint32_t		index;
+	struct list_head	list;
+};
+
+struct dcc_drvdata {
+	void __iomem		*base;
+	uint32_t		reg_size;
+	struct device		*dev;
+	struct mutex		mutex;
+	void __iomem		*ram_base;
+	uint32_t		ram_size;
+	struct clk		*clk;
+	enum dcc_data_sink	data_sink;
+	enum dcc_func_type	func_type;
+	uint32_t		ram_cfg;
+	bool			enable;
+	bool			interrupt_disable;
+	char			*sram_node;
+	struct cdev		sram_dev;
+	struct class		*sram_class;
+	struct list_head	config_head;
+	uint32_t		nr_config;
+	void			*reg_buf;
+	struct msm_dump_data	reg_data;
+	bool			save_reg;
+	void			*sram_buf;
+	struct msm_dump_data	sram_data;
+	struct rpm_trig_req	rpm_trig_req;
+	struct msm_rpm_kvp	rpm_kvp;
+	bool			xpu_scm_avail;
+	uint64_t		xpu_addr;
+	uint32_t		xpu_unlock_count;
+};
+
+static int dcc_cfg_xpu(struct dcc_drvdata *drvdata, bool enable)
+{
+	struct scm_desc desc = {0};
+
+	desc.args[0] = drvdata->xpu_addr;
+	desc.args[1] = enable;
+	desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
+
+	return scm_call2(SCM_SIP_FNID(SCM_SVC_MP, SCM_SVC_DISABLE_XPU), &desc);
+}
+
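+/*
+ * XPU protection is reference counted: dcc_xpu_unlock() disables the
+ * XPU only on the first unlock request, and dcc_xpu_lock() re-enables
+ * it once the last outstanding unlock has been balanced.
+ */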
+static int dcc_xpu_lock(struct dcc_drvdata *drvdata)
+{
+	int ret = 0;
+
+	mutex_lock(&drvdata->mutex);
+	if (!drvdata->xpu_scm_avail)
+		goto err;
+
+	if (drvdata->xpu_unlock_count == 0)
+		goto err;
+
+	if (drvdata->xpu_unlock_count == 1) {
+		ret = clk_prepare_enable(drvdata->clk);
+		if (ret)
+			goto err;
+
+		/* make sure all access to DCC are completed */
+		mb();
+
+		ret = dcc_cfg_xpu(drvdata, 1);
+		if (ret)
+			dev_err(drvdata->dev, "Failed to lock DCC XPU.\n");
+
+		clk_disable_unprepare(drvdata->clk);
+	}
+
+	if (!ret)
+		drvdata->xpu_unlock_count--;
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+static int dcc_xpu_unlock(struct dcc_drvdata *drvdata)
+{
+	int ret = 0;
+
+	mutex_lock(&drvdata->mutex);
+	if (!drvdata->xpu_scm_avail)
+		goto err;
+
+	if (drvdata->xpu_unlock_count == 0) {
+		ret = clk_prepare_enable(drvdata->clk);
+		if (ret)
+			goto err;
+
+		ret = dcc_cfg_xpu(drvdata, 0);
+		if (ret)
+			dev_err(drvdata->dev, "Failed to unlock DCC XPU.\n");
+
+		clk_disable_unprepare(drvdata->clk);
+	}
+
+	if (!ret)
+		drvdata->xpu_unlock_count++;
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+static bool dcc_ready(struct dcc_drvdata *drvdata)
+{
+	uint32_t val;
+
+	/* poll until DCC ready */
+	if (!readl_poll_timeout((drvdata->base + DCC_STATUS), val,
+				(BVAL(val, 4) == 1), 1, TIMEOUT_US))
+		return true;
+
+	return false;
+}
+
+static int dcc_sw_trigger(struct dcc_drvdata *drvdata)
+{
+	int ret;
+
+	ret = 0;
+	mutex_lock(&drvdata->mutex);
+
+	if (!drvdata->enable) {
+		dev_err(drvdata->dev,
+			"DCC is disabled. Can't send sw trigger.\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (!dcc_ready(drvdata)) {
+		dev_err(drvdata->dev, "DCC is not ready!\n");
+		ret = -EBUSY;
+		goto err;
+	}
+
+	dcc_writel(drvdata, 1, DCC_SW_CTL);
+
+	if (!dcc_ready(drvdata)) {
+		dev_err(drvdata->dev,
+			"DCC is busy after receiving sw trigger.\n");
+		ret = -EBUSY;
+		goto err;
+	}
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+static int __dcc_ll_cfg(struct dcc_drvdata *drvdata)
+{
+	int ret = 0;
+	uint32_t sram_offset = 0;
+	uint32_t prev_addr, addr;
+	uint32_t prev_off = 0, off;
+	uint32_t link;
+	uint32_t pos, total_len = 0;
+	struct dcc_config_entry *entry;
+
+	if (list_empty(&drvdata->config_head)) {
+		dev_err(drvdata->dev,
+			"No configuration is available to program in DCC SRAM!\n");
+		return -EINVAL;
+	}
+
+	memset_io(drvdata->ram_base, 0, drvdata->ram_size);
+
+	prev_addr = 0;
+	link = 0;
+
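+	/*
+	 * Build the list image in SRAM: each region emits one address word
+	 * (base >> 4 in bits [27:0], BIT(31) set) followed by offset/length
+	 * pairs packed two per 32-bit word; a zero word terminates the list.
+	 */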
+	list_for_each_entry(entry, &drvdata->config_head, list) {
+		/* Address type */
+		addr = (entry->base >> 4) & BM(0, 27);
+		addr |= BIT(31);
+		off = entry->offset/4;
+		total_len += entry->len * 4;
+
+		if (!prev_addr || prev_addr != addr || prev_off > off) {
+			/* Check if we need to write link of prev entry */
+			if (link) {
+				dcc_sram_writel(drvdata, link, sram_offset);
+				sram_offset += 4;
+			}
+
+			/* Write address */
+			dcc_sram_writel(drvdata, addr, sram_offset);
+			sram_offset += 4;
+
+			/* Reset link and prev_off */
+			link = 0;
+			prev_off = 0;
+		}
+
+		if ((off - prev_off) > 0xFF || entry->len > MAX_DCC_LEN) {
+			dev_err(drvdata->dev,
+				"DCC: Programming error! Base: 0x%x, offset 0x%x.\n",
+				entry->base, entry->offset);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		if (link) {
+			/*
+			 * link already has one offset-length so new
+			 * offset-length needs to be placed at bits [31:16]
+			 */
+			pos = 16;
+
+			/* Clear bits [31:16] */
+			link &= BM(0, 15);
+
+		} else {
+			/*
+			 * link is empty, so new offset-length needs to be
+			 * placed at bits [15:0]
+			 */
+			pos = 0;
+			link = 1 << 16;
+		}
+
+		/* write new offset-length pair to correct position */
+		link |= (((off-prev_off) & BM(0, 7)) |
+			 ((entry->len << 8) & BM(8, 14))) << pos;
+
+		if (pos) {
+			dcc_sram_writel(drvdata, link, sram_offset);
+			sram_offset += 4;
+			link = 0;
+		}
+
+		prev_off  = off;
+		prev_addr = addr;
+	}
+
+	if (link) {
+		dcc_sram_writel(drvdata, link, sram_offset);
+		sram_offset += 4;
+	}
+
+	/* Setting zero to indicate end of the list */
+	dcc_sram_writel(drvdata, 0, sram_offset);
+	sram_offset += 4;
+
+	/* check if the data will overstep */
+	if (drvdata->data_sink == DCC_DATA_SINK_SRAM
+	    && drvdata->func_type == DCC_FUNC_TYPE_CAPTURE) {
+		if (sram_offset + total_len > drvdata->ram_size) {
+			sram_offset += total_len;
+			goto overstep;
+		}
+	} else {
+		if (sram_offset > drvdata->ram_size)
+			goto overstep;
+	}
+
+	drvdata->ram_cfg = (sram_offset / 4);
+	return 0;
+overstep:
+	ret = -EINVAL;
+	memset_io(drvdata->ram_base, 0, drvdata->ram_size);
+	dev_err(drvdata->dev, "DCC SRAM oversteps, 0x%x (0x%x)\n",
+		sram_offset, drvdata->ram_size);
+err:
+	return ret;
+}
+
+static void __dcc_reg_dump(struct dcc_drvdata *drvdata)
+{
+	uint32_t *reg_buf;
+
+	if (!drvdata->reg_buf)
+		return;
+
+	drvdata->reg_data.version = DCC_REG_DUMP_VER;
+
+	reg_buf = drvdata->reg_buf;
+
+	reg_buf[0] = dcc_readl(drvdata, DCC_HW_VERSION);
+	reg_buf[1] = dcc_readl(drvdata, DCC_HW_INFO);
+	reg_buf[2] = dcc_readl(drvdata, DCC_CGC_CFG);
+	reg_buf[3] = dcc_readl(drvdata, DCC_LL);
+	reg_buf[4] = dcc_readl(drvdata, DCC_RAM_CFG);
+	reg_buf[5] = dcc_readl(drvdata, DCC_CFG);
+	reg_buf[6] = dcc_readl(drvdata, DCC_SW_CTL);
+	reg_buf[7] = dcc_readl(drvdata, DCC_STATUS);
+	reg_buf[8] = dcc_readl(drvdata, DCC_FETCH_ADDR);
+	reg_buf[9] = dcc_readl(drvdata, DCC_SRAM_ADDR);
+	reg_buf[10] = dcc_readl(drvdata, DCC_INT_ENABLE);
+	reg_buf[11] = dcc_readl(drvdata, DCC_INT_STATUS);
+	reg_buf[12] = dcc_readl(drvdata, DCC_QSB_CFG);
+
+	drvdata->reg_data.magic = DCC_REG_DUMP_MAGIC_V2;
+}
+
+static void __dcc_first_crc(struct dcc_drvdata *drvdata)
+{
+	int i;
+
+	/*
+	 * Need to send 2 triggers to DCC. First trigger sets CRC error status
+	 * bit. So need second trigger to reset this bit.
+	 */
+	for (i = 0; i < 2; i++) {
+		if (!dcc_ready(drvdata))
+			dev_err(drvdata->dev, "DCC is not ready!\n");
+
+		dcc_writel(drvdata, 1, DCC_SW_CTL);
+	}
+
+	/* Clear CRC error interrupt */
+	dcc_writel(drvdata, BIT(0), DCC_INT_STATUS);
+}
+
+static int dcc_enable(struct dcc_drvdata *drvdata)
+{
+	int ret = 0;
+
+	mutex_lock(&drvdata->mutex);
+
+	if (drvdata->enable) {
+		dev_err(drvdata->dev, "DCC is already enabled!\n");
+		mutex_unlock(&drvdata->mutex);
+		return 0;
+	}
+
+	/* 1. Prepare and enable DCC clock */
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		goto err;
+
+	dcc_writel(drvdata, 0, DCC_LL);
+
+	/* 2. Program linked-list in the SRAM */
+	ret = __dcc_ll_cfg(drvdata);
+	if (ret)
+		goto err_prog_ll;
+
+	/* 3. If in capture mode program DCC_RAM_CFG reg */
+	if (drvdata->func_type == DCC_FUNC_TYPE_CAPTURE)
+		dcc_writel(drvdata, drvdata->ram_cfg, DCC_RAM_CFG);
+
+	/* 4. Configure data sink and function type */
+	dcc_writel(drvdata, ((drvdata->data_sink << 4) | (drvdata->func_type)),
+		   DCC_CFG);
+
+	/* 5. Clears interrupt status register */
+	dcc_writel(drvdata, 0, DCC_INT_ENABLE);
+	dcc_writel(drvdata, (BIT(4) | BIT(0)), DCC_INT_STATUS);
+
+	/* Make sure all config is written in sram */
+	mb();
+
+	/* 6. Set LL bit */
+	dcc_writel(drvdata, 1, DCC_LL);
+	drvdata->enable = 1;
+
+	if (drvdata->func_type == DCC_FUNC_TYPE_CRC) {
+		__dcc_first_crc(drvdata);
+
+		/* Enable CRC error interrupt */
+		if (!drvdata->interrupt_disable)
+			dcc_writel(drvdata, BIT(0), DCC_INT_ENABLE);
+	}
+
+	/* Save DCC registers */
+	if (drvdata->save_reg)
+		__dcc_reg_dump(drvdata);
+
+err_prog_ll:
+	if (!drvdata->enable)
+		clk_disable_unprepare(drvdata->clk);
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
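+/*
+ * Ask the RPM, via a MISC KVP request, to enable or disable its copy of
+ * the DCC SW trigger; only valid while DCC is enabled in CRC mode.
+ */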
+static int __dcc_rpm_sw_trigger(struct dcc_drvdata *drvdata, bool enable)
+{
+	int ret = 0;
+	struct msm_rpm_kvp *rpm_kvp = &drvdata->rpm_kvp;
+
+	if (enable == drvdata->rpm_trig_req.enable)
+		return 0;
+
+	if (enable && (!drvdata->enable || drvdata->func_type !=
+		       DCC_FUNC_TYPE_CRC)) {
+		dev_err(drvdata->dev,
+			"DCC: invalid state! Can't send sw trigger req to rpm\n");
+		return -EINVAL;
+	}
+
+	drvdata->rpm_trig_req.enable = enable;
+	rpm_kvp->key = RPM_MISC_DDR_DCC_ENABLE;
+	rpm_kvp->length = sizeof(struct rpm_trig_req);
+	rpm_kvp->data = (void *)(&drvdata->rpm_trig_req);
+
+	ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
+				   RPM_MISC_REQ_TYPE, 0, rpm_kvp, 1);
+	if (ret) {
+		dev_err(drvdata->dev,
+			"DCC: SW trigger %s req to rpm failed %d\n",
+			(enable ? "enable" : "disable"), ret);
+		drvdata->rpm_trig_req.enable = !enable;
+	}
+
+	return ret;
+}
+
+static void dcc_disable(struct dcc_drvdata *drvdata)
+{
+	mutex_lock(&drvdata->mutex);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->mutex);
+		return;
+	}
+
+	/* Send request to RPM to disable DCC SW trigger */
+
+	if (__dcc_rpm_sw_trigger(drvdata, 0))
+		dev_err(drvdata->dev,
+			"DCC: Request to RPM to disable SW trigger failed.\n");
+
+	if (!dcc_ready(drvdata))
+		dev_err(drvdata->dev, "DCC is not ready! Disabling DCC...\n");
+
+	dcc_writel(drvdata, 0, DCC_LL);
+	drvdata->enable = 0;
+
+	/* Save DCC registers */
+	if (drvdata->save_reg)
+		__dcc_reg_dump(drvdata);
+
+	clk_disable_unprepare(drvdata->clk);
+
+	mutex_unlock(&drvdata->mutex);
+}
+
+static ssize_t dcc_show_func_type(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 str_dcc_func_type[drvdata->func_type]);
+}
+
+static ssize_t dcc_store_func_type(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+	char str[10] = "";
+	int ret;
+
+	if (strlen(buf) >= 10)
+		return -EINVAL;
+
+	strlcpy(str, buf, sizeof(str));
+
+	mutex_lock(&drvdata->mutex);
+	if (drvdata->enable) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (!strcmp(str, str_dcc_func_type[DCC_FUNC_TYPE_CAPTURE]))
+		drvdata->func_type = DCC_FUNC_TYPE_CAPTURE;
+	else if (!strcmp(str, str_dcc_func_type[DCC_FUNC_TYPE_CRC]))
+		drvdata->func_type = DCC_FUNC_TYPE_CRC;
+	else {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = size;
+out:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+static DEVICE_ATTR(func_type, 0644,
+		   dcc_show_func_type, dcc_store_func_type);
+
+static ssize_t dcc_show_data_sink(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 str_dcc_data_sink[drvdata->data_sink]);
+}
+
+static ssize_t dcc_store_data_sink(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+	char str[10] = "";
+	int ret;
+
+	if (strlen(buf) >= 10)
+		return -EINVAL;
+
+	strlcpy(str, buf, sizeof(str));
+
+	mutex_lock(&drvdata->mutex);
+	if (drvdata->enable) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (!strcmp(str, str_dcc_data_sink[DCC_DATA_SINK_SRAM]))
+		drvdata->data_sink = DCC_DATA_SINK_SRAM;
+	else if (!strcmp(str, str_dcc_data_sink[DCC_DATA_SINK_ATB]))
+		drvdata->data_sink = DCC_DATA_SINK_ATB;
+	else {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = size;
+out:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+static DEVICE_ATTR(data_sink, 0644,
+		   dcc_show_data_sink, dcc_store_data_sink);
+
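+/* 'trigger': writing 1 fires a manual SW trigger while DCC is enabled */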
+static ssize_t dcc_store_trigger(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t size)
+{
+	int ret = 0;
+	unsigned long val;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (val != 1)
+		return -EINVAL;
+
+	ret = dcc_xpu_unlock(drvdata);
+	if (ret)
+		return ret;
+
+	ret = dcc_sw_trigger(drvdata);
+	if (!ret)
+		ret = size;
+
+	dcc_xpu_lock(drvdata);
+	return ret;
+}
+static DEVICE_ATTR(trigger, 0200, NULL, dcc_store_trigger);
+
+static ssize_t dcc_show_enable(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->enable);
+}
+
+static ssize_t dcc_store_enable(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	int ret = 0;
+	unsigned long val;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	ret = dcc_xpu_unlock(drvdata);
+	if (ret)
+		return ret;
+
+	if (val)
+		ret = dcc_enable(drvdata);
+	else
+		dcc_disable(drvdata);
+
+	if (!ret)
+		ret = size;
+
+	dcc_xpu_lock(drvdata);
+	return ret;
+}
+static DEVICE_ATTR(enable, 0644, dcc_show_enable,
+		   dcc_store_enable);
+
+static ssize_t dcc_show_config(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+	struct dcc_config_entry *entry;
+	char local_buf[64];
+	int len = 0, count = 0;
+
+	buf[0] = '\0';
+
+	mutex_lock(&drvdata->mutex);
+	list_for_each_entry(entry, &drvdata->config_head, list) {
+		len = snprintf(local_buf, 64,
+			       "Index: 0x%x, Base: 0x%x, Offset: 0x%x, len: 0x%x\n",
+			       entry->index, entry->base,
+			       entry->offset, entry->len);
+
+		if ((count + len) > PAGE_SIZE) {
+			dev_err(dev, "DCC: Couldn't write complete config!\n");
+			break;
+		}
+
+		strlcat(buf, local_buf, PAGE_SIZE);
+		count += len;
+	}
+
+	mutex_unlock(&drvdata->mutex);
+
+	return count;
+}
+
+static int dcc_config_add(struct dcc_drvdata *drvdata, unsigned int addr,
+			  unsigned int len)
+{
+	int ret;
+	struct dcc_config_entry *entry, *pentry;
+	unsigned int base, offset;
+
+	mutex_lock(&drvdata->mutex);
+
+	if (!len) {
+		dev_err(drvdata->dev, "DCC: Invalid length!\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	base = addr & BM(4, 31);
+
+	if (!list_empty(&drvdata->config_head)) {
+		pentry = list_last_entry(&drvdata->config_head,
+					 struct dcc_config_entry, list);
+
+		if (addr >= (pentry->base + pentry->offset) &&
+		    addr <= (pentry->base + pentry->offset + MAX_DCC_OFFSET)) {
+
+			/* Re-use base address from last entry */
+			base = pentry->base;
+
+			/*
+			 * Check if new address is contiguous to last entry's
+			 * addresses. If yes then we can re-use last entry and
+			 * just need to update its length.
+			 */
+			if ((pentry->len * 4 + pentry->base + pentry->offset)
+			    == addr) {
+				len += pentry->len;
+
+				/*
+				 * Check if last entry can hold additional new
+				 * length. If yes then we don't need to create
+				 * a new entry else we need to add a new entry
+				 * with same base but updated offset.
+				 */
+				if (len > MAX_DCC_LEN)
+					pentry->len = MAX_DCC_LEN;
+				else
+					pentry->len = len;
+
+				/*
+				 * Update start addr and len for remaining
+				 * addresses, which will be part of new
+				 * entry.
+				 */
+				addr = pentry->base + pentry->offset +
+					pentry->len * 4;
+				len -= pentry->len;
+			}
+		}
+	}
+
+	offset = addr - base;
+
+	while (len) {
+		entry = devm_kzalloc(drvdata->dev, sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		entry->base = base;
+		entry->offset = offset;
+		entry->len = min_t(uint32_t, len, MAX_DCC_LEN);
+		entry->index = drvdata->nr_config++;
+		INIT_LIST_HEAD(&entry->list);
+		list_add_tail(&entry->list, &drvdata->config_head);
+
+		len -= entry->len;
+		offset += MAX_DCC_LEN * 4;
+	}
+
+	mutex_unlock(&drvdata->mutex);
+	return 0;
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+static ssize_t dcc_store_config(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	int ret;
+	unsigned int base, len;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+	int nval;
+
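+	/* input: "<base addr (hex)> [<len in words>]"; len defaults to 1 */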
+	nval = sscanf(buf, "%x %u", &base, &len);
+	if (nval <= 0 || nval > 2)
+		return -EINVAL;
+
+	if (nval == 1)
+		len = 1;
+
+	ret = dcc_config_add(drvdata, base, len);
+	if (ret)
+		return ret;
+
+	return size;
+}
+static DEVICE_ATTR(config, 0644, dcc_show_config,
+		   dcc_store_config);
+
+static void dcc_config_reset(struct dcc_drvdata *drvdata)
+{
+	struct dcc_config_entry *entry, *temp;
+
+	mutex_lock(&drvdata->mutex);
+
+	list_for_each_entry_safe(entry, temp, &drvdata->config_head, list) {
+		list_del(&entry->list);
+		devm_kfree(drvdata->dev, entry);
+		drvdata->nr_config--;
+	}
+
+	mutex_unlock(&drvdata->mutex);
+}
+
+static ssize_t dcc_store_config_reset(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	unsigned long val;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	if (val)
+		dcc_config_reset(drvdata);
+
+	return size;
+}
+static DEVICE_ATTR(config_reset, 0200, NULL, dcc_store_config_reset);
+
+static ssize_t dcc_show_crc_error(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	ret = dcc_xpu_unlock(drvdata);
+	if (ret)
+		return ret;
+
+	mutex_lock(&drvdata->mutex);
+	if (!drvdata->enable) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+			(unsigned int)BVAL(dcc_readl(drvdata, DCC_STATUS), 0));
+err:
+	mutex_unlock(&drvdata->mutex);
+	dcc_xpu_lock(drvdata);
+	return ret;
+}
+static DEVICE_ATTR(crc_error, 0444, dcc_show_crc_error, NULL);
+
+static ssize_t dcc_show_ready(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	ret = dcc_xpu_unlock(drvdata);
+	if (ret)
+		return ret;
+
+	mutex_lock(&drvdata->mutex);
+	if (!drvdata->enable) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+			(unsigned int)BVAL(dcc_readl(drvdata, DCC_STATUS), 4));
+err:
+	mutex_unlock(&drvdata->mutex);
+	dcc_xpu_lock(drvdata);
+	return ret;
+}
+static DEVICE_ATTR(ready, 0444, dcc_show_ready, NULL);
+
+static ssize_t dcc_show_interrupt_disable(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->interrupt_disable);
+}
+
+static ssize_t dcc_store_interrupt_disable(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t size)
+{
+	unsigned long val;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->interrupt_disable = val ? 1 : 0;
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(interrupt_disable, 0644,
+		   dcc_show_interrupt_disable, dcc_store_interrupt_disable);
+
+static ssize_t dcc_show_rpm_sw_trigger_on(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->rpm_trig_req.enable);
+}
+
+static ssize_t dcc_store_rpm_sw_trigger_on(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t size)
+{
+	unsigned long val;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	__dcc_rpm_sw_trigger(drvdata, !!val);
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(rpm_sw_trigger_on, 0644,
+		   dcc_show_rpm_sw_trigger_on, dcc_store_rpm_sw_trigger_on);
+
+static ssize_t dcc_store_xpu_unlock(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	ret = val ? dcc_xpu_unlock(drvdata) : dcc_xpu_lock(drvdata);
+	if (!ret)
+		ret = size;
+
+	return ret;
+}
+static DEVICE_ATTR(xpu_unlock, 0200, NULL, dcc_store_xpu_unlock);
+
+static const struct device_attribute *dcc_attrs[] = {
+	&dev_attr_func_type,
+	&dev_attr_data_sink,
+	&dev_attr_trigger,
+	&dev_attr_enable,
+	&dev_attr_config,
+	&dev_attr_config_reset,
+	&dev_attr_ready,
+	&dev_attr_crc_error,
+	&dev_attr_interrupt_disable,
+	&dev_attr_rpm_sw_trigger_on,
+	&dev_attr_xpu_unlock,
+	NULL,
+};
+
+static int dcc_create_files(struct device *dev,
+			    const struct device_attribute **attrs)
+{
+	int ret = 0, i;
+
+	for (i = 0; attrs[i] != NULL; i++) {
+		ret = device_create_file(dev, attrs[i]);
+		if (ret) {
+			dev_err(dev, "DCC: Couldn't create sysfs attribute: %s!\n",
+				attrs[i]->attr.name);
+			break;
+		}
+	}
+	return ret;
+}
+
+static int dcc_sram_open(struct inode *inode, struct file *file)
+{
+	struct dcc_drvdata *drvdata = container_of(inode->i_cdev,
+						   struct dcc_drvdata,
+						   sram_dev);
+	file->private_data = drvdata;
+
+	return dcc_xpu_unlock(drvdata);
+}
+
+static ssize_t dcc_sram_read(struct file *file, char __user *data,
+			     size_t len, loff_t *ppos)
+{
+	int ret;
+	unsigned char *buf;
+	struct dcc_drvdata *drvdata = file->private_data;
+
+	/* EOF check */
+	if (drvdata->ram_size <= *ppos)
+		return 0;
+
+	if ((*ppos + len) > drvdata->ram_size)
+		len = (drvdata->ram_size - *ppos);
+
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret) {
+		kfree(buf);
+		return ret;
+	}
+
+	memcpy_fromio(buf, (drvdata->ram_base + *ppos), len);
+
+	clk_disable_unprepare(drvdata->clk);
+
+	if (copy_to_user(data, buf, len)) {
+		dev_err(drvdata->dev,
+			"DCC: Couldn't copy all data to user!\n");
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+
+	kfree(buf);
+
+	return len;
+}
+
+static int dcc_sram_release(struct inode *inode, struct file *file)
+{
+	struct dcc_drvdata *drvdata = file->private_data;
+
+	return dcc_xpu_lock(drvdata);
+}
+
+static const struct file_operations dcc_sram_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dcc_sram_open,
+	.read		= dcc_sram_read,
+	.release	= dcc_sram_release,
+	.llseek		= no_llseek,
+};
+
+static int dcc_sram_dev_register(struct dcc_drvdata *drvdata)
+{
+	int ret;
+	struct device *device;
+	dev_t dev;
+
+	ret = alloc_chrdev_region(&dev, 0, 1, drvdata->sram_node);
+	if (ret)
+		goto err_alloc;
+
+	cdev_init(&drvdata->sram_dev, &dcc_sram_fops);
+
+	drvdata->sram_dev.owner = THIS_MODULE;
+	ret = cdev_add(&drvdata->sram_dev, dev, 1);
+	if (ret)
+		goto err_cdev_add;
+
+	drvdata->sram_class = class_create(THIS_MODULE,
+					   drvdata->sram_node);
+	if (IS_ERR(drvdata->sram_class)) {
+		ret = PTR_ERR(drvdata->sram_class);
+		goto err_class_create;
+	}
+
+	device = device_create(drvdata->sram_class, NULL,
+			       drvdata->sram_dev.dev, drvdata,
+			       drvdata->sram_node);
+	if (IS_ERR(device)) {
+		ret = PTR_ERR(device);
+		goto err_dev_create;
+	}
+
+	return 0;
+err_dev_create:
+	class_destroy(drvdata->sram_class);
+err_class_create:
+	cdev_del(&drvdata->sram_dev);
+err_cdev_add:
+	unregister_chrdev_region(drvdata->sram_dev.dev, 1);
+err_alloc:
+	return ret;
+}
+
+static void dcc_sram_dev_deregister(struct dcc_drvdata *drvdata)
+{
+	device_destroy(drvdata->sram_class, drvdata->sram_dev.dev);
+	class_destroy(drvdata->sram_class);
+	cdev_del(&drvdata->sram_dev);
+	unregister_chrdev_region(drvdata->sram_dev.dev, 1);
+}
+
+static int dcc_sram_dev_init(struct dcc_drvdata *drvdata)
+{
+	int ret = 0;
+	size_t node_size;
+	char *node_name = "dcc_sram";
+	struct device *dev = drvdata->dev;
+
+	node_size = strlen(node_name) + 1;
+
+	drvdata->sram_node = devm_kzalloc(dev, node_size, GFP_KERNEL);
+	if (!drvdata->sram_node)
+		return -ENOMEM;
+
+	strlcpy(drvdata->sram_node, node_name, node_size);
+	ret = dcc_sram_dev_register(drvdata);
+	if (ret)
+		dev_err(drvdata->dev, "DCC: sram node not registered.\n");
+
+	return ret;
+}
+
+static void dcc_sram_dev_exit(struct dcc_drvdata *drvdata)
+{
+	dcc_sram_dev_deregister(drvdata);
+}
+
+static void dcc_allocate_dump_mem(struct dcc_drvdata *drvdata)
+{
+	int ret;
+	struct device *dev = drvdata->dev;
+	struct msm_dump_entry reg_dump_entry, sram_dump_entry;
+
+	/* Allocate memory for dcc reg dump */
+	drvdata->reg_buf = devm_kzalloc(dev, drvdata->reg_size, GFP_KERNEL);
+	if (drvdata->reg_buf) {
+		strlcpy(drvdata->reg_data.name, "KDCC_REG",
+				 sizeof(drvdata->reg_data.name));
+		drvdata->reg_data.addr = virt_to_phys(drvdata->reg_buf);
+		drvdata->reg_data.len = drvdata->reg_size;
+		reg_dump_entry.id = MSM_DUMP_DATA_DCC_REG;
+		reg_dump_entry.addr = virt_to_phys(&drvdata->reg_data);
+		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
+					     &reg_dump_entry);
+		if (ret) {
+			dev_err(dev, "DCC REG dump setup failed\n");
+			devm_kfree(dev, drvdata->reg_buf);
+		}
+	} else {
+		dev_err(dev, "DCC REG dump allocation failed\n");
+	}
+
+	/* Allocate memory for dcc sram dump */
+	drvdata->sram_buf = devm_kzalloc(dev, drvdata->ram_size, GFP_KERNEL);
+	if (drvdata->sram_buf) {
+		strlcpy(drvdata->sram_data.name, "KDCC_SRAM",
+				 sizeof(drvdata->sram_data.name));
+		drvdata->sram_data.addr = virt_to_phys(drvdata->sram_buf);
+		drvdata->sram_data.len = drvdata->ram_size;
+		sram_dump_entry.id = MSM_DUMP_DATA_DCC_SRAM;
+		sram_dump_entry.addr = virt_to_phys(&drvdata->sram_data);
+		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
+					     &sram_dump_entry);
+		if (ret) {
+			dev_err(dev, "DCC SRAM dump setup failed\n");
+			devm_kfree(dev, drvdata->sram_buf);
+		}
+	} else {
+		dev_err(dev, "DCC SRAM dump allocation failed\n");
+	}
+}
+
+static int dcc_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	struct device *dev = &pdev->dev;
+	struct dcc_drvdata *drvdata;
+	struct resource *res;
+	const char *data_sink;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dcc-base");
+	if (!res)
+		return -EINVAL;
+
+	drvdata->reg_size = resource_size(res);
+	drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drvdata->base)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "dcc-ram-base");
+	if (!res)
+		return -EINVAL;
+
+	drvdata->ram_size = resource_size(res);
+	drvdata->ram_base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drvdata->ram_base)
+		return -ENOMEM;
+
+	drvdata->clk = devm_clk_get(dev, "apb_pclk");
+	if (IS_ERR(drvdata->clk)) {
+		ret = PTR_ERR(drvdata->clk);
+		goto err;
+	}
+
+	drvdata->save_reg = of_property_read_bool(pdev->dev.of_node,
+						  "qcom,save-reg");
+
+	mutex_init(&drvdata->mutex);
+
+	INIT_LIST_HEAD(&drvdata->config_head);
+	drvdata->nr_config = 0;
+	drvdata->xpu_scm_avail = 0;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "dcc-xpu-base");
+	if (res) {
+		if (scm_is_call_available(SCM_SVC_MP,
+					  SCM_SVC_DISABLE_XPU) > 0) {
+			drvdata->xpu_scm_avail = 1;
+			drvdata->xpu_addr = res->start;
+		} else {
+			dev_err(dev, "scm call is not available\n");
+			return -EINVAL;
+		}
+	} else {
+		dev_info(dev, "DCC XPU is not specified\n");
+	}
+
+	ret = dcc_xpu_unlock(drvdata);
+	if (ret)
+		goto err;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret) {
+		dcc_xpu_lock(drvdata);
+		goto err;
+	}
+
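+	/* start with DCC SRAM in a known, zeroed state */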
+	memset_io(drvdata->ram_base, 0, drvdata->ram_size);
+
+	dcc_xpu_lock(drvdata);
+
+	clk_disable_unprepare(drvdata->clk);
+
+	drvdata->data_sink = DCC_DATA_SINK_SRAM;
+	ret = of_property_read_string(pdev->dev.of_node, "qcom,data-sink",
+				      &data_sink);
+	if (!ret) {
+		for (i = 0; i < ARRAY_SIZE(str_dcc_data_sink); i++)
+			if (!strcmp(data_sink, str_dcc_data_sink[i])) {
+				drvdata->data_sink = i;
+				break;
+			}
+
+		if (i == ARRAY_SIZE(str_dcc_data_sink)) {
+			dev_err(dev, "Unknown sink type for DCC! Using '%s' as data sink\n",
+				str_dcc_data_sink[drvdata->data_sink]);
+		}
+	}
+
+	ret = dcc_sram_dev_init(drvdata);
+	if (ret)
+		goto err;
+
+	ret = dcc_create_files(dev, dcc_attrs);
+	if (ret)
+		goto err;
+
+	dcc_allocate_dump_mem(drvdata);
+	return 0;
+err:
+	return ret;
+}
+
+static int dcc_remove(struct platform_device *pdev)
+{
+	struct dcc_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	dcc_sram_dev_exit(drvdata);
+
+	dcc_config_reset(drvdata);
+
+	return 0;
+}
+
+static const struct of_device_id msm_dcc_match[] = {
+	{ .compatible = "qcom,dcc"},
+	{}
+};
+
+static struct platform_driver dcc_driver = {
+	.probe          = dcc_probe,
+	.remove         = dcc_remove,
+	.driver         = {
+		.name   = "msm-dcc",
+		.owner	= THIS_MODULE,
+		.of_match_table	= msm_dcc_match,
+	},
+};
+
+static int __init dcc_init(void)
+{
+	return platform_driver_register(&dcc_driver);
+}
+module_init(dcc_init);
+
+static void __exit dcc_exit(void)
+{
+	platform_driver_unregister(&dcc_driver);
+}
+module_exit(dcc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM data capture and compare engine");
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index cff407e..457dc5f 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -1610,14 +1610,6 @@ static struct platform_driver dcc_driver = {
 
 static int __init dcc_init(void)
 {
-	int ret;
-
-	ret = scm_is_secure_device();
-	if (ret == 0) {
-		pr_info("DCC is not available\n");
-		return -ENODEV;
-	}
-
 	return platform_driver_register(&dcc_driver);
 }
 pure_initcall(dcc_init);
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index e6fd52e..b315a97 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -3610,6 +3610,7 @@ int glink_start_rx_rt(void *handle)
 	glink_put_ch_ctx(ctx);
 	return ret;
 }
+EXPORT_SYMBOL(glink_start_rx_rt);
 
 /**
  * glink_end_rx_rt() - Vote for RT thread priority on RX.
@@ -3637,6 +3638,7 @@ int glink_end_rx_rt(void *handle)
 	glink_put_ch_ctx(ctx);
 	return ret;
 }
+EXPORT_SYMBOL(glink_end_rx_rt);
 
 /**
  * glink_rpm_rx_poll() - Poll and receive any available events
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index e475041..0c7171a 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -281,6 +281,7 @@ enum icnss_driver_state {
 	ICNSS_SHUTDOWN_DONE,
 	ICNSS_HOST_TRIGGERED_PDR,
 	ICNSS_FW_DOWN,
+	ICNSS_DRIVER_UNLOADING,
 };
 
 struct ce_irq_list {
@@ -1149,6 +1150,16 @@ bool icnss_is_fw_ready(void)
 }
 EXPORT_SYMBOL(icnss_is_fw_ready);
 
+bool icnss_is_fw_down(void)
+{
+	if (!penv)
+		return false;
+
+	return test_bit(ICNSS_FW_DOWN, &penv->state);
+}
+EXPORT_SYMBOL(icnss_is_fw_down);
+
 int icnss_power_off(struct device *dev)
 {
 	struct icnss_priv *priv = dev_get_drvdata(dev);
@@ -2279,9 +2290,11 @@ static int icnss_driver_event_unregister_driver(void *data)
 		goto out;
 	}
 
+	set_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
 	if (penv->ops)
 		penv->ops->remove(&penv->pdev->dev);
 
+	clear_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
 	clear_bit(ICNSS_DRIVER_PROBED, &penv->state);
 
 	penv->ops = NULL;
@@ -2304,8 +2317,10 @@ static int icnss_call_driver_remove(struct icnss_priv *priv)
 	if (!priv->ops || !priv->ops->remove)
 		return 0;
 
+	set_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
 	penv->ops->remove(&priv->pdev->dev);
 
+	clear_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
 	clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
 
 	icnss_hw_power_off(penv);
@@ -2511,7 +2526,8 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
 		icnss_ignore_qmi_timeout(true);
 
 		fw_down_data.crashed = !!notif->crashed;
-		if (test_bit(ICNSS_FW_READY, &priv->state))
+		if (test_bit(ICNSS_FW_READY, &priv->state) &&
+		    !test_bit(ICNSS_DRIVER_UNLOADING, &priv->state))
 			icnss_call_driver_uevent(priv,
 						 ICNSS_UEVENT_FW_DOWN,
 						 &fw_down_data);
@@ -2655,7 +2671,8 @@ static int icnss_service_notifier_notify(struct notifier_block *nb,
 		icnss_ignore_qmi_timeout(true);
 
 		fw_down_data.crashed = event_data->crashed;
-		if (test_bit(ICNSS_FW_READY, &priv->state))
+		if (test_bit(ICNSS_FW_READY, &priv->state) &&
+		    !test_bit(ICNSS_DRIVER_UNLOADING, &priv->state))
 			icnss_call_driver_uevent(priv,
 						 ICNSS_UEVENT_FW_DOWN,
 						 &fw_down_data);
@@ -3815,6 +3832,8 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
 		case ICNSS_FW_DOWN:
 			seq_puts(s, "FW DOWN");
 			continue;
+		case ICNSS_DRIVER_UNLOADING:
+			seq_puts(s, "DRIVER UNLOADING");
+			continue;
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
diff --git a/drivers/soc/qcom/msm_bus/Makefile b/drivers/soc/qcom/msm_bus/Makefile
index 1103360..15569b1 100644
--- a/drivers/soc/qcom/msm_bus/Makefile
+++ b/drivers/soc/qcom/msm_bus/Makefile
@@ -10,7 +10,7 @@
 		msm_bus_bimc_rpmh.o msm_bus_noc_rpmh.o
 	obj-$(CONFIG_OF) += msm_bus_of_rpmh.o
 else
-	obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o \
+	obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o msm_bus_rules.o \
 		msm_bus_bimc_adhoc.o msm_bus_noc_adhoc.o
 	obj-$(CONFIG_OF) += msm_bus_of_adhoc.o
 endif
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c
index d995746..b6104f0 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -73,26 +73,26 @@ static void copy_remaining_nodes(struct list_head *edge_list, struct list_head
  * "util" file for these common func/macros.
  *
  */
-uint64_t msm_bus_div64(unsigned int w, uint64_t bw)
+uint64_t msm_bus_div64(uint64_t num, unsigned int base)
 {
-	uint64_t *b = &bw;
+	uint64_t *n = &num;
 
-	if ((bw > 0) && (bw < w))
+	if ((num > 0) && (num < base))
 		return 1;
 
-	switch (w) {
+	switch (base) {
 	case 0:
 		WARN(1, "AXI: Divide by 0 attempted\n");
-	case 1: return bw;
-	case 2: return (bw >> 1);
-	case 4: return (bw >> 2);
-	case 8: return (bw >> 3);
-	case 16: return (bw >> 4);
-	case 32: return (bw >> 5);
+	case 1: return num;
+	case 2: return (num >> 1);
+	case 4: return (num >> 2);
+	case 8: return (num >> 3);
+	case 16: return (num >> 4);
+	case 32: return (num >> 5);
 	}
 
-	do_div(*b, w);
-	return *b;
+	do_div(*n, base);
+	return *n;
 }
 
 int msm_bus_device_match_adhoc(struct device *dev, void *id)
@@ -452,19 +452,18 @@ static uint64_t scheme1_agg_scheme(struct msm_bus_node_device_type *bus_dev,
 
 	if (util_fact && (util_fact != 100)) {
 		sum_ab *= util_fact;
-		sum_ab = msm_bus_div64(100, sum_ab);
+		sum_ab = msm_bus_div64(sum_ab, 100);
 	}
 
 	if (vrail_comp && (vrail_comp != 100)) {
 		max_ib *= 100;
-		max_ib = msm_bus_div64(vrail_comp, max_ib);
+		max_ib = msm_bus_div64(max_ib, vrail_comp);
 	}
 
 	/* Account for multiple channels if any */
 	if (bus_dev->node_info->agg_params.num_aggports > 1)
-		sum_ab = msm_bus_div64(
-				bus_dev->node_info->agg_params.num_aggports,
-					sum_ab);
+		sum_ab = msm_bus_div64(sum_ab,
+				bus_dev->node_info->agg_params.num_aggports);
 
 	if (!bus_dev->node_info->agg_params.buswidth) {
 		MSM_BUS_WARN("No bus width found for %d. Using default\n",
@@ -473,8 +472,8 @@ static uint64_t scheme1_agg_scheme(struct msm_bus_node_device_type *bus_dev,
 	}
 
 	bw_max_hz = max(max_ib, sum_ab);
-	bw_max_hz = msm_bus_div64(bus_dev->node_info->agg_params.buswidth,
-					bw_max_hz);
+	bw_max_hz = msm_bus_div64(bw_max_hz,
+				bus_dev->node_info->agg_params.buswidth);
 
 	return bw_max_hz;
 }
@@ -517,19 +516,18 @@ static uint64_t legacy_agg_scheme(struct msm_bus_node_device_type *bus_dev,
 
 	if (util_fact && (util_fact != 100)) {
 		sum_ab *= util_fact;
-		sum_ab = msm_bus_div64(100, sum_ab);
+		sum_ab = msm_bus_div64(sum_ab, 100);
 	}
 
 	if (vrail_comp && (vrail_comp != 100)) {
 		max_ib *= 100;
-		max_ib = msm_bus_div64(vrail_comp, max_ib);
+		max_ib = msm_bus_div64(max_ib, vrail_comp);
 	}
 
 	/* Account for multiple channels if any */
 	if (bus_dev->node_info->agg_params.num_aggports > 1)
-		sum_ab = msm_bus_div64(
-				bus_dev->node_info->agg_params.num_aggports,
-					sum_ab);
+		sum_ab = msm_bus_div64(sum_ab,
+				bus_dev->node_info->agg_params.num_aggports);
 
 	if (!bus_dev->node_info->agg_params.buswidth) {
 		MSM_BUS_WARN("No bus width found for %d. Using default\n",
@@ -538,8 +536,8 @@ static uint64_t legacy_agg_scheme(struct msm_bus_node_device_type *bus_dev,
 	}
 
 	bw_max_hz = max(max_ib, sum_ab);
-	bw_max_hz = msm_bus_div64(bus_dev->node_info->agg_params.buswidth,
-					bw_max_hz);
+	bw_max_hz = msm_bus_div64(bw_max_hz,
+				bus_dev->node_info->agg_params.buswidth);
 
 	return bw_max_hz;
 }
@@ -873,7 +871,7 @@ static void unregister_client_adhoc(uint32_t cl)
 	}
 
 	curr = client->curr;
-	if (curr >= pdata->num_usecases) {
+	if ((curr < 0) || (curr >= pdata->num_usecases)) {
 		MSM_BUS_ERR("Invalid index Defaulting curr to 0");
 		curr = 0;
 	}
@@ -1111,75 +1109,6 @@ static int update_client_paths(struct msm_bus_client *client, bool log_trns,
 	return ret;
 }
 
-static int query_client_paths(struct msm_bus_client *client, bool log_trns,
-							unsigned int idx)
-{
-	int lnode, src, dest, cur_idx;
-	uint64_t req_clk, req_bw, curr_clk, curr_bw, slp_clk, slp_bw;
-	int i, ret = 0;
-	struct msm_bus_scale_pdata *pdata;
-	struct device *src_dev;
-
-	if (!client) {
-		MSM_BUS_ERR("Client handle  Null");
-		ret = -ENXIO;
-		goto exit_update_client_paths;
-	}
-
-	pdata = client->pdata;
-	if (!pdata) {
-		MSM_BUS_ERR("Client pdata Null");
-		ret = -ENXIO;
-		goto exit_update_client_paths;
-	}
-
-	cur_idx = client->curr;
-	client->curr = idx;
-	for (i = 0; i < pdata->usecase->num_paths; i++) {
-		src = pdata->usecase[idx].vectors[i].src;
-		dest = pdata->usecase[idx].vectors[i].dst;
-
-		lnode = client->src_pnode[i];
-		src_dev = client->src_devs[i];
-		req_clk = client->pdata->usecase[idx].vectors[i].ib;
-		req_bw = client->pdata->usecase[idx].vectors[i].ab;
-		if (cur_idx < 0) {
-			curr_clk = 0;
-			curr_bw = 0;
-		} else {
-			curr_clk =
-				client->pdata->usecase[cur_idx].vectors[i].ib;
-			curr_bw = client->pdata->usecase[cur_idx].vectors[i].ab;
-			MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__,
-					curr_bw, curr_clk);
-		}
-
-		if (pdata->active_only) {
-			slp_clk = 0;
-			slp_bw = 0;
-		} else {
-			slp_clk = req_clk;
-			slp_bw = req_bw;
-		}
-
-		ret = update_path(src_dev, dest, req_clk, req_bw, slp_clk,
-			slp_bw, curr_clk, curr_bw, lnode, pdata->active_only);
-
-		if (ret) {
-			MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n",
-					__func__, ret, pdata->active_only);
-			goto exit_update_client_paths;
-		}
-
-		if (log_trns)
-			getpath_debug(src, lnode, pdata->active_only);
-	}
-	commit_data();
-exit_update_client_paths:
-	return ret;
-}
-
-
 static int update_context(uint32_t cl, bool active_only,
 					unsigned int ctx_idx)
 {
@@ -1352,8 +1281,8 @@ static int update_bw_adhoc(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
 	commit_data();
 	cl->cur_act_ib = ib;
 	cl->cur_act_ab = ab;
-	cl->cur_slp_ib = slp_ib;
-	cl->cur_slp_ab = slp_ab;
+	cl->cur_dual_ib = slp_ib;
+	cl->cur_dual_ab = slp_ab;
 
 	if (log_transaction)
 		getpath_debug(cl->mas, cl->first_hop, cl->active_only);
@@ -1378,18 +1307,18 @@ static int update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
 
 	if ((cl->cur_act_ib == act_ib) &&
 		(cl->cur_act_ab == act_ab) &&
-		(cl->cur_slp_ib == slp_ib) &&
-		(cl->cur_slp_ab == slp_ab)) {
+		(cl->cur_dual_ib == slp_ib) &&
+		(cl->cur_dual_ab == slp_ab)) {
 		MSM_BUS_ERR("No change in vote");
 		goto exit_change_context;
 	}
 
 	if (!slp_ab && !slp_ib)
 		cl->active_only = true;
-	msm_bus_dbg_rec_transaction(cl, cl->cur_act_ab, cl->cur_slp_ib);
-	ret = update_path(cl->mas_dev, cl->slv, act_ib, act_ab, slp_ib, slp_ab,
-				cl->cur_act_ab, cl->cur_act_ab,  cl->first_hop,
-				cl->active_only);
+	msm_bus_dbg_rec_transaction(cl, cl->cur_act_ab, cl->cur_dual_ib);
+	ret = update_path(cl->mas_dev, cl->slv, act_ib, act_ab, slp_ib,
+				slp_ab, cl->cur_act_ab, cl->cur_act_ab,
+				cl->first_hop, cl->active_only);
 	if (ret) {
 		MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
 				__func__, ret, cl->active_only);
@@ -1398,8 +1327,8 @@ static int update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
 	commit_data();
 	cl->cur_act_ib = act_ib;
 	cl->cur_act_ab = act_ab;
-	cl->cur_slp_ib = slp_ib;
-	cl->cur_slp_ab = slp_ab;
+	cl->cur_dual_ib = slp_ib;
+	cl->cur_dual_ab = slp_ab;
 	trace_bus_update_request_end(cl->name);
 exit_change_context:
 	rt_mutex_unlock(&msm_bus_adhoc_lock);
@@ -1421,6 +1350,7 @@ static void unregister_adhoc(struct msm_bus_client_handle *cl)
 				cl->first_hop, cl->active_only);
 	commit_data();
 	msm_bus_dbg_remove_client(cl);
+	kfree(cl->name);
 	kfree(cl);
 exit_unregister_client:
 	rt_mutex_unlock(&msm_bus_adhoc_lock);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
index 95f61aa..95c127d 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -549,8 +549,8 @@ static int msm_bus_bimc_set_bw(struct msm_bus_node_device_type *dev,
 
 	if (info && info->num_qports &&
 		((info->qos_params.mode == BIMC_QOS_MODE_LIMITER))) {
-		bw = msm_bus_div64(info->num_qports,
-				dev->node_bw[ACTIVE_CTX].sum_ab);
+		bw = msm_bus_div64(dev->node_bw[ACTIVE_CTX].sum_ab,
+				info->num_qports);
 
 		MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
 				info->id, bw);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
index 8e1fc0a..cb4c8b3 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
@@ -201,6 +201,7 @@ static struct msm_bus_node_device_type *msm_bus_floor_init_dev(
 		sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
 
 	if (!node_info) {
+		pr_err("%s: Bus node info alloc failed\n", __func__);
 		devm_kfree(dev, bus_node);
 		bus_node = ERR_PTR(-ENOMEM);
 		goto exit_init_bus_dev;
@@ -462,6 +463,8 @@ static int msm_bus_floor_setup_floor_dev(
 	cl_ptr->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
 	if (!cl_ptr->dev) {
 		ret = -ENOMEM;
+		pr_err("%s: Failed to create device bus %d\n", __func__,
+			bus_node->node_info->id);
 		goto err_setup_floor_dev;
 	}
 
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
index 269d09a..ee9b7af 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
@@ -478,8 +478,10 @@ void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
 		copy_size = new_size;
 
 	ret = devm_kzalloc(dev, new_size, flags);
-	if (!ret)
+	if (!ret) {
+		MSM_BUS_ERR("%s: Error Reallocating memory", __func__);
 		goto exit_realloc_devmem;
+	}
 
 	memcpy(ret, p, copy_size);
 	devm_kfree(dev, p);
@@ -716,6 +718,7 @@ static int msm_bus_fabric_init(struct device *dev,
 	fabdev = devm_kzalloc(dev, sizeof(struct msm_bus_fab_device_type),
 								GFP_KERNEL);
 	if (!fabdev) {
+		MSM_BUS_ERR("Fabric alloc failed\n");
 		ret = -ENOMEM;
 		goto exit_fabric_init;
 	}
@@ -827,8 +830,8 @@ static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
 
 	if (!bus_node || !pdata) {
 		ret = -ENXIO;
-		MSM_BUS_ERR("%s: NULL pointers for pdata or bus_node",
-			__func__);
+		MSM_BUS_ERR("%s: Invalid pointers pdata %p, bus_node %p\n",
+			__func__, pdata, bus_node);
 		goto exit_copy_node_info;
 	}
 
@@ -968,6 +971,7 @@ static struct device *msm_bus_device_init(
 
 	bus_node = kzalloc(sizeof(struct msm_bus_node_device_type), GFP_KERNEL);
 	if (!bus_node) {
+		MSM_BUS_ERR("%s: Bus node alloc failed\n", __func__);
 		kfree(bus_dev);
 		bus_dev = NULL;
 		goto exit_device_init;
@@ -978,6 +982,7 @@ static struct device *msm_bus_device_init(
 	node_info = devm_kzalloc(bus_dev,
 			sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
 	if (!node_info) {
+		MSM_BUS_ERR("%s: Bus node info alloc failed\n", __func__);
 		devm_kfree(bus_dev, bus_node);
 		kfree(bus_dev);
 		bus_dev = NULL;
@@ -1210,6 +1215,9 @@ static int msm_bus_device_probe(struct platform_device *pdev)
 
 	devm_kfree(&pdev->dev, pdata->info);
 	devm_kfree(&pdev->dev, pdata);
+
+	dev_info(&pdev->dev, "Bus scaling driver probe successful\n");
+
 exit_device_probe:
 	return ret;
 }
@@ -1288,4 +1296,4 @@ int __init msm_bus_device_init_driver(void)
 	}
 	return platform_driver_register(&msm_bus_rules_driver);
 }
-subsys_initcall(msm_bus_device_init_driver);
+fs_initcall(msm_bus_device_init_driver);
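
The registration also moves from subsys_initcall() to fs_initcall(), one initcall level later, presumably so the bus driver probes after the subsystems it depends on are up while still preceding ordinary device_initcall() clients that vote for bandwidth. For reference, the levels as defined in this era's include/linux/init.h:

#define core_initcall(fn)	__define_initcall(fn, 1)
#define postcore_initcall(fn)	__define_initcall(fn, 2)
#define arch_initcall(fn)	__define_initcall(fn, 3)
#define subsys_initcall(fn)	__define_initcall(fn, 4)	/* old level */
#define fs_initcall(fn)		__define_initcall(fn, 5)	/* new level */
#define device_initcall(fn)	__define_initcall(fn, 6)
#define late_initcall(fn)	__define_initcall(fn, 7)
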
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c
index f51939f..2303e82 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -410,8 +410,8 @@ static int msm_bus_noc_set_bw(struct msm_bus_node_device_type *dev,
 			NOC_QOS_MODE_LIMITER))) {
 		struct msm_bus_noc_qos_bw qos_bw;
 
-		bw = msm_bus_div64(info->num_qports,
-				dev->node_bw[ACTIVE_CTX].sum_ab);
+		bw = msm_bus_div64(dev->node_bw[ACTIVE_CTX].sum_ab,
+				info->num_qports);
 
 		for (i = 0; i < info->num_qports; i++) {
 			if (!info->qport) {
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
index d0c0e51..9a5fff6 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -72,8 +72,11 @@ static int *get_arr(struct platform_device *pdev,
 	}
 
 	arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
-	if ((size > 0) && ZERO_OR_NULL_PTR(arr))
+	if ((size > 0) && ZERO_OR_NULL_PTR(arr)) {
+		dev_err(&pdev->dev, "Error: Failed to alloc mem for %s\n",
+				prop);
 		return NULL;
+	}
 
 	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
 	if (ret) {
@@ -99,8 +102,11 @@ static struct msm_bus_fab_device_type *get_fab_device_info(
 	fab_dev = devm_kzalloc(&pdev->dev,
 			sizeof(struct msm_bus_fab_device_type),
 			GFP_KERNEL);
-	if (!fab_dev)
+	if (!fab_dev) {
+		dev_err(&pdev->dev,
+			"Error: Unable to allocate memory for fab_dev\n");
 		return NULL;
+	}
 
 	ret = of_property_read_string(dev_node, "qcom,base-name", &base_name);
 	if (ret) {
@@ -231,6 +237,7 @@ static int msm_bus_of_parse_clk_array(struct device_node *dev_node,
 			(clks * sizeof(struct nodeclk)), GFP_KERNEL);
 
 	if (!(*clk_arr)) {
+		dev_err(&pdev->dev, "Error allocating clk nodes for %d\n", id);
 		ret = -ENOMEM;
 		*num_clks = 0;
 		goto exit_of_parse_clk_array;
@@ -606,11 +613,6 @@ static int get_bus_node_device_data(
 			}
 			of_node_put(qos_clk_node);
 		}
-
-		if (msmbus_coresight_init_adhoc(pdev, dev_node))
-			dev_warn(&pdev->dev,
-				 "Coresight support absent for bus: %d\n",
-				  node_device->node_info->id);
 	} else {
 		node_device->bus_qos_clk.clk = of_clk_get_by_name(dev_node,
 							"bus_qos_clk");
@@ -699,8 +701,11 @@ struct msm_bus_device_node_registration
 	pdata = devm_kzalloc(&pdev->dev,
 			sizeof(struct msm_bus_device_node_registration),
 			GFP_KERNEL);
-	if (!pdata)
+	if (!pdata) {
+		dev_err(&pdev->dev,
+				"Error: Memory allocation for pdata failed\n");
 		return NULL;
+	}
 
 	pdata->num_devices = of_get_child_count(of_node);
 
@@ -708,8 +713,11 @@ struct msm_bus_device_node_registration
 			sizeof(struct msm_bus_node_device_type) *
 			pdata->num_devices, GFP_KERNEL);
 
-	if (!pdata->info)
+	if (!pdata->info) {
+		dev_err(&pdev->dev,
+			"Error: Memory allocation for pdata->info failed\n");
 		goto node_reg_err;
+	}
 
 	ret = 0;
 	for_each_child_of_node(of_node, child_node) {
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
index 63fc336..8ecbf01 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -159,6 +159,7 @@ static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration
 				cd->mas_arb[i].hw_id,
 				cd->mas_arb[i].bw);
 			break;
+		}
 		cd->mas_arb[i].dirty = false;
 	}
 
@@ -179,6 +180,7 @@ static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration
 				cd->slv_arb[i].hw_id,
 				cd->slv_arb[i].bw);
 			break;
+		}
 		cd->slv_arb[i].dirty = false;
 		}
 
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rules.c b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
index 4cff9f2..3fec29d 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rules.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
@@ -93,7 +93,10 @@ static struct rule_node_info *gen_node(u32 id, void *data)
 
 	if (!node_match) {
 		node_match = kzalloc(sizeof(struct rule_node_info), GFP_KERNEL);
+		if (!node_match) {
+			pr_err("%s: Cannot allocate memory\n", __func__);
 			goto exit_node_match;
+		}
 
 		node_match->id = id;
 		node_match->cur_rule = NULL;
diff --git a/drivers/soc/qcom/smp2p_sleepstate.c b/drivers/soc/qcom/smp2p_sleepstate.c
index 9c764aa..310a186 100644
--- a/drivers/soc/qcom/smp2p_sleepstate.c
+++ b/drivers/soc/qcom/smp2p_sleepstate.c
@@ -37,13 +37,12 @@ static int sleepstate_pm_notifier(struct notifier_block *nb,
 	switch (event) {
 	case PM_SUSPEND_PREPARE:
 		gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 0);
-		msleep(25); /* To be tuned based on SMP2P latencies */
 		msm_ipc_router_set_ws_allowed(true);
 		break;
 
 	case PM_POST_SUSPEND:
 		gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 1);
-		msleep(25); /* To be tuned based on SMP2P latencies */
+		usleep_range(10000, 10500); /* Tuned based on SMP2P latencies */
 		msm_ipc_router_set_ws_allowed(false);
 		break;
 	}
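
The remaining delay switches from msleep(25) to usleep_range(10000, 10500): for waits below roughly 20 ms, msleep() can oversleep by a jiffy or more, while hrtimer-backed usleep_range() honors a bounded window and lets the scheduler coalesce wakeups (see Documentation/timers/timers-howto.txt). A sketch of the idiom:

#include <linux/delay.h>

/* illustrative only: bounded ~10 ms settle delay for SMP2P signalling */
static void smp2p_settle_delay(void)
{
	usleep_range(10000, 10500);	/* min 10 ms, max 10.5 ms */
}
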
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index f868336..9d53391 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -88,6 +88,10 @@ static struct ion_heap_desc ion_heap_meta[] = {
 		.name	= ION_QSECOM_HEAP_NAME,
 	},
 	{
+		.id	= ION_QSECOM_TA_HEAP_ID,
+		.name	= ION_QSECOM_TA_HEAP_NAME,
+	},
+	{
 		.id	= ION_SPSS_HEAP_ID,
 		.name	= ION_SPSS_HEAP_NAME,
 	},
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index 84598db..4f9dd73 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -33,6 +33,7 @@ enum ion_heap_ids {
 	ION_CP_MFC_HEAP_ID = 12,
 	ION_SPSS_HEAP_ID = 13, /* Secure Processor ION heap */
 	ION_CP_WB_HEAP_ID = 16, /* 8660 only */
+	ION_QSECOM_TA_HEAP_ID = 19,
 	ION_CAMERA_HEAP_ID = 20, /* 8660 only */
 	ION_SYSTEM_CONTIG_HEAP_ID = 21,
 	ION_ADSP_HEAP_ID = 22,
@@ -130,6 +131,7 @@ enum cp_mem_usage {
 #define ION_PIL1_HEAP_NAME  "pil_1"
 #define ION_PIL2_HEAP_NAME  "pil_2"
 #define ION_QSECOM_HEAP_NAME	"qsecom"
+#define ION_QSECOM_TA_HEAP_NAME	"qsecom_ta"
 #define ION_SECURE_HEAP_NAME	"secure_heap"
 #define ION_SECURE_DISPLAY_HEAP_NAME "secure_display"
 
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index acbd26b..27bf54b 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -58,4 +58,4 @@
 obj-$(CONFIG_MTK_THERMAL)	+= mtk_thermal.o
 obj-$(CONFIG_GENERIC_ADC_THERMAL)	+= thermal-generic-adc.o
 obj-$(CONFIG_THERMAL_QPNP_ADC_TM)	+= qpnp-adc-tm.o
-obj-$(CONFIG_THERMAL_TSENS)	+= msm-tsens.o tsens2xxx.o tsens-dbg.o
+obj-$(CONFIG_THERMAL_TSENS)	+= msm-tsens.o tsens2xxx.o tsens-dbg.o tsens-mtc.o
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index 411588e..152b2a2 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -1320,8 +1320,11 @@ static int qpnp_adc_tm_disable_rearm_high_thresholds(
 		return rc;
 	}
 
-	queue_work(chip->sensor[sensor_num].req_wq,
-				&chip->sensor[sensor_num].work);
+	if (!queue_work(chip->sensor[sensor_num].req_wq,
+				&chip->sensor[sensor_num].work)) {
+		/* The item is already queued, reduce the count */
+		atomic_dec(&chip->wq_cnt);
+	}
 
 	return rc;
 }
@@ -1408,8 +1411,11 @@ static int qpnp_adc_tm_disable_rearm_low_thresholds(
 		return rc;
 	}
 
-	queue_work(chip->sensor[sensor_num].req_wq,
-				&chip->sensor[sensor_num].work);
+	if (!queue_work(chip->sensor[sensor_num].req_wq,
+				&chip->sensor[sensor_num].work)) {
+		/* The item is already queued, reduce the count */
+		atomic_dec(&chip->wq_cnt);
+	}
 
 	return rc;
 }
@@ -1625,13 +1631,14 @@ static irqreturn_t qpnp_adc_tm_rc_thr_isr(int irq, void *data)
 	}
 
 	if (sensor_low_notify_num) {
-		atomic_inc(&chip->wq_cnt);
-		queue_work(chip->low_thr_wq, &chip->trigger_low_thr_work);
+		if (queue_work(chip->low_thr_wq, &chip->trigger_low_thr_work))
+			atomic_inc(&chip->wq_cnt);
 	}
 
 	if (sensor_high_notify_num) {
-		atomic_inc(&chip->wq_cnt);
-		queue_work(chip->high_thr_wq, &chip->trigger_high_thr_work);
+		if (queue_work(chip->high_thr_wq,
+				&chip->trigger_high_thr_work))
+			atomic_inc(&chip->wq_cnt);
 	}
 
 	return IRQ_HANDLED;
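
All four hunks in this file rely on the fact that queue_work() returns false when the work item is already pending, in which case no additional execution will run. Incrementing chip->wq_cnt only on a successful queue (or decrementing it when the queue attempt is redundant) keeps the counter equal to the number of completions still to come. The pattern, reduced to a sketch with hypothetical names:

#include <linux/workqueue.h>
#include <linux/atomic.h>

/* keep *cnt equal to the number of pending work executions */
static void queue_and_count(struct workqueue_struct *wq,
			    struct work_struct *work, atomic_t *cnt)
{
	if (queue_work(wq, work))
		atomic_inc(cnt);	/* newly queued: one more completion */
	/* else: already pending; that execution was counted earlier */
}
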
diff --git a/drivers/thermal/tsens-dbg.c b/drivers/thermal/tsens-dbg.c
index 2e795b1..e1fc6b9 100644
--- a/drivers/thermal/tsens-dbg.c
+++ b/drivers/thermal/tsens-dbg.c
@@ -12,7 +12,9 @@
  */
 
 #include <asm/arch_timer.h>
+#include <linux/platform_device.h>
 #include "tsens.h"
+#include "tsens-mtc.h"
 
 /* debug defines */
 #define	TSENS_DBG_BUS_ID_0			0
@@ -42,6 +44,177 @@ struct tsens_dbg_func {
 	int (*dbg_func)(struct tsens_device *, u32, u32, int *);
 };
 
+static ssize_t
+zonemask_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tsens_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	return snprintf(buf, PAGE_SIZE,
+		"Zone=%d th1=%d th2=%d\n", tmdev->mtcsys.zone_mtc,
+				tmdev->mtcsys.th1, tmdev->mtcsys.th2);
+}
+
+static ssize_t
+zonemask_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int ret;
+	struct tsens_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = sscanf(buf, "%d %d %d", &tmdev->mtcsys.zone_mtc,
+				&tmdev->mtcsys.th1, &tmdev->mtcsys.th2);
+
+	if (ret != TSENS_ZONEMASK_PARAMS) {
+		pr_err("Invalid command line arguments\n");
+		count = -EINVAL;
+	} else {
+		pr_debug("store zone_mtc=%d th1=%d th2=%d\n",
+				tmdev->mtcsys.zone_mtc,
+				tmdev->mtcsys.th1, tmdev->mtcsys.th2);
+		ret = tsens_set_mtc_zone_sw_mask(tmdev->mtcsys.zone_mtc,
+					tmdev->mtcsys.th1, tmdev->mtcsys.th2);
+		if (ret < 0) {
+			pr_err("Invalid command line arguments\n");
+			count = -EINVAL;
+		}
+	}
+
+	return count;
+}
+
+static ssize_t
+zonelog_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int ret, zlog[TSENS_MTC_ZONE_LOG_SIZE];
+	struct tsens_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = tsens_get_mtc_zone_log(tmdev->mtcsys.zone_log, zlog);
+	if (ret < 0) {
+		pr_err("Failed to read MTC zone log\n");
+		return -EINVAL;
+	}
+
+	return snprintf(buf, PAGE_SIZE,
+		"Log[0]=%d\nLog[1]=%d\nLog[2]=%d\nLog[3]=%d\nLog[4]=%d\nLog[5]=%d\n",
+			zlog[0], zlog[1], zlog[2], zlog[3], zlog[4], zlog[5]);
+}
+
+static ssize_t
+zonelog_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int ret;
+	struct tsens_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = kstrtou32(buf, 0, &tmdev->mtcsys.zone_log);
+	if (ret < 0) {
+		pr_err("Invalid command line arguments\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t
+zonehist_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int ret, zhist[TSENS_MTC_ZONE_HISTORY_SIZE];
+	struct tsens_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = tsens_get_mtc_zone_history(tmdev->mtcsys.zone_hist, zhist);
+	if (ret < 0) {
+		pr_err("Failed to read MTC zone history\n");
+		return -EINVAL;
+	}
+
+	return snprintf(buf, PAGE_SIZE,
+		"Cool = %d\nYellow = %d\nRed = %d\n",
+			zhist[0], zhist[1], zhist[2]);
+}
+
+static ssize_t
+zonehist_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int ret;
+	struct tsens_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = kstrtou32(buf, 0, &tmdev->mtcsys.zone_hist);
+	if (ret < 0) {
+		pr_err("Invalid command line arguments\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static struct device_attribute tsens_mtc_dev_attr[] = {
+	__ATTR(zonemask, 0644, zonemask_show, zonemask_store),
+	__ATTR(zonelog, 0644, zonelog_show, zonelog_store),
+	__ATTR(zonehist, 0644, zonehist_show, zonehist_store),
+};
+
+static int tsens_dbg_mtc_data(struct tsens_device *data,
+					u32 id, u32 dbg_type, int *val)
+{
+	int result = 0, i;
+	struct tsens_device *tmdev = NULL;
+	struct device_attribute *attr_ptr = NULL;
+
+	attr_ptr = tsens_mtc_dev_attr;
+	tmdev = data;
+
+	for (i = 0; i < ARRAY_SIZE(tsens_mtc_dev_attr); i++) {
+		result = device_create_file(&tmdev->pdev->dev, &attr_ptr[i]);
+		if (result < 0)
+			goto error;
+	}
+
+	return result;
+
+error:
+	for (i--; i >= 0; i--)
+		device_remove_file(&tmdev->pdev->dev, &attr_ptr[i]);
+
+	return result;
+}
+
 static int tsens_dbg_log_temp_reads(struct tsens_device *data, u32 id,
 					u32 dbg_type, int *temp)
 {
@@ -206,6 +379,7 @@ static struct tsens_dbg_func dbg_arr[] = {
 	[TSENS_DBG_LOG_INTERRUPT_TIMESTAMP] = {
 			tsens_dbg_log_interrupt_timestamp},
 	[TSENS_DBG_LOG_BUS_ID_DATA] = {tsens_dbg_log_bus_id_data},
+	[TSENS_DBG_MTC_DATA] = {tsens_dbg_mtc_data},
 };
 
 int tsens2xxx_dbg(struct tsens_device *data, u32 id, u32 dbg_type, int *val)
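
tsens_dbg_mtc_data() above registers the three MTC attributes one at a time and unwinds on failure. An equivalent shape, shown only as a sketch and assuming the zonemask/zonelog/zonehist show/store handlers defined above, would let the driver core create and remove the files as a unit via an attribute group:

#include <linux/device.h>
#include <linux/sysfs.h>

static DEVICE_ATTR_RW(zonemask);	/* expands to 0644 + *_show/*_store */
static DEVICE_ATTR_RW(zonelog);
static DEVICE_ATTR_RW(zonehist);

static struct attribute *tsens_mtc_attrs[] = {
	&dev_attr_zonemask.attr,
	&dev_attr_zonelog.attr,
	&dev_attr_zonehist.attr,
	NULL,
};

static const struct attribute_group tsens_mtc_group = {
	.attrs = tsens_mtc_attrs,
};

/* registration then collapses to:
 *	sysfs_create_group(&tmdev->pdev->dev.kobj, &tsens_mtc_group);
 */
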
diff --git a/drivers/thermal/tsens-mtc.c b/drivers/thermal/tsens-mtc.c
new file mode 100644
index 0000000..529503f
--- /dev/null
+++ b/drivers/thermal/tsens-mtc.c
@@ -0,0 +1,195 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "tsens.h"
+#include "tsens-mtc.h"
+
+struct tsens_device *tsens_controller_is_present(void)
+{
+	struct tsens_device *tmdev_chip = NULL;
+
+	if (list_empty(&tsens_device_list)) {
+		pr_err("%s: TSENS controller not available\n", __func__);
+		return tmdev_chip;
+	}
+
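+	/* return the first (and only expected) controller on the list */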
+	list_for_each_entry(tmdev_chip, &tsens_device_list, list)
+		return tmdev_chip;
+
+	return tmdev_chip;
+}
+EXPORT_SYMBOL(tsens_controller_is_present);
+
+static int tsens_mtc_reset_history_counter(unsigned int zone)
+{
+	unsigned int reg_cntl, is_valid;
+	void __iomem *sensor_addr;
+	struct tsens_device *tmdev = NULL;
+
+	if (zone >= TSENS_NUM_MTC_ZONES_SUPPORT)
+		return -EINVAL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	sensor_addr = TSENS_TM_MTC_ZONE0_SW_MASK_ADDR(tmdev->tsens_tm_addr);
+	reg_cntl = readl_relaxed((sensor_addr +
+		(zone * TSENS_SN_ADDR_OFFSET)));
+	is_valid = (reg_cntl & TSENS_RESET_HISTORY_MASK)
+				>> TSENS_RESET_HISTORY_SHIFT;
+	if (!is_valid) {
+		/* Enable the bit to reset counter */
+		writel_relaxed(reg_cntl | (1 << TSENS_RESET_HISTORY_SHIFT),
+				(sensor_addr + (zone * TSENS_SN_ADDR_OFFSET)));
+		reg_cntl = readl_relaxed((sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+		pr_debug("tsens: zone=%d reg=%x\n", zone, reg_cntl);
+	}
+
+	/* Disable the bit to start counter */
+	writel_relaxed(reg_cntl & ~(1 << TSENS_RESET_HISTORY_SHIFT),
+				(sensor_addr + (zone * TSENS_SN_ADDR_OFFSET)));
+	reg_cntl = readl_relaxed((sensor_addr +
+			(zone * TSENS_SN_ADDR_OFFSET)));
+	pr_debug("tsens: zone=%d reg=%x\n", zone, reg_cntl);
+
+	return 0;
+}
+EXPORT_SYMBOL(tsens_mtc_reset_history_counter);
+
+int tsens_set_mtc_zone_sw_mask(unsigned int zone, unsigned int th1_enable,
+				unsigned int th2_enable)
+{
+	unsigned int reg_cntl;
+	void __iomem *sensor_addr;
+	struct tsens_device *tmdev = NULL;
+
+	if (zone >= TSENS_NUM_MTC_ZONES_SUPPORT)
+		return -EINVAL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	sensor_addr = TSENS_TM_MTC_ZONE0_SW_MASK_ADDR
+					(tmdev->tsens_tm_addr);
+
+	if (th1_enable && th2_enable)
+		writel_relaxed(TSENS_MTC_IN_EFFECT,
+				(sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	if (!th1_enable && !th2_enable)
+		writel_relaxed(TSENS_MTC_DISABLE,
+				(sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	if (th1_enable && !th2_enable)
+		writel_relaxed(TSENS_TH1_MTC_IN_EFFECT,
+				(sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	if (!th1_enable && th2_enable)
+		writel_relaxed(TSENS_TH2_MTC_IN_EFFECT,
+				(sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	reg_cntl = readl_relaxed((sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	pr_debug("tsens: zone=%d th1=%d th2=%d reg=%x\n",
+		zone, th1_enable, th2_enable, reg_cntl);
+
+	return 0;
+}
+EXPORT_SYMBOL(tsens_set_mtc_zone_sw_mask);
+
+int tsens_get_mtc_zone_log(unsigned int zone, void *zone_log)
+{
+	unsigned int i, reg_cntl, is_valid, log[TSENS_MTC_ZONE_LOG_SIZE];
+	int *zlog = (int *)zone_log;
+	void __iomem *sensor_addr;
+	struct tsens_device *tmdev = NULL;
+
+	if (zone >= TSENS_NUM_MTC_ZONES_SUPPORT)
+		return -EINVAL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	sensor_addr = TSENS_TM_MTC_ZONE0_LOG(tmdev->tsens_tm_addr);
+
+	reg_cntl = readl_relaxed((sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	is_valid = (reg_cntl & TSENS_LOGS_VALID_MASK)
+				>> TSENS_LOGS_VALID_SHIFT;
+	if (is_valid) {
+		log[0] = (reg_cntl & TSENS_LOGS_LATEST_MASK);
+		log[1] = (reg_cntl & TSENS_LOGS_LOG1_MASK)
+				>> TSENS_LOGS_LOG1_SHIFT;
+		log[2] = (reg_cntl & TSENS_LOGS_LOG2_MASK)
+				>> TSENS_LOGS_LOG2_SHIFT;
+		log[3] = (reg_cntl & TSENS_LOGS_LOG3_MASK)
+				>> TSENS_LOGS_LOG3_SHIFT;
+		log[4] = (reg_cntl & TSENS_LOGS_LOG4_MASK)
+				>> TSENS_LOGS_LOG4_SHIFT;
+		log[5] = (reg_cntl & TSENS_LOGS_LOG5_MASK)
+				>> TSENS_LOGS_LOG5_SHIFT;
+		for (i = 0; i < (TSENS_MTC_ZONE_LOG_SIZE); i++) {
+			*(zlog+i) = log[i];
+			pr_debug("Log[%d]=%d\n", i, log[i]);
+		}
+	} else {
+		pr_debug("tsens: Valid bit disabled\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(tsens_get_mtc_zone_log);
+
+int tsens_get_mtc_zone_history(unsigned int zone, void *zone_hist)
+{
+	unsigned int i, reg_cntl, hist[TSENS_MTC_ZONE_HISTORY_SIZE];
+	int *zhist = (int *)zone_hist;
+	void __iomem *sensor_addr;
+	struct tsens_device *tmdev = NULL;
+
+	if (zone >= TSENS_NUM_MTC_ZONES_SUPPORT)
+		return -EINVAL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	sensor_addr = TSENS_TM_MTC_ZONE0_HISTORY(tmdev->tsens_tm_addr);
+	reg_cntl = readl_relaxed((sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+
+	hist[0] = (reg_cntl & TSENS_PS_COOL_CMD_MASK);
+	hist[1] = (reg_cntl & TSENS_PS_YELLOW_CMD_MASK)
+			>> TSENS_PS_YELLOW_CMD_SHIFT;
+	hist[2] = (reg_cntl & TSENS_PS_RED_CMD_MASK)
+			>> TSENS_PS_RED_CMD_SHIFT;
+	for (i = 0; i < (TSENS_MTC_ZONE_HISTORY_SIZE); i++) {
+		*(zhist+i) = hist[i];
+		pr_debug("tsens: %d\n", hist[i]);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(tsens_get_mtc_zone_history);
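
The zone log read above unpacks six 5-bit entries plus a valid bit from a single 32-bit register. A worked decode against a hypothetical register snapshot shows how the tsens-mtc.h masks and shifts recover the fields:

#include <stdint.h>
#include <stdio.h>

#define TSENS_LOGS_VALID_MASK	0x40000000
#define TSENS_LOGS_VALID_SHIFT	30
#define TSENS_LOGS_LATEST_MASK	0x0000001f
#define TSENS_LOGS_LOG1_MASK	0x000003e0
#define TSENS_LOGS_LOG1_SHIFT	5

int main(void)
{
	/* hypothetical snapshot: valid bit set, latest = 7, log1 = 3 */
	uint32_t reg = (1u << TSENS_LOGS_VALID_SHIFT) |
		       (3u << TSENS_LOGS_LOG1_SHIFT) | 7u;

	if ((reg & TSENS_LOGS_VALID_MASK) >> TSENS_LOGS_VALID_SHIFT) {
		printf("latest = %u\n",
		       (unsigned)(reg & TSENS_LOGS_LATEST_MASK));	/* 7 */
		printf("log1   = %u\n",
		       (unsigned)((reg & TSENS_LOGS_LOG1_MASK) >>
				  TSENS_LOGS_LOG1_SHIFT));		/* 3 */
	}
	return 0;
}
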
diff --git a/drivers/thermal/tsens-mtc.h b/drivers/thermal/tsens-mtc.h
new file mode 100644
index 0000000..979513f
--- /dev/null
+++ b/drivers/thermal/tsens-mtc.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define TSENS_NUM_MTC_ZONES_SUPPORT 3
+#define TSENS_TM_MTC_ZONE0_SW_MASK_ADDR(n)      ((n) + 0x140)
+#define TSENS_TM_MTC_ZONE0_LOG(n)               ((n) + 0x150)
+#define TSENS_TM_MTC_ZONE0_HISTORY(n)           ((n) + 0x160)
+#define TSENS_SN_ADDR_OFFSET             0x4
+#define TSENS_RESET_HISTORY_MASK        0x4
+#define TSENS_ZONEMASK_PARAMS           3
+#define TSENS_MTC_ZONE_LOG_SIZE         6
+#define TSENS_MTC_ZONE_HISTORY_SIZE     3
+
+#define TSENS_TH1_MTC_IN_EFFECT               BIT(0)
+#define TSENS_TH2_MTC_IN_EFFECT               BIT(1)
+#define TSENS_MTC_IN_EFFECT                     0x3
+#define TSENS_MTC_DISABLE                       0x0
+
+#define TSENS_LOGS_VALID_MASK      0x40000000
+#define TSENS_LOGS_VALID_SHIFT     30
+#define TSENS_LOGS_LATEST_MASK    0x0000001f
+#define TSENS_LOGS_LOG1_MASK      0x000003e0
+#define TSENS_LOGS_LOG2_MASK      0x00007c00
+#define TSENS_LOGS_LOG3_MASK      0x000f8000
+#define TSENS_LOGS_LOG4_MASK      0x01f00000
+#define TSENS_LOGS_LOG5_MASK      0x3e000000
+#define TSENS_LOGS_LOG1_SHIFT     5
+#define TSENS_LOGS_LOG2_SHIFT     10
+#define TSENS_LOGS_LOG3_SHIFT     15
+#define TSENS_LOGS_LOG4_SHIFT     20
+#define TSENS_LOGS_LOG5_SHIFT     25
+
+#define TSENS_PS_RED_CMD_MASK   0x3ff00000
+#define TSENS_PS_YELLOW_CMD_MASK        0x000ffc00
+#define TSENS_PS_COOL_CMD_MASK  0x000003ff
+#define TSENS_PS_YELLOW_CMD_SHIFT       0xa
+#define TSENS_PS_RED_CMD_SHIFT  0x14
+
+#define TSENS_RESET_HISTORY_SHIFT       2
+
+extern int tsens_get_mtc_zone_history(unsigned int zone, void *zone_hist);
+extern struct tsens_device *tsens_controller_is_present(void);
+extern int tsens_set_mtc_zone_sw_mask(unsigned int zone,
+			unsigned int th1_enable, unsigned int th2_enable);
+extern int tsens_get_mtc_zone_log(unsigned int zone, void *zone_log);
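
Each MTC zone owns one 4-byte slot in every register bank, so all the accessors above index by zone * TSENS_SN_ADDR_OFFSET from the zone-0 address. A quick check of the addresses this produces, using a hypothetical TM base:

#include <stdint.h>
#include <stdio.h>

#define TSENS_TM_MTC_ZONE0_SW_MASK_ADDR(n)	((n) + 0x140)
#define TSENS_SN_ADDR_OFFSET			0x4

int main(void)
{
	uintptr_t tm_base = 0x4a9000;	/* hypothetical mapped TM base */
	unsigned int zone;

	for (zone = 0; zone < 3; zone++)
		printf("zone %u sw_mask @ %#lx\n", zone,
		       (unsigned long)(TSENS_TM_MTC_ZONE0_SW_MASK_ADDR(tm_base)
				       + zone * TSENS_SN_ADDR_OFFSET));
	/* -> base + 0x140, base + 0x144, base + 0x148 */
	return 0;
}
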
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index ec2d592..ae4741d 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -32,6 +32,7 @@ enum tsens_dbg_type {
 	TSENS_DBG_LOG_TEMP_READS,
 	TSENS_DBG_LOG_INTERRUPT_TIMESTAMP,
 	TSENS_DBG_LOG_BUS_ID_DATA,
+	TSENS_DBG_MTC_DATA,
 	TSENS_DBG_LOG_MAX
 };
 
@@ -114,6 +115,15 @@ struct tsens_data {
 	u32				cycle_compltn_monitor_mask;
 	bool				wd_bark;
 	u32				wd_bark_mask;
+	bool				mtc;
+};
+
+struct tsens_mtc_sysfs {
+	uint32_t	zone_log;
+	int		zone_mtc;
+	int		th1;
+	int		th2;
+	uint32_t	zone_hist;
+};
 
 struct tsens_device {
@@ -130,8 +140,10 @@ struct tsens_device {
 	spinlock_t			tsens_upp_low_lock;
 	const struct tsens_data		*ctrl_data;
+	struct tsens_mtc_sysfs		mtcsys;
 	struct tsens_sensor		sensor[0];
 };
 
 extern const struct tsens_data data_tsens2xxx, data_tsens23xx, data_tsens24xx;
+extern struct list_head tsens_device_list;
 
 #endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index fd625ae..50c847f 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -570,6 +570,11 @@ static int tsens2xxx_hw_init(struct tsens_device *tmdev)
 	spin_lock_init(&tmdev->tsens_crit_lock);
 	spin_lock_init(&tmdev->tsens_upp_low_lock);
 
+	if (tmdev->ctrl_data->mtc) {
+		if (tmdev->ops->dbg)
+			tmdev->ops->dbg(tmdev, 0, TSENS_DBG_MTC_DATA, NULL);
+	}
+
 	return 0;
 }
 
@@ -628,6 +633,7 @@ const struct tsens_data data_tsens2xxx = {
 	.wd_bark			= false,
 	.wd_bark_mask			= 1,
 	.ops				= &ops_tsens2xxx,
+	.mtc				= true,
 };
 
 const struct tsens_data data_tsens23xx = {
@@ -636,6 +642,7 @@ const struct tsens_data data_tsens23xx = {
 	.wd_bark			= true,
 	.wd_bark_mask			= 1,
 	.ops				= &ops_tsens2xxx,
+	.mtc				= false,
 };
 
 const struct tsens_data data_tsens24xx = {
@@ -645,4 +652,5 @@ const struct tsens_data data_tsens24xx = {
 	/* Enable Watchdog monitoring by unmasking */
 	.wd_bark_mask			= 0,
 	.ops				= &ops_tsens2xxx,
+	.mtc				= false,
 };
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 5c70da8..de7fefc 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -229,6 +229,7 @@ struct dwc3_msm {
 	struct power_supply	*usb_psy;
 	struct work_struct	vbus_draw_work;
 	bool			in_host_mode;
+	bool			in_device_mode;
 	enum usb_device_speed	max_rh_port_speed;
 	unsigned int		tx_fifo_size;
 	bool			vbus_active;
@@ -2002,7 +2003,7 @@ static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
 	unsigned long timeout;
 	u32 reg = 0;
 
-	if ((mdwc->in_host_mode || mdwc->vbus_active)
+	if ((mdwc->in_host_mode || mdwc->in_device_mode)
 			&& dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
 		if (!atomic_read(&mdwc->in_p3)) {
 			dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n");
@@ -2144,7 +2145,7 @@ static void configure_nonpdc_usb_interrupt(struct dwc3_msm *mdwc,
 		dbg_event(0xFF, "IRQ_DIS", uirq->irq);
 		disable_irq_wake(uirq->irq);
 		disable_irq_nosync(uirq->irq);
-		uirq->enable = true;
+		uirq->enable = false;
 	}
 }
 
@@ -2265,7 +2266,8 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
 	clk_disable_unprepare(mdwc->xo_clk);
 
 	/* Perform controller power collapse */
-	if (!mdwc->in_host_mode && (!mdwc->vbus_active || mdwc->in_restart)) {
+	if (!mdwc->in_host_mode && (!mdwc->in_device_mode ||
+					mdwc->in_restart)) {
 		mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
 		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
 		dwc3_msm_config_gdsc(mdwc, 0);
@@ -2307,7 +2309,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
 	 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
 	 * case of host bus suspend and device bus suspend.
 	 */
-	if (mdwc->vbus_active || mdwc->in_host_mode) {
+	if (mdwc->in_device_mode || mdwc->in_host_mode) {
 		if (mdwc->use_pdc_interrupts) {
 			enable_usb_pdc_interrupt(mdwc, true);
 		} else {
@@ -2320,6 +2322,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
 	}
 
 	dev_info(mdwc->dev, "DWC3 in low power mode\n");
+	dbg_event(0xFF, "Ctl Sus", atomic_read(&dwc->in_lpm));
 	mutex_unlock(&mdwc->suspend_resume_mutex);
 	return 0;
 }
@@ -3898,6 +3901,7 @@ static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
 		dwc3_msm_block_reset(mdwc, false);
 
 		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+		mdwc->in_device_mode = true;
 		usb_gadget_vbus_connect(&dwc->gadget);
 #ifdef CONFIG_SMP
 		mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
@@ -3916,6 +3920,7 @@ static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
 		msm_dwc3_perf_vote_update(mdwc, false);
 		pm_qos_remove_request(&mdwc->pm_qos_req_dma);
 
+		mdwc->in_device_mode = false;
 		usb_gadget_vbus_disconnect(&dwc->gadget);
 		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
 		usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 4e7de00..cbce880 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -272,7 +272,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
 	return ret;
 }
 
-static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
 {
 	struct dwc3_ep		*dep;
 
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 70c00d2..0ffe351 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -3134,6 +3134,15 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 	dwc->test_mode = false;
 
+	/*
+	 * From SNPS databook section 8.1.2
+	 * the EP0 should be in setup phase. So ensure
+	 * that EP0 is in setup phase by issuing a stall
+	 * and restart if EP0 is not in setup phase.
+	 */
+	if (dwc->ep0state != EP0_SETUP_PHASE)
+		dwc3_ep0_stall_and_restart(dwc);
+
 	dwc3_stop_active_transfers(dwc);
 	dwc3_clear_stall_all_ep(dwc);
 
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 25d8d8f..8275e56 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -98,6 +98,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
 void dwc3_ep0_interrupt(struct dwc3 *dwc,
 		const struct dwc3_event_depevt *event);
 void dwc3_ep0_out_start(struct dwc3 *dwc);
+void dwc3_ep0_stall_and_restart(struct dwc3 *dwc);
 int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
 int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
 int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b040fdd..d3e0ca5 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -182,6 +182,12 @@
 config USB_F_RNDIS
 	tristate
 
+config USB_F_QCRNDIS
+	tristate
+
+config USB_F_RMNET_BAM
+	tristate
+
 config USB_F_MASS_STORAGE
 	tristate
 
@@ -312,6 +318,14 @@
 	  On hardware that can't implement the full protocol,
 	  a simple CDC subset is used, placing fewer demands on USB.
 
+config USB_CONFIGFS_QCRNDIS
+	bool "QCRNDIS"
+	depends on USB_CONFIGFS
+	depends on RNDIS_IPA
+	depends on NET
+	select USB_U_ETHER
+	select USB_F_QCRNDIS
+
 config USB_CONFIGFS_RNDIS
 	bool "RNDIS"
 	depends on USB_CONFIGFS
@@ -328,6 +342,12 @@
 	   XP, you'll need to download drivers from Microsoft's website; a URL
 	   is given in comments found in that info file.
 
+config USB_CONFIGFS_RMNET_BAM
+	bool "RMNET_BAM"
+	depends on USB_CONFIGFS
+	depends on IPA
+	select USB_F_RMNET_BAM
+
 config USB_CONFIGFS_EEM
 	bool "Ethernet Emulation Model (EEM)"
 	depends on USB_CONFIGFS
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 960c2cc..0e96740 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -64,3 +64,7 @@
 obj-$(CONFIG_USB_F_GSI)         += usb_f_gsi.o
 usb_f_qdss-y			:= f_qdss.o u_qdss.o
 obj-$(CONFIG_USB_F_QDSS)        += usb_f_qdss.o
+usb_f_qcrndis-y			:= f_qc_rndis.o rndis.o u_data_ipa.o
+obj-$(CONFIG_USB_F_QCRNDIS)	+= usb_f_qcrndis.o
+usb_f_rmnet_bam-y		:= f_rmnet.o u_ctrl_qti.o
+obj-$(CONFIG_USB_F_RMNET_BAM)	+= usb_f_rmnet_bam.o
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index f0042ec..fd49fc4 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -48,7 +48,7 @@ static void *ffs_ipc_log;
 #define ffs_log(fmt, ...) do { \
 	ipc_log_string(ffs_ipc_log, "%s: " fmt,  __func__, \
 			##__VA_ARGS__); \
-	pr_debug(fmt, ##__VA_ARGS__); \
+	pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
 } while (0)
 
 /* Reference counter handling */
@@ -67,6 +67,18 @@ __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
 static int __must_check
 __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
 
+/* ffs instance status */
+static DEFINE_MUTEX(ffs_ep_lock);
+static bool ffs_inst_exist;
+static struct f_fs_opts *g_opts;
+
+/* Free instance structures */
+static void ffs_inst_clean(struct f_fs_opts *opts);
+static void ffs_inst_clean_delay(void);
+static int ffs_inst_exist_check(void);
+
+/* Global ffs_data pointer */
+static struct ffs_data *g_ffs_data;
 
 /* The function structure ***************************************************/
 
@@ -353,6 +365,10 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
 	ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
 		ffs->state, ffs->setup_state, ffs->flags);
 
+	ret = ffs_inst_exist_check();
+	if (ret < 0)
+		return ret;
+
 	/* Fast check if setup was canceled */
 	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
 		return -EIDRM;
@@ -539,6 +555,10 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
 	ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
 		ffs->state, ffs->setup_state, ffs->flags);
 
+	ret = ffs_inst_exist_check();
+	if (ret < 0)
+		return ret;
+
 	/* Fast check if setup was canceled */
 	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
 		return -EIDRM;
@@ -639,12 +659,17 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
 static int ffs_ep0_open(struct inode *inode, struct file *file)
 {
 	struct ffs_data *ffs = inode->i_private;
+	int ret;
 
 	ENTER();
 
 	ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
 		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
 
+	ret = ffs_inst_exist_check();
+	if (ret < 0)
+		return ret;
+
 	/* to get updated opened atomic variable value */
 	smp_mb__before_atomic();
 	if (atomic_read(&ffs->opened))
@@ -684,6 +709,10 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
 	ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
 		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
 
+	ret = ffs_inst_exist_check();
+	if (ret < 0)
+		return ret;
+
 	if (code == FUNCTIONFS_INTERFACE_REVMAP) {
 		struct ffs_function *func = ffs->func;
 		ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
@@ -705,6 +734,10 @@ static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
 	ffs_log("enter:state %d setup_state %d flags %lu opened %d", ffs->state,
 		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
 
+	ret = ffs_inst_exist_check();
+	if (ret < 0)
+		return ret;
+
 	poll_wait(file, &ffs->ev.waitq, wait);
 
 	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
@@ -1198,12 +1231,17 @@ static int
 ffs_epfile_open(struct inode *inode, struct file *file)
 {
 	struct ffs_epfile *epfile = inode->i_private;
+	int ret;
 
 	ENTER();
 
 	ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
 		epfile->ffs->setup_state, epfile->ffs->flags);
 
+	ret = ffs_inst_exist_check();
+	if (ret < 0)
+		return ret;
+
 	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
 		return -ENODEV;
 
@@ -1255,11 +1293,16 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
 {
 	struct ffs_io_data io_data, *p = &io_data;
 	ssize_t res;
+	int ret;
 
 	ENTER();
 
 	ffs_log("enter");
 
+	ret = ffs_inst_exist_check();
+	if (ret < 0)
+		return ret;
+
 	if (!is_sync_kiocb(kiocb)) {
 		p = kmalloc(sizeof(io_data), GFP_KERNEL);
 		if (unlikely(!p))
@@ -1296,11 +1339,16 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
 {
 	struct ffs_io_data io_data, *p = &io_data;
 	ssize_t res;
+	int ret;
 
 	ENTER();
 
 	ffs_log("enter");
 
+	ret = ffs_inst_exist_check();
+	if (ret < 0)
+		return ret;
+
 	if (!is_sync_kiocb(kiocb)) {
 		p = kmalloc(sizeof(io_data), GFP_KERNEL);
 		if (unlikely(!p))
@@ -1376,6 +1424,10 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
 	ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
 		epfile->ffs->setup_state, epfile->ffs->flags);
 
+	ret = ffs_inst_exist_check();
+	if (ret < 0)
+		return ret;
+
 	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
 		return -ENODEV;
 
@@ -1731,7 +1783,6 @@ ffs_fs_kill_sb(struct super_block *sb)
 	if (sb->s_fs_info) {
 		ffs_release_dev(sb->s_fs_info);
 		ffs_data_closed(sb->s_fs_info);
-		ffs_data_put(sb->s_fs_info);
 	}
 
 	ffs_log("exit");
@@ -1829,11 +1880,16 @@ static void ffs_data_put(struct ffs_data *ffs)
 	smp_mb__before_atomic();
 	if (unlikely(atomic_dec_and_test(&ffs->ref))) {
 		pr_info("%s(): freeing\n", __func__);
+		/* Clear g_ffs_data */
+		ffs_dev_lock();
+		g_ffs_data = NULL;
+		ffs_dev_unlock();
 		ffs_data_clear(ffs);
 		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
 		       waitqueue_active(&ffs->ep0req_completion.wait));
 		kfree(ffs->dev_name);
 		kfree(ffs);
+		ffs_inst_clean_delay();
 	}
 
 	ffs_log("exit");
@@ -1900,6 +1956,11 @@ static struct ffs_data *ffs_data_new(void)
 	/* XXX REVISIT need to update it in some places, or do we? */
 	ffs->ev.can_stall = 1;
 
+	/* Store ffs to g_ffs_data */
+	ffs_dev_lock();
+	g_ffs_data = ffs;
+	ffs_dev_unlock();
+
 	ffs_log("exit");
 
 	return ffs;
@@ -3798,15 +3859,69 @@ static struct config_item_type ffs_func_type = {
 
 /* Function registration interface ******************************************/
 
+static int ffs_inst_exist_check(void)
+{
+	mutex_lock(&ffs_ep_lock);
+
+	if (unlikely(ffs_inst_exist == false)) {
+		mutex_unlock(&ffs_ep_lock);
+		pr_err_ratelimited(
+				"%s: f_fs instance freed already.\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	mutex_unlock(&ffs_ep_lock);
+
+	return 0;
+}
+
+static void ffs_inst_clean(struct f_fs_opts *opts)
+{
+	g_opts = NULL;
+	ffs_dev_lock();
+	_ffs_free_dev(opts->dev);
+	ffs_dev_unlock();
+	kfree(opts);
+}
+
+static void ffs_inst_clean_delay(void)
+{
+	mutex_lock(&ffs_ep_lock);
+
+	if (unlikely(ffs_inst_exist == false)) {
+		if (g_opts) {
+			ffs_inst_clean(g_opts);
+			pr_err_ratelimited("%s: performed deferred free\n",
+					__func__);
+		}
+		mutex_unlock(&ffs_ep_lock);
+		return;
+	}
+
+	mutex_unlock(&ffs_ep_lock);
+}
+
 static void ffs_free_inst(struct usb_function_instance *f)
 {
 	struct f_fs_opts *opts;
 
 	opts = to_f_fs_opts(f);
-	ffs_dev_lock();
-	_ffs_free_dev(opts->dev);
-	ffs_dev_unlock();
-	kfree(opts);
+
+	mutex_lock(&ffs_ep_lock);
+	if (opts->dev->ffs_data
+			&& atomic_read(&opts->dev->ffs_data->opened)) {
+		ffs_inst_exist = false;
+		mutex_unlock(&ffs_ep_lock);
+		ffs_log("%s: dev still open, deferring free until close\n",
+				__func__);
+		return;
+	}
+
+	ffs_inst_clean(opts);
+	ffs_inst_exist = false;
+	g_opts = NULL;
+	mutex_unlock(&ffs_ep_lock);
 }
 
 #define MAX_INST_NAME_LEN	40
@@ -3826,6 +3941,14 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
 	if (!ptr)
 		return -ENOMEM;
 
+	mutex_lock(&ffs_ep_lock);
+	if (g_opts) {
+		mutex_unlock(&ffs_ep_lock);
+		ffs_log("%s: previous instance not freed yet\n", __func__);
+		return -EBUSY;
+	}
+	mutex_unlock(&ffs_ep_lock);
+
 	opts = to_f_fs_opts(fi);
 	tmp = NULL;
 
@@ -3840,10 +3963,27 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
 	}
 	opts->dev->name_allocated = true;
 
+	/*
+	 * If the ffs instance was freed and then re-created, the newly
+	 * allocated opts->dev must have its ffs_data pointer initialized,
+	 * and ffs_data->private_data must be updated to point at the new
+	 * opts->dev.
+	 */
+	if (g_ffs_data)
+		opts->dev->ffs_data = g_ffs_data;
+
+	if (opts->dev->ffs_data)
+		opts->dev->ffs_data->private_data = opts->dev;
+
 	ffs_dev_unlock();
 
 	kfree(tmp);
 
+	mutex_lock(&ffs_ep_lock);
+	ffs_inst_exist = true;
+	g_opts = opts;
+	mutex_unlock(&ffs_ep_lock);
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index b1a4a29..7e4e7ce 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -226,12 +226,12 @@ int ipa_usb_notify_cb(enum ipa_usb_notify_event event,
 				return -ENOMEM;
 			}
 			cpkt_notify_speed->type = GSI_CTRL_NOTIFY_SPEED;
-			spin_lock_irqsave(&gsi->c_port.lock, flags);
+			spin_lock(&gsi->c_port.lock);
 			list_add_tail(&cpkt_notify_connect->list,
 					&gsi->c_port.cpkt_resp_q);
 			list_add_tail(&cpkt_notify_speed->list,
 					&gsi->c_port.cpkt_resp_q);
-			spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+			spin_unlock(&gsi->c_port.lock);
 			gsi_ctrl_send_notification(gsi);
 		}
 
@@ -1939,9 +1939,11 @@ static int gsi_alloc_trb_buffer(struct f_gsi *gsi)
 {
 	u32 len_in = 0, len_out = 0;
 	int ret = 0;
+	struct device *dev;
 
 	log_event_dbg("allocate trb's buffer\n");
 
+	dev = gsi->d_port.gadget->dev.parent;
 	if (gsi->d_port.in_ep && !gsi->d_port.in_request.buf_base_addr) {
 		log_event_dbg("IN: num_bufs:=%zu, buf_len=%zu\n",
 			gsi->d_port.in_request.num_bufs,
@@ -1950,7 +1952,7 @@ static int gsi_alloc_trb_buffer(struct f_gsi *gsi)
 		len_in = gsi->d_port.in_request.buf_len *
 				gsi->d_port.in_request.num_bufs;
 		gsi->d_port.in_request.buf_base_addr =
-			dma_zalloc_coherent(gsi->d_port.gadget->dev.parent,
+			dma_zalloc_coherent(dev->parent,
 			len_in, &gsi->d_port.in_request.dma, GFP_KERNEL);
 		if (!gsi->d_port.in_request.buf_base_addr) {
 			dev_err(&gsi->d_port.gadget->dev,
@@ -1969,7 +1971,7 @@ static int gsi_alloc_trb_buffer(struct f_gsi *gsi)
 		len_out = gsi->d_port.out_request.buf_len *
 				gsi->d_port.out_request.num_bufs;
 		gsi->d_port.out_request.buf_base_addr =
-			dma_zalloc_coherent(gsi->d_port.gadget->dev.parent,
+			dma_zalloc_coherent(dev->parent,
 			len_out, &gsi->d_port.out_request.dma, GFP_KERNEL);
 		if (!gsi->d_port.out_request.buf_base_addr) {
 			dev_err(&gsi->d_port.gadget->dev,
@@ -1985,7 +1987,7 @@ static int gsi_alloc_trb_buffer(struct f_gsi *gsi)
 
 fail:
 	if (len_in && gsi->d_port.in_request.buf_base_addr) {
-		dma_free_coherent(gsi->d_port.gadget->dev.parent, len_in,
+		dma_free_coherent(dev->parent, len_in,
 				gsi->d_port.in_request.buf_base_addr,
 				gsi->d_port.in_request.dma);
 		gsi->d_port.in_request.buf_base_addr = NULL;
@@ -2004,7 +2006,7 @@ static void gsi_free_trb_buffer(struct f_gsi *gsi)
 			gsi->d_port.out_request.buf_base_addr) {
 		len = gsi->d_port.out_request.buf_len *
 			gsi->d_port.out_request.num_bufs;
-		dma_free_coherent(gsi->d_port.gadget->dev.parent, len,
+		dma_free_coherent(gsi->d_port.gadget->dev.parent->parent, len,
 			gsi->d_port.out_request.buf_base_addr,
 			gsi->d_port.out_request.dma);
 		gsi->d_port.out_request.buf_base_addr = NULL;
@@ -2014,7 +2016,7 @@ static void gsi_free_trb_buffer(struct f_gsi *gsi)
 			gsi->d_port.in_request.buf_base_addr) {
 		len = gsi->d_port.in_request.buf_len *
 			gsi->d_port.in_request.num_bufs;
-		dma_free_coherent(gsi->d_port.gadget->dev.parent, len,
+		dma_free_coherent(gsi->d_port.gadget->dev.parent->parent, len,
 			gsi->d_port.in_request.buf_base_addr,
 			gsi->d_port.in_request.dma);
 		gsi->d_port.in_request.buf_base_addr = NULL;
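
The TRB buffer hunks move both the coherent allocation and the matching free one level up the device tree: with dev = gsi->d_port.gadget->dev.parent, allocation now targets dev->parent, and the frees use gadget->dev.parent->parent to stay on the same struct device. The assumed hierarchy (names illustrative):

#include <linux/device.h>
#include <linux/usb/gadget.h>

/* Assumed layout for this target:
 *
 *   dwc3-msm glue (platform dev)  <- gadget->dev.parent->parent, DMA-capable
 *     `-- dwc3 core device        <- gadget->dev.parent
 *           `-- UDC gadget device <- gadget->dev
 *
 * dma_alloc_coherent() and dma_free_coherent() must be given the same
 * device, hence both paths resolve to the topmost one.
 */
static struct device *gsi_dma_dev(struct usb_gadget *gadget)
{
	return gadget->dev.parent->parent;
}
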
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
new file mode 100644
index 0000000..a8e7092
--- /dev/null
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -0,0 +1,1580 @@
+/*
+ * f_qc_rndis.c -- RNDIS link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ *			Author: Michal Nazarewicz (mina86@mina86.com)
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include <linux/atomic.h>
+
+#include "u_ether.h"
+#include "rndis.h"
+#include "u_data_ipa.h"
+#include <linux/rndis_ipa.h>
+#include "configfs.h"
+
+unsigned int rndis_dl_max_xfer_size = 9216;
+module_param(rndis_dl_max_xfer_size, uint, 0644);
+MODULE_PARM_DESC(rndis_dl_max_xfer_size,
+		"Max size of bus transfer to host");
+
+static struct class *rndis_class;
+static dev_t rndis_dev;
+static DEFINE_IDA(chardev_ida);
+
+/*
+ * This function is an RNDIS Ethernet port -- a Microsoft protocol that's
+ * been promoted instead of the standard CDC Ethernet.  The published RNDIS
+ * spec is ambiguous, incomplete, and needlessly complex.  Variants such as
+ * ActiveSync have even worse status in terms of specification.
+ *
+ * In short:  it's a protocol controlled by (and for) Microsoft, not for an
+ * Open ecosystem or markets.  Linux supports it *only* because Microsoft
+ * doesn't support the CDC Ethernet standard.
+ *
+ * The RNDIS data transfer model is complex, with multiple Ethernet packets
+ * per USB message, and out of band data.  The control model is built around
+ * what's essentially an "RNDIS RPC" protocol.  It's all wrapped in a CDC ACM
+ * (modem, not Ethernet) veneer, with those ACM descriptors being entirely
+ * useless (they're ignored).  RNDIS expects to be the only function in its
+ * configuration, so it's no real help if you need composite devices; and
+ * it expects to be the first configuration too.
+ *
+ * There is a single technical advantage of RNDIS over CDC Ethernet, if you
+ * discount the fluff that its RPC can be made to deliver: it doesn't need
+ * a NOP altsetting for the data interface.  That lets it work on some of the
+ * "so smart it's stupid" hardware which takes over configuration changes
+ * from the software, and adds restrictions like "no altsettings".
+ *
+ * Unfortunately MSFT's RNDIS drivers are buggy.  They hang or oops, and
+ * have all sorts of contrary-to-specification oddities that can prevent
+ * them from working sanely.  Since bugfixes (or accurate specs, letting
+ * Linux work around those bugs) are unlikely to ever come from MSFT, you
+ * may want to avoid using RNDIS on purely operational grounds.
+ *
+ * Omissions from the RNDIS 1.0 specification include:
+ *
+ *   - Power management ... references data that's scattered around lots
+ *     of other documentation, which is incorrect/incomplete there too.
+ *
+ *   - There are various undocumented protocol requirements, like the need
+ *     to send garbage in some control-OUT messages.
+ *
+ *   - MS-Windows drivers sometimes emit undocumented requests.
+ *
+ * This function driver is based on the upstream RNDIS link function
+ * driver and contains MSM-specific changes.
+ */
+
+struct f_rndis_qc {
+	struct usb_function		func;
+	u8				ctrl_id, data_id;
+	u8				ethaddr[ETH_ALEN];
+	u32				vendorID;
+	u8				ul_max_pkt_per_xfer;
+	u8				pkt_alignment_factor;
+	u32				max_pkt_size;
+	const char			*manufacturer;
+	struct rndis_params		*params;
+	atomic_t			ioctl_excl;
+	atomic_t			open_excl;
+
+	struct usb_ep			*notify;
+	struct usb_request		*notify_req;
+	atomic_t			notify_count;
+	struct gadget_ipa_port		bam_port;
+	struct cdev			cdev;
+	struct device			*dev;
+	u8				port_num;
+	u16				cdc_filter;
+	bool				net_ready_trigger;
+};
+
+static struct ipa_usb_init_params rndis_ipa_params;
+static spinlock_t rndis_lock;
+static bool rndis_ipa_supported;
+static void rndis_qc_open(struct f_rndis_qc *rndis);
+
+static inline struct f_rndis_qc *func_to_rndis_qc(struct usb_function *f)
+{
+	return container_of(f, struct f_rndis_qc, func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static unsigned int rndis_qc_bitrate(struct usb_gadget *g)
+{
+	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+		return 13 * 1024 * 8 * 1000 * 8;
+	else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return 13 * 512 * 8 * 1000 * 8;
+	else
+		return 19 * 64 * 1 * 1000 * 8;
+}
+
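
The constants in rndis_qc_bitrate() are packets-per-service-interval x bytes-per-packet x intervals-per-second x bits-per-byte. Expanded, with the arithmetic written out:

#include <stdio.h>

int main(void)
{
	/* super speed: 13 bulk packets of 1024 B per 125 us interval,
	 * 8 * 1000 intervals per second */
	unsigned long ss = 13UL * 1024 * 8 * 1000 * 8;	/* 851,968,000 bps */

	/* high speed: 13 packets of 512 B per microframe, 8 uframes/ms */
	unsigned long hs = 13UL * 512 * 8 * 1000 * 8;	/* 425,984,000 bps */

	/* full speed: 19 packets of 64 B per 1 ms frame */
	unsigned long fs = 19UL * 64 * 1 * 1000 * 8;	/* 9,728,000 bps */

	printf("ss=%lu hs=%lu fs=%lu\n", ss, hs, fs);
	return 0;
}
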
+/*-------------------------------------------------------------------------*/
+
+#define RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC	5	/* 1 << 5 == 32 msec */
+#define RNDIS_QC_STATUS_BYTECOUNT		8	/* 8 bytes data */
+
+/* currently only one rndis instance is supported - port
+ * index 0.
+ */
+#define RNDIS_QC_NO_PORTS				1
+#define RNDIS_QC_ACTIVE_PORT				0
+
+/* default max packets per transfer value */
+#define DEFAULT_MAX_PKT_PER_XFER			15
+
+/* default pkt alignment factor */
+#define DEFAULT_PKT_ALIGNMENT_FACTOR			4
+
+#define RNDIS_QC_IOCTL_MAGIC		'i'
+#define RNDIS_QC_GET_MAX_PKT_PER_XFER   _IOR(RNDIS_QC_IOCTL_MAGIC, 1, u8)
+#define RNDIS_QC_GET_MAX_PKT_SIZE	_IOR(RNDIS_QC_IOCTL_MAGIC, 2, u32)
+
+
+/* interface descriptor: supports "Wireless" RNDIS; auto-detected by Windows */
+static struct usb_interface_descriptor rndis_qc_control_intf = {
+	.bLength =		sizeof(rndis_qc_control_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	/* status endpoint is optional; this could be patched later */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_WIRELESS_CONTROLLER,
+	.bInterfaceSubClass =   0x01,
+	.bInterfaceProtocol =   0x03,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc rndis_qc_header_desc = {
+	.bLength =		sizeof(rndis_qc_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor rndis_qc_call_mgmt_descriptor = {
+	.bLength =		sizeof(rndis_qc_call_mgmt_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,
+
+	.bmCapabilities =	0x00,
+	.bDataInterface =	0x01,
+};
+
+static struct usb_cdc_acm_descriptor rndis_qc_acm_descriptor = {
+	.bLength =		sizeof(rndis_qc_acm_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
+
+	.bmCapabilities =	0x00,
+};
+
+static struct usb_cdc_union_desc rndis_qc_union_desc = {
+	.bLength =		sizeof(rndis_qc_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+/* the data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor rndis_qc_data_intf = {
+	.bLength =		sizeof(rndis_qc_data_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+
+/*  Supports "Wireless" RNDIS; auto-detected by Windows */
+static struct usb_interface_assoc_descriptor
+rndis_qc_iad_descriptor = {
+	.bLength =		sizeof(rndis_qc_iad_descriptor),
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+	.bFirstInterface =	0, /* XXX, hardcoded */
+	.bInterfaceCount =	2, /* control + data */
+	.bFunctionClass =	USB_CLASS_WIRELESS_CONTROLLER,
+	.bFunctionSubClass =	0x01,
+	.bFunctionProtocol =	0x03,
+	/* .iFunction = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+	.bInterval =		1 << RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *eth_qc_fs_function[] = {
+	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_qc_control_intf,
+	(struct usb_descriptor_header *) &rndis_qc_header_desc,
+	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_union_desc,
+	(struct usb_descriptor_header *) &rndis_qc_fs_notify_desc,
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_qc_data_intf,
+	(struct usb_descriptor_header *) &rndis_qc_fs_in_desc,
+	(struct usb_descriptor_header *) &rndis_qc_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+	.bInterval =		RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor rndis_qc_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *eth_qc_hs_function[] = {
+	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_qc_control_intf,
+	(struct usb_descriptor_header *) &rndis_qc_header_desc,
+	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_union_desc,
+	(struct usb_descriptor_header *) &rndis_qc_hs_notify_desc,
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_qc_data_intf,
+	(struct usb_descriptor_header *) &rndis_qc_hs_in_desc,
+	(struct usb_descriptor_header *) &rndis_qc_hs_out_desc,
+	NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_ss_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+	.bInterval =		RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ss_intr_comp_desc = {
+	.bLength =		sizeof(ss_intr_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_intr_comp_desc = {
+	.bLength =		sizeof(rndis_qc_ss_intr_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
+	.bLength =		sizeof(ss_bulk_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_bulk_comp_desc = {
+	.bLength =		sizeof(rndis_qc_ss_bulk_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *eth_qc_ss_function[] = {
+	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_qc_control_intf,
+	(struct usb_descriptor_header *) &rndis_qc_header_desc,
+	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_union_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_notify_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_intr_comp_desc,
+
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_qc_data_intf,
+	(struct usb_descriptor_header *) &rndis_qc_ss_in_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_out_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string rndis_qc_string_defs[] = {
+	[0].s = "RNDIS Communications Control",
+	[1].s = "RNDIS Ethernet Data",
+	[2].s = "RNDIS",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rndis_qc_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		rndis_qc_string_defs,
+};
+
+static struct usb_gadget_strings *rndis_qc_strings[] = {
+	&rndis_qc_string_table,
+	NULL,
+};
+
+struct f_rndis_qc *_rndis_qc;
+
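+/*
+ * Trylock built on an atomic counter: the first caller takes the count from
+ * 0 to 1 and owns the device; any concurrent caller sees a larger value,
+ * undoes its increment, and gets -EBUSY.
+ */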
+static inline int rndis_qc_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1)
+		return 0;
+
+	atomic_dec(excl);
+	return -EBUSY;
+}
+
+static inline void rndis_qc_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void rndis_qc_response_available(void *_rndis)
+{
+	struct f_rndis_qc			*rndis = _rndis;
+	struct usb_request		*req = rndis->notify_req;
+	__le32				*data = req->buf;
+	int				status;
+
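+	/* Coalesce notifications: only the 0 -> 1 transition queues one; while
+	 * it is in flight, rndis_qc_response_complete() re-queues until the
+	 * count drains back to zero.
+	 */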
+	if (atomic_inc_return(&rndis->notify_count) != 1)
+		return;
+
+	if (!rndis->notify->driver_data) {
+		atomic_dec(&rndis->notify_count); /* don't leak the count */
+		return;
+	}
+
+	/* Send RNDIS RESPONSE_AVAILABLE notification; a
+	 * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too
+	 *
+	 * This is the only notification defined by RNDIS.
+	 */
+	data[0] = cpu_to_le32(1);
+	data[1] = cpu_to_le32(0);
+
+	status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+	if (status) {
+		atomic_dec(&rndis->notify_count);
+		pr_info("notify/0 --> %d\n", status);
+	}
+}
+
+static void rndis_qc_response_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	struct f_rndis_qc		*rndis;
+	int				status = req->status;
+	struct usb_composite_dev	*cdev;
+	struct usb_ep *notify_ep;
+
+	spin_lock(&rndis_lock);
+	rndis = _rndis_qc;
+	if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
+		spin_unlock(&rndis_lock);
+		return;
+	}
+
+	if (!rndis->func.config || !rndis->func.config->cdev) {
+		pr_err("%s(): cdev or config is NULL.\n", __func__);
+		spin_unlock(&rndis_lock);
+		return;
+	}
+
+	cdev = rndis->func.config->cdev;
+
+	/* after TX:
+	 *  - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
+	 *  - RNDIS_RESPONSE_AVAILABLE (status/irq)
+	 */
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&rndis->notify_count, 0);
+		goto out;
+	default:
+		pr_info("RNDIS %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		/* FALLTHROUGH */
+	case 0:
+		if (ep != rndis->notify)
+			goto out;
+
+		/* handle multiple pending RNDIS_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&rndis->notify_count))
+			goto out;
+		notify_ep = rndis->notify;
+		spin_unlock(&rndis_lock);
+		status = usb_ep_queue(notify_ep, req, GFP_ATOMIC);
+		if (status) {
+			spin_lock(&rndis_lock);
+			if (!_rndis_qc)
+				goto out;
+			atomic_dec(&_rndis_qc->notify_count);
+			DBG(cdev, "notify/1 --> %d\n", status);
+			spin_unlock(&rndis_lock);
+		}
+	}
+
+	return;
+
+out:
+	spin_unlock(&rndis_lock);
+}
+
+static void rndis_qc_command_complete(struct usb_ep *ep,
+							struct usb_request *req)
+{
+	struct f_rndis_qc		*rndis;
+	int				status;
+	rndis_init_msg_type		*buf;
+	u32		ul_max_xfer_size, dl_max_xfer_size;
+
+	if (req->status != 0) {
+		pr_err("%s: RNDIS command completion error %d\n",
+				__func__, req->status);
+		return;
+	}
+
+	spin_lock(&rndis_lock);
+	rndis = _rndis_qc;
+	if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
+		spin_unlock(&rndis_lock);
+		return;
+	}
+
+	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
+	status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
+	if (status < 0)
+		pr_err("RNDIS command error %d, %d/%d\n",
+			status, req->actual, req->length);
+
+	buf = (rndis_init_msg_type *)req->buf;
+
+	if (buf->MessageType == RNDIS_MSG_INIT) {
+		ul_max_xfer_size = rndis_get_ul_max_xfer_size(rndis->params);
+		ipa_data_set_ul_max_xfer_size(ul_max_xfer_size);
+		/*
+		 * For consistent data throughput from IPA, the aggregation
+		 * byte limit must be fine-tuned to 7KB. The RNDIS IPA driver
+		 * uses this value to calculate the aggregation byte limit and
+		 * to program the IPA hardware accordingly. The host offers
+		 * 8KB or 16KB as the max transfer size, so take the minimum
+		 * of the host-provided value and our optimum transfer size to
+		 * arrive at the 7KB aggregation byte limit.
+		 */
+		if (rndis_dl_max_xfer_size)
+			dl_max_xfer_size = min_t(u32, rndis_dl_max_xfer_size,
+				rndis_get_dl_max_xfer_size(rndis->params));
+		else
+			dl_max_xfer_size =
+				rndis_get_dl_max_xfer_size(rndis->params);
+		ipa_data_set_dl_max_xfer_size(dl_max_xfer_size);
+	}
+	spin_unlock(&rndis_lock);
+}
+
+static int
+rndis_qc_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
+	pr_debug("%s: Enter\n", __func__);
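+	/* Dispatch on a composite (bRequestType << 8 | bRequest) key, the
+	 * usual idiom in gadget setup() handlers.
+	 */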
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* RNDIS uses the CDC command encapsulation mechanism to implement
+	 * an RPC scheme, with much getting/setting of attributes by OID.
+	 */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		/* read the request; process it later */
+		value = w_length;
+		req->complete = rndis_qc_command_complete;
+		/* later, rndis_response_available() sends a notification */
+		break;
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		else {
+			u8 *buf;
+			u32 n;
+
+			/* return the result */
+			buf = rndis_get_next_response(rndis->params, &n);
+			if (buf) {
+				memcpy(req->buf, buf, n);
+				req->complete = rndis_qc_response_complete;
+				rndis_free_response(rndis->params, buf);
+				value = n;
+			}
+			/* else stalls ... spec says to avoid that */
+		}
+		break;
+
+	default:
+invalid:
+		VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->context = rndis;
+		req->zero = (value < w_length);
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("rndis response on err %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+struct net_device *rndis_qc_get_net(const char *netname)
+{
+	struct net_device *net_dev;
+
+	net_dev = dev_get_by_name(&init_net, netname);
+	if (!net_dev)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Decrement net_dev refcount as it was incremented in
+	 * dev_get_by_name().
+	 */
+	dev_put(net_dev);
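+	/* The pointer is returned without a held reference; this assumes the
+	 * rndis0 netdev (owned by the RNDIS IPA driver) stays registered for
+	 * as long as the gadget function is bound.
+	 */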
+	return net_dev;
+}
+
+static int rndis_qc_set_alt(struct usb_function *f, unsigned int intf,
+			unsigned int alt)
+{
+	struct f_rndis_qc	 *rndis = func_to_rndis_qc(f);
+	struct f_rndis_qc_opts *opts;
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int src_connection_idx;
+	int dst_connection_idx;
+	enum usb_ctrl usb_bam_type;
+
+	/* we know alt == 0 */
+
+	opts = container_of(f->fi, struct f_rndis_qc_opts, func_inst);
+	if (intf == rndis->ctrl_id) {
+		if (rndis->notify->driver_data) {
+			VDBG(cdev, "reset rndis control %d\n", intf);
+			usb_ep_disable(rndis->notify);
+		}
+		if (!rndis->notify->desc) {
+			VDBG(cdev, "init rndis ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
+				goto fail;
+		}
+		usb_ep_enable(rndis->notify);
+		rndis->notify->driver_data = rndis;
+
+	} else if (intf == rndis->data_id) {
+		struct net_device	*net;
+
+		rndis->net_ready_trigger = false;
+		if (rndis->bam_port.in->driver_data) {
+			DBG(cdev, "reset rndis\n");
+			/* bam_port is needed for disconnecting the BAM data
+			 * path. Only after the BAM data path is disconnected,
+			 * we can disconnect the port from the network layer.
+			 */
+			ipa_data_disconnect(&rndis->bam_port,
+						USB_IPA_FUNC_RNDIS);
+		}
+
+		if (!rndis->bam_port.in->desc || !rndis->bam_port.out->desc) {
+			DBG(cdev, "init rndis\n");
+			if (config_ep_by_speed(cdev->gadget, f,
+					       rndis->bam_port.in) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       rndis->bam_port.out)) {
+				rndis->bam_port.in->desc = NULL;
+				rndis->bam_port.out->desc = NULL;
+				goto fail;
+			}
+		}
+
+		/* RNDIS should be in the "RNDIS uninitialized" state,
+		 * either never activated or after rndis_uninit().
+		 *
+		 * We don't want data to flow here until a nonzero packet
+		 * filter is set, at which point it enters "RNDIS data
+		 * initialized" state ... but we do want the endpoints
+		 * to be activated.  It's a strange little state.
+		 *
+		 * REVISIT the RNDIS gadget code has done this wrong for a
+		 * very long time.  We need another call to the link layer
+		 * code -- gether_updown(...bool) maybe -- to do it right.
+		 */
+		rndis->cdc_filter = 0;
+
+		rndis->bam_port.cdev = cdev;
+		rndis->bam_port.func = &rndis->func;
+		ipa_data_port_select(USB_IPA_FUNC_RNDIS);
+		usb_bam_type = usb_bam_get_bam_type(cdev->gadget->name);
+
+		src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+			IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
+			rndis->port_num);
+		dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+			IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
+			rndis->port_num);
+		if (src_connection_idx < 0 || dst_connection_idx < 0) {
+			pr_err("%s: usb_bam_get_connection_idx failed\n",
+				__func__);
+			return -EINVAL;
+		}
+		if (ipa_data_connect(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+				src_connection_idx, dst_connection_idx))
+			goto fail;
+
+		DBG(cdev, "RNDIS RX/TX early activation ...\n");
+		rndis_qc_open(rndis);
+		net = rndis_qc_get_net("rndis0");
+		if (IS_ERR(net))
+			return PTR_ERR(net);
+		opts->net = net;
+
+		rndis_set_param_dev(rndis->params, net,
+				&rndis->cdc_filter);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static void rndis_qc_disable(struct usb_function *f)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	unsigned long flags;
+
+	if (!rndis->notify->driver_data)
+		return;
+
+	DBG(cdev, "rndis deactivated\n");
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	rndis_uninit(rndis->params);
+	spin_unlock_irqrestore(&rndis_lock, flags);
+	ipa_data_disconnect(&rndis->bam_port, USB_IPA_FUNC_RNDIS);
+
+	msm_ep_unconfig(rndis->bam_port.out);
+	msm_ep_unconfig(rndis->bam_port.in);
+	usb_ep_disable(rndis->notify);
+	rndis->notify->driver_data = NULL;
+}
+
+static void rndis_qc_suspend(struct usb_function *f)
+{
+	struct f_rndis_qc	*rndis = func_to_rndis_qc(f);
+	bool remote_wakeup_allowed;
+
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	pr_info("%s(): start rndis suspend: remote_wakeup_allowed:%d\n",
+					__func__, remote_wakeup_allowed);
+
+	if (!remote_wakeup_allowed) {
+		/* This is required because the Linux host-side RNDIS driver
+		 * doesn't send RNDIS_MESSAGE_PACKET_FILTER before suspending
+		 * the USB bus, so perform the same operations explicitly here.
+		 * On Windows the RNDIS state machine has already been updated
+		 * on receipt of the PACKET_FILTER message.
+		 */
+		rndis_flow_control(rndis->params, true);
+		pr_debug("%s(): Disconnecting\n", __func__);
+	}
+
+	ipa_data_suspend(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+			remote_wakeup_allowed);
+	pr_debug("rndis suspended\n");
+}
+
+static void rndis_qc_resume(struct usb_function *f)
+{
+	struct f_rndis_qc	*rndis = func_to_rndis_qc(f);
+	bool remote_wakeup_allowed;
+
+	pr_debug("%s: rndis resumed\n", __func__);
+
+	/* Nothing to do if DATA interface wasn't initialized */
+	if (!rndis->bam_port.cdev) {
+		pr_debug("data interface was not up\n");
+		return;
+	}
+
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	ipa_data_resume(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+				remote_wakeup_allowed);
+
+	if (!remote_wakeup_allowed) {
+		rndis_qc_open(rndis);
+		/*
+		 * The Linux host doesn't send RNDIS_MSG_INIT or set a
+		 * non-zero RNDIS_MESSAGE_PACKET_FILTER after a bus resume,
+		 * so trigger the USB IPA transfer functionality explicitly
+		 * here. For a Windows host this case is already handled by
+		 * the RNDIS state machine.
+		 */
+		rndis_flow_control(rndis->params, false);
+	}
+
+	pr_debug("%s: RNDIS resume completed\n", __func__);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * This isn't quite the same mechanism as CDC Ethernet, since the
+ * notification scheme passes less data, but the same set of link
+ * states must be tested.  A key difference is that altsettings are
+ * not used to tell whether the link should send packets or not.
+ */
+
+static void rndis_qc_open(struct f_rndis_qc *rndis)
+{
+	struct usb_composite_dev *cdev = rndis->func.config->cdev;
+
+	DBG(cdev, "%s\n", __func__);
+
+	rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3,
+				rndis_qc_bitrate(cdev->gadget) / 100);
+	rndis_signal_connect(rndis->params);
+}
+
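+/*
+ * Flow-control hook handed to rndis_register() below: "enable" pauses the
+ * IPA data path, "disable" resumes it.
+ */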
+void ipa_data_flow_control_enable(bool enable, struct rndis_params *param)
+{
+	if (enable)
+		ipa_data_stop_rndis_ipa(USB_IPA_FUNC_RNDIS);
+	else
+		ipa_data_start_rndis_ipa(USB_IPA_FUNC_RNDIS);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+static int
+rndis_qc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+	struct rndis_params		*params;
+	int			status;
+	struct usb_ep		*ep;
+
+	/* maybe allocate device-global string IDs */
+	if (rndis_qc_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_qc_string_defs[0].id = status;
+		rndis_qc_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_qc_string_defs[1].id = status;
+		rndis_qc_data_intf.iInterface = status;
+
+		/* IAD iFunction label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_qc_string_defs[2].id = status;
+		rndis_qc_iad_descriptor.iFunction = status;
+	}
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->ctrl_id = status;
+	rndis_qc_iad_descriptor.bFirstInterface = status;
+
+	rndis_qc_control_intf.bInterfaceNumber = status;
+	rndis_qc_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->data_id = status;
+
+	rndis_qc_data_intf.bInterfaceNumber = status;
+	rndis_qc_union_desc.bSlaveInterface0 = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_in_desc);
+	if (!ep)
+		goto fail;
+	rndis->bam_port.in = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_out_desc);
+	if (!ep)
+		goto fail;
+	rndis->bam_port.out = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* NOTE:  a status/notification endpoint is, strictly speaking,
+	 * optional.  We don't treat it that way though!  It's simpler,
+	 * and some newer profiles don't treat it as optional.
+	 */
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_notify_desc);
+	if (!ep)
+		goto fail;
+	rndis->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!rndis->notify_req)
+		goto fail;
+	rndis->notify_req->buf = kmalloc(RNDIS_QC_STATUS_BYTECOUNT, GFP_KERNEL);
+	if (!rndis->notify_req->buf)
+		goto fail;
+	rndis->notify_req->length = RNDIS_QC_STATUS_BYTECOUNT;
+	rndis->notify_req->context = rndis;
+	rndis->notify_req->complete = rndis_qc_response_complete;
+
+	/* copy descriptors, and track endpoint copies */
+	f->fs_descriptors = usb_copy_descriptors(eth_qc_fs_function);
+	if (!f->fs_descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		rndis_qc_hs_in_desc.bEndpointAddress =
+				rndis_qc_fs_in_desc.bEndpointAddress;
+		rndis_qc_hs_out_desc.bEndpointAddress =
+				rndis_qc_fs_out_desc.bEndpointAddress;
+		rndis_qc_hs_notify_desc.bEndpointAddress =
+				rndis_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(eth_qc_hs_function);
+
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		rndis_qc_ss_in_desc.bEndpointAddress =
+				rndis_qc_fs_in_desc.bEndpointAddress;
+		rndis_qc_ss_out_desc.bEndpointAddress =
+				rndis_qc_fs_out_desc.bEndpointAddress;
+		rndis_qc_ss_notify_desc.bEndpointAddress =
+				rndis_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(eth_qc_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	params = rndis_register(rndis_qc_response_available, rndis,
+			ipa_data_flow_control_enable);
+	if (IS_ERR(params)) {
+		status = PTR_ERR(params);
+		goto fail;
+	}
+	rndis->params = params;
+
+	rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3, 0);
+	rndis_set_host_mac(rndis->params, rndis->ethaddr);
+
+	if (rndis->manufacturer && rndis->vendorID &&
+		rndis_set_param_vendor(rndis->params, rndis->vendorID,
+			rndis->manufacturer))
+		goto fail;
+
+	pr_debug("%s(): max_pkt_per_xfer:%d\n", __func__,
+				rndis->ul_max_pkt_per_xfer);
+	rndis_set_max_pkt_xfer(rndis->params, rndis->ul_max_pkt_per_xfer);
+
+	/* In case of aggregated packets the QC device will request
+	 * alignment to 4 bytes (2^2).
+	 */
+	pr_debug("%s(): pkt_alignment_factor:%d\n", __func__,
+				rndis->pkt_alignment_factor);
+	rndis_set_pkt_alignment_factor(rndis->params,
+				rndis->pkt_alignment_factor);
+
+	/* NOTE:  all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			rndis->bam_port.in->name, rndis->bam_port.out->name,
+			rndis->notify->name);
+	return 0;
+
+fail:
+	if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget) && f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->fs_descriptors)
+		usb_free_descriptors(f->fs_descriptors);
+
+	if (rndis->notify_req) {
+		kfree(rndis->notify_req->buf);
+		usb_ep_free_request(rndis->notify, rndis->notify_req);
+	}
+
+	/* we might as well release our claims on endpoints */
+	if (rndis->notify)
+		rndis->notify->driver_data = NULL;
+	if (rndis->bam_port.out->desc)
+		rndis->bam_port.out->driver_data = NULL;
+	if (rndis->bam_port.in->desc)
+		rndis->bam_port.in->driver_data = NULL;
+
+	pr_err("%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+static void rndis_qc_free(struct usb_function *f)
+{
+	struct f_rndis_qc_opts *opts;
+
+	opts = container_of(f->fi, struct f_rndis_qc_opts, func_inst);
+	opts->refcnt--;
+}
+
+static void
+rndis_qc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+
+	pr_debug("rndis_qc_unbind: free\n");
+	rndis_deregister(rndis->params);
+
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->fs_descriptors);
+
+	kfree(rndis->notify_req->buf);
+	usb_ep_free_request(rndis->notify, rndis->notify_req);
+
+	/*
+	 * call flush_workqueue to make sure that any pending
+	 * disconnect_work() from u_bam_data.c file is being
+	 * flushed before calling this rndis_ipa_cleanup API
+	 * as rndis ipa disconnect API is required to be
+	 * called before this.
+	 */
+	ipa_data_flush_workqueue();
+	rndis_ipa_cleanup(rndis_ipa_params.private);
+	rndis_ipa_supported = false;
+
+}
+
+void rndis_ipa_reset_trigger(void)
+{
+	struct f_rndis_qc *rndis;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	rndis = _rndis_qc;
+	if (!rndis) {
+		pr_err("%s: No RNDIS instance\n", __func__);
+		spin_unlock_irqrestore(&rndis_lock, flags);
+		return;
+	}
+
+	rndis->net_ready_trigger = false;
+	spin_unlock_irqrestore(&rndis_lock, flags);
+}
+
+/*
+ * Callback let RNDIS_IPA trigger us when network interface is up
+ * and userspace is ready to answer DHCP requests
+ */
+void rndis_net_ready_notify(void)
+{
+	struct f_rndis_qc *rndis;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	rndis = _rndis_qc;
+	if (!rndis) {
+		pr_err("%s: No RNDIS instance", __func__);
+		spin_unlock_irqrestore(&rndis_lock, flags);
+		return;
+	}
+	if (rndis->net_ready_trigger) {
+		pr_err("%s: Already triggered", __func__);
+		spin_unlock_irqrestore(&rndis_lock, flags);
+		return;
+	}
+
+	pr_debug("%s: Set net_ready_trigger", __func__);
+	rndis->net_ready_trigger = true;
+	spin_unlock_irqrestore(&rndis_lock, flags);
+	ipa_data_start_rx_tx(USB_IPA_FUNC_RNDIS);
+}
+
+/**
+ * rndis_qc_bind_config - add RNDIS network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host side
+ *	of the link was recorded
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_setup().  Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+
+static struct
+usb_function *rndis_qc_bind_config_vendor(struct usb_function_instance *fi,
+				u32 vendorID, const char *manufacturer,
+				u8 max_pkt_per_xfer, u8 pkt_alignment_factor)
+{
+	struct f_rndis_qc_opts *opts = container_of(fi,
+				struct f_rndis_qc_opts, func_inst);
+	struct f_rndis_qc	*rndis;
+	int		status;
+
+	/* allocate and initialize one new instance */
+	status = -ENOMEM;
+
+	opts->refcnt++;
+	rndis = opts->rndis;
+
+	rndis->vendorID = opts->vendor_id;
+	rndis->manufacturer = opts->manufacturer;
+	/* export host's Ethernet address in CDC format */
+	random_ether_addr(rndis_ipa_params.host_ethaddr);
+	random_ether_addr(rndis_ipa_params.device_ethaddr);
+	pr_debug("setting host_ethaddr=%pM, device_ethaddr=%pM\n",
+		rndis_ipa_params.host_ethaddr,
+		rndis_ipa_params.device_ethaddr);
+	rndis_ipa_supported = true;
+	ether_addr_copy(rndis->ethaddr, rndis_ipa_params.host_ethaddr);
+	rndis_ipa_params.device_ready_notify = rndis_net_ready_notify;
+
+	/* if max_pkt_per_xfer was not configured set to default value */
+	rndis->ul_max_pkt_per_xfer =
+			max_pkt_per_xfer ? max_pkt_per_xfer :
+			DEFAULT_MAX_PKT_PER_XFER;
+	ipa_data_set_ul_max_pkt_num(rndis->ul_max_pkt_per_xfer);
+
+	/*
+	 * With no RNDIS aggregation (one packet per transfer) and no
+	 * alignment specified, use an alignment factor of zero. For
+	 * aggregated RNDIS transfers, the max packets per transfer and the
+	 * alignment factor fall back to their defaults (alignment factor 2)
+	 * unless set explicitly. This avoids any need to write to sysfs for
+	 * the default RNDIS aggregation setup; values written to both sysfs
+	 * entries always override the defaults.
+	 */
+	if ((rndis->pkt_alignment_factor == 0) &&
+			(rndis->ul_max_pkt_per_xfer == 1))
+		rndis->pkt_alignment_factor = 0;
+	else
+		rndis->pkt_alignment_factor = pkt_alignment_factor ?
+				pkt_alignment_factor :
+				DEFAULT_PKT_ALIGNMENT_FACTOR;
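+	/* Per the defaults above: aggregated transfers with no explicit sysfs
+	 * override end up with factor 2, i.e. 2^2 = 4-byte alignment.
+	 */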
+
+	/* RNDIS activates when the host changes this filter */
+	rndis->cdc_filter = 0;
+
+	rndis->func.name = "rndis";
+	rndis->func.strings = rndis_qc_strings;
+	/* descriptors are per-instance copies */
+	rndis->func.bind = rndis_qc_bind;
+	rndis->func.unbind = rndis_qc_unbind;
+	rndis->func.set_alt = rndis_qc_set_alt;
+	rndis->func.setup = rndis_qc_setup;
+	rndis->func.disable = rndis_qc_disable;
+	rndis->func.suspend = rndis_qc_suspend;
+	rndis->func.resume = rndis_qc_resume;
+	rndis->func.free_func = rndis_qc_free;
+
+	status = rndis_ipa_init(&rndis_ipa_params);
+	if (status) {
+		pr_err("%s: failed to init rndis_ipa\n", __func__);
+		goto fail;
+	}
+
+	_rndis_qc = rndis;
+
+	return &rndis->func;
+fail:
+	kfree(rndis);
+	_rndis_qc = NULL;
+	return ERR_PTR(status);
+}
+
+static struct usb_function *qcrndis_alloc(struct usb_function_instance *fi)
+{
+	return rndis_qc_bind_config_vendor(fi, 0, NULL, 0, 0);
+}
+
+static int rndis_qc_open_dev(struct inode *ip, struct file *fp)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	pr_info("Open rndis QC driver\n");
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not created yet\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	if (rndis_qc_lock(&_rndis_qc->open_excl)) {
+		pr_err("Already opened\n");
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	fp->private_data = _rndis_qc;
+fail:
+	spin_unlock_irqrestore(&rndis_lock, flags);
+
+	if (!ret)
+		pr_info("rndis QC file opened\n");
+
+	return ret;
+}
+
+static int rndis_qc_release_dev(struct inode *ip, struct file *fp)
+{
+	unsigned long flags;
+
+	pr_info("Close rndis QC file\n");
+
+	spin_lock_irqsave(&rndis_lock, flags);
+
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not present\n");
+		spin_unlock_irqrestore(&rndis_lock, flags);
+		return -ENODEV;
+	}
+	rndis_qc_unlock(&_rndis_qc->open_excl);
+	spin_unlock_irqrestore(&rndis_lock, flags);
+	return 0;
+}
+
+static long rndis_qc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	u8 qc_max_pkt_per_xfer = 0;
+	u32 qc_max_pkt_size = 0;
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not present\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	qc_max_pkt_per_xfer = _rndis_qc->ul_max_pkt_per_xfer;
+	qc_max_pkt_size = _rndis_qc->max_pkt_size;
+
+	if (rndis_qc_lock(&_rndis_qc->ioctl_excl)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	spin_unlock_irqrestore(&rndis_lock, flags);
+
+	pr_info("Received command %d\n", cmd);
+
+	switch (cmd) {
+	case RNDIS_QC_GET_MAX_PKT_PER_XFER:
+		ret = copy_to_user((void __user *)arg,
+					&qc_max_pkt_per_xfer,
+					sizeof(qc_max_pkt_per_xfer));
+		if (ret) {
+			pr_err("copying to user space failed\n");
+			ret = -EFAULT;
+		}
+		pr_info("Sent UL max packets per xfer %d\n",
+				qc_max_pkt_per_xfer);
+		break;
+	case RNDIS_QC_GET_MAX_PKT_SIZE:
+		ret = copy_to_user((void __user *)arg,
+					&qc_max_pkt_size,
+					sizeof(qc_max_pkt_size));
+		if (ret) {
+			pr_err("copying to user space failed\n");
+			ret = -EFAULT;
+		}
+		pr_debug("Sent max packet size %d\n",
+				qc_max_pkt_size);
+		break;
+	default:
+		pr_err("Unsupported IOCTL\n");
+		ret = -EINVAL;
+	}
+
+	spin_lock_irqsave(&rndis_lock, flags);
+
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not present\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	rndis_qc_unlock(&_rndis_qc->ioctl_excl);
+
+fail:
+	spin_unlock_irqrestore(&rndis_lock, flags);
+	return ret;
+}
+
+static const struct file_operations rndis_qc_fops = {
+	.owner = THIS_MODULE,
+	.open = rndis_qc_open_dev,
+	.release = rndis_qc_release_dev,
+	.unlocked_ioctl	= rndis_qc_ioctl,
+};
+
+static void qcrndis_free_inst(struct usb_function_instance *f)
+{
+	struct f_rndis_qc_opts	*opts = container_of(f,
+				struct f_rndis_qc_opts, func_inst);
+	int minor = MINOR(opts->rndis->cdev.dev);
+	unsigned long flags;
+
+	device_destroy(rndis_class, MKDEV(MAJOR(rndis_dev), minor));
+	class_destroy(rndis_class);
+	cdev_del(&opts->rndis->cdev);
+	ida_simple_remove(&chardev_ida, minor);
+	unregister_chrdev_region(rndis_dev, 1);
+
+	ipa_data_free(USB_IPA_FUNC_RNDIS);
+	spin_lock_irqsave(&rndis_lock, flags);
+	kfree(opts->rndis);
+	_rndis_qc = NULL;
+	kfree(opts);
+	spin_unlock_irqrestore(&rndis_lock, flags);
+}
+
+static int qcrndis_set_inst_name(struct usb_function_instance *fi,
+	const char *name)
+{
+	struct f_rndis_qc_opts	*opts = container_of(fi,
+				struct f_rndis_qc_opts, func_inst);
+	struct f_rndis_qc	*rndis;
+	int name_len;
+	int ret, minor;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	pr_debug("initialize rndis QC instance\n");
+	rndis = kzalloc(sizeof(*rndis), GFP_KERNEL);
+	if (!rndis) {
+		pr_err("%s: fail allocate and initialize new instance\n",
+			   __func__);
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&rndis_lock);
+	opts->rndis = rndis;
+	rndis_class = class_create(THIS_MODULE, "usbrndis");
+	if (IS_ERR(rndis_class)) {
+		ret = PTR_ERR(rndis_class);
+		pr_err("Fail to create usbrndis class\n");
+		kfree(rndis);
+		return ret;
+	}
+	ret = alloc_chrdev_region(&rndis_dev, 0, 1, "usb_rndis");
+	if (ret < 0) {
+		pr_err("Fail to allocate usb rndis char dev region\n");
+		class_destroy(rndis_class);
+		kfree(rndis);
+		return ret;
+	}
+
+	/* get a minor number */
+	minor = ida_simple_get(&chardev_ida, 0, 0, GFP_KERNEL);
+	if (minor < 0) {
+		pr_err("%s: No more minor numbers left! rc:%d\n", __func__,
+			minor);
+		ret = -ENODEV;
+		goto fail_out_of_minors;
+	}
+	rndis->dev = device_create(rndis_class, NULL,
+			MKDEV(MAJOR(rndis_dev), minor),
+			rndis, "android_rndis_qc");
+	if (IS_ERR(rndis->dev)) {
+		ret = PTR_ERR(rndis->dev);
+		pr_err("%s: device_create failed for (%d)", __func__, ret);
+		goto fail_return_minor;
+	}
+	cdev_init(&rndis->cdev, &rndis_qc_fops);
+	ret = cdev_add(&rndis->cdev, MKDEV(MAJOR(rndis_dev), minor), 1);
+	if (ret < 0) {
+		pr_err("%s: cdev_add failed for %s (%d)", __func__,
+			name, ret);
+		goto fail_cdev_add;
+	}
+
+
+	ret = ipa_data_setup(USB_IPA_FUNC_RNDIS);
+	if (ret) {
+		pr_err("bam_data_setup failed err: %d\n", ret);
+		goto fail_data_setup;
+	}
+
+	return 0;
+fail_data_setup:
+	cdev_del(&rndis->cdev);
+fail_cdev_add:
+	device_destroy(rndis_class, MKDEV(MAJOR(rndis_dev), minor));
+fail_return_minor:
+	ida_simple_remove(&chardev_ida, minor);
+fail_out_of_minors:
+	unregister_chrdev_region(rndis_dev, 1);
+	class_destroy(rndis_class);
+	kfree(rndis);
+	return ret;
+}
+
+static inline
+struct f_rndis_qc_opts *to_f_qc_rndis_opts(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct f_rndis_qc_opts,
+				func_inst.group);
+}
+
+static void qcrndis_attr_release(struct config_item *item)
+{
+	struct f_rndis_qc_opts *opts = to_f_qc_rndis_opts(item);
+
+	usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations qcrndis_item_ops = {
+	.release        = qcrndis_attr_release,
+};
+
+static struct config_item_type qcrndis_func_type = {
+	.ct_item_ops    = &qcrndis_item_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+static struct usb_function_instance *qcrndis_alloc_inst(void)
+{
+	struct f_rndis_qc_opts *opts;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return ERR_PTR(-ENOMEM);
+
+	opts->func_inst.set_inst_name = qcrndis_set_inst_name;
+	opts->func_inst.free_func_inst = qcrndis_free_inst;
+
+	config_group_init_type_name(&opts->func_inst.group, "",
+				&qcrndis_func_type);
+
+	return &opts->func_inst;
+}
+
+void *rndis_qc_get_ipa_rx_cb(void)
+{
+	return rndis_ipa_params.ipa_rx_notify;
+}
+
+void *rndis_qc_get_ipa_tx_cb(void)
+{
+	return rndis_ipa_params.ipa_tx_notify;
+}
+
+void *rndis_qc_get_ipa_priv(void)
+{
+	return rndis_ipa_params.private;
+}
+
+bool rndis_qc_get_skip_ep_config(void)
+{
+	return rndis_ipa_params.skip_ep_cfg;
+}
+
+DECLARE_USB_FUNCTION_INIT(rndis_bam, qcrndis_alloc_inst, qcrndis_alloc);
+
+static int __init usb_qcrndis_init(void)
+{
+	int ret;
+
+	ret = usb_function_register(&rndis_bamusb_func);
+	if (ret) {
+		pr_err("%s: failed to register diag %d\n", __func__, ret);
+		return ret;
+	}
+	return ret;
+}
+
+static void __exit usb_qcrndis_exit(void)
+{
+	usb_function_unregister(&rndis_bamusb_func);
+}
+
+module_init(usb_qcrndis_init);
+module_exit(usb_qcrndis_exit);
+MODULE_DESCRIPTION("USB RNDIS Function Driver");
diff --git a/drivers/usb/gadget/function/f_rmnet.c b/drivers/usb/gadget/function/f_rmnet.c
new file mode 100644
index 0000000..64532f6
--- /dev/null
+++ b/drivers/usb/gadget/function/f_rmnet.c
@@ -0,0 +1,1272 @@
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/usb_bam.h>
+#include <linux/module.h>
+
+#include "u_rmnet.h"
+#include "u_data_ipa.h"
+#include "configfs.h"
+
+#define RMNET_NOTIFY_INTERVAL	5
+#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)
+
+#define ACM_CTRL_DTR	(1 << 0)
+
+struct f_rmnet {
+	struct usb_function             func;
+	enum qti_port_type		qti_port_type;
+	enum ipa_func_type		func_type;
+	struct grmnet			port;
+	int				ifc_id;
+	atomic_t			online;
+	atomic_t			ctrl_online;
+	struct usb_composite_dev	*cdev;
+	struct gadget_ipa_port		ipa_port;
+	spinlock_t			lock;
+
+	/* usb eps*/
+	struct usb_ep			*notify;
+	struct usb_request		*notify_req;
+
+	/* control info */
+	struct list_head		cpkt_resp_q;
+	unsigned long			notify_count;
+};
+
+static struct usb_interface_descriptor rmnet_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	3,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
+};
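+/*
+ * Full speed expresses bInterval directly in ms (1 << 5 = 32 ms); the
+ * high/super speed descriptors below encode the same 32 ms period in log2
+ * microframe units, i.e. 5 + 4.
+ */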
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
+	NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
+	NULL,
+};
+
+/* Super speed support */
+static struct usb_endpoint_descriptor rmnet_ss_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_ss_notify_comp_desc = {
+	.bLength =		sizeof(rmnet_ss_notify_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+};
+
+static struct usb_endpoint_descriptor rmnet_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_ss_in_comp_desc = {
+	.bLength =		sizeof(rmnet_ss_in_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_endpoint_descriptor rmnet_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_ss_out_comp_desc = {
+	.bLength =		sizeof(rmnet_ss_out_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *rmnet_ss_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_ss_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_ss_notify_comp_desc,
+	(struct usb_descriptor_header *) &rmnet_ss_in_desc,
+	(struct usb_descriptor_header *) &rmnet_ss_in_comp_desc,
+	(struct usb_descriptor_header *) &rmnet_ss_out_desc,
+	(struct usb_descriptor_header *) &rmnet_ss_out_comp_desc,
+	NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+	[0].s = "RmNet",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+	&rmnet_string_table,
+	NULL,
+};
+
+static struct usb_interface_descriptor dpl_data_intf_desc = {
+	.bLength            =	sizeof(dpl_data_intf_desc),
+	.bDescriptorType    =	USB_DT_INTERFACE,
+	.bAlternateSetting  =   0,
+	.bNumEndpoints      =	1,
+	.bInterfaceClass    =	0xff,
+	.bInterfaceSubClass =	0xff,
+	.bInterfaceProtocol =	0xff,
+};
+
+static struct usb_endpoint_descriptor dpl_hs_data_desc = {
+	.bLength              =	 USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType      =	 USB_DT_ENDPOINT,
+	.bEndpointAddress     =	 USB_DIR_IN,
+	.bmAttributes         =	 USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize       =	 cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor dpl_ss_data_desc = {
+	.bLength              =	 USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType      =	 USB_DT_ENDPOINT,
+	.bEndpointAddress     =	 USB_DIR_IN,
+	.bmAttributes         =  USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize       =	 cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor dpl_data_ep_comp_desc = {
+	.bLength              =	 sizeof(dpl_data_ep_comp_desc),
+	.bDescriptorType      =	 USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst            =	 1,
+	.bmAttributes         =	 0,
+	.wBytesPerInterval    =	 0,
+};
+
+static struct usb_descriptor_header *dpl_hs_data_only_desc[] = {
+	(struct usb_descriptor_header *) &dpl_data_intf_desc,
+	(struct usb_descriptor_header *) &dpl_hs_data_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *dpl_ss_data_only_desc[] = {
+	(struct usb_descriptor_header *) &dpl_data_intf_desc,
+	(struct usb_descriptor_header *) &dpl_ss_data_desc,
+	(struct usb_descriptor_header *) &dpl_data_ep_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string dpl_string_defs[] = {
+	[0].s = "QDSS DATA",
+	{}, /* end of list */
+};
+
+static struct usb_gadget_strings dpl_string_table = {
+	.language =		0x0409,
+	.strings =		dpl_string_defs,
+};
+
+static struct usb_gadget_strings *dpl_strings[] = {
+	&dpl_string_table,
+	NULL,
+};
+
+static void frmnet_ctrl_response_available(struct f_rmnet *dev);
+
+/* ------- misc functions --------------------*/
+
+static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
+{
+	return container_of(f, struct f_rmnet, func);
+}
+
+static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
+{
+	return container_of(r, struct f_rmnet, port);
+}
+
+int name_to_prot(struct f_rmnet *dev, const char *name)
+{
+	if (!name)
+		goto error;
+
+	if (!strncasecmp("rmnet", name, MAX_INST_NAME_LEN)) {
+		dev->qti_port_type = QTI_PORT_RMNET;
+		dev->func_type = USB_IPA_FUNC_RMNET;
+	} else if (!strncasecmp("dpl", name, MAX_INST_NAME_LEN)) {
+		dev->qti_port_type = QTI_PORT_DPL;
+		dev->func_type = USB_IPA_FUNC_DPL;
+	} else {
+		goto error;
+	}
+	return 0;
+
+error:
+	return -EINVAL;
+}
+
+static struct usb_request *
+frmnet_alloc_req(struct usb_ep *ep, unsigned int len, gfp_t flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, flags);
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+
+	req->buf = kmalloc(len, flags);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	req->length = len;
+
+	return req;
+}
+
+void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+static struct
+rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned int len, gfp_t flags)
+{
+	struct rmnet_ctrl_pkt *pkt;
+
+	pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+	if (!pkt)
+		return ERR_PTR(-ENOMEM);
+
+	pkt->buf = kmalloc(len, flags);
+	if (!pkt->buf) {
+		kfree(pkt);
+		return ERR_PTR(-ENOMEM);
+	}
+	pkt->len = len;
+
+	return pkt;
+}
+
+static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
+{
+	kfree(pkt->buf);
+	kfree(pkt);
+}
+
+/* -------------------------------------------*/
+
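+/*
+ * The QTI control port is connected first; the IPA data path comes up only
+ * after that, and the control port is disconnected again if any data-path
+ * step fails.
+ */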
+static int gport_rmnet_connect(struct f_rmnet *dev)
+{
+	int			ret;
+	int			src_connection_idx = 0, dst_connection_idx = 0;
+	struct usb_gadget	*gadget = dev->cdev->gadget;
+	enum usb_ctrl		usb_bam_type;
+	int bam_pipe_num = (dev->qti_port_type == QTI_PORT_DPL) ? 1 : 0;
+
+	ret = gqti_ctrl_connect(&dev->port, dev->qti_port_type, dev->ifc_id);
+	if (ret) {
+		pr_err("%s: gqti_ctrl_connect failed: err:%d\n",
+			__func__, ret);
+		return ret;
+	}
+	if (dev->qti_port_type == QTI_PORT_DPL)
+		dev->port.send_encap_cmd(QTI_PORT_DPL, NULL, 0);
+	dev->ipa_port.cdev = dev->cdev;
+	ipa_data_port_select(dev->func_type);
+	usb_bam_type = usb_bam_get_bam_type(gadget->name);
+
+	if (dev->ipa_port.in) {
+		dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+			IPA_P_BAM, PEER_PERIPHERAL_TO_USB,
+			USB_BAM_DEVICE, bam_pipe_num);
+	}
+	if (dev->ipa_port.out) {
+		src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+			IPA_P_BAM, USB_TO_PEER_PERIPHERAL,
+			USB_BAM_DEVICE, bam_pipe_num);
+	}
+	if (dst_connection_idx < 0 || src_connection_idx < 0) {
+		pr_err("%s: usb_bam_get_connection_idx failed\n",
+			__func__);
+		gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
+		return -EINVAL;
+	}
+	ret = ipa_data_connect(&dev->ipa_port, dev->func_type,
+			src_connection_idx, dst_connection_idx);
+	if (ret) {
+		pr_err("%s: ipa_data_connect failed: err:%d\n",
+			__func__, ret);
+		gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
+		return ret;
+	}
+	return 0;
+}
+
+static int gport_rmnet_disconnect(struct f_rmnet *dev)
+{
+	gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
+	ipa_data_disconnect(&dev->ipa_port, dev->func_type);
+	return 0;
+}
+
+static void frmnet_free(struct usb_function *f)
+{
+	struct f_rmnet_opts *opts;
+
+	opts = container_of(f->fi, struct f_rmnet_opts, func_inst);
+	opts->refcnt--;
+}
+
+static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rmnet *dev = func_to_rmnet(f);
+	struct usb_gadget *gadget = c->cdev->gadget;
+
+	pr_debug("%s: start unbinding\nclear_desc\n", __func__);
+	if (gadget_is_superspeed(gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+
+	if (gadget_is_dualspeed(gadget) && f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+
+	if (f->fs_descriptors)
+		usb_free_descriptors(f->fs_descriptors);
+
+	if (dev->notify_req)
+		frmnet_free_req(dev->notify, dev->notify_req);
+}
+
+static void frmnet_purge_responses(struct f_rmnet *dev)
+{
+	unsigned long flags;
+	struct rmnet_ctrl_pkt *cpkt;
+
+	pr_debug("%s: Purging responses\n", __func__);
+	spin_lock_irqsave(&dev->lock, flags);
+	while (!list_empty(&dev->cpkt_resp_q)) {
+		cpkt = list_first_entry(&dev->cpkt_resp_q,
+				struct rmnet_ctrl_pkt, list);
+
+		list_del(&cpkt->list);
+		rmnet_free_ctrl_pkt(cpkt);
+	}
+	dev->notify_count = 0;
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void frmnet_suspend(struct usb_function *f)
+{
+	struct f_rmnet	*dev = func_to_rmnet(f);
+	bool	remote_wakeup_allowed;
+
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	pr_debug("%s: dev: %pK remote_wakeup: %d\n", __func__, dev,
+			remote_wakeup_allowed);
+
+	if (dev->notify) {
+		usb_ep_fifo_flush(dev->notify);
+		frmnet_purge_responses(dev);
+	}
+	ipa_data_suspend(&dev->ipa_port, dev->func_type, remote_wakeup_allowed);
+}
+
+static void frmnet_resume(struct usb_function *f)
+{
+	struct f_rmnet	*dev = func_to_rmnet(f);
+	bool	remote_wakeup_allowed;
+
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	pr_debug("%s: dev: %pK remote_wakeup: %d\n", __func__, dev,
+			remote_wakeup_allowed);
+
+	ipa_data_resume(&dev->ipa_port, dev->func_type, remote_wakeup_allowed);
+}
+
+static void frmnet_disable(struct usb_function *f)
+{
+	struct f_rmnet	*dev = func_to_rmnet(f);
+
+	pr_debug("%s: Disabling\n", __func__);
+	atomic_set(&dev->online, 0);
+	if (dev->notify) {
+		usb_ep_disable(dev->notify);
+		dev->notify->driver_data = NULL;
+		frmnet_purge_responses(dev);
+	}
+
+	gport_rmnet_disconnect(dev);
+}
+
+static int
+frmnet_set_alt(struct usb_function *f, unsigned int intf, unsigned int alt)
+{
+	struct f_rmnet			*dev = func_to_rmnet(f);
+	struct usb_composite_dev	*cdev = f->config->cdev;
+	int				ret = 0;
+
+	pr_debug("%s:dev:%pK\n", __func__, dev);
+	dev->cdev = cdev;
+	if (dev->notify) {
+		if (dev->notify->driver_data) {
+			pr_debug("%s: reset port\n", __func__);
+			usb_ep_disable(dev->notify);
+		}
+
+		ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
+		if (ret) {
+			dev->notify->desc = NULL;
+			ERROR(cdev,
+			"config_ep_by_speed failed for ep %s, result %d\n",
+					dev->notify->name, ret);
+			return ret;
+		}
+
+		ret = usb_ep_enable(dev->notify);
+		if (ret) {
+			pr_err("%s: usb ep#%s enable failed, err#%d\n",
+				__func__, dev->notify->name, ret);
+			dev->notify->desc = NULL;
+			return ret;
+		}
+
+		dev->notify->driver_data = dev;
+	}
+
+	if (dev->ipa_port.in && !dev->ipa_port.in->desc
+		&& config_ep_by_speed(cdev->gadget, f, dev->ipa_port.in)) {
+		pr_err("%s(): config_ep_by_speed failed.\n",
+				__func__);
+		dev->ipa_port.in->desc = NULL;
+		ret = -EINVAL;
+		goto err_disable_ep;
+	}
+
+	if (dev->ipa_port.out && !dev->ipa_port.out->desc
+		&& config_ep_by_speed(cdev->gadget, f, dev->ipa_port.out)) {
+		pr_err("%s(): config_ep_by_speed failed.\n",
+				__func__);
+		dev->ipa_port.out->desc = NULL;
+		ret = -EINVAL;
+		goto err_disable_ep;
+	}
+
+	ret = gport_rmnet_connect(dev);
+	if (ret) {
+		pr_err("%s(): gport_rmnet_connect fail with err:%d\n",
+				__func__, ret);
+		goto err_disable_ep;
+	}
+
+	atomic_set(&dev->online, 1);
+	/*
+	 * In case notifications were aborted, but there are
+	 * pending control packets in the response queue,
+	 * re-add the notifications.
+	 */
+	if (dev->qti_port_type == QTI_PORT_RMNET) {
+		struct list_head		*cpkt;
+
+		list_for_each(cpkt, &dev->cpkt_resp_q)
+			frmnet_ctrl_response_available(dev);
+	}
+
+	return ret;
+err_disable_ep:
+	if (dev->notify && dev->notify->driver_data)
+		usb_ep_disable(dev->notify);
+
+	return ret;
+}
+
+static void frmnet_ctrl_response_available(struct f_rmnet *dev)
+{
+	struct usb_request		*req = dev->notify_req;
+	struct usb_cdc_notification	*event;
+	unsigned long			flags;
+	int				ret;
+	struct rmnet_ctrl_pkt		*cpkt;
+
+	pr_debug("%s:dev:%pK\n", __func__, dev);
+	spin_lock_irqsave(&dev->lock, flags);
+	if (!atomic_read(&dev->online) || !req || !req->buf) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return;
+	}
+
+	if (++dev->notify_count != 1) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return;
+	}
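+	/* First pending response: build and queue the RESPONSE_AVAILABLE
+	 * notification; later responses only bump notify_count and are
+	 * re-queued from frmnet_notify_complete().
+	 */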
+
+	event = req->buf;
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+	if (ret) {
+		spin_lock_irqsave(&dev->lock, flags);
+		if (!list_empty(&dev->cpkt_resp_q)) {
+			if (dev->notify_count > 0)
+				dev->notify_count--;
+			else {
+				pr_debug("%s: Invalid notify_count=%lu to decrement\n",
+					 __func__, dev->notify_count);
+				spin_unlock_irqrestore(&dev->lock, flags);
+				return;
+			}
+			cpkt = list_first_entry(&dev->cpkt_resp_q,
+					struct rmnet_ctrl_pkt, list);
+			list_del(&cpkt->list);
+			rmnet_free_ctrl_pkt(cpkt);
+		}
+		spin_unlock_irqrestore(&dev->lock, flags);
+		pr_debug("ep enqueue error %d\n", ret);
+	}
+}
+
+static void frmnet_connect(struct grmnet *gr)
+{
+	struct f_rmnet			*dev;
+
+	if (!gr) {
+		pr_err("%s: Invalid grmnet:%pK\n", __func__, gr);
+		return;
+	}
+
+	dev = port_to_rmnet(gr);
+
+	atomic_set(&dev->ctrl_online, 1);
+}
+
+static void frmnet_disconnect(struct grmnet *gr)
+{
+	struct f_rmnet			*dev;
+	struct usb_cdc_notification	*event;
+	int				status;
+
+	if (!gr) {
+		pr_err("%s: Invalid grmnet:%pK\n", __func__, gr);
+		return;
+	}
+
+	dev = port_to_rmnet(gr);
+
+	atomic_set(&dev->ctrl_online, 0);
+
+	if (!atomic_read(&dev->online)) {
+		pr_debug("%s: nothing to do\n", __func__);
+		return;
+	}
+
+	usb_ep_fifo_flush(dev->notify);
+
+	event = dev->notify_req->buf;
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+
+	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+	if (status < 0) {
+		if (!atomic_read(&dev->online))
+			return;
+		pr_err("%s: rmnet notify ep enqueue error %d\n",
+				__func__, status);
+	}
+
+	frmnet_purge_responses(dev);
+}
+
+static int
+frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
+{
+	struct f_rmnet		*dev;
+	struct rmnet_ctrl_pkt	*cpkt;
+	unsigned long		flags;
+
+	if (!gr || !buf) {
+		pr_err("%s: Invalid grmnet/buf, grmnet:%pK buf:%pK\n",
+				__func__, gr, buf);
+		return -ENODEV;
+	}
+	cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
+	if (IS_ERR(cpkt)) {
+		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
+		return -ENOMEM;
+	}
+	memcpy(cpkt->buf, buf, len);
+	cpkt->len = len;
+
+	dev = port_to_rmnet(gr);
+
+	pr_debug("%s: dev: %pK\n", __func__, dev);
+	if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
+		rmnet_free_ctrl_pkt(cpkt);
+		return 0;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	frmnet_ctrl_response_available(dev);
+
+	return 0;
+}
+
+static void
+frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rmnet			*dev = req->context;
+
+	if (!dev) {
+		pr_err("%s: rmnet dev is null\n", __func__);
+		return;
+	}
+	pr_debug("%s: dev: %pK\n", __func__, dev);
+
+	if (dev->port.send_encap_cmd)
+		dev->port.send_encap_cmd(QTI_PORT_RMNET, req->buf, req->actual);
+}
+
+static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rmnet	*dev = req->context;
+	int	status = req->status;
+	unsigned long		flags;
+	struct rmnet_ctrl_pkt	*cpkt;
+
+	pr_debug("%s: dev: %pK\n", __func__, dev);
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		spin_lock_irqsave(&dev->lock, flags);
+		dev->notify_count = 0;
+		spin_unlock_irqrestore(&dev->lock, flags);
+		break;
+	default:
+		pr_err("rmnet notify ep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
+		if (!atomic_read(&dev->ctrl_online))
+			break;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (dev->notify_count > 0) {
+			dev->notify_count--;
+			if (dev->notify_count == 0) {
+				spin_unlock_irqrestore(&dev->lock, flags);
+				break;
+			}
+		} else {
+			pr_debug("%s: Invalid notify_count=%lu to decrement\n",
+					__func__, dev->notify_count);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
+		if (status) {
+			spin_lock_irqsave(&dev->lock, flags);
+			if (!list_empty(&dev->cpkt_resp_q)) {
+				if (dev->notify_count > 0)
+					dev->notify_count--;
+				else {
+					pr_err("%s: Invalid notify_count=%lu to decrement\n",
+						__func__, dev->notify_count);
+					spin_unlock_irqrestore(&dev->lock,
+								flags);
+					break;
+				}
+				cpkt = list_first_entry(&dev->cpkt_resp_q,
+						struct rmnet_ctrl_pkt, list);
+				list_del(&cpkt->list);
+				rmnet_free_ctrl_pkt(cpkt);
+			}
+			spin_unlock_irqrestore(&dev->lock, flags);
+			pr_debug("ep enqueue error %d\n", status);
+		}
+		break;
+	}
+}
+
+static int
+frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_rmnet			*dev = func_to_rmnet(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+	struct usb_request		*req = cdev->req;
+	u16				w_index = le16_to_cpu(ctrl->wIndex);
+	u16				w_value = le16_to_cpu(ctrl->wValue);
+	u16				w_length = le16_to_cpu(ctrl->wLength);
+	int				ret = -EOPNOTSUPP;
+
+	pr_debug("%s: dev: %pK\n", __func__, dev);
+	if (!atomic_read(&dev->online)) {
+		pr_warn("%s: usb cable is not connected\n", __func__);
+		return -ENOTCONN;
+	}
+
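+	/* dispatch on the combined (bRequestType << 8) | bRequest value */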
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		pr_debug("%s: USB_CDC_SEND_ENCAPSULATED_COMMAND\n"
+				 , __func__);
+		ret = w_length;
+		req->complete = frmnet_cmd_complete;
+		req->context = dev;
+		break;
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		pr_debug("%s: USB_CDC_GET_ENCAPSULATED_RESPONSE\n", __func__);
+		if (w_value) {
+			pr_err("%s: invalid w_value = %04x\n",
+				   __func__, w_value);
+			goto invalid;
+		} else {
+			unsigned int len;
+			struct rmnet_ctrl_pkt *cpkt;
+
+			spin_lock(&dev->lock);
+			if (list_empty(&dev->cpkt_resp_q)) {
+				pr_err("ctrl resp queue empty: ");
+				pr_err("req%02x.%02x v%04x i%04x l%d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					w_value, w_index, w_length);
+				ret = 0;
+				spin_unlock(&dev->lock);
+				goto invalid;
+			}
+
+			cpkt = list_first_entry(&dev->cpkt_resp_q,
+					struct rmnet_ctrl_pkt, list);
+			list_del(&cpkt->list);
+			spin_unlock(&dev->lock);
+
+			len = min_t(unsigned int, w_length, cpkt->len);
+			memcpy(req->buf, cpkt->buf, len);
+			ret = len;
+
+			rmnet_free_ctrl_pkt(cpkt);
+		}
+		break;
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		pr_debug("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE: DTR:%d\n",
+				__func__, w_value & ACM_CTRL_DTR ? 1 : 0);
+		if (dev->port.notify_modem) {
+			dev->port.notify_modem(&dev->port,
+				QTI_PORT_RMNET, w_value);
+		}
+		ret = 0;
+
+		break;
+	default:
+
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (ret >= 0) {
+		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
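+		/* ask for a ZLP if the response is shorter than the host requested */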
+		req->zero = (ret < w_length);
+		req->length = ret;
+		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (ret < 0)
+			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+	}
+
+	return ret;
+}
+
+static int ipa_update_function_bind_params(struct f_rmnet *dev,
+	struct usb_composite_dev *cdev, struct ipa_function_bind_info *info)
+{
+	struct usb_ep *ep;
+	struct usb_function *f = &dev->func;
+	int status;
+
+	/* maybe allocate device-global string IDs */
+	if (info->string_defs[0].id != 0)
+		goto skip_string_id_alloc;
+
+	if (info->data_str_idx >= 0 && info->data_desc) {
+		/* data interface label */
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		info->string_defs[info->data_str_idx].id = status;
+		info->data_desc->iInterface = status;
+	}
+
+skip_string_id_alloc:
+	if (info->data_desc)
+		info->data_desc->bInterfaceNumber = dev->ifc_id;
+
+	if (info->fs_in_desc) {
+		ep = usb_ep_autoconfig(cdev->gadget, info->fs_in_desc);
+		if (!ep) {
+			pr_err("%s: usb epin autoconfig failed\n",
+					__func__);
+			return -ENODEV;
+		}
+		dev->ipa_port.in = ep;
+		ep->driver_data = cdev;
+	}
+
+	if (info->fs_out_desc) {
+		ep = usb_ep_autoconfig(cdev->gadget, info->fs_out_desc);
+		if (!ep) {
+			pr_err("%s: usb epout autoconfig failed\n",
+					__func__);
+			status = -ENODEV;
+			goto ep_auto_out_fail;
+		}
+		dev->ipa_port.out = ep;
+		ep->driver_data = cdev;
+	}
+
+	if (info->fs_notify_desc) {
+		ep = usb_ep_autoconfig(cdev->gadget, info->fs_notify_desc);
+		if (!ep) {
+			pr_err("%s: usb epnotify autoconfig failed\n",
+					__func__);
+			status = -ENODEV;
+			goto ep_auto_notify_fail;
+		}
+		dev->notify = ep;
+		ep->driver_data = cdev;
+		dev->notify_req = frmnet_alloc_req(ep,
+				sizeof(struct usb_cdc_notification),
+				GFP_KERNEL);
+		if (IS_ERR(dev->notify_req)) {
+			pr_err("%s: unable to allocate memory for notify req\n",
+				__func__);
+			status = -ENOMEM;
+			goto ep_notify_alloc_fail;
+		}
+
+		dev->notify_req->complete = frmnet_notify_complete;
+		dev->notify_req->context = dev;
+	}
+
+	status = -ENOMEM;
+	f->fs_descriptors = usb_copy_descriptors(info->fs_desc_hdr);
+	if (!f->fs_descriptors) {
+		pr_err("%s: no descriptors, usb_copy descriptors(fs)failed\n",
+			__func__);
+		goto fail;
+	}
+
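+	/* HS/SS descriptors reuse the endpoint addresses picked by FS autoconfig */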
+	if (gadget_is_dualspeed(cdev->gadget)) {
+		if (info->fs_in_desc && info->hs_in_desc)
+			info->hs_in_desc->bEndpointAddress =
+					info->fs_in_desc->bEndpointAddress;
+		if (info->fs_out_desc && info->hs_out_desc)
+			info->hs_out_desc->bEndpointAddress =
+					info->fs_out_desc->bEndpointAddress;
+		if (info->fs_notify_desc && info->hs_notify_desc)
+			info->hs_notify_desc->bEndpointAddress =
+					info->fs_notify_desc->bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(info->hs_desc_hdr);
+		if (!f->hs_descriptors) {
+			pr_err("%s: no hs_descriptors, usb_copy descriptors(hs)failed\n",
+				__func__);
+			goto fail;
+		}
+	}
+
+	if (gadget_is_superspeed(cdev->gadget)) {
+		if (info->fs_in_desc && info->ss_in_desc)
+			info->ss_in_desc->bEndpointAddress =
+					info->fs_in_desc->bEndpointAddress;
+
+		if (info->fs_out_desc && info->ss_out_desc)
+			info->ss_out_desc->bEndpointAddress =
+					info->fs_out_desc->bEndpointAddress;
+		if (info->fs_notify_desc && info->ss_notify_desc)
+			info->ss_notify_desc->bEndpointAddress =
+					info->fs_notify_desc->bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(info->ss_desc_hdr);
+		if (!f->ss_descriptors) {
+			pr_err("%s: no ss_descriptors,usb_copy descriptors(ss)failed\n",
+			__func__);
+			goto fail;
+		}
+	}
+
+	return 0;
+
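+/* error unwind: each label below intentionally falls through to the next */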
+fail:
+	if (gadget_is_superspeed(cdev->gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(cdev->gadget) && f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->fs_descriptors)
+		usb_free_descriptors(f->fs_descriptors);
+	if (dev->notify_req)
+		frmnet_free_req(dev->notify, dev->notify_req);
+ep_notify_alloc_fail:
+	if (dev->notify) {
+		dev->notify->driver_data = NULL;
+		dev->notify = NULL;
+	}
+ep_auto_notify_fail:
+	if (dev->ipa_port.out) {
+		dev->ipa_port.out->driver_data = NULL;
+		dev->ipa_port.out = NULL;
+	}
+ep_auto_out_fail:
+	if (dev->ipa_port.in) {
+		dev->ipa_port.in->driver_data = NULL;
+		dev->ipa_port.in = NULL;
+	}
+
+	return status;
+}
+
+static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rmnet	*dev = func_to_rmnet(f);
+	struct usb_composite_dev *cdev = c->cdev;
+	int ret = -ENODEV;
+	struct ipa_function_bind_info info = {0};
+
+	pr_debug("%s: start binding\n", __func__);
+	dev->ifc_id = usb_interface_id(c, f);
+	if (dev->ifc_id < 0) {
+		pr_err("%s: unable to allocate ifc id, err:%d\n",
+			__func__, dev->ifc_id);
+		return dev->ifc_id;
+	}
+
+	info.data_str_idx = 0;
+	if (dev->qti_port_type == QTI_PORT_RMNET) {
+		info.string_defs = rmnet_string_defs;
+		info.data_desc = &rmnet_interface_desc;
+		info.fs_in_desc = &rmnet_fs_in_desc;
+		info.fs_out_desc = &rmnet_fs_out_desc;
+		info.fs_notify_desc = &rmnet_fs_notify_desc;
+		info.hs_in_desc = &rmnet_hs_in_desc;
+		info.hs_out_desc = &rmnet_hs_out_desc;
+		info.hs_notify_desc = &rmnet_hs_notify_desc;
+		info.ss_in_desc = &rmnet_ss_in_desc;
+		info.ss_out_desc = &rmnet_ss_out_desc;
+		info.ss_notify_desc = &rmnet_ss_notify_desc;
+		info.fs_desc_hdr = rmnet_fs_function;
+		info.hs_desc_hdr = rmnet_hs_function;
+		info.ss_desc_hdr = rmnet_ss_function;
+	} else {
+		info.string_defs = dpl_string_defs;
+		info.data_desc = &dpl_data_intf_desc;
+		info.fs_in_desc = &dpl_hs_data_desc;
+		info.hs_in_desc = &dpl_hs_data_desc;
+		info.ss_in_desc = &dpl_ss_data_desc;
+		info.fs_desc_hdr = dpl_hs_data_only_desc;
+		info.hs_desc_hdr = dpl_hs_data_only_desc;
+		info.ss_desc_hdr = dpl_ss_data_only_desc;
+	}
+
+	ret = ipa_update_function_bind_params(dev, cdev, &info);
+
+	return ret;
+}
+
+static struct usb_function *frmnet_bind_config(struct usb_function_instance *fi)
+{
+	struct f_rmnet_opts	*opts;
+	struct f_rmnet		*dev;
+	struct usb_function	*f;
+
+	opts = container_of(fi, struct f_rmnet_opts, func_inst);
+	opts->refcnt++;
+	dev = opts->dev;
+	f = &dev->func;
+	if (dev->qti_port_type == QTI_PORT_RMNET) {
+		f->name = "rmnet";
+		f->strings = rmnet_strings;
+	} else {
+		f->name = "dpl";
+		f->strings = dpl_strings;
+	}
+
+	f->bind = frmnet_bind;
+	f->unbind = frmnet_unbind;
+	f->disable = frmnet_disable;
+	f->set_alt = frmnet_set_alt;
+	f->setup = frmnet_setup;
+	f->suspend = frmnet_suspend;
+	f->resume = frmnet_resume;
+	f->free_func = frmnet_free;
+	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
+	dev->port.disconnect = frmnet_disconnect;
+	dev->port.connect = frmnet_connect;
+
+	pr_debug("%s: complete\n", __func__);
+
+	return f;
+}
+
+static int rmnet_init(void)
+{
+	return gqti_ctrl_init();
+}
+
+static void frmnet_cleanup(void)
+{
+	gqti_ctrl_cleanup();
+}
+
+static void rmnet_free_inst(struct usb_function_instance *f)
+{
+	struct f_rmnet_opts *opts = container_of(f, struct f_rmnet_opts,
+						func_inst);
+	ipa_data_free(opts->dev->func_type);
+	kfree(opts->dev);
+	kfree(opts);
+}
+
+static int rmnet_set_inst_name(struct usb_function_instance *fi,
+		const char *name)
+{
+	int name_len, ret = 0;
+	struct f_rmnet *dev;
+	struct f_rmnet_opts *opts = container_of(fi,
+					struct f_rmnet_opts, func_inst);
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+	/* Update qti->qti_port_type */
+	ret = name_to_prot(dev, name);
+	if (ret < 0) {
+		pr_err("%s: failed to find prot for %s instance\n",
+		__func__, name);
+		goto fail;
+	}
+
+	if (dev->qti_port_type >= QTI_NUM_PORTS ||
+		dev->func_type >= USB_IPA_NUM_FUNCS) {
+		pr_err("%s: invalid prot\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	INIT_LIST_HEAD(&dev->cpkt_resp_q);
+	ret = ipa_data_setup(dev->func_type);
+	if (ret)
+		goto fail;
+
+	opts->dev = dev;
+	return 0;
+
+fail:
+	kfree(dev);
+	return ret;
+}
+
+static inline struct f_rmnet_opts *to_f_rmnet_opts(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct f_rmnet_opts,
+				func_inst.group);
+}
+
+static void rmnet_opts_release(struct config_item *item)
+{
+	struct f_rmnet_opts *opts = to_f_rmnet_opts(item);
+
+	usb_put_function_instance(&opts->func_inst);
+};
+
+static struct configfs_item_operations rmnet_item_ops = {
+	.release = rmnet_opts_release,
+};
+
+static struct config_item_type rmnet_func_type = {
+	.ct_item_ops    = &rmnet_item_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+static struct usb_function_instance *rmnet_alloc_inst(void)
+{
+	struct f_rmnet_opts *opts;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return ERR_PTR(-ENOMEM);
+
+	opts->func_inst.set_inst_name = rmnet_set_inst_name;
+	opts->func_inst.free_func_inst = rmnet_free_inst;
+
+	config_group_init_type_name(&opts->func_inst.group, "",
+				&rmnet_func_type);
+	return &opts->func_inst;
+}
+
+static struct usb_function *rmnet_alloc(struct usb_function_instance *fi)
+{
+	return frmnet_bind_config(fi);
+}
+
+DECLARE_USB_FUNCTION(rmnet_bam, rmnet_alloc_inst, rmnet_alloc);
+
+static int __init usb_rmnet_init(void)
+{
+	int ret;
+
+	ret = rmnet_init();
+	if (!ret) {
+		ret = usb_function_register(&rmnet_bamusb_func);
+		if (ret) {
+			pr_err("%s: failed to register rmnet %d\n",
+					__func__, ret);
+			return ret;
+		}
+	}
+	return ret;
+}
+
+static void __exit usb_rmnet_exit(void)
+{
+	usb_function_unregister(&rmnet_bamusb_func);
+	frmnet_cleanup();
+}
+
+module_init(usb_rmnet_init);
+module_exit(usb_rmnet_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("USB RMNET Function Driver");
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index ac2231a..5d8e6fa 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -596,6 +596,7 @@ static int rndis_init_response(struct rndis_params *params,
 	resp->AFListOffset = cpu_to_le32(0);
 	resp->AFListSize = cpu_to_le32(0);
 
+	params->ul_max_xfer_size = le32_to_cpu(resp->MaxTransferSize);
 	params->resp_avail(params->v);
 	return 0;
 }
@@ -1015,6 +1016,18 @@ int rndis_set_param_medium(struct rndis_params *params, u32 medium, u32 speed)
 }
 EXPORT_SYMBOL_GPL(rndis_set_param_medium);
 
+u32 rndis_get_dl_max_xfer_size(struct rndis_params *params)
+{
+	pr_debug("%s:\n", __func__);
+	return params->dl_max_xfer_size;
+}
+
+u32 rndis_get_ul_max_xfer_size(struct rndis_params *params)
+{
+	pr_debug("%s:\n", __func__);
+	return params->ul_max_xfer_size;
+}
+
 void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer)
 {
 	pr_debug("%s:\n", __func__);
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index 4ffc282..a3051c4 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -194,6 +194,7 @@ typedef struct rndis_params
 	u32			host_rndis_major_ver;
 	u32			host_rndis_minor_ver;
 	u32			dl_max_xfer_size;
+	u32			ul_max_xfer_size;
 	const char		*vendorDescr;
 	u8			pkt_alignment_factor;
 	void			(*resp_avail)(void *v);
@@ -216,6 +217,8 @@ int  rndis_set_param_vendor(struct rndis_params *params, u32 vendorID,
 int  rndis_set_param_medium(struct rndis_params *params, u32 medium,
 			     u32 speed);
 void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer);
+u32  rndis_get_ul_max_xfer_size(struct rndis_params *params);
+u32  rndis_get_dl_max_xfer_size(struct rndis_params *params);
 void rndis_add_hdr(struct sk_buff *skb);
 int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
 			struct sk_buff_head *list);
diff --git a/drivers/usb/gadget/function/u_ctrl_qti.c b/drivers/usb/gadget/function/u_ctrl_qti.c
new file mode 100644
index 0000000..96018f7
--- /dev/null
+++ b/drivers/usb/gadget/function/u_ctrl_qti.c
@@ -0,0 +1,872 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/usb/usb_ctrl_qti.h>
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+
+#include "u_rmnet.h"
+#include "f_qdss.h"
+
+#define RMNET_CTRL_QTI_NAME "rmnet_ctrl"
+#define DPL_CTRL_QTI_NAME "dpl_ctrl"
+#define MODULE_NAME "usb_ctrl"
+/*
+ * Size QTI_CTRL_NAME_LEN from the gadget's QTI control name. Currently the
+ * RMNET and DPL gadgets use QTI as the control transport, so the RMNET ctrl
+ * name (the longer of the two) is used for QTI_CTRL_NAME_LEN.
+ */
+#define QTI_CTRL_NAME_LEN (sizeof(RMNET_CTRL_QTI_NAME)+2)
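+/* the +2 leaves room for a numeric port index suffix */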
+
+static struct class *rmnet_class;
+static dev_t qti_ctrl_dev;
+
+struct qti_ctrl_port {
+	void		*port_usb;
+	char		name[QTI_CTRL_NAME_LEN];
+	struct cdev	ctrl_device;
+	struct device	*dev;
+
+	bool		is_open;
+	int index;
+	unsigned int	intf;
+	int		ipa_prod_idx;
+	int		ipa_cons_idx;
+	enum peripheral_ep_type	ep_type;
+
+	atomic_t	connected;
+	atomic_t	line_state;
+
+	atomic_t	open_excl;
+	atomic_t	read_excl;
+	atomic_t	write_excl;
+	atomic_t	ioctl_excl;
+
+	wait_queue_head_t	read_wq;
+
+	struct list_head	cpkt_req_q;
+
+	spinlock_t	lock;
+	enum qti_port_type	port_type;
+	unsigned int	host_to_modem;
+	unsigned int	copied_to_modem;
+	unsigned int	copied_from_modem;
+	unsigned int	modem_to_host;
+	unsigned int	drp_cpkt_cnt;
+};
+static struct qti_ctrl_port *ctrl_port[QTI_NUM_PORTS];
+
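+/* single-owner guard: the atomic counter admits exactly one holder at a time */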
+static inline int qti_ctrl_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1)
+		return 0;
+	atomic_dec(excl);
+	return -EBUSY;
+}
+
+static inline void qti_ctrl_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+static struct
+rmnet_ctrl_pkt *alloc_rmnet_ctrl_pkt(unsigned int len, gfp_t flags)
+{
+	struct rmnet_ctrl_pkt *pkt;
+
+	pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+	if (!pkt)
+		return ERR_PTR(-ENOMEM);
+
+	pkt->buf = kmalloc(len, flags);
+	if (!pkt->buf) {
+		kfree(pkt);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pkt->len = len;
+
+	return pkt;
+}
+
+static void free_rmnet_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
+{
+	kfree(pkt->buf);
+	kfree(pkt);
+}
+
+static void qti_ctrl_queue_notify(struct qti_ctrl_port *port)
+{
+	unsigned long		flags;
+	struct rmnet_ctrl_pkt	*cpkt = NULL;
+
+	pr_debug("%s: Queue empty packet for QTI for port%d",
+		 __func__, port->index);
+
+	spin_lock_irqsave(&port->lock, flags);
+	if (!port->is_open) {
+		pr_err("%s: rmnet ctrl file handler %pK is not open",
+			   __func__, port);
+		spin_unlock_irqrestore(&port->lock, flags);
+		return;
+	}
+
+	cpkt = alloc_rmnet_ctrl_pkt(0, GFP_ATOMIC);
+	if (IS_ERR(cpkt)) {
+		pr_err("%s: Unable to allocate reset function pkt\n", __func__);
+		spin_unlock_irqrestore(&port->lock, flags);
+		return;
+	}
+
+	list_add_tail(&cpkt->list, &port->cpkt_req_q);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	pr_debug("%s: Wake up read queue", __func__);
+	wake_up(&port->read_wq);
+}
+
+static int gqti_ctrl_send_cpkt_tomodem(enum qti_port_type qport,
+					void *buf, size_t len)
+{
+	unsigned long		flags;
+	struct qti_ctrl_port	*port;
+	struct rmnet_ctrl_pkt *cpkt;
+
+	if (len > MAX_QTI_PKT_SIZE) {
+		pr_err("given pkt size too big:%zu > max_pkt_size:%d\n",
+				len, MAX_QTI_PKT_SIZE);
+		return -EINVAL;
+	}
+
+	if (qport >= QTI_NUM_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+		return -ENODEV;
+	}
+	port = ctrl_port[qport];
+	cpkt = alloc_rmnet_ctrl_pkt(len, GFP_ATOMIC);
+	if (IS_ERR(cpkt)) {
+		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(cpkt->buf, buf, len);
+	cpkt->len = len;
+
+	pr_debug("%s: port type:%d: Add to cpkt_req_q packet with len = %zu\n",
+			__func__, port->port_type, len);
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* drop cpkt if port is not open */
+	if (!port->is_open) {
+		pr_debug("rmnet file handler %pK(index=%d) is not open",
+		       port, port->index);
+		port->drp_cpkt_cnt++;
+		spin_unlock_irqrestore(&port->lock, flags);
+		free_rmnet_ctrl_pkt(cpkt);
+		return 0;
+	}
+
+	list_add_tail(&cpkt->list, &port->cpkt_req_q);
+	port->host_to_modem++;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	/* wakeup read thread */
+	pr_debug("%s: Wake up read queue", __func__);
+	wake_up(&port->read_wq);
+
+	return 0;
+}
+
+static void
+gqti_ctrl_notify_modem(void *gptr, enum qti_port_type qport, int val)
+{
+	struct qti_ctrl_port *port;
+
+	if (qport >= QTI_NUM_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+		return;
+	}
+	port = ctrl_port[qport];
+	atomic_set(&port->line_state, val);
+
+	/* send 0 len pkt to qti to notify state change */
+	qti_ctrl_queue_notify(port);
+}
+
+int gqti_ctrl_connect(void *gr, enum qti_port_type qport, unsigned int intf)
+{
+	struct qti_ctrl_port	*port;
+	struct grmnet *g_rmnet = NULL;
+	unsigned long flags;
+
+	pr_debug("%s: port type:%d gadget:%pK\n", __func__, qport, gr);
+	if (qport >= QTI_NUM_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+		return -ENODEV;
+	}
+
+	port = ctrl_port[qport];
+	if (!port) {
+		pr_err("%s: gadget port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&port->lock, flags);
+	port->port_type = qport;
+	port->ep_type = DATA_EP_TYPE_HSUSB;
+	port->intf = intf;
+
+	if (gr) {
+		port->port_usb = gr;
+		g_rmnet = gr;
+		g_rmnet->send_encap_cmd = gqti_ctrl_send_cpkt_tomodem;
+		g_rmnet->notify_modem = gqti_ctrl_notify_modem;
+		if (port->port_type == QTI_PORT_DPL)
+			atomic_set(&port->line_state, 1);
+	} else {
+		spin_unlock_irqrestore(&port->lock, flags);
+		pr_err("%s(): Port is used without port type.\n", __func__);
+		return -ENODEV;
+	}
+
+	port->host_to_modem = 0;
+	port->copied_to_modem = 0;
+	port->copied_from_modem = 0;
+	port->modem_to_host = 0;
+	port->drp_cpkt_cnt = 0;
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	atomic_set(&port->connected, 1);
+	wake_up(&port->read_wq);
+	if (port->port_usb && g_rmnet && g_rmnet->connect)
+		g_rmnet->connect(port->port_usb);
+
+	return 0;
+}
+
+void gqti_ctrl_disconnect(void *gr, enum qti_port_type qport)
+{
+	struct qti_ctrl_port	*port;
+	unsigned long		flags;
+	struct rmnet_ctrl_pkt	*cpkt;
+	struct grmnet *g_rmnet = NULL;
+
+	pr_debug("%s: gadget:%pK\n", __func__, gr);
+
+	if (qport >= QTI_NUM_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+		return;
+	}
+
+	port = ctrl_port[qport];
+	if (!port) {
+		pr_err("%s: gadget port is null\n", __func__);
+		return;
+	}
+
+	atomic_set(&port->connected, 0);
+	atomic_set(&port->line_state, 0);
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* reset ipa eps to -1 */
+	port->ipa_prod_idx = -1;
+	port->ipa_cons_idx = -1;
+	port->port_usb = NULL;
+
+	if (gr) {
+		g_rmnet = gr;
+		g_rmnet->send_encap_cmd = NULL;
+		g_rmnet->notify_modem = NULL;
+	} else {
+		pr_err("%s(): unrecognized gadget type(%d).\n",
+					__func__, port->port_type);
+	}
+
+	while (!list_empty(&port->cpkt_req_q)) {
+		cpkt = list_first_entry(&port->cpkt_req_q,
+					struct rmnet_ctrl_pkt, list);
+
+		list_del(&cpkt->list);
+		free_rmnet_ctrl_pkt(cpkt);
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	/* send 0 len pkt to qti to notify state change */
+	qti_ctrl_queue_notify(port);
+}
+
+void gqti_ctrl_update_ipa_pipes(void *gr, enum qti_port_type qport,
+				u32 ipa_prod, u32 ipa_cons)
+{
+	struct qti_ctrl_port	*port;
+
+	if (qport >= QTI_NUM_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+		return;
+	}
+
+	port = ctrl_port[qport];
+	port->ipa_prod_idx = ipa_prod;
+	port->ipa_cons_idx = ipa_cons;
+}
+
+static int qti_ctrl_open(struct inode *ip, struct file *fp)
+{
+	unsigned long		flags;
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						struct qti_ctrl_port,
+						ctrl_device);
+
+	pr_debug("Open rmnet_ctrl_qti device file name=%s(index=%d)\n",
+		port->name, port->index);
+
+	if (qti_ctrl_lock(&port->open_excl)) {
+		pr_err("Already opened\n");
+		return -EBUSY;
+	}
+
+	spin_lock_irqsave(&port->lock, flags);
+	port->is_open = true;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return 0;
+}
+
+static int qti_ctrl_release(struct inode *ip, struct file *fp)
+{
+	unsigned long		flags;
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						struct qti_ctrl_port,
+						ctrl_device);
+
+	pr_debug("Close rmnet control file");
+
+	spin_lock_irqsave(&port->lock, flags);
+	port->is_open = false;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	qti_ctrl_unlock(&port->open_excl);
+
+	return 0;
+}
+
+static ssize_t
+qti_ctrl_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
+{
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						struct qti_ctrl_port,
+						ctrl_device);
+	struct rmnet_ctrl_pkt *cpkt = NULL;
+	unsigned long flags;
+	int ret = 0;
+
+	pr_debug("%s: Enter(%zu)\n", __func__, count);
+
+	if (count > MAX_QTI_PKT_SIZE) {
+		pr_err("Buffer size is too big %zu, should be at most %d\n",
+			count, MAX_QTI_PKT_SIZE);
+		return -EINVAL;
+	}
+
+	if (qti_ctrl_lock(&port->read_excl)) {
+		pr_err("Previous reading is not finished yet\n");
+		return -EBUSY;
+	}
+
+	/* block until a new packet is available */
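+	/* note: the loop exits with port->lock held once a packet is queued */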
+	do {
+		spin_lock_irqsave(&port->lock, flags);
+		if (!list_empty(&port->cpkt_req_q))
+			break;
+		spin_unlock_irqrestore(&port->lock, flags);
+
+		pr_debug("%s: Requests list is empty. Wait.\n", __func__);
+		ret = wait_event_interruptible(port->read_wq,
+					!list_empty(&port->cpkt_req_q));
+		if (ret < 0) {
+			pr_debug("Waiting failed\n");
+			qti_ctrl_unlock(&port->read_excl);
+			return -ERESTARTSYS;
+		}
+	} while (1);
+
+	cpkt = list_first_entry(&port->cpkt_req_q, struct rmnet_ctrl_pkt,
+							list);
+	list_del(&cpkt->list);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	if (cpkt->len > count) {
+		pr_err("cpkt size too big:%d > buf size:%zu\n",
+				cpkt->len, count);
+		qti_ctrl_unlock(&port->read_excl);
+		free_rmnet_ctrl_pkt(cpkt);
+		return -ENOMEM;
+	}
+
+	pr_debug("%s: cpkt size:%d\n", __func__, cpkt->len);
+
+	qti_ctrl_unlock(&port->read_excl);
+
+	ret = copy_to_user(buf, cpkt->buf, cpkt->len);
+	if (ret) {
+		pr_err("copy_to_user failed: err %d\n", ret);
+		ret = -EFAULT;
+	} else {
+		pr_debug("%s: copied %d bytes to user\n", __func__, cpkt->len);
+		ret = cpkt->len;
+		port->copied_to_modem++;
+	}
+
+	free_rmnet_ctrl_pkt(cpkt);
+
+	return ret;
+}
+
+static ssize_t
+qti_ctrl_write(struct file *fp, const char __user *buf, size_t count,
+		   loff_t *pos)
+{
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						struct qti_ctrl_port,
+						ctrl_device);
+	void *kbuf;
+	unsigned long flags;
+	int ret = 0;
+	struct grmnet *g_rmnet = NULL;
+
+	pr_debug("%s: Enter(%zu) port_index=%d", __func__, count, port->index);
+
+	if (!count) {
+		pr_debug("zero length ctrl pkt\n");
+		return -EINVAL;
+	}
+
+	if (count > MAX_QTI_PKT_SIZE) {
+		pr_debug("given pkt size too big:%zu > max_pkt_size:%d\n",
+				count, MAX_QTI_PKT_SIZE);
+		return -EINVAL;
+	}
+
+	if (qti_ctrl_lock(&port->write_excl)) {
+		pr_err("Previous writing not finished yet\n");
+		return -EBUSY;
+	}
+
+	if (!atomic_read(&port->connected)) {
+		pr_debug("USB cable not connected\n");
+		qti_ctrl_unlock(&port->write_excl);
+		return -EPIPE;
+	}
+
+	kbuf = kmalloc(count, GFP_KERNEL);
+	if (!kbuf) {
+		qti_ctrl_unlock(&port->write_excl);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(kbuf, buf, count);
+	if (ret) {
+		pr_err("copy_from_user failed err:%d\n", ret);
+		kfree(kbuf);
+		qti_ctrl_unlock(&port->write_excl);
+		return -EFAULT;
+	}
+	port->copied_from_modem++;
+
+	spin_lock_irqsave(&port->lock, flags);
+	if (port->port_usb) {
+		if (port->port_type == QTI_PORT_RMNET) {
+			g_rmnet = port->port_usb;
+		} else {
+			spin_unlock_irqrestore(&port->lock, flags);
+			pr_err("%s(): unrecognized gadget type(%d).\n",
+						__func__, port->port_type);
+			return -EINVAL;
+		}
+
+		if (g_rmnet && g_rmnet->send_cpkt_response) {
+			ret = g_rmnet->send_cpkt_response(port->port_usb,
+							kbuf, count);
+			if (ret)
+				pr_err("%d failed to send ctrl packet.\n", ret);
+			port->modem_to_host++;
+		} else {
+			pr_err("send_cpkt_response callback is NULL\n");
+			ret = -EINVAL;
+		}
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
+	kfree(kbuf);
+	qti_ctrl_unlock(&port->write_excl);
+
+	pr_debug("%s: Exit(%zu)", __func__, count);
+	return (ret) ? ret : count;
+}
+
+static long qti_ctrl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						struct qti_ctrl_port,
+						ctrl_device);
+	struct grmnet *gr = NULL;
+	struct ep_info info;
+	int val, ret = 0;
+
+	pr_debug("%s: Received command %d for port type:%d\n",
+				__func__, cmd, port->port_type);
+
+	if (qti_ctrl_lock(&port->ioctl_excl))
+		return -EBUSY;
+
+	switch (cmd) {
+	case QTI_CTRL_MODEM_OFFLINE:
+		if (port && (port->port_type == QTI_PORT_DPL)) {
+			pr_err("%s(): Modem Offline not handled\n", __func__);
+			goto exit_ioctl;
+		}
+
+		if (port && port->port_usb)
+			gr = port->port_usb;
+
+		if (gr && gr->disconnect)
+			gr->disconnect(gr);
+		break;
+	case QTI_CTRL_MODEM_ONLINE:
+		if (port && (port->port_type == QTI_PORT_DPL)) {
+			pr_err("%s(): Modem Online not handled\n", __func__);
+			goto exit_ioctl;
+		}
+
+		if (port && port->port_usb)
+			gr = port->port_usb;
+
+		if (gr && gr->connect)
+			gr->connect(gr);
+		break;
+	case QTI_CTRL_GET_LINE_STATE:
+		val = atomic_read(&port->line_state);
+		ret = copy_to_user((void __user *)arg, &val, sizeof(val));
+		if (ret) {
+			pr_err("copying to user space failed");
+			ret = -EFAULT;
+		}
+		pr_debug("%s: Sent line_state: %d for port type:%d\n", __func__,
+			atomic_read(&port->line_state), port->port_type);
+		break;
+	case QTI_CTRL_EP_LOOKUP:
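+		/* report the USB<->IPA endpoint pairing so userspace can set up offload */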
+
+		pr_debug("%s(): EP_LOOKUP for port type:%d\n", __func__,
+							port->port_type);
+		val = atomic_read(&port->connected);
+		if (!val) {
+			pr_err_ratelimited("EP_LOOKUP failed: not connected\n");
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (port->ipa_prod_idx == -1 && port->ipa_cons_idx == -1) {
+			pr_err_ratelimited("EP_LOOKUP ipa pipes not updated\n");
+			ret = -EAGAIN;
+			break;
+		}
+
+		info.ph_ep_info.ep_type = port->ep_type;
+		info.ph_ep_info.peripheral_iface_id = port->intf;
+		info.ipa_ep_pair.cons_pipe_num = port->ipa_cons_idx;
+		info.ipa_ep_pair.prod_pipe_num = port->ipa_prod_idx;
+
+		pr_debug("%s(): port type:%d ep_type:%d intf:%d\n",
+			__func__, port->port_type, info.ph_ep_info.ep_type,
+			info.ph_ep_info.peripheral_iface_id);
+
+		pr_debug("%s(): ipa_cons_idx:%d ipa_prod_idx:%d\n",
+				__func__, info.ipa_ep_pair.cons_pipe_num,
+				info.ipa_ep_pair.prod_pipe_num);
+
+		ret = copy_to_user((void __user *)arg, &info,
+			sizeof(info));
+		if (ret) {
+			pr_err("copying to user space failed");
+			ret = -EFAULT;
+		}
+		break;
+	default:
+		pr_err("wrong parameter");
+		ret = -EINVAL;
+	}
+
+exit_ioctl:
+	qti_ctrl_unlock(&port->ioctl_excl);
+
+	return ret;
+}
+
+static unsigned int qti_ctrl_poll(struct file *file, poll_table *wait)
+{
+	struct qti_ctrl_port *port = container_of(file->private_data,
+						struct qti_ctrl_port,
+						ctrl_device);
+	unsigned long flags;
+	unsigned int mask = 0;
+
+	if (!port) {
+		pr_err("%s on a NULL device\n", __func__);
+		return POLLERR;
+	}
+
+	poll_wait(file, &port->read_wq, wait);
+
+	spin_lock_irqsave(&port->lock, flags);
+	if (!list_empty(&port->cpkt_req_q)) {
+		mask |= POLLIN | POLLRDNORM;
+		pr_debug("%s sets POLLIN for rmnet_ctrl_qti_port\n", __func__);
+	}
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return mask;
+}
+
+static int qti_ctrl_read_stats(struct seq_file *s, void *unused)
+{
+	struct qti_ctrl_port	*port = s->private;
+	unsigned long		flags;
+	int			i;
+
+	for (i = 0; i < QTI_NUM_PORTS; i++) {
+		port = ctrl_port[i];
+		if (!port)
+			continue;
+		spin_lock_irqsave(&port->lock, flags);
+
+		seq_printf(s, "\n#PORT:%d port: %pK\n", i, port);
+		seq_printf(s, "name:			%s\n", port->name);
+		seq_printf(s, "host_to_modem:		%d\n",
+				port->host_to_modem);
+		seq_printf(s, "copied_to_modem:	%d\n",
+				port->copied_to_modem);
+		seq_printf(s, "copied_from_modem:	%d\n",
+				port->copied_from_modem);
+		seq_printf(s, "modem_to_host:		%d\n",
+				port->modem_to_host);
+		seq_printf(s, "cpkt_drp_cnt:		%d\n",
+				port->drp_cpkt_cnt);
+		spin_unlock_irqrestore(&port->lock, flags);
+	}
+
+	return 0;
+}
+
+static int qti_ctrl_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, qti_ctrl_read_stats, inode->i_private);
+}
+
+static ssize_t qti_ctrl_reset_stats(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct qti_ctrl_port *port = s->private;
+	int                     i;
+	unsigned long           flags;
+
+	for (i = 0; i < QTI_NUM_PORTS; i++) {
+		port = ctrl_port[i];
+		if (!port)
+			continue;
+
+		spin_lock_irqsave(&port->lock, flags);
+		port->host_to_modem = 0;
+		port->copied_to_modem = 0;
+		port->copied_from_modem = 0;
+		port->modem_to_host = 0;
+		port->drp_cpkt_cnt = 0;
+		spin_unlock_irqrestore(&port->lock, flags);
+	}
+	return count;
+}
+
+static const struct file_operations qti_ctrl_stats_ops = {
+	.open = qti_ctrl_stats_open,
+	.read = seq_read,
+	.write = qti_ctrl_reset_stats,
+};
+
+static struct dentry   *qti_ctrl_dent;
+static void qti_ctrl_debugfs_init(void)
+{
+	struct dentry   *qti_ctrl_dfile;
+
+	qti_ctrl_dent = debugfs_create_dir("usb_qti", NULL);
+	if (IS_ERR(qti_ctrl_dent))
+		return;
+
+	qti_ctrl_dfile =
+		debugfs_create_file("status", 0444, qti_ctrl_dent, 0,
+				&qti_ctrl_stats_ops);
+	if (!qti_ctrl_dfile || IS_ERR(qti_ctrl_dfile))
+		debugfs_remove(qti_ctrl_dent);
+}
+
+static void qti_ctrl_debugfs_exit(void)
+{
+	debugfs_remove_recursive(qti_ctrl_dent);
+}
+
+/* file operations for rmnet device /dev/rmnet_ctrl */
+static const struct file_operations qti_ctrl_fops = {
+	.owner = THIS_MODULE,
+	.open = qti_ctrl_open,
+	.release = qti_ctrl_release,
+	.read = qti_ctrl_read,
+	.write = qti_ctrl_write,
+	.unlocked_ioctl = qti_ctrl_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = qti_ctrl_ioctl,
+#endif
+	.poll = qti_ctrl_poll,
+};
+/* file operations for DPL device /dev/dpl_ctrl */
+static const struct file_operations dpl_qti_ctrl_fops = {
+	.owner = THIS_MODULE,
+	.open = qti_ctrl_open,
+	.release = qti_ctrl_release,
+	.read = qti_ctrl_read,
+	.write = NULL,
+	.unlocked_ioctl = qti_ctrl_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = qti_ctrl_ioctl,
+#endif
+	.poll = qti_ctrl_poll,
+};
+
+static int qti_ctrl_alloc_chardev_region(void)
+{
+	int ret;
+
+	rmnet_class = class_create(THIS_MODULE, MODULE_NAME);
+	if (IS_ERR(rmnet_class)) {
+		pr_err("class_create() failed\n");
+		return PTR_ERR(rmnet_class);
+	}
+	ret = alloc_chrdev_region(&qti_ctrl_dev, 0, QTI_NUM_PORTS,
+		MODULE_NAME);
+	if (ret < 0) {
+		pr_err("alloc_chrdev_region() failed ret:%i\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int gqti_ctrl_init(void)
+{
+	int ret, i, sz = QTI_CTRL_NAME_LEN;
+	struct qti_ctrl_port *port = NULL;
+
+	ret = qti_ctrl_alloc_chardev_region();
+	if (ret) {
+		pr_err("qti_ctrl_alloc_chardev_region() failed ret:%d\n", ret);
+		return ret;
+	}
+
+	for (i = 0; i < QTI_NUM_PORTS; i++) {
+		port = kzalloc(sizeof(struct qti_ctrl_port), GFP_KERNEL);
+		if (!port) {
+			ret = -ENOMEM;
+			goto fail_init;
+		}
+
+		INIT_LIST_HEAD(&port->cpkt_req_q);
+		spin_lock_init(&port->lock);
+
+		atomic_set(&port->open_excl, 0);
+		atomic_set(&port->read_excl, 0);
+		atomic_set(&port->write_excl, 0);
+		atomic_set(&port->ioctl_excl, 0);
+		atomic_set(&port->connected, 0);
+		atomic_set(&port->line_state, 0);
+
+		init_waitqueue_head(&port->read_wq);
+
+		ctrl_port[i] = port;
+		port->index = i;
+		port->ipa_prod_idx = -1;
+		port->ipa_cons_idx = -1;
+
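+		/* port->name is zero-initialized by kzalloc, so strlcat copies */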
+		if (i == QTI_PORT_RMNET)
+			strlcat(port->name, RMNET_CTRL_QTI_NAME, sz);
+		else if (i == QTI_PORT_DPL)
+			strlcat(port->name, DPL_CTRL_QTI_NAME, sz);
+		else
+			snprintf(port->name, sz, "%s%d",
+				RMNET_CTRL_QTI_NAME, i);
+
+		port->ctrl_device.owner = THIS_MODULE;
+		if (i == QTI_PORT_DPL)
+			cdev_init(&port->ctrl_device, &dpl_qti_ctrl_fops);
+		else
+			cdev_init(&port->ctrl_device, &qti_ctrl_fops);
+
+		ret = cdev_add(&port->ctrl_device, qti_ctrl_dev + i, 1);
+		if (ret < 0) {
+			pr_err("cdev_add() failed ret:%d\n", ret);
+			goto fail_cdev;
+		}
+
+		port->dev = device_create(rmnet_class, NULL, qti_ctrl_dev+i,
+				port->dev, port->name);
+		if (IS_ERR(port->dev)) {
+			pr_err("device_create() failed for port(%d)\n", i);
+			ret = -ENOMEM;
+			goto fail_device_create;
+		}
+	}
+	qti_ctrl_debugfs_init();
+	return ret;
+fail_device_create:
+	cdev_del(&port->ctrl_device);
+fail_cdev:
+	class_destroy(rmnet_class);
+	unregister_chrdev_region(MAJOR(qti_ctrl_dev), QTI_NUM_PORTS);
+fail_init:
+	for (i--; i >= 0; i--) {
+		kfree(ctrl_port[i]);
+		ctrl_port[i] = NULL;
+	}
+	return ret;
+}
+
+void gqti_ctrl_cleanup(void)
+{
+	int i;
+
+	for (i = 0; i < QTI_NUM_PORTS; i++) {
+		cdev_del(&ctrl_port[i]->ctrl_device);
+		device_unregister(ctrl_port[i]->dev);
+		kfree(ctrl_port[i]);
+		ctrl_port[i] = NULL;
+	}
+
+	device_destroy(rmnet_class, qti_ctrl_dev);
+	class_destroy(rmnet_class);
+	unregister_chrdev_region(MAJOR(qti_ctrl_dev), QTI_NUM_PORTS);
+	qti_ctrl_debugfs_exit();
+}
diff --git a/drivers/usb/gadget/function/u_data_ipa.c b/drivers/usb/gadget/function/u_data_ipa.c
new file mode 100644
index 0000000..f379028
--- /dev/null
+++ b/drivers/usb/gadget/function/u_data_ipa.c
@@ -0,0 +1,1402 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+#include <linux/usb_bam.h>
+
+#include "u_data_ipa.h"
+#include "u_rmnet.h"
+
+struct ipa_data_ch_info {
+	struct usb_request			*rx_req;
+	struct usb_request			*tx_req;
+	unsigned long				flags;
+	unsigned int				id;
+	enum ipa_func_type			func_type;
+	bool					is_connected;
+	unsigned int				port_num;
+	spinlock_t				port_lock;
+
+	struct work_struct			connect_w;
+	struct work_struct			disconnect_w;
+	struct work_struct			suspend_w;
+	struct work_struct			resume_w;
+
+	u32					src_pipe_idx;
+	u32					dst_pipe_idx;
+	u8					src_connection_idx;
+	u8					dst_connection_idx;
+	enum usb_ctrl				usb_bam_type;
+	struct gadget_ipa_port			*port_usb;
+	struct usb_gadget			*gadget;
+	atomic_t				pipe_connect_notified;
+	struct usb_bam_connect_ipa_params	ipa_params;
+};
+
+struct rndis_data_ch_info {
+	/* downlink (device->host) side configuration */
+	u32	dl_max_transfer_size;
+	/* uplink (host->device) side configuration */
+	u32	ul_max_transfer_size;
+	u32	ul_max_packets_number;
+	bool	ul_aggregation_enable;
+	u32	prod_clnt_hdl;
+	u32	cons_clnt_hdl;
+	void	*priv;
+};
+
+static struct workqueue_struct *ipa_data_wq;
+static struct ipa_data_ch_info *ipa_data_ports[IPA_N_PORTS];
+static struct rndis_data_ch_info *rndis_data;
+/**
+ * ipa_data_endless_complete() - completion callback for endless TX/RX request
+ * @ep: USB endpoint for which this completion happens
+ * @req: USB endless request
+ *
+ * This completion is called when an endless (TX/RX) transfer is terminated,
+ * i.e. in the disconnect or suspend case.
+ */
+static void ipa_data_endless_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	pr_debug("%s: endless complete for(%s) with status: %d\n",
+				__func__, ep->name, req->status);
+}
+
+/**
+ * ipa_data_start_endless_xfer() - configure USB endpoint and
+ * queue endless TX/RX request
+ * @port: USB IPA data channel information
+ * @in: USB endpoint direction i.e. true: IN(Device TX), false: OUT(Device RX)
+ *
+ * Used to queue an endless TX/RX request with the UDC driver. It sets the
+ * required DBM endpoint configuration before queueing the endless TX/RX
+ * request.
+ */
+static void ipa_data_start_endless_xfer(struct ipa_data_ch_info *port, bool in)
+{
+	unsigned long flags;
+	int status;
+	struct usb_ep *ep;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb || (in && !port->tx_req)
+				|| (!in && !port->rx_req)) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s(): port_usb/req is NULL.\n", __func__);
+		return;
+	}
+
+	if (in)
+		ep = port->port_usb->in;
+	else
+		ep = port->port_usb->out;
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (in) {
+		pr_debug("%s: enqueue endless TX_REQ(IN)\n", __func__);
+		status = usb_ep_queue(ep, port->tx_req, GFP_ATOMIC);
+		if (status)
+			pr_err("error enqueuing endless TX_REQ, %d\n", status);
+	} else {
+		pr_debug("%s: enqueue endless RX_REQ(OUT)\n", __func__);
+		status = usb_ep_queue(ep, port->rx_req, GFP_ATOMIC);
+		if (status)
+			pr_err("error enqueuing endless RX_REQ, %d\n", status);
+	}
+}
+
+/**
+ * ipa_data_stop_endless_xfer() - terminate and dequeue endless TX/RX request
+ * @port: USB IPA data channel information
+ * @in: USB endpoint direction i.e. IN - Device TX, OUT - Device RX
+ *
+ * Used to terminate and dequeue an endless TX/RX request with the UDC
+ * driver.
+ */
+static void ipa_data_stop_endless_xfer(struct ipa_data_ch_info *port, bool in)
+{
+	unsigned long flags;
+	int status;
+	struct usb_ep *ep;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb || (in && !port->tx_req)
+				|| (!in && !port->rx_req)) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s(): port_usb/req is NULL.\n", __func__);
+		return;
+	}
+
+	if (in)
+		ep = port->port_usb->in;
+	else
+		ep = port->port_usb->out;
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (in) {
+		pr_debug("%s: dequeue endless TX_REQ(IN)\n", __func__);
+		status = usb_ep_dequeue(ep, port->tx_req);
+		if (status)
+			pr_err("error dequeueing endless TX_REQ, %d\n", status);
+	} else {
+		pr_debug("%s: dequeue endless RX_REQ(OUT)\n", __func__);
+		status = usb_ep_dequeue(ep, port->rx_req);
+		if (status)
+			pr_err("error dequeueing endless RX_REQ, %d\n", status);
+	}
+}
+
+/*
+ * Called when IPA signals that the network interface is up.
+ * Starts the transfers on the bulk endpoints.
+ * (For optimization reasons, the pipes and the BAM with IPA are already
+ * connected.)
+ */
+void ipa_data_start_rx_tx(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info	*port;
+	unsigned long flags;
+	struct usb_ep *epin, *epout;
+
+	pr_debug("%s: Triggered: starting tx, rx", __func__);
+	/* queue in & out requests */
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("%s: port is NULL, can't start tx, rx", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (!port->port_usb || !port->port_usb->in ||
+		!port->port_usb->out) {
+		pr_err("%s: Can't start tx, rx, ep not enabled", __func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	if (!port->rx_req || !port->tx_req) {
+		pr_err("%s: No request d->rx_req=%pK, d->tx_req=%pK", __func__,
+			port->rx_req, port->tx_req);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+	if (!port->is_connected) {
+		pr_debug("%s: pipes are disconnected", __func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	epout = port->port_usb->out;
+	epin = port->port_usb->in;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* queue in & out requests */
+	pr_debug("%s: Starting rx", __func__);
+	if (epout)
+		ipa_data_start_endless_xfer(port, false);
+
+	pr_debug("%s: Starting tx", __func__);
+	if (epin)
+		ipa_data_start_endless_xfer(port, true);
+}
+/**
+ * ipa_data_disconnect_work() - Perform USB IPA BAM disconnect
+ * @w: disconnect work
+ *
+ * Scheduled from the ipa_data_disconnect() API when a particular function
+ * is disabled due to a USB disconnect, or when a USB composition switch is
+ * triggered. This API disconnects the USB BAM and IPA BAM pipes and also
+ * initiates the USB IPA BAM pipe handshake for the USB disconnect sequence.
+ * Due to the handshake operation and the involvement of SPS related APIs,
+ * this functionality cannot be used from atomic context.
+ */
+static void ipa_data_disconnect_work(struct work_struct *w)
+{
+	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+								disconnect_w);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->is_connected) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_debug("Already disconnected.\n");
+		return;
+	}
+	port->is_connected = false;
+	pr_debug("%s(): prod_clnt_hdl:%d cons_clnt_hdl:%d\n", __func__,
+			port->ipa_params.prod_clnt_hdl,
+			port->ipa_params.cons_clnt_hdl);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	ret = usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params);
+	if (ret)
+		pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret);
+
+	if (port->func_type == USB_IPA_FUNC_RNDIS) {
+		/*
+		 * NOTE: the USB and IPA BAM related pipes must be disconnected
+		 * before calling the IPA tethered function related disconnect
+		 * API. That API deletes the dependency graph with IPA RM,
+		 * which would result in IPA not pulling data although there is
+		 * pending data on the USB BAM producer pipe.
+		 */
+		if (atomic_xchg(&port->pipe_connect_notified, 0) == 1) {
+			void *priv;
+
+			priv = rndis_qc_get_ipa_priv();
+			rndis_ipa_pipe_disconnect_notify(priv);
+		}
+	}
+
+	if (port->ipa_params.prod_clnt_hdl)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->dst_connection_idx);
+	if (port->ipa_params.cons_clnt_hdl)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->src_connection_idx);
+
+	if (port->func_type == USB_IPA_FUNC_RMNET)
+		teth_bridge_disconnect(port->ipa_params.src_client);
+	/*
+	 * Decrement usage count which was incremented
+	 * upon cable connect or cable disconnect in suspended state.
+	 */
+	usb_gadget_autopm_put_async(port->gadget);
+
+	pr_debug("%s(): disconnect work completed.\n", __func__);
+}
+
+/**
+ * ipa_data_disconnect() - Restore USB ep operation and disable USB endpoint
+ * @gp: USB gadget IPA Port
+ * @func: function whose USB endpoints need to be disabled
+ *
+ * Called from atomic context from the gadget driver when a particular
+ * function is disabled due to a USB cable disconnect, or when a USB
+ * composition switch is triggered. This API restores normal USB endpoint
+ * operation and disables the USB endpoints used for the accelerated path.
+ */
+void ipa_data_disconnect(struct gadget_ipa_port *gp, enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+	struct usb_gadget *gadget = NULL;
+
+	pr_debug("dev:%pK port number:%d\n", gp, func);
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("invalid ipa portno#%d\n", func);
+		return;
+	}
+
+	if (!gp) {
+		pr_err("data port is null\n");
+		return;
+	}
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("port %u is NULL", func);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb) {
+		gadget = port->port_usb->cdev->gadget;
+		port->port_usb->ipa_consumer_ep = -1;
+		port->port_usb->ipa_producer_ep = -1;
+
+		if (port->port_usb->in) {
+			/*
+			 * Disable endpoints.
+			 * Unlocking is needed since disabling the eps might
+			 * stop active transfers and therefore the request
+			 * complete function will be called, where we try
+			 * to obtain the spinlock as well.
+			 */
+			msm_ep_unconfig(port->port_usb->in);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			usb_ep_disable(port->port_usb->in);
+			spin_lock_irqsave(&port->port_lock, flags);
+			if (port->tx_req) {
+				usb_ep_free_request(port->port_usb->in,
+						port->tx_req);
+				port->tx_req = NULL;
+			}
+			port->port_usb->in->endless = false;
+		}
+
+		if (port->port_usb->out) {
+			msm_ep_unconfig(port->port_usb->out);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			usb_ep_disable(port->port_usb->out);
+			spin_lock_irqsave(&port->port_lock, flags);
+			if (port->rx_req) {
+				usb_ep_free_request(port->port_usb->out,
+						port->rx_req);
+				port->rx_req = NULL;
+			}
+			port->port_usb->out->endless = false;
+		}
+
+		port->port_usb = NULL;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	queue_work(ipa_data_wq, &port->disconnect_w);
+}
+
+/**
+ * configure_fifo() - Configure USB BAM Pipe's data FIFO
+ * @bam_type: USB BAM type
+ * @idx: USB BAM Pipe index
+ * @ep: USB endpoint
+ *
+ * This function configures the USB BAM data FIFO using the pipe
+ * configuration fetched for the provided index value. It must be called
+ * before starting an endless transfer.
+ */
+static void configure_fifo(enum usb_ctrl bam_type, u8 idx, struct usb_ep *ep)
+{
+	struct sps_mem_buffer data_fifo = {0};
+	u32 usb_bam_pipe_idx;
+
+	get_bam2bam_connection_info(bam_type, idx,
+				&usb_bam_pipe_idx,
+				NULL, &data_fifo, NULL);
+	msm_data_fifo_config(ep, data_fifo.phys_base, data_fifo.size,
+			usb_bam_pipe_idx);
+}
+
+/**
+ * ipa_data_connect_work() - Perform USB IPA BAM connect
+ * @w: connect work
+ *
+ * Scheduled from the ipa_data_connect() API for a function that uses the
+ * USB IPA accelerated path. This API allocates endless requests for the
+ * USB endpoints (tx/rx), configures the USB endpoints for the accelerated
+ * path, connects the USB BAM and IPA BAM pipes, and also initiates the
+ * USB IPA BAM pipe handshake for the connect sequence.
+ */
+static void ipa_data_connect_work(struct work_struct *w)
+{
+	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+								connect_w);
+	struct gadget_ipa_port	*gport;
+	struct usb_gadget	*gadget = NULL;
+	struct teth_bridge_connect_params connect_params;
+	struct teth_bridge_init_params teth_bridge_params;
+	u32			sps_params;
+	int			ret;
+	unsigned long		flags;
+	bool			is_ipa_disconnected = true;
+
+	pr_debug("%s: Connect workqueue started\n", __func__);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_gadget_autopm_put_async(port->gadget);
+		pr_err("%s(): port_usb is NULL.\n", __func__);
+		return;
+	}
+
+	gport = port->port_usb;
+	if (gport && gport->cdev)
+		gadget = gport->cdev->gadget;
+
+	if (!gadget) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_gadget_autopm_put_async(port->gadget);
+		pr_err("%s: gport is NULL.\n", __func__);
+		return;
+	}
+
+	/*
+	 * Check whether connect_w was called twice during RNDIS resume, as
+	 * explicit flow control is used to start data transfers after
+	 * ipa_data_connect().
+	 */
+	if (port->is_connected) {
+		pr_debug("IPA connect is already done & Transfers started\n");
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_gadget_autopm_put_async(port->gadget);
+		return;
+	}
+
+	gport->ipa_consumer_ep = -1;
+	gport->ipa_producer_ep = -1;
+
+	port->is_connected = true;
+
+	/* update IPA parameters here */
+	port->ipa_params.usb_connection_speed = gadget->speed;
+	port->ipa_params.reset_pipe_after_lpm =
+				msm_dwc3_reset_ep_after_lpm(gadget);
+	port->ipa_params.skip_ep_cfg = true;
+	port->ipa_params.keep_ipa_awake = true;
+	port->ipa_params.cons_clnt_hdl = -1;
+	port->ipa_params.prod_clnt_hdl = -1;
+
+	if (gport->out) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_bam_alloc_fifos(port->usb_bam_type,
+						port->src_connection_idx);
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (!port->port_usb || port->rx_req == NULL) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			pr_err("%s: port_usb is NULL, or rx_req cleaned\n",
+				__func__);
+			goto out;
+		}
+
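+		/* pack the SPS mode flags and BAM pipe index into udc_priv */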
+		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
+				| MSM_PRODUCER | port->src_pipe_idx;
+		port->rx_req->length = 32*1024;
+		port->rx_req->udc_priv = sps_params;
+		configure_fifo(port->usb_bam_type,
+				port->src_connection_idx,
+				port->port_usb->out);
+		ret = msm_ep_config(gport->out);
+		if (ret) {
+			pr_err("msm_ep_config() failed for OUT EP\n");
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto out;
+		}
+	}
+
+	if (gport->in) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_bam_alloc_fifos(port->usb_bam_type,
+						port->dst_connection_idx);
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (!port->port_usb || port->tx_req == NULL) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			pr_err("%s: port_usb is NULL, or tx_req cleaned\n",
+				__func__);
+			goto unconfig_msm_ep_out;
+		}
+		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
+						port->dst_pipe_idx;
+		port->tx_req->length = 32*1024;
+		port->tx_req->udc_priv = sps_params;
+		configure_fifo(port->usb_bam_type,
+				port->dst_connection_idx, gport->in);
+		ret = msm_ep_config(gport->in);
+		if (ret) {
+			pr_err("msm_ep_config() failed for IN EP\n");
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto unconfig_msm_ep_out;
+		}
+	}
+
+	if (port->func_type == USB_IPA_FUNC_RMNET) {
+		teth_bridge_params.client = port->ipa_params.src_client;
+		ret = teth_bridge_init(&teth_bridge_params);
+		if (ret) {
+			pr_err("%s:teth_bridge_init() failed\n", __func__);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto unconfig_msm_ep_in;
+		}
+	}
+
+	/*
+	 * Perform the operations below for the OUT transfer (host to device):
+	 * 1. Connect the USB BAM pipe with the IPA BAM pipe.
+	 * 2. Update USB endpoint related information using SPS params.
+	 * 3. Configure the USB endpoint/DBM for the same.
+	 * 4. Override USB ep queue functionality for endless transfer.
+	 */
+	if (gport->out) {
+		pr_debug("configure bam ipa connect for USB OUT\n");
+		port->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
+
+		if (port->func_type == USB_IPA_FUNC_RNDIS) {
+			port->ipa_params.notify = rndis_qc_get_ipa_rx_cb();
+			port->ipa_params.priv = rndis_qc_get_ipa_priv();
+			port->ipa_params.skip_ep_cfg =
+				rndis_qc_get_skip_ep_config();
+		} else if (port->func_type == USB_IPA_FUNC_RMNET) {
+			port->ipa_params.notify =
+				teth_bridge_params.usb_notify_cb;
+			port->ipa_params.priv =
+				teth_bridge_params.private_data;
+			port->ipa_params.reset_pipe_after_lpm =
+				msm_dwc3_reset_ep_after_lpm(gadget);
+			port->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+			port->ipa_params.skip_ep_cfg =
+				teth_bridge_params.skip_ep_cfg;
+		}
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		ret = usb_bam_connect_ipa(port->usb_bam_type,
+						&port->ipa_params);
+		if (ret) {
+			pr_err("usb_bam_connect_ipa out failed err:%d\n", ret);
+			goto disconnect_usb_bam_ipa_out;
+		}
+		spin_lock_irqsave(&port->port_lock, flags);
+		is_ipa_disconnected = false;
+		/* check if USB cable is disconnected or not */
+		if (!port->port_usb) {
+			pr_debug("%s:%d: cable is disconnected.\n",
+						__func__, __LINE__);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto disconnect_usb_bam_ipa_out;
+		}
+
+		gport->ipa_consumer_ep = port->ipa_params.ipa_cons_ep_idx;
+	}
+
+	if (gport->in) {
+		pr_debug("configure bam ipa connect for USB IN\n");
+		port->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
+
+		if (port->func_type == USB_IPA_FUNC_RNDIS) {
+			port->ipa_params.notify = rndis_qc_get_ipa_tx_cb();
+			port->ipa_params.priv = rndis_qc_get_ipa_priv();
+			port->ipa_params.skip_ep_cfg =
+				rndis_qc_get_skip_ep_config();
+		} else if (port->func_type == USB_IPA_FUNC_RMNET) {
+			port->ipa_params.notify =
+				teth_bridge_params.usb_notify_cb;
+			port->ipa_params.priv =
+				teth_bridge_params.private_data;
+			port->ipa_params.reset_pipe_after_lpm =
+				msm_dwc3_reset_ep_after_lpm(gadget);
+			port->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+			port->ipa_params.skip_ep_cfg =
+				teth_bridge_params.skip_ep_cfg;
+		}
+
+		if (port->func_type == USB_IPA_FUNC_DPL)
+			port->ipa_params.dst_client = IPA_CLIENT_USB_DPL_CONS;
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		ret = usb_bam_connect_ipa(port->usb_bam_type,
+						&port->ipa_params);
+		if (ret) {
+			pr_err("usb_bam_connect_ipa IN failed err:%d\n", ret);
+			goto disconnect_usb_bam_ipa_out;
+		}
+		spin_lock_irqsave(&port->port_lock, flags);
+		is_ipa_disconnected = false;
+		/* check if USB cable is disconnected or not */
+		if (!port->port_usb) {
+			pr_debug("%s:%d: cable is disconnected.\n",
+						__func__, __LINE__);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto disconnect_usb_bam_ipa_out;
+		}
+
+		gport->ipa_producer_ep = port->ipa_params.ipa_prod_ep_idx;
+	}
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	if (port->func_type == USB_IPA_FUNC_RNDIS) {
+		rndis_data->prod_clnt_hdl =
+			port->ipa_params.prod_clnt_hdl;
+		rndis_data->cons_clnt_hdl =
+			port->ipa_params.cons_clnt_hdl;
+		rndis_data->priv = port->ipa_params.priv;
+
+		pr_debug("ul_max_transfer_size:%d\n",
+				rndis_data->ul_max_transfer_size);
+		pr_debug("ul_max_packets_number:%d\n",
+				rndis_data->ul_max_packets_number);
+		pr_debug("dl_max_transfer_size:%d\n",
+				rndis_data->dl_max_transfer_size);
+
+		ret = rndis_ipa_pipe_connect_notify(
+				rndis_data->cons_clnt_hdl,
+				rndis_data->prod_clnt_hdl,
+				rndis_data->ul_max_transfer_size,
+				rndis_data->ul_max_packets_number,
+				rndis_data->dl_max_transfer_size,
+				rndis_data->priv);
+		if (ret) {
+			pr_err("%s: failed to connect IPA: err:%d\n",
+				__func__, ret);
+			return;
+		}
+		atomic_set(&port->pipe_connect_notified, 1);
+	} else if (port->func_type == USB_IPA_FUNC_RMNET ||
+			port->func_type == USB_IPA_FUNC_DPL) {
+		/* For RmNet and DPL need to update_ipa_pipes to qti */
+		enum qti_port_type qti_port_type = port->func_type ==
+			USB_IPA_FUNC_RMNET ? QTI_PORT_RMNET : QTI_PORT_DPL;
+		gqti_ctrl_update_ipa_pipes(port->port_usb, qti_port_type,
+			gport->ipa_producer_ep, gport->ipa_consumer_ep);
+	}
+
+	if (port->func_type == USB_IPA_FUNC_RMNET) {
+		connect_params.ipa_usb_pipe_hdl =
+			port->ipa_params.prod_clnt_hdl;
+		connect_params.usb_ipa_pipe_hdl =
+			port->ipa_params.cons_clnt_hdl;
+		connect_params.tethering_mode =
+			TETH_TETHERING_MODE_RMNET;
+		connect_params.client_type =
+			port->ipa_params.src_client;
+		ret = teth_bridge_connect(&connect_params);
+		if (ret) {
+			pr_err("%s:teth_bridge_connect() failed\n", __func__);
+			goto disconnect_usb_bam_ipa_out;
+		}
+	}
+
+	pr_debug("ipa_producer_ep:%d ipa_consumer_ep:%d\n",
+				gport->ipa_producer_ep,
+				gport->ipa_consumer_ep);
+
+	pr_debug("src_bam_idx:%d dst_bam_idx:%d\n",
+			port->src_connection_idx, port->dst_connection_idx);
+
+	/* Don't queue the transfers yet, only after network stack is up */
+	if (port->func_type == USB_IPA_FUNC_RNDIS) {
+		pr_debug("%s: Not starting now, waiting for network notify",
+			__func__);
+		return;
+	}
+
+	if (gport->out)
+		ipa_data_start_endless_xfer(port, false);
+	if (gport->in)
+		ipa_data_start_endless_xfer(port, true);
+
+	pr_debug("Connect workqueue done (port %pK)", port);
+	return;
+
+disconnect_usb_bam_ipa_out:
+	if (!is_ipa_disconnected) {
+		usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params);
+		is_ipa_disconnected = true;
+	}
+	if (port->func_type == USB_IPA_FUNC_RMNET)
+		teth_bridge_disconnect(port->ipa_params.src_client);
+unconfig_msm_ep_in:
+	spin_lock_irqsave(&port->port_lock, flags);
+	/* check if USB cable is disconnected or not */
+	if (port->port_usb && gport->in)
+		msm_ep_unconfig(port->port_usb->in);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+unconfig_msm_ep_out:
+	if (gport->in)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->dst_connection_idx);
+	spin_lock_irqsave(&port->port_lock, flags);
+	/* check if USB cable is disconnected or not */
+	if (port->port_usb && gport->out)
+		msm_ep_unconfig(port->port_usb->out);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+out:
+	if (gport->out)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->src_connection_idx);
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->is_connected = false;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	usb_gadget_autopm_put_async(port->gadget);
+}
+
+/**
+ * ipa_data_connect() - Prepare IPA params and enable USB endpoints
+ * @gp: USB IPA gadget port
+ * @func: IPA function type used by the accelerated function
+ * @src_connection_idx: USB BAM pipe index used as producer
+ * @dst_connection_idx: USB BAM pipe index used as consumer
+ *
+ * Called from the accelerated function driver's set_alt() to initiate the
+ * USB BAM IPA connection. It enables the accelerated endpoints and schedules
+ * connect_w, which establishes the USB IPA BAM communication.
+ */
+int ipa_data_connect(struct gadget_ipa_port *gp, enum ipa_func_type func,
+		u8 src_connection_idx, u8 dst_connection_idx)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+	int ret = 0;
+
+	pr_debug("dev:%pK port#%d src_connection_idx:%d dst_connection_idx:%d\n",
+			gp, func, src_connection_idx, dst_connection_idx);
+
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("invalid portno#%d\n", func);
+		ret = -ENODEV;
+		goto err;
+	}
+
+	if (!gp) {
+		pr_err("gadget port is null\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	port = ipa_data_ports[func];
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = gp;
+	port->gadget = gp->cdev->gadget;
+
+	if (gp->out) {
+		port->rx_req = usb_ep_alloc_request(gp->out, GFP_ATOMIC);
+		if (!port->rx_req) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			pr_err("%s: failed to allocate rx_req\n", __func__);
+			ret = -ENOMEM;
+			goto err;
+		}
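+		/*
+		 * Endless request: zero length with no_interrupt set, so the
+		 * UDC keeps it queued without raising transfer-complete
+		 * interrupts while the BAM hardware moves the data.
+		 */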
+		port->rx_req->context = port;
+		port->rx_req->complete = ipa_data_endless_complete;
+		port->rx_req->length = 0;
+		port->rx_req->no_interrupt = 1;
+	}
+
+	if (gp->in) {
+		port->tx_req = usb_ep_alloc_request(gp->in, GFP_ATOMIC);
+		if (!port->tx_req) {
+			pr_err("%s: failed to allocate tx_req\n", __func__);
+			ret = -ENOMEM;
+			goto free_rx_req;
+		}
+		port->tx_req->context = port;
+		port->tx_req->complete = ipa_data_endless_complete;
+		port->tx_req->length = 0;
+		port->tx_req->no_interrupt = 1;
+	}
+	port->src_connection_idx = src_connection_idx;
+	port->dst_connection_idx = dst_connection_idx;
+	port->usb_bam_type = usb_bam_get_bam_type(gp->cdev->gadget->name);
+
+	port->ipa_params.src_pipe = &(port->src_pipe_idx);
+	port->ipa_params.dst_pipe = &(port->dst_pipe_idx);
+	port->ipa_params.src_idx = src_connection_idx;
+	port->ipa_params.dst_idx = dst_connection_idx;
+
+	/*
+	 * Disable the Xfer complete and Xfer not ready interrupts by
+	 * marking the endless flag, which the UDC driver checks before
+	 * enabling them. With this flag set, these interrupts won't be
+	 * enabled for the selected endpoints.
+	 */
+	if (port->port_usb->in) {
+		port->port_usb->in->endless = true;
+		ret = usb_ep_enable(port->port_usb->in);
+		if (ret) {
+			pr_err("usb_ep_enable failed eptype:IN ep:%pK",
+						port->port_usb->in);
+			usb_ep_free_request(port->port_usb->in, port->tx_req);
+			port->tx_req = NULL;
+			port->port_usb->in->endless = false;
+			goto err_usb_in;
+		}
+	}
+
+	if (port->port_usb->out) {
+		port->port_usb->out->endless = true;
+		ret = usb_ep_enable(port->port_usb->out);
+		if (ret) {
+			pr_err("usb_ep_enable failed eptype:OUT ep:%pK",
+						port->port_usb->out);
+			usb_ep_free_request(port->port_usb->out, port->rx_req);
+			port->rx_req = NULL;
+			port->port_usb->out->endless = false;
+			goto err_usb_out;
+		}
+	}
+
+	/* Wait for host to enable flow_control */
+	if (port->func_type == USB_IPA_FUNC_RNDIS) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return 0;
+	}
+
+	/*
+	 * Increment usage count upon cable connect. Decrement after IPA
+	 * handshake is done in disconnect work (due to cable disconnect)
+	 * or in suspend work.
+	 */
+	usb_gadget_autopm_get_noresume(port->gadget);
+
+	queue_work(ipa_data_wq, &port->connect_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return ret;
+
+err_usb_out:
+	if (port->port_usb->in) {
+		usb_ep_disable(port->port_usb->in);
+		port->port_usb->in->endless = false;
+	}
+err_usb_in:
+	if (gp->in && port->tx_req) {
+		usb_ep_free_request(gp->in, port->tx_req);
+		port->tx_req = NULL;
+	}
+free_rx_req:
+	if (gp->out && port->rx_req) {
+		usb_ep_free_request(gp->out, port->rx_req);
+		port->rx_req = NULL;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+err:
+	pr_debug("%s(): failed with error:%d\n", __func__, ret);
+	return ret;
+}
+
+/**
+ * ipa_data_start() - Restart USB endless transfer
+ * @param: IPA data channel information
+ * @dir: USB BAM pipe direction
+ *
+ * Used to restart USB endless transfers on USB bus resume. For the USB
+ * consumer case it restarts the endless RX transfer, whereas for the USB
+ * producer case it resets the DBM endpoint and restarts the endless TX
+ * transfer.
+ */
+static void ipa_data_start(void *param, enum usb_bam_pipe_dir dir)
+{
+	struct ipa_data_ch_info *port = param;
+	struct usb_gadget *gadget = NULL;
+
+	if (!port || !port->port_usb || !port->port_usb->cdev->gadget) {
+		pr_err("%s: port, cdev or gadget is NULL\n", __func__);
+		return;
+	}
+
+	gadget = port->port_usb->cdev->gadget;
+	if (dir == USB_TO_PEER_PERIPHERAL) {
+		pr_debug("%s(): start endless RX\n", __func__);
+		ipa_data_start_endless_xfer(port, false);
+	} else {
+		pr_debug("%s(): start endless TX\n", __func__);
+		if (msm_dwc3_reset_ep_after_lpm(gadget)) {
+			configure_fifo(port->usb_bam_type,
+				 port->dst_connection_idx, port->port_usb->in);
+		}
+		ipa_data_start_endless_xfer(port, true);
+	}
+}
+
+/**
+ * ipa_data_stop() - Stop endless Tx/Rx transfers
+ * @param: IPA data channel information
+ * @dir: USB BAM pipe direction
+ *
+ * Used to stop the endless Tx/Rx transfers as part of USB bus suspend
+ * handling.
+ */
+static void ipa_data_stop(void *param, enum usb_bam_pipe_dir dir)
+{
+	struct ipa_data_ch_info *port = param;
+	struct usb_gadget *gadget = NULL;
+
+	if (!port || !port->port_usb || !port->port_usb->cdev->gadget) {
+		pr_err("%s: port, cdev or gadget is NULL\n", __func__);
+		return;
+	}
+
+	gadget = port->port_usb->cdev->gadget;
+	if (dir == USB_TO_PEER_PERIPHERAL) {
+		pr_debug("%s(): stop endless RX transfer\n", __func__);
+		ipa_data_stop_endless_xfer(port, false);
+	} else {
+		pr_debug("%s(): stop endless TX transfer\n", __func__);
+		ipa_data_stop_endless_xfer(port, true);
+	}
+}
+
+void ipa_data_flush_workqueue(void)
+{
+	pr_debug("%s(): Flushing workqueue\n", __func__);
+	flush_workqueue(ipa_data_wq);
+}
+
+/**
+ * ipa_data_suspend() - Initiate USB BAM IPA suspend functionality
+ * @gp: Gadget IPA port
+ * @func: IPA function type used by the function driver
+ * @remote_wakeup_enabled: true if remote wakeup is enabled by the host
+ *
+ * Used to initiate the USB BAM IPA suspend sequence on USB bus suspend.
+ */
+void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
+			bool remote_wakeup_enabled)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("invalid ipa portno#%d\n", func);
+		return;
+	}
+
+	if (!gp) {
+		pr_err("data port is null\n");
+		return;
+	}
+	pr_debug("%s: suspended port %d\n", __func__, func);
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("%s(): Port is NULL.\n", __func__);
+		return;
+	}
+
+	/* suspend with remote wakeup disabled */
+	if (!remote_wakeup_enabled) {
+		/*
+		 * When remote wakeup is disabled, IPA BAM is disconnected
+		 * because it cannot send new data until the USB bus is resumed.
+		 * Endpoint descriptors info is saved before it gets reset by
+		 * the BAM disconnect API. This lets us restore this info when
+		 * the USB bus is resumed.
+		 */
+		if (gp->in) {
+			gp->in_ep_desc_backup = gp->in->desc;
+			pr_debug("in_ep_desc_backup = %pK\n",
+				gp->in_ep_desc_backup);
+		}
+		if (gp->out) {
+			gp->out_ep_desc_backup = gp->out->desc;
+			pr_debug("out_ep_desc_backup = %pK\n",
+				gp->out_ep_desc_backup);
+		}
+		ipa_data_disconnect(gp, func);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	queue_work(ipa_data_wq, &port->suspend_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void bam2bam_data_suspend_work(struct work_struct *w)
+{
+	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+								suspend_w);
+	unsigned long flags;
+	int ret;
+
+	pr_debug("%s: suspend started\n", __func__);
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/*
+	 * In case of RNDIS, the host enables flow_control, which invokes
+	 * connect_w. If that is delayed, suspend_w may end up running before
+	 * connect_w. In this scenario connect_w may never start at all if the
+	 * cable gets disconnected or the host changes configuration, e.g.
+	 * RNDIS --> MBIM. For these cases don't do a runtime_put as there was
+	 * no _get yet, and detect this condition on disconnect so as not to do
+	 * an extra pm_runtime_get for the SUSPEND --> DISCONNECT scenario.
+	 */
+	if (!port->is_connected) {
+		pr_err("%s: Not yet connected. SUSPEND pending.\n", __func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+	ret = usb_bam_register_wake_cb(port->usb_bam_type,
+			port->dst_connection_idx, NULL, port);
+	if (ret) {
+		pr_err("%s(): Failed to register BAM wake callback.\n",
+				__func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	usb_bam_register_start_stop_cbs(port->usb_bam_type,
+			port->dst_connection_idx, ipa_data_start,
+			ipa_data_stop, port);
+	/*
+	 * release lock here because bam_data_start() or
+	 * bam_data_stop() called from usb_bam_suspend()
+	 * re-acquires port lock.
+	 */
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	usb_bam_suspend(port->usb_bam_type, &port->ipa_params);
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/*
+	 * Decrement usage count after IPA handshake is done
+	 * to allow gadget parent to go to lpm. This counter was
+	 * incremented upon cable connect.
+	 */
+	usb_gadget_autopm_put_async(port->gadget);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/**
+ * ipa_data_resume() - Initiate USB resume functionality
+ * @gp: Gadget IPA port
+ * @func: IPA function type used by the function driver
+ * @remote_wakeup_enabled: true if remote wakeup is enabled by the host
+ *
+ * Used to initiate the USB resume sequence on USB bus resume.
+ */
+void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
+			bool remote_wakeup_enabled)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+	struct usb_gadget *gadget = NULL;
+	u8 src_connection_idx = 0;
+	u8 dst_connection_idx = 0;
+	enum usb_ctrl usb_bam_type;
+
+	pr_debug("dev:%pK port number:%d\n", gp, func);
+
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("invalid ipa portno#%d\n", func);
+		return;
+	}
+
+	if (!gp) {
+		pr_err("data port is null\n");
+		return;
+	}
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("port %u is NULL", func);
+		return;
+	}
+
+	gadget = gp->cdev->gadget;
+	/* resume with remote wakeup disabled */
+	if (!remote_wakeup_enabled) {
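+		/* DPL uses the second BAM pipe; other functions use pipe 0 */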
+		int bam_pipe_num = (func == USB_IPA_FUNC_DPL) ? 1 : 0;
+
+		usb_bam_type = usb_bam_get_bam_type(gadget->name);
+		/* Restore endpoint descriptors info. */
+		if (gp->in) {
+			gp->in->desc = gp->in_ep_desc_backup;
+			pr_debug("in_ep_desc_backup = %pK\n",
+				gp->in_ep_desc_backup);
+			dst_connection_idx = usb_bam_get_connection_idx(
+				usb_bam_type, IPA_P_BAM, PEER_PERIPHERAL_TO_USB,
+				USB_BAM_DEVICE, bam_pipe_num);
+		}
+		if (gp->out) {
+			gp->out->desc = gp->out_ep_desc_backup;
+			pr_debug("out_ep_desc_backup = %pK\n",
+				gp->out_ep_desc_backup);
+			src_connection_idx = usb_bam_get_connection_idx(
+				usb_bam_type, IPA_P_BAM, USB_TO_PEER_PERIPHERAL,
+				USB_BAM_DEVICE, bam_pipe_num);
+		}
+		ipa_data_connect(gp, func,
+				src_connection_idx, dst_connection_idx);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/*
+	 * Increment usage count here to disallow gadget
+	 * parent suspend. This counter will decrement
+	 * after IPA handshake is done in disconnect work
+	 * (due to cable disconnect) or in bam_data_disconnect
+	 * in suspended state.
+	 */
+	usb_gadget_autopm_get_noresume(port->gadget);
+	queue_work(ipa_data_wq, &port->resume_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void bam2bam_data_resume_work(struct work_struct *w)
+{
+	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+								resume_w);
+	struct usb_gadget *gadget;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb || !port->port_usb->cdev) {
+		pr_err("port->port_usb or cdev is NULL");
+		goto exit;
+	}
+
+	if (!port->port_usb->cdev->gadget) {
+		pr_err("port->port_usb->cdev->gadget is NULL");
+		goto exit;
+	}
+
+	pr_debug("%s: resume started\n", __func__);
+	gadget = port->port_usb->cdev->gadget;
+
+	ret = usb_bam_register_wake_cb(port->usb_bam_type,
+				port->dst_connection_idx, NULL, NULL);
+	if (ret) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s(): Failed to register BAM wake callback.\n",
+								__func__);
+		return;
+	}
+
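+	/*
+	 * If the hardware requires an endpoint reset after LPM, reprogram
+	 * the BAM FIFOs and reset the DBM IN endpoint, then resume the
+	 * BAM pipes.
+	 */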
+	if (msm_dwc3_reset_ep_after_lpm(gadget)) {
+		configure_fifo(port->usb_bam_type, port->src_connection_idx,
+				port->port_usb->out);
+		configure_fifo(port->usb_bam_type, port->dst_connection_idx,
+				port->port_usb->in);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		msm_dwc3_reset_dbm_ep(port->port_usb->in);
+		spin_lock_irqsave(&port->port_lock, flags);
+	}
+	usb_bam_resume(port->usb_bam_type, &port->ipa_params);
+
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/**
+ * ipa_data_port_alloc() - Allocate an IPA USB port structure
+ * @func: IPA function type that will use this port
+ *
+ * Used by a USB function driver to allocate an IPA data port for the
+ * USB IPA accelerated data path.
+ *
+ * Return: 0 in case of success, otherwise errno.
+ */
+static int ipa_data_port_alloc(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *port = NULL;
+
+	if (ipa_data_ports[func] != NULL) {
+		pr_debug("port %d already allocated.\n", func);
+		return 0;
+	}
+
+	port = kzalloc(sizeof(struct ipa_data_ch_info), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	ipa_data_ports[func] = port;
+
+	pr_debug("port:%pK with portno:%d allocated\n", port, func);
+	return 0;
+}
+
+/**
+ * ipa_data_port_select() - Select particular port for BAM2BAM IPA mode
+ * @func: IPA function type that will use this port
+ *
+ * Used by a USB function driver to select which BAM2BAM IPA port the
+ * function wants to use.
+ */
+void ipa_data_port_select(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *port = NULL;
+
+	pr_debug("portno:%d\n", func);
+
+	port = ipa_data_ports[func];
+	port->port_num  = func;
+	port->is_connected = false;
+
+	spin_lock_init(&port->port_lock);
+
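+	/* Don't re-initialize a work item that may already be queued */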
+	if (!work_pending(&port->connect_w))
+		INIT_WORK(&port->connect_w, ipa_data_connect_work);
+
+	if (!work_pending(&port->disconnect_w))
+		INIT_WORK(&port->disconnect_w, ipa_data_disconnect_work);
+
+	INIT_WORK(&port->suspend_w, bam2bam_data_suspend_work);
+	INIT_WORK(&port->resume_w, bam2bam_data_resume_work);
+
+	port->ipa_params.src_client = IPA_CLIENT_USB_PROD;
+	port->ipa_params.dst_client = IPA_CLIENT_USB_CONS;
+	port->func_type = func;
+}
+
+void ipa_data_free(enum ipa_func_type func)
+{
+	pr_debug("freeing %d IPA BAM port", func);
+
+	kfree(ipa_data_ports[func]);
+	ipa_data_ports[func] = NULL;
+	if (func == USB_IPA_FUNC_RNDIS)
+		kfree(rndis_data);
+	if (ipa_data_wq) {
+		destroy_workqueue(ipa_data_wq);
+		ipa_data_wq = NULL;
+	}
+}
+
+/**
+ * ipa_data_setup() - Set up a BAM2BAM IPA port
+ * @func: IPA function type that will use this port
+ *
+ * Each USB function that wants to use a BAM2BAM IPA port counts the IPA
+ * ports it needs and initializes them at bind_config() time in the
+ * android gadget driver.
+ *
+ * Return: 0 in case of success, otherwise errno.
+ */
+int ipa_data_setup(enum ipa_func_type func)
+{
+	int ret;
+
+	pr_debug("requested %d IPA BAM port", func);
+
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("Invalid IPA function type:%d\n", func);
+		return -EINVAL;
+	}
+
+	ret = ipa_data_port_alloc(func);
+	if (ret) {
+		pr_err("Failed to alloc port:%d\n", func);
+		return ret;
+	}
+
+	if (func == USB_IPA_FUNC_RNDIS) {
+		rndis_data = kzalloc(sizeof(*rndis_data), GFP_KERNEL);
+		if (!rndis_data) {
+			pr_err("%s: failed to allocate rndis_data\n",
+				__func__);
+			ret = -ENOMEM;
+			goto free_ipa_ports;
+		}
+	}
+	if (ipa_data_wq) {
+		pr_debug("ipa_data_wq is already setup.");
+		return 0;
+	}
+
+	ipa_data_wq = alloc_workqueue("k_usb_ipa_data",
+				WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!ipa_data_wq) {
+		pr_err("Failed to create workqueue\n");
+		ret = -ENOMEM;
+		goto free_rndis_data;
+	}
+
+	return 0;
+
+free_rndis_data:
+	if (func == USB_IPA_FUNC_RNDIS)
+		kfree(rndis_data);
+free_ipa_ports:
+	kfree(ipa_data_ports[func]);
+	ipa_data_ports[func] = NULL;
+
+	return ret;
+}
+
+void ipa_data_set_ul_max_xfer_size(u32 max_transfer_size)
+{
+	if (!max_transfer_size) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+	rndis_data->ul_max_transfer_size = max_transfer_size;
+	pr_debug("%s(): ul_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+void ipa_data_set_dl_max_xfer_size(u32 max_transfer_size)
+{
+	if (!max_transfer_size) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+	rndis_data->dl_max_transfer_size = max_transfer_size;
+	pr_debug("%s(): dl_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+void ipa_data_set_ul_max_pkt_num(u8 max_packets_number)
+{
+	if (!max_packets_number) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+
+	rndis_data->ul_max_packets_number = max_packets_number;
+
+	if (max_packets_number > 1)
+		rndis_data->ul_aggregation_enable = true;
+	else
+		rndis_data->ul_aggregation_enable = false;
+
+	pr_debug("%s(): ul_aggregation enable:%d ul_max_packets_number:%d\n",
+				__func__, rndis_data->ul_aggregation_enable,
+				max_packets_number);
+}
+
+void ipa_data_start_rndis_ipa(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *port;
+
+	pr_debug("%s\n", __func__);
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("%s: port is NULL", __func__);
+		return;
+	}
+
+	if (atomic_read(&port->pipe_connect_notified)) {
+		pr_debug("%s: Transfers already started?\n", __func__);
+		return;
+	}
+	/*
+	 * Increment usage count upon cable connect. Decrement after IPA
+	 * handshake is done in disconnect work due to cable disconnect
+	 * or in suspend work.
+	 */
+	usb_gadget_autopm_get_noresume(port->gadget);
+	queue_work(ipa_data_wq, &port->connect_w);
+}
+
+void ipa_data_stop_rndis_ipa(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+
+	pr_debug("%s\n", __func__);
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("%s: port is NULL", __func__);
+		return;
+	}
+
+	if (!atomic_read(&port->pipe_connect_notified))
+		return;
+
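+	/*
+	 * Signal the RNDIS IPA driver, stop both endless transfers, then
+	 * queue disconnect_w to tear the BAM pipes down.
+	 */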
+	rndis_ipa_reset_trigger();
+	ipa_data_stop_endless_xfer(port, true);
+	ipa_data_stop_endless_xfer(port, false);
+	spin_lock_irqsave(&port->port_lock, flags);
+	/* check if USB cable is disconnected or not */
+	if (port->port_usb) {
+		msm_ep_unconfig(port->port_usb->in);
+		msm_ep_unconfig(port->port_usb->out);
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	queue_work(ipa_data_wq, &port->disconnect_w);
+}
diff --git a/drivers/usb/gadget/function/u_data_ipa.h b/drivers/usb/gadget/function/u_data_ipa.h
new file mode 100644
index 0000000..70d4293
--- /dev/null
+++ b/drivers/usb/gadget/function/u_data_ipa.h
@@ -0,0 +1,127 @@
+/* Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_DATA_IPA_H
+#define __U_DATA_IPA_H
+
+#include <linux/usb/composite.h>
+#include <linux/rndis_ipa.h>
+#include <linux/usb/gadget.h>
+#include <linux/cdev.h>
+#include <linux/ipa_usb.h>
+#include <linux/usb_bam.h>
+
+#include "u_rmnet.h"
+
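+/* Accelerated USB functions that can own a BAM2BAM IPA data port */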
+enum ipa_func_type {
+	USB_IPA_FUNC_ECM,
+	USB_IPA_FUNC_MBIM,
+	USB_IPA_FUNC_RMNET,
+	USB_IPA_FUNC_RNDIS,
+	USB_IPA_FUNC_DPL,
+	USB_IPA_NUM_FUNCS,
+};
+
+/* Max Number of IPA data ports supported */
+#define IPA_N_PORTS USB_IPA_NUM_FUNCS
+
+struct gadget_ipa_port {
+	struct usb_composite_dev	*cdev;
+	struct usb_function		*func;
+	int				rx_buffer_size;
+	struct usb_ep			*in;
+	struct usb_ep			*out;
+	int				ipa_consumer_ep;
+	int				ipa_producer_ep;
+	const struct usb_endpoint_descriptor	*in_ep_desc_backup;
+	const struct usb_endpoint_descriptor	*out_ep_desc_backup;
+};
+
+struct ipa_function_bind_info {
+	struct usb_string *string_defs;
+	int data_str_idx;
+	struct usb_interface_descriptor *data_desc;
+	struct usb_endpoint_descriptor *fs_in_desc;
+	struct usb_endpoint_descriptor *fs_out_desc;
+	struct usb_endpoint_descriptor *fs_notify_desc;
+	struct usb_endpoint_descriptor *hs_in_desc;
+	struct usb_endpoint_descriptor *hs_out_desc;
+	struct usb_endpoint_descriptor *hs_notify_desc;
+	struct usb_endpoint_descriptor *ss_in_desc;
+	struct usb_endpoint_descriptor *ss_out_desc;
+	struct usb_endpoint_descriptor *ss_notify_desc;
+
+	struct usb_descriptor_header **fs_desc_hdr;
+	struct usb_descriptor_header **hs_desc_hdr;
+	struct usb_descriptor_header **ss_desc_hdr;
+};
+
+/* for configfs support */
+#define MAX_INST_NAME_LEN      40
+
+struct f_rndis_qc_opts {
+	struct usb_function_instance	func_inst;
+	struct f_rndis_qc		*rndis;
+	u32				vendor_id;
+	const char			*manufacturer;
+	struct net_device		*net;
+	int				refcnt;
+};
+
+struct f_rmnet_opts {
+	struct usb_function_instance func_inst;
+	struct f_rmnet *dev;
+	int refcnt;
+};
+
+void ipa_data_port_select(enum ipa_func_type func);
+void ipa_data_disconnect(struct gadget_ipa_port *gp, enum ipa_func_type func);
+int ipa_data_connect(struct gadget_ipa_port *gp, enum ipa_func_type func,
+			u8 src_connection_idx, u8 dst_connection_idx);
+int ipa_data_setup(enum ipa_func_type func);
+void ipa_data_free(enum ipa_func_type func);
+
+void ipa_data_flush_workqueue(void);
+void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
+		bool remote_wakeup_enabled);
+void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
+		bool remote_wakeup_enabled);
+
+void ipa_data_set_ul_max_xfer_size(u32 ul_max_xfer_size);
+
+void ipa_data_set_dl_max_xfer_size(u32 dl_max_transfer_size);
+
+void ipa_data_set_ul_max_pkt_num(u8 ul_max_packets_number);
+
+void ipa_data_start_rx_tx(enum ipa_func_type func);
+
+void ipa_data_start_rndis_ipa(enum ipa_func_type func);
+
+void ipa_data_stop_rndis_ipa(enum ipa_func_type func);
+
+void *rndis_qc_get_ipa_priv(void);
+void *rndis_qc_get_ipa_rx_cb(void);
+bool rndis_qc_get_skip_ep_config(void);
+void *rndis_qc_get_ipa_tx_cb(void);
+void rndis_ipa_reset_trigger(void);
+#if IS_ENABLED(CONFIG_USB_CONFIGFS_RMNET_BAM)
+void gqti_ctrl_update_ipa_pipes(void *gr, enum qti_port_type qport,
+				u32 ipa_prod, u32 ipa_cons);
+#else
+static inline void gqti_ctrl_update_ipa_pipes(void *gr,
+				enum qti_port_type qport,
+				u32 ipa_prod, u32 ipa_cons)
+{
+}
+#endif /* CONFIG_USB_CONFIGFS_RMNET_BAM */
+#endif
diff --git a/drivers/usb/gadget/function/u_rmnet.h b/drivers/usb/gadget/function/u_rmnet.h
new file mode 100644
index 0000000..0126932
--- /dev/null
+++ b/drivers/usb/gadget/function/u_rmnet.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_RMNET_H
+#define __U_RMNET_H
+
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "f_qdss.h"
+
+struct rmnet_ctrl_pkt {
+	void	*buf;
+	int	len;
+	struct list_head	list;
+};
+
+struct grmnet {
+	/* to the USB host (laptop, Windows PC, etc.); filled in
+	 * by the USB driver implementing the rmnet function
+	 */
+	int (*send_cpkt_response)(void *g, void *buf, size_t len);
+
+	/* to modem, and to be filled by driver implementing
+	 * control function
+	 */
+	int (*send_encap_cmd)(enum qti_port_type qport, void *buf, size_t len);
+	void (*notify_modem)(void *g, enum qti_port_type qport, int cbits);
+
+	void (*disconnect)(struct grmnet *g);
+	void (*connect)(struct grmnet *g);
+};
+
+enum ctrl_client {
+	FRMNET_CTRL_CLIENT,
+	GPS_CTRL_CLIENT,
+
+	NR_CTRL_CLIENTS
+};
+
+int gqti_ctrl_connect(void *gr, enum qti_port_type qport, unsigned int intf);
+void gqti_ctrl_disconnect(void *gr, enum qti_port_type qport);
+int gqti_ctrl_init(void);
+void gqti_ctrl_cleanup(void);
+#endif /* __U_RMNET_H*/
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index f7ff9e8f..9c33c6e 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -45,6 +45,8 @@
 #define FREEZIO_N			BIT(1)
 #define POWER_DOWN			BIT(0)
 
+#define QUSB2PHY_PORT_TEST_CTRL		0xB8
+
 #define QUSB2PHY_PWR_CTRL1		0x210
 #define PWR_CTRL1_CLAMP_N_EN		BIT(1)
 #define PWR_CTRL1_POWR_DOWN		BIT(0)
@@ -68,10 +70,7 @@
 #define QUSB2PHY_PORT_TUNE2             0x84
 #define QUSB2PHY_PORT_TUNE3             0x88
 #define QUSB2PHY_PORT_TUNE4             0x8C
-
-/* In case Efuse register shows zero, use this value */
-#define TUNE2_DEFAULT_HIGH_NIBBLE	0xB
-#define TUNE2_DEFAULT_LOW_NIBBLE	0x3
+#define QUSB2PHY_PORT_TUNE5             0x90
 
 /* Get TUNE2's high nibble value read from efuse */
 #define TUNE2_HIGH_NIBBLE_VAL(val, pos, mask)	((val >> pos) & mask)
@@ -98,21 +97,42 @@
 
 #define QUSB2PHY_REFCLK_ENABLE		BIT(0)
 
-unsigned int tune2;
-module_param(tune2, uint, S_IRUGO | S_IWUSR);
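+/*
+ * Module parameters to override the QUSB PHY TUNE1-TUNE5 registers;
+ * applied during PHY init when set to a non-zero value.
+ */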
+static unsigned int tune1;
+module_param(tune1, uint, 0644);
+MODULE_PARM_DESC(tune1, "QUSB PHY TUNE1");
+
+static unsigned int tune2;
+module_param(tune2, uint, 0644);
 MODULE_PARM_DESC(tune2, "QUSB PHY TUNE2");
 
+static unsigned int tune3;
+module_param(tune3, uint, 0644);
+MODULE_PARM_DESC(tune3, "QUSB PHY TUNE3");
+
+static unsigned int tune4;
+module_param(tune4, uint, 0644);
+MODULE_PARM_DESC(tune4, "QUSB PHY TUNE4");
+
+static unsigned int tune5;
+module_param(tune5, uint, 0644);
+MODULE_PARM_DESC(tune5, "QUSB PHY TUNE5");
+
 struct qusb_phy {
 	struct usb_phy		phy;
 	void __iomem		*base;
 	void __iomem		*tune2_efuse_reg;
 	void __iomem		*ref_clk_base;
+	void __iomem		*tcsr_clamp_dig_n;
 
 	struct clk		*ref_clk_src;
 	struct clk		*ref_clk;
 	struct clk		*cfg_ahb_clk;
 	struct reset_control	*phy_reset;
+	struct clk		*iface_clk;
+	struct clk		*core_clk;
 
+	struct regulator	*gdsc;
 	struct regulator	*vdd;
 	struct regulator	*vdda33;
 	struct regulator	*vdda18;
@@ -124,6 +144,7 @@ struct qusb_phy {
 	u32			tune2_val;
 	int			tune2_efuse_bit_pos;
 	int			tune2_efuse_num_of_bits;
+	int			tune2_efuse_correction;
 
 	bool			power_enabled;
 	bool			clocks_enabled;
@@ -145,6 +166,8 @@ struct qusb_phy {
 	int			phy_pll_reset_seq_len;
 	int			*emu_dcm_reset_seq;
 	int			emu_dcm_reset_seq_len;
+	bool			put_into_high_z_state;
+	struct mutex		phy_lock;
 };
 
 static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
@@ -155,14 +178,22 @@ static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
 	if (!qphy->clocks_enabled && on) {
 		clk_prepare_enable(qphy->ref_clk_src);
 		clk_prepare_enable(qphy->ref_clk);
+		clk_prepare_enable(qphy->iface_clk);
+		clk_prepare_enable(qphy->core_clk);
 		clk_prepare_enable(qphy->cfg_ahb_clk);
 		qphy->clocks_enabled = true;
 	}
 
 	if (qphy->clocks_enabled && !on) {
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		/*
+		 * There is an FSM dependency between iface_clk and core_clk,
+		 * so turn off core_clk before iface_clk.
+		 */
+		clk_disable_unprepare(qphy->core_clk);
+		clk_disable_unprepare(qphy->iface_clk);
 		clk_disable_unprepare(qphy->ref_clk);
 		clk_disable_unprepare(qphy->ref_clk_src);
-		clk_disable_unprepare(qphy->cfg_ahb_clk);
 		qphy->clocks_enabled = false;
 	}
 
@@ -170,6 +201,32 @@ static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
 						qphy->clocks_enabled);
 }
 
+static int qusb_phy_gdsc(struct qusb_phy *qphy, bool on)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(qphy->gdsc))
+		return -EPERM;
+
+	if (on) {
+		dev_dbg(qphy->phy.dev, "TURNING ON GDSC\n");
+		ret = regulator_enable(qphy->gdsc);
+		if (ret) {
+			dev_err(qphy->phy.dev, "unable to enable gdsc\n");
+			return ret;
+		}
+	} else {
+		dev_dbg(qphy->phy.dev, "TURNING OFF GDSC\n");
+		ret = regulator_disable(qphy->gdsc);
+		if (ret) {
+			dev_err(qphy->phy.dev, "unable to disable gdsc\n");
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
 static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high)
 {
 	int min, ret;
@@ -313,6 +370,7 @@ static void qusb_phy_get_tune2_param(struct qusb_phy *qphy)
 {
 	u8 num_of_bits;
 	u32 bit_mask = 1;
+	u8 reg_val;
 
 	pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
 				qphy->tune2_efuse_num_of_bits,
@@ -326,9 +384,8 @@ static void qusb_phy_get_tune2_param(struct qusb_phy *qphy)
 
 	/*
 	 * Read EFUSE register having TUNE2 parameter's high nibble.
-	 * If efuse register shows value as 0x0, then use default value
-	 * as 0xB as high nibble. Otherwise use efuse register based
-	 * value for this purpose.
+	 * If efuse register shows value as 0x0, then use previous value
+	 * as it is. Otherwise use efuse register based value for this purpose.
 	 */
 	qphy->tune2_val = readl_relaxed(qphy->tune2_efuse_reg);
 	pr_debug("%s(): bit_mask:%d efuse based tune2 value:%d\n",
@@ -337,12 +394,24 @@ static void qusb_phy_get_tune2_param(struct qusb_phy *qphy)
 	qphy->tune2_val = TUNE2_HIGH_NIBBLE_VAL(qphy->tune2_val,
 				qphy->tune2_efuse_bit_pos, bit_mask);
 
-	if (!qphy->tune2_val)
-		qphy->tune2_val = TUNE2_DEFAULT_HIGH_NIBBLE;
+	/* Update higher nibble of TUNE2 value for better rise/fall times */
+	if (qphy->tune2_efuse_correction && qphy->tune2_val) {
+		if (qphy->tune2_efuse_correction > 5 ||
+				qphy->tune2_efuse_correction < -10)
+			pr_warn("Correction value is out of range: %d\n",
+					qphy->tune2_efuse_correction);
+		else
+			qphy->tune2_val = qphy->tune2_val +
+						qphy->tune2_efuse_correction;
+	}
 
-	/* Get TUNE2 byte value using high and low nibble value */
-	qphy->tune2_val = ((qphy->tune2_val << 0x4) |
-					TUNE2_DEFAULT_LOW_NIBBLE);
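+	/*
+	 * Keep the low nibble programmed by the init sequence; the efuse
+	 * value (plus any correction) only supplies the high nibble of TUNE2.
+	 */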
+	reg_val = readb_relaxed(qphy->base + QUSB2PHY_PORT_TUNE2);
+	if (qphy->tune2_val) {
+		reg_val &= 0x0f;
+		reg_val |= (qphy->tune2_val << 4);
+	}
+
+	qphy->tune2_val = reg_val;
 }
 
 static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
@@ -450,7 +519,7 @@ static int qusb_phy_init(struct usb_phy *phy)
 	 * and try to read EFUSE value only once i.e. not every USB
 	 * cable connect case.
 	 */
-	if (qphy->tune2_efuse_reg) {
+	if (qphy->tune2_efuse_reg && !tune2) {
 		if (!qphy->tune2_val)
 			qusb_phy_get_tune2_param(qphy);
 
@@ -460,13 +529,29 @@ static int qusb_phy_init(struct usb_phy *phy)
 				qphy->base + QUSB2PHY_PORT_TUNE2);
 	}
 
-	/* If tune2 modparam set, override tune2 value */
-	if (tune2) {
-		pr_debug("%s(): (modparam) TUNE2 val:0x%02x\n",
-						__func__, tune2);
+	/* If a tune modparam is set, override the corresponding TUNE register */
+	pr_debug("%s(): user-specified modparam TUNEx values: 0x%x %x %x %x %x\n",
+				__func__, tune1, tune2, tune3, tune4, tune5);
+	if (tune1)
+		writel_relaxed(tune1,
+				qphy->base + QUSB2PHY_PORT_TUNE1);
+
+	if (tune2)
 		writel_relaxed(tune2,
 				qphy->base + QUSB2PHY_PORT_TUNE2);
-	}
+
+	if (tune3)
+		writel_relaxed(tune3,
+				qphy->base + QUSB2PHY_PORT_TUNE3);
+
+	if (tune4)
+		writel_relaxed(tune4,
+				qphy->base + QUSB2PHY_PORT_TUNE4);
+
+	if (tune5)
+		writel_relaxed(tune5,
+				qphy->base + QUSB2PHY_PORT_TUNE5);
 
 	/* ensure above writes are completed before re-enabling PHY */
 	wmb();
@@ -596,27 +681,55 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
 			writel_relaxed(intr_mask,
 				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
 
+			if (linestate & (LINESTATE_DP | LINESTATE_DM)) {
+				/* enable phy auto-resume */
+				writel_relaxed(0x0C,
+					qphy->base + QUSB2PHY_PORT_TEST_CTRL);
+				/* flush the previous write before next write */
+				wmb();
+				writel_relaxed(0x04,
+					qphy->base + QUSB2PHY_PORT_TEST_CTRL);
+			}
+
+			dev_dbg(phy->dev, "%s: intr_mask = %x\n",
+			__func__, intr_mask);
+
+			/* Make sure that the above write goes through */
+			wmb();
+
 			qusb_phy_enable_clocks(qphy, false);
 		} else { /* Disconnect case */
+			mutex_lock(&qphy->phy_lock);
 			/* Disable all interrupts */
 			writel_relaxed(0x00,
 				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
-			/*
-			 * Phy in non-driving mode leaves Dp and Dm lines in
-			 * high-Z state. Controller power collapse is not
-			 * switching phy to non-driving mode causing charger
-			 * detection failure. Bring phy to non-driving mode by
-			 * overriding controller output via UTMI interface.
-			 */
-			writel_relaxed(TERM_SELECT | XCVR_SELECT_FS |
-				OP_MODE_NON_DRIVE,
-				qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
-			writel_relaxed(UTMI_ULPI_SEL | UTMI_TEST_MUX_SEL,
-				qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
 
+			/* Disable PHY */
+			writel_relaxed(POWER_DOWN,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+			/* Make sure that above write is completed */
+			wmb();
 
 			qusb_phy_enable_clocks(qphy, false);
-			qusb_phy_enable_power(qphy, false);
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x0,
+					qphy->tcsr_clamp_dig_n);
+			/* Do not disable power rails if there is vote for it */
+			if (!qphy->dpdm_enable)
+				qusb_phy_enable_power(qphy, false);
+			else
+				dev_dbg(phy->dev, "race with rm_pulldown. Keep ldo ON\n");
+			mutex_unlock(&qphy->phy_lock);
+
+			/*
+			 * Set put_into_high_z_state to true so that on the
+			 * next USB cable connect, the DPF_DMF request performs
+			 * a PHY reset and puts the PHY into high-Z state. At
+			 * bootup, with or without a USB cable, the QUSB PHY
+			 * does not need to be put into high-Z state.
+			 */
+			qphy->put_into_high_z_state = true;
 		}
 		qphy->suspended = true;
 	} else {
@@ -629,6 +742,9 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
 				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
 		} else {
 			qusb_phy_enable_power(qphy, true);
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x1,
+					qphy->tcsr_clamp_dig_n);
 			qusb_phy_enable_clocks(qphy, true);
 		}
 		qphy->suspended = false;
@@ -669,15 +785,61 @@ static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
 	dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
 				__func__, qphy->dpdm_enable);
 
+	mutex_lock(&qphy->phy_lock);
 	if (!qphy->dpdm_enable) {
 		ret = qusb_phy_enable_power(qphy, true);
 		if (ret < 0) {
 			dev_dbg(qphy->phy.dev,
 				"dpdm regulator enable failed:%d\n", ret);
+			mutex_unlock(&qphy->phy_lock);
 			return ret;
 		}
 		qphy->dpdm_enable = true;
+		if (qphy->put_into_high_z_state) {
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x1,
+				qphy->tcsr_clamp_dig_n);
+
+			qusb_phy_gdsc(qphy, true);
+			qusb_phy_enable_clocks(qphy, true);
+
+			dev_dbg(qphy->phy.dev, "RESET QUSB PHY\n");
+			ret = reset_control_assert(qphy->phy_reset);
+			if (ret)
+				dev_err(qphy->phy.dev, "phyassert failed\n");
+			usleep_range(100, 150);
+			ret = reset_control_deassert(qphy->phy_reset);
+			if (ret)
+				dev_err(qphy->phy.dev, "deassert failed\n");
+
+			/*
+			 * Phy in non-driving mode leaves Dp and Dm
+			 * lines in high-Z state. Controller power
+			 * collapse is not switching phy to non-driving
+			 * mode causing charger detection failure. Bring
+			 * phy to non-driving mode by overriding
+			 * controller output via UTMI interface.
+			 */
+			writel_relaxed(TERM_SELECT | XCVR_SELECT_FS |
+				OP_MODE_NON_DRIVE,
+				qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
+			writel_relaxed(UTMI_ULPI_SEL |
+				UTMI_TEST_MUX_SEL,
+				qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+			/* Disable PHY */
+			writel_relaxed(CLAMP_N_EN | FREEZIO_N |
+					POWER_DOWN,
+					qphy->base + QUSB2PHY_PORT_POWERDOWN);
+			/* Make sure that above write is completed */
+			wmb();
+
+			qusb_phy_enable_clocks(qphy, false);
+			qusb_phy_gdsc(qphy, false);
+		}
 	}
+	mutex_unlock(&qphy->phy_lock);
 
 	return ret;
 }
@@ -690,19 +852,25 @@ static int qusb_phy_dpdm_regulator_disable(struct regulator_dev *rdev)
 	dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
 				__func__, qphy->dpdm_enable);
 
+	mutex_lock(&qphy->phy_lock);
 	if (qphy->dpdm_enable) {
 		if (!qphy->cable_connected) {
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x0,
+					qphy->tcsr_clamp_dig_n);
 			dev_dbg(qphy->phy.dev, "turn off for HVDCP case\n");
 			ret = qusb_phy_enable_power(qphy, false);
 			if (ret < 0) {
 				dev_dbg(qphy->phy.dev,
 					"dpdm regulator disable failed:%d\n",
 					ret);
+				mutex_unlock(&qphy->phy_lock);
 				return ret;
 			}
 		}
 		qphy->dpdm_enable = false;
 	}
+	mutex_unlock(&qphy->phy_lock);
 
 	return ret;
 }
@@ -794,6 +962,9 @@ static int qusb_phy_probe(struct platform_device *pdev)
 						"qcom,tune2-efuse-num-bits",
 						&qphy->tune2_efuse_num_of_bits);
 			}
+			of_property_read_u32(dev->of_node,
+						"qcom,tune2-efuse-correction",
+						&qphy->tune2_efuse_correction);
 
 			if (ret) {
 				dev_err(dev, "DT Value for tune2 efuse is invalid.\n");
@@ -829,6 +1000,17 @@ static int qusb_phy_probe(struct platform_device *pdev)
 		}
 	}
 
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"tcsr_clamp_dig_n_1p8");
+	if (res) {
+		qphy->tcsr_clamp_dig_n = devm_ioremap_nocache(dev,
+				res->start, resource_size(res));
+		if (!qphy->tcsr_clamp_dig_n)
+			dev_err(dev, "failed to ioremap tcsr_clamp_dig_n\n");
+	}
+
 	qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
 	if (IS_ERR(qphy->ref_clk_src))
 		dev_dbg(dev, "clk get failed for ref_clk_src\n");
@@ -847,6 +1029,34 @@ static int qusb_phy_probe(struct platform_device *pdev)
 	if (IS_ERR(qphy->phy_reset))
 		return PTR_ERR(qphy->phy_reset);
 
+	if (of_property_match_string(dev->of_node,
+		"clock-names", "iface_clk") >= 0) {
+		qphy->iface_clk = devm_clk_get(dev, "iface_clk");
+		if (IS_ERR(qphy->iface_clk)) {
+			ret = PTR_ERR(qphy->iface_clk);
+			qphy->iface_clk = NULL;
+			if (ret == -EPROBE_DEFER)
+				return ret;
+			dev_err(dev, "couldn't get iface_clk(%d)\n", ret);
+		}
+	}
+
+	if (of_property_match_string(dev->of_node,
+		"clock-names", "core_clk") >= 0) {
+		qphy->core_clk = devm_clk_get(dev, "core_clk");
+		if (IS_ERR(qphy->core_clk)) {
+			ret = PTR_ERR(qphy->core_clk);
+			qphy->core_clk = NULL;
+			if (ret == -EPROBE_DEFER)
+				return ret;
+			dev_err(dev, "couldn't get core_clk(%d)\n", ret);
+		}
+	}
+
+	qphy->gdsc = devm_regulator_get(dev, "USB3_GDSC");
+	if (IS_ERR(qphy->gdsc))
+		qphy->gdsc = NULL;
+
 	qphy->emulation = of_property_read_bool(dev->of_node,
 					"qcom,emulation");
 
@@ -981,6 +1191,7 @@ static int qusb_phy_probe(struct platform_device *pdev)
 		return PTR_ERR(qphy->vdda18);
 	}
 
+	mutex_init(&qphy->phy_lock);
 	platform_set_drvdata(pdev, qphy);
 
 	qphy->phy.label			= "msm-qusb-phy";
@@ -1010,6 +1221,10 @@ static int qusb_phy_probe(struct platform_device *pdev)
 	if (ret)
 		usb_remove_phy(&qphy->phy);
 
+	/* de-assert clamp dig n to reduce leakage on 1p8 upon boot up */
+	if (qphy->tcsr_clamp_dig_n)
+		writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+
 	return ret;
 }
 
diff --git a/include/dt-bindings/clock/msm-clocks-8953.h b/include/dt-bindings/clock/msm-clocks-8953.h
new file mode 100644
index 0000000..6bfca0b
--- /dev/null
+++ b/include/dt-bindings/clock/msm-clocks-8953.h
@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8953_H
+#define __MSM_CLOCKS_8953_H
+
+#define clk_gpll0_clk_src				 0x5933b69f
+#define clk_gpll0_ao_clk_src				 0x6b2fb034
+#define clk_gpll2_clk_src				 0x7c34503b
+#define clk_gpll3_clk_src				 0x5b1eccd5
+#define clk_gpll4_clk_src				 0x10525d57
+#define clk_gpll6_clk_src				 0x17dceaad
+#define clk_gpll0_main_clk_src				 0xf6e5be93
+#define clk_gpll0_main_div2_cci_clk_src			 0x614e4b7e
+#define clk_gpll0_main_div2_clk_src			 0x3037cffb
+#define clk_gpll0_main_div2_mm_clk_src			 0xcf89c5ba
+#define clk_gpll0_main_div2_usb3_clk_src		 0x23944173
+#define clk_gpll0_main_mock_clk_src			 0xdc903e09
+#define clk_gpll2_out_main_clk_src			 0xfcdbeff8
+#define clk_gpll2_vcodec_clk_src			 0xa2cbc782
+#define clk_gpll6_aux_clk_src				 0x81187caa
+#define clk_gpll6_main_clk_src				 0xb9a328d6
+#define clk_gpll6_main_div2_clk_src			 0x6f0016a9
+#define clk_gpll6_main_div2_gfx_clk_src			 0xf29e5e5c
+#define clk_gpll6_main_gfx_clk_src			 0x5aee405b
+#define clk_gpll6_out_aux_clk_src			 0xb3fcaa27
+#define clk_xo_clk_src					 0x23f5649f
+#define clk_xo_a_clk_src				 0x2fdd2c7c
+#define clk_bimc_clk					 0x4b80bf00
+#define clk_bimc_a_clk					 0x4b25668a
+#define clk_pcnoc_clk					 0xc1296d0f
+#define clk_pcnoc_a_clk					 0x9bcffee4
+#define clk_snoc_clk					 0x2c341aa0
+#define clk_snoc_a_clk					 0x8fcef2af
+#define clk_sysmmnoc_clk				 0xebb1df78
+#define clk_sysmmnoc_a_clk				 0x6ca682a2
+#define clk_ipa_clk					 0xfa685cda
+#define clk_ipa_a_clk					 0xeeec2919
+#define clk_qdss_clk					 0x1492202a
+#define clk_qdss_a_clk					 0xdd121669
+#define clk_bimc_msmbus_clk				 0xd212feea
+#define clk_bimc_msmbus_a_clk				 0x71d1a499
+#define clk_bimc_usb_clk				 0x9bd2b2bf
+#define clk_bimc_usb_a_clk				 0xea410834
+#define clk_bimc_wcnss_a_clk				 0x5a6df715
+#define clk_pcnoc_keepalive_a_clk			 0x9464f720
+#define clk_pcnoc_msmbus_clk				 0x2b53b688
+#define clk_pcnoc_msmbus_a_clk				 0x9753a54f
+#define clk_pcnoc_usb_clk				 0x57adc448
+#define clk_pcnoc_usb_a_clk				 0x11d6a74e
+#define clk_snoc_msmbus_clk				 0xe6900bb6
+#define clk_snoc_msmbus_a_clk				 0x5d4683bd
+#define clk_snoc_usb_clk				 0x29f9d73d
+#define clk_snoc_usb_a_clk				 0x34b7821b
+#define clk_snoc_wcnss_a_clk				 0xd3949ebc
+#define clk_sysmmnoc_msmbus_a_clk			 0x50600f1b
+#define clk_sysmmnoc_msmbus_clk				 0xd61e5721
+#define clk_xo_dwc3_clk					 0xfad488ce
+#define clk_xo_lpm_clk					 0x2be48257
+#define clk_xo_pil_lpass_clk				 0xb72aa4c9
+#define clk_xo_pil_mss_clk				 0xe97a8354
+#define clk_xo_pil_pronto_clk				 0x89dae6d0
+#define clk_xo_wlan_clk					 0x0116b76f
+#define clk_xo_pipe_clk_src				 0x8eac73d8
+#define clk_gcc_apss_ahb_clk				 0x2b0d39ff
+#define clk_gcc_apss_axi_clk				 0x1d47f4ff
+#define clk_gcc_blsp1_ahb_clk				 0x8caa5b4f
+#define clk_gcc_blsp2_ahb_clk				 0x8f283c1d
+#define clk_gcc_boot_rom_ahb_clk			 0xde2adeb1
+#define clk_gcc_crypto_ahb_clk				 0x94de4919
+#define clk_gcc_crypto_axi_clk				 0xd4415c9b
+#define clk_gcc_crypto_clk				 0x00d390d2
+#define clk_gcc_prng_ahb_clk				 0x397e7eaa
+#define clk_gcc_qdss_dap_clk				 0x7fa9aa73
+#define clk_gcc_apss_tcu_async_clk			 0x8fbc51da
+#define clk_gcc_cpp_tbu_clk				 0xab6f19ab
+#define clk_gcc_jpeg_tbu_clk				 0xcf8fd944
+#define clk_gcc_mdp_tbu_clk				 0x82287f76
+#define clk_gcc_smmu_cfg_clk				 0x75eaefa5
+#define clk_gcc_venus_tbu_clk				 0x7e0b97ce
+#define clk_gcc_vfe1_tbu_clk				 0x4888e70f
+#define clk_gcc_vfe_tbu_clk				 0x061f2f95
+#define clk_camss_top_ahb_clk_src			 0xf92304fb
+#define clk_csi0_clk_src				 0x227e65bc
+#define clk_apss_ahb_clk_src				 0x36f8495f
+#define clk_csi1_clk_src				 0x6a2a6c36
+#define clk_csi2_clk_src				 0x4113589f
+#define clk_vfe0_clk_src				 0xa0c2bd8f
+#define clk_gfx3d_clk_src				 0x917f76ef
+#define clk_vcodec0_clk_src				 0xbc193019
+#define clk_cpp_clk_src					 0x8382f56d
+#define clk_jpeg0_clk_src				 0x9a0a0ac3
+#define clk_mdp_clk_src					 0x6dc1f8f1
+#define clk_pclk0_clk_src				 0xccac1f35
+#define clk_ext_pclk0_clk_src				 0x087c1612
+#define clk_pclk1_clk_src				 0x090f68ac
+#define clk_ext_pclk1_clk_src				 0x8067c5a3
+#define clk_mdss_mdp_vote_clk				 0x588460a4
+#define clk_mdss_rotator_vote_clk			 0x5b1f675e
+#define clk_usb30_master_clk_src			 0xc6262f89
+#define clk_vfe1_clk_src				 0x4e357366
+#define clk_apc0_droop_detector_clk_src			 0x824a5cb7
+#define clk_apc1_droop_detector_clk_src			 0x8708fba4
+#define clk_blsp1_qup1_i2c_apps_clk_src			 0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src			 0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src			 0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src			 0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src			 0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src			 0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src			 0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src			 0xddb5bbdb
+#define clk_blsp1_uart1_apps_clk_src			 0xf8146114
+#define clk_blsp1_uart2_apps_clk_src			 0xfc9c2f73
+#define clk_blsp2_qup1_i2c_apps_clk_src			 0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src			 0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src			 0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src			 0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src			 0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src			 0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src			 0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src			 0x25d4a2b1
+#define clk_blsp2_uart1_apps_clk_src			 0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src			 0xdd448080
+#define clk_cci_clk_src					 0x822f3d97
+#define clk_csi0p_clk_src				 0xf1b8f4e7
+#define clk_csi1p_clk_src				 0x08d1986c
+#define clk_csi2p_clk_src				 0x7ebc4951
+#define clk_camss_gp0_clk_src				 0x43b063e9
+#define clk_camss_gp1_clk_src				 0xa3315f1b
+#define clk_mclk0_clk_src				 0x266b3853
+#define clk_mclk1_clk_src				 0xa73cad0c
+#define clk_mclk2_clk_src				 0x42545468
+#define clk_mclk3_clk_src				 0x2bfbb714
+#define clk_csi0phytimer_clk_src			 0xc8a309be
+#define clk_csi1phytimer_clk_src			 0x7c0fe23a
+#define clk_csi2phytimer_clk_src			 0x62ffea9c
+#define clk_crypto_clk_src				 0x37a21414
+#define clk_gp1_clk_src					 0xad85b97a
+#define clk_gp2_clk_src					 0xfb1f0065
+#define clk_gp3_clk_src					 0x63b693d6
+#define clk_byte0_clk_src				 0x75cc885b
+#define clk_ext_byte0_clk_src				 0xfb32f31e
+#define clk_byte1_clk_src				 0x63c2c955
+#define clk_ext_byte1_clk_src				 0x585ef6d4
+#define clk_esc0_clk_src				 0xb41d7c38
+#define clk_esc1_clk_src				 0x3b0afa42
+#define clk_vsync_clk_src				 0xecb43940
+#define clk_pdm2_clk_src				 0x31e494fd
+#define clk_rbcpr_gfx_clk_src				 0x37f04b53
+#define clk_sdcc1_apps_clk_src				 0xd4975db2
+#define clk_sdcc1_ice_core_clk_src			 0xfd6a4301
+#define clk_sdcc2_apps_clk_src				 0xfc46c821
+#define clk_usb30_mock_utmi_clk_src			 0xa024a976
+#define clk_usb3_aux_clk_src				 0xfde7ae09
+#define clk_usb3_pipe_clk_src				 0x8b922db4
+#define clk_gcc_apc0_droop_detector_gpll0_clk		 0x514e25ca
+#define clk_gcc_apc1_droop_detector_gpll0_clk		 0x0c9c03ee
+#define clk_gcc_blsp1_qup1_i2c_apps_clk			 0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk			 0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk			 0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk			 0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk			 0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk			 0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk			 0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk			 0x80f8722f
+#define clk_gcc_blsp1_uart1_apps_clk			 0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk			 0xf8a61c96
+#define clk_gcc_blsp2_qup1_i2c_apps_clk			 0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk			 0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk			 0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk			 0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk			 0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk			 0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk			 0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk			 0x01a72b93
+#define clk_gcc_blsp2_uart1_apps_clk			 0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk			 0x1e1965a3
+#define clk_gcc_camss_cci_ahb_clk			 0xa81c11ba
+#define clk_gcc_camss_cci_clk				 0xb7dd8824
+#define clk_gcc_camss_cpp_ahb_clk			 0x4ac95e14
+#define clk_gcc_camss_cpp_axi_clk			 0xbbf73861
+#define clk_gcc_camss_cpp_clk				 0x7118a0de
+#define clk_gcc_camss_csi0_ahb_clk			 0x175d672a
+#define clk_gcc_camss_csi0_clk				 0x6b01b3e1
+#define clk_gcc_camss_csi0_csiphy_3p_clk		 0x6a23bd3d
+#define clk_gcc_camss_csi0phy_clk			 0x06a41ff7
+#define clk_gcc_camss_csi0pix_clk			 0x61a8a930
+#define clk_gcc_camss_csi0rdi_clk			 0x7053c7ae
+#define clk_gcc_camss_csi1_ahb_clk			 0x2c2dc261
+#define clk_gcc_camss_csi1_clk				 0x1aba4a8c
+#define clk_gcc_camss_csi1_csiphy_3p_clk		 0x7d45d937
+#define clk_gcc_camss_csi1phy_clk			 0x0fd1d1fa
+#define clk_gcc_camss_csi1pix_clk			 0x87fc98d8
+#define clk_gcc_camss_csi1rdi_clk			 0x6ac996fe
+#define clk_gcc_camss_csi2_ahb_clk			 0xf3f25940
+#define clk_gcc_camss_csi2_clk				 0xb6857fa2
+#define clk_gcc_camss_csi2_csiphy_3p_clk		 0x27d7be82
+#define clk_gcc_camss_csi2phy_clk			 0xbeeffbcd
+#define clk_gcc_camss_csi2pix_clk			 0xa619561a
+#define clk_gcc_camss_csi2rdi_clk			 0x019fd3f1
+#define clk_gcc_camss_csi_vfe0_clk			 0xcc73453c
+#define clk_gcc_camss_csi_vfe1_clk			 0xb1ef6e8b
+#define clk_gcc_camss_gp0_clk				 0xd2bc3892
+#define clk_gcc_camss_gp1_clk				 0xe4c013e1
+#define clk_gcc_camss_ispif_ahb_clk			 0x3c0a858f
+#define clk_gcc_camss_jpeg0_clk				 0x1ed3f032
+#define clk_gcc_camss_jpeg_ahb_clk			 0x3bfa7603
+#define clk_gcc_camss_jpeg_axi_clk			 0x3e278896
+#define clk_gcc_camss_mclk0_clk				 0x80902deb
+#define clk_gcc_camss_mclk1_clk				 0x5002d85f
+#define clk_gcc_camss_mclk2_clk				 0x222f8fff
+#define clk_gcc_camss_mclk3_clk				 0x73802c85
+#define clk_gcc_camss_micro_ahb_clk			 0xfbbee8cf
+#define clk_gcc_camss_csi0phytimer_clk			 0xf8897589
+#define clk_gcc_camss_csi1phytimer_clk			 0x4d26438f
+#define clk_gcc_camss_csi2phytimer_clk			 0xe768898c
+#define clk_gcc_camss_ahb_clk				 0x9894b414
+#define clk_gcc_camss_top_ahb_clk			 0x4e814a78
+#define clk_gcc_camss_vfe0_clk				 0xaaa3cd97
+#define clk_gcc_camss_vfe_ahb_clk			 0x4050f47a
+#define clk_gcc_camss_vfe_axi_clk			 0x77fe2384
+#define clk_gcc_camss_vfe1_ahb_clk			 0x634a738a
+#define clk_gcc_camss_vfe1_axi_clk			 0xaf7463b3
+#define clk_gcc_camss_vfe1_clk				 0xcaf20d99
+#define clk_gcc_dcc_clk					 0xd1000c50
+#define clk_gcc_gp1_clk					 0x057f7b69
+#define clk_gcc_gp2_clk					 0x9bf83ffd
+#define clk_gcc_gp3_clk					 0xec6539ee
+#define clk_gcc_mdss_ahb_clk				 0xbfb92ed3
+#define clk_gcc_mdss_axi_clk				 0x668f51de
+#define clk_gcc_mdss_byte0_clk				 0x35da7862
+#define clk_gcc_mdss_byte1_clk				 0x41f97fd8
+#define clk_gcc_mdss_esc0_clk				 0xaec5cb25
+#define clk_gcc_mdss_esc1_clk				 0x34653cc7
+#define clk_gcc_mdss_mdp_clk				 0x22f3521f
+#define clk_gcc_mdss_pclk0_clk				 0xcc5c5c77
+#define clk_gcc_mdss_pclk1_clk				 0x9a9c430d
+#define clk_gcc_mdss_vsync_clk				 0x32a09f1f
+#define clk_gcc_mss_cfg_ahb_clk				 0x111cde81
+#define clk_gcc_mss_q6_bimc_axi_clk			 0x67544d62
+#define clk_gcc_bimc_gfx_clk				 0x3edd69ad
+#define clk_gcc_bimc_gpu_clk				 0x19922503
+#define clk_gcc_oxili_ahb_clk				 0xd15c8a00
+#define clk_gcc_oxili_aon_clk				 0xae18e54d
+#define clk_gcc_oxili_gfx3d_clk				 0x49a51fd9
+#define clk_gcc_oxili_timer_clk				 0x1180db06
+#define clk_gcc_pcnoc_usb3_axi_clk			 0xf7f4b314
+#define clk_gcc_pdm2_clk				 0x99d55711
+#define clk_gcc_pdm_ahb_clk				 0x365664f6
+#define clk_gcc_rbcpr_gfx_clk				 0x20c0af83
+#define clk_gcc_sdcc1_ahb_clk				 0x691e0caa
+#define clk_gcc_sdcc1_apps_clk				 0x9ad6fb96
+#define clk_gcc_sdcc1_ice_core_clk			 0x0fd5680a
+#define clk_gcc_sdcc2_ahb_clk				 0x23d5727f
+#define clk_gcc_sdcc2_apps_clk				 0x861b20ac
+#define clk_gcc_usb30_master_clk			 0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk			 0xa800b65a
+#define clk_gcc_usb30_sleep_clk				 0xd0b65c92
+#define clk_gcc_usb3_aux_clk				 0x555d16b2
+#define clk_gcc_usb_phy_cfg_ahb_clk			 0xccb7e26f
+#define clk_gcc_venus0_ahb_clk				 0x08d778c6
+#define clk_gcc_venus0_axi_clk				 0xcdf4c8f6
+#define clk_gcc_venus0_core0_vcodec0_clk		 0x83a7f549
+#define clk_gcc_venus0_vcodec0_clk			 0xf76a02bb
+#define clk_gcc_qusb_ref_clk				 0x16e35a90
+#define clk_gcc_usb_ss_ref_clk				 0xb85dadfa
+#define clk_gcc_usb3_pipe_clk				 0x26f8a97a
+#define clk_gcc_qusb2_phy_reset				 0x3ce5fa84
+#define clk_gcc_usb3_phy_reset				 0x03d559f1
+#define clk_gcc_usb3phy_phy_reset			 0xb1a4f885
+#define clk_bb_clk1					 0xf5304268
+#define clk_bb_clk1_a					 0xfa113810
+#define clk_bb_clk1_pin					 0x6dd0a779
+#define clk_bb_clk1_a_pin				 0x9b637772
+#define clk_bb_clk2					 0xfe15cb87
+#define clk_bb_clk2_a					 0x59682706
+#define clk_bb_clk2_pin					 0x498938e5
+#define clk_bb_clk2_a_pin				 0x52513787
+#define clk_rf_clk2					 0x24a30992
+#define clk_rf_clk2_a					 0x944d8bbd
+#define clk_rf_clk3					 0xb673936b
+#define clk_rf_clk3_a					 0xf7975f21
+#define clk_div_clk2					 0xd454019f
+#define clk_div_clk2_a					 0x4bd7bfa8
+
+/* clock_debug controlled clocks */
+#define clk_gcc_debug_mux				 0x8121ac15
+#define clk_wcnss_m_clk					 0x709f430b
+
+#define clk_apcs_hf_pll					 0x8fef0444
+#define clk_apcs_hf_pll_main				 0xef871ccf
+#define clk_ccissmux					 0x839fb2ef
+#define clk_a53_perf_clk				 0xa0a0dc7f
+#define clk_a53_pwr_clk					 0x2e6af930
+#define clk_a53ssmux_pwr				 0x48a50c99
+#define clk_a53ssmux_perf				 0x154acbc9
+
+#define clk_cci_clk					 0x96854074
+#define clk_apc0_m_clk					 0xce1e9473
+#define clk_apc1_m_clk					 0x990fbaf7
+#define clk_cci_m_clk					 0xec7e8afc
+#define clk_apss_debug_pri_mux				 0xc691ff55
+#define clk_cpu_debug_pri_mux				 0x61a2945f
+#define clk_debug_cpu_clk				 0x0e696b2b
+
+#define clk_audio_ap_clk				 0x312ac429
+#define clk_audio_pmi_clk				 0xb7ba2274
+#define clk_audio_ap_clk2				 0xf0fbaf5b
+/* external multimedia clocks */
+#define clk_dsi0pll_pixel_clk_mux			 0x792379e1
+#define clk_dsi0pll_byte_clk_mux			 0x60e83f06
+#define clk_dsi0pll_byte_clk_src			 0xbbaa30be
+#define clk_dsi0pll_pixel_clk_src			 0x45b3260f
+#define clk_dsi0pll_n2_div_clk				 0x1474c213
+#define clk_dsi0pll_post_n1_div_clk			 0xdab8c389
+#define clk_dsi0pll_vco_clk				 0x15940d40
+#define clk_dsi1pll_pixel_clk_mux			 0x36458019
+#define clk_dsi1pll_byte_clk_mux			 0xb5a42b7b
+#define clk_dsi1pll_byte_clk_src			 0x63930a8f
+#define clk_dsi1pll_pixel_clk_src			 0x0e4c9b56
+#define clk_dsi1pll_n2_div_clk				 0x2c9d4007
+#define clk_dsi1pll_post_n1_div_clk			 0x03020041
+#define clk_dsi1pll_vco_clk				 0x99797b50
+#define clk_mdss_dsi1_vco_clk_src			 0xfcd15658
+
+#define clk_dsi0pll_shadow_byte_clk_src			 0x177c029c
+#define clk_dsi0pll_shadow_pixel_clk_src		 0x98ae3c92
+#define clk_dsi1pll_shadow_byte_clk_src			 0xfc021ce5
+#define clk_dsi1pll_shadow_pixel_clk_src		 0xdcca3ffc
+
+/* GCC block resets */
+#define GCC_QUSB2_PHY_BCR				0
+#define GCC_USB3_PHY_BCR				1
+#define GCC_USB3PHY_PHY_BCR				2
+#define GCC_USB_30_BCR					3
+#define GCC_CAMSS_MICRO_BCR				4
+#endif
diff --git a/include/dt-bindings/clock/msm-clocks-hwio-8953.h b/include/dt-bindings/clock/msm-clocks-hwio-8953.h
new file mode 100644
index 0000000..2838806
--- /dev/null
+++ b/include/dt-bindings/clock/msm-clocks-hwio-8953.h
@@ -0,0 +1,683 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8953_HWIO_H
+#define __MSM_CLOCKS_8953_HWIO_H
+
+#define GPLL0_MODE					0x21000
+#define GPLL0_L_VAL					0x21004
+#define GPLL0_ALPHA_VAL					0x21008
+#define GPLL0_ALPHA_VAL_U				0x2100C
+#define GPLL0_USER_CTL					0x21010
+#define GPLL0_USER_CTL_U				0x21014
+#define GPLL0_CONFIG_CTL				0x21018
+#define GPLL0_TEST_CTL					0x2101C
+#define GPLL0_TEST_CTL_U				0x21020
+#define GPLL0_FREQ_CTL					0x21028
+#define GPLL0_CLK_CGC_EN				0x2102C
+#define GPLL0_SSC_CTL					0x21030
+#define GPLL2_MODE					0x4A000
+#define GPLL2_L_VAL					0x4A004
+#define GPLL2_ALPHA_VAL					0x4A008
+#define GPLL2_ALPHA_VAL_U				0x4A00C
+#define GPLL2_USER_CTL					0x4A010
+#define GPLL2_USER_CTL_U				0x4A014
+#define GPLL2_CONFIG_CTL				0x4A018
+#define GPLL2_TEST_CTL					0x4A01C
+#define GPLL2_TEST_CTL_U				0x4A020
+#define GPLL2_FREQ_CTL					0x4A028
+#define GPLL2_CLK_CGC_EN				0x4A02C
+#define GPLL2_SSC_CTL					0x4A030
+#define GPLL3_MODE					0x22000
+#define GPLL3_L_VAL					0x22004
+#define GPLL3_ALPHA_VAL					0x22008
+#define GPLL3_ALPHA_VAL_U				0x2200C
+#define GPLL3_USER_CTL					0x22010
+#define GPLL3_USER_CTL_U				0x22014
+#define GPLL3_CONFIG_CTL				0x22018
+#define GPLL3_TEST_CTL					0x2201C
+#define GPLL3_TEST_CTL_U				0x22020
+#define GPLL3_FREQ_CTL					0x22028
+#define GPLL3_CLK_CGC_EN				0x2202C
+#define GPLL3_SSC_CTL					0x22030
+#define GPLL4_MODE					0x24000
+#define GPLL4_L_VAL					0x24004
+#define GPLL4_ALPHA_VAL					0x24008
+#define GPLL4_ALPHA_VAL_U				0x2400C
+#define GPLL4_USER_CTL					0x24010
+#define GPLL4_USER_CTL_U				0x24014
+#define GPLL4_CONFIG_CTL				0x24018
+#define GPLL4_TEST_CTL					0x2401C
+#define GPLL4_TEST_CTL_U				0x24020
+#define GPLL4_FREQ_CTL					0x24028
+#define GPLL4_CLK_CGC_EN				0x2402C
+#define GPLL4_SSC_CTL					0x24030
+#define GPLL5_MODE					0x25000
+#define GPLL5_L_VAL					0x25004
+#define GPLL5_ALPHA_VAL					0x25008
+#define GPLL5_ALPHA_VAL_U				0x2500C
+#define GPLL5_USER_CTL					0x25010
+#define GPLL5_CONFIG_CTL				0x25018
+#define GPLL5_TEST_CTL					0x2501C
+#define GPLL5_CLK_CGC_EN				0x2502C
+#define QDSS_DAP_CBCR					0x29084
+#define GPLL6_MODE					0x37000
+#define GPLL6_L_VAL					0x37004
+#define GPLL6_ALPHA_VAL					0x37008
+#define GPLL6_ALPHA_VAL_U				0x3700C
+#define GPLL6_USER_CTL					0x37010
+#define GPLL6_CONFIG_CTL				0x37018
+#define GPLL6_TEST_CTL					0x3701C
+#define GPLL6_STATUS					0x37024
+#define GPLL6_CLK_CGC_EN				0x3702C
+#define DCC_CBCR					0x77004
+#define BIMC_GFX_CBCR					0x59034
+#define OXILI_AON_CBCR					0x59044
+#define MSS_CFG_AHB_CBCR				0x49000
+#define MSS_Q6_BIMC_AXI_CBCR				0x49004
+#define GCC_SLEEP_CMD_RCGR				0x30000
+#define QUSB2_PHY_BCR					0x4103C
+#define USB_30_BCR					0x3F070
+#define PCNOC_USB3_AXI_CBCR				0x3F038
+#define USB_30_MISC					0x3F074
+#define USB30_MASTER_CBCR				0x3F000
+#define USB30_SLEEP_CBCR				0x3F004
+#define USB30_MOCK_UTMI_CBCR				0x3F008
+#define USB30_MASTER_CMD_RCGR				0x3F00C
+#define USB30_MASTER_CFG_RCGR				0x3F010
+#define USB30_MASTER_M					0x3F014
+#define USB30_MASTER_N					0x3F018
+#define USB30_MASTER_D					0x3F01C
+#define USB30_MOCK_UTMI_CMD_RCGR			0x3F020
+#define USB30_MOCK_UTMI_CFG_RCGR			0x3F024
+#define USB30_MOCK_UTMI_M				0x3F028
+#define USB30_MOCK_UTMI_N				0x3F02C
+#define USB30_MOCK_UTMI_D				0x3F030
+#define USB_PHY_CFG_AHB_CBCR				0x3F080
+#define USB3_PHY_BCR					0x3F034
+#define USB3PHY_PHY_BCR					0x3F03C
+#define USB3_PIPE_CBCR					0x3F040
+#define USB3_PHY_PIPE_MISC				0x3F048
+#define USB3_AUX_CBCR					0x3F044
+#define USB3_AUX_CMD_RCGR				0x3F05C
+#define USB3_AUX_CFG_RCGR				0x3F060
+#define USB3_AUX_M					0x3F064
+#define USB3_AUX_N					0x3F068
+#define USB3_AUX_D					0x3F06C
+#define SDCC1_APPS_CMD_RCGR				0x42004
+#define SDCC1_APPS_CFG_RCGR				0x42008
+#define SDCC1_APPS_M					0x4200C
+#define SDCC1_APPS_N					0x42010
+#define SDCC1_APPS_D					0x42014
+#define SDCC1_APPS_CBCR					0x42018
+#define SDCC1_AHB_CBCR					0x4201C
+#define SDCC1_MISC					0x42020
+#define SDCC2_APPS_CMD_RCGR				0x43004
+#define SDCC2_APPS_CFG_RCGR				0x43008
+#define SDCC2_APPS_M					0x4300C
+#define SDCC2_APPS_N					0x43010
+#define SDCC2_APPS_D					0x43014
+#define SDCC2_APPS_CBCR					0x43018
+#define SDCC2_AHB_CBCR					0x4301C
+#define SDCC1_ICE_CORE_CMD_RCGR				0x5D000
+#define SDCC1_ICE_CORE_CFG_RCGR				0x5D004
+#define SDCC1_ICE_CORE_M				0x5D008
+#define SDCC1_ICE_CORE_N				0x5D00C
+#define SDCC1_ICE_CORE_D				0x5D010
+#define SDCC1_ICE_CORE_CBCR				0x5D014
+#define BLSP1_AHB_CBCR					0x01008
+#define BLSP1_QUP1_SPI_APPS_CBCR			0x02004
+#define BLSP1_QUP1_I2C_APPS_CBCR			0x02008
+#define BLSP1_QUP1_I2C_APPS_CMD_RCGR			0x0200C
+#define BLSP1_QUP1_I2C_APPS_CFG_RCGR			0x02010
+#define BLSP1_QUP2_I2C_APPS_CMD_RCGR			0x03000
+#define BLSP1_QUP2_I2C_APPS_CFG_RCGR			0x03004
+#define BLSP1_QUP3_I2C_APPS_CMD_RCGR			0x04000
+#define BLSP1_QUP3_I2C_APPS_CFG_RCGR			0x04004
+#define BLSP1_QUP4_I2C_APPS_CMD_RCGR			0x05000
+#define BLSP1_QUP4_I2C_APPS_CFG_RCGR			0x05004
+#define BLSP1_QUP1_SPI_APPS_CMD_RCGR			0x02024
+#define BLSP1_QUP1_SPI_APPS_CFG_RCGR			0x02028
+#define BLSP1_QUP1_SPI_APPS_M				0x0202C
+#define BLSP1_QUP1_SPI_APPS_N				0x02030
+#define BLSP1_QUP1_SPI_APPS_D				0x02034
+#define BLSP1_UART1_APPS_CBCR				0x0203C
+#define BLSP1_UART1_SIM_CBCR				0x02040
+#define BLSP1_UART1_APPS_CMD_RCGR			0x02044
+#define BLSP1_UART1_APPS_CFG_RCGR			0x02048
+#define BLSP1_UART1_APPS_M				0x0204C
+#define BLSP1_UART1_APPS_N				0x02050
+#define BLSP1_UART1_APPS_D				0x02054
+#define BLSP1_QUP2_SPI_APPS_CBCR			0x0300C
+#define BLSP1_QUP2_I2C_APPS_CBCR			0x03010
+#define BLSP1_QUP2_SPI_APPS_CMD_RCGR			0x03014
+#define BLSP1_QUP2_SPI_APPS_CFG_RCGR			0x03018
+#define BLSP1_QUP2_SPI_APPS_M				0x0301C
+#define BLSP1_QUP2_SPI_APPS_N				0x03020
+#define BLSP1_QUP2_SPI_APPS_D				0x03024
+#define BLSP1_UART2_APPS_CBCR				0x0302C
+#define BLSP1_UART2_SIM_CBCR				0x03030
+#define BLSP1_UART2_APPS_CMD_RCGR			0x03034
+#define BLSP1_UART2_APPS_CFG_RCGR			0x03038
+#define BLSP1_UART2_APPS_M				0x0303C
+#define BLSP1_UART2_APPS_N				0x03040
+#define BLSP1_UART2_APPS_D				0x03044
+#define BLSP1_QUP3_SPI_APPS_CBCR			0x0401C
+#define BLSP1_QUP3_I2C_APPS_CBCR			0x04020
+#define BLSP1_QUP3_SPI_APPS_CMD_RCGR			0x04024
+#define BLSP1_QUP3_SPI_APPS_CFG_RCGR			0x04028
+#define BLSP1_QUP3_SPI_APPS_M				0x0402C
+#define BLSP1_QUP3_SPI_APPS_N				0x04030
+#define BLSP1_QUP3_SPI_APPS_D				0x04034
+#define BLSP1_QUP4_SPI_APPS_CBCR			0x0501C
+#define BLSP1_QUP4_I2C_APPS_CBCR			0x05020
+#define BLSP1_QUP4_SPI_APPS_CMD_RCGR			0x05024
+#define BLSP1_QUP4_SPI_APPS_CFG_RCGR			0x05028
+#define BLSP1_QUP4_SPI_APPS_M				0x0502C
+#define BLSP1_QUP4_SPI_APPS_N				0x05030
+#define BLSP1_QUP4_SPI_APPS_D				0x05034
+#define BLSP2_AHB_CBCR					0x0B008
+#define BLSP2_QUP1_SPI_APPS_CBCR			0x0C004
+#define BLSP2_QUP1_I2C_APPS_CBCR			0x0C008
+#define BLSP2_QUP1_I2C_APPS_CMD_RCGR			0x0C00C
+#define BLSP2_QUP1_I2C_APPS_CFG_RCGR			0x0C010
+#define BLSP2_QUP2_I2C_APPS_CMD_RCGR			0x0D000
+#define BLSP2_QUP2_I2C_APPS_CFG_RCGR			0x0D004
+#define BLSP2_QUP3_I2C_APPS_CMD_RCGR			0x0F000
+#define BLSP2_QUP3_I2C_APPS_CFG_RCGR			0x0F004
+#define BLSP2_QUP4_I2C_APPS_CMD_RCGR			0x18000
+#define BLSP2_QUP4_I2C_APPS_CFG_RCGR			0x18004
+#define BLSP2_QUP1_SPI_APPS_CMD_RCGR			0x0C024
+#define BLSP2_QUP1_SPI_APPS_CFG_RCGR			0x0C028
+#define BLSP2_QUP1_SPI_APPS_M				0x0C02C
+#define BLSP2_QUP1_SPI_APPS_N				0x0C030
+#define BLSP2_QUP1_SPI_APPS_D				0x0C034
+#define BLSP2_UART1_APPS_CBCR				0x0C03C
+#define BLSP2_UART1_SIM_CBCR				0x0C040
+#define BLSP2_UART1_APPS_CMD_RCGR			0x0C044
+#define BLSP2_UART1_APPS_CFG_RCGR			0x0C048
+#define BLSP2_UART1_APPS_M				0x0C04C
+#define BLSP2_UART1_APPS_N				0x0C050
+#define BLSP2_UART1_APPS_D				0x0C054
+#define BLSP2_QUP2_SPI_APPS_CBCR			0x0D00C
+#define BLSP2_QUP2_I2C_APPS_CBCR			0x0D010
+#define BLSP2_QUP2_SPI_APPS_CMD_RCGR			0x0D014
+#define BLSP2_QUP2_SPI_APPS_CFG_RCGR			0x0D018
+#define BLSP2_QUP2_SPI_APPS_M				0x0D01C
+#define BLSP2_QUP2_SPI_APPS_N				0x0D020
+#define BLSP2_QUP2_SPI_APPS_D				0x0D024
+#define BLSP2_UART2_APPS_CBCR				0x0D02C
+#define BLSP2_UART2_SIM_CBCR				0x0D030
+#define BLSP2_UART2_APPS_CMD_RCGR			0x0D034
+#define BLSP2_UART2_APPS_CFG_RCGR			0x0D038
+#define BLSP2_UART2_APPS_M				0x0D03C
+#define BLSP2_UART2_APPS_N				0x0D040
+#define BLSP2_UART2_APPS_D				0x0D044
+#define BLSP2_QUP3_SPI_APPS_CBCR			0x0F01C
+#define BLSP2_QUP3_I2C_APPS_CBCR			0x0F020
+#define BLSP2_QUP3_SPI_APPS_CMD_RCGR			0x0F024
+#define BLSP2_QUP3_SPI_APPS_CFG_RCGR			0x0F028
+#define BLSP2_QUP3_SPI_APPS_M				0x0F02C
+#define BLSP2_QUP3_SPI_APPS_N				0x0F030
+#define BLSP2_QUP3_SPI_APPS_D				0x0F034
+#define BLSP2_QUP4_SPI_APPS_CBCR			0x1801C
+#define BLSP2_QUP4_I2C_APPS_CBCR			0x18020
+#define BLSP2_QUP4_SPI_APPS_CMD_RCGR			0x18024
+#define BLSP2_QUP4_SPI_APPS_CFG_RCGR			0x18028
+#define BLSP2_QUP4_SPI_APPS_M				0x1802C
+#define BLSP2_QUP4_SPI_APPS_N				0x18030
+#define BLSP2_QUP4_SPI_APPS_D				0x18034
+#define BLSP_UART_SIM_CMD_RCGR				0x0100C
+#define BLSP_UART_SIM_CFG_RCGR				0x01010
+#define PRNG_XPU_CFG_AHB_CBCR				0x17008
+#define PDM_AHB_CBCR					0x44004
+#define PDM_XO4_CBCR					0x44008
+#define PDM2_CBCR					0x4400C
+#define PDM2_CMD_RCGR					0x44010
+#define PDM2_CFG_RCGR					0x44014
+#define PRNG_AHB_CBCR					0x13004
+#define BOOT_ROM_AHB_CBCR				0x1300C
+#define CRYPTO_CMD_RCGR					0x16004
+#define CRYPTO_CFG_RCGR					0x16008
+#define CRYPTO_CBCR					0x1601C
+#define CRYPTO_AXI_CBCR					0x16020
+#define CRYPTO_AHB_CBCR					0x16024
+#define GCC_XO_DIV4_CBCR				0x30034
+#define APSS_TCU_CMD_RCGR				0x38000
+#define APSS_TCU_CFG_RCGR				0x38004
+#define APSS_AXI_CMD_RCGR				0x38048
+#define APSS_AXI_CFG_RCGR				0x3804C
+#define APSS_AHB_CMD_RCGR				0x46000
+#define APSS_AHB_CFG_RCGR				0x46004
+#define APSS_AHB_MISC					0x46018
+#define APSS_AHB_CBCR					0x4601C
+#define APSS_AXI_CBCR					0x46020
+#define VENUS_TBU_CBCR					0x12014
+#define APSS_TCU_ASYNC_CBCR				0x12018
+#define MDP_TBU_CBCR					0x1201C
+#define JPEG_TBU_CBCR					0x12034
+#define SMMU_CFG_CBCR					0x12038
+#define VFE_TBU_CBCR					0x1203C
+#define VFE1_TBU_CBCR					0x12090
+#define CPP_TBU_CBCR					0x12040
+#define RBCPR_GFX_CBCR					0x3A004
+#define RBCPR_GFX_CMD_RCGR				0x3A00C
+#define RBCPR_GFX_CFG_RCGR				0x3A010
+#define APCS_GPLL_ENA_VOTE				0x45000
+#define APCS_CLOCK_BRANCH_ENA_VOTE			0x45004
+#define APCS_SMMU_CLOCK_BRANCH_ENA_VOTE			0x4500C
+#define APCS_CLOCK_SLEEP_ENA_VOTE			0x45008
+#define APCS_SMMU_CLOCK_SLEEP_ENA_VOTE			0x45010
+#define GCC_DEBUG_CLK_CTL				0x74000
+#define CLOCK_FRQ_MEASURE_CTL				0x74004
+#define CLOCK_FRQ_MEASURE_STATUS			0x74008
+#define PLLTEST_PAD_CFG					0x7400C
+#define GP1_CBCR					0x08000
+#define GP1_CMD_RCGR					0x08004
+#define GP1_CFG_RCGR					0x08008
+#define GP1_M						0x0800C
+#define GP1_N						0x08010
+#define GP1_D						0x08014
+#define GP2_CBCR					0x09000
+#define GP2_CMD_RCGR					0x09004
+#define GP2_CFG_RCGR					0x09008
+#define GP2_M						0x0900C
+#define GP2_N						0x09010
+#define GP2_D						0x09014
+#define GP3_CBCR					0x0A000
+#define GP3_CMD_RCGR					0x0A004
+#define GP3_CFG_RCGR					0x0A008
+#define GP3_M						0x0A00C
+#define GP3_N						0x0A010
+#define GP3_D						0x0A014
+#define APSS_MISC					0x60000
+#define VCODEC0_CMD_RCGR				0x4C000
+#define VCODEC0_CFG_RCGR				0x4C004
+#define VCODEC0_M					0x4C008
+#define VCODEC0_N					0x4C00C
+#define VCODEC0_D					0x4C010
+#define VENUS0_VCODEC0_CBCR				0x4C01C
+#define VENUS0_CORE0_VCODEC0_CBCR			0x4C02C
+#define VENUS0_AHB_CBCR					0x4C020
+#define VENUS0_AXI_CBCR					0x4C024
+#define PCLK0_CMD_RCGR					0x4D000
+#define PCLK0_CFG_RCGR					0x4D004
+#define PCLK0_M						0x4D008
+#define PCLK0_N						0x4D00C
+#define PCLK0_D						0x4D010
+#define PCLK1_CMD_RCGR					0x4D0B8
+#define PCLK1_CFG_RCGR					0x4D0BC
+#define PCLK1_M						0x4D0C0
+#define PCLK1_N						0x4D0C4
+#define PCLK1_D						0x4D0C8
+#define MDP_CMD_RCGR					0x4D014
+#define MDP_CFG_RCGR					0x4D018
+#define VSYNC_CMD_RCGR					0x4D02C
+#define VSYNC_CFG_RCGR					0x4D030
+#define BYTE0_CMD_RCGR					0x4D044
+#define BYTE0_CFG_RCGR					0x4D048
+#define BYTE1_CMD_RCGR					0x4D0B0
+#define BYTE1_CFG_RCGR					0x4D0B4
+#define ESC0_CMD_RCGR					0x4D05C
+#define ESC0_CFG_RCGR					0x4D060
+#define ESC1_CMD_RCGR					0x4D0A8
+#define ESC1_CFG_RCGR					0x4D0AC
+#define MDSS_AHB_CBCR					0x4D07C
+#define MDSS_AXI_CBCR					0x4D080
+#define MDSS_PCLK0_CBCR					0x4D084
+#define MDSS_PCLK1_CBCR					0x4D0A4
+#define MDSS_MDP_CBCR					0x4D088
+#define MDSS_VSYNC_CBCR					0x4D090
+#define MDSS_BYTE0_CBCR					0x4D094
+#define MDSS_BYTE1_CBCR					0x4D0A0
+#define MDSS_ESC0_CBCR					0x4D098
+#define MDSS_ESC1_CBCR					0x4D09C
+#define CSI0PHYTIMER_CMD_RCGR				0x4E000
+#define CSI0PHYTIMER_CFG_RCGR				0x4E004
+#define CAMSS_CSI0PHYTIMER_CBCR				0x4E01C
+#define CSI0P_CMD_RCGR					0x58084
+#define CSI0P_CFG_RCGR					0x58088
+#define CAMSS_CSI0_CSIPHY_3P_CBCR			0x58090
+#define CSI1P_CMD_RCGR					0x58094
+#define CSI1P_CFG_RCGR					0x58098
+#define CAMSS_CSI1_CSIPHY_3P_CBCR			0x580A0
+#define CSI2P_CMD_RCGR					0x580A4
+#define CSI2P_CFG_RCGR					0x580A8
+#define CAMSS_CSI2_CSIPHY_3P_CBCR			0x580B0
+#define CSI1PHYTIMER_CMD_RCGR				0x4F000
+#define CSI1PHYTIMER_CFG_RCGR				0x4F004
+#define CAMSS_CSI1PHYTIMER_CBCR				0x4F01C
+#define CSI0_CMD_RCGR					0x4E020
+#define CSI2PHYTIMER_CMD_RCGR				0x4F05C
+#define CSI2PHYTIMER_CFG_RCGR				0x4F060
+#define CAMSS_CSI2PHYTIMER_CBCR				0x4F068
+#define CSI0_CFG_RCGR					0x4E024
+#define CAMSS_CSI0_CBCR					0x4E03C
+#define CAMSS_CSI0_AHB_CBCR				0x4E040
+#define CAMSS_CSI0PHY_CBCR				0x4E048
+#define CAMSS_CSI0RDI_CBCR				0x4E050
+#define CAMSS_CSI0PIX_CBCR				0x4E058
+#define CSI1_CMD_RCGR					0x4F020
+#define CSI1_CFG_RCGR					0x4F024
+#define CAMSS_CSI1_CBCR					0x4F03C
+#define CAMSS_CSI1_AHB_CBCR				0x4F040
+#define CAMSS_CSI1PHY_CBCR				0x4F048
+#define CAMSS_CSI1RDI_CBCR				0x4F050
+#define CAMSS_CSI1PIX_CBCR				0x4F058
+#define CSI2_CMD_RCGR					0x3C020
+#define CSI2_CFG_RCGR					0x3C024
+#define CAMSS_CSI2_CBCR					0x3C03C
+#define CAMSS_CSI2_AHB_CBCR				0x3C040
+#define CAMSS_CSI2PHY_CBCR				0x3C048
+#define CAMSS_CSI2RDI_CBCR				0x3C050
+#define CAMSS_CSI2PIX_CBCR				0x3C058
+#define CAMSS_ISPIF_AHB_CBCR				0x50004
+#define CCI_CMD_RCGR					0x51000
+#define CCI_CFG_RCGR					0x51004
+#define CCI_M						0x51008
+#define CCI_N						0x5100C
+#define CCI_D						0x51010
+#define CAMSS_CCI_CBCR					0x51018
+#define CAMSS_CCI_AHB_CBCR				0x5101C
+#define MCLK0_CMD_RCGR					0x52000
+#define MCLK0_CFG_RCGR					0x52004
+#define MCLK0_M						0x52008
+#define MCLK0_N						0x5200C
+#define MCLK0_D						0x52010
+#define CAMSS_MCLK0_CBCR				0x52018
+#define MCLK1_CMD_RCGR					0x53000
+#define MCLK1_CFG_RCGR					0x53004
+#define MCLK1_M						0x53008
+#define MCLK1_N						0x5300C
+#define MCLK1_D						0x53010
+#define CAMSS_MCLK1_CBCR				0x53018
+#define MCLK2_CMD_RCGR					0x5C000
+#define MCLK2_CFG_RCGR					0x5C004
+#define MCLK2_M						0x5C008
+#define MCLK2_N						0x5C00C
+#define MCLK2_D						0x5C010
+#define CAMSS_MCLK2_CBCR				0x5C018
+#define MCLK3_CMD_RCGR					0x5E000
+#define MCLK3_CFG_RCGR					0x5E004
+#define MCLK3_M						0x5E008
+#define MCLK3_N						0x5E00C
+#define MCLK3_D						0x5E010
+#define CAMSS_MCLK3_CBCR				0x5E018
+#define CAMSS_GP0_CMD_RCGR				0x54000
+#define CAMSS_GP0_CFG_RCGR				0x54004
+#define CAMSS_GP0_M					0x54008
+#define CAMSS_GP0_N					0x5400C
+#define CAMSS_GP0_D					0x54010
+#define CAMSS_GP0_CBCR					0x54018
+#define CAMSS_GP1_CMD_RCGR				0x55000
+#define CAMSS_GP1_CFG_RCGR				0x55004
+#define CAMSS_GP1_M					0x55008
+#define CAMSS_GP1_N					0x5500C
+#define CAMSS_GP1_D					0x55010
+#define CAMSS_GP1_CBCR					0x55018
+#define CAMSS_TOP_AHB_CBCR				0x5A014
+#define CAMSS_AHB_CBCR					0x56004
+#define CAMSS_MICRO_BCR					0x56008
+#define CAMSS_MICRO_AHB_CBCR				0x5600C
+#define JPEG0_CMD_RCGR					0x57000
+#define JPEG0_CFG_RCGR					0x57004
+#define CAMSS_JPEG0_CBCR				0x57020
+#define CAMSS_JPEG_AHB_CBCR				0x57024
+#define CAMSS_JPEG_AXI_CBCR				0x57028
+#define VFE0_CMD_RCGR					0x58000
+#define VFE0_CFG_RCGR					0x58004
+#define CPP_CMD_RCGR					0x58018
+#define CPP_CFG_RCGR					0x5801C
+#define CAMSS_VFE0_CBCR					0x58038
+#define CAMSS_CPP_CBCR					0x5803C
+#define CAMSS_CPP_AHB_CBCR				0x58040
+#define CAMSS_VFE_AHB_CBCR				0x58044
+#define CAMSS_VFE_AXI_CBCR				0x58048
+#define CAMSS_CSI_VFE0_CBCR				0x58050
+#define VFE1_CMD_RCGR					0x58054
+#define VFE1_CFG_RCGR					0x58058
+#define CAMSS_VFE1_CBCR					0x5805C
+#define CAMSS_VFE1_AHB_CBCR				0x58060
+#define CAMSS_CPP_AXI_CBCR				0x58064
+#define CAMSS_VFE1_AXI_CBCR				0x58068
+#define CAMSS_CSI_VFE1_CBCR				0x58074
+#define GFX3D_CMD_RCGR					0x59000
+#define GFX3D_CFG_RCGR					0x59004
+#define OXILI_GFX3D_CBCR				0x59020
+#define OXILI_AHB_CBCR					0x59028
+#define BIMC_GPU_CBCR					0x59030
+#define OXILI_TIMER_CBCR				0x59040
+#define CAMSS_TOP_AHB_CMD_RCGR				0x5A000
+#define CAMSS_TOP_AHB_CFG_RCGR				0x5A004
+#define CAMSS_TOP_AHB_M					0x5A008
+#define CAMSS_TOP_AHB_N					0x5A00C
+#define CAMSS_TOP_AHB_D					0x5A010
+#define GX_DOMAIN_MISC					0x5B00C
+#define APC0_VOLTAGE_DROOP_DETECTOR_GPLL0_CBCR		0x78004
+#define APC0_VOLTAGE_DROOP_DETECTOR_CMD_RCGR		0x78008
+#define APC0_VOLTAGE_DROOP_DETECTOR_CFG_RCGR		0x7800C
+#define APC1_VOLTAGE_DROOP_DETECTOR_GPLL0_CBCR		0x79004
+#define APC1_VOLTAGE_DROOP_DETECTOR_CMD_RCGR		0x79008
+#define APC1_VOLTAGE_DROOP_DETECTOR_CFG_RCGR		0x7900C
+#define QUSB_REF_CLK_EN					0x41030
+#define USB_SS_REF_CLK_EN				0x3F07C
+
+/* Mux source select values */
+#define xo_src_val			0
+#define xo_a_src_val			0
+#define xo_pipe_src_val			1
+
+#define gpll0_src_val			1
+#define gpll0_main_src_val		2   /* cci_clk_src */
+#define gpll0_main_mock_src_val		3   /* usb30_mock_utmi_clk_src */
+
+#define gpll0_main_div2_usb3_src_val	2   /* usb30_master_clk_src
+					     * rbcpr_gfx_clk_src
+					     */
+#define gpll0_main_div2_src_val		4
+#define gpll0_main_div2_cci_src_val	3   /* cci_clk_src */
+#define gpll0_main_div2_mm_src_val	5   /* gfx3d_clk_src vfe0_clk_src
+					     * vfe1_clk_src cpp_clk_src
+					     * csi0_clk_src csi0p_clk_src
+					     * csi1p_clk_src csi2p_clk_src
+					     */
+#define gpll0_main_div2_axi_src_val	6   /* apss_axi_clk_src */
+
+#define gpll2_src_val			4   /* vfe0_clk_src  vfe1_clk_src
+					     * cpp_clk_src   csi0_clk_src
+					     * csi0p_clk_src csi1p_clk_src
+					     * csi2p_clk_src
+					     */
+
+#define gpll2_out_main_src_val		5   /* jpeg0_clk_src csi1_clk_src
+					     * csi2_clk_src
+					     */
+#define gpll2_vcodec_src_val		3   /* vcodec0_clk_src */
+
+#define gpll3_src_val			2   /* gfx3d_clk_src */
+
+#define gpll4_src_val			2   /* sdcc1_apps_clk_src v_droop */
+#define gpll4_aux_src_val		2   /* sdcc2_apps_clk_src */
+#define gpll4_out_aux_src_val		4   /* gfx3d_clk_src */
+
+#define gpll6_main_src_val		1   /* usb30_mock_utmi_clk_src */
+#define gpll6_src_val			2
+#define gpll6_main_gfx_src_val		3   /* gfx3d_clk_src */
+
+#define gpll6_main_div2_mock_src_val    2   /* usb30_mock_utmi_clk_src */
+
+#define gpll6_main_div2_src_val		5   /* mclk0_clk_src mclk1_clk_src
+					     * mclk2_clk_src mclk3_clk_src
+					     */
+#define gpll6_main_div2_gfx_src_val	6   /* gfx3d_clk_src */
+
+#define gpll6_aux_src_val		2  /* gp1_clk_src gp2_clk_src
+					    * gp3_clk_src camss_gp0_clk_src
+					    * camss_gp1_clk_src
+					    */
+
+#define gpll6_out_aux_src_val		3   /* mdp_clk_src cpp_clk_src */
+
+#define usb3_pipe_src_val		0
+
+#define dsi0_phypll_mm_src_val		1   /* byte0_clk & pclk0_clk */
+#define dsi1_phypll_mm_src_val		3   /* byte0_clk & pclk0_clk */
+
+#define dsi0_phypll_clk_mm_src_val	3   /* byte1_clk & pclk1_clk */
+#define dsi1_phypll_clk_mm_src_val	1   /* byte1_clk & pclk1_clk */
+
+#define F(f, s, div, m, n) \
+	{ \
+		.freq_hz = (f), \
+		.src_clk = &s##_clk_src.c, \
+		.m_val = (m), \
+		.n_val = ~((n)-(m)) * !!(n), \
+		.d_val = ~(n),\
+		.div_src_val = BVAL(4, 0, (int)(2*(div) - 1)) \
+			| BVAL(10, 8, s##_src_val), \
+	}
+
+#define F_MM(f, s_f, s, div, m, n) \
+	{ \
+		.freq_hz = (f), \
+		.src_freq = (s_f), \
+		.src_clk = &s##_clk_src.c, \
+		.m_val = (m), \
+		.n_val = ~((n)-(m)) * !!(n), \
+		.d_val = ~(n),\
+		.div_src_val = BVAL(4, 0, (int)(2*(div) - 1)) \
+			| BVAL(10, 8, s##_src_val), \
+	}
+
+#define VDD_DIG_FMAX_MAP1(l1, f1) \
+	.vdd_class = &vdd_dig, \
+	.fmax = (unsigned long[VDD_DIG_NUM]) {  \
+		[VDD_DIG_##l1] = (f1),          \
+	},                                      \
+	.num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP2(l1, f1, l2, f2) \
+	.vdd_class = &vdd_dig, \
+	.fmax = (unsigned long[VDD_DIG_NUM]) {  \
+		[VDD_DIG_##l1] = (f1),          \
+		[VDD_DIG_##l2] = (f2),          \
+	},                                      \
+	.num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP3(l1, f1, l2, f2, l3, f3) \
+	.vdd_class = &vdd_dig, \
+	.fmax = (unsigned long[VDD_DIG_NUM]) {  \
+		[VDD_DIG_##l1] = (f1),          \
+		[VDD_DIG_##l2] = (f2),          \
+		[VDD_DIG_##l3] = (f3),          \
+	},                                      \
+	.num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP4(l1, f1, l2, f2, l3, f3, l4, f4) \
+	.vdd_class = &vdd_dig, \
+	.fmax = (unsigned long[VDD_DIG_NUM]) {  \
+		[VDD_DIG_##l1] = (f1),          \
+		[VDD_DIG_##l2] = (f2),          \
+		[VDD_DIG_##l3] = (f3),          \
+		[VDD_DIG_##l4] = (f4),          \
+	},                                      \
+	.num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP5(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5) \
+	.vdd_class = &vdd_dig, \
+	.fmax = (unsigned long[VDD_DIG_NUM]) {  \
+		[VDD_DIG_##l1] = (f1),          \
+		[VDD_DIG_##l2] = (f2),          \
+		[VDD_DIG_##l3] = (f3),          \
+		[VDD_DIG_##l4] = (f4),          \
+		[VDD_DIG_##l5] = (f5),          \
+	},                                      \
+	.num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP6(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5, l6, f6) \
+	.vdd_class = &vdd_dig, \
+	.fmax = (unsigned long[VDD_DIG_NUM]) {  \
+		[VDD_DIG_##l1] = (f1),          \
+		[VDD_DIG_##l2] = (f2),          \
+		[VDD_DIG_##l3] = (f3),          \
+		[VDD_DIG_##l4] = (f4),          \
+		[VDD_DIG_##l5] = (f5),          \
+		[VDD_DIG_##l6] = (f6),          \
+	},                                      \
+	.num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP7(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5, l6, \
+							   f6, l7, f7) \
+	.vdd_class = &vdd_dig, \
+	.fmax = (unsigned long[VDD_DIG_NUM]) {  \
+		[VDD_DIG_##l1] = (f1),          \
+		[VDD_DIG_##l2] = (f2),          \
+		[VDD_DIG_##l3] = (f3),          \
+		[VDD_DIG_##l4] = (f4),          \
+		[VDD_DIG_##l5] = (f5),          \
+		[VDD_DIG_##l6] = (f6),          \
+		[VDD_DIG_##l7] = (f7),          \
+	},                                      \
+	.num_fmax = VDD_DIG_NUM
+
+enum vdd_dig_levels {
+	VDD_DIG_NONE,
+	VDD_DIG_MIN_SVS,
+	VDD_DIG_LOW_SVS,
+	VDD_DIG_SVS,
+	VDD_DIG_SVS_PLUS,
+	VDD_DIG_NOM,
+	VDD_DIG_NOM_PLUS,
+	VDD_DIG_HIGH,
+	VDD_DIG_NUM
+};
+
+static int vdd_level[] = {
+	RPM_REGULATOR_LEVEL_NONE,		/* VDD_DIG_NONE */
+	RPM_REGULATOR_LEVEL_MIN_SVS,		/* VDD_DIG_MIN_SVS */
+	RPM_REGULATOR_LEVEL_LOW_SVS,		/* VDD_DIG_LOW_SVS */
+	RPM_REGULATOR_LEVEL_SVS,		/* VDD_DIG_SVS */
+	RPM_REGULATOR_LEVEL_SVS_PLUS,		/* VDD_DIG_SVS_PLUS */
+	RPM_REGULATOR_LEVEL_NOM,		/* VDD_DIG_NOM */
+	RPM_REGULATOR_LEVEL_NOM_PLUS,		/* VDD_DIG_NOM_PLUS */
+	RPM_REGULATOR_LEVEL_TURBO,		/* VDD_DIG_HIGH */
+};
+
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_level, NULL);
+static DEFINE_VDD_REGS_INIT(vdd_gfx, 1);
+
+#define RPM_MISC_CLK_TYPE       0x306b6c63
+#define RPM_BUS_CLK_TYPE        0x316b6c63
+#define RPM_MEM_CLK_TYPE        0x326b6c63
+#define RPM_IPA_CLK_TYPE        0x00617069
+#define RPM_SMD_KEY_ENABLE      0x62616E45
+
+#define XO_ID                   0x0
+#define QDSS_ID                 0x1
+#define BUS_SCALING             0x2
+
+#define PCNOC_ID                0x0
+#define SNOC_ID                 0x1
+#define SYSMMNOC_ID             0x2
+#define BIMC_ID                 0x0
+#define IPA_ID                  0x0
+
+#define BB_CLK1_ID              0x1
+#define BB_CLK2_ID              0x2
+#define RF_CLK2_ID              0x5
+#define RF_CLK3_ID              0x8
+#define DIV_CLK1_ID             0xB
+#define DIV_CLK2_ID		0xC
+
+#endif
+
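For reference, F() above packs one complete RCG frequency-table entry. A
minimal sketch of how an entry expands; gpll0_main_div2_usb3_src_val == 2 is
taken from this header, while the existence of a gpll0_main_div2_usb3_clk_src
clock object is an assumption (it lives in the clock driver, not shown here):

    F(100000000, gpll0_main_div2_usb3, 4, 0, 0)
    /*
     * expands to roughly:
     *   .freq_hz     = 100000000,
     *   .src_clk     = &gpll0_main_div2_usb3_clk_src.c,
     *   .m_val = 0, .n_val = 0, .d_val = ~0,    (M/N counter unused: n == 0)
     *   .div_src_val = BVAL(4, 0, 7) | BVAL(10, 8, 2) = 0x207
     *
     * The divider field stores 2*div - 1, so half-integer dividers are
     * representable (div = 4 -> 7, div = 4.5 -> 8); bits 10:8 carry the
     * mux source-select value.
     */

Similarly, the VDD_DIG_FMAX_MAP*() macros attach a voltage-level/fmax table
to a clock object. A sketch, assuming a struct rcg_clk with clk_ops_rcg as
defined elsewhere in the MSM clock driver; the rates are illustrative:

    static struct rcg_clk example_clk_src = {
    	/* ... RCG programming fields elided ... */
    	.c = {
    		.dbg_name = "example_clk_src",
    		.ops = &clk_ops_rcg,	/* assumed to be provided elsewhere */
    		VDD_DIG_FMAX_MAP2(SVS, 50000000, NOM, 100000000),
    		CLK_INIT(example_clk_src.c),
    	},
    };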
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index a52b65a..793255d 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -1004,6 +1004,12 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
 struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
 				void *data, const struct file_operations *fops);
 #endif
+#else
+struct of_device_id;
+
+static inline void __init of_clk_init(const struct of_device_id *matches)
+{
+}
 
 #endif /* CONFIG_COMMON_CLK */
 #endif /* CLK_PROVIDER_H */
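The new #else branch gives !CONFIG_COMMON_CLK builds (such as
CONFIG_COMMON_CLK_MSM ones) an empty of_clk_init() stub, so platform setup
code can call it unconditionally. A minimal sketch:

    static void __init example_clock_init(void)
    {
    	/* NULL selects the built-in __clk_of_table match list;
    	 * this compiles to a no-op when CONFIG_COMMON_CLK is disabled. */
    	of_clk_init(NULL);
    }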
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 094b152..eaaad7d 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -20,6 +20,8 @@ struct device;
 
 struct clk;
 
+#ifdef CONFIG_COMMON_CLK
+
 /**
  * DOC: clk notifier callback types
  *
@@ -76,8 +78,6 @@ struct clk_notifier_data {
 	unsigned long		new_rate;
 };
 
-#ifdef CONFIG_COMMON_CLK
-
 /**
  * clk_notifier_register: register a clock rate-change notifier callback
  * @clk: clock whose rate we are interested in
@@ -524,7 +524,7 @@ static inline void clk_disable_unprepare(struct clk *clk)
 struct device_node;
 struct of_phandle_args;
 
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+#if defined(CONFIG_OF)
 struct clk *of_clk_get(struct device_node *np, int index);
 struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
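With the guard relaxed to CONFIG_OF alone, OF-based clock lookups remain
available when the MSM framework replaces the common clock framework. A
consumer sketch; the "core_clk" connection name is an assumption:

    static int example_probe(struct platform_device *pdev)
    {
    	struct clk *c = of_clk_get_by_name(pdev->dev.of_node, "core_clk");

    	if (IS_ERR(c))
    		return PTR_ERR(c);
    	return clk_prepare_enable(c);
    }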
diff --git a/include/linux/clk/gdsc.h b/include/linux/clk/gdsc.h
new file mode 100644
index 0000000..b5a03ac
--- /dev/null
+++ b/include/linux/clk/gdsc.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __GDSC_H
+#define __GDSC_H
+
+#include <linux/regulator/consumer.h>
+
+/* Allow the clock memories to be turned off */
+void gdsc_allow_clear_retention(struct regulator *regulator);
+
+#endif
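A sketch of a consumer of the new GDSC helper; the supply name "gdsc" and
the probe context are assumptions:

    static int example_probe(struct platform_device *pdev)
    {
    	struct regulator *gdsc_reg;

    	gdsc_reg = devm_regulator_get(&pdev->dev, "gdsc");
    	if (IS_ERR(gdsc_reg))
    		return PTR_ERR(gdsc_reg);

    	/* let the domain's memories power collapse with the GDSC */
    	gdsc_allow_clear_retention(gdsc_reg);
    	return 0;
    }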
diff --git a/include/linux/clk/msm-clk-provider.h b/include/linux/clk/msm-clk-provider.h
new file mode 100644
index 0000000..2bc6d18
--- /dev/null
+++ b/include/linux/clk/msm-clk-provider.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_CLK_PROVIDER_H
+#define __MSM_CLK_PROVIDER_H
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/seq_file.h>
+#include <linux/clk/msm-clk.h>
+
+#if defined(CONFIG_COMMON_CLK_MSM)
+/*
+ * Bit manipulation macros
+ */
+#define BM(msb, lsb)	(((((uint32_t)-1) << (31-(msb))) >> (31-(msb)+(lsb))) << (lsb))
+#define BVAL(msb, lsb, val)	(((val) << (lsb)) & BM(msb, lsb))
+
+/*
+ * Halt/Status Checking Mode Macros
+ */
+#define HALT		0	/* Bit pol: 1 = halted */
+#define NOCHECK		1	/* No bit to check, do nothing */
+#define HALT_VOTED	2	/* Bit pol: 1 = halted; delay on disable */
+#define ENABLE		3	/* Bit pol: 1 = running */
+#define ENABLE_VOTED	4	/* Bit pol: 1 = running; delay on disable */
+#define DELAY		5	/* No bit to check, just delay */
+
+struct clk_register_data {
+	char *name;
+	u32 offset;
+};
+#ifdef CONFIG_DEBUG_FS
+void clk_debug_print_hw(struct clk *clk, struct seq_file *f);
+#else
+static inline void clk_debug_print_hw(struct clk *clk, struct seq_file *f) {}
+#endif
+
+#define CLK_WARN(clk, cond, fmt, ...) do {				\
+	clk_debug_print_hw(clk, NULL);					\
+	WARN(cond, "%s: " fmt, clk_name(clk), ##__VA_ARGS__);		\
+} while (0)
+
+/**
+ * struct clk_vdd_class - Voltage scaling class
+ * @class_name: name of the class
+ * @regulator: array of regulators.
+ * @num_regulators: size of regulator array. Standard regulator APIs will be
+			used if this field > 0.
+ * @set_vdd: function to call when applying a new voltage setting.
+ * @vdd_uv: sorted 2D array of legal voltage settings. Indexed by level, then
+		regulator.
+ * @vdd_ua: sorted 2D array of legal current settings. Indexed by level, then
+		regulator. Optional parameter.
+ * @level_votes: array of votes for each level.
+ * @num_levels: specifies the size of level_votes array.
+ * @skip_handoff: do not vote for the max possible voltage during init
+ * @use_max_uV: use INT_MAX for max_uV when calling regulator_set_voltage.
+ *           This is useful when vdd_class instances share one regulator.
+ * @cur_level: the currently set voltage level
+ * @lock: lock to protect this struct
+ */
+struct clk_vdd_class {
+	const char *class_name;
+	struct regulator **regulator;
+	int num_regulators;
+	int (*set_vdd)(struct clk_vdd_class *v_class, int level);
+	int *vdd_uv;
+	int *vdd_ua;
+	int *level_votes;
+	int num_levels;
+	bool skip_handoff;
+	bool use_max_uV;
+	unsigned long cur_level;
+	struct mutex lock;
+};
+
+#define DEFINE_VDD_CLASS(_name, _set_vdd, _num_levels) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.set_vdd = _set_vdd, \
+		.level_votes = (int [_num_levels]) {}, \
+		.num_levels = _num_levels, \
+		.cur_level = _num_levels, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
+
+#define DEFINE_VDD_REGULATORS(_name, _num_levels, _num_regulators, _vdd_uv, \
+	 _vdd_ua) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.vdd_uv = _vdd_uv, \
+		.vdd_ua = _vdd_ua, \
+		.regulator = (struct regulator * [_num_regulators]) {}, \
+		.num_regulators = _num_regulators, \
+		.level_votes = (int [_num_levels]) {}, \
+		.num_levels = _num_levels, \
+		.cur_level = _num_levels, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
+
+#define DEFINE_VDD_REGS_INIT(_name, _num_regulators) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.regulator = (struct regulator * [_num_regulators]) {}, \
+		.num_regulators = _num_regulators, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
+
+enum handoff {
+	HANDOFF_ENABLED_CLK,
+	HANDOFF_DISABLED_CLK,
+};
+
+struct clk_ops {
+	int (*prepare)(struct clk *clk);
+	int (*enable)(struct clk *clk);
+	void (*disable)(struct clk *clk);
+	void (*unprepare)(struct clk *clk);
+	void (*enable_hwcg)(struct clk *clk);
+	void (*disable_hwcg)(struct clk *clk);
+	int (*in_hwcg_mode)(struct clk *clk);
+	enum handoff (*handoff)(struct clk *clk);
+	int (*reset)(struct clk *clk, enum clk_reset_action action);
+	int (*pre_set_rate)(struct clk *clk, unsigned long new_rate);
+	int (*set_rate)(struct clk *clk, unsigned long rate);
+	void (*post_set_rate)(struct clk *clk, unsigned long old_rate);
+	int (*set_max_rate)(struct clk *clk, unsigned long rate);
+	int (*set_flags)(struct clk *clk, unsigned long flags);
+	int (*set_duty_cycle)(struct clk *clk, u32 numerator, u32 denominator);
+	unsigned long (*get_rate)(struct clk *clk);
+	long (*list_rate)(struct clk *clk, unsigned long n);
+	int (*is_enabled)(struct clk *clk);
+	long (*round_rate)(struct clk *clk, unsigned long rate);
+	int (*set_parent)(struct clk *clk, struct clk *parent);
+	struct clk *(*get_parent)(struct clk *clk);
+	bool (*is_local)(struct clk *clk);
+	void __iomem *(*list_registers)(struct clk *clk, int n,
+				struct clk_register_data **regs, u32 *size);
+};
+
+/**
+ * struct clk
+ * @prepare_count: prepare refcount
+ * @prepare_lock: protects clk_prepare()/clk_unprepare() path and @prepare_count
+ * @count: enable refcount
+ * @lock: protects clk_enable()/clk_disable() path and @count
+ * @depends: non-direct parent of clock to enable when this clock is enabled
+ * @vdd_class: voltage scaling requirement class
+ * @fmax: maximum frequency in Hz supported at each voltage level
+ * @parent: the current source of this clock
+ * @opp_table_populated: tracks if the OPP table of this clock has been filled
+ */
+struct clk {
+	uint32_t flags;
+	const struct clk_ops *ops;
+	const char *dbg_name;
+	struct clk *depends;
+	struct clk_vdd_class *vdd_class;
+	unsigned long *fmax;
+	int num_fmax;
+	unsigned long rate;
+	struct clk *parent;
+	struct clk_src *parents;
+	unsigned int num_parents;
+
+	struct list_head children;
+	struct list_head siblings;
+	struct list_head list;
+
+	unsigned long count;
+	unsigned long notifier_count;
+	spinlock_t lock;
+	unsigned long prepare_count;
+	struct mutex prepare_lock;
+
+	unsigned long init_rate;
+	bool always_on;
+	bool opp_table_populated;
+
+	struct dentry *clk_dir;
+};
+
+#define CLK_INIT(name) \
+	.lock = __SPIN_LOCK_UNLOCKED((name).lock), \
+	.prepare_lock = __MUTEX_INITIALIZER((name).prepare_lock), \
+	.children = LIST_HEAD_INIT((name).children), \
+	.siblings = LIST_HEAD_INIT((name).siblings), \
+	.list = LIST_HEAD_INIT((name).list)
+
+bool is_rate_valid(struct clk *clk, unsigned long rate);
+int vote_vdd_level(struct clk_vdd_class *vdd_class, int level);
+int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level);
+int __clk_pre_reparent(struct clk *c, struct clk *new, unsigned long *flags);
+void __clk_post_reparent(struct clk *c, struct clk *old, unsigned long *flags);
+
+/* Register clocks with the MSM clock driver */
+int msm_clock_register(struct clk_lookup *table, size_t size);
+int of_msm_clock_register(struct device_node *np, struct clk_lookup *table,
+				size_t size);
+
+int clock_rcgwr_init(struct platform_device *pdev);
+int clock_rcgwr_disable(struct platform_device *pdev);
+
+extern struct clk dummy_clk;
+extern const struct clk_ops clk_ops_dummy;
+
+#define CLK_DUMMY(clk_name, clk_id, clk_dev, flags) { \
+	.con_id = clk_name, \
+	.dev_id = clk_dev, \
+	.clk = &dummy_clk, \
+	}
+
+#define DEFINE_CLK_DUMMY(name, _rate) \
+	static struct fixed_clk name = { \
+		.c = { \
+			.dbg_name = #name, \
+			.rate = _rate, \
+			.ops = &clk_ops_dummy, \
+			CLK_INIT(name.c), \
+		}, \
+	}
+
+#define CLK_LOOKUP(con, c, dev) { .con_id = con, .clk = &c, .dev_id = dev }
+#define CLK_LOOKUP_OF(con, _c, dev) { .con_id = con, .clk = &(&_c)->c, \
+				      .dev_id = dev, .of_idx = clk_##_c }
+#define CLK_LIST(_c) { .clk = &(&_c)->c, .of_idx = clk_##_c }
+
+static inline bool is_better_rate(unsigned long req, unsigned long best,
+				  unsigned long new)
+{
+	if (IS_ERR_VALUE(new))
+		return false;
+
+	return (req <= new && new < best) || (best < req && best < new);
+}
+
+extern int of_clk_add_provider(struct device_node *np,
+			struct clk *(*clk_src_get)(struct of_phandle_args *args,
+						   void *data),
+			void *data);
+extern void of_clk_del_provider(struct device_node *np);
+
+static inline const char *clk_name(struct clk *c)
+{
+	if (IS_ERR_OR_NULL(c))
+		return "(null)";
+	return c->dbg_name;
+}
+#endif /* CONFIG_COMMON_CLK_MSM */
+#endif
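The CLK_LIST()/CLK_LOOKUP_OF() macros are what tie the hashed clock IDs from
the dt-bindings headers (clk_gcc_* above) to clock objects, via the new
clk_lookup.of_idx field; is_better_rate() prefers the smallest candidate at
or above the request, else the largest one below it. A registration sketch,
assuming gcc_gp1_clk is defined elsewhere (e.g. as a branch clock) and using
an illustrative device name:

    static struct clk_lookup msm_clocks_example[] = {
    	CLK_LIST(gcc_gp1_clk),	/* .of_idx = clk_gcc_gp1_clk (hashed ID) */
    	CLK_LOOKUP_OF("core_clk", gcc_gp1_clk, "1234567.example"),
    };

    /* in the clock controller's probe: */
    rc = of_msm_clock_register(pdev->dev.of_node, msm_clocks_example,
    			       ARRAY_SIZE(msm_clocks_example));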
diff --git a/include/linux/clk/msm-clk.h b/include/linux/clk/msm-clk.h
new file mode 100644
index 0000000..baa8e52
--- /dev/null
+++ b/include/linux/clk/msm-clk.h
@@ -0,0 +1,125 @@
+/* Copyright (c) 2009, 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MACH_CLK_H
+#define __MACH_CLK_H
+
+#include <linux/notifier.h>
+
+#define CLKFLAG_INVERT			0x00000001
+#define CLKFLAG_NOINVERT		0x00000002
+#define CLKFLAG_NONEST			0x00000004
+#define CLKFLAG_NORESET			0x00000008
+#define CLKFLAG_RETAIN_PERIPH		0x00000010
+#define CLKFLAG_NORETAIN_PERIPH		0x00000020
+#define CLKFLAG_RETAIN_MEM		0x00000040
+#define CLKFLAG_NORETAIN_MEM		0x00000080
+#define CLKFLAG_SKIP_HANDOFF		0x00000100
+#define CLKFLAG_MIN			0x00000400
+#define CLKFLAG_MAX			0x00000800
+#define CLKFLAG_INIT_DONE		0x00001000
+#define CLKFLAG_INIT_ERR		0x00002000
+#define CLKFLAG_NO_RATE_CACHE		0x00004000
+#define CLKFLAG_MEASURE			0x00008000
+#define CLKFLAG_EPROBE_DEFER		0x00010000
+#define CLKFLAG_PERIPH_OFF_SET		0x00020000
+#define CLKFLAG_PERIPH_OFF_CLEAR	0x00040000
+
+struct clk_lookup;
+struct clk;
+
+enum clk_reset_action {
+	CLK_RESET_DEASSERT	= 0,
+	CLK_RESET_ASSERT	= 1
+};
+
+struct clk_src {
+	struct clk *src;
+	int sel;
+};
+
+/* Rate is maximum clock rate in Hz */
+int clk_set_max_rate(struct clk *clk, unsigned long rate);
+
+/* Assert/Deassert reset to a hardware block associated with a clock */
+int clk_reset(struct clk *clk, enum clk_reset_action action);
+
+/* Set clock-specific configuration parameters */
+int clk_set_flags(struct clk *clk, unsigned long flags);
+
+/* returns the mux selection index associated with a particular parent */
+int parent_to_src_sel(struct clk_src *parents, int num_parents, struct clk *p);
+
+/* returns the mux selection index of the given parent in c's parent list */
+int clk_get_parent_sel(struct clk *c, struct clk *parent);
+
+/**
+ * DOC: clk notifier callback types
+ *
+ * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
+ *     to indicate that the rate change will proceed.  Drivers must
+ *     immediately terminate any operations that will be affected by the
+ *     rate change.  Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
+ *     NOTIFY_STOP or NOTIFY_BAD.
+ *
+ * ABORT_RATE_CHANGE: called if the rate change failed for some reason
+ *     after PRE_RATE_CHANGE.  In this case, all registered notifiers on
+ *     the clk will be called with ABORT_RATE_CHANGE. Callbacks must
+ *     always return NOTIFY_DONE or NOTIFY_OK.
+ *
+ * POST_RATE_CHANGE - called after the clk rate change has successfully
+ *     completed.  Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
+ *
+ */
+#define PRE_RATE_CHANGE			BIT(0)
+#define POST_RATE_CHANGE		BIT(1)
+#define ABORT_RATE_CHANGE		BIT(2)
+
+/**
+ * struct msm_clk_notifier - associate a clk with a notifier
+ * @clk: struct clk * to associate the notifier with
+ * @notifier_head: a blocking_notifier_head for this clk
+ * @node: linked list pointers
+ *
+ * A list of struct clk_notifier is maintained by the notifier code.
+ * An entry is created whenever code registers the first notifier on a
+ * particular @clk.  Future notifiers on that @clk are added to the
+ * @notifier_head.
+ */
+struct msm_clk_notifier {
+	struct clk			*clk;
+	struct srcu_notifier_head	notifier_head;
+	struct list_head		node;
+};
+
+/**
+ * struct msm_clk_notifier_data - rate data to pass to the notifier callback
+ * @clk: struct clk * being changed
+ * @old_rate: previous rate of this clk
+ * @new_rate: new rate of this clk
+ *
+ * For a pre-notifier, old_rate is the clk's rate before this rate
+ * change, and new_rate is what the rate will be in the future.  For a
+ * post-notifier, old_rate and new_rate are both set to the clk's
+ * current rate (this was done to optimize the implementation).
+ */
+struct msm_clk_notifier_data {
+	struct clk		*clk;
+	unsigned long		old_rate;
+	unsigned long		new_rate;
+};
+
+int msm_clk_notif_register(struct clk *clk, struct notifier_block *nb);
+
+int msm_clk_notif_unregister(struct clk *clk, struct notifier_block *nb);
+
+#endif
+
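A sketch of a rate-change notifier built on the API above; all names are
illustrative:

    static int example_rate_cb(struct notifier_block *nb,
    			       unsigned long event, void *data)
    {
    	struct msm_clk_notifier_data *nd = data;

    	if (event == PRE_RATE_CHANGE)
    		pr_debug("rate change %lu -> %lu\n",
    			 nd->old_rate, nd->new_rate);
    	return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
    	.notifier_call = example_rate_cb,
    };

    /* ...somewhere with a valid struct clk *c: */
    rc = msm_clk_notif_register(c, &example_nb);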
diff --git a/include/linux/clk/msm-clock-generic.h b/include/linux/clk/msm-clock-generic.h
new file mode 100644
index 0000000..010a37f
--- /dev/null
+++ b/include/linux/clk/msm-clock-generic.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCK_GENERIC_H
+#define __MSM_CLOCK_GENERIC_H
+
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/of.h>
+
+/**
+ * struct fixed_clk - fixed rate clock
+ * @c: clk
+ */
+struct fixed_clk {
+	struct clk c;
+};
+
+/* ==================== Mux clock ==================== */
+
+struct mux_clk;
+
+struct clk_mux_ops {
+	int (*set_mux_sel)(struct mux_clk *clk, int sel);
+	int (*get_mux_sel)(struct mux_clk *clk);
+
+	/* Optional */
+	bool (*is_enabled)(struct mux_clk *clk);
+	int (*enable)(struct mux_clk *clk);
+	void (*disable)(struct mux_clk *clk);
+	void __iomem *(*list_registers)(struct mux_clk *clk, int n,
+				struct clk_register_data **regs, u32 *size);
+};
+
+#define MUX_SRC_LIST(...) \
+	.parents = (struct clk_src[]){__VA_ARGS__}, \
+	.num_parents = ARRAY_SIZE(((struct clk_src[]){__VA_ARGS__}))
+
+#define MUX_REC_SRC_LIST(...) \
+	.rec_parents = (struct clk * []){__VA_ARGS__}, \
+	.num_rec_parents = ARRAY_SIZE(((struct clk * []){__VA_ARGS__}))
+
+struct mux_clk {
+	/* Parents in decreasing order of preference for obtaining rates. */
+	struct clk_src	*parents;
+	int		num_parents;
+	/* Recursively search for the requested parent in rec_parents. */
+	struct clk	**rec_parents;
+	int		num_rec_parents;
+	struct clk	*safe_parent;
+	int		safe_sel;
+	unsigned long	safe_freq;
+	/*
+	 * Before attempting a clk_round_rate on available sources, attempt a
+	 * clk_get_rate on all those sources. If one of them is already at the
+	 * necessary rate, that source will be used.
+	 */
+	bool		try_get_rate;
+	struct clk_mux_ops *ops;
+	/*
+	 * Set if you need the mux to try a new parent before falling back to
+	 * the current parent. If the safe_parent field above is set, then the
+	 * safe_sel intermediate source will only be used if we fall back
+	 * to the current parent during mux_set_rate.
+	 */
+	bool		try_new_parent;
+
+	/* Fields not used by helper function. */
+	void *const __iomem *base;
+	u32		offset;
+	u32		en_offset;
+	u32		mask;
+	u32		shift;
+	u32		en_mask;
+	/*
+	 * Set post divider for debug mux in order to divide the clock
+	 * by post_div + 1.
+	 */
+	u32		post_div;
+	int		low_power_sel;
+	void		*priv;
+
+	struct clk	c;
+};
+
+static inline struct mux_clk *to_mux_clk(struct clk *c)
+{
+	return container_of(c, struct mux_clk, c);
+}
+
+extern const struct clk_ops clk_ops_gen_mux;
+
+/* ==================== Divider clock ==================== */
+
+struct div_clk;
+
+struct clk_div_ops {
+	int (*set_div)(struct div_clk *clk, int div);
+	int (*get_div)(struct div_clk *clk);
+	bool (*is_enabled)(struct div_clk *clk);
+	int (*enable)(struct div_clk *clk);
+	void (*disable)(struct div_clk *clk);
+	void __iomem *(*list_registers)(struct div_clk *clk, int n,
+				struct clk_register_data **regs, u32 *size);
+};
+
+struct div_data {
+	unsigned int div;
+	unsigned int min_div;
+	unsigned int max_div;
+	unsigned long rate_margin;
+	/*
+	 * Indicates whether this divider clock supports half-integer
+	 * dividers. If it does, min_div and max_div have been doubled,
+	 * i.e. they are 2*N.
+	 */
+	bool is_half_divider;
+	/*
+	 * Skip odd dividers since the hardware may not support them.
+	 */
+	bool skip_odd_div;
+	bool skip_even_div;
+	bool allow_div_one;
+	unsigned int cached_div;
+};
+
+struct div_clk {
+	struct div_data data;
+
+	/*
+	 * Some implementations may require the divider to be set to a "safe"
+	 * value that allows reprogramming of upstream clocks without violating
+	 * voltage constraints.
+	 */
+	unsigned long safe_freq;
+
+	/* Optional */
+	struct clk_div_ops *ops;
+
+	/* Fields not used by helper function. */
+	void *const __iomem *base;
+	u32		offset;
+	u32		mask;
+	u32		shift;
+	u32		en_mask;
+	void		*priv;
+	struct clk	c;
+};
+
+static inline struct div_clk *to_div_clk(struct clk *c)
+{
+	return container_of(c, struct div_clk, c);
+}
+
+extern const struct clk_ops clk_ops_div;
+extern const struct clk_ops clk_ops_slave_div;
+
+struct ext_clk {
+	struct clk c;
+	struct device *dev;
+	char *clk_id;
+};
+
+long parent_round_rate(struct clk *c, unsigned long rate);
+unsigned long parent_get_rate(struct clk *c);
+int parent_set_rate(struct clk *c, unsigned long rate);
+
+static inline struct ext_clk *to_ext_clk(struct clk *c)
+{
+	return container_of(c, struct ext_clk, c);
+}
+
+extern const struct clk_ops clk_ops_ext;
+
+#define DEFINE_FIXED_DIV_CLK(clk_name, _div, _parent) \
+static struct div_clk clk_name = {	\
+	.data = {				\
+		.max_div = _div,		\
+		.min_div = _div,		\
+		.div = _div,			\
+	},					\
+	.c = {					\
+		.parent = _parent,		\
+		.dbg_name = #clk_name,		\
+		.ops = &clk_ops_div,		\
+		CLK_INIT(clk_name.c),		\
+	}					\
+}
+
+#define DEFINE_FIXED_SLAVE_DIV_CLK(clk_name, _div, _parent) \
+static struct div_clk clk_name = {	\
+	.data = {				\
+		.max_div = _div,		\
+		.min_div = _div,		\
+		.div = _div,			\
+	},					\
+	.c = {					\
+		.parent = _parent,		\
+		.dbg_name = #clk_name,		\
+		.ops = &clk_ops_slave_div,		\
+		CLK_INIT(clk_name.c),		\
+	}					\
+}
+
+#define DEFINE_EXT_CLK(clk_name, _parent) \
+static struct ext_clk clk_name = {		\
+	.c = {					\
+		.parent = _parent,		\
+		.dbg_name = #clk_name,		\
+		.ops = &clk_ops_ext,		\
+		CLK_INIT(clk_name.c),		\
+	}					\
+}
+
+/* ==================== Mux Div clock ==================== */
+
+struct mux_div_clk;
+
+/*
+ * struct mux_div_ops
+ * the enable and disable ops are optional.
+ */
+
+struct mux_div_ops {
+	int (*set_src_div)(struct mux_div_clk *, u32 src_sel, u32 div);
+	void (*get_src_div)(struct mux_div_clk *, u32 *src_sel, u32 *div);
+	int (*enable)(struct mux_div_clk *);
+	void (*disable)(struct mux_div_clk *);
+	bool (*is_enabled)(struct mux_div_clk *);
+	void __iomem *(*list_registers)(struct mux_div_clk *md, int n,
+				struct clk_register_data **regs, u32 *size);
+};
+
+/*
+ * struct mux_div_clk - combined mux/divider clock
+ * @priv
+		parameters needed by ops
+ * @safe_freq
+		when switching rates from A to B, the mux div clock will
+		instead switch from A -> safe_freq -> B. This allows the
+		mux_div clock to change rates while enabled, even if this
+		behavior is not supported by the parent clocks.
+
+		If changing the rate of parent A also causes the rate of
+		parent B to change, then safe_freq must be defined.
+
+		safe_freq is expected to have a source clock which is always
+		on and runs at only one rate.
+ * @parents
+		list of parents and mux indices
+ * @ops
+		function pointers for hw specific operations
+ * @src_sel
+		the mux index which will be used if the clock is enabled.
+ * @try_get_rate
+		Set if you need the mux to directly jump to a source
+		that is at the desired rate currently.
+ * @force_enable_md
+		Set if the mux-div needs to be force enabled/disabled during
+		clk_enable/disable.
+ */
+
+struct mux_div_clk {
+	/* Required parameters */
+	struct mux_div_ops		*ops;
+	struct div_data			data;
+	struct clk_src			*parents;
+	u32				num_parents;
+
+	struct clk			c;
+
+	/* Internal */
+	u32				src_sel;
+
+	/* Optional parameters */
+	void				*priv;
+	void __iomem			*base;
+	u32				div_mask;
+	u32				div_offset;
+	u32				div_shift;
+	u32				src_mask;
+	u32				src_offset;
+	u32				src_shift;
+	u32				en_mask;
+	u32				en_offset;
+
+	u32				safe_div;
+	struct clk			*safe_parent;
+	unsigned long			safe_freq;
+	bool				try_get_rate;
+	bool				force_enable_md;
+};
+
+static inline struct mux_div_clk *to_mux_div_clk(struct clk *clk)
+{
+	return container_of(clk, struct mux_div_clk, c);
+}
+
+extern const struct clk_ops clk_ops_mux_div_clk;
+
+#endif
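A sketch of the helper types in use: a fixed /2 post-divider on an assumed
gpll0_clk_src, and a two-input mux driven by clk_ops_gen_mux
(example_mux_ops stands in for a hardware-specific clk_mux_ops):

    static DEFINE_FIXED_DIV_CLK(gpll0_main_div2, 2, &gpll0_clk_src.c);

    static struct mux_clk example_mux = {
    	.ops = &example_mux_ops,
    	MUX_SRC_LIST(
    		{ .src = &xo_clk_src.c,      .sel = 0 },
    		{ .src = &gpll0_main_div2.c, .sel = 4 },
    	),
    	.c = {
    		.dbg_name = "example_mux",
    		.ops = &clk_ops_gen_mux,
    		CLK_INIT(example_mux.c),
    	},
    };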
diff --git a/include/linux/clk/qcom.h b/include/linux/clk/qcom.h
index e2fee60..d413b0a 100644
--- a/include/linux/clk/qcom.h
+++ b/include/linux/clk/qcom.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,7 @@
 #ifndef __LINUX_CLK_QCOM_H_
 #define __LINUX_CLK_QCOM_H_
 
+#if defined(CONFIG_COMMON_CLK_QCOM)
 enum branch_mem_flags {
 	CLKFLAG_RETAIN_PERIPH,
 	CLKFLAG_NORETAIN_PERIPH,
@@ -23,5 +24,8 @@ enum branch_mem_flags {
 	CLKFLAG_PERIPH_OFF_SET,
 	CLKFLAG_PERIPH_OFF_CLEAR,
 };
+#elif defined(CONFIG_COMMON_CLK_MSM)
+#include <linux/clk/msm-clk.h>
+#endif /* CONFIG_COMMON_CLK_QCOM */
 
 #endif  /* __LINUX_CLK_QCOM_H_ */
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 2eabc86..9b8848b 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -22,6 +22,7 @@ struct clk_lookup {
 	struct list_head	node;
 	const char		*dev_id;
 	const char		*con_id;
+	int			of_idx;
 	struct clk		*clk;
 	struct clk_hw		*clk_hw;
 };
diff --git a/include/linux/input/qpnp-power-on.h b/include/linux/input/qpnp-power-on.h
index a2624ab..5944f0f 100644
--- a/include/linux/input/qpnp-power-on.h
+++ b/include/linux/input/qpnp-power-on.h
@@ -51,6 +51,7 @@ enum pon_power_off_type {
 };
 
 enum pon_restart_reason {
+	/* 0 ~ 31 for common defined features */
 	PON_RESTART_REASON_UNKNOWN		= 0x00,
 	PON_RESTART_REASON_RECOVERY		= 0x01,
 	PON_RESTART_REASON_BOOTLOADER		= 0x02,
@@ -58,6 +59,10 @@ enum pon_restart_reason {
 	PON_RESTART_REASON_DMVERITY_CORRUPTED	= 0x04,
 	PON_RESTART_REASON_DMVERITY_ENFORCE	= 0x05,
 	PON_RESTART_REASON_KEYS_CLEAR		= 0x06,
+
+	/* 32 ~ 63 for OEM/ODM specific features */
+	PON_RESTART_REASON_OEM_MIN		= 0x20,
+	PON_RESTART_REASON_OEM_MAX		= 0x3f,
 };
 
 #ifdef CONFIG_INPUT_QPNP_POWER_ON
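The reserved window lets OEM code route its own restart reasons (e.g. via
qpnp_pon_set_restart_reason()) without colliding with the common codes. A
range-check sketch; the helper name is hypothetical:

    static bool is_oem_restart_reason(enum pon_restart_reason reason)
    {
    	return reason >= PON_RESTART_REASON_OEM_MIN &&
    	       reason <= PON_RESTART_REASON_OEM_MAX;
    }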
diff --git a/include/linux/msm-bus.h b/include/linux/msm-bus.h
index a584e0a..f46b2f8 100644
--- a/include/linux/msm-bus.h
+++ b/include/linux/msm-bus.h
@@ -130,8 +130,6 @@ int msm_bus_scale_query_tcs_cmd(struct msm_bus_tcs_usecase *tcs_usecase,
 					uint32_t cl, unsigned int index);
 int msm_bus_scale_query_tcs_cmd_all(struct msm_bus_tcs_handle *tcs_handle,
 					uint32_t cl);
-int msm_bus_noc_throttle_wa(bool enable);
-int msm_bus_noc_priority_wa(bool enable);
 
 /* AXI Port configuration APIs */
 int msm_bus_axi_porthalt(int master_port);
@@ -213,6 +211,12 @@ static inline int msm_bus_scale_query_tcs_cmd_all(struct msm_bus_tcs_handle
 	return 0;
 }
 
+#endif
+
+#if defined(CONFIG_QCOM_BUS_SCALING) && defined(CONFIG_QCOM_BUS_CONFIG_RPMH)
+int msm_bus_noc_throttle_wa(bool enable);
+int msm_bus_noc_priority_wa(bool enable);
+#else
 static inline int msm_bus_noc_throttle_wa(bool enable)
 {
 	return 0;
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 12343ca..553b873 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -79,10 +79,16 @@
  *	operation, if several modes of operation are supported these can be
  *	passed in the argument on a custom form, else just use argument 1
  *	to indicate low power mode, argument 0 turns low power mode off.
- * @PIN_CONFIG_OUTPUT: this will configure the pin as an output. Use argument
- *	1 to indicate high level, argument 0 to indicate low level. (Please
- *	see Documentation/pinctrl.txt, section "GPIO mode pitfalls" for a
- *	discussion around this parameter.)
+ * @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode
+ * 	without driving a value there. For most platforms this reduces to
+ * 	enabling the output buffers and then letting the pin controller's
+ * 	current configuration (e.g. the currently selected mux function)
+ * 	drive values on the line. Use argument 1 to enable output mode,
+ * 	argument 0 to disable it.
+ * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
+ * 	value on the line. Use argument 1 to indicate high level, argument 0 to
+ * 	indicate low level. (Please see Documentation/pinctrl.txt, section
+ * 	"GPIO mode pitfalls" for a discussion around this parameter.)
  * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
  *	supplies, the argument to this parameter (on a custom format) tells
  *	the driver which alternative power source to use.
@@ -109,6 +115,7 @@ enum pin_config_param {
 	PIN_CONFIG_INPUT_SCHMITT,
 	PIN_CONFIG_INPUT_SCHMITT_ENABLE,
 	PIN_CONFIG_LOW_POWER_MODE,
+	PIN_CONFIG_OUTPUT_ENABLE,
 	PIN_CONFIG_OUTPUT,
 	PIN_CONFIG_POWER_SOURCE,
 	PIN_CONFIG_SLEW_RATE,
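The difference between the two output parameters is who drives the level
once the buffer is enabled. A sketch using the generic packing helper
(assuming pinconf_to_config_packed() as provided by this header):

    static void example_pinconf(void)
    {
    	unsigned long cfg_enable, cfg_high;

    	/* buffer on; the selected mux function drives the level */
    	cfg_enable = pinconf_to_config_packed(PIN_CONFIG_OUTPUT_ENABLE, 1);
    	/* buffer on; drive the line high explicitly */
    	cfg_high = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, 1);

    	/* cfg_enable/cfg_high would then be handed to the pinconf ops */
    }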
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index d253ca6..370cbcf 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -120,6 +120,11 @@ enum {
 	POWER_SUPPLY_CONNECTOR_MICRO_USB,
 };
 
+enum {
+	POWER_SUPPLY_PL_STACKED_BATFET,
+	POWER_SUPPLY_PL_NON_STACKED_BATFET,
+};
+
 enum power_supply_property {
 	/* Properties of type `int' */
 	POWER_SUPPLY_PROP_STATUS = 0,
@@ -266,6 +271,8 @@ enum power_supply_property {
 	POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
 	POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
 	POWER_SUPPLY_PROP_CONNECTOR_TYPE,
+	POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE,
+	POWER_SUPPLY_PROP_MIN_ICL,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
diff --git a/include/soc/qcom/clock-alpha-pll.h b/include/soc/qcom/clock-alpha-pll.h
new file mode 100644
index 0000000..f8130f1
--- /dev/null
+++ b/include/soc/qcom/clock-alpha-pll.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_ALPHA_PLL_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_ALPHA_PLL_H
+
+#include <linux/spinlock.h>
+#include <linux/clk/msm-clk-provider.h>
+
+struct alpha_pll_masks {
+	u32 lock_mask;		/* lock_det bit */
+	u32 active_mask;	/* active_flag in FSM mode */
+	u32 update_mask;	/* update bit for dynamic update */
+	u32 vco_mask;		/* vco_sel bits */
+	u32 vco_shift;
+	u32 alpha_en_mask;	/* alpha_en bit */
+	u32 output_mask;	/* pllout_* bits */
+	u32 post_div_mask;
+
+	u32 test_ctl_lo_mask;
+	u32 test_ctl_hi_mask;
+};
+
+struct alpha_pll_vco_tbl {
+	u32 vco_val;
+	unsigned long min_freq;
+	unsigned long max_freq;
+};
+
+#define VCO(a, b, c) { \
+	.vco_val = a,\
+	.min_freq = b,\
+	.max_freq = c,\
+}
+
+struct alpha_pll_clk {
+	struct alpha_pll_masks *masks;
+
+	void *const __iomem *base;
+
+	u32 offset;
+	u32 fabia_frac_offset;
+
+	/* if fsm_en_mask is set, config PLL to FSM mode */
+	u32 fsm_reg_offset;
+	u32 fsm_en_mask;
+
+	u32 enable_config;	/* bitmask of outputs to be enabled */
+	u32 post_div_config;	/* masked post divider setting */
+	u32 config_ctl_val;	/* config register init value */
+	u32 test_ctl_lo_val;	/* test control settings */
+	u32 test_ctl_hi_val;
+
+	struct alpha_pll_vco_tbl *vco_tbl;
+	u32 num_vco;
+	u32 current_vco_val;
+	bool inited;
+	bool slew;
+	bool no_prepared_reconfig;
+
+	/*
+	 * Some PLLs support dynamically updating their rate without
+	 * disabling the PLL first. Set this flag to enable this support.
+	 */
+	bool dynamic_update;
+
+	/*
+	 * Some chipsets need the offline request bit to be
+	 * cleared on a second write to the register, even though
+	 * SW wants the bit to be set. Set this flag to indicate
+	 * that the workaround is required.
+	 */
+	bool offline_bit_workaround;
+	bool no_irq_dis;
+	bool is_fabia;
+	unsigned long min_supported_freq;
+	struct clk c;
+};
+
+static inline struct alpha_pll_clk *to_alpha_pll_clk(struct clk *c)
+{
+	return container_of(c, struct alpha_pll_clk, c);
+}
+
+extern void __init_alpha_pll(struct clk *c);
+extern const struct clk_ops clk_ops_alpha_pll;
+extern const struct clk_ops clk_ops_alpha_pll_hwfsm;
+extern const struct clk_ops clk_ops_fixed_alpha_pll;
+extern const struct clk_ops clk_ops_dyna_alpha_pll;
+extern const struct clk_ops clk_ops_fixed_fabia_alpha_pll;
+extern const struct clk_ops clk_ops_fabia_alpha_pll;
+
+#endif
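
For illustration, a hypothetical alpha PLL definition built from the structures above; the offsets, masks, VCO ranges, and the virt_base/xo_clk_src names are placeholders, not values for any real target:

static struct alpha_pll_masks example_pll_masks = {
	.lock_mask	= BIT(31),
	.active_mask	= BIT(30),
	.vco_mask	= 0x3 << 20,
	.vco_shift	= 20,
	.alpha_en_mask	= BIT(24),
	.output_mask	= 0xf,
};

static struct alpha_pll_vco_tbl example_pll_vco[] = {
	VCO(0, 700000000UL, 1400000000UL),
	VCO(1, 350000000UL,  700000000UL),
};

static struct alpha_pll_clk example_pll = {
	.masks		= &example_pll_masks,
	.offset		= 0x0,			/* placeholder register offset */
	.vco_tbl	= example_pll_vco,
	.num_vco	= ARRAY_SIZE(example_pll_vco),
	.base		= &virt_base,		/* ioremapped CC base, set at probe */
	.c = {
		.parent		= &xo_clk_src.c,	/* hypothetical 19.2 MHz XO */
		.dbg_name	= "example_pll",
		.ops		= &clk_ops_alpha_pll,
		CLK_INIT(example_pll.c),
	},
};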
diff --git a/include/soc/qcom/clock-local2.h b/include/soc/qcom/clock-local2.h
new file mode 100644
index 0000000..c5e7488
--- /dev/null
+++ b/include/soc/qcom/clock-local2.h
@@ -0,0 +1,274 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_2_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_2_H
+
+#include <linux/spinlock.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+
+/*
+ * Generic frequency-definition structs and macros
+ */
+
+/**
+ * @freq_hz: output rate
+ * @src_freq: source freq for dynamic pll. For fixed plls, set to 0.
+ * @src_clk: source clock for freq_hz
+ * @m_val: M value corresponding to freq_hz
+ * @n_val: N value corresponding to freq_hz
+ * @d_val: D value corresponding to freq_hz
+ * @div_src_val: Pre divider value and source selection mux index for freq_hz
+ * @sys_vdd: Voltage level required for freq_hz
+ */
+struct clk_freq_tbl {
+	unsigned long	freq_hz;
+	unsigned long	src_freq;
+	struct clk	*src_clk;
+	u32	m_val;
+	u32	n_val;
+	u32	d_val;
+	u32	div_src_val;
+	const unsigned long sys_vdd;
+};
+
+#define FREQ_END	(ULONG_MAX-1)
+#define F_END { .freq_hz = FREQ_END }
+#define	FIXED_CLK_SRC	0
+/*
+ * Generic clock-definition struct and macros
+ */
+/**
+ * struct rcg_clk - root clock generator
+ * @cmd_rcgr_reg: command register
+ * @mnd_reg_width: Width of MND register
+ * @set_rate: function to set frequency
+ * @freq_tbl: frequency table for this RCG
+ * @current_freq: current RCG frequency
+ * @c: generic clock data
+ * @non_local_children: set if RCG has at least one branch owned by a
+ *			 different EE
+ * @non_local_control_timeout: configurable RCG timeout needed when all RCG
+ *			 children can be controlled by an entity outside of
+ *			 HLOS.
+ * @force_enable_rcgr: set if RCG needs to be force enabled/disabled during
+ *			 the power sequence
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct rcg_clk {
+	u32 cmd_rcgr_reg;
+	u32 mnd_reg_width;
+
+	void   (*set_rate)(struct rcg_clk *, struct clk_freq_tbl *);
+
+	struct clk_freq_tbl *freq_tbl;
+	struct clk_freq_tbl *current_freq;
+	struct clk	c;
+
+	bool non_local_children;
+	int non_local_control_timeout;
+	bool force_enable_rcgr;
+
+	void *const __iomem *base;
+};
+
+static inline struct rcg_clk *to_rcg_clk(struct clk *clk)
+{
+	return container_of(clk, struct rcg_clk, c);
+}
+
+extern struct clk_freq_tbl rcg_dummy_freq;
+
+/**
+ * struct branch_clk - branch clock
+ * @set_rate: Set the frequency of this branch clock.
+ * @c: clk
+ * @cbcr_reg: branch control register
+ * @bcr_reg: block reset register
+ * @has_sibling: true if other branches are derived from this branch's source
+ * @cur_div: current branch divider value
+ * @max_div: maximum branch divider value (if zero, no divider exists)
+ * @halt_check: halt checking type
+ * @toggle_memory: toggle memory during enable/disable if true
+ * @no_halt_check_on_disable: When set, do not check status bit during
+ *			      clk_disable().
+ * @check_enable_bit: Check the enable bit to determine clock status
+ *			during handoff.
+ * @aggr_sibling_rates: Set if there are multiple branch clocks with rate
+ *			setting capability on the common RCG.
+ * @is_prepared: Set if clock's prepare count is greater than 0.
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct branch_clk {
+	void   (*set_rate)(struct branch_clk *, struct clk_freq_tbl *);
+	struct clk c;
+	u32 cbcr_reg;
+	u32 bcr_reg;
+	int has_sibling;
+	u32 cur_div;
+	u32 max_div;
+	const u32 halt_check;
+	bool toggle_memory;
+	bool no_halt_check_on_disable;
+	bool check_enable_bit;
+	bool aggr_sibling_rates;
+	bool is_prepared;
+
+	void *const __iomem *base;
+};
+
+static inline struct branch_clk *to_branch_clk(struct clk *clk)
+{
+	return container_of(clk, struct branch_clk, c);
+}
+
+/**
+ * struct local_vote_clk - Voteable branch clock
+ * @c: clk
+ * @cbcr_reg: branch control register
+ * @vote_reg: voting register
+ * @bcr_reg: block reset register
+ * @en_mask: enable mask
+ * @halt_check: halt checking type
+ * @base: pointer to base address of ioremapped registers.
+ * An on/off switch with a rate derived from the parent.
+ */
+struct local_vote_clk {
+	struct clk c;
+	u32 cbcr_reg;
+	u32 vote_reg;
+	u32 bcr_reg;
+	u32 en_mask;
+	const u32 halt_check;
+
+	void *__iomem *base;
+};
+
+static inline struct local_vote_clk *to_local_vote_clk(struct clk *clk)
+{
+	return container_of(clk, struct local_vote_clk, c);
+}
+
+/**
+ * struct reset_clk - Reset clock
+ * @c: clk
+ * @reset_reg: block reset register
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct reset_clk {
+	struct clk c;
+	u32 reset_reg;
+
+	void *__iomem *base;
+};
+
+static inline struct reset_clk *to_reset_clk(struct clk *clk)
+{
+	return container_of(clk, struct reset_clk, c);
+}
+
+/**
+ * struct measure_clk - for rate measurement debug use
+ * @sample_ticks: sample period in reference clock ticks
+ * @multiplier: measurement scale-up factor
+ * @divider: measurement scale-down factor
+ * @c: clk
+ */
+struct measure_clk {
+	u64 sample_ticks;
+	u32 multiplier;
+	u32 divider;
+
+	struct clk c;
+};
+
+struct measure_clk_data {
+	struct clk *cxo;
+	u32 plltest_reg;
+	u32 plltest_val;
+	u32 xo_div4_cbcr;
+	u32 ctl_reg;
+	u32 status_reg;
+
+	void *const __iomem *base;
+};
+
+static inline struct measure_clk *to_measure_clk(struct clk *clk)
+{
+	return container_of(clk, struct measure_clk, c);
+}
+
+/**
+ * struct gate_clk
+ * @c: clk
+ * @en_mask: ORed with @en_reg to enable gate clk
+ * @en_reg: register used to enable/disable gate clk
+ * @delay_us: delay in microseconds applied after toggling the clock
+ * @base: pointer to base address of ioremapped registers
+ */
+struct gate_clk {
+	struct clk c;
+	u32 en_mask;
+	u32 en_reg;
+	unsigned int delay_us;
+
+	void *const __iomem *base;
+};
+
+static inline struct gate_clk *to_gate_clk(struct clk *clk)
+{
+	return container_of(clk, struct gate_clk, c);
+}
+
+/*
+ * Generic set-rate implementations
+ */
+void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf);
+void set_rate_hid(struct rcg_clk *clk, struct clk_freq_tbl *nf);
+
+/*
+ * Variables from the clock-local driver
+ */
+extern spinlock_t local_clock_reg_lock;
+
+extern const struct clk_ops clk_ops_empty;
+extern const struct clk_ops clk_ops_rcg;
+extern const struct clk_ops clk_ops_rcg_mnd;
+extern const struct clk_ops clk_ops_branch;
+extern const struct clk_ops clk_ops_vote;
+extern const struct clk_ops clk_ops_rcg_hdmi;
+extern const struct clk_ops clk_ops_rcg_edp;
+extern const struct clk_ops clk_ops_byte;
+extern const struct clk_ops clk_ops_pixel;
+extern const struct clk_ops clk_ops_byte_multiparent;
+extern const struct clk_ops clk_ops_pixel_multiparent;
+extern const struct clk_ops clk_ops_edppixel;
+extern const struct clk_ops clk_ops_gate;
+extern const struct clk_ops clk_ops_rst;
+extern struct clk_mux_ops mux_reg_ops;
+extern struct mux_div_ops rcg_mux_div_ops;
+extern const struct clk_div_ops postdiv_reg_ops;
+
+enum handoff pixel_rcg_handoff(struct clk *clk);
+enum handoff byte_rcg_handoff(struct clk *clk);
+unsigned long measure_get_rate(struct clk *c);
+
+/*
+ * Clock definition macros
+ */
+#define DEFINE_CLK_MEASURE(name) \
+	struct clk name = { \
+		.ops = &clk_ops_empty, \
+		.dbg_name = #name, \
+		CLK_INIT(name), \
+	} \
+
+#endif /* __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_2_H */
+
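A typical (hypothetical) instantiation: a frequency table terminated by F_END feeding an rcg_clk. The source clocks, register offset, and div_src_val encodings below are placeholders:

static struct clk_freq_tbl ftbl_example_clk_src[] = {
	{ .freq_hz =  19200000, .src_clk = &xo_clk_src.c,
	  .div_src_val = 0x101 /* placeholder src-sel/div encoding */ },
	{ .freq_hz = 100000000, .src_clk = &gpll0_clk_src.c,
	  .div_src_val = 0x10B },
	F_END
};

static struct rcg_clk example_clk_src = {
	.cmd_rcgr_reg	= 0x2000,		/* placeholder register offset */
	.set_rate	= set_rate_hid,
	.freq_tbl	= ftbl_example_clk_src,
	.current_freq	= &rcg_dummy_freq,
	.base		= &virt_base,		/* ioremapped CC base */
	.c = {
		.dbg_name	= "example_clk_src",
		.ops		= &clk_ops_rcg,
		CLK_INIT(example_clk_src.c),
	},
};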
diff --git a/include/soc/qcom/clock-pll.h b/include/soc/qcom/clock-pll.h
new file mode 100644
index 0000000..1865e3c
--- /dev/null
+++ b/include/soc/qcom/clock-pll.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_PLL_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_PLL_H
+
+#include <linux/clk/msm-clk-provider.h>
+
+/**
+ * struct pll_freq_tbl - generic PLL frequency definition
+ * @freq_hz: pll frequency in hz
+ * @l_val: pll l value
+ * @m_val: pll m value
+ * @n_val: pll n value
+ * @post_div_val: pll post divider value
+ * @pre_div_val: pll pre-divider value
+ * @vco_val: pll vco value
+ */
+struct pll_freq_tbl {
+	const u32 freq_hz;
+	const u32 l_val;
+	const u32 m_val;
+	const u32 n_val;
+	const u32 post_div_val;
+	const u32 pre_div_val;
+	const u32 vco_val;
+};
+
+/**
+ * struct pll_config_masks - PLL config masks struct
+ * @post_div_mask: mask for post divider bits location
+ * @pre_div_mask: mask for pre-divider bits location
+ * @vco_mask: mask for vco bits location
+ * @mn_en_mask: ORed with pll config register to enable the mn counter
+ * @main_output_mask: ORed with pll config register to enable the main output
+ * @apc_pdn_mask: ORed with pll config register to enable/disable APC PDN
+ * @lock_mask: Mask that indicates that the PLL has locked
+ */
+struct pll_config_masks {
+	u32 apc_pdn_mask;
+	u32 post_div_mask;
+	u32 pre_div_mask;
+	u32 vco_mask;
+	u32 mn_en_mask;
+	u32 main_output_mask;
+	u32 early_output_mask;
+	u32 lock_mask;
+};
+
+struct pll_config_vals {
+	u32 post_div_masked;
+	u32 pre_div_masked;
+	u32 config_ctl_val;
+	u32 config_ctl_hi_val;
+	u32 test_ctl_lo_val;
+	u32 test_ctl_hi_val;
+	u32 alpha_val;
+	bool enable_mn;
+};
+
+struct pll_spm_ctrl {
+	u32 offset;
+	u32 event_bit;
+	void __iomem *spm_base;
+};
+
+#define PLL_FREQ_END	(UINT_MAX-1)
+#define PLL_F_END { .freq_hz = PLL_FREQ_END }
+
+/**
+ * struct pll_vote_clk - phase locked loop (HW voteable)
+ * @soft_vote: soft voting variable for multiple PLL software instances
+ * @soft_vote_mask: soft voting mask for multiple PLL software instances
+ * @en_reg: enable register
+ * @en_mask: ORed with @en_reg to enable the clock
+ * @status_mask: ANDed with @status_reg to determine if PLL is active.
+ * @status_reg: status register
+ * @c: clock
+ */
+struct pll_vote_clk {
+	u32 *soft_vote;
+	u32 soft_vote_mask;
+	void __iomem *const en_reg;
+	u32 en_mask;
+	void __iomem *const status_reg;
+	u32 status_mask;
+
+	struct clk c;
+
+	void *const __iomem *base;
+};
+
+extern const struct clk_ops clk_ops_pll_vote;
+extern const struct clk_ops clk_ops_pll_acpu_vote;
+extern const struct clk_ops clk_ops_pll_sleep_vote;
+
+/* Soft voting values */
+#define PLL_SOFT_VOTE_PRIMARY   BIT(0)
+#define PLL_SOFT_VOTE_ACPU      BIT(1)
+#define PLL_SOFT_VOTE_AUX       BIT(2)
+
+static inline struct pll_vote_clk *to_pll_vote_clk(struct clk *c)
+{
+	return container_of(c, struct pll_vote_clk, c);
+}
+
+/**
+ * struct pll_clk - phase locked loop
+ * @mode_reg: enable register
+ * @l_reg: l value register
+ * @m_reg: m value register
+ * @n_reg: n value register
+ * @config_reg: configuration register; contains the MN divider enable,
+ *   pre-divider, post-divider and VCO configuration. Depending on the target,
+ *   this may be the CONFIG register or the USER_CTL register
+ * @config_ctl_reg: "expert" configuration register
+ * @config_ctl_hi_reg: upper 32 bits of the "expert" configuration register
+ * @status_reg: status register, contains the lock detection bit
+ * @init_test_ctl: initialize the test control register
+ * @pgm_test_ctl_enable: program the test_ctl register in the enable sequence
+ * @test_ctl_dbg: if false, the test control registers will be configured.
+ * @masks: masks used for settings in config_reg
+ * @vals: configuration values to be written to PLL registers
+ * @freq_tbl: pll freq table
+ * @no_prepared_reconfig: Fail round_rate if pll is prepared
+ * @c: clk
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct pll_clk {
+	void __iomem *const mode_reg;
+	void __iomem *const l_reg;
+	void __iomem *const m_reg;
+	void __iomem *const n_reg;
+	void __iomem *const alpha_reg;
+	void __iomem *const config_reg;
+	void __iomem *const config_ctl_reg;
+	void __iomem *const config_ctl_hi_reg;
+	void __iomem *const status_reg;
+	void __iomem *const alt_status_reg;
+	void __iomem *const test_ctl_lo_reg;
+	void __iomem *const test_ctl_hi_reg;
+
+	bool init_test_ctl;
+	bool pgm_test_ctl_enable;
+	bool test_ctl_dbg;
+
+	struct pll_config_masks masks;
+	struct pll_config_vals vals;
+	struct pll_freq_tbl *freq_tbl;
+
+	unsigned long src_rate;
+	unsigned long min_rate;
+	unsigned long max_rate;
+
+	bool inited;
+	bool no_prepared_reconfig;
+
+	struct pll_spm_ctrl spm_ctrl;
+	struct clk c;
+
+	void *const __iomem *base;
+};
+
+extern const struct clk_ops clk_ops_local_pll;
+extern const struct clk_ops clk_ops_sr2_pll;
+extern const struct clk_ops clk_ops_variable_rate_pll;
+extern const struct clk_ops clk_ops_variable_rate_pll_hwfsm;
+
+void __variable_rate_pll_init(struct clk *c);
+
+static inline struct pll_clk *to_pll_clk(struct clk *c)
+{
+	return container_of(c, struct pll_clk, c);
+}
+
+int sr_pll_clk_enable(struct clk *c);
+int sr_hpm_lp_pll_clk_enable(struct clk *c);
+
+struct pll_alt_config {
+	u32 val;
+	u32 mask;
+};
+
+struct pll_config {
+	u32 l;
+	u32 m;
+	u32 n;
+	u32 vco_val;
+	u32 vco_mask;
+	u32 pre_div_val;
+	u32 pre_div_mask;
+	u32 post_div_val;
+	u32 post_div_mask;
+	u32 mn_ena_val;
+	u32 mn_ena_mask;
+	u32 main_output_val;
+	u32 main_output_mask;
+	u32 aux_output_val;
+	u32 aux_output_mask;
+	u32 cfg_ctl_val;
+	/* SR2 PLL specific fields */
+	u32 add_factor_val;
+	u32 add_factor_mask;
+	struct pll_alt_config alt_cfg;
+};
+
+struct pll_config_regs {
+	void __iomem *l_reg;
+	void __iomem *m_reg;
+	void __iomem *n_reg;
+	void __iomem *config_reg;
+	void __iomem *config_alt_reg;
+	void __iomem *config_ctl_reg;
+	void __iomem *mode_reg;
+
+	void *const __iomem *base;
+};
+
+void configure_sr_pll(struct pll_config *config, struct pll_config_regs *regs,
+				u32 ena_fsm_mode);
+void configure_sr_hpm_lp_pll(struct pll_config *config,
+				struct pll_config_regs *regs, u32 ena_fsm_mode);
+#endif
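
As a usage sketch, a frequency table for clk_ops_local_pll is terminated with PLL_F_END; assuming a 19.2 MHz reference, the L values below give 52 * 19.2 MHz = 998.4 MHz and 62 * 19.2 MHz = 1190.4 MHz (illustrative numbers, not a real target's table):

static struct pll_freq_tbl example_pll_freq[] = {
	{ .freq_hz =  998400000, .l_val = 52, .m_val = 0, .n_val = 1 },
	{ .freq_hz = 1190400000, .l_val = 62, .m_val = 0, .n_val = 1 },
	PLL_F_END
};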
diff --git a/include/soc/qcom/clock-rpm.h b/include/soc/qcom/clock-rpm.h
new file mode 100644
index 0000000..4af457c
--- /dev/null
+++ b/include/soc/qcom/clock-rpm.h
@@ -0,0 +1,180 @@
+/* Copyright (c) 2010-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_RPM_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_RPM_H
+
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/rpm-smd.h>
+
+#define RPM_SMD_KEY_RATE	0x007A484B
+#define RPM_SMD_KEY_ENABLE	0x62616E45
+#define RPM_SMD_KEY_STATE	0x54415453
+
+#define RPM_CLK_BUFFER_A_REQ			0x616B6C63
+#define RPM_KEY_SOFTWARE_ENABLE			0x6E657773
+#define RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY	0x62636370
+
+struct clk_ops;
+struct clk_rpmrs_data;
+extern const struct clk_ops clk_ops_rpm;
+extern const struct clk_ops clk_ops_rpm_branch;
+
+struct rpm_clk {
+	int rpm_res_type;
+	int rpm_key;
+	int rpm_clk_id;
+	const int rpm_status_id;
+	bool active_only;
+	bool enabled;
+	bool branch; /* true: RPM only accepts 1 for ON and 0 for OFF */
+	struct clk_rpmrs_data *rpmrs_data;
+	struct rpm_clk *peer;
+	struct clk c;
+	uint32_t *last_active_set_vote;
+	uint32_t *last_sleep_set_vote;
+};
+
+static inline struct rpm_clk *to_rpm_clk(struct clk *clk)
+{
+	return container_of(clk, struct rpm_clk, c);
+}
+
+/*
+ * RPM scaling enable function, used on targets that have an RPM resource
+ * for enabling RPM clock scaling.
+ */
+int enable_rpm_scaling(void);
+
+int vote_bimc(struct rpm_clk *r, uint32_t value);
+
+extern struct clk_rpmrs_data clk_rpmrs_data_smd;
+
+/*
+ * A note on name##last_{active,sleep}_set_vote below:
+ * We track the last active and sleep set votes across both
+ * active-only and active+sleep set clocks. We use the same
+ * tracking variables for both clocks in order to keep both
+ * updated about the last vote irrespective of which clock
+ * actually made the request. This is the only way to allow
+ * optimizations that prevent duplicate requests from being sent
+ * to the RPM. Separate tracking does not work since it is not
+ * possible to know if the peer's last request was actually sent
+ * to the RPM.
+ */
+
+#define __DEFINE_CLK_RPM(name, active, type, r_id, stat_id, dep, key, \
+				rpmrsdata) \
+	static struct rpm_clk active; \
+	static uint32_t name##last_active_set_vote; \
+	static uint32_t name##last_sleep_set_vote; \
+	static struct rpm_clk name = { \
+		.rpm_res_type = (type), \
+		.rpm_clk_id = (r_id), \
+		.rpm_status_id = (stat_id), \
+		.rpm_key = (key), \
+		.peer = &active, \
+		.rpmrs_data = (rpmrsdata),\
+		.last_active_set_vote = &name##last_active_set_vote, \
+		.last_sleep_set_vote = &name##last_sleep_set_vote, \
+		.c = { \
+			.ops = &clk_ops_rpm, \
+			.dbg_name = #name, \
+			CLK_INIT(name.c), \
+			.depends = dep, \
+		}, \
+	}; \
+	static struct rpm_clk active = { \
+		.rpm_res_type = (type), \
+		.rpm_clk_id = (r_id), \
+		.rpm_status_id = (stat_id), \
+		.rpm_key = (key), \
+		.peer = &name, \
+		.active_only = true, \
+		.rpmrs_data = (rpmrsdata),\
+		.last_active_set_vote = &name##last_active_set_vote, \
+		.last_sleep_set_vote = &name##last_sleep_set_vote, \
+		.c = { \
+			.ops = &clk_ops_rpm, \
+			.dbg_name = #active, \
+			CLK_INIT(active.c), \
+			.depends = dep, \
+		}, \
+	} \
+
+#define __DEFINE_CLK_RPM_BRANCH(name, active, type, r_id, stat_id, r, \
+					key, rpmrsdata) \
+	static struct rpm_clk active; \
+	static uint32_t name##last_active_set_vote; \
+	static uint32_t name##last_sleep_set_vote; \
+	static struct rpm_clk name = { \
+		.rpm_res_type = (type), \
+		.rpm_clk_id = (r_id), \
+		.rpm_status_id = (stat_id), \
+		.rpm_key = (key), \
+		.peer = &active, \
+		.branch = true, \
+		.rpmrs_data = (rpmrsdata),\
+		.last_active_set_vote = &name##last_active_set_vote, \
+		.last_sleep_set_vote = &name##last_sleep_set_vote, \
+		.c = { \
+			.ops = &clk_ops_rpm_branch, \
+			.dbg_name = #name, \
+			.rate = (r), \
+			CLK_INIT(name.c), \
+		}, \
+	}; \
+	static struct rpm_clk active = { \
+		.rpm_res_type = (type), \
+		.rpm_clk_id = (r_id), \
+		.rpm_status_id = (stat_id), \
+		.rpm_key = (key), \
+		.peer = &name, \
+		.active_only = true, \
+		.branch = true, \
+		.rpmrs_data = (rpmrsdata),\
+		.last_active_set_vote = &name##last_active_set_vote, \
+		.last_sleep_set_vote = &name##last_sleep_set_vote, \
+		.c = { \
+			.ops = &clk_ops_rpm_branch, \
+			.dbg_name = #active, \
+			.rate = (r), \
+			CLK_INIT(active.c), \
+		}, \
+	} \
+
+#define DEFINE_CLK_RPM_SMD(name, active, type, r_id, dep) \
+	__DEFINE_CLK_RPM(name, active, type, r_id, 0, dep, \
+				RPM_SMD_KEY_RATE, &clk_rpmrs_data_smd)
+
+#define DEFINE_CLK_RPM_SMD_BRANCH(name, active, type, r_id, r) \
+	__DEFINE_CLK_RPM_BRANCH(name, active, type, r_id, 0, r, \
+				RPM_SMD_KEY_ENABLE, &clk_rpmrs_data_smd)
+
+#define DEFINE_CLK_RPM_SMD_QDSS(name, active, type, r_id) \
+	__DEFINE_CLK_RPM(name, active, type, r_id, \
+		0, 0, RPM_SMD_KEY_STATE, &clk_rpmrs_data_smd)
+/*
+ * The RPM XO buffer clock management code aggregates votes for pin-control mode
+ * and software mode separately. Software-enable has higher priority over pin-
+ * control, and if the software-mode aggregation results in a 'disable', the
+ * buffer will be left in pin-control mode if a pin-control vote is in place.
+ */
+#define DEFINE_CLK_RPM_SMD_XO_BUFFER(name, active, r_id) \
+	__DEFINE_CLK_RPM_BRANCH(name, active, RPM_CLK_BUFFER_A_REQ, r_id, 0, \
+			1000, RPM_KEY_SOFTWARE_ENABLE, &clk_rpmrs_data_smd)
+
+#define DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(name, active, r_id) \
+	__DEFINE_CLK_RPM_BRANCH(name, active, RPM_CLK_BUFFER_A_REQ, r_id, 0, \
+	1000, RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY, &clk_rpmrs_data_smd)
+#endif
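
For reference, typical instantiations of these macros; the resource type/ID constants and clock names are illustrative, not definitions from this header:

/* Rate-class clock: 'pnoc_clk' votes on both RPM sets, 'pnoc_a_clk' on the
 * active set only.
 */
DEFINE_CLK_RPM_SMD(pnoc_clk, pnoc_a_clk, RPM_BUS_CLK_TYPE, PNOC_ID, NULL);

/* On/off branch clock reporting a fixed 19.2 MHz rate. */
DEFINE_CLK_RPM_SMD_BRANCH(bb_clk1, bb_clk1_a, RPM_XO_CLK_TYPE, BB_CLK1_ID,
			  19200000);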
diff --git a/include/soc/qcom/clock-voter.h b/include/soc/qcom/clock-voter.h
new file mode 100644
index 0000000..7f92a0d
--- /dev/null
+++ b/include/soc/qcom/clock-voter.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2010-2013, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_VOTER_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_VOTER_H
+
+#include <linux/clk/msm-clk-provider.h>
+
+struct clk_ops;
+extern const struct clk_ops clk_ops_voter;
+
+struct clk_voter {
+	int is_branch;
+	bool enabled;
+	struct clk c;
+};
+
+static inline struct clk_voter *to_clk_voter(struct clk *clk)
+{
+	return container_of(clk, struct clk_voter, c);
+}
+
+#define __DEFINE_CLK_VOTER(clk_name, _parent, _default_rate, _is_branch) \
+	struct clk_voter clk_name = { \
+		.is_branch = (_is_branch), \
+		.c = { \
+			.parent = _parent, \
+			.dbg_name = #clk_name, \
+			.ops = &clk_ops_voter, \
+			.rate = _default_rate, \
+			CLK_INIT(clk_name.c), \
+		}, \
+	}
+
+#define DEFINE_CLK_VOTER(clk_name, _parent, _default_rate) \
+	 __DEFINE_CLK_VOTER(clk_name, _parent, _default_rate, 0)
+
+#define DEFINE_CLK_BRANCH_VOTER(clk_name, _parent) \
+	 __DEFINE_CLK_VOTER(clk_name, _parent, 1000, 1)
+
+#endif
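
Usage is a one-liner per voter; the parent clock names here are hypothetical:

/* Rate voter: requests on this clock are aggregated into the parent's rate. */
DEFINE_CLK_VOTER(pnoc_msmbus_clk, &pnoc_clk.c, 0);

/* Branch voter: a pure on/off vote on the parent. */
DEFINE_CLK_BRANCH_VOTER(cxo_otg_clk, &cxo_clk_src.c);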
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 4a7b0d6..ad38816 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -142,5 +142,6 @@ extern int icnss_smmu_map(struct device *dev, phys_addr_t paddr,
 extern unsigned int icnss_socinfo_get_serial_number(struct device *dev);
 extern bool icnss_is_qmi_disable(struct device *dev);
 extern bool icnss_is_fw_ready(void);
+extern bool icnss_is_fw_down(void);
 extern int icnss_trigger_recovery(struct device *dev);
 #endif /* _ICNSS_WLAN_H_ */
diff --git a/include/soc/qcom/msm-clock-controller.h b/include/soc/qcom/msm-clock-controller.h
new file mode 100644
index 0000000..4b7abec
--- /dev/null
+++ b/include/soc/qcom/msm-clock-controller.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MSM_CLOCK_CONTROLLER_H
+#define __ARCH_ARM_MSM_CLOCK_CONTROLLER_H
+
+#include <linux/list.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define dt_err(np, fmt, ...) \
+	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
+#define dt_prop_err(np, str, fmt, ...) \
+	dt_err(np, "%s: " fmt, str, ##__VA_ARGS__)
+
+/**
+ * struct msmclk_parser
+ * @compatible
+ *      matches compatible property from devicetree
+ * @parsedt
+ *      constructs and returns an instance of the appropriate object based
+ *      on the data from devicetree.
+ */
+struct msmclk_parser {
+	struct list_head list;
+	char *compatible;
+	void * (*parsedt)(struct device *dev, struct device_node *of);
+};
+
+#define MSMCLK_PARSER(fn, str, id) \
+static struct msmclk_parser _msmclk_##fn##id = {		\
+	.list = LIST_HEAD_INIT(_msmclk_##fn##id.list),		\
+	.compatible = str,					\
+	.parsedt = fn,						\
+};								\
+static int __init _msmclk_init_##fn##id(void)			\
+{								\
+	msmclk_parser_register(&_msmclk_##fn##id);		\
+	return 0;						\
+}								\
+early_initcall(_msmclk_init_##fn##id)
+
+/*
+ * struct msmclk_data
+ * @base
+ *      ioremapped region for sub_devices
+ * @list
+ *	tracks all registered driver instances
+ * @htable
+ *	tracks all registered child clocks
+ * @clk_tbl
+ *      array of clk_lookup to be registered with the clock framework
+ */
+#define HASHTABLE_SIZE 200
+struct msmclk_data {
+	void __iomem *base;
+	struct device *dev;
+	struct list_head list;
+	struct hlist_head htable[HASHTABLE_SIZE];
+	struct clk_lookup *clk_tbl;
+	int clk_tbl_size;
+	int max_clk_tbl_size;
+};
+
+#if defined(CONFIG_MSM_CLK_CONTROLLER_V2)
+
+/* Utility functions */
+int of_property_count_phandles(struct device_node *np, char *propname);
+int of_property_read_phandle_index(struct device_node *np, char *propname,
+					int index, phandle *p);
+void *msmclk_generic_clk_init(struct device *dev, struct device_node *np,
+				struct clk *c);
+
+/*
+ * msmclk_parser_register
+ *      Registers a parser which will be matched with a node from dt
+ *      according to the compatible string.
+ */
+void msmclk_parser_register(struct msmclk_parser *p);
+
+/*
+ * msmclk_parse_phandle
+ *      On hashtable miss, the corresponding entry will be retrieved from
+ *      devicetree, and added to the hashtable.
+ */
+void *msmclk_parse_phandle(struct device *dev, phandle key);
+/*
+ * msmclk_lookup_phandle
+ *	Straightforward hashtable lookup
+ */
+void *msmclk_lookup_phandle(struct device *dev, phandle key);
+
+int __init msmclk_init(void);
+#else
+
+static inline int of_property_count_phandles(struct device_node *np,
+			char *propname)
+{
+	return 0;
+}
+
+static inline int of_property_read_phandle_index(struct device_node *np,
+			char *propname, int index, phandle *p)
+{
+	return 0;
+}
+
+static inline void *msmclk_generic_clk_init(struct device *dev,
+				struct device_node *np, struct clk *c)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline void msmclk_parser_register(struct msmclk_parser *p) { }
+
+static inline void *msmclk_parse_phandle(struct device *dev, phandle key)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline void *msmclk_lookup_phandle(struct device *dev, phandle key)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline int __init msmclk_init(void)
+{
+	return 0;
+}
+
+#endif /* CONFIG_MSM_CLK_CONTROLLER_V2 */
+#endif /* __ARCH_ARM_MSM_CLOCK_CONTROLLER_H */
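
A parser is typically registered as in the following sketch; the compatible string and allocation details are hypothetical:

static void *example_clk_dt_parser(struct device *dev,
				   struct device_node *np)
{
	struct clk *c;

	c = devm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	/* Read register offsets, parents, etc. from np properties here. */
	return msmclk_generic_clk_init(dev, np, c);
}
MSMCLK_PARSER(example_clk_dt_parser, "qcom,example-clk", 0);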
diff --git a/include/sound/wcd-dsp-mgr.h b/include/sound/wcd-dsp-mgr.h
index 8a4c6d9..7ba1817 100644
--- a/include/sound/wcd-dsp-mgr.h
+++ b/include/sound/wcd-dsp-mgr.h
@@ -77,6 +77,9 @@ enum wdsp_signal {
 	/* Other signals */
 	WDSP_CDC_DOWN_SIGNAL,
 	WDSP_CDC_UP_SIGNAL,
+
+	/* Software generated signal indicating debug dumps to be collected */
+	WDSP_DEBUG_DUMP,
 };
 
 /*
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 408fa57..8cfb1d7 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -331,6 +331,7 @@ DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,
  * The clock events are used for clock enable/disable and for
  *  clock rate change
  */
+#if defined(CONFIG_COMMON_CLK_MSM)
 DECLARE_EVENT_CLASS(clock,
 
 	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
@@ -374,6 +375,13 @@ DEFINE_EVENT(clock, clock_set_rate,
 	TP_ARGS(name, state, cpu_id)
 );
 
+DEFINE_EVENT(clock, clock_set_rate_complete,
+
+	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+	TP_ARGS(name, state, cpu_id)
+);
+
 TRACE_EVENT(clock_set_parent,
 
 	TP_PROTO(const char *name, const char *parent_name),
@@ -393,6 +401,32 @@ TRACE_EVENT(clock_set_parent,
 	TP_printk("%s parent=%s", __get_str(name), __get_str(parent_name))
 );
 
+TRACE_EVENT(clock_state,
+
+	TP_PROTO(const char *name, unsigned long prepare_count,
+		unsigned long count, unsigned long rate),
+
+	TP_ARGS(name, prepare_count, count, rate),
+
+	TP_STRUCT__entry(
+		__string(name,			name)
+		__field(unsigned long,		prepare_count)
+		__field(unsigned long,		count)
+		__field(unsigned long,		rate)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->prepare_count = prepare_count;
+		__entry->count = count;
+		__entry->rate = rate;
+	),
+	TP_printk("%s\t[%lu:%lu]\t%lu", __get_str(name), __entry->prepare_count,
+					 __entry->count, __entry->rate)
+
+);
+#endif /* CONFIG_COMMON_CLK_MSM */
+
 /*
  * The power domain events are used for power domains transitions
  */
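
The new clock_state event pairs naturally with the MSM clock bookkeeping; a call site would look roughly like this (field names assume the msm-clk-provider struct clk):

trace_clock_state(c->dbg_name, c->prepare_count, c->count, c->rate);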
diff --git a/include/uapi/media/cam_defs.h b/include/uapi/media/cam_defs.h
index cabf0a8..e006463 100644
--- a/include/uapi/media/cam_defs.h
+++ b/include/uapi/media/cam_defs.h
@@ -15,7 +15,8 @@
 #define CAM_CONFIG_DEV                          (CAM_COMMON_OPCODE_BASE + 0x5)
 #define CAM_RELEASE_DEV                         (CAM_COMMON_OPCODE_BASE + 0x6)
 #define CAM_SD_SHUTDOWN                         (CAM_COMMON_OPCODE_BASE + 0x7)
-#define CAM_COMMON_OPCODE_MAX                   (CAM_COMMON_OPCODE_BASE + 0x8)
+#define CAM_FLUSH_REQ                           (CAM_COMMON_OPCODE_BASE + 0x8)
+#define CAM_COMMON_OPCODE_MAX                   (CAM_COMMON_OPCODE_BASE + 0x9)
 
 #define CAM_EXT_OPCODE_BASE                     0x200
 #define CAM_CONFIG_DEV_EXTERNAL                 (CAM_EXT_OPCODE_BASE + 0x1)
@@ -43,6 +44,20 @@
 #define CAM_CMD_BUF_LEGACY                  0xA
 
 /**
+ * enum flush_type_t - Identifies the various flush types
+ *
+ * @CAM_FLUSH_TYPE_REQ:    Flush specific request
+ * @CAM_FLUSH_TYPE_ALL:    Flush all requests belonging to a context
+ * @CAM_FLUSH_TYPE_MAX:    Max enum to validate flush type
+ *
+ */
+enum flush_type_t {
+	CAM_FLUSH_TYPE_REQ,
+	CAM_FLUSH_TYPE_ALL,
+	CAM_FLUSH_TYPE_MAX
+};
+
+/**
  * struct cam_control - Structure used by ioctl control for camera
  *
  * @op_code:            This is the op code for camera control
@@ -437,4 +452,26 @@ struct cam_acquire_dev_cmd {
 	uint64_t        resource_hdl;
 };
 
+/**
+ * struct cam_flush_dev_cmd - Control payload for flush devices
+ *
+ * @version:           Version
+ * @session_handle:    Session handle associated with the device
+ * @dev_handle:        Handle of the device whose requests are to be flushed
+ * @flush_type:        Flush type:
+ *                     0 = flush specific request
+ *                     1 = flush all
+ * @reserved:          Reserved for 64-bit alignment
+ * @req_id:            Request id to be cancelled
+ *
+ */
+struct cam_flush_dev_cmd {
+	uint64_t       version;
+	int32_t        session_handle;
+	int32_t        dev_handle;
+	uint32_t       flush_type;
+	uint32_t       reserved;
+	int64_t        req_id;
+};
+
 #endif /* __UAPI_CAM_DEFS_H__ */
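
From userspace, a flush would be issued by wrapping cam_flush_dev_cmd in the generic cam_control payload; a sketch assuming the VIDIOC_CAM_CONTROL ioctl and handles obtained at acquire time:

struct cam_flush_dev_cmd flush = {
	.session_handle = session_hdl,		/* from CAM_ACQUIRE_DEV */
	.dev_handle     = dev_hdl,
	.flush_type     = CAM_FLUSH_TYPE_REQ,	/* flush one request... */
	.req_id         = req_id,		/* ...namely this one */
};
struct cam_control ctrl = {
	.op_code     = CAM_FLUSH_REQ,
	.size        = sizeof(flush),
	.handle_type = CAM_HANDLE_USER_POINTER,
	.handle      = (uint64_t)(uintptr_t)&flush,
};

ioctl(dev_fd, VIDIOC_CAM_CONTROL, &ctrl);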
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index baf31df..09a684a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3098,18 +3098,12 @@ static inline bool should_suppress_show_mem(void)
 	return ret;
 }
 
-static DEFINE_RATELIMIT_STATE(nopage_rs,
-		DEFAULT_RATELIMIT_INTERVAL,
-		DEFAULT_RATELIMIT_BURST);
-
-void warn_alloc(gfp_t gfp_mask, const char *fmt, ...)
+static void warn_alloc_show_mem(gfp_t gfp_mask)
 {
 	unsigned int filter = SHOW_MEM_FILTER_NODES;
-	struct va_format vaf;
-	va_list args;
+	static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
 
-	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
-	    debug_guardpage_minorder() > 0)
+	if (should_suppress_show_mem() || !__ratelimit(&show_mem_rs))
 		return;
 
 	/*
@@ -3124,6 +3118,20 @@ void warn_alloc(gfp_t gfp_mask, const char *fmt, ...)
 	if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
 		filter &= ~SHOW_MEM_FILTER_NODES;
 
+	show_mem(filter);
+}
+
+void warn_alloc(gfp_t gfp_mask, const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
+
+	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
+	    debug_guardpage_minorder() > 0)
+		return;
+
 	pr_warn("%s: ", current->comm);
 
 	va_start(args, fmt);
@@ -3135,10 +3143,7 @@ void warn_alloc(gfp_t gfp_mask, const char *fmt, ...)
 	pr_cont(", mode:%#x(%pGg)\n", gfp_mask, &gfp_mask);
 
 	dump_stack();
-	if (!should_suppress_show_mem()) {
-		show_mem(filter);
-		show_mem_call_notifiers();
-	}
+	warn_alloc_show_mem(gfp_mask);
 }
 
 static inline struct page *
diff --git a/net/ipv4/netfilter/ipt_NATTYPE.c b/net/ipv4/netfilter/ipt_NATTYPE.c
index bed569f..34dc925 100644
--- a/net/ipv4/netfilter/ipt_NATTYPE.c
+++ b/net/ipv4/netfilter/ipt_NATTYPE.c
@@ -98,7 +98,8 @@ static void nattype_free(struct ipt_nattype *nte)
 /* netfilter NATTYPE nattype_refresh_timer()
  * Refresh the timer for this object.
  */
-bool nattype_refresh_timer(unsigned long nat_type, unsigned long timeout_value)
+bool nattype_refresh_timer_impl(unsigned long nat_type,
+				unsigned long timeout_value)
 {
 	struct ipt_nattype *nte = (struct ipt_nattype *)nat_type;
 
@@ -154,7 +155,7 @@ static bool nattype_packet_in_match(const struct ipt_nattype *nte,
 	 * further.
 	 */
 	if (nte->proto != iph->protocol) {
-		DEBUGP("nattype_packet_in_match: protocol failed: nte proto:"
+		DEBUGP("nattype_packet_in_match: protocol failed: nte proto:");
 		DEBUGP(" %d, packet proto: %d\n",
 		       nte->proto, iph->protocol);
 		return false;
@@ -375,7 +376,7 @@ static unsigned int nattype_forward(struct sk_buff *skb,
 			 * found the entry.
 			 */
 			if (!nattype_refresh_timer((unsigned long)nte,
-						   ct->timeout.expires))
+						   ct->timeout))
 				break;
 
 			/* netfilter NATTYPE
@@ -473,8 +474,8 @@ static unsigned int nattype_forward(struct sk_buff *skb,
 	/* netfilter NATTYPE
 	 * Add the new entry to the list.
 	 */
-	nte->timeout_value = ct->timeout.expires;
-	nte->timeout.expires = ct->timeout.expires + jiffies;
+	nte->timeout_value = ct->timeout;
+	nte->timeout.expires = ct->timeout + jiffies;
 	add_timer(&nte->timeout);
 	list_add(&nte->list, &nattype_list);
 	ct->nattype_entry = (unsigned long)nte;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 255a797..f0fe6ee 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1508,7 +1508,7 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
 #if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
 	nattype_ref_timer = rcu_dereference(nattype_refresh_timer);
 	if (nattype_ref_timer)
-		nattype_ref_timer(ct->nattype_entry, ct->timeout.expires);
+		nattype_ref_timer(ct->nattype_entry, ct->timeout);
 #endif
 
 acct:
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 1ce25f5..3c37253 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1555,7 +1555,7 @@ static int ctnetlink_change_timeout(struct nf_conn *ct,
 #if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
 	nattype_ref_timer = rcu_dereference(nattype_refresh_timer);
 	if (nattype_ref_timer)
-		nattype_ref_timer(ct->nattype_entry, ct->timeout.expires);
+		nattype_ref_timer(ct->nattype_entry, ct->timeout);
 #endif
 	return 0;
 }
diff --git a/net/rmnet_data/rmnet_data_config.c b/net/rmnet_data/rmnet_data_config.c
index 50d9b51..bc1829e 100644
--- a/net/rmnet_data/rmnet_data_config.c
+++ b/net/rmnet_data/rmnet_data_config.c
@@ -25,6 +25,7 @@
 #include "rmnet_data_vnd.h"
 #include "rmnet_data_private.h"
 #include "rmnet_data_trace.h"
+#include "rmnet_map.h"
 
 RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_CONFIG);
 
@@ -869,7 +870,8 @@ int rmnet_associate_network_device(struct net_device *dev)
 	conf->dev = dev;
 	spin_lock_init(&conf->agg_lock);
 	config->recycle = kfree_skb;
-
+	hrtimer_init(&conf->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	conf->hrtimer.function = rmnet_map_flush_packet_queue;
 	rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config);
 
 	if (rc) {
@@ -1232,6 +1234,22 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
 	config = _rmnet_get_phys_ep_config(dev);
 
 	if (config) {
+		unsigned long flags;
+
+		hrtimer_cancel(&config->hrtimer);
+		spin_lock_irqsave(&config->agg_lock, flags);
+		if (config->agg_state == RMNET_MAP_TXFER_SCHEDULED) {
+			if (config->agg_skb) {
+				kfree_skb(config->agg_skb);
+				config->agg_skb = NULL;
+				config->agg_count = 0;
+				memset(&config->agg_time, 0,
+				       sizeof(struct timespec));
+			}
+			config->agg_state = RMNET_MAP_AGG_IDLE;
+		}
+		spin_unlock_irqrestore(&config->agg_lock, flags);
+
 		cfg = &config->local_ep;
 
 		if (cfg && cfg->refcount)
diff --git a/net/rmnet_data/rmnet_data_config.h b/net/rmnet_data/rmnet_data_config.h
index aa8a0b5..4142656 100644
--- a/net/rmnet_data/rmnet_data_config.h
+++ b/net/rmnet_data/rmnet_data_config.h
@@ -16,6 +16,7 @@
 #include <linux/time.h>
 #include <linux/spinlock.h>
 #include <net/rmnet_config.h>
+#include <linux/hrtimer.h>
 
 #ifndef _RMNET_DATA_CONFIG_H_
 #define _RMNET_DATA_CONFIG_H_
@@ -85,6 +86,7 @@ struct rmnet_phys_ep_config {
 	u8 agg_count;
 	struct timespec agg_time;
 	struct timespec agg_last;
+	struct hrtimer hrtimer;
 };
 
 int rmnet_config_init(void);
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index a5b22c4..8faf7a7 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -734,6 +734,9 @@ void rmnet_egress_handler(struct sk_buff *skb,
 	LOGD("Packet going out on %s with egress format 0x%08X",
 	     skb->dev->name, config->egress_data_format);
 
+	if (ep->rmnet_mode == RMNET_EPMODE_VND)
+		rmnet_vnd_tx_fixup(skb, orig_dev);
+
 	if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
 		switch (rmnet_map_egress_handler(skb, config, ep, orig_dev)) {
 		case RMNET_MAP_CONSUMED:
@@ -751,9 +754,6 @@ void rmnet_egress_handler(struct sk_buff *skb,
 		}
 	}
 
-	if (ep->rmnet_mode == RMNET_EPMODE_VND)
-		rmnet_vnd_tx_fixup(skb, orig_dev);
-
 	rmnet_print_packet(skb, skb->dev->name, 't');
 	trace_rmnet_egress_handler(skb);
 	rc = dev_queue_xmit(skb);
diff --git a/net/rmnet_data/rmnet_map.h b/net/rmnet_data/rmnet_map.h
index 3bab6d9..718140c 100644
--- a/net/rmnet_data/rmnet_map.h
+++ b/net/rmnet_data/rmnet_map.h
@@ -147,4 +147,5 @@ int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 				     struct net_device *orig_dev,
 				     u32 egress_data_format);
 int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset);
+enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t);
 #endif /* _RMNET_MAP_H_ */
diff --git a/net/rmnet_data/rmnet_map_data.c b/net/rmnet_data/rmnet_map_data.c
index 1c0f1060..669a890 100644
--- a/net/rmnet_data/rmnet_map_data.c
+++ b/net/rmnet_data/rmnet_map_data.c
@@ -49,7 +49,7 @@ module_param(agg_bypass_time, long, 0644);
 MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this");
 
 struct agg_work {
-	struct delayed_work work;
+	struct work_struct work;
 	struct rmnet_phys_ep_config *config;
 };
 
@@ -165,25 +165,18 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 	return skbn;
 }
 
-/* rmnet_map_flush_packet_queue() - Transmits aggregeted frame on timeout
- * @work:        struct agg_work containing delayed work and skb to flush
- *
- * This function is scheduled to run in a specified number of jiffies after
- * the last frame transmitted by the network stack. When run, the buffer
- * containing aggregated packets is finally transmitted on the underlying link.
- *
- */
-static void rmnet_map_flush_packet_queue(struct work_struct *work)
+static void rmnet_map_flush_packet_work(struct work_struct *work)
 {
-	struct agg_work *real_work;
 	struct rmnet_phys_ep_config *config;
+	struct agg_work *real_work;
+	int rc, agg_count = 0;
 	unsigned long flags;
 	struct sk_buff *skb;
-	int rc, agg_count = 0;
 
-	skb = 0;
 	real_work = (struct agg_work *)work;
 	config = real_work->config;
+	skb = NULL;
+
 	LOGD("%s", "Entering flush thread");
 	spin_lock_irqsave(&config->agg_lock, flags);
 	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
@@ -194,7 +187,7 @@ static void rmnet_map_flush_packet_queue(struct work_struct *work)
 				LOGL("Agg count: %d", config->agg_count);
 			skb = config->agg_skb;
 			agg_count = config->agg_count;
-			config->agg_skb = 0;
+			config->agg_skb = NULL;
 			config->agg_count = 0;
 			memset(&config->agg_time, 0, sizeof(struct timespec));
 		}
@@ -211,9 +204,37 @@ static void rmnet_map_flush_packet_queue(struct work_struct *work)
 		rc = dev_queue_xmit(skb);
 		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
 	}
+
 	kfree(work);
 }
 
+/* rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
+ *
+ * This function is scheduled to run a specified number of nanoseconds after
+ * the last frame transmitted by the network stack. When run, the buffer
+ * containing aggregated packets is finally transmitted on the underlying link.
+ *
+ */
+enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
+{
+	struct rmnet_phys_ep_config *config;
+	struct agg_work *work;
+
+	config = container_of(t, struct rmnet_phys_ep_config, hrtimer);
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work) {
+		config->agg_state = RMNET_MAP_AGG_IDLE;
+
+		return HRTIMER_NORESTART;
+	}
+
+	INIT_WORK(&work->work, rmnet_map_flush_packet_work);
+	work->config = config;
+	schedule_work((struct work_struct *)work);
+	return HRTIMER_NORESTART;
+}
+
 /* rmnet_map_aggregate() - Software aggregates multiple packets.
  * @skb:        current packet being transmitted
  * @config:     Physical endpoint configuration of the ingress device
@@ -226,7 +247,6 @@ static void rmnet_map_flush_packet_queue(struct work_struct *work)
 void rmnet_map_aggregate(struct sk_buff *skb,
 			 struct rmnet_phys_ep_config *config) {
 	u8 *dest_buff;
-	struct agg_work *work;
 	unsigned long flags;
 	struct sk_buff *agg_skb;
 	struct timespec diff, last;
@@ -290,7 +310,9 @@ void rmnet_map_aggregate(struct sk_buff *skb,
 		config->agg_skb = 0;
 		config->agg_count = 0;
 		memset(&config->agg_time, 0, sizeof(struct timespec));
+		config->agg_state = RMNET_MAP_AGG_IDLE;
 		spin_unlock_irqrestore(&config->agg_lock, flags);
+		hrtimer_cancel(&config->hrtimer);
 		LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
 		     diff.tv_nsec, agg_count);
 		trace_rmnet_map_aggregate(skb, agg_count);
@@ -307,19 +329,9 @@ void rmnet_map_aggregate(struct sk_buff *skb,
 
 schedule:
 	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
-		work = kmalloc(sizeof(*work), GFP_ATOMIC);
-		if (!work) {
-			LOGE("Failed to allocate work item for packet %s",
-			     "transfer. DATA PATH LIKELY BROKEN!");
-			config->agg_state = RMNET_MAP_AGG_IDLE;
-			spin_unlock_irqrestore(&config->agg_lock, flags);
-			return;
-		}
-		INIT_DELAYED_WORK((struct delayed_work *)work,
-				  rmnet_map_flush_packet_queue);
-		work->config = config;
 		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
-		schedule_delayed_work((struct delayed_work *)work, 1);
+		hrtimer_start(&config->hrtimer, ns_to_ktime(3000000),
+			      HRTIMER_MODE_REL);
 	}
 	spin_unlock_irqrestore(&config->agg_lock, flags);
 }
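
The scheme adopted above — an hrtimer handler that runs in hard-IRQ context and therefore only defers the real flush to a workqueue — is the standard pattern; a minimal standalone sketch with hypothetical names (flush_work_fn is assumed to be defined elsewhere):

static struct work_struct flush_work;
static struct hrtimer flush_timer;

static enum hrtimer_restart flush_timer_fn(struct hrtimer *t)
{
	/* Hard-IRQ context: no sleeping, so punt to process context. */
	schedule_work(&flush_work);
	return HRTIMER_NORESTART;
}

static void flush_setup(void)
{
	INIT_WORK(&flush_work, flush_work_fn);
	hrtimer_init(&flush_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	flush_timer.function = flush_timer_fn;
	hrtimer_start(&flush_timer, ns_to_ktime(3 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}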