Merge "msm: sde: add query for max line width supported by rotator" into msm-4.9
diff --git a/Documentation/devicetree/bindings/fb/mdss-pll.txt b/Documentation/devicetree/bindings/fb/mdss-pll.txt
index b028dda..d0d7fff 100644
--- a/Documentation/devicetree/bindings/fb/mdss-pll.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-pll.txt
@@ -15,7 +15,7 @@
                         "qcom,mdss_hdmi_pll_8996_v2", "qcom,mdss_dsi_pll_8996_v2",
                         "qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_hdmi_pll_8996_v3_1p8",
                         "qcom,mdss_edp_pll_8996_v3", "qcom,mdss_edp_pll_8996_v3_1p8",
-                        "qcom,mdss_dsi_pll_8998", "qcom,mdss_dp_pll_8998",
+                        "qcom,mdss_dsi_pll_10nm", "qcom,mdss_dp_pll_8998",
                         "qcom,mdss_hdmi_pll_8998"
 - cell-index:		Specifies the controller used
 - reg:			offset and length of the register set for the device.
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
index c7024e0..d8934c0 100644
--- a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
@@ -10,9 +10,13 @@
 - qcom,ipa-loaduC: indicate that ipa uC should be loaded
 - qcom,ipa-advertise-sg-support: determine how to respond to a query
 regarding scatter-gather capability
+- qcom,ipa-napi-enable: Boolean flag to indicate whether the NAPI
+                        framework should be enabled
+- qcom,wan-rx-desc-size: size of the WAN Rx descriptor FIFO ring; default is 256
 
 Example:
 	qcom,rmnet-ipa {
 		compatible = "qcom,rmnet-ipa";
+		qcom,wan-rx-desc-size = <256>;
 	}
 
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
index 3f55312..e9575f1 100644
--- a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
@@ -10,9 +10,13 @@
 - qcom,ipa-loaduC: indicate that ipa uC should be loaded
 - qcom,ipa-advertise-sg-support: determine how to respond to a query
 regarding scatter-gather capability
+- qcom,ipa-napi-enable: Boolean flag to indicate whether the NAPI
+                        framework should be enabled
+- qcom,wan-rx-desc-size: size of the WAN Rx descriptor FIFO ring; default is 256
 
 Example:
 	qcom,rmnet-ipa3 {
 		compatible = "qcom,rmnet-ipa3";
+		qcom,wan-rx-desc-size = <256>;
 	}
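
Both rmnet_ipa bindings above add qcom,wan-rx-desc-size as an optional property with a 256-entry default. A minimal sketch of how a driver would typically consume it, assuming the standard of_property_read_u32() helper and a hypothetical default macro (the actual rmnet_ipa parsing code is not part of this diff):

#include <linux/of.h>

#define IPA_WAN_RX_DESC_FIFO_SZ_DFLT 256	/* hypothetical default, per the binding text */

static u32 ipa_wan_rx_desc_size(struct device_node *np)
{
	u32 size = IPA_WAN_RX_DESC_FIFO_SZ_DFLT;

	/* of_property_read_u32() leaves 'size' untouched when the property
	 * is absent, so older DTs silently keep the 256-entry default.
	 */
	of_property_read_u32(np, "qcom,wan-rx-desc-size", &size);
	return size;
}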
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e46907c..33f3cc6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1925,7 +1925,11 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t dma_addr;
-	int ret, prot, len = PAGE_ALIGN(size + offset);
+	int ret, prot, len, start_offset, map_offset;
+
+	map_offset = offset & ~PAGE_MASK;
+	start_offset = offset & PAGE_MASK;
+	len = PAGE_ALIGN(map_offset + size);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_ERROR_CODE)
@@ -1933,11 +1937,12 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 
 	prot = __dma_direction_to_prot(dir);
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+			start_offset, len, prot);
 	if (ret < 0)
 		goto fail;
 
-	return dma_addr + offset;
+	return dma_addr + map_offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
 	return DMA_ERROR_CODE;
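
The hunk above stops the coherent IOMMU map path from over-mapping when the caller passes an offset spanning whole pages: the offset is split into a page-aligned part (start_offset, folded into the physical address handed to iommu_map()) and a sub-page remainder (map_offset, added to the returned IOVA), so only PAGE_ALIGN(map_offset + size) bytes are mapped instead of PAGE_ALIGN(size + offset) bytes from the page base. The arm64 copy of this function receives the same fix later in this patch. A small stand-alone sketch of the arithmetic, assuming a 4 KiB page size rather than the kernel's own macros:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long offset = 3 * PAGE_SIZE + 100;	/* three whole pages plus 100 bytes */
	unsigned long size = 8192;

	unsigned long map_offset = offset & ~PAGE_MASK;		/* 100, returned to the caller */
	unsigned long start_offset = offset & PAGE_MASK;	/* 12288, added to the physical base */
	unsigned long len = PAGE_ALIGN(map_offset + size);	/* 12288 mapped vs. 24576 before */

	printf("map %lu bytes at phys + %lu, hand back iova + %lu\n",
	       len, start_offset, map_offset);
	return 0;
}
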
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index e7ff343..a3adcec 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -476,6 +476,16 @@
 			};
 
 			port@2 {
+				reg = <2>;
+				funnel_in2_in_funnel_modem: endpoint {
+					slave-mode;
+					remote-endpoint =
+					  <&funnel_modem_out_funnel_in2>;
+				};
+
+			};
+
+			port@3 {
 				reg = <5>;
 				funnel_in2_in_funnel_apss_merg: endpoint {
 					slave-mode;
@@ -495,12 +505,17 @@
 		coresight-name = "coresight-tpda";
 
 		qcom,tpda-atid = <65>;
-		qcom,bc-elem-size = <13 32>;
-		qcom,tc-elem-size = <7 32>,
+		qcom,bc-elem-size = <10 32>,
 				    <13 32>;
-		qcom,dsb-elem-size = <13 32>;
-		qcom,cmb-elem-size = <7 32>,
-				     <8 32>,
+		qcom,tc-elem-size = <13 32>;
+		qcom,dsb-elem-size = <0 32>,
+				     <2 32>,
+				     <3 32>,
+				     <10 32>,
+				     <11 32>,
+				     <13 32>;
+		qcom,cmb-elem-size = <3 64>,
+				     <7 64>,
 				     <13 64>;
 
 		clocks = <&clock_gcc RPMH_QDSS_CLK>,
@@ -520,6 +535,33 @@
 			};
 
 			port@1 {
+				reg = <0>;
+				tpda_in_tpdm_center: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_center_out_tpda>;
+				};
+			};
+
+			port@2 {
+				reg = <2>;
+				tpda_in_funnel_dl_mm: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_dl_mm_out_tpda>;
+				};
+			};
+
+			port@3 {
+				reg = <3>;
+				tpda_in_funnel_ddr_0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_ddr_0_out_tpda>;
+				};
+			};
+
+			port@4 {
 				reg = <7>;
 				tpda_in_tpdm_vsense: endpoint {
 					slave-mode;
@@ -528,16 +570,25 @@
 				};
 			};
 
-			port@2 {
-				reg = <8>;
-				tpda_in_tpdm_dcc: endpoint {
+			port@5 {
+				reg = <10>;
+				tpda_in_tpdm_qm: endpoint {
 					slave-mode;
 					remote-endpoint =
-						<&tpdm_dcc_out_tpda>;
+						<&tpdm_qm_out_tpda>;
 				};
 			};
 
-			port@3 {
+			port@6 {
+				reg = <11>;
+				tpda_in_tpdm_north: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_north_out_tpda>;
+				};
+			};
+
+			port@7 {
 				reg = <13>;
 				tpda_in_tpdm_pimem: endpoint {
 					slave-mode;
@@ -548,6 +599,423 @@
 		};
 	};
 
+	funnel_modem: funnel@6832000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6832000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-modem";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_modem_out_funnel_in2: endpoint {
+					remote-endpoint =
+					    <&funnel_in2_in_funnel_modem>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_modem_in_tpda_modem: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpda_modem_out_funnel_modem>;
+				};
+			};
+		};
+	};
+
+	tpda_modem: tpda@6831000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x6831000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-modem";
+
+		qcom,tpda-atid = <67>;
+		qcom,dsb-elem-size = <0 32>;
+		qcom,cmb-elem-size = <0 64>;
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_modem_out_funnel_modem: endpoint {
+					remote-endpoint =
+						<&funnel_modem_in_tpda_modem>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_modem_in_tpdm_modem: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_modem_out_tpda_modem>;
+				};
+			};
+		};
+	};
+
+	tpdm_modem: tpdm@6830000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x6830000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-modem";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_modem_out_tpda_modem: endpoint {
+				remote-endpoint = <&tpda_modem_in_tpdm_modem>;
+			};
+		};
+	};
+
+	tpdm_center: tpdm@6c28000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x6c28000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-center";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_center_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_center>;
+			};
+		};
+	};
+
+	tpdm_north: tpdm@6a24000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x6a24000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-north";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_north_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_north>;
+			};
+		};
+	};
+
+	tpdm_qm: tpdm@69d0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x69d0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-qm";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_qm_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_qm>;
+			};
+		};
+	};
+
+	tpda_apss: tpda@7862000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x7862000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-apss";
+
+		qcom,tpda-atid = <66>;
+		qcom,dsb-elem-size = <0 32>;
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_apss_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+					       <&funnel_apss_merg_in_tpda_apss>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_apss_in_tpdm_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_apss_out_tpda_apss>;
+				};
+			};
+		};
+	};
+
+	tpdm_apss: tpdm@7860000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7860000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-apss";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_apss_out_tpda_apss: endpoint {
+				remote-endpoint = <&tpda_apss_in_tpdm_apss>;
+			};
+		};
+	};
+
+	tpda_llm_silver: tpda@78c0000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x78c0000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-llm-silver";
+
+		qcom,tpda-atid = <72>;
+		qcom,cmb-elem-size = <0 64>;
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_llm_silver_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+					<&funnel_apss_merg_in_tpda_llm_silver>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_llm_silver_in_tpdm_llm_silver: endpoint {
+					slave-mode;
+					remote-endpoint =
+					<&tpdm_llm_silver_out_tpda_llm_silver>;
+				};
+			};
+		};
+	};
+
+	tpdm_llm_silver: tpdm@78a0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x78a0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-llm-silver";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_llm_silver_out_tpda_llm_silver: endpoint {
+				remote-endpoint =
+					<&tpda_llm_silver_in_tpdm_llm_silver>;
+			};
+		};
+	};
+
+	tpda_llm_gold: tpda@78d0000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x78d0000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-llm-gold";
+
+		qcom,tpda-atid = <73>;
+		qcom,cmb-elem-size = <0 64>;
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_llm_gold_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+					  <&funnel_apss_merg_in_tpda_llm_gold>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_llm_gold_in_tpdm_llm_gold: endpoint {
+					slave-mode;
+					remote-endpoint =
+					  <&tpdm_llm_gold_out_tpda_llm_gold>;
+				};
+			};
+		};
+	};
+
+	tpdm_llm_gold: tpdm@78b0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x78b0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-llm-gold";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_llm_gold_out_tpda_llm_gold: endpoint {
+				remote-endpoint =
+					<&tpda_llm_gold_in_tpdm_llm_gold>;
+			};
+		};
+	};
+
+	funnel_dl_mm: funnel@6c0b000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6c0b000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-dl-mm";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_dl_mm_out_tpda: endpoint {
+					remote-endpoint =
+					    <&tpda_in_funnel_dl_mm>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				funnel_dl_mm_in_tpdm_mm: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpdm_mm_out_funnel_dl_mm>;
+				};
+			};
+		};
+	};
+
+	tpdm_mm: tpdm@6c08000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x6c08000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-mm";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_mm_out_funnel_dl_mm: endpoint {
+				remote-endpoint = <&funnel_dl_mm_in_tpdm_mm>;
+			};
+		};
+	};
+
+	funnel_ddr_0: funnel@69e2000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x69e2000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-ddr-0";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_ddr_0_out_tpda: endpoint {
+					remote-endpoint =
+					    <&tpda_in_funnel_ddr_0>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_ddr_0_in_tpdm_ddr: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpdm_ddr_out_funnel_ddr_0>;
+				};
+			};
+		};
+	};
+
+	tpdm_ddr: tpdm@69e0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x69e0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-ddr";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_ddr_out_funnel_ddr_0: endpoint {
+				remote-endpoint = <&funnel_ddr_0_in_tpdm_ddr>;
+			};
+		};
+	};
+
 	tpdm_pimem: tpdm@6850000 {
 		compatible = "qcom,coresight-tpdm";
 		reg = <0x6850000 0x1000>;
@@ -566,25 +1034,6 @@
 		};
 	};
 
-
-	tpdm_dcc: tpdm@6870000 {
-		compatible = "qcom,coresight-tpdm";
-		reg = <0x6870000 0x1000>;
-		reg-names = "tpdm-base";
-
-		coresight-name = "coresight-tpdm-dcc";
-
-		clocks = <&clock_gcc RPMH_QDSS_CLK>,
-			 <&clock_gcc RPMH_QDSS_A_CLK>;
-		clock-names = "core_clk", "core_a_clk";
-
-		port {
-			tpdm_dcc_out_tpda: endpoint {
-				remote-endpoint = <&tpda_in_tpdm_dcc>;
-			};
-		};
-	};
-
 	tpdm_vsense: tpdm@6840000 {
 		compatible = "qcom,coresight-tpdm";
 		reg = <0x6840000 0x1000>;
@@ -1129,13 +1578,40 @@
 			};
 
 			port@2 {
-				reg = <1>;
+				reg = <2>;
 				funnel_apss_merg_in_tpda_olc: endpoint {
 					slave-mode;
 					remote-endpoint =
 					    <&tpda_olc_out_funnel_apss_merg>;
 				};
 			};
+
+			port@3 {
+				reg = <4>;
+				funnel_apss_merg_in_tpda_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpda_apss_out_funnel_apss_merg>;
+				};
+			};
+
+			port@4 {
+				reg = <5>;
+				funnel_apss_merg_in_tpda_llm_silver: endpoint {
+					slave-mode;
+					remote-endpoint =
+					<&tpda_llm_silver_out_funnel_apss_merg>;
+				};
+			};
+
+			port@5 {
+				reg = <6>;
+				funnel_apss_merg_in_tpda_llm_gold: endpoint {
+					slave-mode;
+					remote-endpoint =
+					  <&tpda_llm_gold_out_funnel_apss_merg>;
+				};
+			};
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index ab4c253..d99e6de 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -13,7 +13,7 @@
 &soc {
 	mdss_mdp: qcom,mdss_mdp@ae00000 {
 		compatible = "qcom,sde-kms";
-		reg = <0x0ae00000 0x81a24>,
+		reg = <0x0ae00000 0x81d40>,
 		      <0x0aeb0000 0x2008>;
 		reg-names = "mdp_phys",
 			"vbif_phys";
@@ -357,18 +357,17 @@
 		reg-names = "dsi_phy";
 		gdsc-supply = <&mdss_core_gdsc>;
 		vdda-1p2-supply = <&pm8998_l26>;
-		qcom,platform-strength-ctrl = [ff 06
-						ff 06
-						ff 06
-						ff 00];
-		qcom,platform-regulator-settings = [1d
-							1d 1d 1d 1d];
-		qcom,platform-lane-config = [00 00 10 0f
-						00 00 10 0f
-						00 00 10 0f
-						00 00 10 0f
-						00 00 10 8f];
-
+		qcom,platform-strength-ctrl = [55 03
+						55 03
+						55 03
+						55 03
+						55 00];
+		qcom,platform-lane-config = [00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 80];
+		qcom,platform-regulator-settings = [1d 1d 1d 1d 1d];
 		qcom,phy-supply-entries {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -392,18 +391,17 @@
 		reg-names = "dsi_phy";
 		gdsc-supply = <&mdss_core_gdsc>;
 		vdda-1p2-supply = <&pm8998_l26>;
-		qcom,platform-strength-ctrl = [ff 06
-						ff 06
-						ff 06
-						ff 00];
-		qcom,platform-regulator-settings = [1d
-							1d 1d 1d 1d];
-		qcom,platform-lane-config = [00 00 10 0f
-						00 00 10 0f
-						00 00 10 0f
-						00 00 10 0f
-						00 00 10 8f];
-
+		qcom,platform-strength-ctrl = [55 03
+						55 03
+						55 03
+						55 03
+						55 00];
+		qcom,platform-regulator-settings = [1d 1d 1d 1d 1d];
+		qcom,platform-lane-config = [00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 80];
 		qcom,phy-supply-entries {
 			#address-cells = <1>;
 			#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index f7b2fc2..f591cca 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -308,37 +308,61 @@
 		pil_modem_mem: modem_region@8b000000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x8b000000 0 0x6e00000>;
+			reg = <0 0x8b000000 0 0x7300000>;
 		};
 
-		pil_video_mem: pil_video_region@91e00000 {
+		pil_video_mem: pil_video_region@92300000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x91e00000 0 0x500000>;
+			reg = <0 0x92300000 0 0x500000>;
 		};
 
-		pil_cdsp_mem: cdsp_regions@92300000 {
+		pil_cdsp_mem: cdsp_regions@92800000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x92300000 0 0x800000>;
+			reg = <0 0x92800000 0 0x800000>;
 		};
 
-		pil_adsp_mem: pil_adsp_region@92b00000 {
+		pil_adsp_mem: pil_adsp_region@93000000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x92b00000 0 0x1a00000>;
+			reg = <0 0x93000000 0 0x1a00000>;
 		};
 
-		pil_slpi_mem: pil_slpi_region@94500000 {
+		pil_mba_mem: pil_mba_region@94a00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x94500000 0 0xf00000>;
+			reg = <0 0x94a00000 0 0x200000>;
 		};
 
-		pil_spss_mem: spss_region@95400000 {
+		pil_slpi_mem: pil_slpi_region@94c00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x95400000 0 0x700000>;
+			reg = <0 0x94c00000 0 0x1400000>;
+		};
+
+		pil_ipa_fw_mem: pil_ipa_fw_region@96000000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x96000000 0 0x10000>;
+		};
+
+		pil_ipa_gsi_mem: pil_ipa_gsi_region@96010000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x96010000 0 0x5000>;
+		};
+
+		pil_gpu_mem: pil_gpu_region@96015000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x96015000 0 0x1000>;
+		};
+
+		pil_spss_mem: spss_region@96100000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x96100000 0 0x100000>;
 		};
 
 		adsp_mem: adsp_region {
@@ -2127,6 +2151,9 @@
 };
 
 &gpu_gx_gdsc {
+	clock-names = "core_root_clk";
+	clocks = <&clock_gfx GPU_CC_GX_GFX3D_CLK_SRC>;
+	qcom,force-enable-root-clk;
 	parent-supply = <&pm8005_s1_level>;
 	status = "ok";
 };
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 4a13b7a..9552dc1 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -408,6 +408,7 @@
 CONFIG_MSM_CLK_RPMH=y
 CONFIG_CLOCK_CPU_OSM=y
 CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MSM_QMP=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 7333731..5f22fed 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -425,6 +425,7 @@
 CONFIG_MSM_CLK_RPMH=y
 CONFIG_CLOCK_CPU_OSM=y
 CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MSM_QMP=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 860c3b6..40e775a 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -1743,7 +1743,11 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t dma_addr;
-	int ret, prot, len = PAGE_ALIGN(size + offset);
+	int ret, prot, len, start_offset, map_offset;
+
+	map_offset = offset & ~PAGE_MASK;
+	start_offset = offset & PAGE_MASK;
+	len = PAGE_ALIGN(map_offset + size);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_ERROR_CODE)
@@ -1753,12 +1757,12 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
 	prot = __get_iommu_pgprot(attrs, prot,
 				  is_dma_coherent(dev, attrs));
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
-			prot);
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+			start_offset, len, prot);
 	if (ret < 0)
 		goto fail;
 
-	return dma_addr + offset;
+	return dma_addr + map_offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
 	return DMA_ERROR_CODE;
@@ -1897,7 +1901,11 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
 	if (!mapping)
 		goto err;
 
-	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL | __GFP_NOWARN |
+							__GFP_NORETRY);
+	if (!mapping->bitmap)
+		mapping->bitmap = vzalloc(bitmap_size);
+
 	if (!mapping->bitmap)
 		goto err2;
 
@@ -1912,7 +1920,7 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
 	kref_init(&mapping->kref);
 	return mapping;
 err3:
-	kfree(mapping->bitmap);
+	kvfree(mapping->bitmap);
 err2:
 	kfree(mapping);
 err:
@@ -1926,7 +1934,7 @@ static void release_iommu_mapping(struct kref *kref)
 		container_of(kref, struct dma_iommu_mapping, kref);
 
 	iommu_domain_free(mapping->domain);
-	kfree(mapping->bitmap);
+	kvfree(mapping->bitmap);
 	kfree(mapping);
 }
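
Besides picking up the same offset fix as the 32-bit path, this hunk makes the IOVA bitmap allocation resilient for large mappings: kzalloc() is attempted first with __GFP_NOWARN | __GFP_NORETRY so a failed high-order allocation is cheap and quiet, vzalloc() is the fallback, and both exit paths free through kvfree(), which handles either kind of pointer. Newer kernels express this with kvzalloc() (added upstream in v4.12), which is why it is open-coded in this 4.9 tree. A hedged user-space analogue of the try-contiguous-then-fallback pattern, with calloc() standing in for both allocators:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: a fake "contiguous" allocator that refuses large
 * requests, mirroring how kzalloc() fails for big IOVA bitmaps.
 */
static void *alloc_contig(size_t size, size_t limit)
{
	return size <= limit ? calloc(1, size) : NULL;
}

int main(void)
{
	size_t bitmap_size = 1 << 20;			/* 1 MiB bitmap, too big for "contiguous" */
	void *bitmap;

	bitmap = alloc_contig(bitmap_size, 64 << 10);	/* first attempt, cheap failure */
	if (!bitmap)
		bitmap = calloc(1, bitmap_size);	/* vzalloc()-style fallback */
	if (!bitmap)
		return 1;

	printf("bitmap of %zu bytes allocated\n", bitmap_size);
	free(bitmap);					/* kvfree() plays this role in the kernel */
	return 0;
}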
 
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index cf874a1..7226dd3 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -224,3 +224,5 @@
 	  Support for the graphics clock controller on Qualcomm Technologies, Inc.
 	  sdm845 devices.
 	  Say Y if you want to support graphics controller devices.
+
+source "drivers/clk/qcom/mdss/Kconfig"
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 6e13562..1d042cd 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -38,3 +38,5 @@
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
 obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
 obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
+
+obj-y += mdss/
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index a5a7488..a95deff 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -211,9 +211,9 @@ static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
 	.cmd_rcgr = 0x101c,
 	.mnd_width = 0,
 	.hid_width = 5,
-	.enable_safe_config = true,
 	.parent_map = gpu_cc_parent_map_1,
 	.freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gpu_cc_gx_gfx3d_clk_src",
 		.parent_names = gpu_cc_parent_names_1,
diff --git a/drivers/clk/qcom/mdss/Kconfig b/drivers/clk/qcom/mdss/Kconfig
index 229780e..7213e37 100644
--- a/drivers/clk/qcom/mdss/Kconfig
+++ b/drivers/clk/qcom/mdss/Kconfig
@@ -1,5 +1,6 @@
-config MSM_MDSS_PLL
+config QCOM_MDSS_PLL
 	bool "MDSS pll programming"
+	depends on COMMON_CLK_QCOM
 	---help---
 	It provides support for DSI, eDP and HDMI interface pll programming on MDSS
 	hardware. It also handles the pll specific resources and turn them on/off when
diff --git a/drivers/clk/qcom/mdss/Makefile b/drivers/clk/qcom/mdss/Makefile
index 64c7609..d183393 100644
--- a/drivers/clk/qcom/mdss/Makefile
+++ b/drivers/clk/qcom/mdss/Makefile
@@ -1,9 +1,3 @@
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll-util.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996-util.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8998.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dp-pll-8998.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dp-pll-8998-util.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-8996.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-8998.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll-util.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-10nm.o
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-8998.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
similarity index 62%
rename from drivers/clk/qcom/mdss/mdss-dsi-pll-8998.c
rename to drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 8c6bc2c..6ce0d76 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-8998.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -17,14 +17,9 @@
 #include <linux/err.h>
 #include <linux/iopoll.h>
 #include <linux/delay.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8998.h>
-
-#include "mdss-pll.h"
 #include "mdss-dsi-pll.h"
 #include "mdss-pll.h"
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
 
 #define VCO_DELAY_USEC 1
 
@@ -128,14 +123,14 @@ struct dsi_pll_config {
 	u32 refclk_cycles;
 };
 
-struct dsi_pll_8998 {
+struct dsi_pll_10nm {
 	struct mdss_pll_resources *rsc;
 	struct dsi_pll_config pll_configuration;
 	struct dsi_pll_regs reg_setup;
 };
 
 static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
-static struct dsi_pll_8998 plls[DSI_PLL_MAX];
+static struct dsi_pll_10nm plls[DSI_PLL_MAX];
 
 static void dsi_pll_config_slave(struct mdss_pll_resources *rsc)
 {
@@ -166,7 +161,7 @@ static void dsi_pll_config_slave(struct mdss_pll_resources *rsc)
 	pr_debug("Slave PLL %s\n", rsc->slave ? "configured" : "absent");
 }
 
-static void dsi_pll_setup_config(struct dsi_pll_8998 *pll,
+static void dsi_pll_setup_config(struct dsi_pll_10nm *pll,
 				 struct mdss_pll_resources *rsc)
 {
 	struct dsi_pll_config *config = &pll->pll_configuration;
@@ -198,14 +193,14 @@ static void dsi_pll_setup_config(struct dsi_pll_8998 *pll,
 	dsi_pll_config_slave(rsc);
 }
 
-static void dsi_pll_calc_dec_frac(struct dsi_pll_8998 *pll,
+static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll,
 				  struct mdss_pll_resources *rsc)
 {
 	struct dsi_pll_config *config = &pll->pll_configuration;
 	struct dsi_pll_regs *regs = &pll->reg_setup;
 	u64 target_freq;
 	u64 fref = rsc->vco_ref_clk_rate;
-	u32 computed_output_div, div_log;
+	u32 computed_output_div, div_log = 0;
 	u64 pll_freq;
 	u64 divider;
 	u64 dec, dec_multiple;
@@ -262,7 +257,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_8998 *pll,
 	regs->frac_div_start_high = (frac & 0x30000) >> 16;
 }
 
-static void dsi_pll_calc_ssc(struct dsi_pll_8998 *pll,
+static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll,
 		  struct mdss_pll_resources *rsc)
 {
 	struct dsi_pll_config *config = &pll->pll_configuration;
@@ -307,7 +302,7 @@ static void dsi_pll_calc_ssc(struct dsi_pll_8998 *pll,
 			ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
 }
 
-static void dsi_pll_ssc_commit(struct dsi_pll_8998 *pll,
+static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll,
 		struct mdss_pll_resources *rsc)
 {
 	void __iomem *pll_base = rsc->pll_base;
@@ -333,7 +328,7 @@ static void dsi_pll_ssc_commit(struct dsi_pll_8998 *pll,
 	}
 }
 
-static void dsi_pll_config_hzindep_reg(struct dsi_pll_8998 *pll,
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll,
 				  struct mdss_pll_resources *rsc)
 {
 	void __iomem *pll_base = rsc->pll_base;
@@ -357,7 +352,7 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_8998 *pll,
 	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x80);
 }
 
-static void dsi_pll_commit(struct dsi_pll_8998 *pll,
+static void dsi_pll_commit(struct dsi_pll_10nm *pll,
 			   struct mdss_pll_resources *rsc)
 {
 	void __iomem *pll_base = rsc->pll_base;
@@ -378,12 +373,13 @@ static void dsi_pll_commit(struct dsi_pll_8998 *pll,
 
 }
 
-static int vco_8998_set_rate(struct clk *c, unsigned long rate)
+static int vco_10nm_set_rate(struct clk_hw *hw, unsigned long rate,
+			unsigned long parent_rate)
 {
 	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
 	struct mdss_pll_resources *rsc = vco->priv;
-	struct dsi_pll_8998 *pll;
+	struct dsi_pll_10nm *pll;
 
 	if (!rsc) {
 		pr_err("pll resource not found\n");
@@ -431,7 +427,7 @@ static int vco_8998_set_rate(struct clk *c, unsigned long rate)
 	return 0;
 }
 
-static int dsi_pll_8998_lock_status(struct mdss_pll_resources *pll)
+static int dsi_pll_10nm_lock_status(struct mdss_pll_resources *pll)
 {
 	int rc;
 	u32 status;
@@ -487,7 +483,7 @@ static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
 	wmb();
 
 	/* Check for PLL lock */
-	rc = dsi_pll_8998_lock_status(rsc);
+	rc = dsi_pll_10nm_lock_status(rsc);
 	if (rc) {
 		pr_err("PLL(%d) lock failed\n", rsc->index);
 		goto error;
@@ -532,9 +528,25 @@ static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
 	rsc->pll_on = false;
 }
 
-static void vco_8998_unprepare(struct clk *c)
+long vco_10nm_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate)
 {
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	unsigned long rrate = rate;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	*parent_rate = rrate;
+
+	return rrate;
+}
+
+static void vco_10nm_unprepare(struct clk_hw *hw)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
 	struct mdss_pll_resources *pll = vco->priv;
 
 	if (!pll) {
@@ -542,15 +554,15 @@ static void vco_8998_unprepare(struct clk *c)
 		return;
 	}
 
-	pll->vco_cached_rate = c->rate;
+	pll->vco_cached_rate = clk_hw_get_rate(hw);
 	dsi_pll_disable(vco);
 	mdss_pll_resource_enable(pll, false);
 }
 
-static int vco_8998_prepare(struct clk *c)
+static int vco_10nm_prepare(struct clk_hw *hw)
 {
 	int rc = 0;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
 	struct mdss_pll_resources *pll = vco->priv;
 
 	if (!pll) {
@@ -566,8 +578,9 @@ static int vco_8998_prepare(struct clk *c)
 	}
 
 	if ((pll->vco_cached_rate != 0) &&
-	    (pll->vco_cached_rate == c->rate)) {
-		rc = c->ops->set_rate(c, pll->vco_cached_rate);
+	    (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
+		rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
+				pll->vco_cached_rate);
 		if (rc) {
 			pr_err("pll(%d) set_rate failed, rc=%d\n",
 			       pll->index, rc);
@@ -586,9 +599,10 @@ static int vco_8998_prepare(struct clk *c)
 	return rc;
 }
 
-static unsigned long dsi_pll_get_vco_rate(struct clk *c)
+static unsigned long vco_10nm_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
 {
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
 	struct mdss_pll_resources *pll = vco->priv;
 	int rc;
 	u64 ref_clk = vco->ref_clk_rate;
@@ -642,46 +656,11 @@ static unsigned long dsi_pll_get_vco_rate(struct clk *c)
 	return (unsigned long)vco_rate;
 }
 
-enum handoff vco_8998_handoff(struct clk *c)
-{
-	enum handoff ret = HANDOFF_DISABLED_CLK;
-	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *pll = vco->priv;
-	u32 status;
-
-	if (!pll) {
-		pr_err("Unable to find pll resource\n");
-		return HANDOFF_DISABLED_CLK;
-	}
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("failed to enable pll(%d) resources, rc=%d\n",
-		       pll->index, rc);
-		return ret;
-	}
-
-	status = MDSS_PLL_REG_R(pll->pll_base, PLL_COMMON_STATUS_ONE);
-	if (status & BIT(0)) {
-		pll->handoff_resources = true;
-		pll->pll_on = true;
-		c->rate = dsi_pll_get_vco_rate(c);
-		ret = HANDOFF_ENABLED_CLK;
-	} else {
-		(void)mdss_pll_resource_enable(pll, false);
-		ret = HANDOFF_DISABLED_CLK;
-	}
-
-	return ret;
-}
-
-static int pixel_clk_get_div(struct div_clk *clk)
+static int pixel_clk_get_div(void *context, unsigned int reg, unsigned int *div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 	u32 reg_val;
-	int div;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -690,11 +669,16 @@ static int pixel_clk_get_div(struct div_clk *clk)
 	}
 
 	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
-	div = (reg_val & 0xF0) >> 4;
+	*div = (reg_val & 0xF0) >> 4;
+
+	if (*div == 0)
+		*div = 1;
+	else
+		*div -= 1;
 
 	(void)mdss_pll_resource_enable(pll, false);
 
-	return div;
+	return rc;
 }
 
 static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
@@ -707,16 +691,18 @@ static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
 	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
 }
 
-static int pixel_clk_set_div(struct div_clk *clk, int div)
+static int pixel_clk_set_div(void *context, unsigned int reg, unsigned int div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
 		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
 		return rc;
 	}
+	/* In common clock framework the divider value provided is one less */
+	div++;
 
 	pixel_clk_set_div_sub(pll, div);
 	if (pll->slave)
@@ -727,12 +713,11 @@ static int pixel_clk_set_div(struct div_clk *clk, int div)
 	return 0;
 }
 
-static int bit_clk_get_div(struct div_clk *clk)
+static int bit_clk_get_div(void *context, unsigned int reg, unsigned int *div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 	u32 reg_val;
-	int div;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -741,11 +726,17 @@ static int bit_clk_get_div(struct div_clk *clk)
 	}
 
 	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
-	div = (reg_val & 0x0F);
+	*div = (reg_val & 0x0F);
+
+	/* Common clock framework will add one to divider value sent */
+	if (*div == 0)
+		*div = 1;
+	else
+		*div -= 1;
 
 	(void)mdss_pll_resource_enable(pll, false);
 
-	return div;
+	return rc;
 }
 
 static void bit_clk_set_div_sub(struct mdss_pll_resources *rsc, int div)
@@ -758,10 +749,10 @@ static void bit_clk_set_div_sub(struct mdss_pll_resources *rsc, int div)
 	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG0, reg_val);
 }
 
-static int bit_clk_set_div(struct div_clk *clk, int div)
+static int bit_clk_set_div(void *context, unsigned int reg, unsigned int div)
 {
 	int rc;
-	struct mdss_pll_resources *rsc = clk->priv;
+	struct mdss_pll_resources *rsc = context;
 	struct dsi_pll_8998 *pll;
 
 	if (!rsc) {
@@ -780,6 +771,7 @@ static int bit_clk_set_div(struct div_clk *clk, int div)
 		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
 		return rc;
 	}
+	div++;
 
 	bit_clk_set_div_sub(rsc, div);
 	/* For slave PLL, this divider always should be set to 1 */
@@ -791,12 +783,12 @@ static int bit_clk_set_div(struct div_clk *clk, int div)
 	return rc;
 }
 
-static int post_vco_clk_get_div(struct div_clk *clk)
+static int post_vco_clk_get_div(void *context, unsigned int reg,
+			unsigned int *div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 	u32 reg_val;
-	int div;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -808,15 +800,20 @@ static int post_vco_clk_get_div(struct div_clk *clk)
 	reg_val &= 0x3;
 
 	if (reg_val == 2)
-		div = 1;
+		*div = 1;
 	else if (reg_val == 3)
-		div = 4;
+		*div = 4;
 	else
-		div = 1;
+		*div = 1;
+
+	if (*div == 0)
+		*div = 1;
+	else
+		*div -= 1;
 
 	(void)mdss_pll_resource_enable(pll, false);
 
-	return div;
+	return rc;
 }
 
 static int post_vco_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
@@ -842,10 +839,11 @@ static int post_vco_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
 	return rc;
 }
 
-static int post_vco_clk_set_div(struct div_clk *clk, int div)
+static int post_vco_clk_set_div(void *context, unsigned int reg,
+		unsigned int div)
 {
 	int rc = 0;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -853,6 +851,8 @@ static int post_vco_clk_set_div(struct div_clk *clk, int div)
 		return rc;
 	}
 
+	div++;
+
 	rc = post_vco_clk_set_div_sub(pll, div);
 	if (!rc && pll->slave)
 		rc = post_vco_clk_set_div_sub(pll->slave, div);
@@ -862,12 +862,12 @@ static int post_vco_clk_set_div(struct div_clk *clk, int div)
 	return rc;
 }
 
-static int post_bit_clk_get_div(struct div_clk *clk)
+static int post_bit_clk_get_div(void *context, unsigned int reg,
+			unsigned int *div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 	u32 reg_val;
-	int div;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -879,15 +879,20 @@ static int post_bit_clk_get_div(struct div_clk *clk)
 	reg_val &= 0x3;
 
 	if (reg_val == 0)
-		div = 1;
+		*div = 1;
 	else if (reg_val == 1)
-		div = 2;
+		*div = 2;
 	else
-		div = 1;
+		*div = 1;
+
+	if (*div == 0)
+		*div = 1;
+	else
+		*div -= 1;
 
 	(void)mdss_pll_resource_enable(pll, false);
 
-	return div;
+	return rc;
 }
 
 static int post_bit_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
@@ -913,10 +918,11 @@ static int post_bit_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
 	return rc;
 }
 
-static int post_bit_clk_set_div(struct div_clk *clk, int div)
+static int post_bit_clk_set_div(void *context, unsigned int reg,
+		unsigned int div)
 {
 	int rc = 0;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -924,6 +930,8 @@ static int post_bit_clk_set_div(struct div_clk *clk, int div)
 		return rc;
 	}
 
+	div++;
+
 	rc = post_bit_clk_set_div_sub(pll, div);
 	if (!rc && pll->slave)
 		rc = post_bit_clk_set_div_sub(pll->slave, div);
@@ -933,57 +941,44 @@ static int post_bit_clk_set_div(struct div_clk *clk, int div)
 	return rc;
 }
 
-long vco_8998_round_rate(struct clk *c, unsigned long rate)
-{
-	unsigned long rrate = rate;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-
-	if (rate < vco->min_rate)
-		rrate = vco->min_rate;
-	if (rate > vco->max_rate)
-		rrate = vco->max_rate;
-
-	return rrate;
-}
-
-/* clk ops that require runtime fixup */
-static const struct clk_ops clk_ops_gen_mux_dsi;
-static const struct clk_ops clk_ops_bitclk_src_c;
-static const struct clk_ops clk_ops_post_vco_div_c;
-static const struct clk_ops clk_ops_post_bit_div_c;
-static const struct clk_ops clk_ops_pclk_src_c;
-
-static struct clk_div_ops clk_post_vco_div_ops = {
-	.set_div = post_vco_clk_set_div,
-	.get_div = post_vco_clk_get_div,
+static struct regmap_config dsi_pll_10nm_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x7c0,
 };
 
-static struct clk_div_ops clk_post_bit_div_ops = {
-	.set_div = post_bit_clk_set_div,
-	.get_div = post_bit_clk_get_div,
+static struct regmap_bus post_vco_regmap_bus = {
+	.reg_write = post_vco_clk_set_div,
+	.reg_read = post_vco_clk_get_div,
 };
 
-static struct clk_div_ops pixel_clk_div_ops = {
-	.set_div = pixel_clk_set_div,
-	.get_div = pixel_clk_get_div,
+static struct regmap_bus post_bit_regmap_bus = {
+	.reg_write = post_bit_clk_set_div,
+	.reg_read = post_bit_clk_get_div,
 };
 
-static struct clk_div_ops clk_bitclk_src_ops = {
-	.set_div = bit_clk_set_div,
-	.get_div = bit_clk_get_div,
+static struct regmap_bus pclk_src_regmap_bus = {
+	.reg_write = pixel_clk_set_div,
+	.reg_read = pixel_clk_get_div,
 };
 
-static const struct clk_ops clk_ops_vco_8998 = {
-	.set_rate = vco_8998_set_rate,
-	.round_rate = vco_8998_round_rate,
-	.handoff = vco_8998_handoff,
-	.prepare = vco_8998_prepare,
-	.unprepare = vco_8998_unprepare,
+static struct regmap_bus bitclk_src_regmap_bus = {
+	.reg_write = bit_clk_set_div,
+	.reg_read = bit_clk_get_div,
 };
 
-static struct clk_mux_ops mdss_mux_ops = {
-	.set_mux_sel = mdss_set_mux_sel,
-	.get_mux_sel = mdss_get_mux_sel,
+static const struct clk_ops clk_ops_vco_10nm = {
+	.recalc_rate = vco_10nm_recalc_rate,
+	.set_rate = vco_10nm_set_rate,
+	.round_rate = vco_10nm_round_rate,
+	.prepare = vco_10nm_prepare,
+	.unprepare = vco_10nm_unprepare,
+};
+
+static struct regmap_bus mdss_mux_regmap_bus = {
+	.reg_write = mdss_set_mux_sel,
+	.reg_read = mdss_get_mux_sel,
 };
 
 /*
@@ -1039,303 +1034,296 @@ static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
 	.ref_clk_rate = 19200000UL,
 	.min_rate = 1500000000UL,
 	.max_rate = 3500000000UL,
-	.c = {
-		.dbg_name = "dsi0pll_vco_clk",
-		.ops = &clk_ops_vco_8998,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_vco_clk.c),
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_vco_clk",
+			.parent_names = (const char *[]){"xo_board"},
+			.num_parents = 1,
+			.ops = &clk_ops_vco_10nm,
+			.flags = CLK_GET_RATE_NOCACHE,
 	},
 };
 
-static struct div_clk dsi0pll_bitclk_src = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 15,
-	},
-	.ops = &clk_bitclk_src_ops,
-	.c = {
-		.parent = &dsi0pll_vco_clk.c,
-		.dbg_name = "dsi0pll_bitclk_src",
-		.ops = &clk_ops_bitclk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_bitclk_src.c),
-	}
-};
-
-static struct div_clk dsi0pll_post_vco_div = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 4,
-	},
-	.ops = &clk_post_vco_div_ops,
-	.c = {
-		.parent = &dsi0pll_vco_clk.c,
-		.dbg_name = "dsi0pll_post_vco_div",
-		.ops = &clk_ops_post_vco_div_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_post_vco_div.c),
-	}
-};
-
-static struct div_clk dsi0pll_post_bit_div = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 2,
-	},
-	.ops = &clk_post_bit_div_ops,
-	.c = {
-		.parent = &dsi0pll_bitclk_src.c,
-		.dbg_name = "dsi0pll_post_bit_div",
-		.ops = &clk_ops_post_bit_div_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_post_bit_div.c),
-	}
-};
-
-static struct mux_clk dsi0pll_pclk_src_mux = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]) {
-		{&dsi0pll_post_bit_div.c, 0},
-		{&dsi0pll_post_vco_div.c, 1},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi0pll_post_bit_div.c,
-		.dbg_name = "dsi0pll_pclk_src_mux",
-		.ops = &clk_ops_gen_mux,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_pclk_src_mux.c),
-	}
-};
-
-static struct div_clk dsi0pll_pclk_src = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 15,
-	},
-	.ops = &pixel_clk_div_ops,
-	.c = {
-		.parent = &dsi0pll_pclk_src_mux.c,
-		.dbg_name = "dsi0pll_pclk_src",
-		.ops = &clk_ops_pclk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_pclk_src.c),
-	},
-};
-
-static struct mux_clk dsi0pll_pclk_mux = {
-	.num_parents = 1,
-	.parents = (struct clk_src[]) {
-		{&dsi0pll_pclk_src.c, 0},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi0pll_pclk_src.c,
-		.dbg_name = "dsi0pll_pclk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_pclk_mux.c),
-	}
-};
-
-static struct div_clk dsi0pll_byteclk_src = {
-	.data = {
-		.div = 8,
-		.min_div = 8,
-		.max_div = 8,
-	},
-	.c = {
-		.parent = &dsi0pll_bitclk_src.c,
-		.dbg_name = "dsi0pll_byteclk_src",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_byteclk_src.c),
-	},
-};
-
-static struct mux_clk dsi0pll_byteclk_mux = {
-	.num_parents = 1,
-	.parents = (struct clk_src[]) {
-		{&dsi0pll_byteclk_src.c, 0},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi0pll_byteclk_src.c,
-		.dbg_name = "dsi0pll_byteclk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_byteclk_mux.c),
-	}
-};
-
 static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
 	.ref_clk_rate = 19200000UL,
 	.min_rate = 1500000000UL,
 	.max_rate = 3500000000UL,
-	.c = {
-		.dbg_name = "dsi1pll_vco_clk",
-		.ops = &clk_ops_vco_8998,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_vco_clk.c),
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_vco_clk",
+			.parent_names = (const char *[]){"xo_board"},
+			.num_parents = 1,
+			.ops = &clk_ops_vco_10nm,
+			.flags = CLK_GET_RATE_NOCACHE,
 	},
 };
 
-static struct div_clk dsi1pll_bitclk_src = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 15,
-	},
-	.ops = &clk_bitclk_src_ops,
-	.c = {
-		.parent = &dsi1pll_vco_clk.c,
-		.dbg_name = "dsi1pll_bitclk_src",
-		.ops = &clk_ops_bitclk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_bitclk_src.c),
-	}
-};
-
-static struct div_clk dsi1pll_post_vco_div = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 4,
-	},
-	.ops = &clk_post_vco_div_ops,
-	.c = {
-		.parent = &dsi1pll_vco_clk.c,
-		.dbg_name = "dsi1pll_post_vco_div",
-		.ops = &clk_ops_post_vco_div_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_post_vco_div.c),
-	}
-};
-
-static struct div_clk dsi1pll_post_bit_div = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 2,
-	},
-	.ops = &clk_post_bit_div_ops,
-	.c = {
-		.parent = &dsi1pll_bitclk_src.c,
-		.dbg_name = "dsi1pll_post_bit_div",
-		.ops = &clk_ops_post_bit_div_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_post_bit_div.c),
-	}
-};
-
-static struct mux_clk dsi1pll_pclk_src_mux = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]) {
-		{&dsi1pll_post_bit_div.c, 0},
-		{&dsi1pll_post_vco_div.c, 1},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi1pll_post_bit_div.c,
-		.dbg_name = "dsi1pll_pclk_src_mux",
-		.ops = &clk_ops_gen_mux,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_pclk_src_mux.c),
-	}
-};
-
-static struct div_clk dsi1pll_pclk_src = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 15,
-	},
-	.ops = &pixel_clk_div_ops,
-	.c = {
-		.parent = &dsi1pll_pclk_src_mux.c,
-		.dbg_name = "dsi1pll_pclk_src",
-		.ops = &clk_ops_pclk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_pclk_src.c),
+static struct clk_regmap_div dsi0pll_bitclk_src = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_bitclk_src",
+			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
 	},
 };
 
-static struct mux_clk dsi1pll_pclk_mux = {
-	.num_parents = 1,
-	.parents = (struct clk_src[]) {
-		{&dsi1pll_pclk_src.c, 0},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi1pll_pclk_src.c,
-		.dbg_name = "dsi1pll_pclk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_pclk_mux.c),
-	}
-};
-
-static struct div_clk dsi1pll_byteclk_src = {
-	.data = {
-		.div = 8,
-		.min_div = 8,
-		.max_div = 8,
-	},
-	.c = {
-		.parent = &dsi1pll_bitclk_src.c,
-		.dbg_name = "dsi1pll_byteclk_src",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_byteclk_src.c),
+static struct clk_regmap_div dsi1pll_bitclk_src = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_bitclk_src",
+			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
 	},
 };
 
-static struct mux_clk dsi1pll_byteclk_mux = {
-	.num_parents = 1,
-	.parents = (struct clk_src[]) {
-		{&dsi1pll_byteclk_src.c, 0},
+static struct clk_regmap_div dsi0pll_post_vco_div = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_post_vco_div",
+			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
 	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi1pll_byteclk_src.c,
-		.dbg_name = "dsi1pll_byteclk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_byteclk_mux.c),
-	}
 };
 
-static struct clk_lookup mdss_dsi_pll0cc_8998[] = {
-	CLK_LIST(dsi0pll_byteclk_mux),
-	CLK_LIST(dsi0pll_byteclk_src),
-	CLK_LIST(dsi0pll_pclk_mux),
-	CLK_LIST(dsi0pll_pclk_src),
-	CLK_LIST(dsi0pll_pclk_src_mux),
-	CLK_LIST(dsi0pll_post_bit_div),
-	CLK_LIST(dsi0pll_post_vco_div),
-	CLK_LIST(dsi0pll_bitclk_src),
-	CLK_LIST(dsi0pll_vco_clk),
-};
-static struct clk_lookup mdss_dsi_pll1cc_8998[] = {
-	CLK_LIST(dsi1pll_byteclk_mux),
-	CLK_LIST(dsi1pll_byteclk_src),
-	CLK_LIST(dsi1pll_pclk_mux),
-	CLK_LIST(dsi1pll_pclk_src),
-	CLK_LIST(dsi1pll_pclk_src_mux),
-	CLK_LIST(dsi1pll_post_bit_div),
-	CLK_LIST(dsi1pll_post_vco_div),
-	CLK_LIST(dsi1pll_bitclk_src),
-	CLK_LIST(dsi1pll_vco_clk),
+static struct clk_regmap_div dsi1pll_post_vco_div = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_post_vco_div",
+			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
 };
 
-int dsi_pll_clock_register_8998(struct platform_device *pdev,
+static struct clk_fixed_factor dsi0pll_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_byteclk_src",
+		.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_byteclk_src",
+		.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_regmap_div dsi0pll_post_bit_div = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_post_bit_div",
+			.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_post_bit_div = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_post_bit_div",
+			.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_byteclk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_byteclk_mux",
+			.parent_names = (const char *[]){"dsi0pll_byteclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_byteclk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_byteclk_mux",
+			.parent_names = (const char *[]){"dsi1pll_byteclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pclk_src_mux",
+			.parent_names = (const char *[]){"dsi0pll_post_bit_div",
+						"dsi0pll_post_bit_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pclk_src_mux",
+			.parent_names = (const char *[]){"dsi1pll_post_bit_div",
+						"dsi1pll_post_bit_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi0pll_pclk_src = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi0pll_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_pclk_src = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi1pll_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_pclk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pclk_mux",
+			.parent_names = (const char *[]){"dsi0pll_pclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_pclk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pclk_mux",
+			.parent_names = (const char *[]){"dsi1pll_pclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_hw *mdss_dsi_pllcc_10nm[] = {
+	[VCO_CLK_0] = &dsi0pll_vco_clk.hw,
+	[BITCLK_SRC_0_CLK] = &dsi0pll_bitclk_src.clkr.hw,
+	[BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw,
+	[POST_BIT_DIV_0_CLK] = &dsi0pll_post_bit_div.clkr.hw,
+	[POST_VCO_DIV_0_CLK] = &dsi0pll_post_vco_div.clkr.hw,
+	[BYTECLK_MUX_0_CLK] = &dsi0pll_byteclk_mux.clkr.hw,
+	[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
+	[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
+	[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
+	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
+	[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
+	[BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw,
+	[POST_BIT_DIV_1_CLK] = &dsi1pll_post_bit_div.clkr.hw,
+	[POST_VCO_DIV_1_CLK] = &dsi1pll_post_vco_div.clkr.hw,
+	[BYTECLK_MUX_1_CLK] = &dsi1pll_byteclk_mux.clkr.hw,
+	[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
+	[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
+	[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
+
+};
+
+int dsi_pll_clock_register_10nm(struct platform_device *pdev,
 				  struct mdss_pll_resources *pll_res)
 {
-	int rc = 0, ndx;
+	int rc = 0, ndx, i;
+	struct clk *clk;
+	struct clk_onecell_data *clk_data;
+	int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_10nm);
+	struct regmap *rmap;
 
 	if (!pdev || !pdev->dev.of_node ||
 		!pll_res || !pll_res->pll_base || !pll_res->phy_base) {
@@ -1353,62 +1341,120 @@ int dsi_pll_clock_register_8998(struct platform_device *pdev,
 	pll_rsc_db[ndx] = pll_res;
 	pll_res->priv = &plls[ndx];
 	plls[ndx].rsc = pll_res;
-
-	/* runtime fixup of all div and mux clock ops */
-	clk_ops_gen_mux_dsi = clk_ops_gen_mux;
-	clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
-	clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
-
-	clk_ops_bitclk_src_c = clk_ops_div;
-	clk_ops_bitclk_src_c.prepare = mdss_pll_div_prepare;
-
-	/*
-	 * Set the ops for the two dividers in the pixel clock tree to the
-	 * slave_div to ensure that a set rate on this divider clock will not
-	 * be propagated to it's parent. This is needed ensure that when we set
-	 * the rate for pixel clock, the vco is not reconfigured
-	 */
-	clk_ops_post_vco_div_c = clk_ops_slave_div;
-	clk_ops_post_vco_div_c.prepare = mdss_pll_div_prepare;
-
-	clk_ops_post_bit_div_c = clk_ops_slave_div;
-	clk_ops_post_bit_div_c.prepare = mdss_pll_div_prepare;
-
-	clk_ops_pclk_src_c = clk_ops_div;
-	clk_ops_pclk_src_c.prepare = mdss_pll_div_prepare;
-
 	pll_res->vco_delay = VCO_DELAY_USEC;
-	if (ndx == 0) {
-		dsi0pll_byteclk_mux.priv = pll_res;
-		dsi0pll_byteclk_src.priv = pll_res;
-		dsi0pll_pclk_mux.priv = pll_res;
-		dsi0pll_pclk_src.priv = pll_res;
-		dsi0pll_pclk_src_mux.priv = pll_res;
-		dsi0pll_post_bit_div.priv = pll_res;
-		dsi0pll_post_vco_div.priv = pll_res;
-		dsi0pll_bitclk_src.priv = pll_res;
-		dsi0pll_vco_clk.priv = pll_res;
 
-		rc = of_msm_clock_register(pdev->dev.of_node,
-			mdss_dsi_pll0cc_8998,
-			ARRAY_SIZE(mdss_dsi_pll0cc_8998));
-	} else {
-		dsi1pll_byteclk_mux.priv = pll_res;
-		dsi1pll_byteclk_src.priv = pll_res;
-		dsi1pll_pclk_mux.priv = pll_res;
-		dsi1pll_pclk_src.priv = pll_res;
-		dsi1pll_pclk_src_mux.priv = pll_res;
-		dsi1pll_post_bit_div.priv = pll_res;
-		dsi1pll_post_vco_div.priv = pll_res;
-		dsi1pll_bitclk_src.priv = pll_res;
-		dsi1pll_vco_clk.priv = pll_res;
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+					GFP_KERNEL);
+	if (!clk_data)
+		return -ENOMEM;
 
-		rc = of_msm_clock_register(pdev->dev.of_node,
-			mdss_dsi_pll1cc_8998,
-			ARRAY_SIZE(mdss_dsi_pll1cc_8998));
+	clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
+				sizeof(struct clk *)), GFP_KERNEL);
+	if (!clk_data->clks) {
+		devm_kfree(&pdev->dev, clk_data);
+		return -ENOMEM;
 	}
-	if (rc)
-		pr_err("dsi%dpll clock register failed, rc=%d\n", ndx, rc);
+	clk_data->clk_num = num_clks;
 
+	/* Establish client data */
+	if (ndx == 0) {
+		rmap = devm_regmap_init(&pdev->dev, &post_vco_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_post_vco_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &post_bit_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_post_bit_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_bitclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_pclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_pclk_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_pclk_src_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_byteclk_mux.clkr.regmap = rmap;
+
+		for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
+			clk = devm_clk_register(&pdev->dev,
+						mdss_dsi_pllcc_10nm[i]);
+			if (IS_ERR(clk)) {
+				pr_err("clk registration failed for DSI clock:%d\n",
+							pll_res->index);
+				rc = -EINVAL;
+				goto clk_register_fail;
+			}
+			clk_data->clks[i] = clk;
+
+		}
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+
+
+	} else {
+		rmap = devm_regmap_init(&pdev->dev, &post_vco_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_post_vco_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &post_bit_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_post_bit_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_bitclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_pclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_pclk_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_pclk_src_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_byteclk_mux.clkr.regmap = rmap;
+
+		for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
+			clk = devm_clk_register(&pdev->dev,
+						mdss_dsi_pllcc_10nm[i]);
+			if (IS_ERR(clk)) {
+				pr_err("clk registration failed for DSI clock:%d\n",
+						pll_res->index);
+				rc = -EINVAL;
+				goto clk_register_fail;
+			}
+			clk_data->clks[i] = clk;
+
+		}
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+	}
+	if (!rc) {
+		pr_info("Registered DSI PLL ndx=%d, clocks successfully", ndx);
+
+		return rc;
+	}
+clk_register_fail:
+	devm_kfree(&pdev->dev, clk_data->clks);
+	devm_kfree(&pdev->dev, clk_data);
 	return rc;
 }
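
The registration path above follows the common clock framework's onecell-provider pattern: each clk_hw in mdss_dsi_pllcc_10nm gets a regmap, is registered with devm_clk_register(), and the resulting struct clk pointers are exposed to device tree consumers via of_clk_add_provider(). A stripped-down sketch of that pattern (generic names, not symbols from this patch) looks roughly like this:

static struct clk_onecell_data demo_clk_data;

static int demo_register_provider(struct platform_device *pdev,
				  struct clk_hw **hws, int num)
{
	struct clk *clk;
	int i;

	demo_clk_data.clks = devm_kcalloc(&pdev->dev, num,
					  sizeof(struct clk *), GFP_KERNEL);
	if (!demo_clk_data.clks)
		return -ENOMEM;
	demo_clk_data.clk_num = num;

	for (i = 0; i < num; i++) {
		clk = devm_clk_register(&pdev->dev, hws[i]);
		if (IS_ERR(clk))
			return PTR_ERR(clk);
		demo_clk_data.clks[i] = clk;
	}

	/* consumers resolve <&provider INDEX> phandles against this table */
	return of_clk_add_provider(pdev->dev.of_node,
				   of_clk_src_onecell_get, &demo_clk_data);
}

The indices consumers pass in their clock specifiers are the VCO_CLK_0 through PCLK_MUX_1_CLK constants from the dt-bindings header added later in this patch.
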
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll.h b/drivers/clk/qcom/mdss/mdss-dsi-pll.h
index 286c99e..7fc38a2 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,8 @@
 #ifndef __MDSS_DSI_PLL_H
 #define __MDSS_DSI_PLL_H
 
+#include <linux/clk-provider.h>
+#include "mdss-pll.h"
 #define MAX_DSI_PLL_EN_SEQS	10
 
 #define DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG		(0x0020)
@@ -31,6 +33,7 @@ struct lpfr_cfg {
 };
 
 struct dsi_pll_vco_clk {
+	struct clk_hw	hw;
 	unsigned long	ref_clk_rate;
 	unsigned long	min_rate;
 	unsigned long	max_rate;
@@ -38,73 +41,16 @@ struct dsi_pll_vco_clk {
 	struct lpfr_cfg *lpfr_lut;
 	u32		lpfr_lut_size;
 	void		*priv;
-
-	struct clk	c;
-
 	int (*pll_enable_seqs[MAX_DSI_PLL_EN_SEQS])
 			(struct mdss_pll_resources *dsi_pll_Res);
 };
 
-static inline struct dsi_pll_vco_clk *to_vco_clk(struct clk *clk)
+int dsi_pll_clock_register_10nm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+static inline struct dsi_pll_vco_clk *to_vco_clk_hw(struct clk_hw *hw)
 {
-	return container_of(clk, struct dsi_pll_vco_clk, c);
+	return container_of(hw, struct dsi_pll_vco_clk, hw);
 }
 
-int dsi_pll_clock_register_hpm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_20nm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_lpm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_8996(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_8998(struct platform_device *pdev,
-				  struct mdss_pll_resources *pll_res);
-
-int set_byte_mux_sel(struct mux_clk *clk, int sel);
-int get_byte_mux_sel(struct mux_clk *clk);
-int dsi_pll_mux_prepare(struct clk *c);
-int fixed_4div_set_div(struct div_clk *clk, int div);
-int fixed_4div_get_div(struct div_clk *clk);
-int digital_set_div(struct div_clk *clk, int div);
-int digital_get_div(struct div_clk *clk);
-int analog_set_div(struct div_clk *clk, int div);
-int analog_get_div(struct div_clk *clk);
-int dsi_pll_lock_status(struct mdss_pll_resources *dsi_pll_res);
-int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
-unsigned long vco_get_rate(struct clk *c);
-long vco_round_rate(struct clk *c, unsigned long rate);
-enum handoff vco_handoff(struct clk *c);
-int vco_prepare(struct clk *c);
-void vco_unprepare(struct clk *c);
-
-/* APIs for 20nm PHY PLL */
-int pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
-int shadow_pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco,
-				unsigned long rate);
-long pll_20nm_vco_round_rate(struct clk *c, unsigned long rate);
-enum handoff pll_20nm_vco_handoff(struct clk *c);
-int pll_20nm_vco_prepare(struct clk *c);
-void pll_20nm_vco_unprepare(struct clk *c);
-int pll_20nm_vco_enable_seq(struct mdss_pll_resources *dsi_pll_res);
-
-int set_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
-int set_shadow_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
-int get_bypass_lp_div_mux_sel(struct mux_clk *clk);
-int fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
-int shadow_fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
-int fixed_hr_oclk2_get_div(struct div_clk *clk);
-int hr_oclk3_set_div(struct div_clk *clk, int div);
-int shadow_hr_oclk3_set_div(struct div_clk *clk, int div);
-int hr_oclk3_get_div(struct div_clk *clk);
-int ndiv_set_div(struct div_clk *clk, int div);
-int shadow_ndiv_set_div(struct div_clk *clk, int div);
-int ndiv_get_div(struct div_clk *clk);
-void __dsi_pll_disable(void __iomem *pll_base);
-
-int set_mdss_pixel_mux_sel(struct mux_clk *clk, int sel);
-int get_mdss_pixel_mux_sel(struct mux_clk *clk);
-int set_mdss_byte_mux_sel(struct mux_clk *clk, int sel);
-int get_mdss_byte_mux_sel(struct mux_clk *clk);
-
 #endif
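
Replacing the driver-private struct clk member with an embedded struct clk_hw means every clk_ops callback now recovers the VCO state through the container_of() helper above. A hedged illustration (the callback name and body are hypothetical, not part of this patch):

static long demo_vco_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *parent_rate)
{
	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);

	/* clamp the request to the VCO's supported range */
	if (rate < vco->min_rate)
		return vco->min_rate;
	if (rate > vco->max_rate)
		return vco->max_rate;
	return rate;
}
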
diff --git a/drivers/clk/qcom/mdss/mdss-pll-util.c b/drivers/clk/qcom/mdss/mdss-pll-util.c
index 690c53f..4d79772 100644
--- a/drivers/clk/qcom/mdss/mdss-pll-util.c
+++ b/drivers/clk/qcom/mdss/mdss-pll-util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,6 @@
 #include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/string.h>
-#include <linux/clk/msm-clock-generic.h>
 #include <linux/of_address.h>
 #include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
index c22fa80..0a0d303 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.c
+++ b/drivers/clk/qcom/mdss/mdss-pll.c
@@ -19,12 +19,8 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/iopoll.h>
-#include <linux/clk/msm-clock-generic.h>
-
 #include "mdss-pll.h"
 #include "mdss-dsi-pll.h"
-#include "mdss-hdmi-pll.h"
-#include "mdss-dp-pll.h"
 
 int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
 {
@@ -128,32 +124,10 @@ static int mdss_pll_resource_parse(struct platform_device *pdev,
 		goto err;
 	}
 
-	if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8996")) {
-		pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
-		pll_res->target_id = MDSS_PLL_TARGET_8996;
-		pll_res->revision = 1;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8996_v2")) {
-		pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
-		pll_res->target_id = MDSS_PLL_TARGET_8996;
-		pll_res->revision = 2;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8998")) {
-		pll_res->pll_interface_type = MDSS_DSI_PLL_8998;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_8998")) {
-		pll_res->pll_interface_type = MDSS_DP_PLL_8998;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996_v2")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V2;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996_v3")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V3;
-	} else if (!strcmp(compatible_stream,
-				"qcom,mdss_hdmi_pll_8996_v3_1p8")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V3_1_8;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8998")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8998;
-	} else {
+	if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_10nm"))
+		pll_res->pll_interface_type = MDSS_DSI_PLL_10NM;
+	else
 		goto err;
-	}
 
 	return rc;
 
@@ -174,29 +148,9 @@ static int mdss_pll_clock_register(struct platform_device *pdev,
 	}
 
 	switch (pll_res->pll_interface_type) {
-	case MDSS_DSI_PLL_8996:
-		rc = dsi_pll_clock_register_8996(pdev, pll_res);
-		break;
-	case MDSS_DSI_PLL_8998:
-		rc = dsi_pll_clock_register_8998(pdev, pll_res);
-	case MDSS_DP_PLL_8998:
-		rc = dp_pll_clock_register_8998(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8996:
-		rc = hdmi_8996_v1_pll_clock_register(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8996_V2:
-		rc = hdmi_8996_v2_pll_clock_register(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8996_V3:
-		rc = hdmi_8996_v3_pll_clock_register(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8996_V3_1_8:
-		rc = hdmi_8996_v3_1p8_pll_clock_register(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8998:
-		rc = hdmi_8998_pll_clock_register(pdev, pll_res);
-		break;
+	case MDSS_DSI_PLL_10NM:
+		rc = dsi_pll_clock_register_10nm(pdev, pll_res);
+		break;
 	case MDSS_UNKNOWN_PLL:
 	default:
 		rc = -EINVAL;
@@ -392,15 +345,7 @@ static int mdss_pll_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id mdss_pll_dt_match[] = {
-	{.compatible = "qcom,mdss_dsi_pll_8996"},
-	{.compatible = "qcom,mdss_dsi_pll_8996_v2"},
-	{.compatible = "qcom,mdss_dsi_pll_8998"},
-	{.compatible = "qcom,mdss_hdmi_pll_8996"},
-	{.compatible = "qcom,mdss_hdmi_pll_8996_v2"},
-	{.compatible = "qcom,mdss_hdmi_pll_8996_v3"},
-	{.compatible = "qcom,mdss_hdmi_pll_8996_v3_1p8"},
-	{.compatible = "qcom,mdss_dp_pll_8998"},
-	{.compatible = "qcom,mdss_hdmi_pll_8998"},
+	{.compatible = "qcom,mdss_dsi_pll_10nm"},
 	{}
 };
 
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index 48dddf6..28b7ca6 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -12,10 +12,16 @@
 
 #ifndef __MDSS_PLL_H
 #define __MDSS_PLL_H
-
-#include <linux/mdss_io_util.h>
-#include <linux/clk/msm-clock-generic.h>
+#include <linux/sde_io_util.h>
+#include <linux/clk-provider.h>
 #include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/regmap.h>
+#include "../clk-regmap.h"
+#include "../clk-regmap-divider.h"
+#include "../clk-regmap-mux.h"
+
 
 #define MDSS_PLL_REG_W(base, offset, data)	\
 				writel_relaxed((data), (base) + (offset))
@@ -30,14 +36,7 @@
 			(base) + (offset))
 
 enum {
-	MDSS_DSI_PLL_8996,
-	MDSS_DSI_PLL_8998,
-	MDSS_DP_PLL_8998,
-	MDSS_HDMI_PLL_8996,
-	MDSS_HDMI_PLL_8996_V2,
-	MDSS_HDMI_PLL_8996_V3,
-	MDSS_HDMI_PLL_8996_V3_1_8,
-	MDSS_HDMI_PLL_8998,
+	MDSS_DSI_PLL_10NM,
 	MDSS_UNKNOWN_PLL,
 };
 
@@ -200,20 +199,24 @@ static inline bool is_gdsc_disabled(struct mdss_pll_resources *pll_res)
 		(!(readl_relaxed(pll_res->gdsc_base) & BIT(0)))) ? false : true;
 }
 
-static inline int mdss_pll_div_prepare(struct clk *c)
+static inline int mdss_pll_div_prepare(struct clk_hw *hw)
 {
-	struct div_clk *div = to_div_clk(c);
+	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
 	/* Restore the divider's value */
-	return div->ops->set_div(div, div->data.div);
+	return hw->init->ops->set_rate(hw, clk_hw_get_rate(hw),
+				clk_hw_get_rate(parent_hw));
 }
 
-static inline int mdss_set_mux_sel(struct mux_clk *clk, int sel)
+static inline int mdss_set_mux_sel(void *context, unsigned int reg,
+					unsigned int val)
 {
 	return 0;
 }
 
-static inline int mdss_get_mux_sel(struct mux_clk *clk)
+static inline int mdss_get_mux_sel(void *context, unsigned int reg,
+					unsigned int *val)
 {
+	*val = 0;
 	return 0;
 }
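
The stub mdss_set_mux_sel()/mdss_get_mux_sel() helpers only do anything useful once they are plugged into a regmap_bus that the PLL driver hands to devm_regmap_init(); the mdss_mux_regmap_bus referenced in the 10nm PLL code is presumably wired up along these lines (sketch only, the real definition lives in the PLL driver and may differ):

static const struct regmap_bus demo_mux_regmap_bus = {
	.reg_write = mdss_set_mux_sel,	/* writes are ignored */
	.reg_read = mdss_get_mux_sel,	/* always reports mux position 0 */
};
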
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
index 2fcf10b..cc87775 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
@@ -503,7 +503,7 @@ static int dsi_display_core_clk_enable(struct dsi_core_clks *clks,
 			goto error_disable_master;
 		}
 	}
-
+	return rc;
 error_disable_master:
 	(void)dsi_core_clk_stop(m_clks);
 
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 2709aca..218c6e7 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -537,6 +537,8 @@
 #define A6XX_UCHE_GMEM_RANGE_MAX_HI         0xE0E
 #define A6XX_UCHE_CACHE_WAYS                0xE17
 #define A6XX_UCHE_FILTER_CNTL               0xE18
+#define A6XX_UCHE_CLIENT_PF                 0xE19
+#define A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK  0x7
 #define A6XX_UCHE_PERFCTR_UCHE_SEL_0        0xE1C
 #define A6XX_UCHE_PERFCTR_UCHE_SEL_1        0xE1D
 #define A6XX_UCHE_PERFCTR_UCHE_SEL_2        0xE1E
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index e23d6a0..75d5587 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -856,6 +856,8 @@ struct adreno_gpudev {
 				unsigned int arg1, unsigned int arg2);
 	bool (*hw_isidle)(struct adreno_device *);
 	int (*wait_for_gmu_idle)(struct adreno_device *);
+	const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
+				unsigned int fsynr1);
 };
 
 /**
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 54d4bf7..49d784c 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -285,6 +285,8 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 	kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
 					  (1 << 30) | 0x4000);
 
+	kgsl_regwrite(device, A6XX_UCHE_CLIENT_PF, 1);
+
 	/* Set TWOPASSUSEWFI in A6XX_PC_DBG_ECO_CNTL if requested */
 	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI))
 		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
@@ -1522,6 +1524,46 @@ static void a6xx_llc_enable_overrides(struct adreno_device *adreno_dev)
 	iounmap(gpu_cx_reg);
 }
 
+static const char *fault_block[8] = {
+	[0] = "CP",
+	[1] = "UCHE",
+	[2] = "VFD",
+	[3] = "UCHE",
+	[4] = "CCU",
+	[5] = "unknown",
+	[6] = "CDP Prefetch",
+	[7] = "GPMU",
+};
+
+static const char *uche_client[8] = {
+	[0] = "VFD",
+	[1] = "SP",
+	[2] = "VSC",
+	[3] = "VPC",
+	[4] = "HLSQ",
+	[5] = "PC",
+	[6] = "LRZ",
+	[7] = "unknown",
+};
+
+static const char *a6xx_iommu_fault_block(struct adreno_device *adreno_dev,
+						unsigned int fsynr1)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	unsigned int client_id;
+	unsigned int uche_client_id;
+
+	client_id = fsynr1 & 0xff;
+
+	if (client_id >= ARRAY_SIZE(fault_block))
+		return "unknown";
+	else if (client_id != 3)
+		return fault_block[client_id];
+
+	kgsl_regread(device, A6XX_UCHE_CLIENT_PF, &uche_client_id);
+	return uche_client[uche_client_id & A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK];
+}
+
 #define A6XX_INT_MASK \
 	((1 << A6XX_INT_CP_AHB_ERROR) |			\
 	 (1 << A6XX_INT_ATB_ASYNCFIFO_OVERFLOW) |	\
@@ -2078,5 +2120,6 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
 	.oob_clear = a6xx_oob_clear,
 	.rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
 	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
-	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle
+	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
+	.iommu_fault_block = a6xx_iommu_fault_block,
 };
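
As a worked example of the decode above, with invented register values:

/*
 * Hypothetical fault: fsynr1 = 0x403
 *   client_id      = 0x403 & 0xff = 0x03            -> fault_block[3] = "UCHE"
 *   client_id == 3, so read A6XX_UCHE_CLIENT_PF; say it returns 0x4
 *   uche_client_id = 0x4 & A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK = 4
 *                                                   -> uche_client[4] = "HLSQ"
 */
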
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 0325db8..86d4d61 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -797,6 +797,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	int write;
 	struct kgsl_device *device;
 	struct adreno_device *adreno_dev;
+	struct adreno_gpudev *gpudev;
 	unsigned int no_page_fault_log = 0;
 	unsigned int curr_context_id = 0;
 	struct kgsl_context *context;
@@ -813,6 +814,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
 	device = KGSL_MMU_DEVICE(mmu);
 	adreno_dev = ADRENO_DEVICE(device);
+	gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 
 	if (pt->name == KGSL_MMU_SECURE_PT)
 		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
@@ -886,6 +888,16 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 			ctx->name, ptbase, contextidr,
 			write ? "write" : "read", fault_type);
 
+		if (gpudev->iommu_fault_block) {
+			unsigned int fsynr1;
+
+			fsynr1 = KGSL_IOMMU_GET_CTX_REG(ctx, FSYNR1);
+			KGSL_MEM_CRIT(ctx->kgsldev,
+				"FAULTING BLOCK: %s\n",
+				gpudev->iommu_fault_block(adreno_dev,
+								fsynr1));
+		}
+
 		/* Don't print the debug if this is a permissions fault */
 		if (!(flags & IOMMU_FAULT_PERMISSION)) {
 			_check_if_freed(ctx, addr, ptname);
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 34c7381..aded314 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,7 +17,8 @@
 #include <linux/vmalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/dma-iommu.h>
-
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
 
 /* some redundant definitions... :( TODO: move to io-pgtable-fast.h */
 #define FAST_PAGE_SHIFT		12
@@ -633,7 +634,7 @@ static void __fast_smmu_mapped_over_stale(struct dma_fast_smmu_mapping *fast,
 	dev_err(fast->dev, "Mapped over stale tlb at %pa\n", &iova);
 	dev_err(fast->dev, "bitmap (failure at idx %lu):\n", bitmap_idx);
 	dev_err(fast->dev, "ptep: %p pmds: %p diff: %lu\n", ptep,
-		fast->pgtbl_pmds, ptep - fast->pgtbl_pmds);
+		fast->pgtbl_pmds, bitmap_idx);
 	print_hex_dump(KERN_ERR, "bmap: ", DUMP_PREFIX_ADDRESS,
 		       32, 8, fast->bitmap, fast->bitmap_size, false);
 }
@@ -683,7 +684,7 @@ static const struct dma_map_ops fast_smmu_dma_ops = {
  * fast_smmu_attach_device function.
  */
 static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized(
-	dma_addr_t base, size_t size)
+	dma_addr_t base, u64 size)
 {
 	struct dma_fast_smmu_mapping *fast;
 
@@ -696,7 +697,11 @@ static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized(
 	fast->num_4k_pages = size >> FAST_PAGE_SHIFT;
 	fast->bitmap_size = BITS_TO_LONGS(fast->num_4k_pages) * sizeof(long);
 
-	fast->bitmap = kzalloc(fast->bitmap_size, GFP_KERNEL);
+	fast->bitmap = kzalloc(fast->bitmap_size, GFP_KERNEL | __GFP_NOWARN |
+								__GFP_NORETRY);
+	if (!fast->bitmap)
+		fast->bitmap = vzalloc(fast->bitmap_size);
+
 	if (!fast->bitmap)
 		goto err2;
 
@@ -726,7 +731,7 @@ int fast_smmu_attach_device(struct device *dev,
 	int atomic_domain = 1;
 	struct iommu_domain *domain = mapping->domain;
 	struct iommu_pgtbl_info info;
-	size_t size = mapping->bits << PAGE_SHIFT;
+	u64 size = (u64)mapping->bits << PAGE_SHIFT;
 
 	if (mapping->base + size > (SZ_1G * 4ULL))
 		return -EINVAL;
@@ -780,7 +785,7 @@ void fast_smmu_detach_device(struct device *dev,
 	dev->archdata.mapping = NULL;
 	set_dma_ops(dev, NULL);
 
-	kfree(mapping->fast->bitmap);
+	kvfree(mapping->fast->bitmap);
 	kfree(mapping->fast);
 }
 EXPORT_SYMBOL(fast_smmu_detach_device);
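
The bitmap allocation change above is the usual "try kmalloc cheaply, then fall back to vmalloc" idiom for buffers that can be large but do not need to be physically contiguous; kvfree() in the detach path handles either origin. Condensed into a minimal sketch:

static void *demo_big_zalloc(size_t size)
{
	/* Cheap attempt first: no OOM warning, no aggressive reclaim. */
	void *buf = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);

	if (!buf)
		buf = vzalloc(size);	/* virtually contiguous is enough */
	return buf;			/* release with kvfree() */
}
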
diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c
index 85fe317..9b13fce 100644
--- a/drivers/iommu/io-pgtable-fast.c
+++ b/drivers/iommu/io-pgtable-fast.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
 #include <linux/types.h>
 #include <linux/io-pgtable-fast.h>
 #include <asm/cacheflush.h>
+#include <linux/vmalloc.h>
 
 #include "io-pgtable.h"
 
@@ -268,11 +269,18 @@ static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 	return size;
 }
 
+#if defined(CONFIG_ARM64)
+#define FAST_PGDNDX(va) (((va) & 0x7fc0000000) >> 27)
+#elif defined(CONFIG_ARM)
+#define FAST_PGDNDX(va) (((va) & 0xc0000000) >> 27)
+#endif
+
 static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops,
 					  unsigned long iova)
 {
 	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
 	av8l_fast_iopte pte, *pgdp, *pudp, *pmdp;
+	unsigned long pgd;
 	phys_addr_t phys;
 	const unsigned long pts = AV8L_FAST_PTE_TYPE_SHIFT;
 	const unsigned long ptm = AV8L_FAST_PTE_TYPE_MASK;
@@ -282,8 +290,9 @@ static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops,
 
 	/* TODO: clean up some of these magic numbers... */
 
-	pgdp = (av8l_fast_iopte *)
-		(((unsigned long)data->pgd) | ((iova & 0x7fc0000000) >> 27));
+	pgd = (unsigned long)data->pgd | FAST_PGDNDX(iova);
+	pgdp = (av8l_fast_iopte *)pgd;
+
 	pte = *pgdp;
 	if (((pte >> pts) & ptm) != ptt)
 		return 0;
@@ -345,7 +354,12 @@ av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data,
 	int i, j, pg = 0;
 	struct page **pages, *page;
 
-	pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, GFP_KERNEL);
+	pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, GFP_KERNEL |
+					__GFP_NOWARN | __GFP_NORETRY);
+
+	if (!pages)
+		pages = vmalloc(sizeof(*pages) * NUM_PGTBL_PAGES);
+
 	if (!pages)
 		return -ENOMEM;
 
@@ -414,7 +428,7 @@ av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data,
 	for (i = 0; i < pg; ++i)
 		__free_page(pages[i]);
 err_free_pages_arr:
-	kfree(pages);
+	kvfree(pages);
 	return -ENOMEM;
 }
 
@@ -473,6 +487,9 @@ av8l_fast_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 
 	reg |= (64ULL - cfg->ias) << AV8L_FAST_TCR_T0SZ_SHIFT;
 	reg |= AV8L_FAST_TCR_EPD1_FAULT << AV8L_FAST_TCR_EPD1_SHIFT;
+#if defined(CONFIG_ARM)
+	reg |= ARM_32_LPAE_TCR_EAE;
+#endif
 	cfg->av8l_fast_cfg.tcr = reg;
 
 	/* MAIRs */
@@ -512,7 +529,7 @@ static void av8l_fast_free_pgtable(struct io_pgtable *iop)
 	vunmap(data->pmds);
 	for (i = 0; i < NUM_PGTBL_PAGES; ++i)
 		__free_page(data->pages[i]);
-	kfree(data->pages);
+	kvfree(data->pages);
 	kfree(data);
 }
 
@@ -560,7 +577,7 @@ static bool av8l_fast_range_has_specific_mapping(struct io_pgtable_ops *ops,
 						 const phys_addr_t phys_start,
 						 const size_t size)
 {
-	unsigned long iova = iova_start;
+	u64 iova = iova_start;
 	phys_addr_t phys = phys_start;
 
 	while (iova < (iova_start + size)) {
@@ -576,11 +593,12 @@ static bool av8l_fast_range_has_specific_mapping(struct io_pgtable_ops *ops,
 static int __init av8l_fast_positive_testing(void)
 {
 	int failed = 0;
-	unsigned long iova;
+	u64 iova;
 	struct io_pgtable_ops *ops;
 	struct io_pgtable_cfg cfg;
 	struct av8l_fast_io_pgtable *data;
 	av8l_fast_iopte *pmds;
+	u64 max = SZ_1G * 4ULL - 1;
 
 	cfg = (struct io_pgtable_cfg) {
 		.quirks = 0,
@@ -600,19 +618,18 @@ static int __init av8l_fast_positive_testing(void)
 	pmds = data->pmds;
 
 	/* map the entire 4GB VA space with 4K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_4K) {
+	for (iova = 0; iova < max; iova += SZ_4K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_4K, IOMMU_READ))) {
 			failed++;
 			continue;
 		}
 	}
-
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_4K) {
+	for (iova = 0; iova < max; iova += SZ_4K) {
 		if (WARN_ON(ops->unmap(ops, iova, SZ_4K) != SZ_4K))
 			failed++;
 	}
@@ -621,7 +638,7 @@ static int __init av8l_fast_positive_testing(void)
 	av8l_fast_clear_stale_ptes(pmds, false);
 
 	/* map the entire 4GB VA space with 8K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_8K) {
+	for (iova = 0; iova < max; iova += SZ_8K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_8K, IOMMU_READ))) {
 			failed++;
 			continue;
@@ -629,11 +646,11 @@ static int __init av8l_fast_positive_testing(void)
 	}
 
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all with 8K unmap calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_8K) {
+	for (iova = 0; iova < max; iova += SZ_8K) {
 		if (WARN_ON(ops->unmap(ops, iova, SZ_8K) != SZ_8K))
 			failed++;
 	}
@@ -642,7 +659,7 @@ static int __init av8l_fast_positive_testing(void)
 	av8l_fast_clear_stale_ptes(pmds, false);
 
 	/* map the entire 4GB VA space with 16K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_16K) {
+	for (iova = 0; iova < max; iova += SZ_16K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_16K, IOMMU_READ))) {
 			failed++;
 			continue;
@@ -650,11 +667,11 @@ static int __init av8l_fast_positive_testing(void)
 	}
 
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_16K) {
+	for (iova = 0; iova < max; iova += SZ_16K) {
 		if (WARN_ON(ops->unmap(ops, iova, SZ_16K) != SZ_16K))
 			failed++;
 	}
@@ -663,7 +680,7 @@ static int __init av8l_fast_positive_testing(void)
 	av8l_fast_clear_stale_ptes(pmds, false);
 
 	/* map the entire 4GB VA space with 64K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_64K) {
+	for (iova = 0; iova < max; iova += SZ_64K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_64K, IOMMU_READ))) {
 			failed++;
 			continue;
@@ -671,11 +688,11 @@ static int __init av8l_fast_positive_testing(void)
 	}
 
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all at once */
-	if (WARN_ON(ops->unmap(ops, 0, SZ_1G * 4UL) != SZ_1G * 4UL))
+	if (WARN_ON(ops->unmap(ops, 0, max) != max))
 		failed++;
 
 	free_io_pgtable_ops(ops);
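
The new FAST_PGDNDX() macro just turns the PGD index encoded in the IOVA into a byte offset into the top-level table of 8-byte entries, replacing the open-coded constant; on 32-bit ARM only bits [31:30] exist, hence the smaller 0xc0000000 mask. A worked example for the arm64 case:

/*
 * arm64, 4K granule, 39-bit IOVA: bits [38:30] select the PGD entry.
 * Example: iova = 0x123456000
 *   index       = (iova >> 30) & 0x1ff              = 4
 *   byte offset = index * sizeof(av8l_fast_iopte)   = 4 * 8 = 32
 *   which is exactly (iova & 0x7fc0000000) >> 27    = 0x20
 */
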
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 45ffb40..5730126 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -822,7 +822,7 @@ static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
 	if (!virt)
 		goto out;
 
-	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
 	if (!mapping) {
 		seq_puts(s, "fast_smmu_create_mapping failed\n");
 		goto out_kfree;
@@ -922,8 +922,8 @@ static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
 static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
 {
 	int i, ret = 0;
-	unsigned long iova;
-	const unsigned long max = SZ_1G * 4UL;
+	u64 iova;
+	const u64 max = SZ_1G * 4ULL - 1;
 	void *virt;
 	phys_addr_t phys;
 	dma_addr_t dma_addr;
@@ -995,8 +995,8 @@ static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
 	}
 
 	/* we're all full again. unmap everything. */
-	for (dma_addr = 0; dma_addr < max; dma_addr += SZ_8K)
-		dma_unmap_single(dev, dma_addr, SZ_8K, DMA_TO_DEVICE);
+	for (iova = 0; iova < max; iova += SZ_8K)
+		dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);
 
 out:
 	free_pages((unsigned long)virt, get_order(SZ_8K));
@@ -1029,7 +1029,7 @@ static int __rand_va_sweep(struct device *dev, struct seq_file *s,
 			   const size_t size)
 {
 	u64 iova;
-	const unsigned long max = SZ_1G * 4UL;
+	const u64 max = SZ_1G * 4ULL - 1;
 	int i, remapped, unmapped, ret = 0;
 	void *virt;
 	dma_addr_t dma_addr, dma_addr2;
@@ -1061,9 +1061,9 @@ static int __rand_va_sweep(struct device *dev, struct seq_file *s,
 	fib_init(&fib);
 	for (iova = get_next_fib(&fib) * size;
 	     iova < max - size;
-	     iova = get_next_fib(&fib) * size) {
-		dma_addr = iova;
-		dma_addr2 = max - size - iova;
+	     iova = (u64)get_next_fib(&fib) * size) {
+		dma_addr = (dma_addr_t)(iova);
+		dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
 		if (dma_addr == dma_addr2) {
 			WARN(1,
 			"%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
@@ -1089,8 +1089,8 @@ static int __rand_va_sweep(struct device *dev, struct seq_file *s,
 		ret = -EINVAL;
 	}
 
-	for (dma_addr = 0; dma_addr < max; dma_addr += size)
-		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
+	for (iova = 0; iova < max; iova += size)
+		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
 
 out:
 	free_pages((unsigned long)virt, get_order(size));
@@ -1118,10 +1118,11 @@ static int __check_mapping(struct device *dev, struct iommu_domain *domain,
 static int __full_va_sweep(struct device *dev, struct seq_file *s,
 			   const size_t size, struct iommu_domain *domain)
 {
-	unsigned long iova;
+	u64 iova;
 	dma_addr_t dma_addr;
 	void *virt;
 	phys_addr_t phys;
+	const u64 max = SZ_1G * 4ULL - 1;
 	int ret = 0, i;
 
 	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
@@ -1136,7 +1137,7 @@ static int __full_va_sweep(struct device *dev, struct seq_file *s,
 	}
 	phys = virt_to_phys(virt);
 
-	for (iova = 0, i = 0; iova < SZ_1G * 4UL; iova += size, ++i) {
+	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
 		unsigned long expected = iova;
 
 		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
@@ -1184,8 +1185,8 @@ static int __full_va_sweep(struct device *dev, struct seq_file *s,
 	}
 
 out:
-	for (dma_addr = 0; dma_addr < SZ_1G * 4UL; dma_addr += size)
-		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
+	for (iova = 0; iova < max; iova += size)
+		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
 
 	free_pages((unsigned long)virt, get_order(size));
 	return ret;
@@ -1374,7 +1375,8 @@ static int __apply_to_new_mapping(struct seq_file *s,
 	int ret = -EINVAL, fast = 1;
 	phys_addr_t pt_phys;
 
-	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+						(SZ_1G * 4ULL));
 	if (!mapping)
 		goto out;
 
@@ -1443,7 +1445,9 @@ static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
 	size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
 	int ret = -EINVAL;
 
-	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	/* Use a size of 4GB - 1 so it still fits in a 32-bit unsigned long */
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+						(SZ_1G * 4ULL - 1));
 	if (!mapping)
 		goto out;
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 964d6c8..3dca3e6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -18,6 +18,7 @@
 #include "ipa_i.h"
 #include "ipa_trace.h"
 
+#define IPA_WAN_AGGR_PKT_CNT 5
 #define IPA_LAST_DESC_CNT 0xFFFF
 #define POLLING_INACTIVITY_RX 40
 #define POLLING_INACTIVITY_TX 40
@@ -1099,16 +1100,18 @@ int ipa2_rx_poll(u32 clnt_hdl, int weight)
 			break;
 
 		ipa_wq_rx_common(ep->sys, iov.size);
-		cnt += 5;
+		cnt += IPA_WAN_AGGR_PKT_CNT;
 	};
 
-	if (cnt == 0) {
+	if (cnt == 0 || cnt < weight) {
 		ep->inactive_cycles++;
 		ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
 
 		if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
 			ep->switch_to_intr = true;
 			delay = 0;
+		} else if (cnt < weight) {
+			delay = 0;
 		}
 		queue_delayed_work(ep->sys->wq,
 			&ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
@@ -3176,14 +3179,9 @@ static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
 				sys->repl_hdlr =
 				   ipa_replenish_rx_cache;
 			}
-			if (in->napi_enabled) {
-				sys->rx_pool_sz =
-					   IPA_WAN_NAPI_CONS_RX_POOL_SZ;
-				if (in->recycle_enabled) {
-					sys->repl_hdlr =
-					   ipa_replenish_rx_cache_recycle;
-				}
-			}
+			if (in->napi_enabled && in->recycle_enabled)
+				sys->repl_hdlr =
+					ipa_replenish_rx_cache_recycle;
 			sys->ep->wakelock_client =
 			   IPA_WAKELOCK_REF_CLIENT_WAN_RX;
 			in->ipa_ep_cfg.aggr.aggr_sw_eof_active
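
The ipa2_rx_poll() change charges IPA_WAN_AGGR_PKT_CNT (5) against the NAPI weight for each completed aggregated transfer, on the assumption that one transfer can hand up to five packets to the stack; the new cnt < weight case then lets the driver signal NAPI completion and re-arm without delay when the ring drains before the budget is spent. Rough arithmetic, assuming the rmnet driver's NAPI_WEIGHT of 60 is the weight passed in:

	/* Budget bookkeeping (sketch):
	 *   charge per aggregated transfer = IPA_WAN_AGGR_PKT_CNT = 5
	 *   transfers per full poll cycle  = 60 / 5 = 12
	 * cnt < weight means the ring drained early, so the poll loop either
	 * re-queues itself with zero delay or switches back to interrupt
	 * mode after enough idle cycles.
	 */
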
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 672c620..cd575fe 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -51,8 +51,6 @@
 #define IPA_UC_FINISH_MAX 6
 #define IPA_UC_WAIT_MIN_SLEEP 1000
 #define IPA_UC_WAII_MAX_SLEEP 1200
-#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
-#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
 
 #define IPA_MAX_STATUS_STAT_NUM 30
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index db732c5..0af9387 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -64,6 +64,7 @@
 #define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
 
 #define NAPI_WEIGHT 60
+#define IPA_WWAN_CONS_DESC_FIFO_SZ 1024
 
 static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
 static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
@@ -102,6 +103,7 @@ struct ipa_rmnet_plat_drv_res {
 	bool ipa_loaduC;
 	bool ipa_advertise_sg_support;
 	bool ipa_napi_enable;
+	u32 wan_rx_desc_size;
 };
 
 static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
@@ -1310,10 +1312,8 @@ static int handle_ingress_format(struct net_device *dev,
 	ipa_to_apps_ep_cfg.priv = dev;
 
 	ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable;
-	if (ipa_to_apps_ep_cfg.napi_enabled)
-		ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
-	else
-		ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	ipa_to_apps_ep_cfg.desc_fifo_sz =
+		ipa_rmnet_res.wan_rx_desc_size * sizeof(struct sps_iovec);
 
 	mutex_lock(&ipa_to_apps_pipe_handle_guard);
 	if (atomic_read(&is_ssr)) {
@@ -1944,6 +1944,9 @@ static struct notifier_block ssr_notifier = {
 static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
 		struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
 {
+	int result;
+
+	ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
 	ipa_rmnet_drv_res->ipa_rmnet_ssr =
 			of_property_read_bool(pdev->dev.of_node,
 			"qcom,rmnet-ipa-ssr");
@@ -1966,6 +1969,18 @@ static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
 			"qcom,ipa-napi-enable");
 	pr_info("IPA Napi Enable = %s\n",
 		ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
+
+	/* Get IPA WAN RX desc fifo size */
+	result = of_property_read_u32(pdev->dev.of_node,
+			"qcom,wan-rx-desc-size",
+			&ipa_rmnet_drv_res->wan_rx_desc_size);
+	if (result)
+		pr_info("using default for wan-rx-desc-size = %u\n",
+				ipa_rmnet_drv_res->wan_rx_desc_size);
+	else
+		IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n",
+				ipa_rmnet_drv_res->wan_rx_desc_size);
+
 	return 0;
 }
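
With the DT override in place, the IPA v2 WAN consumer FIFO is sized directly from the descriptor count rather than from the old NAPI/non-NAPI defines. Assuming the 8-byte sps_iovec element size noted in the IPA v3 IPA_FIFO_ELEMENT_SIZE comment, the defaults work out as:

	/* Default IPA v2 sizing when qcom,wan-rx-desc-size is absent:
	 *   wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ = 1024
	 *   desc_fifo_sz     = 1024 * sizeof(struct sps_iovec)
	 *                    = 1024 * 8 = 8192 bytes
	 */
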
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 343cc14..62e68dd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -21,6 +21,7 @@
 #include "ipahal/ipahal.h"
 #include "ipahal/ipahal_fltrt.h"
 
+#define IPA_WAN_AGGR_PKT_CNT 5
 #define IPA_LAST_DESC_CNT 0xFFFF
 #define POLLING_INACTIVITY_RX 40
 #define POLLING_MIN_SLEEP_RX 1010
@@ -60,7 +61,6 @@
 #define IPA_ODU_RX_POOL_SZ 64
 #define IPA_SIZE_DL_CSUM_META_TRAILER 8
 
-#define IPA_GSI_EVT_RING_LEN 4096
 #define IPA_GSI_MAX_CH_LOW_WEIGHT 15
 #define IPA_GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
 
@@ -73,12 +73,6 @@
 
 #define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000)
 
-/*
- * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but
- * IPA users still use sps_iovec size as FIFO element size.
- */
-#define IPA_FIFO_ELEMENT_SIZE 8
-
 static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
 static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
 static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
@@ -2676,8 +2670,7 @@ static void ipa3_free_rx_wrapper(struct ipa3_rx_pkt_wrapper *rk_pkt)
 static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 		struct ipa3_sys_context *sys)
 {
-	if (in->client == IPA_CLIENT_APPS_CMD_PROD ||
-		in->client == IPA_CLIENT_APPS_WAN_PROD) {
+	if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
 		sys->policy = IPA_POLICY_INTR_MODE;
 		sys->use_comm_evt_ring = false;
 		return 0;
@@ -2742,9 +2735,6 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 					sys->repl_hdlr =
 					   ipa3_replenish_rx_cache;
 				}
-				if (in->napi_enabled)
-					sys->rx_pool_sz =
-					   IPA_WAN_NAPI_CONS_RX_POOL_SZ;
 				if (in->napi_enabled && in->recycle_enabled)
 					sys->repl_hdlr =
 					 ipa3_replenish_rx_cache_recycle;
@@ -3441,7 +3431,13 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
 		gsi_evt_ring_props.re_size =
 			GSI_EVT_RING_RE_SIZE_16B;
 
+		/*
+		 * GSI ring length is calculated based on the desc_fifo_sz
+		 * which was meant to define the BAM desc fifo. GSI descriptors
+		 * are 16B as opposed to 8B for BAM.
+		 */
 		gsi_evt_ring_props.ring_len = 2 * in->desc_fifo_sz;
+
 		gsi_evt_ring_props.ring_base_vaddr =
 			dma_alloc_coherent(ipa3_ctx->pdev,
 			gsi_evt_ring_props.ring_len,
@@ -3686,16 +3682,18 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight)
 			break;
 
 		ipa3_wq_rx_common(ep->sys, mem_info.size);
-		cnt += 5;
+		cnt += IPA_WAN_AGGR_PKT_CNT;
 	};
 
-	if (cnt == 0) {
+	if (cnt == 0 || cnt < weight) {
 		ep->inactive_cycles++;
 		ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
 
 		if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
 			ep->switch_to_intr = true;
 			delay = 0;
+		} else if (cnt < weight) {
+			delay = 0;
 		}
 		queue_delayed_work(ep->sys->wq,
 			&ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 7419a64..90577c0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -54,8 +54,11 @@
 #define IPA_UC_FINISH_MAX 6
 #define IPA_UC_WAIT_MIN_SLEEP 1000
 #define IPA_UC_WAII_MAX_SLEEP 1200
-#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
-#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
+/*
+ * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but
+ * IPA users still use sps_iovec size as FIFO element size.
+ */
+#define IPA_FIFO_ELEMENT_SIZE 8
 
 #define IPA_MAX_STATUS_STAT_NUM 30
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index cf9775b..9e04518 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -66,6 +66,7 @@
 	((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? \
 	  rmnet_ipa3_ctx->wwan_priv->net : NULL)
 
+#define IPA_WWAN_CONS_DESC_FIFO_SZ 256
 
 static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
 static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
@@ -90,6 +91,7 @@ struct ipa3_rmnet_plat_drv_res {
 	bool ipa_loaduC;
 	bool ipa_advertise_sg_support;
 	bool ipa_napi_enable;
+	u32 wan_rx_desc_size;
 };
 
 /**
@@ -1297,7 +1299,7 @@ static int handle3_ingress_format(struct net_device *dev,
 			ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit =
 			   in->u.ingress_format.agg_count;
 
-			if (ipa_wan_ep_cfg->napi_enabled) {
+			if (ipa3_rmnet_res.ipa_napi_enable) {
 				ipa_wan_ep_cfg->recycle_enabled = true;
 				ep_cfg = (struct rmnet_phys_ep_conf_s *)
 				   rcu_dereference(dev->rx_handler_data);
@@ -1325,10 +1327,8 @@ static int handle3_ingress_format(struct net_device *dev,
 	ipa_wan_ep_cfg->priv = dev;
 
 	ipa_wan_ep_cfg->napi_enabled = ipa3_rmnet_res.ipa_napi_enable;
-	if (ipa_wan_ep_cfg->napi_enabled)
-		ipa_wan_ep_cfg->desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
-	else
-		ipa_wan_ep_cfg->desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	ipa_wan_ep_cfg->desc_fifo_sz =
+		ipa3_rmnet_res.wan_rx_desc_size * IPA_FIFO_ELEMENT_SIZE;
 
 	mutex_lock(&rmnet_ipa3_ctx->pipe_handle_guard);
 
@@ -2012,6 +2012,9 @@ static struct notifier_block ipa3_ssr_notifier = {
 static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
 		struct ipa3_rmnet_plat_drv_res *ipa_rmnet_drv_res)
 {
+	int result;
+
+	ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
 	ipa_rmnet_drv_res->ipa_rmnet_ssr =
 			of_property_read_bool(pdev->dev.of_node,
 			"qcom,rmnet-ipa-ssr");
@@ -2034,6 +2037,18 @@ static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
 			"qcom,ipa-napi-enable");
 	pr_info("IPA Napi Enable = %s\n",
 		ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
+
+	/* Get IPA WAN RX desc fifo size */
+	result = of_property_read_u32(pdev->dev.of_node,
+			"qcom,wan-rx-desc-size",
+			&ipa_rmnet_drv_res->wan_rx_desc_size);
+	if (result)
+		pr_info("using default for wan-rx-desc-size = %u\n",
+				ipa_rmnet_drv_res->wan_rx_desc_size);
+	else
+		IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n",
+				ipa_rmnet_drv_res->wan_rx_desc_size);
+
 	return 0;
 }
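
On IPA v3 the same property feeds both the descriptor FIFO size and, indirectly, the GSI event ring: desc_fifo_sz = wan_rx_desc_size * IPA_FIFO_ELEMENT_SIZE, and ipa_gsi_setup_channel() doubles that byte count because GSI ring elements are 16 B rather than the 8 B assumed by the FIFO math, so the ring still holds the same number of descriptors. With the v3 default of 256 this gives, roughly:

	/* Default IPA v3 sizing when qcom,wan-rx-desc-size is absent:
	 *   wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ = 256
	 *   desc_fifo_sz     = 256 * IPA_FIFO_ELEMENT_SIZE = 256 * 8 = 2048 B
	 *   GSI event ring   = 2 * desc_fifo_sz = 4096 B
	 *                    = 4096 / 16 (GSI_EVT_RING_RE_SIZE_16B) = 256 events
	 */
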
 
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 712de81..7b91717 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4308,15 +4308,25 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 	 * mode hence full reinit is required to move link to HS speeds.
 	 */
 	if (ret || hba->full_init_linereset) {
+		int err;
+
 		hba->full_init_linereset = false;
 		ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
 		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
 			__func__, ret);
 		/*
-		 * If link recovery fails then return error so that caller
-		 * don't retry the hibern8 enter again.
+		 * If link recovery fails, return the error code (-ENOLINK)
+		 * returned by ufshcd_link_recovery().
+		 * If link recovery succeeds, return -EAGAIN so that the
+		 * caller retries the hibern8 enter.
 		 */
-		ret = ufshcd_link_recovery(hba);
+		err = ufshcd_link_recovery(hba);
+		if (err) {
+			dev_err(hba->dev, "%s: link recovery failed", __func__);
+			ret = err;
+		} else {
+			ret = -EAGAIN;
+		}
 	} else {
 		dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
 			ktime_to_us(ktime_get()));
@@ -4333,8 +4343,8 @@ int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 		ret = __ufshcd_uic_hibern8_enter(hba);
 		if (!ret)
 			goto out;
-		/* Unable to recover the link, so no point proceeding */
-		 if (ret == -ENOLINK)
+		else if (ret != -EAGAIN)
+			/* Unable to recover the link, so no point proceeding */
 			BUG();
 	}
 out:
diff --git a/include/dt-bindings/clock/mdss-10nm-pll-clk.h b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
new file mode 100644
index 0000000..c1350ce
--- /dev/null
+++ b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_10NM_PLL_CLK_H
+#define __MDSS_10NM_PLL_CLK_H
+
+/* DSI PLL clocks */
+#define VCO_CLK_0		0
+#define BITCLK_SRC_0_CLK	1
+#define BYTECLK_SRC_0_CLK	2
+#define POST_BIT_DIV_0_CLK	3
+#define POST_VCO_DIV_0_CLK	4
+#define BYTECLK_MUX_0_CLK	5
+#define PCLK_SRC_MUX_0_CLK	6
+#define PCLK_SRC_0_CLK		7
+#define PCLK_MUX_0_CLK		8
+#define VCO_CLK_1		9
+#define BITCLK_SRC_1_CLK	10
+#define BYTECLK_SRC_1_CLK	11
+#define POST_BIT_DIV_1_CLK	12
+#define POST_VCO_DIV_1_CLK	13
+#define BYTECLK_MUX_1_CLK	14
+#define PCLK_SRC_MUX_1_CLK	15
+#define PCLK_SRC_1_CLK		16
+#define PCLK_MUX_1_CLK		17
+#endif
diff --git a/include/dt-bindings/msm/power-on.h b/include/dt-bindings/msm/power-on.h
new file mode 100644
index 0000000..f43841e
--- /dev/null
+++ b/include/dt-bindings/msm/power-on.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_POWER_ON_H__
+#define __MSM_POWER_ON_H__
+
+#define PON_POWER_OFF_RESERVED		0x00
+#define PON_POWER_OFF_WARM_RESET	0x01
+#define PON_POWER_OFF_SHUTDOWN		0x04
+#define PON_POWER_OFF_DVDD_SHUTDOWN	0x05
+#define PON_POWER_OFF_HARD_RESET	0x07
+#define PON_POWER_OFF_DVDD_HARD_RESET	0x08
+#define PON_POWER_OFF_MAX_TYPE		0x10
+
+#endif
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 113d325..41f376d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -10949,6 +10949,7 @@ static int __init perf_event_sysfs_init(void)
 }
 device_initcall(perf_event_sysfs_init);
 
+#ifdef CONFIG_HOTPLUG_CPU
 static int perf_cpu_hp_init(void)
 {
 	int ret;
@@ -10963,6 +10964,7 @@ static int perf_cpu_hp_init(void)
 	return ret;
 }
 subsys_initcall(perf_cpu_hp_init);
+#endif
 
 #ifdef CONFIG_CGROUP_PERF
 static struct cgroup_subsys_state *
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8e82002..f61724f4f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2058,8 +2058,12 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
  * potentially hurts the reliability of high-order allocations when under
  * intense memory pressure but failed atomic allocations should be easier
  * to recover from than an OOM.
+ *
+ * If @force is true, try to unreserve a pageblock even if this exhausts
+ * the highatomic reserve completely.
  */
-static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
+static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
+						bool force)
 {
 	struct zonelist *zonelist = ac->zonelist;
 	unsigned long flags;
@@ -2067,11 +2071,16 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
 	struct zone *zone;
 	struct page *page;
 	int order;
+	bool ret;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
 								ac->nodemask) {
-		/* Preserve at least one pageblock */
-		if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
+		/*
+		 * Preserve at least one pageblock unless memory pressure
+		 * is really high.
+		 */
+		if (!force && zone->nr_reserved_highatomic <=
+					pageblock_nr_pages)
 			continue;
 
 		spin_lock_irqsave(&zone->lock, flags);
@@ -2085,13 +2094,25 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
 				continue;
 
 			/*
-			 * It should never happen but changes to locking could
-			 * inadvertently allow a per-cpu drain to add pages
-			 * to MIGRATE_HIGHATOMIC while unreserving so be safe
-			 * and watch for underflows.
+			 * In the page freeing path the migratetype change is
+			 * racy, so we can encounter several free pages in a
+			 * pageblock in this loop even though we changed the
+			 * pageblock type from highatomic to ac->migratetype.
+			 * So the count should only be adjusted once.
 			 */
-			zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
-				zone->nr_reserved_highatomic);
+			if (get_pageblock_migratetype(page) ==
+							MIGRATE_HIGHATOMIC) {
+				/*
+				 * It should never happen but changes to
+				 * locking could inadvertently allow a per-cpu
+				 * drain to add pages to MIGRATE_HIGHATOMIC
+				 * while unreserving so be safe and watch for
+				 * underflows.
+				 */
+				zone->nr_reserved_highatomic -= min(
+						pageblock_nr_pages,
+						zone->nr_reserved_highatomic);
+			}
 
 			/*
 			 * Convert to ac->migratetype and avoid the normal
@@ -2103,12 +2124,16 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
 			 * may increase.
 			 */
 			set_pageblock_migratetype(page, ac->migratetype);
-			move_freepages_block(zone, page, ac->migratetype);
-			spin_unlock_irqrestore(&zone->lock, flags);
-			return;
+			ret = move_freepages_block(zone, page, ac->migratetype);
+			if (ret) {
+				spin_unlock_irqrestore(&zone->lock, flags);
+				return ret;
+			}
 		}
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
+
+	return false;
 }
 
 /* Remove an element from the buddy allocator from the fallback list */
@@ -2133,7 +2158,8 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 
 		page = list_first_entry(&area->free_list[fallback_mt],
 						struct page, lru);
-		if (can_steal)
+		if (can_steal &&
+			get_pageblock_migratetype(page) != MIGRATE_HIGHATOMIC)
 			steal_suitable_fallback(zone, page, start_migratetype);
 
 		/* Remove the page from the freelists */
@@ -2542,7 +2568,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
 		struct page *endpage = page + (1 << order) - 1;
 		for (; page < endpage; page += pageblock_nr_pages) {
 			int mt = get_pageblock_migratetype(page);
-			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
+			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
+				&& mt != MIGRATE_HIGHATOMIC)
 				set_pageblock_migratetype(page,
 							  MIGRATE_MOVABLE);
 		}
@@ -3313,7 +3340,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 	 * Shrink them them and try again
 	 */
 	if (!page && !drained) {
-		unreserve_highatomic_pageblock(ac);
+		unreserve_highatomic_pageblock(ac, false);
 		drain_all_pages(NULL);
 		drained = true;
 		goto retry;
@@ -3430,8 +3457,10 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
 	 * Make sure we converge to OOM if we cannot make any progress
 	 * several times in the row.
 	 */
-	if (*no_progress_loops > MAX_RECLAIM_RETRIES)
-		return false;
+	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
+		/* Before OOM, exhaust highatomic_reserve */
+		return unreserve_highatomic_pageblock(ac, true);
+	}
 
 	/*
 	 * Keep reclaiming pages while there is a chance this will lead