Merge "msm: ipa: add support for new QMI IDL" into msm-4.8
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 0cebc4d..0450145 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -86,11 +86,11 @@
 - MSMCOBALT
   compatible = "qcom,msmcobalt"
 
-- MSMSKUNK
-  compatible = "qcom,msmskunk"
+- SDM845
+  compatible = "qcom,sdm845"
 
-- SDMBAT
-  compatible = "qcom,sdmbat"
+- SDM830
+  compatible = "qcom,sdm830"
 
 - MSM8952
   compatible = "qcom,msm8952"
@@ -258,15 +258,15 @@
 compatible = "qcom,msmcobalt-sim"
 compatible = "qcom,msmcobalt-rumi"
 compatible = "qcom,msmcobalt-cdp"
-compatible = "qcom,msmskunk-sim"
-compatible = "qcom,msmskunk-rumi"
-compatible = "qcom,msmskunk-cdp"
-compatible = "qcom,msmskunk-mtp"
-compatible = "qcom,msmskunk-mtp"
-compatible = "qcom,sdmbat-sim"
-compatible = "qcom,sdmbat-rumi"
-compatible = "qcom,sdmbat-cdp"
-compatible = "qcom,sdmbat-mtp"
+compatible = "qcom,sdm845-sim"
+compatible = "qcom,sdm845-rumi"
+compatible = "qcom,sdm845-cdp"
+compatible = "qcom,sdm845-mtp"
+compatible = "qcom,sdm845-mtp"
+compatible = "qcom,sdm830-sim"
+compatible = "qcom,sdm830-rumi"
+compatible = "qcom,sdm830-cdp"
+compatible = "qcom,sdm830-mtp"
 compatible = "qcom,msm8952-rumi"
 compatible = "qcom,msm8952-sim"
 compatible = "qcom,msm8952-qrd"
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
index a4672e7..90ddc27 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
@@ -79,8 +79,8 @@
 		reg-names = "llcc_base";
 		status = "disabled";
 
-		llcc: qcom,msmskunk-llcc {
-			compatible = "qcom,msmskunk-llcc";
+		llcc: qcom,sdm845-llcc {
+			compatible = "qcom,sdm845-llcc";
 			#cache-cells = <1>;
 			max-slices = <32>;
 		};
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 42a9ec1..7405115 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -16,7 +16,7 @@
 			"qcom,gcc-msm8974pro-ac"
 			"qcom,gcc-msm8996"
 			"qcom,gcc-mdm9615"
-			"qcom,gcc-msmskunk"
+			"qcom,gcc-sdm845"
 
 - reg : shall contain base register location and length
 - #clock-cells : shall contain 1
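
For reference, a minimal provider node using the renamed compatible, modeled on the clock_gcc node updated later in this patch (register values taken from there; #clock-cells per the binding text above):

	clock_gcc: qcom,gcc@100000 {
		compatible = "qcom,gcc-sdm845";
		reg = <0x100000 0x1f0000>;
		reg-names = "cc_base";
		#clock-cells = <1>;
	};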
diff --git a/Documentation/devicetree/bindings/clock/qcom,videocc.txt b/Documentation/devicetree/bindings/clock/qcom,videocc.txt
index b65de71..5dc109d 100644
--- a/Documentation/devicetree/bindings/clock/qcom,videocc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,videocc.txt
@@ -2,7 +2,7 @@
 ----------------------------------------------------
 
 Required properties :
-- compatible : shall contain "qcom,video_cc-msmskunk"
+- compatible : shall contain "qcom,video_cc-sdm845"
 - reg : shall contain base register location and length
 - reg-names: names of registers listed in the same order as in
 	     the reg property.
@@ -14,7 +14,7 @@
 
 Example:
 	clock_videocc: qcom,videocc@ab00000 {
-		compatible = "qcom,video_cc-msmskunk";
+		compatible = "qcom,video_cc-sdm845";
 		reg = <0xab00000 0x10000>;
 		reg-names = "cc_base";
 		vdd_cx-supply = <&pmcobalt_s9_level>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdmbat-pinctrl b/Documentation/devicetree/bindings/pinctrl/qcom,sdm830-pinctrl
similarity index 96%
rename from Documentation/devicetree/bindings/pinctrl/qcom,sdmbat-pinctrl
rename to Documentation/devicetree/bindings/pinctrl/qcom,sdm830-pinctrl
index 9616d9d..0fe8a1b 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sdmbat-pinctrl
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdm830-pinctrl
@@ -1,12 +1,12 @@
-Qualcomm Technologies, Inc. SDMBAT TLMM block
+Qualcomm Technologies, Inc. SDM830 TLMM block
 
 This binding describes the Top Level Mode Multiplexer block found in the
-SDMBAT platform.
+SDM830 platform.
 
 - compatible:
 	Usage: required
 	Value type: <string>
-	Definition: must be "qcom,sdmbat-pinctrl"
+	Definition: must be "qcom,sdm830-pinctrl"
 
 - reg:
 	Usage: required
@@ -136,7 +136,7 @@
 Example:
 
 	tlmm: pinctrl@03800000 {
-		compatible = "qcom,sdmbat-pinctrl";
+		compatible = "qcom,sdm830-pinctrl";
 		reg = <0x03800000 0xc00000>;
 		interrupts = <0 208 0>;
 		gpio-controller;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msmskunk-pinctrl b/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl
similarity index 97%
rename from Documentation/devicetree/bindings/pinctrl/qcom,msmskunk-pinctrl
rename to Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl
index 10bbe56..9c26374 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msmskunk-pinctrl
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl
@@ -1,12 +1,12 @@
-Qualcomm Technologies, Inc. MSMSKUNK TLMM block
+Qualcomm Technologies, Inc. SDM845 TLMM block
 
 This binding describes the Top Level Mode Multiplexer block found in the
-MSMSKUNK platform.
+SDM845 platform.
 
 - compatible:
 	Usage: required
 	Value type: <string>
-	Definition: must be "qcom,msmskunk-pinctrl"
+	Definition: must be "qcom,sdm845-pinctrl"
 
 - reg:
 	Usage: required
@@ -176,7 +176,7 @@
 Example:
 
 	tlmm: pinctrl@03400000 {
-		compatible = "qcom,msmskunk-pinctrl";
+		compatible = "qcom,sdm845-pinctrl";
 		reg = <0x03800000 0xc00000>;
 		interrupts = <0 208 0>;
 		gpio-controller;
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index d0800d3..a1d7499 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -2309,7 +2309,7 @@
 
 Required properties:
 - compatible : "qcom,msm8998-asoc-snd-stub" for MSM8998 target.
-	       "qcom,msmskunk-asoc-snd-stub" for MSMSKUNK target.
+	       "qcom,sdm845-asoc-snd-stub" for SDM845 target.
 - qcom,model : The user-visible name of this sound card.
 - qcom,tasha-mclk-clk-freq : MCLK frequency value for tasha codec
 - asoc-platform: This is phandle list containing the references to platform device
@@ -2380,10 +2380,10 @@
 		qcom,wdsp-cmpnt-dev-name = "tavil_codec";
 	};
 
-* MSMSKUNK ASoC Machine driver
+* SDM845 ASoC Machine driver
 
 Required properties:
-- compatible : "qcom,msmskunk-asoc-snd-tavil"
+- compatible : "qcom,sdm845-asoc-snd-tavil"
 - qcom,model : The user-visible name of this sound card.
 - qcom,tavil-mclk-clk-freq : MCLK frequency value for tavil codec
 - qcom,audio-routing : A list of the connections between audio components.
@@ -2422,8 +2422,8 @@
 Example:
 
 	sound-tavil {
-		compatible = "qcom,msmskunk-asoc-snd-tavil";
-		qcom,model = "msmskunk-tavil-snd-card";
+		compatible = "qcom,sdm845-asoc-snd-tavil";
+		qcom,model = "sdm845-tavil-snd-card";
 		qcom,ext-disp-audio-rx;
 		qcom,wcn-btfm;
 		qcom,mi2s-audio-intf;
diff --git a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
index 30f2f6c..af1ba92 100644
--- a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
+++ b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
@@ -10,7 +10,7 @@
 - compatible        : compatible list, contains one of the following:
 		      "qcom,ufs-phy-qmp-14nm"
 		      "qcom,ufs-phy-qmp-v3"
-		      "qcom,ufs-phy-qrbtc-msmskunk"
+		      "qcom,ufs-phy-qrbtc-sdm845"
 according to the relevant phy in use.
 - reg               : should contain PHY register address space (mandatory),
 - reg-names         : indicates various resources passed to driver (via reg property) by name.
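
As a sketch of how the renamed PHY compatible is consumed, the sdm845-rumi.dtsi override later in this patch applies it to the existing UFS PHY nodes, roughly:

	&ufsphy_mem {
		compatible = "qcom,ufs-phy-qrbtc-sdm845";
		vdda-phy-supply = <&pm8998_l1>;
		vdda-pll-supply = <&pm8998_l2>;
	};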
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index c14034a..f7a21a6 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -119,22 +119,22 @@
 	help
 	  This enables support for the ARMv8 based Qualcomm chipsets.
 
-config ARCH_MSMSKUNK
-	bool "Enable Support for Qualcomm MSMSKUNK"
+config ARCH_SDM845
+	bool "Enable Support for Qualcomm SDM845"
 	depends on ARCH_QCOM
 	select COMMON_CLK_QCOM
 	select QCOM_GDSC
 	help
-	  This enables support for the MSMSKUNK chipset. If you do not
+	  This enables support for the SDM845 chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
-config ARCH_SDMBAT
-	bool "Enable Support for Qualcomm Technologies Inc. SDMBAT"
+config ARCH_SDM830
+	bool "Enable Support for Qualcomm Technologies Inc. SDM830"
 	depends on ARCH_QCOM
 	select COMMON_CLK_QCOM
 	select QCOM_GDSC
 	help
-	  This enables support for the SDMBAT chipset. If you do not
+	  This enables support for the SDM830 chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
 config ARCH_ROCKCHIP
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 3ed0b06..54acae6 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -2,15 +2,15 @@
 dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-mtp.dtb
 dtb-$(CONFIG_ARCH_QCOM)	+= apq8096-db820c.dtb
 
-dtb-$(CONFIG_ARCH_MSMSKUNK) += msmskunk-sim.dtb \
-	msmskunk-rumi.dtb \
-	msmskunk-mtp.dtb \
-	msmskunk-cdp.dtb
+dtb-$(CONFIG_ARCH_SDM845) += sdm845-sim.dtb \
+	sdm845-rumi.dtb \
+	sdm845-mtp.dtb \
+	sdm845-cdp.dtb
 
-dtb-$(CONFIG_ARCH_SDMBAT) += sdmbat-sim.dtb \
-	sdmbat-rumi.dtb \
-	sdmbat-mtp.dtb \
-	sdmbat-cdp.dtb
+dtb-$(CONFIG_ARCH_SDM830) += sdm830-sim.dtb \
+	sdm830-rumi.dtb \
+	sdm830-mtp.dtb \
+	sdm830-cdp.dtb
 
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-skunk.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msm-arm-smmu-skunk.dtsi
rename to arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msm-gdsc-skunk.dtsi b/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msm-gdsc-skunk.dtsi
rename to arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-cdp.dts b/arch/arm64/boot/dts/qcom/msmskunk-cdp.dts
deleted file mode 100644
index b1dd404..0000000
--- a/arch/arm64/boot/dts/qcom/msmskunk-cdp.dts
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-/dts-v1/;
-
-#include "msmskunk.dtsi"
-#include "msmskunk-cdp.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. MSM skunk v1 CDP";
-	compatible = "qcom,msmskunk-cdp", "qcom,msmskunk", "qcom,cdp";
-	qcom,board-id = <1 0>;
-};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-mtp.dts b/arch/arm64/boot/dts/qcom/msmskunk-mtp.dts
deleted file mode 100644
index d6a6ffb..0000000
--- a/arch/arm64/boot/dts/qcom/msmskunk-mtp.dts
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-/dts-v1/;
-
-#include "msmskunk.dtsi"
-#include "msmskunk-mtp.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. MSM skunk v1 MTP";
-	compatible = "qcom,msmskunk-mtp", "qcom,msmskunk", "qcom,mtp";
-	qcom,board-id = <8 0>;
-};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-mtp.dtsi b/arch/arm64/boot/dts/qcom/msmskunk-mtp.dtsi
deleted file mode 100644
index 930c8de..0000000
--- a/arch/arm64/boot/dts/qcom/msmskunk-mtp.dtsi
+++ /dev/null
@@ -1,14 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include "msmskunk-pinctrl.dtsi"
-
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-sim.dts b/arch/arm64/boot/dts/qcom/msmskunk-sim.dts
deleted file mode 100644
index eb95256..0000000
--- a/arch/arm64/boot/dts/qcom/msmskunk-sim.dts
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-/dts-v1/;
-/memreserve/ 0x90000000 0x00000100;
-
-#include "msmskunk.dtsi"
-#include "msmskunk-sim.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. MSM SKUNK SIM";
-	compatible = "qcom,msmskunk-sim", "qcom,msmskunk", "qcom,sim";
-	qcom,board-id = <16 0>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dts b/arch/arm64/boot/dts/qcom/sdm830-cdp.dts
similarity index 85%
rename from arch/arm64/boot/dts/qcom/sdmbat-cdp.dts
rename to arch/arm64/boot/dts/qcom/sdm830-cdp.dts
index f8f916e..dab4a9d 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm830-cdp.dts
@@ -13,11 +13,11 @@
 
 /dts-v1/;
 
-#include "sdmbat.dtsi"
-#include "sdmbat-cdp.dtsi"
+#include "sdm830.dtsi"
+#include "sdm830-cdp.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM bat v1 CDP";
-	compatible = "qcom,sdmbat-cdp", "qcom,sdmbat", "qcom,cdp";
+	compatible = "qcom,sdm830-cdp", "qcom,sdm830", "qcom,cdp";
 	qcom,board-id = <1 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm830-cdp.dtsi
similarity index 89%
copy from arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi
copy to arch/arm64/boot/dts/qcom/sdm830-cdp.dtsi
index af7a194..c7bbef0 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm830-cdp.dtsi
@@ -10,5 +10,5 @@
  * GNU General Public License for more details.
  */
 
-#include "msmskunk-mtp.dtsi"
-#include "sdmbat-pinctrl.dtsi"
+#include "sdm845-cdp.dtsi"
+#include "sdm830-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dts b/arch/arm64/boot/dts/qcom/sdm830-mtp.dts
similarity index 85%
rename from arch/arm64/boot/dts/qcom/sdmbat-mtp.dts
rename to arch/arm64/boot/dts/qcom/sdm830-mtp.dts
index fb8e85a..5da16e6 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm830-mtp.dts
@@ -13,11 +13,11 @@
 
 /dts-v1/;
 
-#include "sdmbat.dtsi"
-#include "sdmbat-mtp.dtsi"
+#include "sdm830.dtsi"
+#include "sdm830-mtp.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM bat v1 MTP";
-	compatible = "qcom,sdmbat-mtp", "qcom,sdmbat", "qcom,mtp";
+	compatible = "qcom,sdm830-mtp", "qcom,sdm830", "qcom,mtp";
 	qcom,board-id = <8 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
similarity index 89%
rename from arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi
rename to arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
index af7a194..b2d607d 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
@@ -10,5 +10,5 @@
  * GNU General Public License for more details.
  */
 
-#include "msmskunk-mtp.dtsi"
-#include "sdmbat-pinctrl.dtsi"
+#include "sdm845-mtp.dtsi"
+#include "sdm830-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
similarity index 94%
rename from arch/arm64/boot/dts/qcom/sdmbat-pinctrl.dtsi
rename to arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
index ead34a6..a8d559c 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
@@ -12,7 +12,7 @@
 
 &soc {
 	tlmm: pinctrl@03800000 {
-		compatible = "qcom,sdmbat-pinctrl";
+		compatible = "qcom,sdm830-pinctrl";
 		reg = <0x03800000 0xc00000>;
 		interrupts = <0 208 0>;
 		gpio-controller;
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-rumi.dts b/arch/arm64/boot/dts/qcom/sdm830-rumi.dts
similarity index 78%
rename from arch/arm64/boot/dts/qcom/sdmbat-rumi.dts
rename to arch/arm64/boot/dts/qcom/sdm830-rumi.dts
index 2bf868e..2485051 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm830-rumi.dts
@@ -14,12 +14,12 @@
 /dts-v1/;
 /memreserve/ 0x90000000 0x00000100;
 
-#include "sdmbat.dtsi"
-#include "sdmbat-rumi.dtsi"
+#include "sdm830.dtsi"
+#include "sdm830-rumi.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM BAT RUMI";
-	compatible = "qcom,sdmbat-rumi", "qcom,sdmbat", "qcom,rumi";
+	model = "Qualcomm Technologies, Inc. SDM830 RUMI";
+	compatible = "qcom,sdm830-rumi", "qcom,sdm830", "qcom,rumi";
 	qcom,board-id = <15 0>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm830-rumi.dtsi
similarity index 92%
rename from arch/arm64/boot/dts/qcom/sdmbat-rumi.dtsi
rename to arch/arm64/boot/dts/qcom/sdm830-rumi.dtsi
index 11901f1..2bc5f3f 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm830-rumi.dtsi
@@ -13,8 +13,8 @@
 /*
  * As a general rule, only version-specific property overrides should be placed
  * inside this file. Common device definitions should be placed inside the
- * msmskunk-rumi.dtsi file.
+ * sdm845-rumi.dtsi file.
  */
 
- #include "msmskunk-rumi.dtsi"
+ #include "sdm845-rumi.dtsi"
 
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-sim.dts b/arch/arm64/boot/dts/qcom/sdm830-sim.dts
similarity index 79%
rename from arch/arm64/boot/dts/qcom/sdmbat-sim.dts
rename to arch/arm64/boot/dts/qcom/sdm830-sim.dts
index 216b3d0..57cd155 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-sim.dts
+++ b/arch/arm64/boot/dts/qcom/sdm830-sim.dts
@@ -14,12 +14,12 @@
 /dts-v1/;
 /memreserve/ 0x90000000 0x00000100;
 
-#include "sdmbat.dtsi"
-#include "sdmbat-sim.dtsi"
+#include "sdm830.dtsi"
+#include "sdm830-sim.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM BAT SIM";
-	compatible = "qcom,sdmbat-sim", "qcom,sdmbat", "qcom,sim";
+	model = "Qualcomm Technologies, Inc. SDM830 SIM";
+	compatible = "qcom,sdm830-sim", "qcom,sdm830", "qcom,sim";
 	qcom,board-id = <16 0>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-sim.dtsi b/arch/arm64/boot/dts/qcom/sdm830-sim.dtsi
similarity index 92%
rename from arch/arm64/boot/dts/qcom/sdmbat-sim.dtsi
rename to arch/arm64/boot/dts/qcom/sdm830-sim.dtsi
index 560ad45..85e8075 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-sim.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm830-sim.dtsi
@@ -13,8 +13,8 @@
 /*
  * As a general rule, only version-specific property overrides should be placed
  * inside this file. Common device definitions should be placed inside the
- * msmskunk-sim.dtsi file.
+ * sdm845-sim.dtsi file.
  */
 
- #include "msmskunk-sim.dtsi"
+ #include "sdm845-sim.dtsi"
 
diff --git a/arch/arm64/boot/dts/qcom/sdmbat.dtsi b/arch/arm64/boot/dts/qcom/sdm830.dtsi
similarity index 86%
rename from arch/arm64/boot/dts/qcom/sdmbat.dtsi
rename to arch/arm64/boot/dts/qcom/sdm830.dtsi
index 950d130..ff0d9a0 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm830.dtsi
@@ -13,14 +13,14 @@
 /*
  * As a general rule, only version-specific property overrides should be placed
  * inside this file. Common device definitions should be placed inside the
- * msmskunk.dtsi file.
+ * sdm845.dtsi file.
  */
 
- #include "msmskunk.dtsi"
+ #include "sdm845.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM BAT";
-	compatible = "qcom,sdmbat";
+	model = "Qualcomm Technologies, Inc. SDM830";
+	compatible = "qcom,sdm830";
 	qcom,msm-id = <328 0x0>;
 
 };
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-bus.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-camera.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-cdp.dts
similarity index 77%
copy from arch/arm64/boot/dts/qcom/sdmbat-cdp.dts
copy to arch/arm64/boot/dts/qcom/sdm845-cdp.dts
index f8f916e..22e3aea 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dts
@@ -13,11 +13,11 @@
 
 /dts-v1/;
 
-#include "sdmbat.dtsi"
-#include "sdmbat-cdp.dtsi"
+#include "sdm845.dtsi"
+#include "sdm845-cdp.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM bat v1 CDP";
-	compatible = "qcom,sdmbat-cdp", "qcom,sdmbat", "qcom,cdp";
+	model = "Qualcomm Technologies, Inc. MSM sdm845 v1 CDP";
+	compatible = "qcom,sdm845-cdp", "qcom,sdm845", "qcom,cdp";
 	qcom,board-id = <1 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
similarity index 93%
rename from arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index 930c8de..00bd301 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -10,5 +10,5 @@
  * GNU General Public License for more details.
  */
 
-#include "msmskunk-pinctrl.dtsi"
+#include "sdm845-pinctrl.dtsi"
 
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-coresight.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-ion.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
similarity index 77%
copy from arch/arm64/boot/dts/qcom/sdmbat-mtp.dts
copy to arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index fb8e85a..f7af60c 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -13,11 +13,11 @@
 
 /dts-v1/;
 
-#include "sdmbat.dtsi"
-#include "sdmbat-mtp.dtsi"
+#include "sdm845.dtsi"
+#include "sdm845-mtp.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM bat v1 MTP";
-	compatible = "qcom,sdmbat-mtp", "qcom,sdmbat", "qcom,mtp";
+	model = "Qualcomm Technologies, Inc. MSM sdm845 v1 MTP";
+	compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
 	qcom,board-id = <8 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
similarity index 93%
copy from arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi
copy to arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 930c8de..00bd301 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -10,5 +10,5 @@
  * GNU General Public License for more details.
  */
 
-#include "msmskunk-pinctrl.dtsi"
+#include "sdm845-pinctrl.dtsi"
 
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
similarity index 94%
rename from arch/arm64/boot/dts/qcom/msmskunk-pinctrl.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 84010bd..a69525c 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -12,7 +12,7 @@
 
 &soc {
 	tlmm: pinctrl@03400000 {
-		compatible = "qcom,msmskunk-pinctrl";
+		compatible = "qcom,sdm845-pinctrl";
 		reg = <0x03800000 0xc00000>;
 		interrupts = <0 208 0>;
 		gpio-controller;
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-regulator.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-rumi.dts b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
similarity index 86%
rename from arch/arm64/boot/dts/qcom/msmskunk-rumi.dts
rename to arch/arm64/boot/dts/qcom/sdm845-rumi.dts
index f22e5fd..7a4ac64 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
@@ -14,12 +14,12 @@
 /dts-v1/;
 /memreserve/ 0x90000000 0x00000100;
 
-#include "msmskunk.dtsi"
-#include "msmskunk-rumi.dtsi"
-#include "msmskunk-usb.dtsi"
+#include "sdm845.dtsi"
+#include "sdm845-rumi.dtsi"
+#include "sdm845-usb.dtsi"
 / {
-	model = "Qualcomm Technologies, Inc. MSM SKUNK RUMI";
-	compatible = "qcom,msmskunk-rumi", "qcom,msmskunk", "qcom,rumi";
+	model = "Qualcomm Technologies, Inc. SDM845 RUMI";
+	compatible = "qcom,sdm845-rumi", "qcom,sdm845", "qcom,rumi";
 	qcom,board-id = <15 0>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
similarity index 96%
rename from arch/arm64/boot/dts/qcom/msmskunk-rumi.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
index 1f235d3..3d70a17 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
@@ -11,7 +11,7 @@
  */
 
 &ufsphy_mem {
-	compatible = "qcom,ufs-phy-qrbtc-msmskunk";
+	compatible = "qcom,ufs-phy-qrbtc-sdm845";
 
 	vdda-phy-supply = <&pm8998_l1>;
 	vdda-pll-supply = <&pm8998_l2>;
@@ -64,7 +64,7 @@
 };
 
 &ufsphy_card {
-	compatible = "qcom,ufs-phy-qrbtc-msmskunk";
+	compatible = "qcom,ufs-phy-qrbtc-sdm845";
 
 	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
 	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-sde-display.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-sde.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-sim.dts b/arch/arm64/boot/dts/qcom/sdm845-sim.dts
similarity index 78%
copy from arch/arm64/boot/dts/qcom/sdmbat-sim.dts
copy to arch/arm64/boot/dts/qcom/sdm845-sim.dts
index 216b3d0..2fa77bb 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-sim.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-sim.dts
@@ -14,12 +14,11 @@
 /dts-v1/;
 /memreserve/ 0x90000000 0x00000100;
 
-#include "sdmbat.dtsi"
-#include "sdmbat-sim.dtsi"
+#include "sdm845.dtsi"
+#include "sdm845-sim.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM BAT SIM";
-	compatible = "qcom,sdmbat-sim", "qcom,sdmbat", "qcom,sim";
+	model = "Qualcomm Technologies, Inc. SDM845 SIM";
+	compatible = "qcom,sdm845-sim", "qcom,sdm845", "qcom,sim";
 	qcom,board-id = <16 0>;
 };
-
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-sim.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sim.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-sim.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-sim.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/msmskunk-smp2p.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
similarity index 98%
rename from arch/arm64/boot/dts/qcom/msmskunk-usb.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index dd6d4d4..442fcff 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -11,7 +11,7 @@
  * GNU General Public License for more details.
  */
 
-#include <dt-bindings/clock/qcom,gcc-skunk.h>
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
 &soc {
 	usb3: ssusb@a600000 {
 		compatible = "qcom,dwc-usb3-msm";
diff --git a/arch/arm64/boot/dts/qcom/msmskunk.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
similarity index 97%
rename from arch/arm64/boot/dts/qcom/msmskunk.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845.dtsi
index ff18e02..27d703f 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -11,19 +11,19 @@
  */
 
 #include "skeleton64.dtsi"
-#include <dt-bindings/clock/qcom,gcc-skunk.h>
-#include <dt-bindings/clock/qcom,camcc-skunk.h>
-#include <dt-bindings/clock/qcom,dispcc-skunk.h>
-#include <dt-bindings/clock/qcom,gpucc-skunk.h>
-#include <dt-bindings/clock/qcom,videocc-skunk.h>
-#include <dt-bindings/clock/qcom,cpucc-skunk.h>
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,gpucc-sdm845.h>
+#include <dt-bindings/clock/qcom,videocc-sdm845.h>
+#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
 #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/soc/qcom,tcs-mbox.h>
 
 / {
-	model = "Qualcomm Technologies, Inc. MSM SKUNK";
-	compatible = "qcom,msmskunk";
+	model = "Qualcomm Technologies, Inc. SDM845";
+	compatible = "qcom,sdm845";
 	qcom,msm-id = <321 0x0>;
 	interrupt-parent = <&intc>;
 
@@ -376,9 +376,9 @@
 	};
 };
 
-#include "msm-gdsc-skunk.dtsi"
-#include "msmskunk-sde.dtsi"
-#include "msmskunk-sde-display.dtsi"
+#include "msm-gdsc-sdm845.dtsi"
+#include "sdm845-sde.dtsi"
+#include "sdm845-sde-display.dtsi"
 
 &soc {
 	#address-cells = <1>;
@@ -466,7 +466,7 @@
 	};
 
 	clock_gcc: qcom,gcc@100000 {
-		compatible = "qcom,gcc-msmskunk";
+		compatible = "qcom,gcc-sdm845";
 		reg = <0x100000 0x1f0000>;
 		reg-names = "cc_base";
 		vdd_cx-supply = <&pm8998_s9_level>;
@@ -476,7 +476,7 @@
 	};
 
 	clock_videocc: qcom,videocc@ab00000 {
-		compatible = "qcom,video_cc-msmskunk";
+		compatible = "qcom,video_cc-sdm845";
 		reg = <0xab00000 0x10000>;
 		reg-names = "cc_base";
 		vdd_cx-supply = <&pm8998_s9_level>;
@@ -1075,8 +1075,8 @@
 		reg = <0x1300000 0x50000>;
 		reg-names = "llcc_base";
 
-		llcc: qcom,msmskunk-llcc {
-			compatible = "qcom,msmskunk-llcc";
+		llcc: qcom,sdm845-llcc {
+			compatible = "qcom,sdm845-llcc";
 			#cache-cells = <1>;
 			max-slices = <32>;
 			qcom,dump-size = <0x3c0000>;
@@ -1612,10 +1612,10 @@
 	status = "ok";
 };
 
-#include "msmskunk-regulator.dtsi"
-#include "msmskunk-coresight.dtsi"
-#include "msm-arm-smmu-skunk.dtsi"
-#include "msmskunk-ion.dtsi"
-#include "msmskunk-smp2p.dtsi"
-#include "msmskunk-camera.dtsi"
-#include "msmskunk-bus.dtsi"
+#include "sdm845-regulator.dtsi"
+#include "sdm845-coresight.dtsi"
+#include "msm-arm-smmu-sdm845.dtsi"
+#include "sdm845-ion.dtsi"
+#include "sdm845-smp2p.dtsi"
+#include "sdm845-camera.dtsi"
+#include "sdm845-bus.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi
deleted file mode 100644
index 77151c5..0000000
--- a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi
+++ /dev/null
@@ -1,14 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include "msmskunk-cdp.dtsi"
-#include "sdmbat-pinctrl.dtsi"
diff --git a/arch/arm64/configs/msmskunk-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
similarity index 98%
rename from arch/arm64/configs/msmskunk-perf_defconfig
rename to arch/arm64/configs/sdm845-perf_defconfig
index 4115727..e70996b 100644
--- a/arch/arm64/configs/msmskunk-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -41,7 +41,7 @@
 CONFIG_MODULE_SIG_SHA512=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MSMSKUNK=y
+CONFIG_ARCH_SDM845=y
 CONFIG_PCI=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
@@ -256,8 +256,8 @@
 CONFIG_SPI_SPIDEV=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_SPMI=y
-CONFIG_PINCTRL_MSMSKUNK=y
-CONFIG_PINCTRL_SDMBAT=y
+CONFIG_PINCTRL_SDM845=y
+CONFIG_PINCTRL_SDM830=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_RESET_QCOM=y
@@ -346,8 +346,8 @@
 CONFIG_ION_MSM=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
-CONFIG_MSM_GCC_SKUNK=y
-CONFIG_MSM_VIDEOCC_SKUNK=y
+CONFIG_MSM_GCC_SDM845=y
+CONFIG_MSM_VIDEOCC_SDM845=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
@@ -355,7 +355,7 @@
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_TESTS=y
 CONFIG_QCOM_LLCC=y
-CONFIG_QCOM_MSMSKUNK_LLCC=y
+CONFIG_QCOM_SDM845_LLCC=y
 CONFIG_QCOM_EUD=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
diff --git a/arch/arm64/configs/msmskunk_defconfig b/arch/arm64/configs/sdm845_defconfig
similarity index 98%
rename from arch/arm64/configs/msmskunk_defconfig
rename to arch/arm64/configs/sdm845_defconfig
index c384c4a..80a75a9 100644
--- a/arch/arm64/configs/msmskunk_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -42,7 +42,7 @@
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MSMSKUNK=y
+CONFIG_ARCH_SDM845=y
 CONFIG_PCI=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
@@ -263,8 +263,8 @@
 CONFIG_SPI_SPIDEV=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_SPMI=y
-CONFIG_PINCTRL_MSMSKUNK=y
-CONFIG_PINCTRL_SDMBAT=y
+CONFIG_PINCTRL_SDM845=y
+CONFIG_PINCTRL_SDM830=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_RESET_QCOM=y
@@ -357,8 +357,8 @@
 CONFIG_ION_MSM=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
-CONFIG_MSM_GCC_SKUNK=y
-CONFIG_MSM_VIDEOCC_SKUNK=y
+CONFIG_MSM_GCC_SDM845=y
+CONFIG_MSM_VIDEOCC_SDM845=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
@@ -366,7 +366,7 @@
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_TESTS=y
 CONFIG_QCOM_LLCC=y
-CONFIG_QCOM_MSMSKUNK_LLCC=y
+CONFIG_QCOM_SDM845_LLCC=y
 CONFIG_QCOM_EUD=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 86774d3..bdbaadf 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -808,11 +808,20 @@ void arch_irq_work_raise(void)
 }
 #endif
 
+static DEFINE_RAW_SPINLOCK(stop_lock);
 /*
  * ipi_cpu_stop - handle IPI from smp_send_stop()
  */
 static void ipi_cpu_stop(unsigned int cpu)
 {
+	if (system_state == SYSTEM_BOOTING ||
+	    system_state == SYSTEM_RUNNING) {
+		raw_spin_lock(&stop_lock);
+		pr_crit("CPU%u: stopping\n", cpu);
+		dump_stack();
+		raw_spin_unlock(&stop_lock);
+	}
+
 	set_cpu_active(cpu, false);
 
 	flush_cache_all();
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index e864e8b..f734b76 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -151,20 +151,20 @@
 	  Say Y if you want to support multimedia devices such as display,
 	  graphics, video encode/decode, camera, etc.
 
-config MSM_GCC_SKUNK
-	tristate "MSMSKUNK Global Clock Controller"
+config MSM_GCC_SDM845
+	tristate "SDM845 Global Clock Controller"
 	depends on COMMON_CLK_QCOM
 	help
 	  Support for the global clock controller on Qualcomm Technologies, Inc
-	  MSMskunk devices.
+	  SDM845 devices.
 	  Say Y if you want to use peripheral devices such as UART, SPI,
 	  i2c, USB, UFS, SD/eMMC, PCIe, etc.
 
-config MSM_VIDEOCC_SKUNK
-	tristate "MSMSKUNK Video Clock Controller"
+config MSM_VIDEOCC_SDM845
+	tristate "SDM845 Video Clock Controller"
 	depends on COMMON_CLK_QCOM
 	help
 	  Support for the video clock controller on Qualcomm Technologies, Inc
-	  MSMskunk devices.
+	  SDM845 devices.
 	  Say Y if you want to support video devices and functionality such as
 	  video encode/decode.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index c227967..62bdf21 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -26,8 +26,8 @@
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
 obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
 obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
-obj-$(CONFIG_MSM_GCC_SKUNK) += gcc-msmskunk.o
-obj-$(CONFIG_MSM_VIDEOCC_SKUNK) += videocc-msmskunk.o
+obj-$(CONFIG_MSM_GCC_SDM845) += gcc-sdm845.o
+obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
 obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
diff --git a/drivers/clk/qcom/gcc-msmskunk.c b/drivers/clk/qcom/gcc-sdm845.c
similarity index 97%
rename from drivers/clk/qcom/gcc-msmskunk.c
rename to drivers/clk/qcom/gcc-sdm845.c
index 59eb0ec..92e0ffa 100644
--- a/drivers/clk/qcom/gcc-msmskunk.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -23,7 +23,7 @@
 #include <linux/regmap.h>
 #include <linux/reset-controller.h>
 
-#include <dt-bindings/clock/qcom,gcc-skunk.h>
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
 
 #include "common.h"
 #include "clk-regmap.h"
@@ -32,7 +32,7 @@
 #include "clk-branch.h"
 #include "reset.h"
 #include "clk-alpha-pll.h"
-#include "vdd-level-skunk.h"
+#include "vdd-level-sdm845.h"
 
 #define GCC_APCS_CLOCK_SLEEP_ENA_VOTE_OFFSET	0x52008
 #define CPUSS_AHB_CLK_SLEEP_ENA			BIT(21)
@@ -3302,11 +3302,11 @@ static struct clk_branch gcc_video_xo_clk = {
 	},
 };
 
-struct clk_hw *gcc_msmskunk_hws[] = {
+struct clk_hw *gcc_sdm845_hws[] = {
 	[GCC_XO] =      &bi_tcxo.hw,
 };
 
-static struct clk_regmap *gcc_msmskunk_clocks[] = {
+static struct clk_regmap *gcc_sdm845_clocks[] = {
 	[GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
 	[GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
 	[GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
@@ -3497,7 +3497,7 @@ static struct clk_regmap *gcc_msmskunk_clocks[] = {
 	[GPLL1] = &gpll1.clkr,
 };
 
-static const struct qcom_reset_map gcc_msmskunk_resets[] = {
+static const struct qcom_reset_map gcc_sdm845_resets[] = {
 	[GCC_GPU_BCR] = { 0x71000 },
 	[GCC_MMSS_BCR] = { 0xb000 },
 	[GCC_PCIE_0_BCR] = { 0x6b000 },
@@ -3517,7 +3517,7 @@ static const struct qcom_reset_map gcc_msmskunk_resets[] = {
 	[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
 };
 
-static const struct regmap_config gcc_msmskunk_regmap_config = {
+static const struct regmap_config gcc_sdm845_regmap_config = {
 	.reg_bits	= 32,
 	.reg_stride	= 4,
 	.val_bits	= 32,
@@ -3525,33 +3525,33 @@ static const struct regmap_config gcc_msmskunk_regmap_config = {
 	.fast_io	= true,
 };
 
-static const struct qcom_cc_desc gcc_msmskunk_desc = {
-	.config = &gcc_msmskunk_regmap_config,
-	.clks = gcc_msmskunk_clocks,
-	.num_clks = ARRAY_SIZE(gcc_msmskunk_clocks),
-	.resets = gcc_msmskunk_resets,
-	.num_resets = ARRAY_SIZE(gcc_msmskunk_resets),
+static const struct qcom_cc_desc gcc_sdm845_desc = {
+	.config = &gcc_sdm845_regmap_config,
+	.clks = gcc_sdm845_clocks,
+	.num_clks = ARRAY_SIZE(gcc_sdm845_clocks),
+	.resets = gcc_sdm845_resets,
+	.num_resets = ARRAY_SIZE(gcc_sdm845_resets),
 };
 
-static const struct of_device_id gcc_msmskunk_match_table[] = {
-	{ .compatible = "qcom,gcc-msmskunk" },
+static const struct of_device_id gcc_sdm845_match_table[] = {
+	{ .compatible = "qcom,gcc-sdm845" },
 	{ }
 };
-MODULE_DEVICE_TABLE(of, gcc_msmskunk_match_table);
+MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table);
 
-static int gcc_msmskunk_probe(struct platform_device *pdev)
+static int gcc_sdm845_probe(struct platform_device *pdev)
 {
 	struct clk *clk;
 	struct regmap *regmap;
 	int ret = 0, i;
 
-	regmap = qcom_cc_map(pdev, &gcc_msmskunk_desc);
+	regmap = qcom_cc_map(pdev, &gcc_sdm845_desc);
 	if (IS_ERR(regmap))
 		return PTR_ERR(regmap);
 
 	/* register hardware clocks */
-	for (i = 0; i < ARRAY_SIZE(gcc_msmskunk_hws); i++) {
-		clk = devm_clk_register(&pdev->dev, gcc_msmskunk_hws[i]);
+	for (i = 0; i < ARRAY_SIZE(gcc_sdm845_hws); i++) {
+		clk = devm_clk_register(&pdev->dev, gcc_sdm845_hws[i]);
 		if (IS_ERR(clk))
 			return PTR_ERR(clk);
 	}
@@ -3579,7 +3579,7 @@ static int gcc_msmskunk_probe(struct platform_device *pdev)
 		return PTR_ERR(vdd_cx_ao.regulator[0]);
 	}
 
-	ret = qcom_cc_really_probe(pdev, &gcc_msmskunk_desc, regmap);
+	ret = qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register GCC clocks\n");
 		return ret;
@@ -3617,26 +3617,26 @@ static int gcc_msmskunk_probe(struct platform_device *pdev)
 	return ret;
 }
 
-static struct platform_driver gcc_msmskunk_driver = {
-	.probe		= gcc_msmskunk_probe,
+static struct platform_driver gcc_sdm845_driver = {
+	.probe		= gcc_sdm845_probe,
 	.driver		= {
-		.name	= "gcc-msmskunk",
-		.of_match_table = gcc_msmskunk_match_table,
+		.name	= "gcc-sdm845",
+		.of_match_table = gcc_sdm845_match_table,
 	},
 };
 
-static int __init gcc_msmskunk_init(void)
+static int __init gcc_sdm845_init(void)
 {
-	return platform_driver_register(&gcc_msmskunk_driver);
+	return platform_driver_register(&gcc_sdm845_driver);
 }
-core_initcall(gcc_msmskunk_init);
+core_initcall(gcc_sdm845_init);
 
-static void __exit gcc_msmskunk_exit(void)
+static void __exit gcc_sdm845_exit(void)
 {
-	platform_driver_unregister(&gcc_msmskunk_driver);
+	platform_driver_unregister(&gcc_sdm845_driver);
 }
-module_exit(gcc_msmskunk_exit);
+module_exit(gcc_sdm845_exit);
 
-MODULE_DESCRIPTION("QTI GCC MSMSKUNK Driver");
+MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:gcc-msmskunk");
+MODULE_ALIAS("platform:gcc-sdm845");
diff --git a/drivers/clk/qcom/vdd-level-skunk.h b/drivers/clk/qcom/vdd-level-sdm845.h
similarity index 100%
rename from drivers/clk/qcom/vdd-level-skunk.h
rename to drivers/clk/qcom/vdd-level-sdm845.h
diff --git a/drivers/clk/qcom/videocc-msmskunk.c b/drivers/clk/qcom/videocc-sdm845.c
similarity index 85%
rename from drivers/clk/qcom/videocc-msmskunk.c
rename to drivers/clk/qcom/videocc-sdm845.c
index 670efb5..0e9cf88 100644
--- a/drivers/clk/qcom/videocc-msmskunk.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -23,7 +23,7 @@
 #include <linux/regmap.h>
 #include <linux/reset-controller.h>
 
-#include <dt-bindings/clock/qcom,videocc-skunk.h>
+#include <dt-bindings/clock/qcom,videocc-sdm845.h>
 
 #include "common.h"
 #include "clk-regmap.h"
@@ -32,7 +32,7 @@
 #include "clk-branch.h"
 #include "reset.h"
 #include "clk-alpha-pll.h"
-#include "vdd-level-skunk.h"
+#include "vdd-level-sdm845.h"
 
 #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
 
@@ -298,7 +298,7 @@ static struct clk_branch video_cc_venus_ctl_core_clk = {
 	},
 };
 
-static struct clk_regmap *video_cc_msmskunk_clocks[] = {
+static struct clk_regmap *video_cc_sdm845_clocks[] = {
 	[VIDEO_CC_APB_CLK] = &video_cc_apb_clk.clkr,
 	[VIDEO_CC_AT_CLK] = &video_cc_at_clk.clkr,
 	[VIDEO_CC_DEBUG_CLK] = &video_cc_debug_clk.clkr,
@@ -315,14 +315,14 @@ static struct clk_regmap *video_cc_msmskunk_clocks[] = {
 	[VIDEO_PLL0] = &video_pll0.clkr,
 };
 
-static const struct qcom_reset_map video_cc_msmskunk_resets[] = {
+static const struct qcom_reset_map video_cc_sdm845_resets[] = {
 	[VIDEO_CC_INTERFACE_BCR] = { 0x8f0 },
 	[VIDEO_CC_VCODEC0_BCR] = { 0x870 },
 	[VIDEO_CC_VCODEC1_BCR] = { 0x8b0 },
 	[VIDEO_CC_VENUS_BCR] = { 0x810 },
 };
 
-static const struct regmap_config video_cc_msmskunk_regmap_config = {
+static const struct regmap_config video_cc_sdm845_regmap_config = {
 	.reg_bits	= 32,
 	.reg_stride	= 4,
 	.val_bits	= 32,
@@ -330,26 +330,26 @@ static const struct regmap_config video_cc_msmskunk_regmap_config = {
 	.fast_io	= true,
 };
 
-static const struct qcom_cc_desc video_cc_msmskunk_desc = {
-	.config = &video_cc_msmskunk_regmap_config,
-	.clks = video_cc_msmskunk_clocks,
-	.num_clks = ARRAY_SIZE(video_cc_msmskunk_clocks),
-	.resets = video_cc_msmskunk_resets,
-	.num_resets = ARRAY_SIZE(video_cc_msmskunk_resets),
+static const struct qcom_cc_desc video_cc_sdm845_desc = {
+	.config = &video_cc_sdm845_regmap_config,
+	.clks = video_cc_sdm845_clocks,
+	.num_clks = ARRAY_SIZE(video_cc_sdm845_clocks),
+	.resets = video_cc_sdm845_resets,
+	.num_resets = ARRAY_SIZE(video_cc_sdm845_resets),
 };
 
-static const struct of_device_id video_cc_msmskunk_match_table[] = {
-	{ .compatible = "qcom,video_cc-msmskunk" },
+static const struct of_device_id video_cc_sdm845_match_table[] = {
+	{ .compatible = "qcom,video_cc-sdm845" },
 	{ }
 };
-MODULE_DEVICE_TABLE(of, video_cc_msmskunk_match_table);
+MODULE_DEVICE_TABLE(of, video_cc_sdm845_match_table);
 
-static int video_cc_msmskunk_probe(struct platform_device *pdev)
+static int video_cc_sdm845_probe(struct platform_device *pdev)
 {
 	struct regmap *regmap;
 	int ret = 0;
 
-	regmap = qcom_cc_map(pdev, &video_cc_msmskunk_desc);
+	regmap = qcom_cc_map(pdev, &video_cc_sdm845_desc);
 	if (IS_ERR(regmap)) {
 		pr_err("Failed to map the Video CC registers\n");
 		return PTR_ERR(regmap);
@@ -365,7 +365,7 @@ static int video_cc_msmskunk_probe(struct platform_device *pdev)
 
 	clk_fabia_pll_configure(&video_pll0, regmap, &video_pll0_config);
 
-	ret = qcom_cc_really_probe(pdev, &video_cc_msmskunk_desc, regmap);
+	ret = qcom_cc_really_probe(pdev, &video_cc_sdm845_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register Video CC clocks\n");
 		return ret;
@@ -375,26 +375,26 @@ static int video_cc_msmskunk_probe(struct platform_device *pdev)
 	return ret;
 }
 
-static struct platform_driver video_cc_msmskunk_driver = {
-	.probe		= video_cc_msmskunk_probe,
+static struct platform_driver video_cc_sdm845_driver = {
+	.probe		= video_cc_sdm845_probe,
 	.driver		= {
-		.name	= "video_cc-msmskunk",
-		.of_match_table = video_cc_msmskunk_match_table,
+		.name	= "video_cc-sdm845",
+		.of_match_table = video_cc_sdm845_match_table,
 	},
 };
 
-static int __init video_cc_msmskunk_init(void)
+static int __init video_cc_sdm845_init(void)
 {
-	return platform_driver_register(&video_cc_msmskunk_driver);
+	return platform_driver_register(&video_cc_sdm845_driver);
 }
-core_initcall(video_cc_msmskunk_init);
+core_initcall(video_cc_sdm845_init);
 
-static void __exit video_cc_msmskunk_exit(void)
+static void __exit video_cc_sdm845_exit(void)
 {
-	platform_driver_unregister(&video_cc_msmskunk_driver);
+	platform_driver_unregister(&video_cc_sdm845_driver);
 }
-module_exit(video_cc_msmskunk_exit);
+module_exit(video_cc_sdm845_exit);
 
-MODULE_DESCRIPTION("QCOM VIDEO_CC MSMSKUNK Driver");
+MODULE_DESCRIPTION("QCOM VIDEO_CC SDM845 Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:video_cc-msmskunk");
+MODULE_ALIAS("platform:video_cc-sdm845");
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 7d233e6..c53a373 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -1969,7 +1969,7 @@ static void sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		break;
 	case SDE_HW_VER_300:
 	case SDE_HW_VER_400:
-		/* update msm8998 and skunk target here */
+		/* update msm8998 and sdm845 target here */
 		break;
 	}
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 5e35e4e..d28be49a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -42,9 +42,9 @@
 #define SDE_HW_VER_171	SDE_HW_VER(1, 7, 1) /* 8996 v2.0 */
 #define SDE_HW_VER_172	SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */
 #define SDE_HW_VER_300	SDE_HW_VER(3, 0, 0) /* 8998 v1.0 */
-#define SDE_HW_VER_400	SDE_HW_VER(4, 0, 0) /* msmskunk v1.0 */
+#define SDE_HW_VER_400	SDE_HW_VER(4, 0, 0) /* sdm845 v1.0 */
 
-#define IS_MSMSKUNK_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_400)
+#define IS_SDM845_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_400)
 
 #define MAX_IMG_WIDTH 0x3fff
 #define MAX_IMG_HEIGHT 0x3fff
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
index 6552326..a471dad 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -109,7 +109,7 @@ static void sde_hw_lm_setup_border_color(struct sde_hw_mixer *ctx,
 	}
 }
 
-static void sde_hw_lm_setup_blend_config_msmskunk(struct sde_hw_mixer *ctx,
+static void sde_hw_lm_setup_blend_config_sdm845(struct sde_hw_mixer *ctx,
 	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
 {
 	struct sde_hw_blk_reg_map *c = &ctx->hw;
@@ -227,8 +227,8 @@ static void _setup_mixer_ops(struct sde_mdss_cfg *m,
 		unsigned long features)
 {
 	ops->setup_mixer_out = sde_hw_lm_setup_out;
-	if (IS_MSMSKUNK_TARGET(m->hwversion))
-		ops->setup_blend_config = sde_hw_lm_setup_blend_config_msmskunk;
+	if (IS_SDM845_TARGET(m->hwversion))
+		ops->setup_blend_config = sde_hw_lm_setup_blend_config_sdm845;
 	else
 		ops->setup_blend_config = sde_hw_lm_setup_blend_config;
 	ops->setup_alpha_out = sde_hw_lm_setup_color3;
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 1749037..92fd916 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -54,7 +54,7 @@
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs.o
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-14nm.o
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-v3.o
-obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qrbtc-msmskunk.o
+obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qrbtc-sdm845.o
 obj-$(CONFIG_PHY_TUSB1210)		+= phy-tusb1210.o
 obj-$(CONFIG_PHY_BRCM_SATA)		+= phy-brcm-sata.o
 obj-$(CONFIG_PHY_PISTACHIO_USB)		+= phy-pistachio-usb.o
diff --git a/drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.c b/drivers/phy/phy-qcom-ufs-qrbtc-sdm845.c
similarity index 65%
rename from drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.c
rename to drivers/phy/phy-qcom-ufs-qrbtc-sdm845.c
index 61f1232..6834f6a 100644
--- a/drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.c
+++ b/drivers/phy/phy-qcom-ufs-qrbtc-sdm845.c
@@ -12,12 +12,12 @@
  *
  */
 
-#include "phy-qcom-ufs-qrbtc-msmskunk.h"
+#include "phy-qcom-ufs-qrbtc-sdm845.h"
 
-#define UFS_PHY_NAME "ufs_phy_qrbtc_msmskunk"
+#define UFS_PHY_NAME "ufs_phy_qrbtc_sdm845"
 
 static
-int ufs_qcom_phy_qrbtc_msmskunk_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+int ufs_qcom_phy_qrbtc_sdm845_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
 					bool is_rate_B)
 {
 	int err;
@@ -44,7 +44,7 @@ int ufs_qcom_phy_qrbtc_msmskunk_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
 }
 
 static int
-ufs_qcom_phy_qrbtc_msmskunk_is_pcs_ready(struct ufs_qcom_phy *phy_common)
+ufs_qcom_phy_qrbtc_sdm845_is_pcs_ready(struct ufs_qcom_phy *phy_common)
 {
 	int err = 0;
 	u32 val;
@@ -68,7 +68,7 @@ ufs_qcom_phy_qrbtc_msmskunk_is_pcs_ready(struct ufs_qcom_phy *phy_common)
 	return err;
 }
 
-static void ufs_qcom_phy_qrbtc_msmskunk_start_serdes(struct ufs_qcom_phy *phy)
+static void ufs_qcom_phy_qrbtc_sdm845_start_serdes(struct ufs_qcom_phy *phy)
 {
 	u32 temp;
 
@@ -82,29 +82,29 @@ static void ufs_qcom_phy_qrbtc_msmskunk_start_serdes(struct ufs_qcom_phy *phy)
 	mb();
 }
 
-static int ufs_qcom_phy_qrbtc_msmskunk_init(struct phy *generic_phy)
+static int ufs_qcom_phy_qrbtc_sdm845_init(struct phy *generic_phy)
 {
 	return 0;
 }
 
-struct phy_ops ufs_qcom_phy_qrbtc_msmskunk_phy_ops = {
-	.init		= ufs_qcom_phy_qrbtc_msmskunk_init,
+struct phy_ops ufs_qcom_phy_qrbtc_sdm845_phy_ops = {
+	.init		= ufs_qcom_phy_qrbtc_sdm845_init,
 	.exit		= ufs_qcom_phy_exit,
 	.owner		= THIS_MODULE,
 };
 
-struct ufs_qcom_phy_specific_ops phy_qrbtc_msmskunk_ops = {
-	.calibrate_phy		= ufs_qcom_phy_qrbtc_msmskunk_phy_calibrate,
-	.start_serdes		= ufs_qcom_phy_qrbtc_msmskunk_start_serdes,
+struct ufs_qcom_phy_specific_ops phy_qrbtc_sdm845_ops = {
+	.calibrate_phy		= ufs_qcom_phy_qrbtc_sdm845_phy_calibrate,
+	.start_serdes		= ufs_qcom_phy_qrbtc_sdm845_start_serdes,
 	.is_physical_coding_sublayer_ready =
-				ufs_qcom_phy_qrbtc_msmskunk_is_pcs_ready,
+				ufs_qcom_phy_qrbtc_sdm845_is_pcs_ready,
 };
 
-static int ufs_qcom_phy_qrbtc_msmskunk_probe(struct platform_device *pdev)
+static int ufs_qcom_phy_qrbtc_sdm845_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct phy *generic_phy;
-	struct ufs_qcom_phy_qrbtc_msmskunk *phy;
+	struct ufs_qcom_phy_qrbtc_sdm845 *phy;
 	int err = 0;
 
 	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
@@ -114,7 +114,7 @@ static int ufs_qcom_phy_qrbtc_msmskunk_probe(struct platform_device *pdev)
 	}
 
 	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
-		&ufs_qcom_phy_qrbtc_msmskunk_phy_ops, &phy_qrbtc_msmskunk_ops);
+		&ufs_qcom_phy_qrbtc_sdm845_phy_ops, &phy_qrbtc_sdm845_ops);
 
 	if (!generic_phy) {
 		dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
@@ -132,7 +132,7 @@ static int ufs_qcom_phy_qrbtc_msmskunk_probe(struct platform_device *pdev)
 	return err;
 }
 
-static int ufs_qcom_phy_qrbtc_msmskunk_remove(struct platform_device *pdev)
+static int ufs_qcom_phy_qrbtc_sdm845_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct phy *generic_phy = to_phy(dev);
@@ -147,23 +147,23 @@ static int ufs_qcom_phy_qrbtc_msmskunk_remove(struct platform_device *pdev)
 	return err;
 }
 
-static const struct of_device_id ufs_qcom_phy_qrbtc_msmskunk_of_match[] = {
-	{.compatible = "qcom,ufs-phy-qrbtc-msmskunk"},
+static const struct of_device_id ufs_qcom_phy_qrbtc_sdm845_of_match[] = {
+	{.compatible = "qcom,ufs-phy-qrbtc-sdm845"},
 	{},
 };
-MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qrbtc_msmskunk_of_match);
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qrbtc_sdm845_of_match);
 
-static struct platform_driver ufs_qcom_phy_qrbtc_msmskunk_driver = {
-	.probe = ufs_qcom_phy_qrbtc_msmskunk_probe,
-	.remove = ufs_qcom_phy_qrbtc_msmskunk_remove,
+static struct platform_driver ufs_qcom_phy_qrbtc_sdm845_driver = {
+	.probe = ufs_qcom_phy_qrbtc_sdm845_probe,
+	.remove = ufs_qcom_phy_qrbtc_sdm845_remove,
 	.driver = {
-		.of_match_table = ufs_qcom_phy_qrbtc_msmskunk_of_match,
-		.name = "ufs_qcom_phy_qrbtc_msmskunk",
+		.of_match_table = ufs_qcom_phy_qrbtc_sdm845_of_match,
+		.name = "ufs_qcom_phy_qrbtc_sdm845",
 		.owner = THIS_MODULE,
 	},
 };
 
-module_platform_driver(ufs_qcom_phy_qrbtc_msmskunk_driver);
+module_platform_driver(ufs_qcom_phy_qrbtc_sdm845_driver);
 
-MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QRBTC MSMSKUNK");
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QRBTC SDM845");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.h b/drivers/phy/phy-qcom-ufs-qrbtc-sdm845.h
similarity index 97%
rename from drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.h
rename to drivers/phy/phy-qcom-ufs-qrbtc-sdm845.h
index 2597576..ddcf4192 100644
--- a/drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.h
+++ b/drivers/phy/phy-qcom-ufs-qrbtc-sdm845.h
@@ -12,8 +12,8 @@
  *
  */
 
-#ifndef UFS_QCOM_PHY_QRBTC_MSMSKUNK_H_
-#define UFS_QCOM_PHY_QRBTC_MSMSKUNK_H_
+#ifndef UFS_QCOM_PHY_QRBTC_SDM845_H_
+#define UFS_QCOM_PHY_QRBTC_SDM845_H_
 
 #include "phy-qcom-ufs-i.h"
 
@@ -166,14 +166,14 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
 
 
 /*
- * This structure represents the qrbtc-msmskunk specific phy.
+ * This structure represents the qrbtc-sdm845 specific phy.
  * common_cfg MUST remain the first field in this structure
  * in case extra fields are added. This way, when calling
  * get_ufs_qcom_phy() of generic phy, we can extract the
  * common phy structure (struct ufs_qcom_phy) out of it
  * regardless of the relevant specific phy.
  */
-struct ufs_qcom_phy_qrbtc_msmskunk {
+struct ufs_qcom_phy_qrbtc_sdm845 {
 	struct ufs_qcom_phy common_cfg;
 };
 
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 1058e5e..5222936 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -79,23 +79,23 @@
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  Qualcomm TLMM block found on the Qualcomm 8916 platform.
 
-config PINCTRL_MSMSKUNK
-	tristate "Qualcomm Technologies Inc MSMSKUNK pin controller driver"
+config PINCTRL_SDM845
+	tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
 	depends on GPIOLIB && OF
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
-	  Technologies Inc MSMSKUNK platform.
+	  Technologies Inc SDM845 platform.
 
-config PINCTRL_SDMBAT
-	tristate "Qualcomm Technologies Inc SDMBAT pin controller driver"
+config PINCTRL_SDM830
+	tristate "Qualcomm Technologies Inc SDM830 pin controller driver"
 	depends on GPIOLIB && OF
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
-	  Technologies Inc SDMBAT platform.
+	  Technologies Inc SDM830 platform.
 
 
 config PINCTRL_MSM8996
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index fd52c43..c66ee3c 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -15,5 +15,5 @@
 obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
-obj-$(CONFIG_PINCTRL_MSMSKUNK) += pinctrl-msmskunk.o
-obj-$(CONFIG_PINCTRL_SDMBAT) += pinctrl-sdmbat.o
+obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
+obj-$(CONFIG_PINCTRL_SDM830) += pinctrl-sdm830.o
diff --git a/drivers/pinctrl/qcom/pinctrl-sdmbat.c b/drivers/pinctrl/qcom/pinctrl-sdm830.c
similarity index 97%
rename from drivers/pinctrl/qcom/pinctrl-sdmbat.c
rename to drivers/pinctrl/qcom/pinctrl-sdm830.c
index 3e4fdda..fc3d0ad 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdmbat.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm830.c
@@ -118,7 +118,7 @@
 		.intr_detection_bit = -1,		\
 		.intr_detection_width = -1,		\
 	}
-static const struct pinctrl_pin_desc sdmbat_pins[] = {
+static const struct pinctrl_pin_desc sdm830_pins[] = {
 	PINCTRL_PIN(0, "GPIO_0"),
 	PINCTRL_PIN(1, "GPIO_1"),
 	PINCTRL_PIN(2, "GPIO_2"),
@@ -403,7 +403,7 @@ static const unsigned int sdc2_clk_pins[] = { 150 };
 static const unsigned int sdc2_cmd_pins[] = { 151 };
 static const unsigned int sdc2_data_pins[] = { 152 };
 
-enum sdmbat_functions {
+enum sdm830_functions {
 	msm_mux_qup0,
 	msm_mux_gpio,
 	msm_mux_reserved0,
@@ -1680,7 +1680,7 @@ static const char * const reserved123_groups[] = {
 	"gpio123",
 };
 
-static const struct msm_function sdmbat_functions[] = {
+static const struct msm_function sdm830_functions[] = {
 	FUNCTION(qup0),
 	FUNCTION(gpio),
 	FUNCTION(reserved0),
@@ -1996,7 +1996,7 @@ static const struct msm_function sdmbat_functions[] = {
 	FUNCTION(reserved123),
 };
 
-static const struct msm_pingroup sdmbat_groups[] = {
+static const struct msm_pingroup sdm830_groups[] = {
 	PINGROUP(0, SOUTH, qup0, NA, reserved0, NA, NA, NA, NA, NA, NA),
 	PINGROUP(1, SOUTH, qup0, NA, reserved1, NA, NA, NA, NA, NA, NA),
 	PINGROUP(2, SOUTH, qup0, NA, reserved2, NA, NA, NA, NA, NA, NA),
@@ -2236,48 +2236,48 @@ static const struct msm_pingroup sdmbat_groups[] = {
 	SDC_QDSD_PINGROUP(sdc2_data, 0x59a000, 9, 0),
 };
 
-static const struct msm_pinctrl_soc_data sdmbat_pinctrl = {
-	.pins = sdmbat_pins,
-	.npins = ARRAY_SIZE(sdmbat_pins),
-	.functions = sdmbat_functions,
-	.nfunctions = ARRAY_SIZE(sdmbat_functions),
-	.groups = sdmbat_groups,
-	.ngroups = ARRAY_SIZE(sdmbat_groups),
+static const struct msm_pinctrl_soc_data sdm830_pinctrl = {
+	.pins = sdm830_pins,
+	.npins = ARRAY_SIZE(sdm830_pins),
+	.functions = sdm830_functions,
+	.nfunctions = ARRAY_SIZE(sdm830_functions),
+	.groups = sdm830_groups,
+	.ngroups = ARRAY_SIZE(sdm830_groups),
 	.ngpios = 136,
 };
 
-static int sdmbat_pinctrl_probe(struct platform_device *pdev)
+static int sdm830_pinctrl_probe(struct platform_device *pdev)
 {
-	return msm_pinctrl_probe(pdev, &sdmbat_pinctrl);
+	return msm_pinctrl_probe(pdev, &sdm830_pinctrl);
 }
 
-static const struct of_device_id sdmbat_pinctrl_of_match[] = {
-	{ .compatible = "qcom,sdmbat-pinctrl", },
+static const struct of_device_id sdm830_pinctrl_of_match[] = {
+	{ .compatible = "qcom,sdm830-pinctrl", },
 	{ },
 };
 
-static struct platform_driver sdmbat_pinctrl_driver = {
+static struct platform_driver sdm830_pinctrl_driver = {
 	.driver = {
-		.name = "sdmbat-pinctrl",
+		.name = "sdm830-pinctrl",
 		.owner = THIS_MODULE,
-		.of_match_table = sdmbat_pinctrl_of_match,
+		.of_match_table = sdm830_pinctrl_of_match,
 	},
-	.probe = sdmbat_pinctrl_probe,
+	.probe = sdm830_pinctrl_probe,
 	.remove = msm_pinctrl_remove,
 };
 
-static int __init sdmbat_pinctrl_init(void)
+static int __init sdm830_pinctrl_init(void)
 {
-	return platform_driver_register(&sdmbat_pinctrl_driver);
+	return platform_driver_register(&sdm830_pinctrl_driver);
 }
-arch_initcall(sdmbat_pinctrl_init);
+arch_initcall(sdm830_pinctrl_init);
 
-static void __exit sdmbat_pinctrl_exit(void)
+static void __exit sdm830_pinctrl_exit(void)
 {
-	platform_driver_unregister(&sdmbat_pinctrl_driver);
+	platform_driver_unregister(&sdm830_pinctrl_driver);
 }
-module_exit(sdmbat_pinctrl_exit);
+module_exit(sdm830_pinctrl_exit);
 
-MODULE_DESCRIPTION("QTI sdmbat pinctrl driver");
+MODULE_DESCRIPTION("QTI sdm830 pinctrl driver");
 MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, sdmbat_pinctrl_of_match);
+MODULE_DEVICE_TABLE(of, sdm830_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-msmskunk.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
similarity index 97%
rename from drivers/pinctrl/qcom/pinctrl-msmskunk.c
rename to drivers/pinctrl/qcom/pinctrl-sdm845.c
index e203b2d..b237a6d 100644
--- a/drivers/pinctrl/qcom/pinctrl-msmskunk.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -116,7 +116,7 @@
 		.intr_detection_bit = -1,		\
 		.intr_detection_width = -1,		\
 	}
-static const struct pinctrl_pin_desc msmskunk_pins[] = {
+static const struct pinctrl_pin_desc sdm845_pins[] = {
 	PINCTRL_PIN(0, "GPIO_0"),
 	PINCTRL_PIN(1, "GPIO_1"),
 	PINCTRL_PIN(2, "GPIO_2"),
@@ -429,7 +429,7 @@ static const unsigned int sdc2_clk_pins[] = { 150 };
 static const unsigned int sdc2_cmd_pins[] = { 151 };
 static const unsigned int sdc2_data_pins[] = { 152 };
 
-enum msmskunk_functions {
+enum sdm845_functions {
 	msm_mux_gpio,
 	msm_mux_qup0,
 	msm_mux_reserved0,
@@ -1815,7 +1815,7 @@ static const char * const sdc40_groups[] = {
 	"gpio96",
 };
 
-static const struct msm_function msmskunk_functions[] = {
+static const struct msm_function sdm845_functions[] = {
 	FUNCTION(gpio),
 	FUNCTION(qup0),
 	FUNCTION(reserved0),
@@ -2158,7 +2158,7 @@ static const struct msm_function msmskunk_functions[] = {
 	FUNCTION(sdc40),
 };
 
-static const struct msm_pingroup msmskunk_groups[] = {
+static const struct msm_pingroup sdm845_groups[] = {
 	PINGROUP(0, NORTH, qup0, NA, reserved0, NA, NA, NA, NA, NA, NA),
 	PINGROUP(1, NORTH, qup0, NA, reserved1, NA, NA, NA, NA, NA, NA),
 	PINGROUP(2, NORTH, qup0, NA, reserved2, NA, NA, NA, NA, NA, NA),
@@ -2421,48 +2421,48 @@ static const struct msm_pingroup msmskunk_groups[] = {
 	SDC_QDSD_PINGROUP(sdc2_data, 0x59a000, 9, 0),
 };
 
-static const struct msm_pinctrl_soc_data msmskunk_pinctrl = {
-	.pins = msmskunk_pins,
-	.npins = ARRAY_SIZE(msmskunk_pins),
-	.functions = msmskunk_functions,
-	.nfunctions = ARRAY_SIZE(msmskunk_functions),
-	.groups = msmskunk_groups,
-	.ngroups = ARRAY_SIZE(msmskunk_groups),
+static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
+	.pins = sdm845_pins,
+	.npins = ARRAY_SIZE(sdm845_pins),
+	.functions = sdm845_functions,
+	.nfunctions = ARRAY_SIZE(sdm845_functions),
+	.groups = sdm845_groups,
+	.ngroups = ARRAY_SIZE(sdm845_groups),
 	.ngpios = 150,
 };
 
-static int msmskunk_pinctrl_probe(struct platform_device *pdev)
+static int sdm845_pinctrl_probe(struct platform_device *pdev)
 {
-	return msm_pinctrl_probe(pdev, &msmskunk_pinctrl);
+	return msm_pinctrl_probe(pdev, &sdm845_pinctrl);
 }
 
-static const struct of_device_id msmskunk_pinctrl_of_match[] = {
-	{ .compatible = "qcom,msmskunk-pinctrl", },
+static const struct of_device_id sdm845_pinctrl_of_match[] = {
+	{ .compatible = "qcom,sdm845-pinctrl", },
 	{ },
 };
 
-static struct platform_driver msmskunk_pinctrl_driver = {
+static struct platform_driver sdm845_pinctrl_driver = {
 	.driver = {
-		.name = "msmskunk-pinctrl",
+		.name = "sdm845-pinctrl",
 		.owner = THIS_MODULE,
-		.of_match_table = msmskunk_pinctrl_of_match,
+		.of_match_table = sdm845_pinctrl_of_match,
 	},
-	.probe = msmskunk_pinctrl_probe,
+	.probe = sdm845_pinctrl_probe,
 	.remove = msm_pinctrl_remove,
 };
 
-static int __init msmskunk_pinctrl_init(void)
+static int __init sdm845_pinctrl_init(void)
 {
-	return platform_driver_register(&msmskunk_pinctrl_driver);
+	return platform_driver_register(&sdm845_pinctrl_driver);
 }
-arch_initcall(msmskunk_pinctrl_init);
+arch_initcall(sdm845_pinctrl_init);
 
-static void __exit msmskunk_pinctrl_exit(void)
+static void __exit sdm845_pinctrl_exit(void)
 {
-	platform_driver_unregister(&msmskunk_pinctrl_driver);
+	platform_driver_unregister(&sdm845_pinctrl_driver);
 }
-module_exit(msmskunk_pinctrl_exit);
+module_exit(sdm845_pinctrl_exit);
 
-MODULE_DESCRIPTION("QTI msmskunk pinctrl driver");
+MODULE_DESCRIPTION("QTI sdm845 pinctrl driver");
 MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, msmskunk_pinctrl_of_match);
+MODULE_DEVICE_TABLE(of, sdm845_pinctrl_of_match);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index d68e15d..298f8c1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -3613,6 +3613,7 @@ static int ipa3_apps_cons_request_resource(void)
 
 static void ipa3_sps_release_resource(struct work_struct *work)
 {
+	mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex);
 	/* check whether still need to decrease client usage */
 	if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
 		if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
@@ -3624,6 +3625,7 @@ static void ipa3_sps_release_resource(struct work_struct *work)
 		}
 	}
 	atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
+	mutex_unlock(&ipa3_ctx->transport_pm.transport_pm_mutex);
 }
 
 int ipa3_create_apps_resource(void)
@@ -4403,6 +4405,8 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 		goto fail_create_transport_wq;
 	}
 
+	/* Initialize the SPS PM lock. */
+	mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);
 	spin_lock_init(&ipa3_ctx->transport_pm.lock);
 	ipa3_ctx->transport_pm.res_granted = false;
 	ipa3_ctx->transport_pm.res_rel_in_prog = false;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 4f6bf55..bec0b27 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -979,6 +979,7 @@ struct ipa3_uc_wdi_ctx {
  * @lock: lock for ensuring atomic operations
  * @res_granted: true if SPS requested IPA resource and IPA granted it
  * @res_rel_in_prog: true if releasing IPA resource is in progress
+ * @transport_pm_mutex: Mutex to protect the transport_pm functionality.
  */
 struct ipa3_transport_pm {
 	spinlock_t lock;
@@ -986,6 +987,7 @@ struct ipa3_transport_pm {
 	bool res_rel_in_prog;
 	atomic_t dec_clients;
 	atomic_t eot_activity;
+	struct mutex transport_pm_mutex;
 };
 
 /**
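
Note on the IPA locking change above: ipa3_sps_release_resource() runs from a workqueue (process context), and the new transport_pm_mutex is initialized in ipa3_pre_init() right after the transport workqueue is created, before any release work can be queued. Presumably a mutex rather than the existing transport_pm spinlock was chosen because the protected section runs in sleepable context. A minimal sketch of the resulting pattern, with the inner client-vote handling elided and only the symbols visible in the hunks above assumed real:

	/* Sketch only: serialize the deferred release under the new mutex. */
	static void example_sps_release_resource(struct work_struct *work)
	{
		mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex);
		if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
			/* drop the transport's client vote here (elided) */
			atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
		}
		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
		mutex_unlock(&ipa3_ctx->transport_pm.transport_pm_mutex);
	}
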
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 2829a02..474f914 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -35,11 +35,11 @@
 	  Level Cache. This provides interfaces to client's that use the LLCC.
 	  Say yes here to enable LLCC slice driver.
 
-config QCOM_MSMSKUNK_LLCC
-	tristate "Qualcomm Technologies, Inc. MSMSKUNK LLCC driver"
+config QCOM_SDM845_LLCC
+	tristate "Qualcomm Technologies, Inc. SDM845 LLCC driver"
 	depends on QCOM_LLCC
 	help
-	  Say yes here to enable the LLCC driver for MSMSKUNK. This is provides
+	  Say yes here to enable the LLCC driver for SDM845. This provides
 	  data required to configure LLCC so that clients can start using the
 	  LLCC slices.
 
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 1700319..531685c 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,7 +1,7 @@
 obj-$(CONFIG_QCOM_CPUSS_DUMP) += cpuss_dump.o
 obj-$(CONFIG_QCOM_GSBI)	+=	qcom_gsbi.o
 obj-$(CONFIG_QCOM_LLCC) += llcc-core.o llcc-slice.o
-obj-$(CONFIG_QCOM_MSMSKUNK_LLCC) += llcc-msmskunk.o
+obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
 obj-$(CONFIG_QCOM_LLCC_AMON) += llcc-amon.o
 obj-$(CONFIG_QCOM_PM)	+=	spm.o
 obj-$(CONFIG_QCOM_SMD) +=	smd.o
diff --git a/drivers/soc/qcom/llcc-msmskunk.c b/drivers/soc/qcom/llcc-sdm845.c
similarity index 82%
rename from drivers/soc/qcom/llcc-msmskunk.c
rename to drivers/soc/qcom/llcc-sdm845.c
index 41f55eb..0a28ee0 100644
--- a/drivers/soc/qcom/llcc-msmskunk.c
+++ b/drivers/soc/qcom/llcc-sdm845.c
@@ -56,7 +56,7 @@
 		.activate_on_init = a,		\
 	}
 
-static struct llcc_slice_config msmskunk_data[] =  {
+static struct llcc_slice_config sdm845_data[] =  {
 	SCT_ENTRY("cpuss",       1, 1, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 1),
 	SCT_ENTRY("vidsc0",      2, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
 	SCT_ENTRY("vidsc1",      3, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
@@ -81,38 +81,38 @@ static struct llcc_slice_config msmskunk_data[] =  {
 	SCT_ENTRY("audiohw",     22, 22, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
 };
 
-static int msmskunk_qcom_llcc_probe(struct platform_device *pdev)
+static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
 {
-	return qcom_llcc_probe(pdev, msmskunk_data,
-				 ARRAY_SIZE(msmskunk_data));
+	return qcom_llcc_probe(pdev, sdm845_data,
+				 ARRAY_SIZE(sdm845_data));
 }
 
-static const struct of_device_id msmskunk_qcom_llcc_of_match[] = {
-	{ .compatible = "qcom,msmskunk-llcc", },
+static const struct of_device_id sdm845_qcom_llcc_of_match[] = {
+	{ .compatible = "qcom,sdm845-llcc", },
 	{ },
 };
 
-static struct platform_driver msmskunk_qcom_llcc_driver = {
+static struct platform_driver sdm845_qcom_llcc_driver = {
 	.driver = {
-		.name = "msmskunk-llcc",
+		.name = "sdm845-llcc",
 		.owner = THIS_MODULE,
-		.of_match_table = msmskunk_qcom_llcc_of_match,
+		.of_match_table = sdm845_qcom_llcc_of_match,
 	},
-	.probe = msmskunk_qcom_llcc_probe,
+	.probe = sdm845_qcom_llcc_probe,
 	.remove = qcom_llcc_remove,
 };
 
-static int __init msmskunk_init_qcom_llcc_init(void)
+static int __init sdm845_init_qcom_llcc_init(void)
 {
-	return platform_driver_register(&msmskunk_qcom_llcc_driver);
+	return platform_driver_register(&sdm845_qcom_llcc_driver);
 }
-module_init(msmskunk_init_qcom_llcc_init);
+module_init(sdm845_init_qcom_llcc_init);
 
-static void __exit msmskunk_exit_qcom_llcc_exit(void)
+static void __exit sdm845_exit_qcom_llcc_exit(void)
 {
-	platform_driver_unregister(&msmskunk_qcom_llcc_driver);
+	platform_driver_unregister(&sdm845_qcom_llcc_driver);
 }
-module_exit(msmskunk_exit_qcom_llcc_exit);
+module_exit(sdm845_exit_qcom_llcc_exit);
 
-MODULE_DESCRIPTION("QTI msmskunk LLCC driver");
+MODULE_DESCRIPTION("QTI sdm845 LLCC driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 7957e83..967b227 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -538,11 +538,11 @@ static struct msm_soc_info cpu_of_id[] = {
 	/* falcon ID */
 	[317] = {MSM_CPU_FALCON, "MSMFALCON"},
 
-	/* Skunk ID */
-	[321] = {MSM_CPU_SKUNK, "MSMSKUNK"},
+	/* sdm845 ID */
+	[321] = {MSM_CPU_SDM845, "SDM845"},
 
 	/* Bat ID */
-	[328] = {MSM_CPU_BAT, "SDMBAT"},
+	[328] = {MSM_CPU_SDM830, "SDM830"},
 
 	/* Uninitialized IDs are not known to run Linux.
 	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
@@ -1221,13 +1221,13 @@ static void * __init setup_dummy_socinfo(void)
 		dummy_socinfo.id = 319;
 		strlcpy(dummy_socinfo.build_id, "apqcobalt - ",
 			sizeof(dummy_socinfo.build_id));
-	} else if (early_machine_is_msmskunk()) {
+	} else if (early_machine_is_sdm845()) {
 		dummy_socinfo.id = 321;
-		strlcpy(dummy_socinfo.build_id, "msmskunk - ",
+		strlcpy(dummy_socinfo.build_id, "sdm845 - ",
 			sizeof(dummy_socinfo.build_id));
-	} else if (early_machine_is_sdmbat()) {
+	} else if (early_machine_is_sdm830()) {
 		dummy_socinfo.id = 328;
-		strlcpy(dummy_socinfo.build_id, "sdmbat - ",
+		strlcpy(dummy_socinfo.build_id, "sdm830 - ",
 			sizeof(dummy_socinfo.build_id));
 	}
 
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 17a27fe..4ddd8e1 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -516,6 +516,35 @@ struct dwc3_event_buffer {
 	struct dwc3		*dwc;
 };
 
+struct dwc3_gadget_events {
+	unsigned int	disconnect;
+	unsigned int	reset;
+	unsigned int	connect;
+	unsigned int	wakeup;
+	unsigned int	link_status_change;
+	unsigned int	eopf;
+	unsigned int	suspend;
+	unsigned int	sof;
+	unsigned int	erratic_error;
+	unsigned int	overflow;
+	unsigned int	vendor_dev_test_lmp;
+	unsigned int	cmdcmplt;
+	unsigned int	unknown_event;
+};
+
+struct dwc3_ep_events {
+	unsigned int	xfercomplete;
+	unsigned int	xfernotready;
+	unsigned int	control_data;
+	unsigned int	control_status;
+	unsigned int	xferinprogress;
+	unsigned int	rxtxfifoevent;
+	unsigned int	streamevent;
+	unsigned int	epcmdcomplete;
+	unsigned int	unknown_event;
+	unsigned int	total;
+};
+
 #define DWC3_EP_FLAG_STALLED	(1 << 0)
 #define DWC3_EP_FLAG_WEDGED	(1 << 1)
 
@@ -550,6 +579,9 @@ struct dwc3_event_buffer {
  * @name: a human readable name e.g. ep1out-bulk
  * @direction: true for TX, false for RX
  * @stream_capable: true when streams are enabled
+ * @dbg_ep_events: different events counter for endpoint
+ * @dbg_ep_events_diff: differential events counter for endpoint
+ * @dbg_ep_events_ts: timestamp for previous event counters
  */
 struct dwc3_ep {
 	struct usb_ep		endpoint;
@@ -601,6 +633,9 @@ struct dwc3_ep {
 
 	unsigned		direction:1;
 	unsigned		stream_capable:1;
+	struct dwc3_ep_events	dbg_ep_events;
+	struct dwc3_ep_events	dbg_ep_events_diff;
+	struct timespec		dbg_ep_events_ts;
 };
 
 enum dwc3_phy {
@@ -1083,6 +1118,7 @@ struct dwc3 {
 	unsigned int		irq_dbg_index;
 
 	wait_queue_head_t	wait_linkstate;
+	struct dwc3_gadget_events	dbg_gadget_events;
 };
 
 /* -------------------------------------------------------------------------- */
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 4444888..2d45c8f 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -62,6 +62,9 @@
 	}
 
 
+#define ep_event_rate(ev, c, p, dt)	\
+	((dt) ? ((c.ev - p.ev) * (MSEC_PER_SEC)) / (dt) : 0)
+
 static const struct debugfs_reg32 dwc3_regs[] = {
 	dump_register(GSBUSCFG0),
 	dump_register(GSBUSCFG1),
@@ -843,6 +846,173 @@ static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
 	}
 }
 
+static ssize_t dwc3_store_int_events(struct file *file,
+			const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int i, ret;
+	unsigned long flags;
+	struct seq_file *s = file->private_data;
+	struct dwc3 *dwc = s->private;
+	struct dwc3_ep *dep;
+	struct timespec ts;
+	u8 clear_stats;
+
+	if (ubuf == NULL) {
+		pr_err("[%s] EINVAL\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ret = kstrtou8_from_user(ubuf, count, 0, &clear_stats);
+	if (ret < 0) {
+		pr_err("can't parse input value\n");
+		return ret;
+	}
+
+	if (clear_stats != 0) {
+		pr_err("Invalid value. Write 0 to clear stats.\n");
+		ret = -EINVAL;
+		return ret;
+	}
+
+	pr_debug("%s(): clearing debug interrupt buffers\n", __func__);
+	spin_lock_irqsave(&dwc->lock, flags);
+	ts = current_kernel_time();
+	for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+		dep = dwc->eps[i];
+		memset(&dep->dbg_ep_events, 0, sizeof(dep->dbg_ep_events));
+		memset(&dep->dbg_ep_events_diff, 0,
+		       sizeof(dep->dbg_ep_events_diff));
+		dep->dbg_ep_events_ts = ts;
+	}
+	memset(&dwc->dbg_gadget_events, 0, sizeof(dwc->dbg_gadget_events));
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	return count;
+}
+
+static int dwc3_gadget_int_events_show(struct seq_file *s, void *unused)
+{
+	unsigned long   flags;
+	struct dwc3 *dwc = s->private;
+	struct dwc3_gadget_events *dbg_gadget_events;
+	struct dwc3_ep *dep;
+	int i;
+	struct timespec ts_delta;
+	struct timespec ts_current;
+	u32 ts_delta_ms;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	dbg_gadget_events = &dwc->dbg_gadget_events;
+
+	for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+		dep = dwc->eps[i];
+
+		if (dep == NULL || !(dep->flags & DWC3_EP_ENABLED))
+			continue;
+
+		ts_current = current_kernel_time();
+		ts_delta = timespec_sub(ts_current, dep->dbg_ep_events_ts);
+		ts_delta_ms = ts_delta.tv_nsec / NSEC_PER_MSEC +
+			ts_delta.tv_sec * MSEC_PER_SEC;
+
+		seq_printf(s, "\n\n===== dbg_ep_events for EP(%d) %s =====\n",
+			i, dep->name);
+		seq_printf(s, "xfercomplete:%u @ %luHz\n",
+			dep->dbg_ep_events.xfercomplete,
+			ep_event_rate(xfercomplete, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "xfernotready:%u @ %luHz\n",
+			dep->dbg_ep_events.xfernotready,
+			ep_event_rate(xfernotready, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "control_data:%u @ %luHz\n",
+			dep->dbg_ep_events.control_data,
+			ep_event_rate(control_data, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "control_status:%u @ %luHz\n",
+			dep->dbg_ep_events.control_status,
+			ep_event_rate(control_status, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "xferinprogress:%u @ %luHz\n",
+			dep->dbg_ep_events.xferinprogress,
+			ep_event_rate(xferinprogress, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "rxtxfifoevent:%u @ %luHz\n",
+			dep->dbg_ep_events.rxtxfifoevent,
+			ep_event_rate(rxtxfifoevent, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "streamevent:%u @ %luHz\n",
+			dep->dbg_ep_events.streamevent,
+			ep_event_rate(streamevent, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "epcmdcomplt:%u @ %luHz\n",
+			dep->dbg_ep_events.epcmdcomplete,
+			ep_event_rate(epcmdcomplete, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "unknown:%u @ %luHz\n",
+			dep->dbg_ep_events.unknown_event,
+			ep_event_rate(unknown_event, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+		seq_printf(s, "total:%u @ %luHz\n",
+			dep->dbg_ep_events.total,
+			ep_event_rate(total, dep->dbg_ep_events,
+				dep->dbg_ep_events_diff, ts_delta_ms));
+
+		dep->dbg_ep_events_ts = ts_current;
+		dep->dbg_ep_events_diff = dep->dbg_ep_events;
+	}
+
+	seq_puts(s, "\n=== dbg_gadget events ==\n");
+	seq_printf(s, "disconnect:%u\n reset:%u\n",
+		dbg_gadget_events->disconnect, dbg_gadget_events->reset);
+	seq_printf(s, "connect:%u\n wakeup:%u\n",
+		dbg_gadget_events->connect, dbg_gadget_events->wakeup);
+	seq_printf(s, "link_status_change:%u\n eopf:%u\n",
+		dbg_gadget_events->link_status_change, dbg_gadget_events->eopf);
+	seq_printf(s, "sof:%u\n suspend:%u\n",
+		dbg_gadget_events->sof, dbg_gadget_events->suspend);
+	seq_printf(s, "erratic_error:%u\n overflow:%u\n",
+		dbg_gadget_events->erratic_error,
+		dbg_gadget_events->overflow);
+	seq_printf(s, "vendor_dev_test_lmp:%u\n cmdcmplt:%u\n",
+		dbg_gadget_events->vendor_dev_test_lmp,
+		dbg_gadget_events->cmdcmplt);
+	seq_printf(s, "unknown_event:%u\n", dbg_gadget_events->unknown_event);
+
+	seq_printf(s, "\n\t== Last %d interrupts stats ==\t\n", MAX_INTR_STATS);
+	seq_puts(s, "@ time (us):\t");
+	for (i = 0; i < MAX_INTR_STATS; i++)
+		seq_printf(s, "%lld\t", ktime_to_us(dwc->irq_start_time[i]));
+	seq_puts(s, "\nhard irq time (us):\t");
+	for (i = 0; i < MAX_INTR_STATS; i++)
+		seq_printf(s, "%d\t", dwc->irq_completion_time[i]);
+	seq_puts(s, "\nevents count:\t\t");
+	for (i = 0; i < MAX_INTR_STATS; i++)
+		seq_printf(s, "%d\t", dwc->irq_event_count[i]);
+	seq_puts(s, "\nbh handled count:\t");
+	for (i = 0; i < MAX_INTR_STATS; i++)
+		seq_printf(s, "%d\t", dwc->bh_handled_evt_cnt[i]);
+	seq_puts(s, "\nirq thread time (us):\t");
+	for (i = 0; i < MAX_INTR_STATS; i++)
+		seq_printf(s, "%d\t", dwc->bh_completion_time[i]);
+	seq_putc(s, '\n');
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	return 0;
+}
+
+static int dwc3_gadget_events_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, dwc3_gadget_int_events_show, inode->i_private);
+}
+
+const struct file_operations dwc3_gadget_dbg_events_fops = {
+	.open		= dwc3_gadget_events_open,
+	.read		= seq_read,
+	.write		= dwc3_store_int_events,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 int dwc3_debugfs_init(struct dwc3 *dwc)
 {
 	struct dentry		*root;
@@ -892,6 +1062,13 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
 			dev_dbg(dwc->dev, "Can't create debugfs link_state\n");
 
 		dwc3_debugfs_create_endpoint_dirs(dwc, root);
+
+		file = debugfs_create_file("int_events", 0644, root, dwc,
+				&dwc3_gadget_dbg_events_fops);
+		if (!file) {
+			ret = -ENOMEM;
+			goto err1;
+		}
 	}
 
 	return 0;
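
Note on the int_events debugfs file added above: each read snapshots the per-endpoint counters into dbg_ep_events_diff and the read time into dbg_ep_events_ts, so ep_event_rate() reports the event rate over the interval since the previous read, and writing 0 to the file clears all counters. A minimal sketch of the same computation, with made-up numbers:

	/*
	 * Sketch mirroring ep_event_rate(): with 900 xfercomplete events at
	 * the previous read, 1400 now, and 250 ms elapsed, the file prints
	 * (1400 - 900) * MSEC_PER_SEC / 250 = 2000 Hz.
	 */
	static unsigned long example_event_rate(unsigned int cur,
						unsigned int prev, u32 delta_ms)
	{
		return delta_ms ? ((cur - prev) * MSEC_PER_SEC) / delta_ms : 0;
	}
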
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 99e822d..566b645 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1072,9 +1072,16 @@ static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
 static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
 		const struct dwc3_event_depevt *event)
 {
+	u8			epnum;
+	struct dwc3_ep		*dep;
+
+	epnum = event->endpoint_number;
+	dep = dwc->eps[epnum];
+
 	switch (event->status) {
 	case DEPEVT_STATUS_CONTROL_DATA:
 		dwc3_trace(trace_dwc3_ep0, "Control Data");
+		dep->dbg_ep_events.control_data++;
 
 		/*
 		 * We already have a DATA transfer in the controller's cache,
@@ -1098,6 +1105,7 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
 		break;
 
 	case DEPEVT_STATUS_CONTROL_STATUS:
+		dep->dbg_ep_events.control_status++;
 		if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
 			return;
 
@@ -1118,23 +1126,36 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
 void dwc3_ep0_interrupt(struct dwc3 *dwc,
 		const struct dwc3_event_depevt *event)
 {
+	struct dwc3_ep	*dep;
+	u8 epnum = event->endpoint_number;
+
 	dwc3_trace(trace_dwc3_ep0, "%s: state '%s'",
 			dwc3_ep_event_string(event),
 			dwc3_ep0_state_string(dwc->ep0state));
 
+	dep = dwc->eps[epnum];
 	switch (event->endpoint_event) {
 	case DWC3_DEPEVT_XFERCOMPLETE:
 		dwc3_ep0_xfer_complete(dwc, event);
+		dep->dbg_ep_events.xfercomplete++;
 		break;
 
 	case DWC3_DEPEVT_XFERNOTREADY:
 		dwc3_ep0_xfernotready(dwc, event);
+		dep->dbg_ep_events.xfernotready++;
 		break;
 
 	case DWC3_DEPEVT_XFERINPROGRESS:
+		dep->dbg_ep_events.xferinprogress++;
+		break;
 	case DWC3_DEPEVT_RXTXFIFOEVT:
+		dep->dbg_ep_events.rxtxfifoevent++;
+		break;
 	case DWC3_DEPEVT_STREAMEVT:
+		dep->dbg_ep_events.streamevent++;
+		break;
 	case DWC3_DEPEVT_EPCMDCMPLT:
+		dep->dbg_ep_events.epcmdcomplete++;
 		break;
 	}
 }
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 3f5fad2..7aa290f 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2613,9 +2613,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
 		return;
 	}
 
+	dep->dbg_ep_events.total++;
+
 	switch (event->endpoint_event) {
 	case DWC3_DEPEVT_XFERCOMPLETE:
 		dep->resource_index = 0;
+		dep->dbg_ep_events.xfercomplete++;
 
 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
 			dwc3_trace(trace_dwc3_gadget,
@@ -2627,9 +2630,11 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
 		dwc3_endpoint_transfer_complete(dwc, dep, event);
 		break;
 	case DWC3_DEPEVT_XFERINPROGRESS:
+		dep->dbg_ep_events.xferinprogress++;
 		dwc3_endpoint_transfer_complete(dwc, dep, event);
 		break;
 	case DWC3_DEPEVT_XFERNOTREADY:
+		dep->dbg_ep_events.xfernotready++;
 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
 			dwc3_gadget_start_isoc(dwc, dep, event);
 		} else {
@@ -2653,6 +2658,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
 
 		break;
 	case DWC3_DEPEVT_STREAMEVT:
+		dep->dbg_ep_events.streamevent++;
 		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
 			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
 					dep->name);
@@ -2675,9 +2681,11 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
 		break;
 	case DWC3_DEPEVT_RXTXFIFOEVT:
 		dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
+		dep->dbg_ep_events.rxtxfifoevent++;
 		break;
 	case DWC3_DEPEVT_EPCMDCMPLT:
 		dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
+		dep->dbg_ep_events.epcmdcomplete++;
 		break;
 	}
 }
@@ -3266,15 +3274,19 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
 	switch (event->type) {
 	case DWC3_DEVICE_EVENT_DISCONNECT:
 		dwc3_gadget_disconnect_interrupt(dwc);
+		dwc->dbg_gadget_events.disconnect++;
 		break;
 	case DWC3_DEVICE_EVENT_RESET:
 		dwc3_gadget_reset_interrupt(dwc);
+		dwc->dbg_gadget_events.reset++;
 		break;
 	case DWC3_DEVICE_EVENT_CONNECT_DONE:
 		dwc3_gadget_conndone_interrupt(dwc);
+		dwc->dbg_gadget_events.connect++;
 		break;
 	case DWC3_DEVICE_EVENT_WAKEUP:
 		dwc3_gadget_wakeup_interrupt(dwc, false);
+		dwc->dbg_gadget_events.wakeup++;
 		break;
 	case DWC3_DEVICE_EVENT_HIBER_REQ:
 		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
@@ -3285,13 +3297,15 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
 		break;
 	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
 		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
+		dwc->dbg_gadget_events.link_status_change++;
 		break;
 	case DWC3_DEVICE_EVENT_SUSPEND:
 		if (dwc->revision < DWC3_REVISION_230A) {
 			dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
+			dwc->dbg_gadget_events.eopf++;
 		} else {
 			dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");
-
+			dwc->dbg_gadget_events.suspend++;
 			/*
 			 * Ignore suspend event until the gadget enters into
 			 * USB_STATE_CONFIGURED state.
@@ -3303,18 +3317,23 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
 		break;
 	case DWC3_DEVICE_EVENT_SOF:
 		dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
+		dwc->dbg_gadget_events.sof++;
 		break;
 	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
 		dwc3_trace(trace_dwc3_gadget, "Erratic Error");
+		dwc->dbg_gadget_events.erratic_error++;
 		break;
 	case DWC3_DEVICE_EVENT_CMD_CMPL:
 		dwc3_trace(trace_dwc3_gadget, "Command Complete");
+		dwc->dbg_gadget_events.cmdcmplt++;
 		break;
 	case DWC3_DEVICE_EVENT_OVERFLOW:
 		dwc3_trace(trace_dwc3_gadget, "Overflow");
+		dwc->dbg_gadget_events.overflow++;
 		break;
 	default:
 		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
+		dwc->dbg_gadget_events.unknown_event++;
 	}
 
 	dwc->err_evt_seen = (event->type == DWC3_DEVICE_EVENT_ERRATIC_ERROR);
diff --git a/include/dt-bindings/clock/qcom,camcc-skunk.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h
similarity index 97%
rename from include/dt-bindings/clock/qcom,camcc-skunk.h
rename to include/dt-bindings/clock/qcom,camcc-sdm845.h
index ea54fab..dbee8901 100644
--- a/include/dt-bindings/clock/qcom,camcc-skunk.h
+++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _DT_BINDINGS_CLK_MSM_CAM_CC_SKUNK_H
-#define _DT_BINDINGS_CLK_MSM_CAM_CC_SKUNK_H
+#ifndef _DT_BINDINGS_CLK_MSM_CAM_CC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_CAM_CC_SDM845_H
 
 #define CAM_CC_BPS_AHB_CLK					0
 #define CAM_CC_BPS_AREG_CLK					1
diff --git a/include/dt-bindings/clock/qcom,cpucc-skunk.h b/include/dt-bindings/clock/qcom,cpucc-sdm845.h
similarity index 90%
rename from include/dt-bindings/clock/qcom,cpucc-skunk.h
rename to include/dt-bindings/clock/qcom,cpucc-sdm845.h
index 2332969..c1ff2a0 100644
--- a/include/dt-bindings/clock/qcom,cpucc-skunk.h
+++ b/include/dt-bindings/clock/qcom,cpucc-sdm845.h
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _DT_BINDINGS_CLK_MSM_CPU_CC_SKUNK_H
-#define _DT_BINDINGS_CLK_MSM_CPU_CC_SKUNK_H
+#ifndef _DT_BINDINGS_CLK_MSM_CPU_CC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_CPU_CC_SDM845_H
 
 #define L3_CLUSTER0_VOTE_CLK					0
 #define L3_CLUSTER1_VOTE_CLK					1
diff --git a/include/dt-bindings/clock/qcom,dispcc-skunk.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
similarity index 96%
rename from include/dt-bindings/clock/qcom,dispcc-skunk.h
rename to include/dt-bindings/clock/qcom,dispcc-sdm845.h
index 835ebcb..10530c5 100644
--- a/include/dt-bindings/clock/qcom,dispcc-skunk.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _DT_BINDINGS_CLK_MSM_DISP_CC_SKUNK_H
-#define _DT_BINDINGS_CLK_MSM_DISP_CC_SKUNK_H
+#ifndef _DT_BINDINGS_CLK_MSM_DISP_CC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_DISP_CC_SDM845_H
 
 #define DISP_CC_DEBUG_CLK					0
 #define DISP_CC_MDSS_AHB_CLK					1
diff --git a/include/dt-bindings/clock/qcom,gcc-skunk.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
similarity index 98%
rename from include/dt-bindings/clock/qcom,gcc-skunk.h
rename to include/dt-bindings/clock/qcom,gcc-sdm845.h
index 7dfcffc..e409205 100644
--- a/include/dt-bindings/clock/qcom,gcc-skunk.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _DT_BINDINGS_CLK_MSM_GCC_SKUNK_H
-#define _DT_BINDINGS_CLK_MSM_GCC_SKUNK_H
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
 
 /* Hardware/Dummy/Voter clocks */
 #define GCC_XO							0
diff --git a/include/dt-bindings/clock/qcom,gpucc-skunk.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
similarity index 95%
rename from include/dt-bindings/clock/qcom,gpucc-skunk.h
rename to include/dt-bindings/clock/qcom,gpucc-sdm845.h
index 97a1014..41eb823 100644
--- a/include/dt-bindings/clock/qcom,gpucc-skunk.h
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _DT_BINDINGS_CLK_MSM_GPU_CC_SKUNK_H
-#define _DT_BINDINGS_CLK_MSM_GPU_CC_SKUNK_H
+#ifndef _DT_BINDINGS_CLK_MSM_GPU_CC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_GPU_CC_SDM845_H
 
 #define GPU_CC_ACD_AHB_CLK					0
 #define GPU_CC_ACD_CXO_CLK					1
diff --git a/include/dt-bindings/clock/qcom,videocc-skunk.h b/include/dt-bindings/clock/qcom,videocc-sdm845.h
similarity index 92%
rename from include/dt-bindings/clock/qcom,videocc-skunk.h
rename to include/dt-bindings/clock/qcom,videocc-sdm845.h
index cf654ed..723d2e0 100644
--- a/include/dt-bindings/clock/qcom,videocc-skunk.h
+++ b/include/dt-bindings/clock/qcom,videocc-sdm845.h
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _DT_BINDINGS_CLK_MSM_VIDEO_CC_SKUNK_H
-#define _DT_BINDINGS_CLK_MSM_VIDEO_CC_SKUNK_H
+#ifndef _DT_BINDINGS_CLK_MSM_VIDEO_CC_SDM845_H
+#define _DT_BINDINGS_CLK_MSM_VIDEO_CC_SDM845_H
 
 #define VIDEO_CC_APB_CLK					0
 #define VIDEO_CC_AT_CLK						1
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index f5987da..2656d5d 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -96,10 +96,10 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmhamster")
 #define early_machine_is_msmfalcon()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmfalcon")
-#define early_machine_is_msmskunk()	\
-	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmskunk")
-#define early_machine_is_sdmbat()	\
-	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdmbat")
+#define early_machine_is_sdm845()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm845")
+#define early_machine_is_sdm830()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm830")
 #else
 #define of_board_is_sim()		0
 #define of_board_is_rumi()		0
@@ -137,8 +137,8 @@
 #define early_machine_is_apqcobalt()	0
 #define early_machine_is_msmhamster()	0
 #define early_machine_is_msmfalcon()	0
-#define early_machine_is_msmskunk()	0
-#define early_machine_is_sdmbat()	0
+#define early_machine_is_sdm845()	0
+#define early_machine_is_sdm830()	0
 #endif
 
 #define PLATFORM_SUBTYPE_MDM	1
@@ -198,8 +198,8 @@ enum msm_cpu {
 	MSM_CPU_COBALT,
 	MSM_CPU_HAMSTER,
 	MSM_CPU_FALCON,
-	MSM_CPU_SKUNK,
-	MSM_CPU_BAT,
+	MSM_CPU_SDM845,
+	MSM_CPU_SDM830,
 };
 
 struct msm_soc_info {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3c8d4d7f..4f43951 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2320,44 +2320,6 @@ void __dl_clear_params(struct task_struct *p)
 	dl_se->dl_yielded = 0;
 }
 
-#ifdef CONFIG_SCHED_HMP
-/*
- * sched_exit() - Set EXITING_TASK_MARKER in task's ravg.demand field
- *
- * Stop accounting (exiting) task's future cpu usage
- *
- * We need this so that reset_all_windows_stats() can function correctly.
- * reset_all_window_stats() depends on do_each_thread/for_each_thread task
- * iterators to reset *all* task's statistics. Exiting tasks however become
- * invisible to those iterators. sched_exit() is called on a exiting task prior
- * to being removed from task_list, which will let reset_all_window_stats()
- * function correctly.
- */
-void sched_exit(struct task_struct *p)
-{
-	struct rq_flags rf;
-	struct rq *rq;
-	u64 wallclock;
-
-	sched_set_group_id(p, 0);
-
-	rq = task_rq_lock(p, &rf);
-
-	/* rq->curr == p */
-	wallclock = sched_ktime_clock();
-	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
-	dequeue_task(rq, p, 0);
-	reset_task_stats(p);
-	p->ravg.mark_start = wallclock;
-	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
-	free_task_load_ptrs(p);
-
-	enqueue_task(rq, p, 0);
-	clear_ed_task(p, rq);
-	task_rq_unlock(rq, p, &rf);
-}
-#endif /* CONFIG_SCHED_HMP */
-
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
@@ -9464,3 +9426,41 @@ const u32 sched_prio_to_wmult[40] = {
  /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
+
+#ifdef CONFIG_SCHED_HMP
+/*
+ * sched_exit() - Set EXITING_TASK_MARKER in task's ravg.demand field
+ *
+ * Stop accounting (exiting) task's future cpu usage
+ *
+ * We need this so that reset_all_windows_stats() can function correctly.
+ * reset_all_window_stats() depends on do_each_thread/for_each_thread task
+ * iterators to reset *all* tasks' statistics. Exiting tasks, however, become
+ * invisible to those iterators. sched_exit() is called on an exiting task prior
+ * to being removed from task_list, which will let reset_all_window_stats()
+ * function correctly.
+ */
+void sched_exit(struct task_struct *p)
+{
+	struct rq_flags rf;
+	struct rq *rq;
+	u64 wallclock;
+
+	sched_set_group_id(p, 0);
+
+	rq = task_rq_lock(p, &rf);
+
+	/* rq->curr == p */
+	wallclock = sched_ktime_clock();
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+	dequeue_task(rq, p, 0);
+	reset_task_stats(p);
+	p->ravg.mark_start = wallclock;
+	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
+	free_task_load_ptrs(p);
+
+	enqueue_task(rq, p, 0);
+	clear_ed_task(p, rq);
+	task_rq_unlock(rq, p, &rf);
+}
+#endif /* CONFIG_SCHED_HMP */
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index ffca478..124eb6a 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -18,6 +18,41 @@
 
 #include <linux/slab.h>
 
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
+{
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
+{
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
+			 u32 new_task_load, u32 new_pred_demand)
+{
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+				      pred_demand_delta);
+}
+
+#else	/* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
+
+#endif	/* CONFIG_SCHED_HMP */
+
 struct dl_bandwidth def_dl_bandwidth;
 
 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
@@ -820,41 +855,6 @@ static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_SCHED_HMP
-
-static void
-inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
-{
-	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
-{
-	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
-			 u32 new_task_load, u32 new_pred_demand)
-{
-	s64 task_load_delta = (s64)new_task_load - task_load(p);
-	s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
-	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
-				      pred_demand_delta);
-}
-
-#else	/* CONFIG_SCHED_HMP */
-
-static inline void
-inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
-
-static inline void
-dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
-
-#endif	/* CONFIG_SCHED_HMP */
-
 static inline
 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 48f25e3..7af3c6b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -34,6 +34,90 @@
 #include "sched.h"
 #include <trace/events/sched.h>
 
+/* QHMP forward declarations */
+
+struct lb_env;
+struct sd_lb_stats;
+struct sg_lb_stats;
+
+#ifdef CONFIG_SCHED_HMP
+
+#ifdef CONFIG_CFS_BANDWIDTH
+static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+				 struct task_struct *p, int change_cra);
+static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+				 struct task_struct *p, int change_cra);
+
+static inline void dec_throttled_cfs_rq_hmp_stats(
+				struct hmp_sched_stats *stats,
+				struct cfs_rq *cfs_rq);
+
+static inline void inc_throttled_cfs_rq_hmp_stats(
+				struct hmp_sched_stats *stats,
+				struct cfs_rq *cfs_rq);
+
+static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq);
+
+#else /* CONFIG_CFS_BANDWIDTH */
+static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+				 struct task_struct *p, int change_cra) { }
+static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+				 struct task_struct *p, int change_cra) { }
+#endif /* CONFIG_CFS_BANDWIDTH */
+
+static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+				       u32 new_task_load, u32 new_pred_demand);
+#ifdef CONFIG_SMP
+
+static struct rq *find_busiest_queue_hmp(struct lb_env *env,
+					struct sched_group *group);
+static int
+bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds);
+
+static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
+						  struct sd_lb_stats *sds,
+						  struct sched_group *sg,
+						  struct sg_lb_stats *sgs);
+
+static int select_best_cpu(struct task_struct *p, int target, int reason,
+			   int sync);
+
+#ifdef CONFIG_NO_HZ_COMMON
+static int find_new_hmp_ilb(int type);
+static int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type);
+#endif /* CONFIG_NO_HZ_COMMON */
+#endif /* CONFIG_SMP */
+#else /* CONFIG_SCHED_HMP */
+
+static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+				 struct task_struct *p, int change_cra) { }
+static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+				 struct task_struct *p, int change_cra) { }
+static inline void dec_throttled_cfs_rq_hmp_stats(
+				struct hmp_sched_stats *stats,
+				struct cfs_rq *cfs_rq) { }
+static inline void inc_throttled_cfs_rq_hmp_stats(
+				struct hmp_sched_stats *stats,
+				struct cfs_rq *cfs_rq) { }
+static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq) { }
+
+#ifdef CONFIG_SMP
+static inline int
+bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
+{
+	return 0;
+}
+
+static inline bool update_sd_pick_busiest_active_balance(struct lb_env *env,
+						  struct sd_lb_stats *sds,
+						  struct sched_group *sg,
+						  struct sg_lb_stats *sgs)
+{
+	return false;
+}
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_SCHED_HMP */
+
 /*
  * Targeted preemption latency for CPU-bound tasks:
  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -2777,1007 +2861,6 @@ static u32 __compute_runnable_contrib(u64 n)
 	return contrib + runnable_avg_yN_sum[n];
 }
 
-#ifdef CONFIG_SCHED_HMP
-
-/* CPU selection flag */
-#define SBC_FLAG_PREV_CPU				0x1
-#define SBC_FLAG_BEST_CAP_CPU				0x2
-#define SBC_FLAG_CPU_COST				0x4
-#define SBC_FLAG_MIN_COST				0x8
-#define SBC_FLAG_IDLE_LEAST_LOADED			0x10
-#define SBC_FLAG_IDLE_CSTATE				0x20
-#define SBC_FLAG_COST_CSTATE_TIE_BREAKER		0x40
-#define SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER	0x80
-#define SBC_FLAG_CSTATE_LOAD				0x100
-#define SBC_FLAG_BEST_SIBLING				0x200
-#define SBC_FLAG_WAKER_CPU				0x400
-#define SBC_FLAG_PACK_TASK				0x800
-
-/* Cluster selection flag */
-#define SBC_FLAG_COLOC_CLUSTER				0x10000
-#define SBC_FLAG_WAKER_CLUSTER				0x20000
-#define SBC_FLAG_BACKUP_CLUSTER				0x40000
-#define SBC_FLAG_BOOST_CLUSTER				0x80000
-
-struct cpu_select_env {
-	struct task_struct *p;
-	struct related_thread_group *rtg;
-	u8 reason;
-	u8 need_idle:1;
-	u8 need_waker_cluster:1;
-	u8 sync:1;
-	u8 ignore_prev_cpu:1;
-	enum sched_boost_policy boost_policy;
-	u8 pack_task:1;
-	int prev_cpu;
-	DECLARE_BITMAP(candidate_list, NR_CPUS);
-	DECLARE_BITMAP(backup_list, NR_CPUS);
-	u64 task_load;
-	u64 cpu_load;
-	u32 sbc_best_flag;
-	u32 sbc_best_cluster_flag;
-};
-
-struct cluster_cpu_stats {
-	int best_idle_cpu, least_loaded_cpu;
-	int best_capacity_cpu, best_cpu, best_sibling_cpu;
-	int min_cost, best_sibling_cpu_cost;
-	int best_cpu_wakeup_latency;
-	u64 min_load, best_load, best_sibling_cpu_load;
-	s64 highest_spare_capacity;
-};
-
-/*
- * Should task be woken to any available idle cpu?
- *
- * Waking tasks to idle cpu has mixed implications on both performance and
- * power. In many cases, scheduler can't estimate correctly impact of using idle
- * cpus on either performance or power. PF_WAKE_UP_IDLE allows external kernel
- * module to pass a strong hint to scheduler that the task in question should be
- * woken to idle cpu, generally to improve performance.
- */
-static inline int wake_to_idle(struct task_struct *p)
-{
-	return (current->flags & PF_WAKE_UP_IDLE) ||
-		 (p->flags & PF_WAKE_UP_IDLE);
-}
-
-static int spill_threshold_crossed(struct cpu_select_env *env, struct rq *rq)
-{
-	u64 total_load;
-
-	total_load = env->task_load + env->cpu_load;
-
-	if (total_load > sched_spill_load ||
-	    (rq->nr_running + 1) > sysctl_sched_spill_nr_run)
-		return 1;
-
-	return 0;
-}
-
-static int skip_cpu(int cpu, struct cpu_select_env *env)
-{
-	int tcpu = task_cpu(env->p);
-	int skip = 0;
-
-	if (!env->reason)
-		return 0;
-
-	if (is_reserved(cpu))
-		return 1;
-
-	switch (env->reason) {
-	case UP_MIGRATION:
-		skip = !idle_cpu(cpu);
-		break;
-	case IRQLOAD_MIGRATION:
-		/* Purposely fall through */
-	default:
-		skip = (cpu == tcpu);
-		break;
-	}
-
-	return skip;
-}
-
-static inline int
-acceptable_capacity(struct sched_cluster *cluster, struct cpu_select_env *env)
-{
-	int tcpu;
-
-	if (!env->reason)
-		return 1;
-
-	tcpu = task_cpu(env->p);
-	switch (env->reason) {
-	case UP_MIGRATION:
-		return cluster->capacity > cpu_capacity(tcpu);
-
-	case DOWN_MIGRATION:
-		return cluster->capacity < cpu_capacity(tcpu);
-
-	default:
-		break;
-	}
-
-	return 1;
-}
-
-static int
-skip_cluster(struct sched_cluster *cluster, struct cpu_select_env *env)
-{
-	if (!test_bit(cluster->id, env->candidate_list))
-		return 1;
-
-	if (!acceptable_capacity(cluster, env)) {
-		__clear_bit(cluster->id, env->candidate_list);
-		return 1;
-	}
-
-	return 0;
-}
-
-static struct sched_cluster *
-select_least_power_cluster(struct cpu_select_env *env)
-{
-	struct sched_cluster *cluster;
-
-	if (env->rtg) {
-		int cpu = cluster_first_cpu(env->rtg->preferred_cluster);
-
-		env->task_load = scale_load_to_cpu(task_load(env->p), cpu);
-
-		if (task_load_will_fit(env->p, env->task_load,
-					cpu, env->boost_policy)) {
-			env->sbc_best_cluster_flag |= SBC_FLAG_COLOC_CLUSTER;
-
-			if (env->boost_policy == SCHED_BOOST_NONE)
-				return env->rtg->preferred_cluster;
-
-			for_each_sched_cluster(cluster) {
-				if (cluster != env->rtg->preferred_cluster) {
-					__set_bit(cluster->id,
-						env->backup_list);
-					__clear_bit(cluster->id,
-						env->candidate_list);
-				}
-			}
-
-			return env->rtg->preferred_cluster;
-		}
-
-		/*
-		 * Since the task load does not fit on the preferred
-		 * cluster anymore, pretend that the task does not
-		 * have any preferred cluster. This allows the waking
-		 * task to get the appropriate CPU it needs as per the
-		 * non co-location placement policy without having to
-		 * wait until the preferred cluster is updated.
-		 */
-		env->rtg = NULL;
-	}
-
-	for_each_sched_cluster(cluster) {
-		if (!skip_cluster(cluster, env)) {
-			int cpu = cluster_first_cpu(cluster);
-
-			env->task_load = scale_load_to_cpu(task_load(env->p),
-									 cpu);
-			if (task_load_will_fit(env->p, env->task_load, cpu,
-					       env->boost_policy))
-				return cluster;
-
-			__set_bit(cluster->id, env->backup_list);
-			__clear_bit(cluster->id, env->candidate_list);
-		}
-	}
-
-	return NULL;
-}
-
-static struct sched_cluster *
-next_candidate(const unsigned long *list, int start, int end)
-{
-	int cluster_id;
-
-	cluster_id = find_next_bit(list, end, start - 1 + 1);
-	if (cluster_id >= end)
-		return NULL;
-
-	return sched_cluster[cluster_id];
-}
-
-static void
-update_spare_capacity(struct cluster_cpu_stats *stats,
-		      struct cpu_select_env *env, int cpu, int capacity,
-		      u64 cpu_load)
-{
-	s64 spare_capacity = sched_ravg_window - cpu_load;
-
-	if (spare_capacity > 0 &&
-	    (spare_capacity > stats->highest_spare_capacity ||
-	     (spare_capacity == stats->highest_spare_capacity &&
-	      ((!env->need_waker_cluster &&
-		capacity > cpu_capacity(stats->best_capacity_cpu)) ||
-	       (env->need_waker_cluster &&
-		cpu_rq(cpu)->nr_running <
-		cpu_rq(stats->best_capacity_cpu)->nr_running))))) {
-		/*
-		 * If sync waker is the only runnable of CPU, cr_avg of the
-		 * CPU is 0 so we have high chance to place the wakee on the
-		 * waker's CPU which likely causes preemtion of the waker.
-		 * This can lead migration of preempted waker.  Place the
-		 * wakee on the real idle CPU when it's possible by checking
-		 * nr_running to avoid such preemption.
-		 */
-		stats->highest_spare_capacity = spare_capacity;
-		stats->best_capacity_cpu = cpu;
-	}
-}
-
-static inline void find_backup_cluster(
-struct cpu_select_env *env, struct cluster_cpu_stats *stats)
-{
-	struct sched_cluster *next = NULL;
-	int i;
-
-	while (!bitmap_empty(env->backup_list, num_clusters)) {
-		next = next_candidate(env->backup_list, 0, num_clusters);
-		__clear_bit(next->id, env->backup_list);
-		for_each_cpu_and(i, &env->p->cpus_allowed, &next->cpus) {
-			trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
-			sched_irqload(i), power_cost(i, task_load(env->p) +
-					cpu_cravg_sync(i, env->sync)), 0);
-
-			update_spare_capacity(stats, env, i, next->capacity,
-					  cpu_load_sync(i, env->sync));
-		}
-		env->sbc_best_cluster_flag = SBC_FLAG_BACKUP_CLUSTER;
-	}
-}
-
-struct sched_cluster *
-next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
-					struct cluster_cpu_stats *stats)
-{
-	struct sched_cluster *next = NULL;
-
-	__clear_bit(cluster->id, env->candidate_list);
-
-	if (env->rtg && preferred_cluster(cluster, env->p))
-		return NULL;
-
-	do {
-		if (bitmap_empty(env->candidate_list, num_clusters))
-			return NULL;
-
-		next = next_candidate(env->candidate_list, 0, num_clusters);
-		if (next) {
-			if (next->min_power_cost > stats->min_cost) {
-				clear_bit(next->id, env->candidate_list);
-				next = NULL;
-				continue;
-			}
-
-			if (skip_cluster(next, env))
-				next = NULL;
-		}
-	} while (!next);
-
-	env->task_load = scale_load_to_cpu(task_load(env->p),
-					cluster_first_cpu(next));
-	return next;
-}
-
-#ifdef CONFIG_SCHED_HMP_CSTATE_AWARE
-static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
-				   struct cpu_select_env *env, int cpu_cost)
-{
-	int wakeup_latency;
-	int prev_cpu = env->prev_cpu;
-
-	wakeup_latency = cpu_rq(cpu)->wakeup_latency;
-
-	if (env->need_idle) {
-		stats->min_cost = cpu_cost;
-		if (idle_cpu(cpu)) {
-			if (wakeup_latency < stats->best_cpu_wakeup_latency ||
-			    (wakeup_latency == stats->best_cpu_wakeup_latency &&
-			     cpu == prev_cpu)) {
-				stats->best_idle_cpu = cpu;
-				stats->best_cpu_wakeup_latency = wakeup_latency;
-			}
-		} else {
-			if (env->cpu_load < stats->min_load ||
-				(env->cpu_load == stats->min_load &&
-							cpu == prev_cpu)) {
-				stats->least_loaded_cpu = cpu;
-				stats->min_load = env->cpu_load;
-			}
-		}
-
-		return;
-	}
-
-	if (cpu_cost < stats->min_cost)  {
-		stats->min_cost = cpu_cost;
-		stats->best_cpu_wakeup_latency = wakeup_latency;
-		stats->best_load = env->cpu_load;
-		stats->best_cpu = cpu;
-		env->sbc_best_flag = SBC_FLAG_CPU_COST;
-		return;
-	}
-
-	/* CPU cost is the same. Start breaking the tie by C-state */
-
-	if (wakeup_latency > stats->best_cpu_wakeup_latency)
-		return;
-
-	if (wakeup_latency < stats->best_cpu_wakeup_latency) {
-		stats->best_cpu_wakeup_latency = wakeup_latency;
-		stats->best_load = env->cpu_load;
-		stats->best_cpu = cpu;
-		env->sbc_best_flag = SBC_FLAG_COST_CSTATE_TIE_BREAKER;
-		return;
-	}
-
-	/* C-state is the same. Use prev CPU to break the tie */
-	if (cpu == prev_cpu) {
-		stats->best_cpu = cpu;
-		env->sbc_best_flag = SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER;
-		return;
-	}
-
-	if (stats->best_cpu != prev_cpu &&
-	    ((wakeup_latency == 0 && env->cpu_load < stats->best_load) ||
-	    (wakeup_latency > 0 && env->cpu_load > stats->best_load))) {
-		stats->best_load = env->cpu_load;
-		stats->best_cpu = cpu;
-		env->sbc_best_flag = SBC_FLAG_CSTATE_LOAD;
-	}
-}
-#else /* CONFIG_SCHED_HMP_CSTATE_AWARE */
-static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
-				   struct cpu_select_env *env, int cpu_cost)
-{
-	int prev_cpu = env->prev_cpu;
-
-	if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) {
-		if (stats->best_sibling_cpu_cost > cpu_cost ||
-		    (stats->best_sibling_cpu_cost == cpu_cost &&
-		     stats->best_sibling_cpu_load > env->cpu_load)) {
-			stats->best_sibling_cpu_cost = cpu_cost;
-			stats->best_sibling_cpu_load = env->cpu_load;
-			stats->best_sibling_cpu = cpu;
-		}
-	}
-
-	if ((cpu_cost < stats->min_cost) ||
-	    ((stats->best_cpu != prev_cpu &&
-	      stats->min_load > env->cpu_load) || cpu == prev_cpu)) {
-		if (env->need_idle) {
-			if (idle_cpu(cpu)) {
-				stats->min_cost = cpu_cost;
-				stats->best_idle_cpu = cpu;
-			}
-		} else {
-			stats->min_cost = cpu_cost;
-			stats->min_load = env->cpu_load;
-			stats->best_cpu = cpu;
-			env->sbc_best_flag = SBC_FLAG_MIN_COST;
-		}
-	}
-}
-#endif /* CONFIG_SCHED_HMP_CSTATE_AWARE */
-
-static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
-					 struct cpu_select_env *env)
-{
-	int cpu_cost;
-
-	/*
-	 * We try to find the least loaded *busy* CPU irrespective
-	 * of the power cost.
-	 */
-	if (env->pack_task)
-		cpu_cost = cpu_min_power_cost(cpu);
-
-	else
-		cpu_cost = power_cost(cpu, task_load(env->p) +
-				cpu_cravg_sync(cpu, env->sync));
-
-	if (cpu_cost <= stats->min_cost)
-		__update_cluster_stats(cpu, stats, env, cpu_cost);
-}
-
-static void find_best_cpu_in_cluster(struct sched_cluster *c,
-	 struct cpu_select_env *env, struct cluster_cpu_stats *stats)
-{
-	int i;
-	struct cpumask search_cpus;
-
-	cpumask_and(&search_cpus, tsk_cpus_allowed(env->p), &c->cpus);
-	cpumask_andnot(&search_cpus, &search_cpus, cpu_isolated_mask);
-
-	if (env->ignore_prev_cpu)
-		cpumask_clear_cpu(env->prev_cpu, &search_cpus);
-
-	env->need_idle = wake_to_idle(env->p) || c->wake_up_idle;
-
-	for_each_cpu(i, &search_cpus) {
-		env->cpu_load = cpu_load_sync(i, env->sync);
-
-		trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
-			sched_irqload(i),
-			power_cost(i, task_load(env->p) +
-					cpu_cravg_sync(i, env->sync)), 0);
-
-		if (unlikely(!cpu_active(i)) || skip_cpu(i, env))
-			continue;
-
-		update_spare_capacity(stats, env, i, c->capacity,
-				      env->cpu_load);
-
-		/*
-		 * need_idle takes precedence over sched boost but when both
-		 * are set, idlest CPU with in all the clusters is selected
-		 * when boost_policy = BOOST_ON_ALL whereas idlest CPU in the
-		 * big cluster is selected within boost_policy = BOOST_ON_BIG.
-		 */
-		if ((!env->need_idle &&
-		    env->boost_policy != SCHED_BOOST_NONE) ||
-		    env->need_waker_cluster ||
-		    sched_cpu_high_irqload(i) ||
-		    spill_threshold_crossed(env, cpu_rq(i)))
-			continue;
-
-		update_cluster_stats(i, stats, env);
-	}
-}
-
-static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
-{
-	stats->best_cpu = stats->best_idle_cpu = -1;
-	stats->best_capacity_cpu = stats->best_sibling_cpu  = -1;
-	stats->min_cost = stats->best_sibling_cpu_cost = INT_MAX;
-	stats->min_load	= stats->best_sibling_cpu_load = ULLONG_MAX;
-	stats->highest_spare_capacity = 0;
-	stats->least_loaded_cpu = -1;
-	stats->best_cpu_wakeup_latency = INT_MAX;
-	/* No need to initialize stats->best_load */
-}
-
-static inline bool env_has_special_flags(struct cpu_select_env *env)
-{
-	if (env->need_idle || env->boost_policy != SCHED_BOOST_NONE ||
-	    env->reason)
-		return true;
-
-	return false;
-}
-
-static inline bool
-bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
-{
-	int prev_cpu;
-	struct task_struct *task = env->p;
-	struct sched_cluster *cluster;
-
-	if (!task->ravg.mark_start || !sched_short_sleep_task_threshold)
-		return false;
-
-	prev_cpu = env->prev_cpu;
-	if (!cpumask_test_cpu(prev_cpu, tsk_cpus_allowed(task)) ||
-					unlikely(!cpu_active(prev_cpu)) ||
-					cpu_isolated(prev_cpu))
-		return false;
-
-	if (task->ravg.mark_start - task->last_cpu_selected_ts >=
-				sched_long_cpu_selection_threshold)
-		return false;
-
-	/*
-	 * This function should be used by task wake up path only as it's
-	 * assuming p->last_switch_out_ts as last sleep time.
-	 * p->last_switch_out_ts can denote last preemption time as well as
-	 * last sleep time.
-	 */
-	if (task->ravg.mark_start - task->last_switch_out_ts >=
-					sched_short_sleep_task_threshold)
-		return false;
-
-	env->task_load = scale_load_to_cpu(task_load(task), prev_cpu);
-	cluster = cpu_rq(prev_cpu)->cluster;
-
-	if (!task_load_will_fit(task, env->task_load, prev_cpu,
-				sched_boost_policy())) {
-
-		__set_bit(cluster->id, env->backup_list);
-		__clear_bit(cluster->id, env->candidate_list);
-		return false;
-	}
-
-	env->cpu_load = cpu_load_sync(prev_cpu, env->sync);
-	if (sched_cpu_high_irqload(prev_cpu) ||
-			spill_threshold_crossed(env, cpu_rq(prev_cpu))) {
-		update_spare_capacity(stats, env, prev_cpu,
-				cluster->capacity, env->cpu_load);
-		env->ignore_prev_cpu = 1;
-		return false;
-	}
-
-	return true;
-}
-
-static inline bool
-wake_to_waker_cluster(struct cpu_select_env *env)
-{
-	return env->sync &&
-	       task_load(current) > sched_big_waker_task_load &&
-	       task_load(env->p) < sched_small_wakee_task_load;
-}
-
-static inline bool
-bias_to_waker_cpu(struct task_struct *p, int cpu)
-{
-	return sysctl_sched_prefer_sync_wakee_to_waker &&
-	       cpu_rq(cpu)->nr_running == 1 &&
-	       cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
-	       cpu_active(cpu) && !cpu_isolated(cpu);
-}
-
-static inline int
-cluster_allowed(struct task_struct *p, struct sched_cluster *cluster)
-{
-	cpumask_t tmp_mask;
-
-	cpumask_and(&tmp_mask, &cluster->cpus, cpu_active_mask);
-	cpumask_and(&tmp_mask, &tmp_mask, &p->cpus_allowed);
-
-	return !cpumask_empty(&tmp_mask);
-}
-
-/* return cheapest cpu that can fit this task */
-static int select_best_cpu(struct task_struct *p, int target, int reason,
-			   int sync)
-{
-	struct sched_cluster *cluster, *pref_cluster = NULL;
-	struct cluster_cpu_stats stats;
-	struct related_thread_group *grp;
-	unsigned int sbc_flag = 0;
-	int cpu = raw_smp_processor_id();
-	bool special;
-
-	struct cpu_select_env env = {
-		.p			= p,
-		.reason			= reason,
-		.need_idle		= wake_to_idle(p),
-		.need_waker_cluster	= 0,
-		.sync			= sync,
-		.prev_cpu		= target,
-		.ignore_prev_cpu	= 0,
-		.rtg			= NULL,
-		.sbc_best_flag		= 0,
-		.sbc_best_cluster_flag	= 0,
-		.pack_task              = false,
-	};
-
-	env.boost_policy = task_sched_boost(p) ?
-			sched_boost_policy() : SCHED_BOOST_NONE;
-
-	bitmap_copy(env.candidate_list, all_cluster_ids, NR_CPUS);
-	bitmap_zero(env.backup_list, NR_CPUS);
-
-	init_cluster_cpu_stats(&stats);
-	special = env_has_special_flags(&env);
-
-	rcu_read_lock();
-
-	grp = task_related_thread_group(p);
-
-	if (grp && grp->preferred_cluster) {
-		pref_cluster = grp->preferred_cluster;
-		if (!cluster_allowed(p, pref_cluster))
-			clear_bit(pref_cluster->id, env.candidate_list);
-		else
-			env.rtg = grp;
-	} else if (!special) {
-		cluster = cpu_rq(cpu)->cluster;
-		if (wake_to_waker_cluster(&env)) {
-			if (bias_to_waker_cpu(p, cpu)) {
-				target = cpu;
-				sbc_flag = SBC_FLAG_WAKER_CLUSTER |
-					   SBC_FLAG_WAKER_CPU;
-				goto out;
-			} else if (cluster_allowed(p, cluster)) {
-				env.need_waker_cluster = 1;
-				bitmap_zero(env.candidate_list, NR_CPUS);
-				__set_bit(cluster->id, env.candidate_list);
-				env.sbc_best_cluster_flag =
-							SBC_FLAG_WAKER_CLUSTER;
-			}
-		} else if (bias_to_prev_cpu(&env, &stats)) {
-			sbc_flag = SBC_FLAG_PREV_CPU;
-			goto out;
-		}
-	}
-
-	if (!special && is_short_burst_task(p)) {
-		env.pack_task = true;
-		sbc_flag = SBC_FLAG_PACK_TASK;
-	}
-retry:
-	cluster = select_least_power_cluster(&env);
-
-	if (!cluster)
-		goto out;
-
-	/*
-	 * 'cluster' now points to the minimum power cluster which can satisfy
-	 * task's perf goals. Walk down the cluster list starting with that
-	 * cluster. For non-small tasks, skip clusters that don't have
-	 * mostly_idle/idle cpus
-	 */
-
-	do {
-		find_best_cpu_in_cluster(cluster, &env, &stats);
-
-	} while ((cluster = next_best_cluster(cluster, &env, &stats)));
-
-	if (env.need_idle) {
-		if (stats.best_idle_cpu >= 0) {
-			target = stats.best_idle_cpu;
-			sbc_flag |= SBC_FLAG_IDLE_CSTATE;
-		} else if (stats.least_loaded_cpu >= 0) {
-			target = stats.least_loaded_cpu;
-			sbc_flag |= SBC_FLAG_IDLE_LEAST_LOADED;
-		}
-	} else if (stats.best_cpu >= 0) {
-		if (stats.best_cpu != task_cpu(p) &&
-				stats.min_cost == stats.best_sibling_cpu_cost) {
-			stats.best_cpu = stats.best_sibling_cpu;
-			sbc_flag |= SBC_FLAG_BEST_SIBLING;
-		}
-		sbc_flag |= env.sbc_best_flag;
-		target = stats.best_cpu;
-	} else {
-		if (env.rtg && env.boost_policy == SCHED_BOOST_NONE) {
-			env.rtg = NULL;
-			goto retry;
-		}
-
-		/*
-		 * With boost_policy == SCHED_BOOST_ON_BIG, we reach here with
-		 * backup_list = little cluster, candidate_list = none and
-		 * stats->best_capacity_cpu points the best spare capacity
-		 * CPU among the CPUs in the big cluster.
-		 */
-		if (env.boost_policy == SCHED_BOOST_ON_BIG &&
-		    stats.best_capacity_cpu >= 0)
-			sbc_flag |= SBC_FLAG_BOOST_CLUSTER;
-		else
-			find_backup_cluster(&env, &stats);
-
-		if (stats.best_capacity_cpu >= 0) {
-			target = stats.best_capacity_cpu;
-			sbc_flag |= SBC_FLAG_BEST_CAP_CPU;
-		}
-	}
-	p->last_cpu_selected_ts = sched_ktime_clock();
-out:
-	sbc_flag |= env.sbc_best_cluster_flag;
-	rcu_read_unlock();
-	trace_sched_task_load(p, sched_boost_policy() && task_sched_boost(p),
-		env.reason, env.sync, env.need_idle, sbc_flag, target);
-	return target;
-}
-
-#ifdef CONFIG_CFS_BANDWIDTH
-
-static inline struct task_group *next_task_group(struct task_group *tg)
-{
-	tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
-
-	return (&tg->list == &task_groups) ? NULL : tg;
-}
-
-/* Iterate over all cfs_rq in a cpu */
-#define for_each_cfs_rq(cfs_rq, tg, cpu)	\
-	for (tg = container_of(&task_groups, struct task_group, list);	\
-		((tg = next_task_group(tg)) && (cfs_rq = tg->cfs_rq[cpu]));)
-
-void reset_cfs_rq_hmp_stats(int cpu, int reset_cra)
-{
-	struct task_group *tg;
-	struct cfs_rq *cfs_rq;
-
-	rcu_read_lock();
-
-	for_each_cfs_rq(cfs_rq, tg, cpu)
-		reset_hmp_stats(&cfs_rq->hmp_stats, reset_cra);
-
-	rcu_read_unlock();
-}
-
-static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
-
-static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-	 struct task_struct *p, int change_cra);
-static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-	 struct task_struct *p, int change_cra);
-
-/* Add task's contribution to a cpu' HMP statistics */
-void inc_hmp_sched_stats_fair(struct rq *rq,
-			struct task_struct *p, int change_cra)
-{
-	struct cfs_rq *cfs_rq;
-	struct sched_entity *se = &p->se;
-
-	/*
-	 * Although below check is not strictly required  (as
-	 * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg called
-	 * from inc_cfs_rq_hmp_stats() have similar checks), we gain a bit on
-	 * efficiency by short-circuiting for_each_sched_entity() loop when
-	 * sched_disable_window_stats
-	 */
-	if (sched_disable_window_stats)
-		return;
-
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-		inc_cfs_rq_hmp_stats(cfs_rq, p, change_cra);
-		if (cfs_rq_throttled(cfs_rq))
-			break;
-	}
-
-	/* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */
-	if (!se)
-		inc_rq_hmp_stats(rq, p, change_cra);
-}
-
-static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
-				       u32 new_task_load, u32 new_pred_demand)
-{
-	struct cfs_rq *cfs_rq;
-	struct sched_entity *se = &p->se;
-	s64 task_load_delta = (s64)new_task_load - task_load(p);
-	s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-
-		fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
-					      task_load_delta,
-					      pred_demand_delta);
-		fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta);
-		if (cfs_rq_throttled(cfs_rq))
-			break;
-	}
-
-	/* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
-	if (!se) {
-		fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
-					      task_load_delta,
-					      pred_demand_delta);
-		fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
-	}
-}
-
-static int task_will_be_throttled(struct task_struct *p);
-
-#else	/* CONFIG_CFS_BANDWIDTH */
-
-inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
-
-static void
-fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
-			   u32 new_task_load, u32 new_pred_demand)
-{
-	s64 task_load_delta = (s64)new_task_load - task_load(p);
-	s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
-	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
-				      pred_demand_delta);
-	fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
-}
-
-static inline int task_will_be_throttled(struct task_struct *p)
-{
-	return 0;
-}
-
-void inc_hmp_sched_stats_fair(struct rq *rq,
-			struct task_struct *p, int change_cra)
-{
-	inc_nr_big_task(&rq->hmp_stats, p);
-}
-
-#endif	/* CONFIG_CFS_BANDWIDTH */
-
-/*
- * Reset balance_interval at all sched_domain levels of given cpu, so that it
- * honors kick.
- */
-static inline void reset_balance_interval(int cpu)
-{
-	struct sched_domain *sd;
-
-	if (cpu >= nr_cpu_ids)
-		return;
-
-	rcu_read_lock();
-	for_each_domain(cpu, sd)
-		sd->balance_interval = 0;
-	rcu_read_unlock();
-}
-
-/*
- * Check if a task is on the "wrong" cpu (i.e its current cpu is not the ideal
- * cpu as per its demand or priority)
- *
- * Returns reason why task needs to be migrated
- */
-static inline int migration_needed(struct task_struct *p, int cpu)
-{
-	int nice;
-	struct related_thread_group *grp;
-
-	if (p->state != TASK_RUNNING || p->nr_cpus_allowed == 1)
-		return 0;
-
-	/* No need to migrate task that is about to be throttled */
-	if (task_will_be_throttled(p))
-		return 0;
-
-	if (sched_boost_policy() == SCHED_BOOST_ON_BIG &&
-		 cpu_capacity(cpu) != max_capacity && task_sched_boost(p))
-		return UP_MIGRATION;
-
-	if (sched_cpu_high_irqload(cpu))
-		return IRQLOAD_MIGRATION;
-
-	nice = task_nice(p);
-	rcu_read_lock();
-	grp = task_related_thread_group(p);
-	if (!grp && (nice > SCHED_UPMIGRATE_MIN_NICE ||
-	       upmigrate_discouraged(p)) && cpu_capacity(cpu) > min_capacity) {
-		rcu_read_unlock();
-		return DOWN_MIGRATION;
-	}
-
-	if (!task_will_fit(p, cpu)) {
-		rcu_read_unlock();
-		return UP_MIGRATION;
-	}
-	rcu_read_unlock();
-
-	return 0;
-}
-
-static inline int
-kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
-{
-	unsigned long flags;
-	int rc = 0;
-
-	/* Invoke active balance to force migrate currently running task */
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	if (!rq->active_balance) {
-		rq->active_balance = 1;
-		rq->push_cpu = new_cpu;
-		get_task_struct(p);
-		rq->push_task = p;
-		rc = 1;
-	}
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-	return rc;
-}
-
-static DEFINE_RAW_SPINLOCK(migration_lock);
-
-/*
- * Check if currently running task should be migrated to a better cpu.
- *
- * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
- */
-void check_for_migration(struct rq *rq, struct task_struct *p)
-{
-	int cpu = cpu_of(rq), new_cpu;
-	int active_balance = 0, reason;
-
-	reason = migration_needed(p, cpu);
-	if (!reason)
-		return;
-
-	raw_spin_lock(&migration_lock);
-	new_cpu = select_best_cpu(p, cpu, reason, 0);
-
-	if (new_cpu != cpu) {
-		active_balance = kick_active_balance(rq, p, new_cpu);
-		if (active_balance)
-			mark_reserved(new_cpu);
-	}
-
-	raw_spin_unlock(&migration_lock);
-
-	if (active_balance)
-		stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
-					&rq->active_balance_work);
-}
-
-#ifdef CONFIG_CFS_BANDWIDTH
-
-static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
-{
-	cfs_rq->hmp_stats.nr_big_tasks = 0;
-	cfs_rq->hmp_stats.cumulative_runnable_avg = 0;
-	cfs_rq->hmp_stats.pred_demands_sum = 0;
-}
-
-static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-		 struct task_struct *p, int change_cra)
-{
-	inc_nr_big_task(&cfs_rq->hmp_stats, p);
-	if (change_cra)
-		inc_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
-}
-
-static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-		 struct task_struct *p, int change_cra)
-{
-	dec_nr_big_task(&cfs_rq->hmp_stats, p);
-	if (change_cra)
-		dec_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
-}
-
-static void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
-			 struct cfs_rq *cfs_rq)
-{
-	stats->nr_big_tasks += cfs_rq->hmp_stats.nr_big_tasks;
-	stats->cumulative_runnable_avg +=
-				cfs_rq->hmp_stats.cumulative_runnable_avg;
-	stats->pred_demands_sum += cfs_rq->hmp_stats.pred_demands_sum;
-}
-
-static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
-				 struct cfs_rq *cfs_rq)
-{
-	stats->nr_big_tasks -= cfs_rq->hmp_stats.nr_big_tasks;
-	stats->cumulative_runnable_avg -=
-				cfs_rq->hmp_stats.cumulative_runnable_avg;
-	stats->pred_demands_sum -= cfs_rq->hmp_stats.pred_demands_sum;
-
-	BUG_ON(stats->nr_big_tasks < 0 ||
-		(s64)stats->cumulative_runnable_avg < 0);
-	BUG_ON((s64)stats->pred_demands_sum < 0);
-}
-
-#else	/* CONFIG_CFS_BANDWIDTH */
-
-static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-	 struct task_struct *p, int change_cra) { }
-
-static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-	 struct task_struct *p, int change_cra) { }
-
-#endif	/* CONFIG_CFS_BANDWIDTH */
-
-#else	/* CONFIG_SCHED_HMP */
-
-static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq) { }
-
-static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-	 struct task_struct *p, int change_cra) { }
-
-static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-	 struct task_struct *p, int change_cra) { }
-
-#define dec_throttled_cfs_rq_hmp_stats(...)
-#define inc_throttled_cfs_rq_hmp_stats(...)
-
-#endif	/* CONFIG_SCHED_HMP */
-
 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
 
 /*
@@ -4295,12 +3378,6 @@ static inline int idle_balance(struct rq *rq)
 	return 0;
 }
 
-static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-	 struct task_struct *p, int change_cra) { }
-
-static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-	 struct task_struct *p, int change_cra) { }
-
 #endif /* CONFIG_SMP */
 
 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -4912,35 +3989,6 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 	return cfs_bandwidth_used() && cfs_rq->throttled;
 }
 
-#ifdef CONFIG_SCHED_HMP
-/*
- * Check if task is part of a hierarchy where some cfs_rq does not have any
- * runtime left.
- *
- * We can't rely on throttled_hierarchy() to do this test, as
- * cfs_rq->throttle_count will not be updated yet when this function is called
- * from scheduler_tick()
- */
-static int task_will_be_throttled(struct task_struct *p)
-{
-	struct sched_entity *se = &p->se;
-	struct cfs_rq *cfs_rq;
-
-	if (!cfs_bandwidth_used())
-		return 0;
-
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-		if (!cfs_rq->runtime_enabled)
-			continue;
-		if (cfs_rq->runtime_remaining <= 0)
-			return 1;
-	}
-
-	return 0;
-}
-#endif
-
 /* check whether cfs_rq, or any parent, is throttled */
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
 {
@@ -7936,56 +6984,6 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
 	};
 }
 
-#ifdef CONFIG_SCHED_HMP
-
-static int
-bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
-{
-	int local_cpu, busiest_cpu;
-	int local_capacity, busiest_capacity;
-	int local_pwr_cost, busiest_pwr_cost;
-	int nr_cpus;
-	int boost = sched_boost();
-
-	if (!sysctl_sched_restrict_cluster_spill ||
-		boost == FULL_THROTTLE_BOOST || boost == CONSERVATIVE_BOOST)
-		return 0;
-
-	local_cpu = group_first_cpu(sds->local);
-	busiest_cpu = group_first_cpu(sds->busiest);
-
-	local_capacity = cpu_max_possible_capacity(local_cpu);
-	busiest_capacity = cpu_max_possible_capacity(busiest_cpu);
-
-	local_pwr_cost = cpu_max_power_cost(local_cpu);
-	busiest_pwr_cost = cpu_max_power_cost(busiest_cpu);
-
-	if (local_pwr_cost <= busiest_pwr_cost)
-		return 0;
-
-	if (local_capacity > busiest_capacity &&
-			sds->busiest_stat.sum_nr_big_tasks)
-		return 0;
-
-	nr_cpus = cpumask_weight(sched_group_cpus(sds->busiest));
-	if ((sds->busiest_stat.group_cpu_load < nr_cpus * sched_spill_load) &&
-		(sds->busiest_stat.sum_nr_running <
-			nr_cpus * sysctl_sched_spill_nr_run))
-		return 1;
-
-	return 0;
-}
-
-#else	/* CONFIG_SCHED_HMP */
-
-static inline int
-bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
-{
-	return 0;
-}
-
-#endif	/* CONFIG_SCHED_HMP */
-
 /**
  * get_sd_load_idx - Obtain the load index for a given sched domain.
  * @sd: The sched_domain whose load_idx is to be obtained.
@@ -8316,33 +7314,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 }
 
-#ifdef CONFIG_SCHED_HMP
-static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
-						  struct sd_lb_stats *sds,
-						  struct sched_group *sg,
-						  struct sg_lb_stats *sgs)
-{
-	if (env->idle != CPU_NOT_IDLE &&
-	    cpu_capacity(env->dst_cpu) > group_rq_capacity(sg)) {
-		if (sgs->sum_nr_big_tasks >
-				sds->busiest_stat.sum_nr_big_tasks) {
-			env->flags |= LBF_BIG_TASK_ACTIVE_BALANCE;
-			return true;
-		}
-	}
-
-	return false;
-}
-#else
-static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
-						  struct sd_lb_stats *sds,
-						  struct sched_group *sg,
-						  struct sg_lb_stats *sgs)
-{
-	return false;
-}
-#endif
-
 /**
  * update_sd_pick_busiest - return 1 on busiest group
  * @env: The load balancing environment.
@@ -8804,60 +7775,6 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 	return NULL;
 }
 
-#ifdef CONFIG_SCHED_HMP
-static struct rq *find_busiest_queue_hmp(struct lb_env *env,
-				     struct sched_group *group)
-{
-	struct rq *busiest = NULL, *busiest_big = NULL;
-	u64 max_runnable_avg = 0, max_runnable_avg_big = 0;
-	int max_nr_big = 0, nr_big;
-	bool find_big = !!(env->flags & LBF_BIG_TASK_ACTIVE_BALANCE);
-	int i;
-	cpumask_t cpus;
-
-	cpumask_andnot(&cpus, sched_group_cpus(group), cpu_isolated_mask);
-
-	for_each_cpu(i, &cpus) {
-		struct rq *rq = cpu_rq(i);
-		u64 cumulative_runnable_avg =
-				rq->hmp_stats.cumulative_runnable_avg;
-
-		if (!cpumask_test_cpu(i, env->cpus))
-			continue;
-
-
-		if (find_big) {
-			nr_big = nr_big_tasks(rq);
-			if (nr_big > max_nr_big ||
-			    (nr_big > 0 && nr_big == max_nr_big &&
-			     cumulative_runnable_avg > max_runnable_avg_big)) {
-				max_runnable_avg_big = cumulative_runnable_avg;
-				busiest_big = rq;
-				max_nr_big = nr_big;
-				continue;
-			}
-		}
-
-		if (cumulative_runnable_avg > max_runnable_avg) {
-			max_runnable_avg = cumulative_runnable_avg;
-			busiest = rq;
-		}
-	}
-
-	if (busiest_big)
-		return busiest_big;
-
-	env->flags &= ~LBF_BIG_TASK_ACTIVE_BALANCE;
-	return busiest;
-}
-#else
-static inline struct rq *find_busiest_queue_hmp(struct lb_env *env,
-					struct sched_group *group)
-{
-	return NULL;
-}
-#endif
-
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
@@ -9593,34 +8510,6 @@ static struct {
 	unsigned long next_balance;     /* in jiffy units */
 } nohz ____cacheline_aligned;
 
-#ifdef CONFIG_SCHED_HMP
-static inline int find_new_hmp_ilb(int type)
-{
-	int call_cpu = raw_smp_processor_id();
-	struct sched_domain *sd;
-	int ilb;
-
-	rcu_read_lock();
-
-	/* Pick an idle cpu "closest" to call_cpu */
-	for_each_domain(call_cpu, sd) {
-		for_each_cpu_and(ilb, nohz.idle_cpus_mask,
-						sched_domain_span(sd)) {
-			if (idle_cpu(ilb) && (type != NOHZ_KICK_RESTRICT ||
-					cpu_max_power_cost(ilb) <=
-					cpu_max_power_cost(call_cpu))) {
-				rcu_read_unlock();
-				reset_balance_interval(ilb);
-				return ilb;
-			}
-		}
-	}
-
-	rcu_read_unlock();
-	return nr_cpu_ids;
-}
-#endif	/* CONFIG_SCHED_HMP */
-
 static inline int find_new_ilb(int type)
 {
 	int ilb = cpumask_first(nohz.idle_cpus_mask);
@@ -9933,45 +8822,6 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
 
-#ifdef CONFIG_SCHED_HMP
-static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
-{
-	struct sched_domain *sd;
-	int i;
-
-	if (rq->nr_running < 2)
-		return 0;
-
-	if (!sysctl_sched_restrict_cluster_spill ||
-			sched_boost_policy() == SCHED_BOOST_ON_ALL)
-		return 1;
-
-	if (cpu_max_power_cost(cpu) == max_power_cost)
-		return 1;
-
-	rcu_read_lock();
-	sd = rcu_dereference_check_sched_domain(rq->sd);
-	if (!sd) {
-		rcu_read_unlock();
-		return 0;
-	}
-
-	for_each_cpu(i, sched_domain_span(sd)) {
-		if (cpu_load(i) < sched_spill_load &&
-				cpu_rq(i)->nr_running <
-				sysctl_sched_spill_nr_run) {
-			/* Change the kick type to limit to CPUs that
-			 * are of equal or lower capacity.
-			 */
-			*type = NOHZ_KICK_RESTRICT;
-			break;
-		}
-	}
-	rcu_read_unlock();
-	return 1;
-}
-#endif /* CONFIG_SCHED_HMP */
-
 static inline int _nohz_kick_needed(struct rq *rq, int cpu, int *type)
 {
 	unsigned long now = jiffies;
@@ -10651,3 +9501,1180 @@ __init void init_sched_fair_class(void)
 #endif /* SMP */
 
 }
+
+/* QHMP sched implementation begins here */
+
+#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SMP
+
+/* CPU selection flag */
+#define SBC_FLAG_PREV_CPU				0x1
+#define SBC_FLAG_BEST_CAP_CPU				0x2
+#define SBC_FLAG_CPU_COST				0x4
+#define SBC_FLAG_MIN_COST				0x8
+#define SBC_FLAG_IDLE_LEAST_LOADED			0x10
+#define SBC_FLAG_IDLE_CSTATE				0x20
+#define SBC_FLAG_COST_CSTATE_TIE_BREAKER		0x40
+#define SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER	0x80
+#define SBC_FLAG_CSTATE_LOAD				0x100
+#define SBC_FLAG_BEST_SIBLING				0x200
+#define SBC_FLAG_WAKER_CPU				0x400
+#define SBC_FLAG_PACK_TASK				0x800
+
+/* Cluster selection flag */
+#define SBC_FLAG_COLOC_CLUSTER				0x10000
+#define SBC_FLAG_WAKER_CLUSTER				0x20000
+#define SBC_FLAG_BACKUP_CLUSTER				0x40000
+#define SBC_FLAG_BOOST_CLUSTER				0x80000
+
+struct cpu_select_env {
+	struct task_struct *p;
+	struct related_thread_group *rtg;
+	u8 reason;
+	u8 need_idle:1;
+	u8 need_waker_cluster:1;
+	u8 sync:1;
+	u8 ignore_prev_cpu:1;
+	enum sched_boost_policy boost_policy;
+	u8 pack_task:1;
+	int prev_cpu;
+	DECLARE_BITMAP(candidate_list, NR_CPUS);
+	DECLARE_BITMAP(backup_list, NR_CPUS);
+	u64 task_load;
+	u64 cpu_load;
+	u32 sbc_best_flag;
+	u32 sbc_best_cluster_flag;
+};
+
+struct cluster_cpu_stats {
+	int best_idle_cpu, least_loaded_cpu;
+	int best_capacity_cpu, best_cpu, best_sibling_cpu;
+	int min_cost, best_sibling_cpu_cost;
+	int best_cpu_wakeup_latency;
+	u64 min_load, best_load, best_sibling_cpu_load;
+	s64 highest_spare_capacity;
+};
+
+/*
+ * Should the task be woken to any available idle cpu?
+ *
+ * Waking tasks to an idle cpu has mixed implications on both performance and
+ * power. In many cases, the scheduler can't correctly estimate the impact of
+ * using idle cpus on either performance or power. PF_WAKE_UP_IDLE allows an
+ * external kernel module to pass a strong hint to the scheduler that the task
+ * in question should be woken to an idle cpu, generally to improve performance.
+ */
+static inline int wake_to_idle(struct task_struct *p)
+{
+	return (current->flags & PF_WAKE_UP_IDLE) ||
+		 (p->flags & PF_WAKE_UP_IDLE);
+}
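+
+/*
+ * Illustrative sketch, not part of this patch: a module could request the
+ * hint by setting the flag directly on a task, e.g.
+ *
+ *	task->flags |= PF_WAKE_UP_IDLE;
+ *
+ * Setting it on a task makes that task's own wakeups target idle cpus, and
+ * setting it on a waker does the same for the tasks it wakes (the check
+ * above looks at both current and p). How the flag is actually toggled is
+ * tree-specific; the raw flag update here is only an assumption.
+ */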
+
+static int spill_threshold_crossed(struct cpu_select_env *env, struct rq *rq)
+{
+	u64 total_load;
+
+	total_load = env->task_load + env->cpu_load;
+
+	if (total_load > sched_spill_load ||
+	    (rq->nr_running + 1) > sysctl_sched_spill_nr_run)
+		return 1;
+
+	return 0;
+}
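+
+/*
+ * Worked example with assumed numbers (not taken from this patch): with
+ * sched_spill_load at 90% of the window and sysctl_sched_spill_nr_run = 3, a
+ * wakee whose scaled load is 30% aimed at a CPU already carrying 70% load
+ * spills because 30 + 70 > 90, even though nr_running + 1 == 3 does not
+ * exceed the run-queue limit; either condition alone is enough to reject
+ * the CPU.
+ */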
+
+static int skip_cpu(int cpu, struct cpu_select_env *env)
+{
+	int tcpu = task_cpu(env->p);
+	int skip = 0;
+
+	if (!env->reason)
+		return 0;
+
+	if (is_reserved(cpu))
+		return 1;
+
+	switch (env->reason) {
+	case UP_MIGRATION:
+		skip = !idle_cpu(cpu);
+		break;
+	case IRQLOAD_MIGRATION:
+		/* Purposely fall through */
+	default:
+		skip = (cpu == tcpu);
+		break;
+	}
+
+	return skip;
+}
+
+static inline int
+acceptable_capacity(struct sched_cluster *cluster, struct cpu_select_env *env)
+{
+	int tcpu;
+
+	if (!env->reason)
+		return 1;
+
+	tcpu = task_cpu(env->p);
+	switch (env->reason) {
+	case UP_MIGRATION:
+		return cluster->capacity > cpu_capacity(tcpu);
+
+	case DOWN_MIGRATION:
+		return cluster->capacity < cpu_capacity(tcpu);
+
+	default:
+		break;
+	}
+
+	return 1;
+}
+
+static int
+skip_cluster(struct sched_cluster *cluster, struct cpu_select_env *env)
+{
+	if (!test_bit(cluster->id, env->candidate_list))
+		return 1;
+
+	if (!acceptable_capacity(cluster, env)) {
+		__clear_bit(cluster->id, env->candidate_list);
+		return 1;
+	}
+
+	return 0;
+}
+
+static struct sched_cluster *
+select_least_power_cluster(struct cpu_select_env *env)
+{
+	struct sched_cluster *cluster;
+
+	if (env->rtg) {
+		int cpu = cluster_first_cpu(env->rtg->preferred_cluster);
+
+		env->task_load = scale_load_to_cpu(task_load(env->p), cpu);
+
+		if (task_load_will_fit(env->p, env->task_load,
+					cpu, env->boost_policy)) {
+			env->sbc_best_cluster_flag |= SBC_FLAG_COLOC_CLUSTER;
+
+			if (env->boost_policy == SCHED_BOOST_NONE)
+				return env->rtg->preferred_cluster;
+
+			for_each_sched_cluster(cluster) {
+				if (cluster != env->rtg->preferred_cluster) {
+					__set_bit(cluster->id,
+						env->backup_list);
+					__clear_bit(cluster->id,
+						env->candidate_list);
+				}
+			}
+
+			return env->rtg->preferred_cluster;
+		}
+
+		/*
+		 * Since the task load does not fit on the preferred
+		 * cluster anymore, pretend that the task does not
+		 * have any preferred cluster. This allows the waking
+		 * task to get the appropriate CPU it needs as per the
+		 * non co-location placement policy without having to
+		 * wait until the preferred cluster is updated.
+		 */
+		env->rtg = NULL;
+	}
+
+	for_each_sched_cluster(cluster) {
+		if (!skip_cluster(cluster, env)) {
+			int cpu = cluster_first_cpu(cluster);
+
+			env->task_load = scale_load_to_cpu(task_load(env->p),
+									 cpu);
+			if (task_load_will_fit(env->p, env->task_load, cpu,
+					       env->boost_policy))
+				return cluster;
+
+			__set_bit(cluster->id, env->backup_list);
+			__clear_bit(cluster->id, env->candidate_list);
+		}
+	}
+
+	return NULL;
+}
+
+static struct sched_cluster *
+next_candidate(const unsigned long *list, int start, int end)
+{
+	int cluster_id;
+
+	cluster_id = find_next_bit(list, end, start - 1 + 1);
+	if (cluster_id >= end)
+		return NULL;
+
+	return sched_cluster[cluster_id];
+}
+
+static void
+update_spare_capacity(struct cluster_cpu_stats *stats,
+		      struct cpu_select_env *env, int cpu, int capacity,
+		      u64 cpu_load)
+{
+	s64 spare_capacity = sched_ravg_window - cpu_load;
+
+	if (spare_capacity > 0 &&
+	    (spare_capacity > stats->highest_spare_capacity ||
+	     (spare_capacity == stats->highest_spare_capacity &&
+	      ((!env->need_waker_cluster &&
+		capacity > cpu_capacity(stats->best_capacity_cpu)) ||
+	       (env->need_waker_cluster &&
+		cpu_rq(cpu)->nr_running <
+		cpu_rq(stats->best_capacity_cpu)->nr_running))))) {
+		/*
+		 * If the sync waker is the only runnable task on a CPU, the
+		 * CPU's cr_avg is 0, so there is a high chance of placing the
+		 * wakee on the waker's CPU, which likely causes preemption of
+		 * the waker. This can lead to migration of the preempted
+		 * waker. Place the wakee on a truly idle CPU when possible by
+		 * checking nr_running to avoid such preemption.
+		 */
+		stats->highest_spare_capacity = spare_capacity;
+		stats->best_capacity_cpu = cpu;
+	}
+}
+
+static inline void find_backup_cluster(struct cpu_select_env *env,
+				       struct cluster_cpu_stats *stats)
+{
+	struct sched_cluster *next = NULL;
+	int i;
+
+	while (!bitmap_empty(env->backup_list, num_clusters)) {
+		next = next_candidate(env->backup_list, 0, num_clusters);
+		__clear_bit(next->id, env->backup_list);
+		for_each_cpu_and(i, &env->p->cpus_allowed, &next->cpus) {
+			trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
+			sched_irqload(i), power_cost(i, task_load(env->p) +
+					cpu_cravg_sync(i, env->sync)), 0);
+
+			update_spare_capacity(stats, env, i, next->capacity,
+					  cpu_load_sync(i, env->sync));
+		}
+		env->sbc_best_cluster_flag = SBC_FLAG_BACKUP_CLUSTER;
+	}
+}
+
+struct sched_cluster *
+next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
+					struct cluster_cpu_stats *stats)
+{
+	struct sched_cluster *next = NULL;
+
+	__clear_bit(cluster->id, env->candidate_list);
+
+	if (env->rtg && preferred_cluster(cluster, env->p))
+		return NULL;
+
+	do {
+		if (bitmap_empty(env->candidate_list, num_clusters))
+			return NULL;
+
+		next = next_candidate(env->candidate_list, 0, num_clusters);
+		if (next) {
+			if (next->min_power_cost > stats->min_cost) {
+				clear_bit(next->id, env->candidate_list);
+				next = NULL;
+				continue;
+			}
+
+			if (skip_cluster(next, env))
+				next = NULL;
+		}
+	} while (!next);
+
+	env->task_load = scale_load_to_cpu(task_load(env->p),
+					cluster_first_cpu(next));
+	return next;
+}
+
+#ifdef CONFIG_SCHED_HMP_CSTATE_AWARE
+static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+				   struct cpu_select_env *env, int cpu_cost)
+{
+	int wakeup_latency;
+	int prev_cpu = env->prev_cpu;
+
+	wakeup_latency = cpu_rq(cpu)->wakeup_latency;
+
+	if (env->need_idle) {
+		stats->min_cost = cpu_cost;
+		if (idle_cpu(cpu)) {
+			if (wakeup_latency < stats->best_cpu_wakeup_latency ||
+			    (wakeup_latency == stats->best_cpu_wakeup_latency &&
+			     cpu == prev_cpu)) {
+				stats->best_idle_cpu = cpu;
+				stats->best_cpu_wakeup_latency = wakeup_latency;
+			}
+		} else {
+			if (env->cpu_load < stats->min_load ||
+				(env->cpu_load == stats->min_load &&
+							cpu == prev_cpu)) {
+				stats->least_loaded_cpu = cpu;
+				stats->min_load = env->cpu_load;
+			}
+		}
+
+		return;
+	}
+
+	if (cpu_cost < stats->min_cost)  {
+		stats->min_cost = cpu_cost;
+		stats->best_cpu_wakeup_latency = wakeup_latency;
+		stats->best_load = env->cpu_load;
+		stats->best_cpu = cpu;
+		env->sbc_best_flag = SBC_FLAG_CPU_COST;
+		return;
+	}
+
+	/* CPU cost is the same. Start breaking the tie by C-state */
+
+	if (wakeup_latency > stats->best_cpu_wakeup_latency)
+		return;
+
+	if (wakeup_latency < stats->best_cpu_wakeup_latency) {
+		stats->best_cpu_wakeup_latency = wakeup_latency;
+		stats->best_load = env->cpu_load;
+		stats->best_cpu = cpu;
+		env->sbc_best_flag = SBC_FLAG_COST_CSTATE_TIE_BREAKER;
+		return;
+	}
+
+	/* C-state is the same. Use prev CPU to break the tie */
+	if (cpu == prev_cpu) {
+		stats->best_cpu = cpu;
+		env->sbc_best_flag = SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER;
+		return;
+	}
+
+	if (stats->best_cpu != prev_cpu &&
+	    ((wakeup_latency == 0 && env->cpu_load < stats->best_load) ||
+	    (wakeup_latency > 0 && env->cpu_load > stats->best_load))) {
+		stats->best_load = env->cpu_load;
+		stats->best_cpu = cpu;
+		env->sbc_best_flag = SBC_FLAG_CSTATE_LOAD;
+	}
+}
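+
+/*
+ * Tie-breaking order of the C-state aware variant above, summarized with an
+ * illustrative case: candidates are compared on power cost first, then on
+ * C-state exit latency, then prev_cpu wins, and load decides last. With two
+ * equal-cost CPUs where CPU0 reports a wakeup_latency of 10 and CPU1 of 0,
+ * CPU1 is chosen (SBC_FLAG_COST_CSTATE_TIE_BREAKER). If the latencies also
+ * tie at 0 the less loaded CPU is preferred, while in a non-zero C-state the
+ * more loaded CPU is preferred so that deeper-idle CPUs stay undisturbed.
+ */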
+#else /* CONFIG_SCHED_HMP_CSTATE_AWARE */
+static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+				   struct cpu_select_env *env, int cpu_cost)
+{
+	int prev_cpu = env->prev_cpu;
+
+	if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) {
+		if (stats->best_sibling_cpu_cost > cpu_cost ||
+		    (stats->best_sibling_cpu_cost == cpu_cost &&
+		     stats->best_sibling_cpu_load > env->cpu_load)) {
+			stats->best_sibling_cpu_cost = cpu_cost;
+			stats->best_sibling_cpu_load = env->cpu_load;
+			stats->best_sibling_cpu = cpu;
+		}
+	}
+
+	if ((cpu_cost < stats->min_cost) ||
+	    ((stats->best_cpu != prev_cpu &&
+	      stats->min_load > env->cpu_load) || cpu == prev_cpu)) {
+		if (env->need_idle) {
+			if (idle_cpu(cpu)) {
+				stats->min_cost = cpu_cost;
+				stats->best_idle_cpu = cpu;
+			}
+		} else {
+			stats->min_cost = cpu_cost;
+			stats->min_load = env->cpu_load;
+			stats->best_cpu = cpu;
+			env->sbc_best_flag = SBC_FLAG_MIN_COST;
+		}
+	}
+}
+#endif /* CONFIG_SCHED_HMP_CSTATE_AWARE */
+
+static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+					 struct cpu_select_env *env)
+{
+	int cpu_cost;
+
+	/*
+	 * We try to find the least loaded *busy* CPU irrespective
+	 * of the power cost.
+	 */
+	if (env->pack_task)
+		cpu_cost = cpu_min_power_cost(cpu);
+
+	else
+		cpu_cost = power_cost(cpu, task_load(env->p) +
+				cpu_cravg_sync(cpu, env->sync));
+
+	if (cpu_cost <= stats->min_cost)
+		__update_cluster_stats(cpu, stats, env, cpu_cost);
+}
+
+static void find_best_cpu_in_cluster(struct sched_cluster *c,
+	 struct cpu_select_env *env, struct cluster_cpu_stats *stats)
+{
+	int i;
+	struct cpumask search_cpus;
+
+	cpumask_and(&search_cpus, tsk_cpus_allowed(env->p), &c->cpus);
+	cpumask_andnot(&search_cpus, &search_cpus, cpu_isolated_mask);
+
+	if (env->ignore_prev_cpu)
+		cpumask_clear_cpu(env->prev_cpu, &search_cpus);
+
+	env->need_idle = wake_to_idle(env->p) || c->wake_up_idle;
+
+	for_each_cpu(i, &search_cpus) {
+		env->cpu_load = cpu_load_sync(i, env->sync);
+
+		trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
+			sched_irqload(i),
+			power_cost(i, task_load(env->p) +
+					cpu_cravg_sync(i, env->sync)), 0);
+
+		if (unlikely(!cpu_active(i)) || skip_cpu(i, env))
+			continue;
+
+		update_spare_capacity(stats, env, i, c->capacity,
+				      env->cpu_load);
+
+		/*
+		 * need_idle takes precedence over sched boost, but when both
+		 * are set, the idlest CPU within all the clusters is selected
+		 * when boost_policy = BOOST_ON_ALL, whereas the idlest CPU in
+		 * the big cluster is selected when boost_policy = BOOST_ON_BIG.
+		 */
+		if ((!env->need_idle &&
+		    env->boost_policy != SCHED_BOOST_NONE) ||
+		    env->need_waker_cluster ||
+		    sched_cpu_high_irqload(i) ||
+		    spill_threshold_crossed(env, cpu_rq(i)))
+			continue;
+
+		update_cluster_stats(i, stats, env);
+	}
+}
+
+static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
+{
+	stats->best_cpu = stats->best_idle_cpu = -1;
+	stats->best_capacity_cpu = stats->best_sibling_cpu  = -1;
+	stats->min_cost = stats->best_sibling_cpu_cost = INT_MAX;
+	stats->min_load	= stats->best_sibling_cpu_load = ULLONG_MAX;
+	stats->highest_spare_capacity = 0;
+	stats->least_loaded_cpu = -1;
+	stats->best_cpu_wakeup_latency = INT_MAX;
+	/* No need to initialize stats->best_load */
+}
+
+static inline bool env_has_special_flags(struct cpu_select_env *env)
+{
+	if (env->need_idle || env->boost_policy != SCHED_BOOST_NONE ||
+	    env->reason)
+		return true;
+
+	return false;
+}
+
+static inline bool
+bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
+{
+	int prev_cpu;
+	struct task_struct *task = env->p;
+	struct sched_cluster *cluster;
+
+	if (!task->ravg.mark_start || !sched_short_sleep_task_threshold)
+		return false;
+
+	prev_cpu = env->prev_cpu;
+	if (!cpumask_test_cpu(prev_cpu, tsk_cpus_allowed(task)) ||
+					unlikely(!cpu_active(prev_cpu)) ||
+					cpu_isolated(prev_cpu))
+		return false;
+
+	if (task->ravg.mark_start - task->last_cpu_selected_ts >=
+				sched_long_cpu_selection_threshold)
+		return false;
+
+	/*
+	 * This function should be used by the task wake-up path only, as it
+	 * assumes p->last_switch_out_ts to be the last sleep time.
+	 * p->last_switch_out_ts can denote the last preemption time as well
+	 * as the last sleep time.
+	 */
+	if (task->ravg.mark_start - task->last_switch_out_ts >=
+					sched_short_sleep_task_threshold)
+		return false;
+
+	env->task_load = scale_load_to_cpu(task_load(task), prev_cpu);
+	cluster = cpu_rq(prev_cpu)->cluster;
+
+	if (!task_load_will_fit(task, env->task_load, prev_cpu,
+				sched_boost_policy())) {
+
+		__set_bit(cluster->id, env->backup_list);
+		__clear_bit(cluster->id, env->candidate_list);
+		return false;
+	}
+
+	env->cpu_load = cpu_load_sync(prev_cpu, env->sync);
+	if (sched_cpu_high_irqload(prev_cpu) ||
+			spill_threshold_crossed(env, cpu_rq(prev_cpu))) {
+		update_spare_capacity(stats, env, prev_cpu,
+				cluster->capacity, env->cpu_load);
+		env->ignore_prev_cpu = 1;
+		return false;
+	}
+
+	return true;
+}
+
+static inline bool
+wake_to_waker_cluster(struct cpu_select_env *env)
+{
+	return env->sync &&
+	       task_load(current) > sched_big_waker_task_load &&
+	       task_load(env->p) < sched_small_wakee_task_load;
+}
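+
+/*
+ * Example with assumed threshold values: if sched_big_waker_task_load is 25%
+ * and sched_small_wakee_task_load is 10%, a 30%-load waker doing a sync
+ * wakeup of a 5%-load wakee returns true, so the search is narrowed to the
+ * waker's cluster (or the waker's CPU itself when bias_to_waker_cpu() also
+ * holds); a 15%-load wakee falls back to the normal cluster walk.
+ */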
+
+static inline bool
+bias_to_waker_cpu(struct task_struct *p, int cpu)
+{
+	return sysctl_sched_prefer_sync_wakee_to_waker &&
+	       cpu_rq(cpu)->nr_running == 1 &&
+	       cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
+	       cpu_active(cpu) && !cpu_isolated(cpu);
+}
+
+static inline int
+cluster_allowed(struct task_struct *p, struct sched_cluster *cluster)
+{
+	cpumask_t tmp_mask;
+
+	cpumask_and(&tmp_mask, &cluster->cpus, cpu_active_mask);
+	cpumask_and(&tmp_mask, &tmp_mask, &p->cpus_allowed);
+
+	return !cpumask_empty(&tmp_mask);
+}
+
+/* return cheapest cpu that can fit this task */
+static int select_best_cpu(struct task_struct *p, int target, int reason,
+			   int sync)
+{
+	struct sched_cluster *cluster, *pref_cluster = NULL;
+	struct cluster_cpu_stats stats;
+	struct related_thread_group *grp;
+	unsigned int sbc_flag = 0;
+	int cpu = raw_smp_processor_id();
+	bool special;
+
+	struct cpu_select_env env = {
+		.p			= p,
+		.reason			= reason,
+		.need_idle		= wake_to_idle(p),
+		.need_waker_cluster	= 0,
+		.sync			= sync,
+		.prev_cpu		= target,
+		.ignore_prev_cpu	= 0,
+		.rtg			= NULL,
+		.sbc_best_flag		= 0,
+		.sbc_best_cluster_flag	= 0,
+		.pack_task              = false,
+	};
+
+	env.boost_policy = task_sched_boost(p) ?
+			sched_boost_policy() : SCHED_BOOST_NONE;
+
+	bitmap_copy(env.candidate_list, all_cluster_ids, NR_CPUS);
+	bitmap_zero(env.backup_list, NR_CPUS);
+
+	init_cluster_cpu_stats(&stats);
+	special = env_has_special_flags(&env);
+
+	rcu_read_lock();
+
+	grp = task_related_thread_group(p);
+
+	if (grp && grp->preferred_cluster) {
+		pref_cluster = grp->preferred_cluster;
+		if (!cluster_allowed(p, pref_cluster))
+			clear_bit(pref_cluster->id, env.candidate_list);
+		else
+			env.rtg = grp;
+	} else if (!special) {
+		cluster = cpu_rq(cpu)->cluster;
+		if (wake_to_waker_cluster(&env)) {
+			if (bias_to_waker_cpu(p, cpu)) {
+				target = cpu;
+				sbc_flag = SBC_FLAG_WAKER_CLUSTER |
+					   SBC_FLAG_WAKER_CPU;
+				goto out;
+			} else if (cluster_allowed(p, cluster)) {
+				env.need_waker_cluster = 1;
+				bitmap_zero(env.candidate_list, NR_CPUS);
+				__set_bit(cluster->id, env.candidate_list);
+				env.sbc_best_cluster_flag =
+							SBC_FLAG_WAKER_CLUSTER;
+			}
+		} else if (bias_to_prev_cpu(&env, &stats)) {
+			sbc_flag = SBC_FLAG_PREV_CPU;
+			goto out;
+		}
+	}
+
+	if (!special && is_short_burst_task(p)) {
+		env.pack_task = true;
+		sbc_flag = SBC_FLAG_PACK_TASK;
+	}
+retry:
+	cluster = select_least_power_cluster(&env);
+
+	if (!cluster)
+		goto out;
+
+	/*
+	 * 'cluster' now points to the minimum power cluster which can satisfy
+	 * the task's perf goals. Walk down the cluster list starting with that
+	 * cluster. For non-small tasks, skip clusters that don't have
+	 * mostly_idle/idle cpus.
+	 */
+
+	do {
+		find_best_cpu_in_cluster(cluster, &env, &stats);
+
+	} while ((cluster = next_best_cluster(cluster, &env, &stats)));
+
+	if (env.need_idle) {
+		if (stats.best_idle_cpu >= 0) {
+			target = stats.best_idle_cpu;
+			sbc_flag |= SBC_FLAG_IDLE_CSTATE;
+		} else if (stats.least_loaded_cpu >= 0) {
+			target = stats.least_loaded_cpu;
+			sbc_flag |= SBC_FLAG_IDLE_LEAST_LOADED;
+		}
+	} else if (stats.best_cpu >= 0) {
+		if (stats.best_cpu != task_cpu(p) &&
+				stats.min_cost == stats.best_sibling_cpu_cost) {
+			stats.best_cpu = stats.best_sibling_cpu;
+			sbc_flag |= SBC_FLAG_BEST_SIBLING;
+		}
+		sbc_flag |= env.sbc_best_flag;
+		target = stats.best_cpu;
+	} else {
+		if (env.rtg && env.boost_policy == SCHED_BOOST_NONE) {
+			env.rtg = NULL;
+			goto retry;
+		}
+
+		/*
+		 * With boost_policy == SCHED_BOOST_ON_BIG, we reach here with
+		 * backup_list = little cluster, candidate_list = none and
+		 * stats->best_capacity_cpu points to the best spare capacity
+		 * CPU among the CPUs in the big cluster.
+		 */
+		if (env.boost_policy == SCHED_BOOST_ON_BIG &&
+		    stats.best_capacity_cpu >= 0)
+			sbc_flag |= SBC_FLAG_BOOST_CLUSTER;
+		else
+			find_backup_cluster(&env, &stats);
+
+		if (stats.best_capacity_cpu >= 0) {
+			target = stats.best_capacity_cpu;
+			sbc_flag |= SBC_FLAG_BEST_CAP_CPU;
+		}
+	}
+	p->last_cpu_selected_ts = sched_ktime_clock();
+out:
+	sbc_flag |= env.sbc_best_cluster_flag;
+	rcu_read_unlock();
+	trace_sched_task_load(p, sched_boost_policy() && task_sched_boost(p),
+		env.reason, env.sync, env.need_idle, sbc_flag, target);
+	return target;
+}
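+
+/*
+ * Placement flow sketch, summarizing the code above (no new behavior):
+ *
+ *	select_best_cpu(p, prev, reason, sync)
+ *	  -> colocation / waker-cluster / prev-cpu shortcuts
+ *	  -> select_least_power_cluster(): cheapest cluster the task fits in
+ *	  -> find_best_cpu_in_cluster() per cluster via next_best_cluster()
+ *	  -> pick best_idle_cpu, best_cpu or best_capacity_cpu as applicable
+ *
+ * The sbc_flag bits collected along the way only feed the
+ * trace_sched_task_load() tracepoint and do not change the chosen CPU.
+ */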
+
+#ifdef CONFIG_CFS_BANDWIDTH
+
+static inline struct task_group *next_task_group(struct task_group *tg)
+{
+	tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
+
+	return (&tg->list == &task_groups) ? NULL : tg;
+}
+
+/* Iterate over all cfs_rq in a cpu */
+#define for_each_cfs_rq(cfs_rq, tg, cpu)	\
+	for (tg = container_of(&task_groups, struct task_group, list);	\
+		((tg = next_task_group(tg)) && (cfs_rq = tg->cfs_rq[cpu]));)
+
+void reset_cfs_rq_hmp_stats(int cpu, int reset_cra)
+{
+	struct task_group *tg;
+	struct cfs_rq *cfs_rq;
+
+	rcu_read_lock();
+
+	for_each_cfs_rq(cfs_rq, tg, cpu)
+		reset_hmp_stats(&cfs_rq->hmp_stats, reset_cra);
+
+	rcu_read_unlock();
+}
+
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
+
+static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra);
+static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra);
+
+/* Add task's contribution to a cpu's HMP statistics */
+void inc_hmp_sched_stats_fair(struct rq *rq,
+			struct task_struct *p, int change_cra)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+
+	/*
+	 * Although the below check is not strictly required (as
+	 * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg called
+	 * from inc_cfs_rq_hmp_stats() have similar checks), we gain a bit of
+	 * efficiency by short-circuiting the for_each_sched_entity() loop when
+	 * sched_disable_window_stats is set.
+	 */
+	if (sched_disable_window_stats)
+		return;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		inc_cfs_rq_hmp_stats(cfs_rq, p, change_cra);
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+
+	/* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */
+	if (!se)
+		inc_rq_hmp_stats(rq, p, change_cra);
+}
+
+static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+				       u32 new_task_load, u32 new_pred_demand)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
+					      task_load_delta,
+					      pred_demand_delta);
+		fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta);
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+
+	/* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
+	if (!se) {
+		fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
+					      task_load_delta,
+					      pred_demand_delta);
+		fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
+	}
+}
+
+static int task_will_be_throttled(struct task_struct *p);
+
+#else	/* CONFIG_CFS_BANDWIDTH */
+
+inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
+
+static void
+fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+			   u32 new_task_load, u32 new_pred_demand)
+{
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+				      pred_demand_delta);
+	fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
+}
+
+static inline int task_will_be_throttled(struct task_struct *p)
+{
+	return 0;
+}
+
+void inc_hmp_sched_stats_fair(struct rq *rq,
+			struct task_struct *p, int change_cra)
+{
+	inc_nr_big_task(&rq->hmp_stats, p);
+}
+
+#endif	/* CONFIG_CFS_BANDWIDTH */
+
+/*
+ * Reset balance_interval at all sched_domain levels of the given cpu, so
+ * that it honors the kick.
+ */
+static inline void reset_balance_interval(int cpu)
+{
+	struct sched_domain *sd;
+
+	if (cpu >= nr_cpu_ids)
+		return;
+
+	rcu_read_lock();
+	for_each_domain(cpu, sd)
+		sd->balance_interval = 0;
+	rcu_read_unlock();
+}
+
+/*
+ * Check if a task is on the "wrong" cpu (i.e. its current cpu is not the
+ * ideal cpu as per its demand or priority).
+ *
+ * Returns the reason why the task needs to be migrated.
+ */
+static inline int migration_needed(struct task_struct *p, int cpu)
+{
+	int nice;
+	struct related_thread_group *grp;
+
+	if (p->state != TASK_RUNNING || p->nr_cpus_allowed == 1)
+		return 0;
+
+	/* No need to migrate a task that is about to be throttled */
+	if (task_will_be_throttled(p))
+		return 0;
+
+	if (sched_boost_policy() == SCHED_BOOST_ON_BIG &&
+		 cpu_capacity(cpu) != max_capacity && task_sched_boost(p))
+		return UP_MIGRATION;
+
+	if (sched_cpu_high_irqload(cpu))
+		return IRQLOAD_MIGRATION;
+
+	nice = task_nice(p);
+	rcu_read_lock();
+	grp = task_related_thread_group(p);
+	if (!grp && (nice > SCHED_UPMIGRATE_MIN_NICE ||
+	       upmigrate_discouraged(p)) && cpu_capacity(cpu) > min_capacity) {
+		rcu_read_unlock();
+		return DOWN_MIGRATION;
+	}
+
+	if (!task_will_fit(p, cpu)) {
+		rcu_read_unlock();
+		return UP_MIGRATION;
+	}
+	rcu_read_unlock();
+
+	return 0;
+}
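+
+/*
+ * Example (illustrative): a task whose nice value exceeds
+ * SCHED_UPMIGRATE_MIN_NICE (or that is upmigrate_discouraged), is not in a
+ * related_thread_group and sits on a CPU above min_capacity reports
+ * DOWN_MIGRATION, while a boosted task on a non-max-capacity CPU under
+ * SCHED_BOOST_ON_BIG reports UP_MIGRATION; check_for_migration() then passes
+ * the reason to select_best_cpu() to pick the target.
+ */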
+
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	/* Invoke active balance to force-migrate the currently running task */
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	if (!rq->active_balance) {
+		rq->active_balance = 1;
+		rq->push_cpu = new_cpu;
+		get_task_struct(p);
+		rq->push_task = p;
+		rc = 1;
+	}
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	return rc;
+}
+
+static DEFINE_RAW_SPINLOCK(migration_lock);
+
+/*
+ * Check if the currently running task should be migrated to a better cpu.
+ *
+ * TODO: Effect this via changes to nohz_balancer_kick() and load balance?
+ */
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+	int cpu = cpu_of(rq), new_cpu;
+	int active_balance = 0, reason;
+
+	reason = migration_needed(p, cpu);
+	if (!reason)
+		return;
+
+	raw_spin_lock(&migration_lock);
+	new_cpu = select_best_cpu(p, cpu, reason, 0);
+
+	if (new_cpu != cpu) {
+		active_balance = kick_active_balance(rq, p, new_cpu);
+		if (active_balance)
+			mark_reserved(new_cpu);
+	}
+
+	raw_spin_unlock(&migration_lock);
+
+	if (active_balance)
+		stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
+					&rq->active_balance_work);
+}
+
+#ifdef CONFIG_CFS_BANDWIDTH
+static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
+{
+	cfs_rq->hmp_stats.nr_big_tasks = 0;
+	cfs_rq->hmp_stats.cumulative_runnable_avg = 0;
+	cfs_rq->hmp_stats.pred_demands_sum = 0;
+}
+
+static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+		 struct task_struct *p, int change_cra)
+{
+	inc_nr_big_task(&cfs_rq->hmp_stats, p);
+	if (change_cra)
+		inc_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
+}
+
+static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+		 struct task_struct *p, int change_cra)
+{
+	dec_nr_big_task(&cfs_rq->hmp_stats, p);
+	if (change_cra)
+		dec_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
+}
+
+static void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+			 struct cfs_rq *cfs_rq)
+{
+	stats->nr_big_tasks += cfs_rq->hmp_stats.nr_big_tasks;
+	stats->cumulative_runnable_avg +=
+				cfs_rq->hmp_stats.cumulative_runnable_avg;
+	stats->pred_demands_sum += cfs_rq->hmp_stats.pred_demands_sum;
+}
+
+static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+				 struct cfs_rq *cfs_rq)
+{
+	stats->nr_big_tasks -= cfs_rq->hmp_stats.nr_big_tasks;
+	stats->cumulative_runnable_avg -=
+				cfs_rq->hmp_stats.cumulative_runnable_avg;
+	stats->pred_demands_sum -= cfs_rq->hmp_stats.pred_demands_sum;
+
+	BUG_ON(stats->nr_big_tasks < 0 ||
+		(s64)stats->cumulative_runnable_avg < 0);
+	BUG_ON((s64)stats->pred_demands_sum < 0);
+}
+#endif	/* CONFIG_CFS_BANDWIDTH */
+
+static int
+bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
+{
+	int local_cpu, busiest_cpu;
+	int local_capacity, busiest_capacity;
+	int local_pwr_cost, busiest_pwr_cost;
+	int nr_cpus;
+	int boost = sched_boost();
+
+	if (!sysctl_sched_restrict_cluster_spill ||
+		boost == FULL_THROTTLE_BOOST || boost == CONSERVATIVE_BOOST)
+		return 0;
+
+	local_cpu = group_first_cpu(sds->local);
+	busiest_cpu = group_first_cpu(sds->busiest);
+
+	local_capacity = cpu_max_possible_capacity(local_cpu);
+	busiest_capacity = cpu_max_possible_capacity(busiest_cpu);
+
+	local_pwr_cost = cpu_max_power_cost(local_cpu);
+	busiest_pwr_cost = cpu_max_power_cost(busiest_cpu);
+
+	if (local_pwr_cost <= busiest_pwr_cost)
+		return 0;
+
+	if (local_capacity > busiest_capacity &&
+			sds->busiest_stat.sum_nr_big_tasks)
+		return 0;
+
+	nr_cpus = cpumask_weight(sched_group_cpus(sds->busiest));
+	if ((sds->busiest_stat.group_cpu_load < nr_cpus * sched_spill_load) &&
+		(sds->busiest_stat.sum_nr_running <
+			nr_cpus * sysctl_sched_spill_nr_run))
+		return 1;
+
+	return 0;
+}
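+
+/*
+ * Example with assumed numbers: for a 4-cpu little cluster as the busiest
+ * group, sysctl_sched_spill_nr_run = 3 and sched_spill_load at 90% of the
+ * window, the balance is bailed out as long as the group runs fewer than 12
+ * tasks and its combined load stays under 4 * 90%, provided
+ * sysctl_sched_restrict_cluster_spill is set, no full-throttle or
+ * conservative boost is active, the local (big) cluster is the more
+ * power-expensive one and the busiest group has no big tasks to pull up.
+ */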
+
+static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
+						  struct sd_lb_stats *sds,
+						  struct sched_group *sg,
+						  struct sg_lb_stats *sgs)
+{
+	if (env->idle != CPU_NOT_IDLE &&
+	    cpu_capacity(env->dst_cpu) > group_rq_capacity(sg)) {
+		if (sgs->sum_nr_big_tasks >
+				sds->busiest_stat.sum_nr_big_tasks) {
+			env->flags |= LBF_BIG_TASK_ACTIVE_BALANCE;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static struct rq *find_busiest_queue_hmp(struct lb_env *env,
+				     struct sched_group *group)
+{
+	struct rq *busiest = NULL, *busiest_big = NULL;
+	u64 max_runnable_avg = 0, max_runnable_avg_big = 0;
+	int max_nr_big = 0, nr_big;
+	bool find_big = !!(env->flags & LBF_BIG_TASK_ACTIVE_BALANCE);
+	int i;
+	cpumask_t cpus;
+
+	cpumask_andnot(&cpus, sched_group_cpus(group), cpu_isolated_mask);
+
+	for_each_cpu(i, &cpus) {
+		struct rq *rq = cpu_rq(i);
+		u64 cumulative_runnable_avg =
+				rq->hmp_stats.cumulative_runnable_avg;
+
+		if (!cpumask_test_cpu(i, env->cpus))
+			continue;
+
+		if (find_big) {
+			nr_big = nr_big_tasks(rq);
+			if (nr_big > max_nr_big ||
+			    (nr_big > 0 && nr_big == max_nr_big &&
+			     cumulative_runnable_avg > max_runnable_avg_big)) {
+				max_runnable_avg_big = cumulative_runnable_avg;
+				busiest_big = rq;
+				max_nr_big = nr_big;
+				continue;
+			}
+		}
+
+		if (cumulative_runnable_avg > max_runnable_avg) {
+			max_runnable_avg = cumulative_runnable_avg;
+			busiest = rq;
+		}
+	}
+
+	if (busiest_big)
+		return busiest_big;
+
+	env->flags &= ~LBF_BIG_TASK_ACTIVE_BALANCE;
+	return busiest;
+}
+
+#ifdef CONFIG_NO_HZ_COMMON
+static inline int find_new_hmp_ilb(int type)
+{
+	int call_cpu = raw_smp_processor_id();
+	struct sched_domain *sd;
+	int ilb;
+
+	rcu_read_lock();
+
+	/* Pick an idle cpu "closest" to call_cpu */
+	for_each_domain(call_cpu, sd) {
+		for_each_cpu_and(ilb, nohz.idle_cpus_mask,
+						sched_domain_span(sd)) {
+			if (idle_cpu(ilb) && (type != NOHZ_KICK_RESTRICT ||
+					cpu_max_power_cost(ilb) <=
+					cpu_max_power_cost(call_cpu))) {
+				rcu_read_unlock();
+				reset_balance_interval(ilb);
+				return ilb;
+			}
+		}
+	}
+
+	rcu_read_unlock();
+	return nr_cpu_ids;
+}
+
+static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
+{
+	struct sched_domain *sd;
+	int i;
+
+	if (rq->nr_running < 2)
+		return 0;
+
+	if (!sysctl_sched_restrict_cluster_spill ||
+			sched_boost_policy() == SCHED_BOOST_ON_ALL)
+		return 1;
+
+	if (cpu_max_power_cost(cpu) == max_power_cost)
+		return 1;
+
+	rcu_read_lock();
+	sd = rcu_dereference_check_sched_domain(rq->sd);
+	if (!sd) {
+		rcu_read_unlock();
+		return 0;
+	}
+
+	for_each_cpu(i, sched_domain_span(sd)) {
+		if (cpu_load(i) < sched_spill_load &&
+				cpu_rq(i)->nr_running <
+				sysctl_sched_spill_nr_run) {
+			/* Change the kick type to limit to CPUs that
+			 * are of equal or lower capacity.
+			 */
+			*type = NOHZ_KICK_RESTRICT;
+			break;
+		}
+	}
+	rcu_read_unlock();
+	return 1;
+}
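+
+/*
+ * Note, summarizing the interplay above (no new behavior): when
+ * _nohz_kick_needed_hmp() finds a lightly loaded CPU in the domain it
+ * downgrades the kick to NOHZ_KICK_RESTRICT, and find_new_hmp_ilb() then
+ * only accepts an idle balancer whose cpu_max_power_cost() does not exceed
+ * that of the calling CPU, keeping idle balancing on power-equivalent or
+ * cheaper CPUs.
+ */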
+#endif /* CONFIG_NO_HZ_COMMON */
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_CFS_BANDWIDTH
+/*
+ * Check if task is part of a hierarchy where some cfs_rq does not have any
+ * runtime left.
+ *
+ * We can't rely on throttled_hierarchy() to do this test, as
+ * cfs_rq->throttle_count will not be updated yet when this function is called
+ * from scheduler_tick().
+ */
+static int task_will_be_throttled(struct task_struct *p)
+{
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq;
+
+	if (!cfs_bandwidth_used())
+		return 0;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		if (!cfs_rq->runtime_enabled)
+			continue;
+		if (cfs_rq->runtime_remaining <= 0)
+			return 1;
+	}
+
+	return 0;
+}
+#endif /* CONFIG_CFS_BANDWIDTH */
+
+#endif /* CONFIG_SCHED_HMP */
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 189fc63..67ab014 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -9,6 +9,58 @@
 #include <linux/irq_work.h>
 #include <trace/events/sched.h>
 
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
+{
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
+{
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
+			 u32 new_task_load, u32 new_pred_demand)
+{
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+				      pred_demand_delta);
+}
+
+#ifdef CONFIG_SMP
+static int find_lowest_rq(struct task_struct *task);
+
+static int
+select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
+{
+	int target;
+
+	rcu_read_lock();
+	target = find_lowest_rq(p);
+	if (target != -1)
+		cpu = target;
+	rcu_read_unlock();
+
+	return cpu;
+}
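+
+/*
+ * Note (no new behavior): unlike the default select_task_rq_rt() path, the
+ * HMP variant above unconditionally consults find_lowest_rq() and keeps the
+ * incoming cpu only when no suitable rq is found (-1), so RT placement
+ * always goes through the lowest-rq search on wakeup.
+ */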
+#endif /* CONFIG_SMP */
+#else  /* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
+
+#endif	/* CONFIG_SCHED_HMP */
+
 int sched_rr_timeslice = RR_TIMESLICE;
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
@@ -1197,41 +1249,6 @@ void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
-#ifdef CONFIG_SCHED_HMP
-
-static void
-inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
-{
-	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
-{
-	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
-			 u32 new_task_load, u32 new_pred_demand)
-{
-	s64 task_load_delta = (s64)new_task_load - task_load(p);
-	s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
-	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
-				      pred_demand_delta);
-}
-
-#else	/* CONFIG_SCHED_HMP */
-
-static inline void
-inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
-
-static inline void
-dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
-
-#endif	/* CONFIG_SCHED_HMP */
-
 static inline
 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
 {
@@ -1467,22 +1484,6 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
-#ifdef CONFIG_SCHED_HMP
-static int
-select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
-{
-	int target;
-
-	rcu_read_lock();
-	target = find_lowest_rq(p);
-	if (target != -1)
-		cpu = target;
-	rcu_read_unlock();
-
-	return cpu;
-}
-#endif /* CONFIG_SCHED_HMP */
-
 static int
 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
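fixup_hmp_sched_stats_rt() above applies signed deltas to the rq-wide HMP aggregates when a task's demand is re-evaluated: the load delta is new_task_load - task_load(p), and PRED_DEMAND_DELTA (defined in the sched.h changes below) is new_pred_demand - p->ravg.pred_demand. A standalone sketch of that bookkeeping with illustrative numbers, not values taken from this patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t cumulative_runnable_avg = 1500, pred_demands_sum = 900;
	int64_t old_load = 400, new_load = 520;	/* demand grew */
	int64_t old_pred = 300, new_pred = 280;	/* prediction shrank */

	cumulative_runnable_avg += new_load - old_load;	/* 1500 -> 1620 */
	pred_demands_sum += new_pred - old_pred;	/* 900 -> 880 */

	printf("%lld %lld\n", (long long)cumulative_runnable_avg,
	       (long long)pred_demands_sum);
	return 0;
}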
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 41a7039..eed0639 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -25,6 +25,64 @@
 struct rq;
 struct cpuidle_state;
 
+struct hmp_sched_stats {
+	int nr_big_tasks;
+	u64 cumulative_runnable_avg;
+	u64 pred_demands_sum;
+};
+
+#ifdef CONFIG_SCHED_HMP
+#define NUM_TRACKED_WINDOWS 2
+#define NUM_LOAD_INDICES 1000
+
+struct load_subtractions {
+	u64 window_start;
+	u64 subs;
+	u64 new_subs;
+};
+
+struct group_cpu_time {
+	u64 curr_runnable_sum;
+	u64 prev_runnable_sum;
+	u64 nt_curr_runnable_sum;
+	u64 nt_prev_runnable_sum;
+};
+
+struct sched_cluster {
+	raw_spinlock_t load_lock;
+	struct list_head list;
+	struct cpumask cpus;
+	int id;
+	int max_power_cost;
+	int min_power_cost;
+	int max_possible_capacity;
+	int capacity;
+	int efficiency; /* Differentiate cpus with different IPC capability */
+	int load_scale_factor;
+	unsigned int exec_scale_factor;
+	/*
+	 * max_freq = user maximum
+	 * max_mitigated_freq = thermal defined maximum
+	 * max_possible_freq = maximum supported by hardware
+	 */
+	unsigned int cur_freq, max_freq, max_mitigated_freq, min_freq;
+	unsigned int max_possible_freq;
+	bool freq_init_done;
+	int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
+	unsigned int static_cluster_pwr_cost;
+	int notifier_sent;
+	bool wake_up_idle;
+};
+
+struct cpu_cycle {
+	u64 cycles;
+	u64 time;
+};
+
+#endif /* CONFIG_SCHED_HMP */
+
 /* task_struct::on_rq states: */
 #define TASK_ON_RQ_QUEUED	1
 #define TASK_ON_RQ_MIGRATING	2
@@ -377,87 +435,6 @@ struct cfs_bandwidth { };
 
 #endif	/* CONFIG_CGROUP_SCHED */
 
-#ifdef CONFIG_SCHED_HMP
-
-#define NUM_TRACKED_WINDOWS 2
-#define NUM_LOAD_INDICES 1000
-
-struct hmp_sched_stats {
-	int nr_big_tasks;
-	u64 cumulative_runnable_avg;
-	u64 pred_demands_sum;
-};
-
-struct load_subtractions {
-	u64 window_start;
-	u64 subs;
-	u64 new_subs;
-};
-
-struct group_cpu_time {
-	u64 curr_runnable_sum;
-	u64 prev_runnable_sum;
-	u64 nt_curr_runnable_sum;
-	u64 nt_prev_runnable_sum;
-};
-
-struct sched_cluster {
-	raw_spinlock_t load_lock;
-	struct list_head list;
-	struct cpumask cpus;
-	int id;
-	int max_power_cost;
-	int min_power_cost;
-	int max_possible_capacity;
-	int capacity;
-	int efficiency; /* Differentiate cpus with different IPC capability */
-	int load_scale_factor;
-	unsigned int exec_scale_factor;
-	/*
-	 * max_freq = user maximum
-	 * max_mitigated_freq = thermal defined maximum
-	 * max_possible_freq = maximum supported by hardware
-	 */
-	unsigned int cur_freq, max_freq, max_mitigated_freq, min_freq;
-	unsigned int max_possible_freq;
-	bool freq_init_done;
-	int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
-	unsigned int static_cluster_pwr_cost;
-	int notifier_sent;
-	bool wake_up_idle;
-};
-
-extern unsigned long all_cluster_ids[];
-
-static inline int cluster_first_cpu(struct sched_cluster *cluster)
-{
-	return cpumask_first(&cluster->cpus);
-}
-
-struct related_thread_group {
-	int id;
-	raw_spinlock_t lock;
-	struct list_head tasks;
-	struct list_head list;
-	struct sched_cluster *preferred_cluster;
-	struct rcu_head rcu;
-	u64 last_update;
-};
-
-extern struct list_head cluster_head;
-extern int num_clusters;
-extern struct sched_cluster *sched_cluster[NR_CPUS];
-
-struct cpu_cycle {
-	u64 cycles;
-	u64 time;
-};
-
-#define for_each_sched_cluster(cluster) \
-	list_for_each_entry_rcu(cluster, &cluster_head, list)
-
-#endif /* CONFIG_SCHED_HMP */
-
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
 	struct load_weight load;
@@ -526,9 +503,7 @@ struct cfs_rq {
 	struct task_group *tg;	/* group that "owns" this runqueue */
 
 #ifdef CONFIG_CFS_BANDWIDTH
-#ifdef CONFIG_SCHED_HMP
 	struct hmp_sched_stats hmp_stats;
-#endif
 
 	int runtime_enabled;
 	u64 runtime_expires;
@@ -764,10 +739,11 @@ struct rq {
 	u64 max_idle_balance_cost;
 #endif
 
+	struct hmp_sched_stats hmp_stats;
+
 #ifdef CONFIG_SCHED_HMP
 	struct sched_cluster *cluster;
 	struct cpumask freq_domain_cpumask;
-	struct hmp_sched_stats hmp_stats;
 
 	int cstate, wakeup_latency, wakeup_energy;
 	u64 window_start;
@@ -1091,617 +1067,6 @@ enum sched_boost_policy {
 	SCHED_BOOST_ON_ALL,
 };
 
-#ifdef CONFIG_SCHED_HMP
-
-#define WINDOW_STATS_RECENT		0
-#define WINDOW_STATS_MAX		1
-#define WINDOW_STATS_MAX_RECENT_AVG	2
-#define WINDOW_STATS_AVG		3
-#define WINDOW_STATS_INVALID_POLICY	4
-
-#define SCHED_UPMIGRATE_MIN_NICE 15
-#define EXITING_TASK_MARKER	0xdeaddead
-
-#define UP_MIGRATION		1
-#define DOWN_MIGRATION		2
-#define IRQLOAD_MIGRATION	3
-
-extern struct mutex policy_mutex;
-extern unsigned int sched_ravg_window;
-extern unsigned int sched_disable_window_stats;
-extern unsigned int max_possible_freq;
-extern unsigned int min_max_freq;
-extern unsigned int pct_task_load(struct task_struct *p);
-extern unsigned int max_possible_efficiency;
-extern unsigned int min_possible_efficiency;
-extern unsigned int max_capacity;
-extern unsigned int min_capacity;
-extern unsigned int max_load_scale_factor;
-extern unsigned int max_possible_capacity;
-extern unsigned int min_max_possible_capacity;
-extern unsigned int max_power_cost;
-extern unsigned int sched_init_task_load_windows;
-extern unsigned int up_down_migrate_scale_factor;
-extern unsigned int sysctl_sched_restrict_cluster_spill;
-extern unsigned int sched_pred_alert_load;
-extern struct sched_cluster init_cluster;
-extern unsigned int  __read_mostly sched_short_sleep_task_threshold;
-extern unsigned int  __read_mostly sched_long_cpu_selection_threshold;
-extern unsigned int  __read_mostly sched_big_waker_task_load;
-extern unsigned int  __read_mostly sched_small_wakee_task_load;
-extern unsigned int  __read_mostly sched_spill_load;
-extern unsigned int  __read_mostly sched_upmigrate;
-extern unsigned int  __read_mostly sched_downmigrate;
-extern unsigned int  __read_mostly sysctl_sched_spill_nr_run;
-extern unsigned int  __read_mostly sched_load_granule;
-
-extern void init_new_task_load(struct task_struct *p, bool idle_task);
-extern u64 sched_ktime_clock(void);
-extern int got_boost_kick(void);
-extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
-extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
-						u64 wallclock, u64 irqtime);
-extern bool early_detection_notify(struct rq *rq, u64 wallclock);
-extern void clear_ed_task(struct task_struct *p, struct rq *rq);
-extern void fixup_busy_time(struct task_struct *p, int new_cpu);
-extern void clear_boost_kick(int cpu);
-extern void clear_hmp_request(int cpu);
-extern void mark_task_starting(struct task_struct *p);
-extern void set_window_start(struct rq *rq);
-extern void update_cluster_topology(void);
-extern void note_task_waking(struct task_struct *p, u64 wallclock);
-extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
-extern void init_clusters(void);
-extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
-extern unsigned int max_task_load(void);
-extern void sched_account_irqtime(int cpu, struct task_struct *curr,
-				 u64 delta, u64 wallclock);
-extern void sched_account_irqstart(int cpu, struct task_struct *curr,
-				   u64 wallclock);
-extern unsigned int cpu_temp(int cpu);
-extern unsigned int nr_eligible_big_tasks(int cpu);
-extern int update_preferred_cluster(struct related_thread_group *grp,
-			struct task_struct *p, u32 old_load);
-extern void set_preferred_cluster(struct related_thread_group *grp);
-extern void add_new_task_to_grp(struct task_struct *new);
-extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
-extern void update_avg_burst(struct task_struct *p);
-extern void update_avg(u64 *avg, u64 sample);
-
-#define NO_BOOST 0
-#define FULL_THROTTLE_BOOST 1
-#define CONSERVATIVE_BOOST 2
-#define RESTRAINED_BOOST 3
-
-static inline struct sched_cluster *cpu_cluster(int cpu)
-{
-	return cpu_rq(cpu)->cluster;
-}
-
-static inline int cpu_capacity(int cpu)
-{
-	return cpu_rq(cpu)->cluster->capacity;
-}
-
-static inline int cpu_max_possible_capacity(int cpu)
-{
-	return cpu_rq(cpu)->cluster->max_possible_capacity;
-}
-
-static inline int cpu_load_scale_factor(int cpu)
-{
-	return cpu_rq(cpu)->cluster->load_scale_factor;
-}
-
-static inline int cpu_efficiency(int cpu)
-{
-	return cpu_rq(cpu)->cluster->efficiency;
-}
-
-static inline unsigned int cpu_cur_freq(int cpu)
-{
-	return cpu_rq(cpu)->cluster->cur_freq;
-}
-
-static inline unsigned int cpu_min_freq(int cpu)
-{
-	return cpu_rq(cpu)->cluster->min_freq;
-}
-
-static inline unsigned int cluster_max_freq(struct sched_cluster *cluster)
-{
-	/*
-	 * Governor and thermal driver don't know the other party's mitigation
-	 * voting. So struct cluster saves both and return min() for current
-	 * cluster fmax.
-	 */
-	return min(cluster->max_mitigated_freq, cluster->max_freq);
-}
-
-static inline unsigned int cpu_max_freq(int cpu)
-{
-	return cluster_max_freq(cpu_rq(cpu)->cluster);
-}
-
-static inline unsigned int cpu_max_possible_freq(int cpu)
-{
-	return cpu_rq(cpu)->cluster->max_possible_freq;
-}
-
-static inline int same_cluster(int src_cpu, int dst_cpu)
-{
-	return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
-}
-
-static inline int cpu_max_power_cost(int cpu)
-{
-	return cpu_rq(cpu)->cluster->max_power_cost;
-}
-
-static inline int cpu_min_power_cost(int cpu)
-{
-	return cpu_rq(cpu)->cluster->min_power_cost;
-}
-
-static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period)
-{
-	return div64_u64(cycles, period);
-}
-
-static inline bool hmp_capable(void)
-{
-	return max_possible_capacity != min_max_possible_capacity;
-}
-
-/*
- * 'load' is in reference to "best cpu" at its best frequency.
- * Scale that in reference to a given cpu, accounting for how bad it is
- * in reference to "best cpu".
- */
-static inline u64 scale_load_to_cpu(u64 task_load, int cpu)
-{
-	u64 lsf = cpu_load_scale_factor(cpu);
-
-	if (lsf != 1024) {
-		task_load *= lsf;
-		task_load /= 1024;
-	}
-
-	return task_load;
-}
-
-static inline unsigned int task_load(struct task_struct *p)
-{
-	return p->ravg.demand;
-}
-
-static inline void
-inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-				 struct task_struct *p)
-{
-	u32 task_load;
-
-	if (sched_disable_window_stats)
-		return;
-
-	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
-
-	stats->cumulative_runnable_avg += task_load;
-	stats->pred_demands_sum += p->ravg.pred_demand;
-}
-
-static inline void
-dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-				struct task_struct *p)
-{
-	u32 task_load;
-
-	if (sched_disable_window_stats)
-		return;
-
-	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
-
-	stats->cumulative_runnable_avg -= task_load;
-
-	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
-
-	stats->pred_demands_sum -= p->ravg.pred_demand;
-	BUG_ON((s64)stats->pred_demands_sum < 0);
-}
-
-static inline void
-fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-			      struct task_struct *p, s64 task_load_delta,
-			      s64 pred_demand_delta)
-{
-	if (sched_disable_window_stats)
-		return;
-
-	stats->cumulative_runnable_avg += task_load_delta;
-	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
-
-	stats->pred_demands_sum += pred_demand_delta;
-	BUG_ON((s64)stats->pred_demands_sum < 0);
-}
-
-#define pct_to_real(tunable)	\
-		(div64_u64((u64)tunable * (u64)max_task_load(), 100))
-
-#define real_to_pct(tunable)	\
-		(div64_u64((u64)tunable * (u64)100, (u64)max_task_load()))
-
-#define SCHED_HIGH_IRQ_TIMEOUT 3
-static inline u64 sched_irqload(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	s64 delta;
-
-	delta = get_jiffies_64() - rq->irqload_ts;
-	/*
-	 * Current context can be preempted by irq and rq->irqload_ts can be
-	 * updated by irq context so that delta can be negative.
-	 * But this is okay and we can safely return as this means there
-	 * was recent irq occurrence.
-	 */
-
-	if (delta < SCHED_HIGH_IRQ_TIMEOUT)
-		return rq->avg_irqload;
-	else
-		return 0;
-}
-
-static inline int sched_cpu_high_irqload(int cpu)
-{
-	return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
-}
-
-static inline bool task_in_related_thread_group(struct task_struct *p)
-{
-	return !!(rcu_access_pointer(p->grp) != NULL);
-}
-
-static inline
-struct related_thread_group *task_related_thread_group(struct task_struct *p)
-{
-	return rcu_dereference(p->grp);
-}
-
-#define PRED_DEMAND_DELTA ((s64)new_pred_demand - p->ravg.pred_demand)
-
-extern void
-check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
-
-extern void notify_migration(int src_cpu, int dest_cpu,
-			bool src_cpu_dead, struct task_struct *p);
-
-/* Is frequency of two cpus synchronized with each other? */
-static inline int same_freq_domain(int src_cpu, int dst_cpu)
-{
-	struct rq *rq = cpu_rq(src_cpu);
-
-	if (src_cpu == dst_cpu)
-		return 1;
-
-	return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
-}
-
-#define	BOOST_KICK	0
-#define	CPU_RESERVED	1
-
-static inline int is_reserved(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	return test_bit(CPU_RESERVED, &rq->hmp_flags);
-}
-
-static inline int mark_reserved(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	/* Name boost_flags as hmp_flags? */
-	return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
-}
-
-static inline void clear_reserved(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	clear_bit(CPU_RESERVED, &rq->hmp_flags);
-}
-
-static inline u64 cpu_cravg_sync(int cpu, int sync)
-{
-	struct rq *rq = cpu_rq(cpu);
-	u64 load;
-
-	load = rq->hmp_stats.cumulative_runnable_avg;
-
-	/*
-	 * If load is being checked in a sync wakeup environment,
-	 * we may want to discount the load of the currently running
-	 * task.
-	 */
-	if (sync && cpu == smp_processor_id()) {
-		if (load > rq->curr->ravg.demand)
-			load -= rq->curr->ravg.demand;
-		else
-			load = 0;
-	}
-
-	return load;
-}
-
-static inline bool is_short_burst_task(struct task_struct *p)
-{
-	return p->ravg.avg_burst < sysctl_sched_short_burst &&
-	       p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
-}
-
-extern void check_for_migration(struct rq *rq, struct task_struct *p);
-extern void pre_big_task_count_change(const struct cpumask *cpus);
-extern void post_big_task_count_change(const struct cpumask *cpus);
-extern void set_hmp_defaults(void);
-extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
-extern unsigned int power_cost(int cpu, u64 demand);
-extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
-extern int sched_boost(void);
-extern int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
-					enum sched_boost_policy boost_policy);
-extern enum sched_boost_policy sched_boost_policy(void);
-extern int task_will_fit(struct task_struct *p, int cpu);
-extern u64 cpu_load(int cpu);
-extern u64 cpu_load_sync(int cpu, int sync);
-extern int preferred_cluster(struct sched_cluster *cluster,
-						struct task_struct *p);
-extern void inc_nr_big_task(struct hmp_sched_stats *stats,
-					struct task_struct *p);
-extern void dec_nr_big_task(struct hmp_sched_stats *stats,
-					struct task_struct *p);
-extern void inc_rq_hmp_stats(struct rq *rq,
-				struct task_struct *p, int change_cra);
-extern void dec_rq_hmp_stats(struct rq *rq,
-				struct task_struct *p, int change_cra);
-extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
-extern int is_big_task(struct task_struct *p);
-extern int upmigrate_discouraged(struct task_struct *p);
-extern struct sched_cluster *rq_cluster(struct rq *rq);
-extern int nr_big_tasks(struct rq *rq);
-extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
-					struct task_struct *p, s64 delta);
-extern void reset_task_stats(struct task_struct *p);
-extern void reset_cfs_rq_hmp_stats(int cpu, int reset_cra);
-extern void inc_hmp_sched_stats_fair(struct rq *rq,
-			struct task_struct *p, int change_cra);
-extern u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
-					struct cftype *cft);
-extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
-				struct cftype *cft, u64 upmigrate_discourage);
-extern void sched_boost_parse_dt(void);
-extern void clear_top_tasks_bitmap(unsigned long *bitmap);
-
-#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
-extern bool task_sched_boost(struct task_struct *p);
-extern int sync_cgroup_colocation(struct task_struct *p, bool insert);
-extern bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2);
-extern void update_cgroup_boost_settings(void);
-extern void restore_cgroup_boost_settings(void);
-
-#else
-static inline bool
-same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2)
-{
-	return true;
-}
-
-static inline bool task_sched_boost(struct task_struct *p)
-{
-	return true;
-}
-
-static inline void update_cgroup_boost_settings(void) { }
-static inline void restore_cgroup_boost_settings(void) { }
-#endif
-
-extern int alloc_related_thread_groups(void);
-
-#else	/* CONFIG_SCHED_HMP */
-
-struct hmp_sched_stats;
-struct related_thread_group;
-struct sched_cluster;
-
-static inline enum sched_boost_policy sched_boost_policy(void)
-{
-	return SCHED_BOOST_NONE;
-}
-
-static inline bool task_sched_boost(struct task_struct *p)
-{
-	return true;
-}
-
-static inline int got_boost_kick(void)
-{
-	return 0;
-}
-
-static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
-				int event, u64 wallclock, u64 irqtime) { }
-
-static inline bool early_detection_notify(struct rq *rq, u64 wallclock)
-{
-	return 0;
-}
-
-static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { }
-static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
-static inline void clear_boost_kick(int cpu) { }
-static inline void clear_hmp_request(int cpu) { }
-static inline void mark_task_starting(struct task_struct *p) { }
-static inline void set_window_start(struct rq *rq) { }
-static inline void init_clusters(void) {}
-static inline void update_cluster_topology(void) { }
-static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
-static inline void set_task_last_switch_out(struct task_struct *p,
-					    u64 wallclock) { }
-
-static inline int task_will_fit(struct task_struct *p, int cpu)
-{
-	return 1;
-}
-
-static inline unsigned int power_cost(int cpu, u64 demand)
-{
-	return SCHED_CAPACITY_SCALE;
-}
-
-static inline int sched_boost(void)
-{
-	return 0;
-}
-
-static inline int is_big_task(struct task_struct *p)
-{
-	return 0;
-}
-
-static inline int nr_big_tasks(struct rq *rq)
-{
-	return 0;
-}
-
-static inline int is_cpu_throttling_imminent(int cpu)
-{
-	return 0;
-}
-
-static inline int is_task_migration_throttled(struct task_struct *p)
-{
-	return 0;
-}
-
-static inline unsigned int cpu_temp(int cpu)
-{
-	return 0;
-}
-
-static inline void
-inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
-
-static inline void
-dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
-
-static inline int
-preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
-{
-	return 1;
-}
-
-static inline struct sched_cluster *rq_cluster(struct rq *rq)
-{
-	return NULL;
-}
-
-static inline void init_new_task_load(struct task_struct *p, bool idle_task)
-{
-}
-
-static inline u64 scale_load_to_cpu(u64 load, int cpu)
-{
-	return load;
-}
-
-static inline unsigned int nr_eligible_big_tasks(int cpu)
-{
-	return 0;
-}
-
-static inline int pct_task_load(struct task_struct *p) { return 0; }
-
-static inline int cpu_capacity(int cpu)
-{
-	return SCHED_CAPACITY_SCALE;
-}
-
-static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
-
-static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-		 struct task_struct *p)
-{
-}
-
-static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-		 struct task_struct *p)
-{
-}
-
-static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
-				 u64 delta, u64 wallclock)
-{
-}
-
-static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
-					  u64 wallclock)
-{
-}
-
-static inline int sched_cpu_high_irqload(int cpu) { return 0; }
-
-static inline void set_preferred_cluster(struct related_thread_group *grp) { }
-
-static inline bool task_in_related_thread_group(struct task_struct *p)
-{
-	return false;
-}
-
-static inline
-struct related_thread_group *task_related_thread_group(struct task_struct *p)
-{
-	return NULL;
-}
-
-static inline u32 task_load(struct task_struct *p) { return 0; }
-
-static inline int update_preferred_cluster(struct related_thread_group *grp,
-			 struct task_struct *p, u32 old_load)
-{
-	return 0;
-}
-
-static inline void add_new_task_to_grp(struct task_struct *new) {}
-
-#define PRED_DEMAND_DELTA (0)
-
-static inline void
-check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
-
-static inline void notify_migration(int src_cpu, int dest_cpu,
-			bool src_cpu_dead, struct task_struct *p) { }
-
-static inline int same_freq_domain(int src_cpu, int dst_cpu)
-{
-	return 1;
-}
-
-static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
-static inline void pre_big_task_count_change(void) { }
-static inline void post_big_task_count_change(void) { }
-static inline void set_hmp_defaults(void) { }
-
-static inline void clear_reserved(int cpu) { }
-static inline void sched_boost_parse_dt(void) {}
-static inline int alloc_related_thread_groups(void) { return 0; }
-
-#define trace_sched_cpu_load(...)
-#define trace_sched_cpu_load_lb(...)
-#define trace_sched_cpu_load_cgroup(...)
-#define trace_sched_cpu_load_wakeup(...)
-
-static inline void update_avg_burst(struct task_struct *p) {}
-
-#endif	/* CONFIG_SCHED_HMP */
-
 /*
  * Returns the rq capacity of any rq in a group. This does not play
  * well with groups where rq capacity can change independently.
@@ -2572,3 +1937,638 @@ static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
 #else /* arch_scale_freq_capacity */
 #define arch_scale_freq_invariant()	(false)
 #endif
+
+#ifdef CONFIG_SCHED_HMP
+
+static inline int cluster_first_cpu(struct sched_cluster *cluster)
+{
+	return cpumask_first(&cluster->cpus);
+}
+
+struct related_thread_group {
+	int id;
+	raw_spinlock_t lock;
+	struct list_head tasks;
+	struct list_head list;
+	struct sched_cluster *preferred_cluster;
+	struct rcu_head rcu;
+	u64 last_update;
+};
+
+extern struct list_head cluster_head;
+extern int num_clusters;
+extern struct sched_cluster *sched_cluster[NR_CPUS];
+
+#define for_each_sched_cluster(cluster) \
+	list_for_each_entry_rcu(cluster, &cluster_head, list)
+
+#define WINDOW_STATS_RECENT		0
+#define WINDOW_STATS_MAX		1
+#define WINDOW_STATS_MAX_RECENT_AVG	2
+#define WINDOW_STATS_AVG		3
+#define WINDOW_STATS_INVALID_POLICY	4
+
+#define SCHED_UPMIGRATE_MIN_NICE 15
+#define EXITING_TASK_MARKER	0xdeaddead
+
+#define UP_MIGRATION		1
+#define DOWN_MIGRATION		2
+#define IRQLOAD_MIGRATION	3
+
+extern struct mutex policy_mutex;
+extern unsigned int sched_ravg_window;
+extern unsigned int sched_disable_window_stats;
+extern unsigned int max_possible_freq;
+extern unsigned int min_max_freq;
+extern unsigned int pct_task_load(struct task_struct *p);
+extern unsigned int max_possible_efficiency;
+extern unsigned int min_possible_efficiency;
+extern unsigned int max_capacity;
+extern unsigned int min_capacity;
+extern unsigned int max_load_scale_factor;
+extern unsigned int max_possible_capacity;
+extern unsigned int min_max_possible_capacity;
+extern unsigned int max_power_cost;
+extern unsigned int sched_init_task_load_windows;
+extern unsigned int up_down_migrate_scale_factor;
+extern unsigned int sysctl_sched_restrict_cluster_spill;
+extern unsigned int sched_pred_alert_load;
+extern struct sched_cluster init_cluster;
+extern unsigned int  __read_mostly sched_short_sleep_task_threshold;
+extern unsigned int  __read_mostly sched_long_cpu_selection_threshold;
+extern unsigned int  __read_mostly sched_big_waker_task_load;
+extern unsigned int  __read_mostly sched_small_wakee_task_load;
+extern unsigned int  __read_mostly sched_spill_load;
+extern unsigned int  __read_mostly sched_upmigrate;
+extern unsigned int  __read_mostly sched_downmigrate;
+extern unsigned int  __read_mostly sysctl_sched_spill_nr_run;
+extern unsigned int  __read_mostly sched_load_granule;
+
+extern void init_new_task_load(struct task_struct *p, bool idle_task);
+extern u64 sched_ktime_clock(void);
+extern int got_boost_kick(void);
+extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
+						u64 wallclock, u64 irqtime);
+extern bool early_detection_notify(struct rq *rq, u64 wallclock);
+extern void clear_ed_task(struct task_struct *p, struct rq *rq);
+extern void fixup_busy_time(struct task_struct *p, int new_cpu);
+extern void clear_boost_kick(int cpu);
+extern void clear_hmp_request(int cpu);
+extern void mark_task_starting(struct task_struct *p);
+extern void set_window_start(struct rq *rq);
+extern void update_cluster_topology(void);
+extern void note_task_waking(struct task_struct *p, u64 wallclock);
+extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
+extern void init_clusters(void);
+extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
+extern unsigned int max_task_load(void);
+extern void sched_account_irqtime(int cpu, struct task_struct *curr,
+				 u64 delta, u64 wallclock);
+extern void sched_account_irqstart(int cpu, struct task_struct *curr,
+				   u64 wallclock);
+extern unsigned int cpu_temp(int cpu);
+extern unsigned int nr_eligible_big_tasks(int cpu);
+extern int update_preferred_cluster(struct related_thread_group *grp,
+			struct task_struct *p, u32 old_load);
+extern void set_preferred_cluster(struct related_thread_group *grp);
+extern void add_new_task_to_grp(struct task_struct *new);
+extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
+extern void update_avg_burst(struct task_struct *p);
+extern void update_avg(u64 *avg, u64 sample);
+
+#define NO_BOOST 0
+#define FULL_THROTTLE_BOOST 1
+#define CONSERVATIVE_BOOST 2
+#define RESTRAINED_BOOST 3
+
+static inline struct sched_cluster *cpu_cluster(int cpu)
+{
+	return cpu_rq(cpu)->cluster;
+}
+
+static inline int cpu_capacity(int cpu)
+{
+	return cpu_rq(cpu)->cluster->capacity;
+}
+
+static inline int cpu_max_possible_capacity(int cpu)
+{
+	return cpu_rq(cpu)->cluster->max_possible_capacity;
+}
+
+static inline int cpu_load_scale_factor(int cpu)
+{
+	return cpu_rq(cpu)->cluster->load_scale_factor;
+}
+
+static inline int cpu_efficiency(int cpu)
+{
+	return cpu_rq(cpu)->cluster->efficiency;
+}
+
+static inline unsigned int cpu_cur_freq(int cpu)
+{
+	return cpu_rq(cpu)->cluster->cur_freq;
+}
+
+static inline unsigned int cpu_min_freq(int cpu)
+{
+	return cpu_rq(cpu)->cluster->min_freq;
+}
+
+static inline unsigned int cluster_max_freq(struct sched_cluster *cluster)
+{
+	/*
+	 * The governor and the thermal driver don't know about each other's
+	 * mitigation voting, so struct sched_cluster stores both and returns
+	 * the min() of the two as the current cluster fmax.
+	 */
+	return min(cluster->max_mitigated_freq, cluster->max_freq);
+}
+
+static inline unsigned int cpu_max_freq(int cpu)
+{
+	return cluster_max_freq(cpu_rq(cpu)->cluster);
+}
+
+static inline unsigned int cpu_max_possible_freq(int cpu)
+{
+	return cpu_rq(cpu)->cluster->max_possible_freq;
+}
+
+static inline int same_cluster(int src_cpu, int dst_cpu)
+{
+	return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
+}
+
+static inline int cpu_max_power_cost(int cpu)
+{
+	return cpu_rq(cpu)->cluster->max_power_cost;
+}
+
+static inline int cpu_min_power_cost(int cpu)
+{
+	return cpu_rq(cpu)->cluster->min_power_cost;
+}
+
+static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period)
+{
+	return div64_u64(cycles, period);
+}
+
+static inline bool hmp_capable(void)
+{
+	return max_possible_capacity != min_max_possible_capacity;
+}
+
+/*
+ * 'load' is in reference to "best cpu" at its best frequency.
+ * Scale that in reference to a given cpu, accounting for how bad it is
+ * in reference to "best cpu".
+ */
+static inline u64 scale_load_to_cpu(u64 task_load, int cpu)
+{
+	u64 lsf = cpu_load_scale_factor(cpu);
+
+	if (lsf != 1024) {
+		task_load *= lsf;
+		task_load /= 1024;
+	}
+
+	return task_load;
+}
+
+static inline unsigned int task_load(struct task_struct *p)
+{
+	return p->ravg.demand;
+}
+
+static inline void
+inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+				 struct task_struct *p)
+{
+	u32 task_load;
+
+	if (sched_disable_window_stats)
+		return;
+
+	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
+
+	stats->cumulative_runnable_avg += task_load;
+	stats->pred_demands_sum += p->ravg.pred_demand;
+}
+
+static inline void
+dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+				struct task_struct *p)
+{
+	u32 task_load;
+
+	if (sched_disable_window_stats)
+		return;
+
+	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
+
+	stats->cumulative_runnable_avg -= task_load;
+
+	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
+
+	stats->pred_demands_sum -= p->ravg.pred_demand;
+	BUG_ON((s64)stats->pred_demands_sum < 0);
+}
+
+static inline void
+fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+			      struct task_struct *p, s64 task_load_delta,
+			      s64 pred_demand_delta)
+{
+	if (sched_disable_window_stats)
+		return;
+
+	stats->cumulative_runnable_avg += task_load_delta;
+	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
+
+	stats->pred_demands_sum += pred_demand_delta;
+	BUG_ON((s64)stats->pred_demands_sum < 0);
+}
+
+#define pct_to_real(tunable)	\
+		(div64_u64((u64)tunable * (u64)max_task_load(), 100))
+
+#define real_to_pct(tunable)	\
+		(div64_u64((u64)tunable * (u64)100, (u64)max_task_load()))
+
+#define SCHED_HIGH_IRQ_TIMEOUT 3
+static inline u64 sched_irqload(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	s64 delta;
+
+	delta = get_jiffies_64() - rq->irqload_ts;
+	/*
+	 * The current context can be preempted by an irq, and rq->irqload_ts
+	 * can be updated from irq context, so delta can be negative. This is
+	 * okay: it simply means there was a recent irq occurrence, and we can
+	 * safely return the average irq load.
+	 */
+
+	if (delta < SCHED_HIGH_IRQ_TIMEOUT)
+		return rq->avg_irqload;
+	else
+		return 0;
+}
+
+static inline int sched_cpu_high_irqload(int cpu)
+{
+	return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
+}
+
+static inline bool task_in_related_thread_group(struct task_struct *p)
+{
+	return !!(rcu_access_pointer(p->grp) != NULL);
+}
+
+static inline
+struct related_thread_group *task_related_thread_group(struct task_struct *p)
+{
+	return rcu_dereference(p->grp);
+}
+
+#define PRED_DEMAND_DELTA ((s64)new_pred_demand - p->ravg.pred_demand)
+
+extern void
+check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
+
+extern void notify_migration(int src_cpu, int dest_cpu,
+			bool src_cpu_dead, struct task_struct *p);
+
+/* Is frequency of two cpus synchronized with each other? */
+static inline int same_freq_domain(int src_cpu, int dst_cpu)
+{
+	struct rq *rq = cpu_rq(src_cpu);
+
+	if (src_cpu == dst_cpu)
+		return 1;
+
+	return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
+}
+
+#define	BOOST_KICK	0
+#define	CPU_RESERVED	1
+
+static inline int is_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	return test_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline int mark_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	/* Name boost_flags as hmp_flags? */
+	return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline void clear_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	clear_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline u64 cpu_cravg_sync(int cpu, int sync)
+{
+	struct rq *rq = cpu_rq(cpu);
+	u64 load;
+
+	load = rq->hmp_stats.cumulative_runnable_avg;
+
+	/*
+	 * If load is being checked in a sync wakeup environment,
+	 * we may want to discount the load of the currently running
+	 * task.
+	 */
+	if (sync && cpu == smp_processor_id()) {
+		if (load > rq->curr->ravg.demand)
+			load -= rq->curr->ravg.demand;
+		else
+			load = 0;
+	}
+
+	return load;
+}
+
+static inline bool is_short_burst_task(struct task_struct *p)
+{
+	return p->ravg.avg_burst < sysctl_sched_short_burst &&
+	       p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
+}
+
+extern void check_for_migration(struct rq *rq, struct task_struct *p);
+extern void pre_big_task_count_change(const struct cpumask *cpus);
+extern void post_big_task_count_change(const struct cpumask *cpus);
+extern void set_hmp_defaults(void);
+extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
+extern unsigned int power_cost(int cpu, u64 demand);
+extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
+extern int sched_boost(void);
+extern int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
+					enum sched_boost_policy boost_policy);
+extern enum sched_boost_policy sched_boost_policy(void);
+extern int task_will_fit(struct task_struct *p, int cpu);
+extern u64 cpu_load(int cpu);
+extern u64 cpu_load_sync(int cpu, int sync);
+extern int preferred_cluster(struct sched_cluster *cluster,
+						struct task_struct *p);
+extern void inc_nr_big_task(struct hmp_sched_stats *stats,
+					struct task_struct *p);
+extern void dec_nr_big_task(struct hmp_sched_stats *stats,
+					struct task_struct *p);
+extern void inc_rq_hmp_stats(struct rq *rq,
+				struct task_struct *p, int change_cra);
+extern void dec_rq_hmp_stats(struct rq *rq,
+				struct task_struct *p, int change_cra);
+extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
+extern int is_big_task(struct task_struct *p);
+extern int upmigrate_discouraged(struct task_struct *p);
+extern struct sched_cluster *rq_cluster(struct rq *rq);
+extern int nr_big_tasks(struct rq *rq);
+extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
+					struct task_struct *p, s64 delta);
+extern void reset_task_stats(struct task_struct *p);
+extern void reset_cfs_rq_hmp_stats(int cpu, int reset_cra);
+extern void inc_hmp_sched_stats_fair(struct rq *rq,
+			struct task_struct *p, int change_cra);
+extern u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
+					struct cftype *cft);
+extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
+				struct cftype *cft, u64 upmigrate_discourage);
+extern void sched_boost_parse_dt(void);
+extern void clear_top_tasks_bitmap(unsigned long *bitmap);
+
+#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
+extern bool task_sched_boost(struct task_struct *p);
+extern int sync_cgroup_colocation(struct task_struct *p, bool insert);
+extern bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2);
+extern void update_cgroup_boost_settings(void);
+extern void restore_cgroup_boost_settings(void);
+
+#else
+static inline bool
+same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2)
+{
+	return true;
+}
+
+static inline bool task_sched_boost(struct task_struct *p)
+{
+	return true;
+}
+
+static inline void update_cgroup_boost_settings(void) { }
+static inline void restore_cgroup_boost_settings(void) { }
+#endif
+
+extern int alloc_related_thread_groups(void);
+
+extern unsigned long all_cluster_ids[];
+
+#else	/* CONFIG_SCHED_HMP */
+
+struct hmp_sched_stats;
+struct related_thread_group;
+struct sched_cluster;
+
+static inline enum sched_boost_policy sched_boost_policy(void)
+{
+	return SCHED_BOOST_NONE;
+}
+
+static inline bool task_sched_boost(struct task_struct *p)
+{
+	return true;
+}
+
+static inline int got_boost_kick(void)
+{
+	return 0;
+}
+
+static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
+				int event, u64 wallclock, u64 irqtime) { }
+
+static inline bool early_detection_notify(struct rq *rq, u64 wallclock)
+{
+	return 0;
+}
+
+static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { }
+static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
+static inline void clear_boost_kick(int cpu) { }
+static inline void clear_hmp_request(int cpu) { }
+static inline void mark_task_starting(struct task_struct *p) { }
+static inline void set_window_start(struct rq *rq) { }
+static inline void init_clusters(void) {}
+static inline void update_cluster_topology(void) { }
+static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
+static inline void set_task_last_switch_out(struct task_struct *p,
+					    u64 wallclock) { }
+
+static inline int task_will_fit(struct task_struct *p, int cpu)
+{
+	return 1;
+}
+
+static inline unsigned int power_cost(int cpu, u64 demand)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+
+static inline int sched_boost(void)
+{
+	return 0;
+}
+
+static inline int is_big_task(struct task_struct *p)
+{
+	return 0;
+}
+
+static inline int nr_big_tasks(struct rq *rq)
+{
+	return 0;
+}
+
+static inline int is_cpu_throttling_imminent(int cpu)
+{
+	return 0;
+}
+
+static inline int is_task_migration_throttled(struct task_struct *p)
+{
+	return 0;
+}
+
+static inline unsigned int cpu_temp(int cpu)
+{
+	return 0;
+}
+
+static inline void
+inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
+
+static inline void
+dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
+
+static inline int
+preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
+{
+	return 1;
+}
+
+static inline struct sched_cluster *rq_cluster(struct rq *rq)
+{
+	return NULL;
+}
+
+static inline void init_new_task_load(struct task_struct *p, bool idle_task)
+{
+}
+
+static inline u64 scale_load_to_cpu(u64 load, int cpu)
+{
+	return load;
+}
+
+static inline unsigned int nr_eligible_big_tasks(int cpu)
+{
+	return 0;
+}
+
+static inline int pct_task_load(struct task_struct *p) { return 0; }
+
+static inline int cpu_capacity(int cpu)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+
+static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
+
+static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+		 struct task_struct *p)
+{
+}
+
+static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+		 struct task_struct *p)
+{
+}
+
+static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
+				 u64 delta, u64 wallclock)
+{
+}
+
+static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
+					  u64 wallclock)
+{
+}
+
+static inline int sched_cpu_high_irqload(int cpu) { return 0; }
+
+static inline void set_preferred_cluster(struct related_thread_group *grp) { }
+
+static inline bool task_in_related_thread_group(struct task_struct *p)
+{
+	return false;
+}
+
+static inline
+struct related_thread_group *task_related_thread_group(struct task_struct *p)
+{
+	return NULL;
+}
+
+static inline u32 task_load(struct task_struct *p) { return 0; }
+
+static inline int update_preferred_cluster(struct related_thread_group *grp,
+			 struct task_struct *p, u32 old_load)
+{
+	return 0;
+}
+
+static inline void add_new_task_to_grp(struct task_struct *new) {}
+
+#define PRED_DEMAND_DELTA (0)
+
+static inline void
+check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
+
+static inline void notify_migration(int src_cpu, int dest_cpu,
+			bool src_cpu_dead, struct task_struct *p) { }
+
+static inline int same_freq_domain(int src_cpu, int dst_cpu)
+{
+	return 1;
+}
+
+static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
+static inline void pre_big_task_count_change(void) { }
+static inline void post_big_task_count_change(void) { }
+static inline void set_hmp_defaults(void) { }
+
+static inline void clear_reserved(int cpu) { }
+static inline void sched_boost_parse_dt(void) {}
+static inline int alloc_related_thread_groups(void) { return 0; }
+
+#define trace_sched_cpu_load(...)
+#define trace_sched_cpu_load_lb(...)
+#define trace_sched_cpu_load_cgroup(...)
+#define trace_sched_cpu_load_wakeup(...)
+
+static inline void update_avg_burst(struct task_struct *p) {}
+
+#endif	/* CONFIG_SCHED_HMP */
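scale_load_to_cpu() above rescales a task's windowed demand by the target cluster's load_scale_factor, with 1024 meaning no scaling; cluster_max_freq() similarly caps the reported fmax at the thermal vote. A standalone sketch of the load-scaling arithmetic, using assumed load_scale_factor values (1536 for a little cluster, 1024 for a big one) that are illustrative rather than taken from this patch:

#include <stdint.h>
#include <stdio.h>

/* Mirrors scale_load_to_cpu(); lsf is the cluster load_scale_factor. */
static uint64_t scale_load(uint64_t task_load, uint64_t lsf)
{
	if (lsf != 1024) {
		task_load *= lsf;
		task_load /= 1024;
	}
	return task_load;
}

int main(void)
{
	/* 400 units of demand count as 600 on a cluster with lsf = 1536 ... */
	printf("%llu\n", (unsigned long long)scale_load(400, 1536));
	/* ... and stay 400 on a cluster with lsf = 1024. */
	printf("%llu\n", (unsigned long long)scale_load(400, 1024));
	return 0;
}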
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index ee2af8e..5a09d5a 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -10,6 +10,24 @@ unsigned int sysctl_sched_cfs_boost __read_mostly;
 
 #ifdef CONFIG_CGROUP_SCHEDTUNE
 
+#ifdef CONFIG_SCHED_HMP
+struct schedtune;
+static inline void init_sched_boost(struct schedtune *st);
+static void schedtune_attach(struct cgroup_taskset *tset);
+static u64 sched_boost_override_read(struct cgroup_subsys_state *css,
+				     struct cftype *cft);
+static int sched_boost_override_write(struct cgroup_subsys_state *css,
+				     struct cftype *cft, u64 override);
+static u64 sched_boost_enabled_read(struct cgroup_subsys_state *css,
+				    struct cftype *cft);
+static int sched_boost_enabled_write(struct cgroup_subsys_state *css,
+				     struct cftype *cft, u64 enable);
+static u64 sched_colocate_read(struct cgroup_subsys_state *css,
+			       struct cftype *cft);
+static int sched_colocate_write(struct cgroup_subsys_state *css,
+				struct cftype *cft, u64 colocate);
+#endif /* CONFIG_SCHED_HMP */
+
 /*
  * EAS scheduler tunables for task groups.
  */
@@ -131,121 +149,6 @@ struct boost_groups {
 /* Boost groups affecting each CPU in the system */
 DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups);
 
-#ifdef CONFIG_SCHED_HMP
-static inline void init_sched_boost(struct schedtune *st)
-{
-	st->sched_boost_no_override = false;
-	st->sched_boost_enabled = true;
-	st->sched_boost_enabled_backup = st->sched_boost_enabled;
-	st->colocate = false;
-	st->colocate_update_disabled = false;
-}
-
-bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2)
-{
-	return task_schedtune(tsk1) == task_schedtune(tsk2);
-}
-
-void update_cgroup_boost_settings(void)
-{
-	int i;
-
-	for (i = 0; i < BOOSTGROUPS_COUNT; i++) {
-		if (!allocated_group[i])
-			break;
-
-		if (allocated_group[i]->sched_boost_no_override)
-			continue;
-
-		allocated_group[i]->sched_boost_enabled = false;
-	}
-}
-
-void restore_cgroup_boost_settings(void)
-{
-	int i;
-
-	for (i = 0; i < BOOSTGROUPS_COUNT; i++) {
-		if (!allocated_group[i])
-			break;
-
-		allocated_group[i]->sched_boost_enabled =
-			allocated_group[i]->sched_boost_enabled_backup;
-	}
-}
-
-bool task_sched_boost(struct task_struct *p)
-{
-	struct schedtune *st = task_schedtune(p);
-
-	return st->sched_boost_enabled;
-}
-
-static u64
-sched_boost_override_read(struct cgroup_subsys_state *css,
-			struct cftype *cft)
-{
-	struct schedtune *st = css_st(css);
-
-	return st->sched_boost_no_override;
-}
-
-static int sched_boost_override_write(struct cgroup_subsys_state *css,
-			struct cftype *cft, u64 override)
-{
-	struct schedtune *st = css_st(css);
-
-	st->sched_boost_no_override = !!override;
-
-	return 0;
-}
-
-static u64 sched_boost_enabled_read(struct cgroup_subsys_state *css,
-			struct cftype *cft)
-{
-	struct schedtune *st = css_st(css);
-
-	return st->sched_boost_enabled;
-}
-
-static int sched_boost_enabled_write(struct cgroup_subsys_state *css,
-			struct cftype *cft, u64 enable)
-{
-	struct schedtune *st = css_st(css);
-
-	st->sched_boost_enabled = !!enable;
-	st->sched_boost_enabled_backup = st->sched_boost_enabled;
-
-	return 0;
-}
-
-static u64 sched_colocate_read(struct cgroup_subsys_state *css,
-			struct cftype *cft)
-{
-	struct schedtune *st = css_st(css);
-
-	return st->colocate;
-}
-
-static int sched_colocate_write(struct cgroup_subsys_state *css,
-			struct cftype *cft, u64 colocate)
-{
-	struct schedtune *st = css_st(css);
-
-	if (st->colocate_update_disabled)
-		return -EPERM;
-
-	st->colocate = !!colocate;
-	st->colocate_update_disabled = true;
-	return 0;
-}
-
-#else /* CONFIG_SCHED_HMP */
-
-static inline void init_sched_boost(struct schedtune *st) { }
-
-#endif /* CONFIG_SCHED_HMP */
-
 static u64
 boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
@@ -270,22 +173,6 @@ boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
 	return 0;
 }
 
-static void schedtune_attach(struct cgroup_taskset *tset)
-{
-	struct task_struct *task;
-	struct cgroup_subsys_state *css;
-	struct schedtune *st;
-	bool colocate;
-
-	cgroup_taskset_first(tset, &css);
-	st = css_st(css);
-
-	colocate = st->colocate;
-
-	cgroup_taskset_for_each(task, css, tset)
-		sync_cgroup_colocation(task, colocate);
-}
-
 static struct cftype files[] = {
 	{
 		.name = "boost",
@@ -423,3 +310,135 @@ sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
 	return 0;
 }
 
+/* QHMP/Zone implementations */
+
+static void schedtune_attach(struct cgroup_taskset *tset)
+{
+	struct task_struct *task;
+	struct cgroup_subsys_state *css;
+	struct schedtune *st;
+	bool colocate;
+
+	cgroup_taskset_first(tset, &css);
+	st = css_st(css);
+
+	colocate = st->colocate;
+
+	cgroup_taskset_for_each(task, css, tset)
+		sync_cgroup_colocation(task, colocate);
+}
+
+#ifdef CONFIG_SCHED_HMP
+static inline void init_sched_boost(struct schedtune *st)
+{
+	st->sched_boost_no_override = false;
+	st->sched_boost_enabled = true;
+	st->sched_boost_enabled_backup = st->sched_boost_enabled;
+	st->colocate = false;
+	st->colocate_update_disabled = false;
+}
+
+bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2)
+{
+	return task_schedtune(tsk1) == task_schedtune(tsk2);
+}
+
+void update_cgroup_boost_settings(void)
+{
+	int i;
+
+	for (i = 0; i < BOOSTGROUPS_COUNT; i++) {
+		if (!allocated_group[i])
+			break;
+
+		if (allocated_group[i]->sched_boost_no_override)
+			continue;
+
+		allocated_group[i]->sched_boost_enabled = false;
+	}
+}
+
+void restore_cgroup_boost_settings(void)
+{
+	int i;
+
+	for (i = 0; i < BOOSTGROUPS_COUNT; i++) {
+		if (!allocated_group[i])
+			break;
+
+		allocated_group[i]->sched_boost_enabled =
+			allocated_group[i]->sched_boost_enabled_backup;
+	}
+}
+
+bool task_sched_boost(struct task_struct *p)
+{
+	struct schedtune *st = task_schedtune(p);
+
+	return st->sched_boost_enabled;
+}
+
+static u64
+sched_boost_override_read(struct cgroup_subsys_state *css,
+			struct cftype *cft)
+{
+	struct schedtune *st = css_st(css);
+
+	return st->sched_boost_no_override;
+}
+
+static int sched_boost_override_write(struct cgroup_subsys_state *css,
+			struct cftype *cft, u64 override)
+{
+	struct schedtune *st = css_st(css);
+
+	st->sched_boost_no_override = !!override;
+
+	return 0;
+}
+
+static u64 sched_boost_enabled_read(struct cgroup_subsys_state *css,
+			struct cftype *cft)
+{
+	struct schedtune *st = css_st(css);
+
+	return st->sched_boost_enabled;
+}
+
+static int sched_boost_enabled_write(struct cgroup_subsys_state *css,
+			struct cftype *cft, u64 enable)
+{
+	struct schedtune *st = css_st(css);
+
+	st->sched_boost_enabled = !!enable;
+	st->sched_boost_enabled_backup = st->sched_boost_enabled;
+
+	return 0;
+}
+
+static u64 sched_colocate_read(struct cgroup_subsys_state *css,
+			struct cftype *cft)
+{
+	struct schedtune *st = css_st(css);
+
+	return st->colocate;
+}
+
+static int sched_colocate_write(struct cgroup_subsys_state *css,
+			struct cftype *cft, u64 colocate)
+{
+	struct schedtune *st = css_st(css);
+
+	if (st->colocate_update_disabled)
+		return -EPERM;
+
+	st->colocate = !!colocate;
+	st->colocate_update_disabled = true;
+	return 0;
+}
+
+#else /* CONFIG_SCHED_HMP */
+
+static inline void init_sched_boost(struct schedtune *st) { }
+
+#endif /* CONFIG_SCHED_HMP */
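sched_colocate_write() above latches the first value written to the colocate attribute and then sets colocate_update_disabled, so every later write fails with -EPERM. A condensed standalone sketch of that write-once behaviour; struct st_sketch and the test harness are illustrative stand-ins for the real struct schedtune:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct st_sketch {
	bool colocate;
	bool colocate_update_disabled;
};

static int colocate_write(struct st_sketch *st, unsigned long long val)
{
	if (st->colocate_update_disabled)
		return -EPERM;
	st->colocate = !!val;
	st->colocate_update_disabled = true;
	return 0;
}

int main(void)
{
	struct st_sketch st = { false, false };

	printf("%d\n", colocate_write(&st, 1));	/* 0: first write sticks */
	printf("%d\n", colocate_write(&st, 0));	/* -1 (-EPERM): later writes rejected */
	return 0;
}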
diff --git a/scripts/build-all.py b/scripts/build-all.py
index 0f8babf..d36e96f 100755
--- a/scripts/build-all.py
+++ b/scripts/build-all.py
@@ -61,7 +61,7 @@
     """Ensure that PWD is a kernel directory"""
     have_defconfig = any([
         os.path.isfile('arch/arm64/configs/msm_defconfig'),
-        os.path.isfile('arch/arm64/configs/msmskunk_defconfig')])
+        os.path.isfile('arch/arm64/configs/sdm845_defconfig')])
 
     if not all([os.path.isfile('MAINTAINERS'), have_defconfig]):
         fail("This doesn't seem to be an MSM kernel dir")
@@ -305,10 +305,12 @@
         r'[fm]sm[0-9]*_defconfig',
         r'apq*_defconfig',
         r'qsd*_defconfig',
-	r'mpq*_defconfig',
+        r'mpq*_defconfig',
+        r'sdm[0-9]*_defconfig',
         )
     arch64_pats = (
-	r'msm*_defconfig',
+        r'msm*_defconfig',
+        r'sdm[0-9]*_defconfig',
         )
     for p in arch_pats:
         for n in glob.glob('arch/arm/configs/' + p):
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index b3ac439..4a7af76 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -233,8 +233,8 @@
 	 the machine driver and the corresponding
 	 DAI-links
 
-config SND_SOC_MSMSKUNK
-	tristate "SoC Machine driver for MSMSKUNK boards"
+config SND_SOC_SDM845
+	tristate "SoC Machine driver for SDM845 boards"
 	depends on ARCH_QCOM
 	select SND_SOC_COMPRESS
 	select SND_SOC_QDSP6V2
@@ -255,7 +255,7 @@
 	select SND_HWDEP
         select DTS_EAGLE
 	help
-	 To add support for SoC audio on MSMSKUNK.
+	 To add support for SoC audio on SDM845.
 	 This enables sound soc drivers that interfaces
 	 with DSP. This also enables the machine driver
 	 and the corresponding DAI-links.
diff --git a/sound/soc/msm/Makefile b/sound/soc/msm/Makefile
index 50ceda4..e0544fc 100644
--- a/sound/soc/msm/Makefile
+++ b/sound/soc/msm/Makefile
@@ -34,6 +34,6 @@
 obj-$(CONFIG_SND_SOC_EXT_CODEC) += snd-soc-msmfalcon-common.o
 obj-$(CONFIG_SND_SOC_EXT_CODEC) += snd-soc-ext-codec.o
 
-# for MSMSKUNK sound card driver
-snd-soc-msmskunk-objs := msmskunk.o
-obj-$(CONFIG_SND_SOC_MSMSKUNK) += snd-soc-msmskunk.o
+# for SDM845 sound card driver
+snd-soc-sdm845-objs := sdm845.o
+obj-$(CONFIG_SND_SOC_SDM845) += snd-soc-sdm845.o
diff --git a/sound/soc/msm/msmskunk.c b/sound/soc/msm/sdm845.c
similarity index 99%
rename from sound/soc/msm/msmskunk.c
rename to sound/soc/msm/sdm845.c
index 4759235..6987949 100644
--- a/sound/soc/msm/msmskunk.c
+++ b/sound/soc/msm/sdm845.c
@@ -38,9 +38,9 @@
 #include "../codecs/wcd934x/wcd934x-mbhc.h"
 #include "../codecs/wsa881x.h"
 
-#define DRV_NAME "msmskunk-asoc-snd"
+#define DRV_NAME "sdm845-asoc-snd"
 
-#define __CHIPSET__ "MSMSKUNK "
+#define __CHIPSET__ "SDM845 "
 #define MSM_DAILINK_NAME(name) (__CHIPSET__#name)
 
 #define SAMPLING_RATE_8KHZ      8000
@@ -3173,7 +3173,7 @@ static int msm_adsp_power_up_config(struct snd_soc_codec *codec)
 	return ret;
 }
 
-static int msmskunk_notifier_service_cb(struct notifier_block *this,
+static int sdm845_notifier_service_cb(struct notifier_block *this,
 					 unsigned long opcode, void *ptr)
 {
 	int ret;
@@ -3231,7 +3231,7 @@ static int msmskunk_notifier_service_cb(struct notifier_block *this,
 }
 
 static struct notifier_block service_nb = {
-	.notifier_call  = msmskunk_notifier_service_cb,
+	.notifier_call  = sdm845_notifier_service_cb,
 	.priority = -INT_MAX,
 };
 
@@ -5417,7 +5417,7 @@ static int msm_snd_card_tavil_late_probe(struct snd_soc_card *card)
 }
 
 struct snd_soc_card snd_soc_card_tavil_msm = {
-	.name		= "msmskunk-tavil-snd-card",
+	.name		= "sdm845-tavil-snd-card",
 	.late_probe	= msm_snd_card_tavil_late_probe,
 };
 
@@ -5647,13 +5647,13 @@ static struct snd_soc_dai_link msm_stub_dai_links[
 			 ARRAY_SIZE(msm_stub_be_dai_links)];
 
 struct snd_soc_card snd_soc_card_stub_msm = {
-	.name		= "msmskunk-stub-snd-card",
+	.name		= "sdm845-stub-snd-card",
 };
 
-static const struct of_device_id msmskunk_asoc_machine_of_match[]  = {
-	{ .compatible = "qcom,msmskunk-asoc-snd-tavil",
+static const struct of_device_id sdm845_asoc_machine_of_match[]  = {
+	{ .compatible = "qcom,sdm845-asoc-snd-tavil",
 	  .data = "tavil_codec"},
-	{ .compatible = "qcom,msmskunk-asoc-snd-stub",
+	{ .compatible = "qcom,sdm845-asoc-snd-stub",
 	  .data = "stub_codec"},
 	{},
 };
@@ -5666,7 +5666,7 @@ static struct snd_soc_card *populate_snd_card_dailinks(struct device *dev)
 	int total_links;
 	const struct of_device_id *match;
 
-	match = of_match_node(msmskunk_asoc_machine_of_match, dev->of_node);
+	match = of_match_node(sdm845_asoc_machine_of_match, dev->of_node);
 	if (!match) {
 		dev_err(dev, "%s: No DT match found for sound card\n",
 			__func__);
@@ -6107,7 +6107,7 @@ static int msm_asoc_machine_probe(struct platform_device *pdev)
 		goto err;
 	}
 
-	match = of_match_node(msmskunk_asoc_machine_of_match,
+	match = of_match_node(sdm845_asoc_machine_of_match,
 			pdev->dev.of_node);
 	if (!match) {
 		dev_err(&pdev->dev, "%s: no matched codec is found.\n",
@@ -6222,7 +6222,7 @@ static int msm_asoc_machine_probe(struct platform_device *pdev)
 	msm_i2s_auxpcm_init(pdev);
 
 	is_initial_boot = true;
-	ret = audio_notifier_register("msmskunk", AUDIO_NOTIFIER_ADSP_DOMAIN,
+	ret = audio_notifier_register("sdm845", AUDIO_NOTIFIER_ADSP_DOMAIN,
 				      &service_nb);
 	if (ret < 0)
 		pr_err("%s: Audio notifier register failed ret = %d\n",
@@ -6250,19 +6250,19 @@ static int msm_asoc_machine_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static struct platform_driver msmskunk_asoc_machine_driver = {
+static struct platform_driver sdm845_asoc_machine_driver = {
 	.driver = {
 		.name = DRV_NAME,
 		.owner = THIS_MODULE,
 		.pm = &snd_soc_pm_ops,
-		.of_match_table = msmskunk_asoc_machine_of_match,
+		.of_match_table = sdm845_asoc_machine_of_match,
 	},
 	.probe = msm_asoc_machine_probe,
 	.remove = msm_asoc_machine_remove,
 };
-module_platform_driver(msmskunk_asoc_machine_driver);
+module_platform_driver(sdm845_asoc_machine_driver);
 
 MODULE_DESCRIPTION("ALSA SoC msm");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_DEVICE_TABLE(of, msmskunk_asoc_machine_of_match);
+MODULE_DEVICE_TABLE(of, sdm845_asoc_machine_of_match);