Merge "msm: camera: Use devm API to free irq" into msm-4.9
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 53e4295..fe53218 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -73,9 +73,6 @@
AMBA markee):
- "arm,coresight-replicator"
- "qcom,coresight-csr"
- - "arm,coresight-cti"
- - "qcom,coresight-tpda"
- - "qcom,coresight-tpdm"
- "qcom,coresight-remote-etm"
- "qcom,coresight-hwevent"
- "qcom,coresight-dummy"
@@ -264,7 +261,7 @@
};
tpda_mss: tpda@7043000 {
- compatible = "qcom,coresight-tpda";
+ compatible = "qcom,coresight-tpda", "arm,primecell";
reg = <0x7043000 0x1000>;
reg-names = "tpda-base";
@@ -274,9 +271,8 @@
qcom,dsb-elem-size = <0 32>;
qcom,cmb-elem-size = <0 32>;
- clocks = <&clock_gcc clk_qdss_clk>,
- <&clock_gcc clk_qdss_a_clk>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop clk_qdss_clk>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -345,15 +341,14 @@
};
tpdm_mss: tpdm@7042000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
reg = <0x7042000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-mss";
- clocks = <&clock_gcc clk_qdss_clk>,
- <&clock_gcc clk_qdss_a_clk>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop qdss_clk>;
+ clock-names = "apb_pclk";
port{
tpdm_mss_out_tpda_mss: endpoint {
@@ -364,15 +359,14 @@
4. CTIs
cti0: cti@6010000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,coresight-cti", "arm,primecell";
reg = <0x6010000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti0";
- clocks = <&clock_gcc clk_qdss_clk>,
- <&clock_gcc clk_qdss_a_clk>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop qdss_clk>;
+ clock-names = "apb_pclk";
};
[1]. There are currently two versions of STM: STM32 and STM500. Both
diff --git a/Documentation/devicetree/bindings/arm/msm/imem.txt b/Documentation/devicetree/bindings/arm/msm/imem.txt
index d1f8ce1..eaa7146b 100644
--- a/Documentation/devicetree/bindings/arm/msm/imem.txt
+++ b/Documentation/devicetree/bindings/arm/msm/imem.txt
@@ -57,6 +57,11 @@
-compatible: "qcom,msm-imem-emergency_download_mode"
-reg: start address and size of emergency_download_mode region in imem
+KASLR Offset:
+------------------------
+-compatible: "qcom,msm-imem-kaslr_offset"
+-reg: start address and size of kaslr_offset region in imem
+
USB Diag Cookies:
-----------------
Memory region used to store USB PID and serial numbers to be used by
@@ -95,6 +100,12 @@
reg = <0x6b0 32>;
};
+ kaslr_offset@6d0 {
+ compatible = "qcom,msm-imem-kaslr_offset";
+ reg = <0x6d0 12>;
+ };
+
+
pil@94c {
compatible = "qcom,msm-imem-pil";
reg = <0x94c 200>;
diff --git a/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
index c6626d1..3ad0986 100644
--- a/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
@@ -6,8 +6,9 @@
DSI Controller:
Required properties:
- compatible: Should be "qcom,dsi-ctrl-hw-v<version>". Supported
- versions include 1.4 and 2.0.
- eg: qcom,dsi-ctrl-hw-v1.4, qcom,dsi-ctrl-hw-v2.0
+ versions include 1.4, 2.0 and 2.2.
+ eg: qcom,dsi-ctrl-hw-v1.4, qcom,dsi-ctrl-hw-v2.0,
+ qcom,dsi-ctrl-hw-v2.2
And for dsi phy driver:
qcom,dsi-phy-v0.0-hpm, qcom,dsi-phy-v0.0-lpm,
qcom,dsi-phy-v1.0, qcom,dsi-phy-v2.0,
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 95e6f6c..da9a632 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -72,6 +72,11 @@
during clock scaling. If this property is not
defined, then it falls back to the default HS
bus speed mode to maintain backward compatibility.
+ - qcom,sdr104-wa: On certain chipsets, SDR104 mode might be unstable causing CRC errors
+ on the interface. So there is a workaround implemented to skip printing
+ register dumps on CRC errors and also downgrade bus speed mode to
+ SDR50/DDR50 in case of continuous CRC errors. Set this flag to enable
+ this workaround.
In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltage).
- qcom,<supply>-always-on - specifies whether supply should be kept "on" always.
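Illustrative only (the node label, unit address and companion properties below are assumptions, not part of this patch): the new flag is simply listed alongside the other qcom,* properties of an sdhci-msm node, for example:

	sdhc_2: sdhci@8804000 {
		compatible = "qcom,sdhci-msm";
		/* ... other required sdhci-msm properties (reg, interrupts, clocks, ...) ... */

		/* Skip CRC-error register dumps and fall back to SDR50/DDR50
		 * after continuous CRC errors in SDR104 mode.
		 */
		qcom,sdr104-wa;
	};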
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index f4a22e0..e1f194f3 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -169,6 +169,18 @@
Definition: Boolean flag which when present enables input suspend for
debug battery.
+- qcom,min-freq-khz
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the minimum charger buck/boost switching frequency
+ in kHz. It overrides the minimum frequency defined for the charger.
+
+- qcom,max-freq-khz
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the maximum charger buck/boost switching frequency in
+ kHz. It overrides the maximum frequency defined for the charger.
+
=============================================
Second Level Nodes - SMB2 Charger Peripherals
=============================================
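As a sketch only (the node label and the frequency values are illustrative assumptions, not taken from this patch), the two new properties sit directly in the SMB2 charger node:

	pmi8998_charger: qcom,qpnp-smb2 {
		compatible = "qcom,qpnp-smb2";
		/* Clamp the buck/boost switching frequency to an assumed
		 * 600 kHz - 1000 kHz window, overriding the limits defined
		 * for the charger.
		 */
		qcom,min-freq-khz = <600>;
		qcom,max-freq-khz = <1000>;
		/* ... remaining required charger properties ... */
	};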
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
index 92ef23c..5529e308 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
@@ -22,7 +22,8 @@
Definition: String which indicates the charging mode. Can be one of the
following:
Standalone/Parallel Master - "qcom,smb138x-charger"
- Parallel Slave - "qcom,smb138x-parallel-slave"
+ smb138x Parallel Slave - "qcom,smb138x-parallel-slave"
+ smb1355 Parallel Slave - "qcom,smb1355-parallel-slave"
- qcom,pmic-revid
Usage: required
@@ -35,7 +36,8 @@
Usage: optional
Value type: <u32>
Definition: Specifies parallel charging mode. If not specified, MID-MID
- option is selected by default.
+ option is selected by default. Note that smb1355 can only
+ run in MID-MID configuration.
- qcom,suspend-input
Usage: optional
@@ -125,7 +127,7 @@
=======
smb138x_charger: qcom,smb138x-charger {
- compatible = "qcom,qpnp-smb138x-charger";
+ compatible = "qcom,smb138x-charger";
#address-cells = <1>;
#size-cells = <1>;
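For illustration (the label and the pmic-revid phandle are assumptions, not taken from this patch), an smb1355 parallel slave uses the new compatible; qcom,parallel-mode may be left out because MID-MID, the default, is the only configuration smb1355 supports:

	smb1355_charger: qcom,smb1355-charger {
		compatible = "qcom,smb1355-parallel-slave";
		qcom,pmic-revid = <&smb1355_revid>;
		/* qcom,parallel-mode omitted: defaults to MID-MID */
		/* ... remaining properties and peripheral sub-nodes ... */
	};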
diff --git a/Documentation/devicetree/bindings/prng/msm-rng.txt b/Documentation/devicetree/bindings/prng/msm-rng.txt
new file mode 100644
index 0000000..917c2fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/prng/msm-rng.txt
@@ -0,0 +1,18 @@
+* RNG (Random Number Generator)
+
+Required properties:
+- compatible : Should be "qcom,msm-rng"
+- reg : Offset and length of the register set for the device
+
+Optional properties:
+- qcom,msm-rng-iface-clk : Present if the device uses an iface clock.
+- qcom,no-qrng-config : Flag that decides whether the driver performs the hardware configuration or not.
+
+Example:
+
+ qcom,msm-rng@f9bff000 {
+ compatible = "qcom,msm-rng";
+ reg = <0xf9bff000 0x200>;
+ qcom,msm-rng-iface-clk;
+ qcom,no-qrng-config;
+ };
diff --git a/Makefile b/Makefile
index 7040118..2b8f550 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 24
+SUBLEVEL = 25
EXTRAVERSION =
NAME = Roaring Lionus
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index cb7ab27..0afa5a8 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -12,6 +12,7 @@
#include "skeleton.dtsi"
+#include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
/ {
model = "Qualcomm Technologies, Inc. SDX POORWILLS";
@@ -135,6 +136,18 @@
};
};
+ clock_gcc: qcom,gcc@100000 {
+ compatible = "qcom,dummycc";
+ clock-output-names = "gcc_clocks";
+ #clock-cells = <1>;
+ };
+
+ clock_cpu: qcom,clock-a7@17810008 {
+ compatible = "qcom,dummycc";
+ clock-output-names = "cpu_clocks";
+ #clock-cells = <1>;
+ };
+
blsp1_uart2: serial@831000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x831000 0x200>;
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index dc44f9d..1f6d2cc 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -207,6 +207,7 @@
CONFIG_POWER_RESET=y
CONFIG_POWER_SUPPLY=y
CONFIG_THERMAL=y
+CONFIG_REGULATOR=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
@@ -257,6 +258,7 @@
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_USB_BAM=y
CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_QCOM_SCM=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_MSM_SMEM=y
CONFIG_TRACER_PKT=y
@@ -264,6 +266,7 @@
CONFIG_MSM_SMP2P_TEST=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_PWM=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_ANDROID=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 2bfcf87..5d61163 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -198,6 +198,7 @@
CONFIG_THERMAL=y
CONFIG_MSM_CDC_PINCTRL=y
CONFIG_MSM_CDC_SUPPLY=y
+CONFIG_REGULATOR=y
CONFIG_FB=y
CONFIG_SOUND=y
CONFIG_SND=y
@@ -247,13 +248,13 @@
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_QCOM_SMEM=y
CONFIG_QCOM_SMD=y
+CONFIG_QCOM_SCM=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_TRACER_PKT=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_PWM=y
CONFIG_QCOM_SHOW_RESUME_IRQ=y
-CONFIG_RESET_CONTROLLER=y
CONFIG_ANDROID=y
CONFIG_STM=y
CONFIG_EXT3_FS=y
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index ef75d2f..f4d7965 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -49,5 +49,7 @@
select MSM_JTAG_MM if CORESIGHT_ETM
select PM_DEVFREQ
select COMMON_CLK
+ select COMMON_CLK_QCOM
+ select QCOM_GDSC
endmenu
endif
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index 1a0ec0b..434de76 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -21,9 +21,8 @@
coresight-name = "coresight-replicator";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -57,9 +56,8 @@
coresight-name = "coresight-replicator-swao";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -106,9 +104,8 @@
coresight-name = "coresight-tmc-etf-swao";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -143,9 +140,8 @@
coresight-name = "coresight-funnel-swao";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -171,7 +167,8 @@
};
tpda_swao: tpda@6b01000 {
- compatible = "qcom,coresight-tpda";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
reg = <0x6b01000 0x1000>;
reg-names = "tpda-base";
@@ -181,9 +178,8 @@
qcom,dsb-elem-size = <1 32>;
qcom,cmb-elem-size = <0 64>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -220,16 +216,16 @@
};
tpdm_swao0: tpdm@6b02000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6b02000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-swao-0";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_swao0_out_tpda_swao: endpoint {
@@ -239,15 +235,15 @@
};
tpdm_swao1: tpdm@6b03000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6b03000 0x1000>;
reg-names = "tpdm-base";
coresight-name="coresight-tpdm-swao-1";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_swao1_out_tpda_swao: endpoint {
@@ -265,13 +261,13 @@
reg-names = "tmc-base", "bam-base";
arm,buffer-size = <0x400000>;
+ arm,sg-enable;
coresight-name = "coresight-tmc-etr";
coresight-ctis = <&cti0 &cti8>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tmc_etr_in_replicator: endpoint {
@@ -292,9 +288,8 @@
coresight-ctis = <&cti0 &cti8>;
arm,default-sink;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -329,9 +324,8 @@
coresight-name = "coresight-funnel-merg";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -375,9 +369,8 @@
coresight-name = "coresight-stm";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
stm_out_funnel_in0: endpoint {
@@ -396,9 +389,8 @@
coresight-name = "coresight-funnel-in0";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -449,9 +441,8 @@
coresight-name = "coresight-funnel-in2";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -498,7 +489,8 @@
};
tpda: tpda@6004000 {
- compatible = "qcom,coresight-tpda";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
reg = <0x6004000 0x1000>;
reg-names = "tpda-base";
@@ -519,9 +511,8 @@
<7 64>,
<13 64>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -618,9 +609,8 @@
coresight-name = "coresight-funnel-modem";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -646,7 +636,8 @@
};
tpda_modem: tpda@6831000 {
- compatible = "qcom,coresight-tpda";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
reg = <0x6831000 0x1000>;
reg-names = "tpda-base";
@@ -656,9 +647,8 @@
qcom,dsb-elem-size = <0 32>;
qcom,cmb-elem-size = <0 64>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -683,15 +673,15 @@
};
tpdm_modem: tpdm@6830000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6830000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-modem";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_modem_out_tpda_modem: endpoint {
@@ -709,9 +699,8 @@
coresight-name = "coresight-funnel-lpass";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -743,9 +732,8 @@
coresight-name = "coresight-tpdm-lpass";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "core_clk";
port {
tpdm_lpass_out_funnel_lpass: endpoint {
@@ -755,15 +743,15 @@
};
tpdm_center: tpdm@6c28000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6c28000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-center";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_center_out_tpda: endpoint {
@@ -773,15 +761,15 @@
};
tpdm_north: tpdm@6a24000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6a24000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-north";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_north_out_tpda: endpoint {
@@ -791,15 +779,15 @@
};
tpdm_qm: tpdm@69d0000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x69d0000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-qm";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_qm_out_tpda: endpoint {
@@ -809,7 +797,8 @@
};
tpda_apss: tpda@7862000 {
- compatible = "qcom,coresight-tpda";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
reg = <0x7862000 0x1000>;
reg-names = "tpda-base";
@@ -818,9 +807,8 @@
qcom,tpda-atid = <66>;
qcom,dsb-elem-size = <0 32>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -845,15 +833,15 @@
};
tpdm_apss: tpdm@7860000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x7860000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-apss";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_apss_out_tpda_apss: endpoint {
@@ -863,7 +851,8 @@
};
tpda_llm_silver: tpda@78c0000 {
- compatible = "qcom,coresight-tpda";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
reg = <0x78c0000 0x1000>;
reg-names = "tpda-base";
@@ -872,9 +861,8 @@
qcom,tpda-atid = <72>;
qcom,cmb-elem-size = <0 64>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -899,15 +887,15 @@
};
tpdm_llm_silver: tpdm@78a0000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x78a0000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-llm-silver";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_llm_silver_out_tpda_llm_silver: endpoint {
@@ -918,7 +906,8 @@
};
tpda_llm_gold: tpda@78d0000 {
- compatible = "qcom,coresight-tpda";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
reg = <0x78d0000 0x1000>;
reg-names = "tpda-base";
@@ -927,9 +916,8 @@
qcom,tpda-atid = <73>;
qcom,cmb-elem-size = <0 64>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -954,15 +942,15 @@
};
tpdm_llm_gold: tpdm@78b0000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x78b0000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-llm-gold";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_llm_gold_out_tpda_llm_gold: endpoint {
@@ -981,9 +969,8 @@
coresight-name = "coresight-funnel-dl-mm";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -1009,15 +996,15 @@
};
tpdm_mm: tpdm@6c08000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6c08000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-mm";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_mm_out_funnel_dl_mm: endpoint {
@@ -1035,9 +1022,8 @@
coresight-name = "coresight-funnel-ddr-0";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -1063,15 +1049,15 @@
};
tpdm_ddr: tpdm@69e0000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x69e0000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-ddr";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_ddr_out_funnel_ddr_0: endpoint {
@@ -1081,15 +1067,15 @@
};
tpdm_pimem: tpdm@6850000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6850000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-pimem";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_pimem_out_tpda: endpoint {
@@ -1099,15 +1085,15 @@
};
tpdm_vsense: tpdm@6840000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6840000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-vsense";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port{
tpdm_vsense_out_tpda: endpoint {
@@ -1117,7 +1103,8 @@
};
tpda_olc: tpda@7832000 {
- compatible = "qcom,coresight-tpda";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
reg = <0x7832000 0x1000>;
reg-names = "tpda-base";
@@ -1126,9 +1113,8 @@
qcom,tpda-atid = <69>;
qcom,cmb-elem-size = <0 64>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -1152,15 +1138,15 @@
};
tpdm_olc: tpdm@7830000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x7830000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-olc";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port{
tpdm_olc_out_tpda_olc: endpoint {
@@ -1170,7 +1156,8 @@
};
tpda_spss: tpda@6882000 {
- compatible = "qcom,coresight-tpda";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
reg = <0x6882000 0x1000>;
reg-names = "tpda-base";
@@ -1179,9 +1166,8 @@
qcom,tpda-atid = <70>;
qcom,dsb-elem-size = <0 32>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -1205,16 +1191,15 @@
};
tpdm_spss: tpdm@6880000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6880000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-spss";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
-
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
qcom,msr-fix-req;
port{
@@ -1233,9 +1218,8 @@
coresight-name = "coresight-funnel-spss";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -1269,9 +1253,8 @@
coresight-name = "coresight-funnel-qatb";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -1297,299 +1280,315 @@
};
cti0: cti@6010000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6010000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti0";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti1: cti@6011000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6011000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti1";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti2: cti@6012000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6012000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti2";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
};
cti3: cti@6013000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6013000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti3";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti4: cti@6014000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6014000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti4";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti5: cti@6015000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6015000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti5";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti6: cti@6016000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6016000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti6";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti7: cti@6017000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6017000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti7";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti8: cti@6018000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6018000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti8";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti9: cti@6019000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6019000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti9";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti10: cti@601a000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x601a000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti10";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti11: cti@601b000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x601b000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti11";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti12: cti@601c000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x601c000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti12";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti13: cti@601d000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x601d000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti13";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti14: cti@601e000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x601e000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti14";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti15: cti@601f000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x601f000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti15";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti_cpu0: cti@7020000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7020000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu0";
cpu = <&CPU0>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti_cpu1: cti@7120000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7120000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu1";
cpu = <&CPU1>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
};
cti_cpu2: cti@7220000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7220000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu2";
cpu = <&CPU2>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
};
cti_cpu3: cti@7320000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7320000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu3";
cpu = <&CPU3>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
};
cti_cpu4: cti@7420000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7420000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu4";
cpu = <&CPU4>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
};
cti_cpu5: cti@7520000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7520000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu5";
cpu = <&CPU5>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
};
cti_cpu6: cti@7620000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7620000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu6";
cpu = <&CPU6>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
};
cti_cpu7: cti@7720000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7720000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu7";
cpu = <&CPU7>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
};
dummy_eud: dummy_sink {
@@ -1616,9 +1615,8 @@
coresight-name = "coresight-funnel-apss-merg";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -1688,9 +1686,8 @@
coresight-name = "coresight-etm0";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
etm0_out_funnel_apss: endpoint {
@@ -1708,9 +1705,8 @@
coresight-name = "coresight-etm1";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
etm1_out_funnel_apss: endpoint {
@@ -1728,9 +1724,8 @@
coresight-name = "coresight-etm2";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
etm2_out_funnel_apss: endpoint {
@@ -1748,9 +1743,8 @@
coresight-name = "coresight-etm3";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
etm3_out_funnel_apss: endpoint {
@@ -1768,9 +1762,8 @@
coresight-name = "coresight-etm4";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
etm4_out_funnel_apss: endpoint {
@@ -1788,9 +1781,8 @@
coresight-name = "coresight-etm5";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
etm5_out_funnel_apss: endpoint {
@@ -1808,9 +1800,8 @@
coresight-name = "coresight-etm6";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
etm6_out_funnel_apss: endpoint {
@@ -1828,9 +1819,8 @@
coresight-name = "coresight-etm7";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
etm7_out_funnel_apss: endpoint {
@@ -1848,9 +1838,8 @@
coresight-name = "coresight-funnel-apss";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
new file mode 100644
index 0000000..168f2a9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
@@ -0,0 +1,67 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ mdss_dsi0_pll: qcom,mdss_dsi_pll@ae94a00 {
+ compatible = "qcom,mdss_dsi_pll_10nm";
+ label = "MDSS DSI 0 PLL";
+ cell-index = <0>;
+ #clock-cells = <1>;
+ reg = <0xae94a00 0x1e0>,
+ <0xae94400 0x800>,
+ <0xaf03000 0x8>;
+ reg-names = "pll_base", "phy_base", "gdsc_base";
+ clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+ clock-rate = <0>;
+ gdsc-supply = <&mdss_core_gdsc>;
+ qcom,platform-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,platform-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+ };
+
+ mdss_dsi1_pll: qcom,mdss_dsi_pll@ae96a00 {
+ compatible = "qcom,mdss_dsi_pll_10nm";
+ label = "MDSS DSI 1 PLL";
+ cell-index = <1>;
+ #clock-cells = <1>;
+ reg = <0xae96a00 0x1e0>,
+ <0xae96400 0x800>,
+ <0xaf03000 0x8>;
+ reg-names = "pll_base", "phy_base", "gdsc_base";
+ clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+ clock-rate = <0>;
+ gdsc-supply = <&mdss_core_gdsc>;
+ qcom,platform-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,platform-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 2f45c41..cb5d924 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -273,98 +273,81 @@
};
mdss_dsi0: qcom,mdss_dsi_ctrl0@ae94000 {
- compatible = "qcom,dsi-ctrl-hw-v2.0";
+ compatible = "qcom,dsi-ctrl-hw-v2.2";
label = "dsi-ctrl-0";
- status = "disabled";
cell-index = <0>;
- reg = <0xae94000 0x400>;
- reg-names = "dsi_ctrl";
+ reg = <0xae94000 0x400>,
+ <0xaf08000 0x4>;
+ reg-names = "dsi_ctrl", "disp_cc_base";
interrupt-parent = <&mdss_mdp>;
interrupts = <4 0>;
vdda-1p2-supply = <&pm8998_l26>;
- vdda-0p9-supply = <&pm8998_l1>;
clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK>,
<&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
<&clock_dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
<&clock_dispcc DISP_CC_MDSS_PCLK0_CLK>,
- <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>,
+ <&clock_dispcc DISP_CC_MDSS_ESC0_CLK>;
clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
- "pixel_clk", "pixel_clk_rcg";
+ "pixel_clk", "pixel_clk_rcg",
+ "esc_clk";
qcom,ctrl-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
+
qcom,ctrl-supply-entry@0 {
reg = <0>;
- qcom,supply-name = "vdda-0p9";
- qcom,supply-min-voltage = <925000>;
- qcom,supply-max-voltage = <925000>;
- qcom,supply-enable-load = <17000>;
- qcom,supply-disable-load = <32>;
- };
-
- qcom,ctrl-supply-entry@1 {
- reg = <0>;
qcom,supply-name = "vdda-1p2";
- qcom,supply-min-voltage = <1250000>;
- qcom,supply-max-voltage = <1250000>;
- qcom,supply-enable-load = <18160>;
- qcom,supply-disable-load = <1>;
+ qcom,supply-min-voltage = <1200000>;
+ qcom,supply-max-voltage = <1200000>;
+ qcom,supply-enable-load = <21800>;
+ qcom,supply-disable-load = <4>;
};
};
};
mdss_dsi1: qcom,mdss_dsi_ctrl1@ae96000 {
- compatible = "qcom,dsi-ctrl-hw-v2.0";
+ compatible = "qcom,dsi-ctrl-hw-v2.2";
label = "dsi-ctrl-1";
- status = "disabled";
cell-index = <1>;
- reg = <0xae96000 0x400>;
- reg-names = "dsi_ctrl";
+ reg = <0xae96000 0x400>,
+ <0xaf08000 0x4>;
+ reg-names = "dsi_ctrl", "disp_cc_base";
interrupt-parent = <&mdss_mdp>;
interrupts = <5 0>;
vdda-1p2-supply = <&pm8998_l26>;
- vdda-0p9-supply = <&pm8998_l1>;
- clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK>,
- <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
- <&clock_dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
- <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK>,
- <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ clocks = <&clock_dispcc DISP_CC_MDSS_BYTE1_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>,
+ <&clock_dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_PCLK1_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>,
+ <&clock_dispcc DISP_CC_MDSS_ESC1_CLK>;
clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
- "pixel_clk", "pixel_clk_rcg";
+ "pixel_clk", "pixel_clk_rcg", "esc_clk";
qcom,ctrl-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
qcom,ctrl-supply-entry@0 {
reg = <0>;
- qcom,supply-name = "vdda-0p9";
- qcom,supply-min-voltage = <925000>;
- qcom,supply-max-voltage = <925000>;
- qcom,supply-enable-load = <17000>;
- qcom,supply-disable-load = <32>;
- };
-
- qcom,ctrl-supply-entry@1 {
- reg = <0>;
qcom,supply-name = "vdda-1p2";
- qcom,supply-min-voltage = <1250000>;
- qcom,supply-max-voltage = <1250000>;
- qcom,supply-enable-load = <18160>;
- qcom,supply-disable-load = <1>;
+ qcom,supply-min-voltage = <1200000>;
+ qcom,supply-max-voltage = <1200000>;
+ qcom,supply-enable-load = <21800>;
+ qcom,supply-disable-load = <4>;
};
};
};
mdss_dsi_phy0: qcom,mdss_dsi_phy0@ae94400 {
compatible = "qcom,dsi-phy-v3.0";
- status = "disabled";
label = "dsi-phy-0";
cell-index = <0>;
reg = <0xae94400 0x7c0>;
reg-names = "dsi_phy";
gdsc-supply = <&mdss_core_gdsc>;
- vdda-1p2-supply = <&pm8998_l26>;
+ vdda-0p9-supply = <&pm8998_l1>;
qcom,platform-strength-ctrl = [55 03
55 03
55 03
@@ -381,24 +364,23 @@
#size-cells = <0>;
qcom,phy-supply-entry@0 {
reg = <0>;
- qcom,supply-name = "vdda-1p2";
- qcom,supply-min-voltage = <1250000>;
- qcom,supply-max-voltage = <1250000>;
- qcom,supply-enable-load = <2500>;
- qcom,supply-disable-load = <1>;
+ qcom,supply-name = "vdda-0p9";
+ qcom,supply-min-voltage = <880000>;
+ qcom,supply-max-voltage = <880000>;
+ qcom,supply-enable-load = <36000>;
+ qcom,supply-disable-load = <32>;
};
};
};
mdss_dsi_phy1: qcom,mdss_dsi_phy0@ae96400 {
compatible = "qcom,dsi-phy-v3.0";
- status = "disabled";
label = "dsi-phy-1";
cell-index = <1>;
reg = <0xae96400 0x7c0>;
reg-names = "dsi_phy";
gdsc-supply = <&mdss_core_gdsc>;
- vdda-1p2-supply = <&pm8998_l26>;
+ vdda-0p9-supply = <&pm8998_l1>;
qcom,platform-strength-ctrl = [55 03
55 03
55 03
@@ -415,11 +397,11 @@
#size-cells = <0>;
qcom,phy-supply-entry@0 {
reg = <0>;
- qcom,supply-name = "vdda-1p2";
- qcom,supply-min-voltage = <1250000>;
- qcom,supply-max-voltage = <1250000>;
- qcom,supply-enable-load = <2500>;
- qcom,supply-disable-load = <1>;
+ qcom,supply-name = "vdda-0p9";
+ qcom,supply-min-voltage = <880000>;
+ qcom,supply-max-voltage = <880000>;
+ qcom,supply-enable-load = <36000>;
+ qcom,supply-disable-load = <32>;
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 97c3124..54e0162 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -18,6 +18,7 @@
#include <dt-bindings/clock/qcom,videocc-sdm845.h>
#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/clock/qcom,aop-qmp.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/soc/qcom,tcs-mbox.h>
@@ -429,6 +430,7 @@
};
#include "msm-gdsc-sdm845.dtsi"
+#include "sdm845-sde-pll.dtsi"
#include "sdm845-sde.dtsi"
#include "sdm845-sde-display.dtsi"
#include "sdm845-qupv3.dtsi"
@@ -1011,6 +1013,13 @@
#clock-cells = <1>;
};
+ clock_aop: qcom,aopclk {
+ compatible = "qcom,aop-qmp-clk";
+ #clock-cells = <1>;
+ mboxes = <&qmp_aop 0>;
+ mbox-names = "qdss_clk";
+ };
+
ufsphy_mem: ufsphy_mem@1d87000 {
reg = <0x1d87000 0xda8>; /* PHY regs */
reg-names = "phy_mem";
@@ -1263,6 +1272,8 @@
qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
100000000 200000000 4294967295>;
+ qcom,sdr104-wa;
+
qcom,devfreq,freq-table = <50000000 200000000>;
clocks = <&clock_gcc GCC_SDCC2_AHB_CLK>,
<&clock_gcc GCC_SDCC2_APPS_CLK>;
@@ -1406,6 +1417,16 @@
qcom,ea-pc = <0x270>;
};
+ slim_qca: slim@17240000 {
+ cell-index = <3>;
+ compatible = "qcom,slim-ngd";
+ reg = <0x17240000 0x2c000>,
+ <0x17204000 0x20000>;
+ reg-names = "slimbus_physical", "slimbus_bam_physical";
+ interrupts = <0 291 0>, <0 292 0>;
+ interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+ };
+
eud: qcom,msm-eud@88e0000 {
compatible = "qcom,msm-eud";
interrupt-names = "eud_irq";
@@ -1493,11 +1514,22 @@
qcom,rtb-size = <0x100000>;
};
+ qcom,mpm2-sleep-counter@0x0c221000 {
+ compatible = "qcom,mpm2-sleep-counter";
+ reg = <0x0c221000 0x1000>;
+ clock-frequency = <32768>;
+ };
+
qcom,msm-cdsp-loader {
compatible = "qcom,cdsp-loader";
qcom,proc-img-to-load = "cdsp";
};
+ qcom,msm-adsprpc-mem {
+ compatible = "qcom,msm-adsprpc-mem-region";
+ memory-region = <&adsp_mem>;
+ };
+
qcom,msm_fastrpc {
compatible = "qcom,msm-fastrpc-compute";
@@ -1549,6 +1581,32 @@
iommus = <&apps_smmu 0x1408>,
<&apps_smmu 0x1428>;
};
+ qcom,msm_fastrpc_compute_cb9 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ qcom,secure-context-bank;
+ iommus = <&apps_smmu 0x1409>,
+ <&apps_smmu 0x1419>,
+ <&apps_smmu 0x1429>;
+ };
+ qcom,msm_fastrpc_compute_cb10 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ qcom,secure-context-bank;
+ iommus = <&apps_smmu 0x140A>,
+ <&apps_smmu 0x141A>,
+ <&apps_smmu 0x142A>;
+ };
+ qcom,msm_fastrpc_compute_cb11 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&apps_smmu 0x1823>;
+ };
+ qcom,msm_fastrpc_compute_cb12 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&apps_smmu 0x1824>;
+ };
};
qcom,msm-imem@146bf000 {
@@ -1568,10 +1626,20 @@
reg = <0x65c 4>;
};
+ boot_stats@6b0 {
+ compatible = "qcom,msm-imem-boot_stats";
+ reg = <0x6b0 32>;
+ };
+
pil@94c {
compatible = "qcom,msm-imem-pil";
reg = <0x94c 200>;
};
+
+ kaslr_offset@6d0 {
+ compatible = "qcom,msm-imem-kaslr_offset";
+ reg = <0x6d0 12>;
+ };
};
qcom,venus@aae0000 {
@@ -2056,6 +2124,29 @@
qcom,qsee-reentrancy-support = <2>;
};
+ qcom_rng: qrng@793000 {
+ compatible = "qcom,msm-rng";
+ reg = <0x793000 0x1000>;
+ qcom,msm-rng-iface-clk;
+ qcom,no-qrng-config;
+ qcom,msm-bus,name = "msm-rng-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <1 618 0 0>, /* No vote */
+ <1 618 0 800>; /* 100 KHz */
+ clocks = <&clock_gcc GCC_PRNG_AHB_CLK>;
+ clock-names = "iface_clk";
+ };
+
+ qcom_tzlog: tz-log@146bf720 {
+ compatible = "qcom,tz-log";
+ reg = <0x146bf720 0x3000>;
+ qcom,hyplog-enabled;
+ hyplog-address-offset = <0x410>;
+ hyplog-size-offset = <0x414>;
+ };
+
qcom,msm_gsi {
compatible = "qcom,msm_gsi";
};
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 658c871..9a2ab8c 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -55,6 +55,7 @@
CONFIG_HZ_100=y
CONFIG_CMA=y
CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
@@ -275,6 +276,7 @@
CONFIG_SERIAL_MSM_GENI=y
CONFIG_DIAG_CHAR=y
CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QCOM_GENI=y
@@ -390,6 +392,7 @@
CONFIG_USB_CONFIGFS_F_GSI=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
+CONFIG_MMC_CLKGATE=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
CONFIG_MMC_TEST=y
@@ -441,6 +444,7 @@
CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_SDM845_LLCC=y
+CONFIG_MSM_BOOT_STATS=y
CONFIG_QCOM_EUD=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
@@ -490,6 +494,7 @@
CONFIG_ARM_GIC_V3_ACL=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_MSM_TZ_LOG=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 7507c3b..31f8703 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -61,6 +61,7 @@
CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
@@ -282,9 +283,8 @@
CONFIG_SERIAL_MSM_GENI=y
CONFIG_SERIAL_MSM_GENI_CONSOLE=y
CONFIG_DIAG_CHAR=y
-CONFIG_HVC_DCC=y
-CONFIG_HVC_DCC_SERIALIZE_SMP=y
CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QCOM_GENI=y
@@ -338,6 +338,7 @@
CONFIG_VIDEO_ADV_DEBUG=y
CONFIG_VIDEO_FIXED_MINOR_RANGES=y
CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SPECTRA_CAMERA=y
CONFIG_MSM_VIDC_V4L2=y
CONFIG_MSM_VIDC_VMEM=y
CONFIG_MSM_VIDC_GOVERNORS=y
@@ -399,6 +400,7 @@
CONFIG_USB_CONFIGFS_F_GSI=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
+CONFIG_MMC_CLKGATE=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
CONFIG_MMC_TEST=y
@@ -459,6 +461,7 @@
CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_SDM845_LLCC=y
+CONFIG_MSM_BOOT_STATS=y
CONFIG_MSM_CORE_HANG_DETECT=y
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
CONFIG_QCOM_EUD=y
@@ -513,6 +516,7 @@
CONFIG_PHY_XGENE=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_MSM_TZ_LOG=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 296e139..0a34644 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -335,7 +335,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
if (is_el0_instruction_abort(esr)) {
vm_flags = VM_EXEC;
- } else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
+ } else if (((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) ||
+ ((esr & ESR_ELx_CM) && !(mm_flags & FAULT_FLAG_USER))) {
vm_flags = VM_WRITE;
mm_flags |= FAULT_FLAG_WRITE;
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 6432d4b..767ef6d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -689,7 +689,7 @@
addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
- lwz r3,GPR1(r1)
+ ld r3,GPR1(r1)
subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
mr r4,r1 /* src: current exception frame */
mr r1,r3 /* Reroute the trampoline frame to r1 */
@@ -703,8 +703,8 @@
addi r6,r6,8
bdnz 2b
- /* Do real store operation to complete stwu */
- lwz r5,GPR1(r1)
+ /* Do real store operation to complete stdu */
+ ld r5,GPR1(r1)
std r8,0(r5)
/* Clear _TIF_EMULATE_STACK_STORE flag */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 0362cd5..0cea702 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1029,6 +1029,8 @@ int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
+ if (pte_present(entry))
+ pte_val(entry) &= ~_PAGE_UNUSED;
if (mm_has_pgste(mm))
ptep_set_pte_at(mm, addr, ptep, entry);
else
diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
index 93d824e..040af19 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
@@ -85,7 +85,7 @@ void mce_gen_pool_process(void)
head = llist_reverse_order(head);
llist_for_each_entry_safe(node, tmp, head, llnode) {
mce = &node->mce;
- atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
+ blocking_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
}
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index cd74a3f..de20902 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -13,7 +13,7 @@ enum severity_level {
MCE_PANIC_SEVERITY,
};
-extern struct atomic_notifier_head x86_mce_decoder_chain;
+extern struct blocking_notifier_head x86_mce_decoder_chain;
#define ATTR_LEN 16
#define INITIAL_CHECK_INTERVAL 5 * 60 /* 5 minutes */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index a7fdf45..22cda29 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -120,7 +120,7 @@ static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
* CPU/chipset specific EDAC code can register a notifier call here to print
* MCE errors in a human-readable form.
*/
-ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
@@ -213,13 +213,13 @@ void mce_register_decode_chain(struct notifier_block *nb)
if (nb != &mce_srao_nb && nb->priority == INT_MAX)
nb->priority -= 1;
- atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
+ blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);
void mce_unregister_decode_chain(struct notifier_block *nb)
{
- atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
+ blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
@@ -272,8 +272,6 @@ struct mca_msr_regs msr_ops = {
static void print_mce(struct mce *m)
{
- int ret = 0;
-
pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
m->extcpu, m->mcgstatus, m->bank, m->status);
@@ -309,14 +307,6 @@ static void print_mce(struct mce *m)
m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
cpu_data(m->extcpu).microcode);
- /*
- * Print out human-readable details about the MCE error,
- * (if the CPU has an implementation for that)
- */
- ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
- if (ret == NOTIFY_STOP)
- return;
-
pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 9b54034..3dfca7b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -59,7 +59,7 @@ static const char * const th_names[] = {
"load_store",
"insn_fetch",
"combined_unit",
- "",
+ "decode_unit",
"northbridge",
"execution_unit",
};
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index fcd4ce6..1c2b846 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -200,6 +200,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
+ cur_state = 0;
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
acpi_handle handle = resource->device.handle;
diff --git a/drivers/base/dma-removed.c b/drivers/base/dma-removed.c
index 4281801..09e77d5 100644
--- a/drivers/base/dma-removed.c
+++ b/drivers/base/dma-removed.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
* Copyright (C) 2000-2004 Russell King
*
* This program is free software; you can redistribute it and/or modify
@@ -294,6 +294,7 @@ void removed_free(struct device *dev, size_t size, void *cpu_addr,
bool no_kernel_mapping = attrs & DMA_ATTR_NO_KERNEL_MAPPING;
struct removed_region *dma_mem = dev->removed_mem;
+ size = PAGE_ALIGN(size);
if (!no_kernel_mapping)
iounmap(cpu_addr);
mutex_lock(&dma_mem->lock);
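A brief illustration (not from the patch; the helper name is invented and 4 KiB pages are assumed) of why the free path needs the same PAGE_ALIGN() rounding the allocation path applies:

#include <linux/mm.h>	/* PAGE_ALIGN, PAGE_SHIFT */

static unsigned long example_pages_for(size_t size)
{
	return PAGE_ALIGN(size) >> PAGE_SHIFT;
}

/*
 * example_pages_for(5000) == 2, while (5000 >> PAGE_SHIFT) == 1.
 * Without aligning in removed_free(), a 5000-byte allocation would
 * give back only one of the two pages it reserved, slowly leaking
 * the removed region.
 */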
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 200dab5..18849f4 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -337,7 +337,6 @@
config HW_RANDOM_MSM
tristate "Qualcomm SoCs Random Number Generator support"
depends on HW_RANDOM && ARCH_QCOM
- default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on Qualcomm SoCs.
@@ -347,6 +346,20 @@
If unsure, say Y.
+config HW_RANDOM_MSM_LEGACY
+ tristate "QTI MSM Random Number Generator support (LEGACY)"
+ depends on HW_RANDOM && ARCH_QCOM
+ select CRYPTO_AES
+ select CRYPTO_ECB
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on QTI MSM SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called msm_rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_ST
tristate "ST Microelectronics HW Random Number Generator support"
depends on HW_RANDOM && ARCH_STI
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 5f52b1e..637adb5 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -30,6 +30,7 @@
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
+obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += msm_rng.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
diff --git a/drivers/char/hw_random/msm_rng.c b/drivers/char/hw_random/msm_rng.c
new file mode 100644
index 0000000..7641a6a
--- /dev/null
+++ b/drivers/char/hw_random/msm_rng.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2011-2013, 2015, 2017 The Linux Foundation. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <soc/qcom/socinfo.h>
+#include <linux/msm-bus.h>
+#include <linux/qrng.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <crypto/internal/rng.h>
+
+#include <linux/platform_data/qcom_crypto_device.h>
+
+
+
+#define DRIVER_NAME "msm_rng"
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT_OFFSET 0x0000
+#define PRNG_STATUS_OFFSET 0x0004
+#define PRNG_LFSR_CFG_OFFSET 0x0100
+#define PRNG_CONFIG_OFFSET 0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK 0xFFFF0000
+#define PRNG_LFSR_CFG_CLOCKS 0x0000DDDD
+#define PRNG_CONFIG_MASK 0xFFFFFFFD
+#define PRNG_HW_ENABLE 0x00000002
+
+#define MAX_HW_FIFO_DEPTH 16 /* FIFO is 16 words deep */
+#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide */
+
+struct msm_rng_device {
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct clk *prng_clk;
+ uint32_t qrng_perf_client;
+ struct mutex rng_lock;
+};
+
+struct msm_rng_device msm_rng_device_info;
+static struct msm_rng_device *msm_rng_dev_cached;
+struct mutex cached_rng_lock;
+static long msm_rng_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = 0;
+
+ switch (cmd) {
+ case QRNG_IOCTL_RESET_BUS_BANDWIDTH:
+ pr_info("calling msm_rng_bus_scale(LOW)\n");
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_device_info.qrng_perf_client, 0);
+ if (ret)
+ pr_err("failed qrng_reset_bus_bw, ret = %ld\n", ret);
+ break;
+ default:
+ pr_err("Unsupported IOCTL call");
+ break;
+ }
+ return ret;
+}
+
+/*
+ * This function calls the hardware random bit generator directly and
+ * returns the random data to the caller.
+ */
+static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev,
+ void *data, size_t max)
+{
+ struct platform_device *pdev;
+ void __iomem *base;
+ size_t currsize = 0;
+ u32 val;
+ u32 *retdata = data;
+ int ret;
+ int failed = 0;
+
+ pdev = msm_rng_dev->pdev;
+ base = msm_rng_dev->base;
+
+ /* no room for word data */
+ if (max < 4)
+ return 0;
+
+ mutex_lock(&msm_rng_dev->rng_lock);
+
+ if (msm_rng_dev->qrng_perf_client) {
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_dev->qrng_perf_client, 1);
+ if (ret)
+ pr_err("bus_scale_client_update_req failed!\n");
+ }
+ /* enable PRNG clock */
+ ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock in callback\n");
+ goto err;
+ }
+ /* read random data from h/w */
+ do {
+ /* check status bit if data is available */
+ while (!(readl_relaxed(base + PRNG_STATUS_OFFSET)
+ & 0x00000001)) {
+ if (failed == 10) {
+ pr_err("Data not available after retry\n");
+ break;
+ }
+ pr_err("msm_rng:Data not available!\n");
+ msleep_interruptible(10);
+ failed++;
+ }
+
+ /* read FIFO */
+ val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
+ if (!val)
+ break; /* no data to read so just bail */
+
+ /* write data back to the caller's pointer */
+ *(retdata++) = val;
+ currsize += 4;
+ /* make sure we stay on 32bit boundary */
+ if ((max - currsize) < 4)
+ break;
+
+ } while (currsize < max);
+
+ /* vote to turn off clock */
+ clk_disable_unprepare(msm_rng_dev->prng_clk);
+err:
+ if (msm_rng_dev->qrng_perf_client) {
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_dev->qrng_perf_client, 0);
+ if (ret)
+ pr_err("bus_scale_client_update_req failed!\n");
+ }
+ mutex_unlock(&msm_rng_dev->rng_lock);
+
+ val = 0L;
+ return currsize;
+}
+static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct msm_rng_device *msm_rng_dev;
+ int rv = 0;
+
+ msm_rng_dev = (struct msm_rng_device *)rng->priv;
+ rv = msm_rng_direct_read(msm_rng_dev, data, max);
+
+ return rv;
+}
+
+
+static struct hwrng msm_rng = {
+ .name = DRIVER_NAME,
+ .read = msm_rng_read,
+ .quality = 700,
+};
+
+static int msm_rng_enable_hw(struct msm_rng_device *msm_rng_dev)
+{
+ unsigned long val = 0;
+ unsigned long reg_val = 0;
+ int ret = 0;
+
+ if (msm_rng_dev->qrng_perf_client) {
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_dev->qrng_perf_client, 1);
+ if (ret)
+ pr_err("bus_scale_client_update_req failed!\n");
+ }
+ /* Enable the PRNG CLK */
+ ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+ if (ret) {
+ dev_err(&(msm_rng_dev->pdev)->dev,
+ "failed to enable clock in probe\n");
+ return -EPERM;
+ }
+
+ /* Enable PRNG h/w only if it is NOT ON */
+ val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET) &
+ PRNG_HW_ENABLE;
+ /* PRNG H/W is not ON */
+ if (val != PRNG_HW_ENABLE) {
+ val = readl_relaxed(msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
+ val &= PRNG_LFSR_CFG_MASK;
+ val |= PRNG_LFSR_CFG_CLOCKS;
+ writel_relaxed(val, msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
+
+ /* Make sure the LFSR CFG write completes before writing PRNG CONFIG */
+ mb();
+
+ reg_val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET)
+ & PRNG_CONFIG_MASK;
+ reg_val |= PRNG_HW_ENABLE;
+ writel_relaxed(reg_val, msm_rng_dev->base + PRNG_CONFIG_OFFSET);
+
+ /* The PRNG clk should be disabled only after we enable the
+ * PRNG h/w by writing to the PRNG CONFIG register.
+ */
+ mb();
+ }
+ clk_disable_unprepare(msm_rng_dev->prng_clk);
+
+ if (msm_rng_dev->qrng_perf_client) {
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_dev->qrng_perf_client, 0);
+ if (ret)
+ pr_err("bus_scale_client_update_req failed!\n");
+ }
+
+ return 0;
+}
+
+static const struct file_operations msm_rng_fops = {
+ .unlocked_ioctl = msm_rng_ioctl,
+};
+static struct class *msm_rng_class;
+static struct cdev msm_rng_cdev;
+
+static int msm_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct msm_rng_device *msm_rng_dev = NULL;
+ void __iomem *base = NULL;
+ bool configure_qrng = true;
+ int error = 0;
+ int ret = 0;
+ struct device *dev;
+
+ struct msm_bus_scale_pdata *qrng_platform_support = NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "invalid address\n");
+ error = -EFAULT;
+ goto err_exit;
+ }
+
+ msm_rng_dev = kzalloc(sizeof(struct msm_rng_device), GFP_KERNEL);
+ if (!msm_rng_dev) {
+ error = -ENOMEM;
+ goto err_exit;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ error = -ENOMEM;
+ goto err_iomap;
+ }
+ msm_rng_dev->base = base;
+
+ /* create a handle for clock control */
+ if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
+ "qcom,msm-rng-iface-clk")))
+ msm_rng_dev->prng_clk = clk_get(&pdev->dev,
+ "iface_clk");
+ else
+ msm_rng_dev->prng_clk = clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(msm_rng_dev->prng_clk)) {
+ dev_err(&pdev->dev, "failed to register clock source\n");
+ error = -EPERM;
+ goto err_clk_get;
+ }
+
+ /* save away pdev and register driver data */
+ msm_rng_dev->pdev = pdev;
+ platform_set_drvdata(pdev, msm_rng_dev);
+
+ if (pdev->dev.of_node) {
+ /* Register bus client */
+ qrng_platform_support = msm_bus_cl_get_pdata(pdev);
+ msm_rng_dev->qrng_perf_client = msm_bus_scale_register_client(
+ qrng_platform_support);
+ msm_rng_device_info.qrng_perf_client =
+ msm_rng_dev->qrng_perf_client;
+ if (!msm_rng_dev->qrng_perf_client)
+ pr_err("Unable to register bus client\n");
+ }
+
+ /* Enable rng h/w for the targets which can access the entire
+ * address space of PRNG.
+ */
+ if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
+ "qcom,no-qrng-config")))
+ configure_qrng = false;
+ if (configure_qrng) {
+ error = msm_rng_enable_hw(msm_rng_dev);
+ if (error)
+ goto rollback_clk;
+ }
+
+ mutex_init(&msm_rng_dev->rng_lock);
+ mutex_init(&cached_rng_lock);
+
+ /* register with hwrng framework */
+ msm_rng.priv = (unsigned long) msm_rng_dev;
+ error = hwrng_register(&msm_rng);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register hwrng\n");
+ error = -EPERM;
+ goto rollback_clk;
+ }
+ ret = register_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME, &msm_rng_fops);
+
+ msm_rng_class = class_create(THIS_MODULE, "msm-rng");
+ if (IS_ERR(msm_rng_class)) {
+ pr_err("class_create failed\n");
+ return PTR_ERR(msm_rng_class);
+ }
+
+ dev = device_create(msm_rng_class, NULL, MKDEV(QRNG_IOC_MAGIC, 0),
+ NULL, "msm-rng");
+ if (IS_ERR(dev)) {
+ pr_err("Device create failed\n");
+ error = PTR_ERR(dev);
+ goto unregister_chrdev;
+ }
+ cdev_init(&msm_rng_cdev, &msm_rng_fops);
+ msm_rng_dev_cached = msm_rng_dev;
+ return error;
+
+unregister_chrdev:
+ unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+rollback_clk:
+ clk_put(msm_rng_dev->prng_clk);
+err_clk_get:
+ iounmap(msm_rng_dev->base);
+err_iomap:
+ kzfree(msm_rng_dev);
+err_exit:
+ return error;
+}
+
+static int msm_rng_remove(struct platform_device *pdev)
+{
+ struct msm_rng_device *msm_rng_dev = platform_get_drvdata(pdev);
+
+ unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+ hwrng_unregister(&msm_rng);
+ clk_put(msm_rng_dev->prng_clk);
+ iounmap(msm_rng_dev->base);
+ platform_set_drvdata(pdev, NULL);
+ if (msm_rng_dev->qrng_perf_client)
+ msm_bus_scale_unregister_client(msm_rng_dev->qrng_perf_client);
+
+ kzfree(msm_rng_dev);
+ msm_rng_dev_cached = NULL;
+ return 0;
+}
+
+static int qrng_get_random(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *rdata,
+ unsigned int dlen)
+{
+ int sizeread = 0;
+ int rv = -EFAULT;
+
+ if (!msm_rng_dev_cached) {
+ pr_err("%s: msm_rng_dev is not initialized.\n", __func__);
+ rv = -ENODEV;
+ goto err_exit;
+ }
+
+ if (!rdata) {
+ pr_err("%s: data buffer is null!\n", __func__);
+ rv = -EINVAL;
+ goto err_exit;
+ }
+
+ if (signal_pending(current) ||
+ mutex_lock_interruptible(&cached_rng_lock)) {
+ pr_err("%s: mutex lock interrupted!\n", __func__);
+ rv = -ERESTARTSYS;
+ goto err_exit;
+ }
+ sizeread = msm_rng_direct_read(msm_rng_dev_cached, rdata, dlen);
+
+ if (sizeread == dlen)
+ rv = 0;
+
+ mutex_unlock(&cached_rng_lock);
+err_exit:
+ return rv;
+
+}
+
+static int qrng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
+{
+ return 0;
+}
+
+static struct rng_alg rng_algs[] = { {
+ .generate = qrng_get_random,
+ .seed = qrng_reset,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "qrng",
+ .cra_driver_name = "fips_hw_qrng",
+ .cra_priority = 300,
+ .cra_ctxsize = 0,
+ .cra_module = THIS_MODULE,
+ }
+} };
+
+static const struct of_device_id qrng_match[] = {
+ { .compatible = "qcom,msm-rng",
+ },
+ {}
+};
+
+static struct platform_driver rng_driver = {
+ .probe = msm_rng_probe,
+ .remove = msm_rng_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = qrng_match,
+ }
+};
+
+static int __init msm_rng_init(void)
+{
+ int ret;
+
+ msm_rng_dev_cached = NULL;
+ ret = platform_driver_register(&rng_driver);
+ if (ret) {
+ pr_err("%s: platform_driver_register error:%d\n",
+ __func__, ret);
+ goto err_exit;
+ }
+ ret = crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs));
+ if (ret) {
+ pr_err("%s: crypto_register_algs error:%d\n",
+ __func__, ret);
+ goto err_exit;
+ }
+
+err_exit:
+ return ret;
+}
+
+module_init(msm_rng_init);
+
+static void __exit msm_rng_exit(void)
+{
+ crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs));
+ platform_driver_unregister(&rng_driver);
+}
+
+module_exit(msm_rng_exit);
+
+MODULE_DESCRIPTION("QTI MSM Random Number Driver");
+MODULE_LICENSE("GPL v2");
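A small usage sketch (not part of the driver above; the function name is hypothetical) showing how other kernel code could consume the "qrng" algorithm this driver registers through the crypto RNG API. Userspace reaches the same hardware through /dev/hwrng once the hwrng framework selects the "msm_rng" source:

#include <linux/err.h>
#include <crypto/rng.h>

static int example_read_qrng(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("qrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = crypto_rng_get_bytes(rng, buf, len);
	crypto_free_rng(rng);
	return ret;
}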
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 46a3d27..d47b66e 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -1,6 +1,7 @@
config QCOM_GDSC
bool
select PM_GENERIC_DOMAINS if PM
+ depends on REGULATOR
config COMMON_CLK_QCOM
tristate "Support for Qualcomm's clock controllers"
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 84e9698..6296c40 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1115,19 +1115,6 @@ static struct clk_branch cam_cc_csiphy2_clk = {
},
};
-static struct clk_branch cam_cc_debug_clk = {
- .halt_reg = 0xc008,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xc008,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "cam_cc_debug_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch cam_cc_fd_core_clk = {
.halt_reg = 0xb0c8,
.halt_check = BRANCH_HALT,
@@ -1764,7 +1751,6 @@ static struct clk_regmap *cam_cc_sdm845_clocks[] = {
[CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
[CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
[CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
- [CAM_CC_DEBUG_CLK] = &cam_cc_debug_clk.clkr,
[CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
[CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr,
[CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr,
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 20ce78b..3b56fa1 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -478,19 +478,6 @@ static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
},
};
-static struct clk_branch disp_cc_debug_clk = {
- .halt_reg = 0x600c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x600c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "disp_cc_debug_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch disp_cc_mdss_ahb_clk = {
.halt_reg = 0x4004,
.halt_check = BRANCH_HALT,
@@ -949,7 +936,6 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
};
static struct clk_regmap *disp_cc_sdm845_clocks[] = {
- [DISP_CC_DEBUG_CLK] = &disp_cc_debug_clk.clkr,
[DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
[DISP_CC_MDSS_AXI_CLK] = &disp_cc_mdss_axi_clk.clkr,
[DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 5fcb1f5..678dd10 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -1468,19 +1468,6 @@ static struct clk_branch gcc_cpuss_rbcpr_clk = {
},
};
-static struct clk_branch gcc_cxo_tx1_clkref_clk = {
- .halt_reg = 0x8c020,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8c020,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_cxo_tx1_clkref_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_ddrss_gpu_axi_clk = {
.halt_reg = 0x44038,
.halt_check = BRANCH_VOTED,
@@ -2433,32 +2420,6 @@ static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
},
};
-static struct clk_branch gcc_rx1_usb2_clkref_clk = {
- .halt_reg = 0x8c014,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8c014,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_rx1_usb2_clkref_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_rx2_qlink_clkref_clk = {
- .halt_reg = 0x8c018,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8c018,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_rx2_qlink_clkref_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_sdcc2_ahb_clk = {
.halt_reg = 0x14008,
.halt_check = BRANCH_HALT,
@@ -3151,7 +3112,6 @@ static struct clk_regmap *gcc_sdm845_clocks[] = {
[GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
[GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
[GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_cpuss_rbcpr_clk_src.clkr,
- [GCC_CXO_TX1_CLKREF_CLK] = &gcc_cxo_tx1_clkref_clk.clkr,
[GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
[GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
[GCC_DISP_AXI_CLK] = &gcc_disp_axi_clk.clkr,
@@ -3238,8 +3198,6 @@ static struct clk_regmap *gcc_sdm845_clocks[] = {
[GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
[GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
[GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
- [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
- [GCC_RX2_QLINK_CLKREF_CLK] = &gcc_rx2_qlink_clkref_clk.clkr,
[GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
[GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
[GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
@@ -3337,6 +3295,8 @@ static const struct qcom_reset_map gcc_sdm845_resets[] = {
[GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
[GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
};
static const struct regmap_config gcc_sdm845_regmap_config = {
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index d9a626e..0115bb1 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -413,19 +413,6 @@ static struct clk_branch gpu_cc_cxo_clk = {
},
};
-static struct clk_branch gpu_cc_debug_clk = {
- .halt_reg = 0x1100,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x1100,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gpu_cc_debug_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gpu_cc_gx_cxo_clk = {
.halt_reg = 0x1060,
.halt_check = BRANCH_HALT,
@@ -544,7 +531,6 @@ static struct clk_regmap *gpu_cc_sdm845_clocks[] = {
[GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
[GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
[GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
- [GPU_CC_DEBUG_CLK] = &gpu_cc_debug_clk.clkr,
[GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
[GPU_CC_GX_CXO_CLK] = &gpu_cc_gx_cxo_clk.clkr,
[GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 6ce0d76..3daefbc 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -40,6 +40,8 @@
#define PLL_CALIBRATION_SETTINGS 0x030
#define PLL_BAND_SEL_CAL_SETTINGS_THREE 0x054
#define PLL_FREQ_DETECT_SETTINGS_ONE 0x064
+#define PLL_PFILT 0x07c
+#define PLL_IFILT 0x080
#define PLL_OUTDIV 0x094
#define PLL_CORE_OVERRIDE 0x0a4
#define PLL_CORE_INPUT_OVERRIDE 0x0a8
@@ -63,6 +65,7 @@
#define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x164
#define PLL_PLL_LOCK_OVERRIDE 0x180
#define PLL_PLL_LOCK_DELAY 0x184
+#define PLL_CLOCK_INVERTERS 0x18c
#define PLL_COMMON_STATUS_ONE 0x1a0
/* Register Offsets from PHY base address */
@@ -338,7 +341,6 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll,
MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_THREE, 0x00);
MDSS_PLL_REG_W(pll_base, PLL_DSM_DIVIDER, 0x00);
MDSS_PLL_REG_W(pll_base, PLL_FEEDBACK_DIVIDER, 0x4e);
- MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x00);
MDSS_PLL_REG_W(pll_base, PLL_CALIBRATION_SETTINGS, 0x40);
MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
MDSS_PLL_REG_W(pll_base, PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
@@ -347,9 +349,11 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll,
MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
MDSS_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_1, 0x08);
MDSS_PLL_REG_W(pll_base, PLL_PLL_BAND_SET_RATE_1, 0xc0);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
MDSS_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x80);
+ MDSS_PLL_REG_W(pll_base, PLL_PFILT, 0x29);
+ MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x3f);
}
static void dsi_pll_commit(struct dsi_pll_10nm *pll,
@@ -367,9 +371,11 @@ static void dsi_pll_commit(struct dsi_pll_10nm *pll,
reg->frac_div_start_mid);
MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1,
reg->frac_div_start_high);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0xc8);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x40);
MDSS_PLL_REG_W(pll_base, PLL_PLL_OUTDIV_RATE, reg->pll_outdiv_rate);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x0a);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06);
+ MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x10);
+ MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, 0x0);
}
@@ -450,8 +456,8 @@ static void dsi_pll_disable_pll_bias(struct mdss_pll_resources *rsc)
{
u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
- MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data & ~BIT(5));
MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0);
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data & ~BIT(5));
ndelay(250);
}
@@ -464,6 +470,22 @@ static void dsi_pll_enable_pll_bias(struct mdss_pll_resources *rsc)
ndelay(250);
}
+static void dsi_pll_disable_global_clk(struct mdss_pll_resources *rsc)
+{
+ u32 data;
+
+ data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data & ~BIT(5)));
+}
+
+static void dsi_pll_enable_global_clk(struct mdss_pll_resources *rsc)
+{
+ u32 data;
+
+ data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data | BIT(5)));
+}
+
static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
{
int rc;
@@ -490,6 +512,11 @@ static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
}
rsc->pll_on = true;
+
+ dsi_pll_enable_global_clk(rsc);
+ if (rsc->slave)
+ dsi_pll_enable_global_clk(rsc->slave);
+
MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
if (rsc->slave)
MDSS_PLL_REG_W(rsc->slave->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
@@ -500,8 +527,9 @@ static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
static void dsi_pll_disable_sub(struct mdss_pll_resources *rsc)
{
- dsi_pll_disable_pll_bias(rsc);
+ dsi_pll_disable_global_clk(rsc);
MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0);
+ dsi_pll_disable_pll_bias(rsc);
}
static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
@@ -613,6 +641,9 @@ static unsigned long vco_10nm_recalc_rate(struct clk_hw *hw,
u32 outdiv;
u64 pll_freq, tmp64;
+ if (!vco->priv)
+ pr_err("vco priv is null\n");
+
rc = mdss_pll_resource_enable(pll, true);
if (rc) {
pr_err("failed to enable pll(%d) resource, rc=%d\n",
@@ -671,9 +702,11 @@ static int pixel_clk_get_div(void *context, unsigned int reg, unsigned int *div)
reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
*div = (reg_val & 0xF0) >> 4;
- if (*div == 0)
- *div = 1;
- else
+ /*
+ * In the common clock framework the divider value is interpreted as one
+ * less than the hardware value, so return one less except when zero.
+ */
+ if (*div != 0)
*div -= 1;
(void)mdss_pll_resource_enable(pll, false);
@@ -701,13 +734,15 @@ static int pixel_clk_set_div(void *context, unsigned int reg, unsigned int div)
pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
return rc;
}
- /* In common clock framework the divider value provided is one less */
+ /*
+ * In the common clock framework the divider value provided is one less
+ * than the hardware value, so adjust it by one before writing it to
+ * hardware.
+ */
div++;
-
pixel_clk_set_div_sub(pll, div);
if (pll->slave)
pixel_clk_set_div_sub(pll->slave, div);
-
(void)mdss_pll_resource_enable(pll, false);
return 0;
@@ -728,12 +763,12 @@ static int bit_clk_get_div(void *context, unsigned int reg, unsigned int *div)
reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
*div = (reg_val & 0x0F);
- /* Common clock framework will add one to divider value sent */
- if (*div == 0)
- *div = 1;
- else
+ /*
+ * In the common clock framework the divider value is interpreted as one
+ * less than the hardware value, so return one less except when zero.
+ */
+ if (*div != 0)
*div -= 1;
-
(void)mdss_pll_resource_enable(pll, false);
return rc;
@@ -771,6 +806,12 @@ static int bit_clk_set_div(void *context, unsigned int reg, unsigned int div)
pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
return rc;
}
+
+ /*
+ * In the common clock framework the divider value provided is one less
+ * than the hardware value, so adjust it by one before writing it to
+ * hardware.
+ */
div++;
bit_clk_set_div_sub(rsc, div);
@@ -806,9 +847,11 @@ static int post_vco_clk_get_div(void *context, unsigned int reg,
else
*div = 1;
- if (*div == 0)
- *div = 1;
- else
+ /*
+ * In the common clock framework the divider value is interpreted as one
+ * less than the hardware value, so return one less except when zero.
+ */
+ if (*div != 0)
*div -= 1;
(void)mdss_pll_resource_enable(pll, false);
@@ -851,8 +894,12 @@ static int post_vco_clk_set_div(void *context, unsigned int reg,
return rc;
}
+ /*
+ * In the common clock framework the divider value provided is one less
+ * than the hardware value, so adjust it by one before writing it to
+ * hardware.
+ */
div++;
-
rc = post_vco_clk_set_div_sub(pll, div);
if (!rc && pll->slave)
rc = post_vco_clk_set_div_sub(pll->slave, div);
@@ -885,9 +932,11 @@ static int post_bit_clk_get_div(void *context, unsigned int reg,
else
*div = 1;
- if (*div == 0)
- *div = 1;
- else
+ /*
+ * In the common clock framework the divider value is interpreted as one
+ * less than the hardware value, so return one less except when zero.
+ */
+ if (*div != 0)
*div -= 1;
(void)mdss_pll_resource_enable(pll, false);
@@ -930,8 +979,12 @@ static int post_bit_clk_set_div(void *context, unsigned int reg,
return rc;
}
+ /*
+ * In the common clock framework the divider value provided is one less
+ * than the hardware value, so adjust it by one before writing it to
+ * hardware.
+ */
div++;
-
rc = post_bit_clk_set_div_sub(pll, div);
if (!rc && pll->slave)
rc = post_bit_clk_set_div_sub(pll->slave, div);
@@ -1057,7 +1110,6 @@ static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
};
static struct clk_regmap_div dsi0pll_bitclk_src = {
- .reg = 0x48,
.shift = 0,
.width = 4,
.clkr = {
@@ -1072,7 +1124,6 @@ static struct clk_regmap_div dsi0pll_bitclk_src = {
};
static struct clk_regmap_div dsi1pll_bitclk_src = {
- .reg = 0x48,
.shift = 0,
.width = 4,
.clkr = {
@@ -1087,9 +1138,8 @@ static struct clk_regmap_div dsi1pll_bitclk_src = {
};
static struct clk_regmap_div dsi0pll_post_vco_div = {
- .reg = 0x48,
.shift = 0,
- .width = 4,
+ .width = 2,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_post_vco_div",
@@ -1102,9 +1152,8 @@ static struct clk_regmap_div dsi0pll_post_vco_div = {
};
static struct clk_regmap_div dsi1pll_post_vco_div = {
- .reg = 0x48,
.shift = 0,
- .width = 4,
+ .width = 2,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_post_vco_div",
@@ -1141,9 +1190,8 @@ static struct clk_fixed_factor dsi1pll_byteclk_src = {
};
static struct clk_regmap_div dsi0pll_post_bit_div = {
- .reg = 0x48,
.shift = 0,
- .width = 4,
+ .width = 1,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_post_bit_div",
@@ -1156,9 +1204,8 @@ static struct clk_regmap_div dsi0pll_post_bit_div = {
};
static struct clk_regmap_div dsi1pll_post_bit_div = {
- .reg = 0x48,
.shift = 0,
- .width = 4,
+ .width = 1,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_post_bit_div",
@@ -1171,12 +1218,11 @@ static struct clk_regmap_div dsi1pll_post_bit_div = {
};
static struct clk_regmap_mux dsi0pll_byteclk_mux = {
- .reg = 0x48,
.shift = 0,
- .width = 4,
+ .width = 0,
.clkr = {
.hw.init = &(struct clk_init_data){
- .name = "dsi0pll_byteclk_mux",
+ .name = "dsi0_phy_pll_out_byteclk",
.parent_names = (const char *[]){"dsi0pll_byteclk_src"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
@@ -1186,12 +1232,11 @@ static struct clk_regmap_mux dsi0pll_byteclk_mux = {
};
static struct clk_regmap_mux dsi1pll_byteclk_mux = {
- .reg = 0x48,
.shift = 0,
- .width = 4,
+ .width = 0,
.clkr = {
.hw.init = &(struct clk_init_data){
- .name = "dsi1pll_byteclk_mux",
+ .name = "dsi1_phy_pll_out_byteclk",
.parent_names = (const char *[]){"dsi1pll_byteclk_src"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
@@ -1201,15 +1246,14 @@ static struct clk_regmap_mux dsi1pll_byteclk_mux = {
};
static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
- .reg = 0x48,
.shift = 0,
- .width = 4,
+ .width = 0,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_pclk_src_mux",
.parent_names = (const char *[]){"dsi0pll_post_bit_div",
- "dsi0pll_post_bit_div"},
- .num_parents = 1,
+ "dsi0pll_post_vco_div"},
+ .num_parents = 2,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_mux_closest_ops,
},
@@ -1217,15 +1261,14 @@ static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
};
static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
- .reg = 0x48,
.shift = 0,
- .width = 4,
+ .width = 0,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_pclk_src_mux",
.parent_names = (const char *[]){"dsi1pll_post_bit_div",
- "dsi1pll_post_bit_div"},
- .num_parents = 1,
+ "dsi1pll_post_vco_div"},
+ .num_parents = 2,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_mux_closest_ops,
},
@@ -1233,7 +1276,6 @@ static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
};
static struct clk_regmap_div dsi0pll_pclk_src = {
- .reg = 0x48,
.shift = 0,
.width = 4,
.clkr = {
@@ -1249,7 +1291,6 @@ static struct clk_regmap_div dsi0pll_pclk_src = {
};
static struct clk_regmap_div dsi1pll_pclk_src = {
- .reg = 0x48,
.shift = 0,
.width = 4,
.clkr = {
@@ -1265,12 +1306,11 @@ static struct clk_regmap_div dsi1pll_pclk_src = {
};
static struct clk_regmap_mux dsi0pll_pclk_mux = {
- .reg = 0x48,
.shift = 0,
- .width = 4,
+ .width = 0,
.clkr = {
.hw.init = &(struct clk_init_data){
- .name = "dsi0pll_pclk_mux",
+ .name = "dsi0_phy_pll_out_dsiclk",
.parent_names = (const char *[]){"dsi0pll_pclk_src"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
@@ -1280,12 +1320,11 @@ static struct clk_regmap_mux dsi0pll_pclk_mux = {
};
static struct clk_regmap_mux dsi1pll_pclk_mux = {
- .reg = 0x48,
.shift = 0,
- .width = 4,
+ .width = 0,
.clkr = {
.hw.init = &(struct clk_init_data){
- .name = "dsi1pll_pclk_mux",
+ .name = "dsi1_phy_pll_out_dsiclk",
.parent_names = (const char *[]){"dsi1pll_pclk_src"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
@@ -1339,8 +1378,8 @@ int dsi_pll_clock_register_10nm(struct platform_device *pdev,
}
pll_rsc_db[ndx] = pll_res;
- pll_res->priv = &plls[ndx];
plls[ndx].rsc = pll_res;
+ pll_res->priv = &plls[ndx];
pll_res->vco_delay = VCO_DELAY_USEC;
clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
@@ -1386,6 +1425,7 @@ int dsi_pll_clock_register_10nm(struct platform_device *pdev,
pll_res, &dsi_pll_10nm_config);
dsi0pll_byteclk_mux.clkr.regmap = rmap;
+ dsi0pll_vco_clk.priv = pll_res;
for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
clk = devm_clk_register(&pdev->dev,
mdss_dsi_pllcc_10nm[i]);
@@ -1431,6 +1471,7 @@ int dsi_pll_clock_register_10nm(struct platform_device *pdev,
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi1pll_byteclk_mux.clkr.regmap = rmap;
+ dsi1pll_vco_clk.priv = pll_res;
for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
clk = devm_clk_register(&pdev->dev,
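For clarity, a standalone sketch (not part of the patch; the helper names are invented) of the divider convention the get_div/set_div callbacks above now follow: the common clock framework's value is one less than what the hardware register holds, with zero treated as divide-by-one:

static unsigned int example_hw_to_ccf_div(unsigned int reg_val)
{
	/* What the *_get_div callbacks report back to the framework. */
	return reg_val ? reg_val - 1 : 0;
}

static unsigned int example_ccf_to_hw_div(unsigned int ccf_div)
{
	/* What the *_set_div callbacks program into the register. */
	return ccf_div + 1;
}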
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
index 0a0d303..7f82fda 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.c
+++ b/drivers/clk/qcom/mdss/mdss-pll.c
@@ -150,6 +150,7 @@ static int mdss_pll_clock_register(struct platform_device *pdev,
switch (pll_res->pll_interface_type) {
case MDSS_DSI_PLL_10NM:
rc = dsi_pll_clock_register_10nm(pdev, pll_res);
+ break;
case MDSS_UNKNOWN_PLL:
default:
rc = -EINVAL;
@@ -370,7 +371,7 @@ static int __init mdss_pll_driver_init(void)
return rc;
}
-subsys_initcall(mdss_pll_driver_init);
+fs_initcall(mdss_pll_driver_init);
static void __exit mdss_pll_driver_deinit(void)
{
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index 28b7ca6..eccfcea 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -194,9 +194,7 @@ static inline bool is_gdsc_disabled(struct mdss_pll_resources *pll_res)
WARN(1, "gdsc_base register is not defined\n");
return true;
}
-
- return ((readl_relaxed(pll_res->gdsc_base + 0x4) & BIT(31)) &&
- (!(readl_relaxed(pll_res->gdsc_base) & BIT(0)))) ? false : true;
+ return readl_relaxed(pll_res->gdsc_base) & BIT(31) ? false : true;
}
static inline int mdss_pll_div_prepare(struct clk_hw *hw)
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 8b63979..4eb8a04 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -151,19 +151,6 @@ static struct clk_branch video_cc_at_clk = {
},
};
-static struct clk_branch video_cc_debug_clk = {
- .halt_reg = 0xa58,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xa58,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "video_cc_debug_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch video_cc_qdss_trig_clk = {
.halt_reg = 0x970,
.halt_check = BRANCH_HALT,
@@ -299,7 +286,6 @@ static struct clk_branch video_cc_venus_ctl_core_clk = {
static struct clk_regmap *video_cc_sdm845_clocks[] = {
[VIDEO_CC_APB_CLK] = &video_cc_apb_clk.clkr,
[VIDEO_CC_AT_CLK] = &video_cc_at_clk.clkr,
- [VIDEO_CC_DEBUG_CLK] = &video_cc_debug_clk.clkr,
[VIDEO_CC_QDSS_TRIG_CLK] = &video_cc_qdss_trig_clk.clkr,
[VIDEO_CC_QDSS_TSCTR_DIV8_CLK] = &video_cc_qdss_tsctr_div8_clk.clkr,
[VIDEO_CC_VCODEC0_AXI_CLK] = &video_cc_vcodec0_axi_clk.clkr,
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 3e2ab3b..9e95bf9 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -2,6 +2,7 @@
tristate "DAX: direct access to differentiated memory"
default m if NVDIMM_DAX
depends on TRANSPARENT_HUGEPAGE
+ select SRCU
help
Support raw access to differentiated (persistence, bandwidth,
latency...) memory via an mmap(2) capable character
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 152552d..1932248 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -24,6 +24,7 @@
#include "dax.h"
static dev_t dax_devt;
+DEFINE_STATIC_SRCU(dax_srcu);
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
static int nr_dax = CONFIG_NR_DEV_DAX;
@@ -59,7 +60,7 @@ struct dax_region {
* @region - parent region
* @dev - device backing the character device
* @cdev - core chardev data
- * @alive - !alive + rcu grace period == no new mappings can be established
+ * @alive - !alive + srcu grace period == no new mappings can be established
* @id - child id in the region
* @num_resources - number of physical address extents in this device
* @res - array of physical address ranges
@@ -437,7 +438,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, unsigned int flags)
{
- int rc;
+ int rc, id;
struct file *filp = vma->vm_file;
struct dax_dev *dax_dev = filp->private_data;
@@ -445,9 +446,9 @@ static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
current->comm, (flags & FAULT_FLAG_WRITE)
? "write" : "read", vma->vm_start, vma->vm_end);
- rcu_read_lock();
+ id = srcu_read_lock(&dax_srcu);
rc = __dax_dev_pmd_fault(dax_dev, vma, addr, pmd, flags);
- rcu_read_unlock();
+ srcu_read_unlock(&dax_srcu, id);
return rc;
}
@@ -563,11 +564,11 @@ static void unregister_dax_dev(void *dev)
* Note, rcu is not protecting the liveness of dax_dev, rcu is
* ensuring that any fault handlers that might have seen
* dax_dev->alive == true, have completed. Any fault handlers
- * that start after synchronize_rcu() has started will abort
+ * that start after synchronize_srcu() has started will abort
* upon seeing dax_dev->alive == false.
*/
dax_dev->alive = false;
- synchronize_rcu();
+ synchronize_srcu(&dax_srcu);
unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
cdev_del(cdev);
device_unregister(dev);
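A condensed sketch (not from the patch; names are illustrative) of the SRCU pattern the hunks above switch to. Unlike plain RCU, readers inside the srcu read-side section may sleep, which is what the PMD fault path needs:

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(example_srcu);
static bool example_alive = true;

static int example_fault_path(void)
{
	int id, rc = -ENXIO;

	id = srcu_read_lock(&example_srcu);
	if (example_alive)
		rc = 0;		/* ... handle the fault, may sleep ... */
	srcu_read_unlock(&example_srcu, id);
	return rc;
}

static void example_teardown(void)
{
	example_alive = false;
	synchronize_srcu(&example_srcu);	/* all in-flight readers done */
	/* now safe to tear down mappings and delete the cdev */
}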
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index cd4fdfb..d70104d 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -54,6 +54,11 @@
#define MON2_ZONE_CNT(m) ((m)->base + 0x2D8)
#define MON2_ZONE_MAX(m, zone) ((m)->base + 0x2E0 + 0x4 * zone)
+enum bwmon_type {
+ BWMON_1,
+ BWMON_2,
+};
+
struct bwmon_spec {
bool wrap_on_thres;
bool overflow;
@@ -76,7 +81,6 @@ struct bwmon {
};
#define to_bwmon(ptr) container_of(ptr, struct bwmon, hw)
-#define has_hw_sampling(m) (m->spec->hw_sampling)
#define ENABLE_MASK BIT(0)
#define THROTTLE_MASK 0x1F
@@ -86,20 +90,29 @@ struct bwmon {
#define INT_STATUS_MASK_HWS 0xF0
static DEFINE_SPINLOCK(glb_lock);
-static void mon_enable(struct bwmon *m)
+
+static __always_inline void mon_enable(struct bwmon *m, enum bwmon_type type)
{
- if (has_hw_sampling(m))
- writel_relaxed((ENABLE_MASK | m->throttle_adj), MON2_EN(m));
- else
- writel_relaxed((ENABLE_MASK | m->throttle_adj), MON_EN(m));
+ switch (type) {
+ case BWMON_1:
+ writel_relaxed(ENABLE_MASK | m->throttle_adj, MON_EN(m));
+ break;
+ case BWMON_2:
+ writel_relaxed(ENABLE_MASK | m->throttle_adj, MON2_EN(m));
+ break;
+ }
}
-static void mon_disable(struct bwmon *m)
+static __always_inline void mon_disable(struct bwmon *m, enum bwmon_type type)
{
- if (has_hw_sampling(m))
- writel_relaxed(m->throttle_adj, MON2_EN(m));
- else
+ switch (type) {
+ case BWMON_1:
writel_relaxed(m->throttle_adj, MON_EN(m));
+ break;
+ case BWMON_2:
+ writel_relaxed(m->throttle_adj, MON2_EN(m));
+ break;
+ }
/*
* mon_disable() and mon_irq_clear(),
* If latter goes first and count happen to trigger irq, we would
@@ -110,24 +123,25 @@ static void mon_disable(struct bwmon *m)
#define MON_CLEAR_BIT 0x1
#define MON_CLEAR_ALL_BIT 0x2
-static void mon_clear(struct bwmon *m, bool clear_all)
+static __always_inline
+void mon_clear(struct bwmon *m, bool clear_all, enum bwmon_type type)
{
- if (!has_hw_sampling(m)) {
+ switch (type) {
+ case BWMON_1:
writel_relaxed(MON_CLEAR_BIT, MON_CLEAR(m));
- goto out;
+ break;
+ case BWMON_2:
+ if (clear_all)
+ writel_relaxed(MON_CLEAR_ALL_BIT, MON2_CLEAR(m));
+ else
+ writel_relaxed(MON_CLEAR_BIT, MON2_CLEAR(m));
+ break;
}
-
- if (clear_all)
- writel_relaxed(MON_CLEAR_ALL_BIT, MON2_CLEAR(m));
- else
- writel_relaxed(MON_CLEAR_BIT, MON2_CLEAR(m));
-
/*
* The counter clear and IRQ clear bits are not in the same 4KB
* region. So, we need to make sure the counter clear is completed
* before we try to clear the IRQ or do any other counter operations.
*/
-out:
mb();
}
@@ -148,72 +162,141 @@ static void mon_set_hw_sampling_window(struct bwmon *m, unsigned int sample_ms)
}
}
-static void mon_irq_enable(struct bwmon *m)
+static void mon_glb_irq_enable(struct bwmon *m)
{
u32 val;
- spin_lock(&glb_lock);
val = readl_relaxed(GLB_INT_EN(m));
val |= 1 << m->mport;
writel_relaxed(val, GLB_INT_EN(m));
-
- val = readl_relaxed(MON_INT_EN(m));
- val |= has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_ENABLE_V1;
- writel_relaxed(val, MON_INT_EN(m));
- spin_unlock(&glb_lock);
- /*
- * make Sure irq enable complete for local and global
- * to avoid race with other monitor calls
- */
- mb();
}
-static void mon_irq_disable(struct bwmon *m)
+static __always_inline
+void mon_irq_enable(struct bwmon *m, enum bwmon_type type)
{
u32 val;
spin_lock(&glb_lock);
- val = readl_relaxed(GLB_INT_EN(m));
- val &= ~(1 << m->mport);
- writel_relaxed(val, GLB_INT_EN(m));
-
- val = readl_relaxed(MON_INT_EN(m));
- val &= has_hw_sampling(m) ? ~INT_STATUS_MASK_HWS : ~INT_ENABLE_V1;
- writel_relaxed(val, MON_INT_EN(m));
+ switch (type) {
+ case BWMON_1:
+ mon_glb_irq_enable(m);
+ val = readl_relaxed(MON_INT_EN(m));
+ val |= INT_ENABLE_V1;
+ writel_relaxed(val, MON_INT_EN(m));
+ break;
+ case BWMON_2:
+ mon_glb_irq_enable(m);
+ val = readl_relaxed(MON_INT_EN(m));
+ val |= INT_STATUS_MASK_HWS;
+ writel_relaxed(val, MON_INT_EN(m));
+ break;
+ }
spin_unlock(&glb_lock);
/*
- * make Sure irq disable complete for local and global
+ * make sure the irq enable has completed for local and global
* to avoid race with other monitor calls
*/
mb();
}
-static unsigned int mon_irq_status(struct bwmon *m)
+static void mon_glb_irq_disable(struct bwmon *m)
+{
+ u32 val;
+
+ val = readl_relaxed(GLB_INT_EN(m));
+ val &= ~(1 << m->mport);
+ writel_relaxed(val, GLB_INT_EN(m));
+}
+
+static __always_inline
+void mon_irq_disable(struct bwmon *m, enum bwmon_type type)
+{
+ u32 val;
+
+ spin_lock(&glb_lock);
+
+ switch (type) {
+ case BWMON_1:
+ mon_glb_irq_disable(m);
+ val = readl_relaxed(MON_INT_EN(m));
+ val &= ~INT_ENABLE_V1;
+ writel_relaxed(val, MON_INT_EN(m));
+ break;
+ case BWMON_2:
+ mon_glb_irq_disable(m);
+ val = readl_relaxed(MON_INT_EN(m));
+ val &= ~INT_STATUS_MASK_HWS;
+ writel_relaxed(val, MON_INT_EN(m));
+ break;
+ }
+ spin_unlock(&glb_lock);
+ /*
+ * make sure the irq disable has completed for local and global
+ * to avoid race with other monitor calls
+ */
+ mb();
+}
+
+static __always_inline
+unsigned int mon_irq_status(struct bwmon *m, enum bwmon_type type)
{
u32 mval;
- mval = readl_relaxed(MON_INT_STATUS(m));
-
- dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
- readl_relaxed(GLB_INT_STATUS(m)));
-
- mval &= has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_STATUS_MASK;
+ switch (type) {
+ case BWMON_1:
+ mval = readl_relaxed(MON_INT_STATUS(m));
+ dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
+ readl_relaxed(GLB_INT_STATUS(m)));
+ mval &= INT_STATUS_MASK;
+ break;
+ case BWMON_2:
+ mval = readl_relaxed(MON_INT_STATUS(m));
+ dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
+ readl_relaxed(GLB_INT_STATUS(m)));
+ mval &= INT_STATUS_MASK_HWS;
+ break;
+ }
return mval;
}
-static void mon_irq_clear(struct bwmon *m)
+
+static void mon_glb_irq_clear(struct bwmon *m)
{
- u32 intclr;
-
- intclr = has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_STATUS_MASK;
-
- writel_relaxed(intclr, MON_INT_CLR(m));
+ /*
+ * Synchronize the local interrupt clear in mon_irq_clear()
+ * with the global interrupt clear here. Otherwise, the CPU
+ * may reorder the two writes and clear the global interrupt
+ * before the local interrupt, causing the global interrupt
+ * to be retriggered by the local interrupt still being high.
+ */
mb();
writel_relaxed(1 << m->mport, GLB_INT_CLR(m));
+ /*
+ * Similarly, because the global registers are in a different
+ * region than the local registers, we need to ensure any register
+ * writes to enable the monitor after this call are ordered with the
+ * clearing here so that local writes don't happen before the
+ * interrupt is cleared.
+ */
mb();
}
+static __always_inline
+void mon_irq_clear(struct bwmon *m, enum bwmon_type type)
+{
+ switch (type) {
+ case BWMON_1:
+ writel_relaxed(INT_STATUS_MASK, MON_INT_CLR(m));
+ mon_glb_irq_clear(m);
+ break;
+ case BWMON_2:
+ writel_relaxed(INT_STATUS_MASK_HWS, MON_INT_CLR(m));
+ mon_glb_irq_clear(m);
+ break;
+ }
+}
+
static int mon_set_throttle_adj(struct bw_hwmon *hw, uint adj)
{
struct bwmon *m = to_bwmon(hw);
@@ -331,12 +414,12 @@ static u32 mon_get_limit(struct bwmon *m)
#define THRES_HIT(status) (status & BIT(0))
#define OVERFLOW(status) (status & BIT(1))
-static unsigned long mon_get_count(struct bwmon *m)
+static unsigned long mon_get_count1(struct bwmon *m)
{
unsigned long count, status;
count = readl_relaxed(MON_CNT(m));
- status = mon_irq_status(m);
+ status = mon_irq_status(m, BWMON_1);
dev_dbg(m->dev, "Counter: %08lx\n", count);
@@ -385,6 +468,23 @@ static unsigned long mon_get_zone_stats(struct bwmon *m)
return count;
}
+static __always_inline
+unsigned long mon_get_count(struct bwmon *m, enum bwmon_type type)
+{
+ unsigned long count;
+
+ switch (type) {
+ case BWMON_1:
+ count = mon_get_count1(m);
+ break;
+ case BWMON_2:
+ count = mon_get_zone_stats(m);
+ break;
+ }
+
+ return count;
+}
+
/* ********** CPUBW specific code ********** */
/* Returns MBps of read/writes for the sampling window. */
@@ -398,30 +498,41 @@ static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms,
return mbps;
}
-static unsigned long get_bytes_and_clear(struct bw_hwmon *hw)
+static __always_inline
+unsigned long __get_bytes_and_clear(struct bw_hwmon *hw, enum bwmon_type type)
{
struct bwmon *m = to_bwmon(hw);
unsigned long count;
- mon_disable(m);
- count = has_hw_sampling(m) ? mon_get_zone_stats(m) : mon_get_count(m);
- mon_clear(m, false);
- mon_irq_clear(m);
- mon_enable(m);
+ mon_disable(m, type);
+ count = mon_get_count(m, type);
+ mon_clear(m, false, type);
+ mon_irq_clear(m, type);
+ mon_enable(m, type);
return count;
}
+static unsigned long get_bytes_and_clear(struct bw_hwmon *hw)
+{
+ return __get_bytes_and_clear(hw, BWMON_1);
+}
+
+static unsigned long get_bytes_and_clear2(struct bw_hwmon *hw)
+{
+ return __get_bytes_and_clear(hw, BWMON_2);
+}
+
static unsigned long set_thres(struct bw_hwmon *hw, unsigned long bytes)
{
unsigned long count;
u32 limit;
struct bwmon *m = to_bwmon(hw);
- mon_disable(m);
- count = mon_get_count(m);
- mon_clear(m, false);
- mon_irq_clear(m);
+ mon_disable(m, BWMON_1);
+ count = mon_get_count1(m);
+ mon_clear(m, false, BWMON_1);
+ mon_irq_clear(m, BWMON_1);
if (likely(!m->spec->wrap_on_thres))
limit = bytes;
@@ -429,7 +540,7 @@ static unsigned long set_thres(struct bw_hwmon *hw, unsigned long bytes)
limit = max(bytes, 500000UL);
mon_set_limit(m, limit);
- mon_enable(m);
+ mon_enable(m, BWMON_1);
return count;
}
@@ -438,21 +549,22 @@ static unsigned long set_hw_events(struct bw_hwmon *hw, unsigned int sample_ms)
{
struct bwmon *m = to_bwmon(hw);
- mon_disable(m);
- mon_clear(m, false);
- mon_irq_clear(m);
+ mon_disable(m, BWMON_2);
+ mon_clear(m, false, BWMON_2);
+ mon_irq_clear(m, BWMON_2);
mon_set_zones(m, sample_ms);
- mon_enable(m);
+ mon_enable(m, BWMON_2);
return 0;
}
-static irqreturn_t bwmon_intr_handler(int irq, void *dev)
+static irqreturn_t
+__bwmon_intr_handler(int irq, void *dev, enum bwmon_type type)
{
struct bwmon *m = dev;
- m->intr_status = mon_irq_status(m);
+ m->intr_status = mon_irq_status(m, type);
if (!m->intr_status)
return IRQ_NONE;
@@ -462,6 +574,16 @@ static irqreturn_t bwmon_intr_handler(int irq, void *dev)
return IRQ_HANDLED;
}
+static irqreturn_t bwmon_intr_handler(int irq, void *dev)
+{
+ return __bwmon_intr_handler(irq, dev, BWMON_1);
+}
+
+static irqreturn_t bwmon_intr_handler2(int irq, void *dev)
+{
+ return __bwmon_intr_handler(irq, dev, BWMON_2);
+}
+
static irqreturn_t bwmon_intr_thread(int irq, void *dev)
{
struct bwmon *m = dev;
@@ -470,98 +592,180 @@ static irqreturn_t bwmon_intr_thread(int irq, void *dev)
return IRQ_HANDLED;
}
-static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
+static __always_inline int
+__start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps, enum bwmon_type type)
{
struct bwmon *m = to_bwmon(hw);
- u32 limit;
- u32 zone_actions = calc_zone_actions();
+ u32 limit, zone_actions;
int ret;
+ irq_handler_t handler;
- ret = request_threaded_irq(m->irq, bwmon_intr_handler,
- bwmon_intr_thread,
+ switch (type) {
+ case BWMON_1:
+ handler = bwmon_intr_handler;
+ limit = mbps_to_bytes(mbps, hw->df->profile->polling_ms, 0);
+ break;
+ case BWMON_2:
+ zone_actions = calc_zone_actions();
+ handler = bwmon_intr_handler2;
+ break;
+ }
+
+ ret = request_threaded_irq(m->irq, handler, bwmon_intr_thread,
IRQF_ONESHOT | IRQF_SHARED,
dev_name(m->dev), m);
if (ret) {
dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
- ret);
+ ret);
return ret;
}
- mon_disable(m);
+ mon_disable(m, type);
- mon_clear(m, true);
- limit = mbps_to_bytes(mbps, hw->df->profile->polling_ms, 0);
- if (has_hw_sampling(m)) {
+ mon_clear(m, false, type);
+
+ switch (type) {
+ case BWMON_1:
+ handler = bwmon_intr_handler;
+ mon_set_limit(m, limit);
+ break;
+ case BWMON_2:
mon_set_zones(m, hw->df->profile->polling_ms);
/* Set the zone actions to increment appropriate counters */
writel_relaxed(zone_actions, MON2_ZONE_ACTIONS(m));
- } else {
- mon_set_limit(m, limit);
+ break;
}
- mon_irq_clear(m);
- mon_irq_enable(m);
- mon_enable(m);
+ mon_irq_clear(m, type);
+ mon_irq_enable(m, type);
+ mon_enable(m, type);
return 0;
}
-static void stop_bw_hwmon(struct bw_hwmon *hw)
+static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
+{
+ return __start_bw_hwmon(hw, mbps, BWMON_1);
+}
+
+static int start_bw_hwmon2(struct bw_hwmon *hw, unsigned long mbps)
+{
+ return __start_bw_hwmon(hw, mbps, BWMON_2);
+}
+
+static __always_inline
+void __stop_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
{
struct bwmon *m = to_bwmon(hw);
- mon_irq_disable(m);
+ mon_irq_disable(m, type);
free_irq(m->irq, m);
- mon_disable(m);
- mon_clear(m, true);
- mon_irq_clear(m);
+ mon_disable(m, type);
+ mon_clear(m, true, type);
+ mon_irq_clear(m, type);
+}
+
+static void stop_bw_hwmon(struct bw_hwmon *hw)
+{
+ return __stop_bw_hwmon(hw, BWMON_1);
+}
+
+static void stop_bw_hwmon2(struct bw_hwmon *hw)
+{
+ return __stop_bw_hwmon(hw, BWMON_2);
+}
+
+static __always_inline
+int __suspend_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
+{
+ struct bwmon *m = to_bwmon(hw);
+
+ mon_irq_disable(m, type);
+ free_irq(m->irq, m);
+ mon_disable(m, type);
+ mon_irq_clear(m, type);
+
+ return 0;
}
static int suspend_bw_hwmon(struct bw_hwmon *hw)
{
- struct bwmon *m = to_bwmon(hw);
+ return __suspend_bw_hwmon(hw, BWMON_1);
+}
- mon_irq_disable(m);
- free_irq(m->irq, m);
- mon_disable(m);
- mon_irq_clear(m);
+static int suspend_bw_hwmon2(struct bw_hwmon *hw)
+{
+ return __suspend_bw_hwmon(hw, BWMON_2);
+}
+
+static int __resume_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
+{
+ struct bwmon *m = to_bwmon(hw);
+ int ret;
+ irq_handler_t handler;
+
+ switch (type) {
+ case BWMON_1:
+ handler = bwmon_intr_handler;
+ break;
+ case BWMON_2:
+ handler = bwmon_intr_handler2;
+ break;
+ }
+
+ mon_clear(m, false, type);
+ ret = request_threaded_irq(m->irq, handler, bwmon_intr_thread,
+ IRQF_ONESHOT | IRQF_SHARED,
+ dev_name(m->dev), m);
+ if (ret) {
+ dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
+ ret);
+ return ret;
+ }
+
+ mon_irq_enable(m, type);
+ mon_enable(m, type);
return 0;
}
static int resume_bw_hwmon(struct bw_hwmon *hw)
{
- struct bwmon *m = to_bwmon(hw);
- int ret;
+ return __resume_bw_hwmon(hw, BWMON_1);
+}
- mon_clear(m, false);
- ret = request_threaded_irq(m->irq, bwmon_intr_handler,
- bwmon_intr_thread,
- IRQF_ONESHOT | IRQF_SHARED,
- dev_name(m->dev), m);
- if (ret) {
- dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
- ret);
- return ret;
- }
-
- mon_irq_enable(m);
- mon_enable(m);
-
- return 0;
+static int resume_bw_hwmon2(struct bw_hwmon *hw)
+{
+ return __resume_bw_hwmon(hw, BWMON_2);
}
/*************************************************************************/
static const struct bwmon_spec spec[] = {
- { .wrap_on_thres = true, .overflow = false, .throt_adj = false,
- .hw_sampling = false},
- { .wrap_on_thres = false, .overflow = true, .throt_adj = false,
- .hw_sampling = false},
- { .wrap_on_thres = false, .overflow = true, .throt_adj = true,
- .hw_sampling = false},
- { .wrap_on_thres = false, .overflow = true, .throt_adj = true,
- .hw_sampling = true},
+ [0] = {
+ .wrap_on_thres = true,
+ .overflow = false,
+ .throt_adj = false,
+ .hw_sampling = false
+ },
+ [1] = {
+ .wrap_on_thres = false,
+ .overflow = true,
+ .throt_adj = false,
+ .hw_sampling = false
+ },
+ [2] = {
+ .wrap_on_thres = false,
+ .overflow = true,
+ .throt_adj = true,
+ .hw_sampling = false
+ },
+ [3] = {
+ .wrap_on_thres = false,
+ .overflow = true,
+ .throt_adj = true,
+ .hw_sampling = true
+ },
};
static const struct of_device_id bimc_bwmon_match_table[] = {
@@ -577,7 +781,6 @@ static int bimc_bwmon_driver_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct bwmon *m;
- const struct of_device_id *id;
int ret;
u32 data;
@@ -593,22 +796,11 @@ static int bimc_bwmon_driver_probe(struct platform_device *pdev)
}
m->mport = data;
- id = of_match_device(bimc_bwmon_match_table, dev);
- if (!id) {
+ m->spec = of_device_get_match_data(dev);
+ if (!m->spec) {
dev_err(dev, "Unknown device type!\n");
return -ENODEV;
}
- m->spec = id->data;
-
- if (has_hw_sampling(m)) {
- ret = of_property_read_u32(dev->of_node,
- "qcom,hw-timer-hz", &data);
- if (ret) {
- dev_err(dev, "HW sampling rate not specified!\n");
- return ret;
- }
- m->hw_timer_hz = data;
- }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
if (!res) {
@@ -641,17 +833,33 @@ static int bimc_bwmon_driver_probe(struct platform_device *pdev)
m->hw.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
if (!m->hw.of_node)
return -EINVAL;
- m->hw.start_hwmon = &start_bw_hwmon;
- m->hw.stop_hwmon = &stop_bw_hwmon;
- m->hw.suspend_hwmon = &suspend_bw_hwmon;
- m->hw.resume_hwmon = &resume_bw_hwmon;
- m->hw.get_bytes_and_clear = &get_bytes_and_clear;
- m->hw.set_thres = &set_thres;
- if (has_hw_sampling(m))
- m->hw.set_hw_events = &set_hw_events;
+
+ if (m->spec->hw_sampling) {
+ ret = of_property_read_u32(dev->of_node, "qcom,hw-timer-hz",
+ &m->hw_timer_hz);
+ if (ret) {
+ dev_err(dev, "HW sampling rate not specified!\n");
+ return ret;
+ }
+
+ m->hw.start_hwmon = start_bw_hwmon2;
+ m->hw.stop_hwmon = stop_bw_hwmon2;
+ m->hw.suspend_hwmon = suspend_bw_hwmon2;
+ m->hw.resume_hwmon = resume_bw_hwmon2;
+ m->hw.get_bytes_and_clear = get_bytes_and_clear2;
+ m->hw.set_hw_events = set_hw_events;
+ } else {
+ m->hw.start_hwmon = start_bw_hwmon;
+ m->hw.stop_hwmon = stop_bw_hwmon;
+ m->hw.suspend_hwmon = suspend_bw_hwmon;
+ m->hw.resume_hwmon = resume_bw_hwmon;
+ m->hw.get_bytes_and_clear = get_bytes_and_clear;
+ m->hw.set_thres = set_thres;
+ }
+
if (m->spec->throt_adj) {
- m->hw.set_throttle_adj = &mon_set_throttle_adj;
- m->hw.get_throttle_adj = &mon_get_throttle_adj;
+ m->hw.set_throttle_adj = mon_set_throttle_adj;
+ m->hw.get_throttle_adj = mon_get_throttle_adj;
}
ret = register_bw_hwmon(dev, &m->hw);
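
The probe hunk above drops the of_match_device()/id->data pair in favour of of_device_get_match_data(), with the match-table entries now carrying the per-version spec that selects the hwmon ops. A minimal sketch of that binding pattern follows; the driver, compatible string and spec names are illustrative only, not part of this patch.

/* Illustrative only: per-compatible spec data resolved in probe(). */
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_spec {
	bool hw_sampling;
};

static const struct demo_spec demo_spec_v2 = { .hw_sampling = true };

static const struct of_device_id demo_match_table[] = {
	{ .compatible = "vendor,demo-bwmon2", .data = &demo_spec_v2 },
	{}
};

static int demo_probe(struct platform_device *pdev)
{
	/* Returns the .data of the matching of_device_id, or NULL. */
	const struct demo_spec *spec = of_device_get_match_data(&pdev->dev);

	if (!spec)
		return -ENODEV;

	/* pick hw-sampling vs. legacy ops here, as the bwmon probe does */
	return 0;
}
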
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 5b85b8d..8f582f6 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -70,6 +70,29 @@ static struct devfreq *find_device_devfreq(struct device *dev)
}
/**
+ * devfreq_set_freq_limits() - Set min and max frequency from freq_table
+ * @devfreq: the devfreq instance
+ */
+static void devfreq_set_freq_limits(struct devfreq *devfreq)
+{
+ int idx;
+ unsigned long min = ~0, max = 0;
+
+ if (!devfreq->profile->freq_table)
+ return;
+
+ for (idx = 0; idx < devfreq->profile->max_state; idx++) {
+ if (min > devfreq->profile->freq_table[idx])
+ min = devfreq->profile->freq_table[idx];
+ if (max < devfreq->profile->freq_table[idx])
+ max = devfreq->profile->freq_table[idx];
+ }
+
+ devfreq->min_freq = min;
+ devfreq->max_freq = max;
+}
+
+/**
* devfreq_get_freq_level() - Lookup freq_table for the frequency
* @devfreq: the devfreq instance
* @freq: the target frequency
@@ -569,6 +592,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq_set_freq_table(devfreq);
mutex_lock(&devfreq->lock);
}
+ devfreq_set_freq_limits(devfreq);
dev_set_name(&devfreq->dev, "%s", dev_name(dev));
err = device_register(&devfreq->dev);
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index b5d78b1..4112bef 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -109,6 +109,7 @@
dsi-staging/dsi_ctrl_hw_cmn.o \
dsi-staging/dsi_ctrl_hw_1_4.o \
dsi-staging/dsi_ctrl_hw_2_0.o \
+ dsi-staging/dsi_ctrl_hw_2_2.o \
dsi-staging/dsi_ctrl.o \
dsi-staging/dsi_catalog.o \
dsi-staging/dsi_drm.o \
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
index 976be99..3625ed0 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -83,6 +83,19 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
ctrl->ops.clamp_enable = NULL;
ctrl->ops.clamp_disable = NULL;
break;
+ case DSI_CTRL_VERSION_2_2:
+ ctrl->ops.phy_reset_config = dsi_ctrl_hw_22_phy_reset_config;
+ ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map;
+ ctrl->ops.wait_for_lane_idle =
+ dsi_ctrl_hw_20_wait_for_lane_idle;
+ ctrl->ops.reg_dump_to_buffer =
+ dsi_ctrl_hw_20_reg_dump_to_buffer;
+ ctrl->ops.ulps_ops.ulps_request = NULL;
+ ctrl->ops.ulps_ops.ulps_exit = NULL;
+ ctrl->ops.ulps_ops.get_lanes_in_ulps = NULL;
+ ctrl->ops.clamp_enable = NULL;
+ ctrl->ops.clamp_disable = NULL;
+ break;
default:
break;
}
@@ -121,6 +134,7 @@ int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
switch (version) {
case DSI_CTRL_VERSION_1_4:
case DSI_CTRL_VERSION_2_0:
+ case DSI_CTRL_VERSION_2_2:
dsi_catalog_cmn_init(ctrl, version);
break;
default:
@@ -167,6 +181,8 @@ static void dsi_catalog_phy_3_0_init(struct dsi_phy_hw *phy)
dsi_phy_hw_v3_0_ulps_exit;
phy->ops.ulps_ops.get_lanes_in_ulps =
dsi_phy_hw_v3_0_get_lanes_in_ulps;
+ phy->ops.ulps_ops.is_lanes_in_ulps =
+ dsi_phy_hw_v3_0_is_lanes_in_ulps;
phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v3_0;
}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index 4a6a934..5dcdf46 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -97,6 +97,7 @@ void dsi_phy_hw_v3_0_ulps_request(struct dsi_phy_hw *phy,
void dsi_phy_hw_v3_0_ulps_exit(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, u32 lanes);
u32 dsi_phy_hw_v3_0_get_lanes_in_ulps(struct dsi_phy_hw *phy);
+bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes);
int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
u32 *timing_val, u32 size);
@@ -157,6 +158,8 @@ void dsi_ctrl_hw_cmn_trigger_command_dma(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_dln0_phy_err(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_cmn_phy_reset_config(struct dsi_ctrl_hw *ctrl,
bool enable);
+void dsi_ctrl_hw_22_phy_reset_config(struct dsi_ctrl_hw *ctrl,
+ bool enable);
/* Definitions specific to 1.4 DSI controller hardware */
int dsi_ctrl_hw_14_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
index cc87775..560964e 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
@@ -236,32 +236,52 @@ int dsi_core_clk_start(struct dsi_core_clks *c_clks)
return rc;
error_disable_mmss_clk:
- clk_disable_unprepare(c_clks->clks.core_mmss_clk);
+ if (c_clks->clks.core_mmss_clk)
+ clk_disable_unprepare(c_clks->clks.core_mmss_clk);
error_disable_bus_clk:
- clk_disable_unprepare(c_clks->clks.bus_clk);
+ if (c_clks->clks.bus_clk)
+ clk_disable_unprepare(c_clks->clks.bus_clk);
error_disable_iface_clk:
- clk_disable_unprepare(c_clks->clks.iface_clk);
+ if (c_clks->clks.iface_clk)
+ clk_disable_unprepare(c_clks->clks.iface_clk);
error_disable_mnoc_clk:
if (c_clks->clks.mnoc_clk)
clk_disable_unprepare(c_clks->clks.mnoc_clk);
error_disable_core_clk:
- clk_disable_unprepare(c_clks->clks.mdp_core_clk);
+ if (c_clks->clks.mdp_core_clk)
+ clk_disable_unprepare(c_clks->clks.mdp_core_clk);
error:
return rc;
}
int dsi_core_clk_stop(struct dsi_core_clks *c_clks)
{
- if (msm_bus_scale_client_update_request(c_clks->bus_handle, 0))
- pr_err("bus scale client disable failed\n");
- clk_disable_unprepare(c_clks->clks.core_mmss_clk);
- clk_disable_unprepare(c_clks->clks.bus_clk);
- clk_disable_unprepare(c_clks->clks.iface_clk);
+ int rc = 0;
+
+ if (c_clks->bus_handle) {
+ rc = msm_bus_scale_client_update_request(c_clks->bus_handle, 0);
+ if (rc) {
+ pr_err("bus scale client disable failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (c_clks->clks.core_mmss_clk)
+ clk_disable_unprepare(c_clks->clks.core_mmss_clk);
+
+ if (c_clks->clks.bus_clk)
+ clk_disable_unprepare(c_clks->clks.bus_clk);
+
+ if (c_clks->clks.iface_clk)
+ clk_disable_unprepare(c_clks->clks.iface_clk);
+
if (c_clks->clks.mnoc_clk)
clk_disable_unprepare(c_clks->clks.mnoc_clk);
- clk_disable_unprepare(c_clks->clks.mdp_core_clk);
- return 0;
+ if (c_clks->clks.mdp_core_clk)
+ clk_disable_unprepare(c_clks->clks.mdp_core_clk);
+
+ return rc;
}
static int dsi_link_clk_set_rate(struct dsi_link_clks *l_clks)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 5df48c3..9a71ea0 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -62,6 +62,7 @@ static DEFINE_MUTEX(dsi_ctrl_list_lock);
static const enum dsi_ctrl_version dsi_ctrl_v1_4 = DSI_CTRL_VERSION_1_4;
static const enum dsi_ctrl_version dsi_ctrl_v2_0 = DSI_CTRL_VERSION_2_0;
+static const enum dsi_ctrl_version dsi_ctrl_v2_2 = DSI_CTRL_VERSION_2_2;
static const struct of_device_id msm_dsi_of_match[] = {
{
@@ -72,6 +73,10 @@ static const struct of_device_id msm_dsi_of_match[] = {
.compatible = "qcom,dsi-ctrl-hw-v2.0",
.data = &dsi_ctrl_v2_0,
},
+ {
+ .compatible = "qcom,dsi-ctrl-hw-v2.2",
+ .data = &dsi_ctrl_v2_2,
+ },
{}
};
@@ -428,15 +433,34 @@ static int dsi_ctrl_init_regmap(struct platform_device *pdev,
pr_debug("[%s] map dsi_ctrl registers to %p\n", ctrl->name,
ctrl->hw.base);
- ptr = msm_ioremap(pdev, "mmss_misc", ctrl->name);
- if (IS_ERR(ptr)) {
- rc = PTR_ERR(ptr);
- return rc;
+ switch (ctrl->version) {
+ case DSI_CTRL_VERSION_1_4:
+ case DSI_CTRL_VERSION_2_0:
+ ptr = msm_ioremap(pdev, "mmss_misc", ctrl->name);
+ if (IS_ERR(ptr)) {
+ pr_err("mmss_misc base address not found for [%s]\n",
+ ctrl->name);
+ rc = PTR_ERR(ptr);
+ return rc;
+ }
+ ctrl->hw.mmss_misc_base = ptr;
+ ctrl->hw.disp_cc_base = NULL;
+ break;
+ case DSI_CTRL_VERSION_2_2:
+ ptr = msm_ioremap(pdev, "disp_cc_base", ctrl->name);
+ if (IS_ERR(ptr)) {
+ pr_err("disp_cc base address not found for [%s]\n",
+ ctrl->name);
+ rc = PTR_ERR(ptr);
+ return rc;
+ }
+ ctrl->hw.disp_cc_base = ptr;
+ ctrl->hw.mmss_misc_base = NULL;
+ break;
+ default:
+ break;
}
- ctrl->hw.mmss_misc_base = ptr;
- pr_debug("[%s] map mmss_misc registers to %p\n", ctrl->name,
- ctrl->hw.mmss_misc_base);
return rc;
}
@@ -532,7 +556,7 @@ static int dsi_ctrl_clocks_init(struct platform_device *pdev,
goto fail;
}
- link->esc_clk = devm_clk_get(&pdev->dev, "core_clk");
+ link->esc_clk = devm_clk_get(&pdev->dev, "esc_clk");
if (IS_ERR(link->esc_clk)) {
rc = PTR_ERR(link->esc_clk);
pr_err("failed to get esc_clk, rc=%d\n", rc);
@@ -613,10 +637,8 @@ static int dsi_ctrl_supplies_init(struct platform_device *pdev,
rc = dsi_pwr_get_dt_vreg_data(&pdev->dev,
&ctrl->pwr_info.digital,
"qcom,core-supply-entries");
- if (rc) {
- pr_err("failed to get digital supply, rc = %d\n", rc);
- goto error;
- }
+ if (rc)
+ pr_debug("failed to get digital supply, rc = %d\n", rc);
rc = dsi_pwr_get_dt_vreg_data(&pdev->dev,
&ctrl->pwr_info.host_pwr,
@@ -663,10 +685,10 @@ static int dsi_ctrl_supplies_init(struct platform_device *pdev,
ctrl->pwr_info.host_pwr.vregs = NULL;
ctrl->pwr_info.host_pwr.count = 0;
error_digital:
- devm_kfree(&pdev->dev, ctrl->pwr_info.digital.vregs);
+ if (ctrl->pwr_info.digital.vregs)
+ devm_kfree(&pdev->dev, ctrl->pwr_info.digital.vregs);
ctrl->pwr_info.digital.vregs = NULL;
ctrl->pwr_info.digital.count = 0;
-error:
return rc;
}
@@ -1204,6 +1226,7 @@ static int dsi_ctrl_dev_probe(struct platform_device *pdev)
}
dsi_ctrl->cell_index = index;
+ dsi_ctrl->version = version;
dsi_ctrl->name = of_get_property(pdev->dev.of_node, "label", NULL);
if (!dsi_ctrl->name)
@@ -1227,7 +1250,6 @@ static int dsi_ctrl_dev_probe(struct platform_device *pdev)
goto fail_clks;
}
- dsi_ctrl->version = version;
rc = dsi_catalog_ctrl_setup(&dsi_ctrl->hw, dsi_ctrl->version,
dsi_ctrl->cell_index);
if (rc) {
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index 161024a..859d707 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -41,6 +41,7 @@ enum dsi_ctrl_version {
DSI_CTRL_VERSION_UNKNOWN,
DSI_CTRL_VERSION_1_4,
DSI_CTRL_VERSION_2_0,
+ DSI_CTRL_VERSION_2_2,
DSI_CTRL_VERSION_MAX
};
@@ -575,18 +576,26 @@ struct dsi_ctrl_hw_ops {
/*
* struct dsi_ctrl_hw - DSI controller hardware object specific to an instance
- * @base: VA for the DSI controller base address.
- * @length: Length of the DSI controller register map.
- * @index: Instance ID of the controller.
- * @feature_map: Features supported by the DSI controller.
- * @ops: Function pointers to the operations supported by the
- * controller.
+ * @base: VA for the DSI controller base address.
+ * @length: Length of the DSI controller register map.
+ * @mmss_misc_base: Base address of mmss_misc register map.
+ * @mmss_misc_length: Length of mmss_misc register map.
+ * @disp_cc_base: Base address of disp_cc register map.
+ * @disp_cc_length: Length of disp_cc register map.
+ * @index: Instance ID of the controller.
+ * @feature_map: Features supported by the DSI controller.
+ * @ops: Function pointers to the operations supported by the
+ * controller.
+ * @supported_interrupts: Number of supported interrupts.
+ * @supported_errors: Number of supported errors.
*/
struct dsi_ctrl_hw {
void __iomem *base;
u32 length;
void __iomem *mmss_misc_base;
u32 mmss_misc_length;
+ void __iomem *disp_cc_base;
+ u32 disp_cc_length;
u32 index;
/* features */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
new file mode 100644
index 0000000..1b1e811
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "dsi-hw:" fmt
+
+#include "dsi_ctrl_hw.h"
+#include "dsi_ctrl_reg.h"
+#include "dsi_hw.h"
+
+/* Equivalent to register DISP_CC_MISC_CMD */
+#define DISP_CC_CLAMP_REG_OFF 0x00
+
+/**
+ * dsi_ctrl_hw_22_phy_reset_config() - configure clamp control during ULPS
+ * @ctrl: Pointer to the controller host hardware.
+ * @enable: boolean to specify enable/disable.
+ */
+void dsi_ctrl_hw_22_phy_reset_config(struct dsi_ctrl_hw *ctrl,
+ bool enable)
+{
+ u32 reg = 0;
+
+ reg = DSI_DISP_CC_R32(ctrl, DISP_CC_CLAMP_REG_OFF);
+
+	/* Mask/unmask the disable-PHY-reset bit for this DSI controller instance */
+ if (enable)
+ reg &= ~BIT(ctrl->index);
+ else
+ reg |= BIT(ctrl->index);
+ DSI_DISP_CC_W32(ctrl, DISP_CC_CLAMP_REG_OFF, reg);
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index 8605338..122a63d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -320,8 +320,8 @@ void dsi_ctrl_hw_cmn_video_engine_setup(struct dsi_ctrl_hw *ctrl,
reg |= (common_cfg->bit_swap_green ? BIT(4) : 0);
reg |= (common_cfg->bit_swap_blue ? BIT(8) : 0);
DSI_W32(ctrl, DSI_VIDEO_MODE_DATA_CTRL, reg);
- /* Enable Timing double buffering */
- DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x1);
+ /* Disable Timing double buffering */
+ DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x0);
pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 2c5bd76..106511c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -260,7 +260,8 @@ static int dsi_display_set_ulps(struct dsi_display *display, bool enable)
return rc;
}
- rc = dsi_phy_set_ulps(m_ctrl->phy, &display->config, enable);
+ rc = dsi_phy_set_ulps(m_ctrl->phy, &display->config, enable,
+ display->clamp_enabled);
if (rc) {
pr_err("Ulps PHY state change(%d) failed\n", enable);
return rc;
@@ -278,7 +279,8 @@ static int dsi_display_set_ulps(struct dsi_display *display, bool enable)
return rc;
}
- rc = dsi_phy_set_ulps(ctrl->phy, &display->config, enable);
+ rc = dsi_phy_set_ulps(ctrl->phy, &display->config, enable,
+ display->clamp_enabled);
if (rc) {
pr_err("Ulps PHY state change(%d) failed\n", enable);
return rc;
@@ -1365,8 +1367,7 @@ int dsi_pre_clkoff_cb(void *priv,
/*
* Enable DSI clamps only if entering idle power collapse.
*/
- if (dsi_panel_initialized(display->panel) &&
- dsi_panel_ulps_feature_enabled(display->panel)) {
+ if (dsi_panel_initialized(display->panel)) {
dsi_display_phy_idle_off(display);
rc = dsi_display_set_clamp(display, true);
if (rc)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
index 447f613..8250da3 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
@@ -33,6 +33,15 @@
writel_relaxed((val), (dsi_hw)->mmss_misc_base + (off)); \
} while (0)
+#define DSI_DISP_CC_R32(dsi_hw, off) \
+ readl_relaxed((dsi_hw)->disp_cc_base + (off))
+#define DSI_DISP_CC_W32(dsi_hw, off, val) \
+ do {\
+		pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
+ (dsi_hw)->index, #off, val); \
+ writel_relaxed((val), (dsi_hw)->disp_cc_base + (off)); \
+ } while (0)
+
#define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off))
#define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off))
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index bda9c2d..b814eb8 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -1512,6 +1512,17 @@ static int dsi_panel_parse_reset_sequence(struct dsi_panel *panel,
return rc;
}
+static int dsi_panel_parse_features(struct dsi_panel *panel,
+ struct device_node *of_node)
+{
+ panel->ulps_enabled =
+ of_property_read_bool(of_node, "qcom,ulps-enabled");
+
+ pr_debug("ulps_enabled:%d\n", panel->ulps_enabled);
+
+ return 0;
+}
+
static int dsi_panel_parse_jitter_config(struct dsi_panel *panel,
struct device_node *of_node)
{
@@ -2117,6 +2128,10 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
if (rc)
pr_err("failed to parse panel jitter config, rc=%d\n", rc);
+ rc = dsi_panel_parse_features(panel, of_node);
+ if (rc)
+ pr_err("failed to parse panel features, rc=%d\n", rc);
+
rc = dsi_panel_parse_hdr_config(panel, of_node);
if (rc)
pr_err("failed to parse hdr config, rc=%d\n", rc);
@@ -2490,7 +2505,6 @@ int dsi_panel_post_enable(struct dsi_panel *panel)
panel->name, rc);
goto error;
}
- panel->panel_initialized = false;
error:
mutex_unlock(&panel->panel_lock);
return rc;
@@ -2536,6 +2550,8 @@ int dsi_panel_disable(struct dsi_panel *panel)
panel->name, rc);
goto error;
}
+ panel->panel_initialized = false;
+
error:
mutex_unlock(&panel->panel_lock);
return rc;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index 96a98bd..ebfb40b8 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -388,7 +388,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
/** TODO: initialize debugfs */
dsi_phy->pdev = pdev;
platform_set_drvdata(pdev, dsi_phy);
- pr_debug("Probe successful for %s\n", dsi_phy->name);
+ pr_info("Probe successful for %s\n", dsi_phy->name);
return 0;
fail_supplies:
@@ -669,7 +669,7 @@ int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable)
}
static int dsi_phy_enable_ulps(struct msm_dsi_phy *phy,
- struct dsi_host_config *config)
+ struct dsi_host_config *config, bool clamp_enabled)
{
int rc = 0;
u32 lanes = 0;
@@ -679,17 +679,25 @@ static int dsi_phy_enable_ulps(struct msm_dsi_phy *phy,
lanes = config->common_config.data_lanes;
lanes |= DSI_CLOCK_LANE;
- rc = phy->hw.ops.ulps_ops.wait_for_lane_idle(&phy->hw, lanes);
- if (rc) {
- pr_err("lanes not entering idle, skip ULPS\n");
- return rc;
+	/*
+	 * If DSI clamps are enabled, the DSI lanes are already idle, so
+	 * skip the lane-idle check when programming ULPS entry while
+	 * coming out of an idle screen.
+	 */
+ if (!clamp_enabled) {
+ rc = phy->hw.ops.ulps_ops.wait_for_lane_idle(&phy->hw, lanes);
+ if (rc) {
+ pr_err("lanes not entering idle, skip ULPS\n");
+ return rc;
+ }
}
phy->hw.ops.ulps_ops.ulps_request(&phy->hw, &phy->cfg, lanes);
ulps_lanes = phy->hw.ops.ulps_ops.get_lanes_in_ulps(&phy->hw);
- if ((lanes & ulps_lanes) != lanes) {
+ if (!phy->hw.ops.ulps_ops.is_lanes_in_ulps(lanes, ulps_lanes)) {
pr_err("Failed to enter ULPS, request=0x%x, actual=0x%x\n",
lanes, ulps_lanes);
rc = -EIO;
@@ -701,7 +709,6 @@ static int dsi_phy_enable_ulps(struct msm_dsi_phy *phy,
static int dsi_phy_disable_ulps(struct msm_dsi_phy *phy,
struct dsi_host_config *config)
{
- int rc = 0;
u32 ulps_lanes, lanes = 0;
if (config->panel_mode == DSI_OP_CMD_MODE)
@@ -710,25 +717,27 @@ static int dsi_phy_disable_ulps(struct msm_dsi_phy *phy,
ulps_lanes = phy->hw.ops.ulps_ops.get_lanes_in_ulps(&phy->hw);
- if ((lanes & ulps_lanes) != lanes)
- pr_err("Mismatch between lanes in ULPS\n");
-
- lanes &= ulps_lanes;
+ if (!phy->hw.ops.ulps_ops.is_lanes_in_ulps(lanes, ulps_lanes)) {
+ pr_err("Mismatch in ULPS: lanes:%d, ulps_lanes:%d\n",
+ lanes, ulps_lanes);
+ return -EIO;
+ }
phy->hw.ops.ulps_ops.ulps_exit(&phy->hw, &phy->cfg, lanes);
ulps_lanes = phy->hw.ops.ulps_ops.get_lanes_in_ulps(&phy->hw);
- if (ulps_lanes & lanes) {
+
+ if (phy->hw.ops.ulps_ops.is_lanes_in_ulps(lanes, ulps_lanes)) {
pr_err("Lanes (0x%x) stuck in ULPS\n", ulps_lanes);
- rc = -EIO;
+ return -EIO;
}
- return rc;
+ return 0;
}
int dsi_phy_set_ulps(struct msm_dsi_phy *phy, struct dsi_host_config *config,
- bool enable)
+ bool enable, bool clamp_enabled)
{
int rc = 0;
@@ -738,7 +747,10 @@ int dsi_phy_set_ulps(struct msm_dsi_phy *phy, struct dsi_host_config *config,
}
if (!phy->hw.ops.ulps_ops.ulps_request ||
- !phy->hw.ops.ulps_ops.ulps_exit) {
+ !phy->hw.ops.ulps_ops.ulps_exit ||
+ !phy->hw.ops.ulps_ops.get_lanes_in_ulps ||
+ !phy->hw.ops.ulps_ops.is_lanes_in_ulps ||
+ !phy->hw.ops.ulps_ops.wait_for_lane_idle) {
pr_debug("DSI PHY ULPS ops not present\n");
return 0;
}
@@ -746,7 +758,7 @@ int dsi_phy_set_ulps(struct msm_dsi_phy *phy, struct dsi_host_config *config,
mutex_lock(&phy->phy_lock);
if (enable)
- rc = dsi_phy_enable_ulps(phy, config);
+ rc = dsi_phy_enable_ulps(phy, config, clamp_enabled);
else
rc = dsi_phy_disable_ulps(phy, config);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
index 4a64855..e721486 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
@@ -182,11 +182,12 @@ int dsi_phy_disable(struct msm_dsi_phy *phy);
* @phy: DSI PHY handle
* @config: DSi host configuration information.
* @enable: Enable/Disable
+ * @clamp_enabled: mmss_clamp enabled/disabled
*
* Return: error code.
*/
int dsi_phy_set_ulps(struct msm_dsi_phy *phy, struct dsi_host_config *config,
- bool enable);
+ bool enable, bool clamp_enabled);
/**
* dsi_phy_clk_cb_register() - Register PHY clock control callback
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
index daaa78a..51c2f46 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
@@ -143,15 +143,22 @@ struct phy_ulps_config_ops {
* @phy: Pointer to DSI PHY hardware instance.
*
* Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
- * state. If 0 is returned, all the lanes are active.
+ * state.
*
* Return: List of lanes in ULPS state.
*/
u32 (*get_lanes_in_ulps)(struct dsi_phy_hw *phy);
+
+ /**
+ * is_lanes_in_ulps() - checks if the given lanes are in ulps
+ * @lanes: lanes to be checked.
+	 * @ulps_lanes: lanes in ulps currently.
+ *
+ * Return: true if all the given lanes are in ulps; false otherwise.
+ */
+	bool (*is_lanes_in_ulps)(u32 lanes, u32 ulps_lanes);
};
-
-
/**
* struct dsi_phy_hw_ops - Operations for DSI PHY hardware.
* @regulator_enable: Enable PHY regulators.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
index 96f5c19..371239d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
@@ -159,7 +159,7 @@ static void dsi_phy_hw_v3_0_lane_settings(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg)
{
int i;
- u8 tx_dctrl[] = {0x00, 0x00, 0x00, 0x02, 0x01};
+ u8 tx_dctrl[] = {0x00, 0x00, 0x00, 0x04, 0x01};
/* Strength ctrl settings */
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
@@ -186,6 +186,10 @@ static void dsi_phy_hw_v3_0_lane_settings(struct dsi_phy_hw *phy,
DSI_W32(phy, DSIPHY_LNX_OFFSET_BOT_CTRL(i), 0x0);
DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(i), tx_dctrl[i]);
}
+
+	/* Toggle BIT 0 to release freeze I/O */
+ DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), 0x05);
+ DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), 0x04);
}
/**
@@ -419,6 +423,14 @@ u32 dsi_phy_hw_v3_0_get_lanes_in_ulps(struct dsi_phy_hw *phy)
return lanes;
}
+bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes)
+{
+ if (lanes & ulps_lanes)
+ return false;
+
+ return true;
+}
+
int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
u32 *timing_val, u32 size)
{
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 4b263d3..4471d0b 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -138,6 +138,7 @@ enum msm_mdp_crtc_property {
CRTC_PROP_MEM_IB,
CRTC_PROP_ROT_PREFILL_BW,
CRTC_PROP_ROT_CLK,
+ CRTC_PROP_ROI_V1,
/* total # of properties */
CRTC_PROP_COUNT
@@ -158,6 +159,7 @@ enum msm_mdp_conn_property {
CONNECTOR_PROP_DST_Y,
CONNECTOR_PROP_DST_W,
CONNECTOR_PROP_DST_H,
+ CONNECTOR_PROP_ROI_V1,
/* enum/bitmask properties */
CONNECTOR_PROP_TOPOLOGY_NAME,
@@ -200,6 +202,38 @@ enum msm_display_caps {
};
/**
+ * struct msm_roi_alignment - region of interest alignment restrictions
+ * @xstart_pix_align: left x offset alignment restriction
+ * @width_pix_align: width alignment restriction
+ * @ystart_pix_align: top y offset alignment restriction
+ * @height_pix_align: height alignment restriction
+ * @min_width: minimum width restriction
+ * @min_height: minimum height restriction
+ */
+struct msm_roi_alignment {
+ uint32_t xstart_pix_align;
+ uint32_t width_pix_align;
+ uint32_t ystart_pix_align;
+ uint32_t height_pix_align;
+ uint32_t min_width;
+ uint32_t min_height;
+};
+
+/**
+ * struct msm_roi_caps - display's region of interest capabilities
+ * @enabled: true if some region of interest is supported
+ * @merge_rois: merge rois before sending to display
+ * @num_roi: maximum number of rois supported
+ * @align: roi alignment restrictions
+ */
+struct msm_roi_caps {
+ bool enabled;
+ bool merge_rois;
+ uint32_t num_roi;
+ struct msm_roi_alignment align;
+};
+
+/**
* struct msm_display_dsc_info - defines dsc configuration
* @version: DSC version.
* @scr_rev: DSC revision.
@@ -338,6 +372,7 @@ struct msm_compression_info {
* @vtotal: display vertical total
* @jitter: display jitter configuration
* @comp_info: Compression supported by the display
+ * @roi_caps: Region of interest capability info
*/
struct msm_display_info {
int intf_type;
@@ -361,21 +396,19 @@ struct msm_display_info {
uint32_t jitter;
struct msm_compression_info comp_info;
+ struct msm_roi_caps roi_caps;
};
#define MSM_MAX_ROI 4
/**
- * struct msm_roi_mapping - Regions of interest structure for mapping CRTC to
- * Connector output
- * @num_rects: number of valid rectangles in src and dst arrays
- * @src: source roi rectangle
- * @dst: destination roi rectangle
+ * struct msm_roi_list - list of regions of interest for a drm object
+ * @num_rects: number of valid rectangles in the roi array
+ * @roi: list of roi rectangles
*/
-struct msm_roi_mapping {
+struct msm_roi_list {
uint32_t num_rects;
- struct drm_clip_rect src[MSM_MAX_ROI];
- struct drm_clip_rect dst[MSM_MAX_ROI];
+ struct drm_clip_rect roi[MSM_MAX_ROI];
};
/**
@@ -383,7 +416,7 @@ struct msm_roi_mapping {
* @rois: Regions of interest structure for mapping CRTC to Connector output
*/
struct msm_display_kickoff_params {
- struct msm_roi_mapping *rois;
+ struct msm_roi_list *rois;
};
/**
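
The new msm_roi_caps/msm_roi_alignment fields reach SDE through msm_display_info (see the sde_connector_get_info() call added below). A hypothetical sketch of how a display driver might advertise partial-update support; the numeric values are illustrative, not taken from this patch.

/* Hypothetical values; a real driver derives these from panel DT/config.
 * struct msm_display_info comes from msm_drv.h as extended above. */
static void demo_fill_roi_caps(struct msm_display_info *info)
{
	info->roi_caps.enabled = true;		/* some ROI support present */
	info->roi_caps.merge_rois = false;
	info->roi_caps.num_roi = 1;		/* one rectangle per commit */

	info->roi_caps.align.xstart_pix_align = 4;
	info->roi_caps.align.ystart_pix_align = 4;
	info->roi_caps.align.width_pix_align = 4;
	info->roi_caps.align.height_pix_align = 4;
	info->roi_caps.align.min_width = 64;
	info->roi_caps.align.min_height = 64;
}
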
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 6c2d643..e3f8261 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -21,6 +21,12 @@
#define BL_NODE_NAME_SIZE 32
+#define SDE_DEBUG_CONN(c, fmt, ...) SDE_DEBUG("conn%d " fmt,\
+ (c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_CONN(c, fmt, ...) SDE_ERROR("conn%d " fmt,\
+ (c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
+
static const struct drm_prop_enum_list e_topology_name[] = {
{SDE_RM_TOPOLOGY_UNKNOWN, "sde_unknown"},
{SDE_RM_TOPOLOGY_SINGLEPIPE, "sde_singlepipe"},
@@ -248,6 +254,25 @@ int sde_connector_pre_kickoff(struct drm_connector *connector)
return rc;
}
+void sde_connector_clk_ctrl(struct drm_connector *connector, bool enable)
+{
+ struct sde_connector *c_conn;
+ struct dsi_display *display;
+ u32 state = enable ? DSI_CLK_ON : DSI_CLK_OFF;
+
+ if (!connector) {
+ SDE_ERROR("invalid connector\n");
+ return;
+ }
+
+ c_conn = to_sde_connector(connector);
+ display = (struct dsi_display *) c_conn->display;
+
+ if (display && c_conn->ops.clk_ctrl)
+ c_conn->ops.clk_ctrl(display->mdp_clk_handle,
+ DSI_ALL_CLKS, state);
+}
+
static void sde_connector_destroy(struct drm_connector *connector)
{
struct sde_connector *c_conn;
@@ -397,6 +422,122 @@ sde_connector_atomic_duplicate_state(struct drm_connector *connector)
return &c_state->base;
}
+static int _sde_connector_roi_v1_check_roi(
+ struct sde_connector *c_conn,
+ struct drm_clip_rect *roi_conn,
+ const struct msm_roi_caps *caps)
+{
+ const struct msm_roi_alignment *align = &caps->align;
+ int w = roi_conn->x2 - roi_conn->x1;
+ int h = roi_conn->y2 - roi_conn->y1;
+
+ if (w <= 0 || h <= 0) {
+ SDE_ERROR_CONN(c_conn, "invalid conn roi w %d h %d\n", w, h);
+ return -EINVAL;
+ }
+
+ if (w < align->min_width || w % align->width_pix_align) {
+ SDE_ERROR_CONN(c_conn,
+ "invalid conn roi width %d min %d align %d\n",
+ w, align->min_width, align->width_pix_align);
+ return -EINVAL;
+ }
+
+ if (h < align->min_height || h % align->height_pix_align) {
+ SDE_ERROR_CONN(c_conn,
+ "invalid conn roi height %d min %d align %d\n",
+ h, align->min_height, align->height_pix_align);
+ return -EINVAL;
+ }
+
+ if (roi_conn->x1 % align->xstart_pix_align) {
+ SDE_ERROR_CONN(c_conn, "invalid conn roi x1 %d align %d\n",
+ roi_conn->x1, align->xstart_pix_align);
+ return -EINVAL;
+ }
+
+ if (roi_conn->y1 % align->ystart_pix_align) {
+ SDE_ERROR_CONN(c_conn, "invalid conn roi y1 %d align %d\n",
+ roi_conn->y1, align->ystart_pix_align);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int _sde_connector_set_roi_v1(
+ struct sde_connector *c_conn,
+ struct sde_connector_state *c_state,
+ void *usr_ptr)
+{
+ struct sde_drm_roi_v1 roi_v1;
+ struct msm_display_info display_info;
+ struct msm_roi_caps *caps;
+ int i, rc;
+
+ if (!c_conn || !c_state) {
+ SDE_ERROR("invalid args\n");
+ return -EINVAL;
+ }
+
+ rc = sde_connector_get_info(&c_conn->base, &display_info);
+ if (rc) {
+ SDE_ERROR_CONN(c_conn, "display get info error: %d\n", rc);
+ return rc;
+ }
+
+ caps = &display_info.roi_caps;
+ if (!caps->enabled) {
+ SDE_ERROR_CONN(c_conn, "display roi capability is disabled\n");
+ return -ENOTSUPP;
+ }
+
+ memset(&c_state->rois, 0, sizeof(c_state->rois));
+
+ if (!usr_ptr) {
+ SDE_DEBUG_CONN(c_conn, "rois cleared\n");
+ return 0;
+ }
+
+ if (copy_from_user(&roi_v1, usr_ptr, sizeof(roi_v1))) {
+ SDE_ERROR_CONN(c_conn, "failed to copy roi_v1 data\n");
+ return -EINVAL;
+ }
+
+ SDE_DEBUG_CONN(c_conn, "num_rects %d\n", roi_v1.num_rects);
+
+ if (roi_v1.num_rects == 0) {
+ SDE_DEBUG_CONN(c_conn, "rois cleared\n");
+ return 0;
+ }
+
+ if (roi_v1.num_rects > SDE_MAX_ROI_V1 ||
+ roi_v1.num_rects > caps->num_roi) {
+ SDE_ERROR_CONN(c_conn, "too many rects specified: %d\n",
+ roi_v1.num_rects);
+ return -EINVAL;
+ }
+
+ c_state->rois.num_rects = roi_v1.num_rects;
+ for (i = 0; i < roi_v1.num_rects; ++i) {
+ int rc;
+
+ rc = _sde_connector_roi_v1_check_roi(c_conn, &roi_v1.roi[i],
+ caps);
+ if (rc)
+ return rc;
+
+ c_state->rois.roi[i] = roi_v1.roi[i];
+ SDE_DEBUG_CONN(c_conn, "roi%d: roi 0x%x 0x%x 0x%x 0x%x\n", i,
+ c_state->rois.roi[i].x1,
+ c_state->rois.roi[i].y1,
+ c_state->rois.roi[i].x2,
+ c_state->rois.roi[i].y2);
+ }
+
+ return 0;
+}
+
static int sde_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
@@ -462,6 +603,12 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
SDE_ERROR("invalid topology_control: 0x%llX\n", val);
}
+ if (idx == CONNECTOR_PROP_ROI_V1) {
+ rc = _sde_connector_set_roi_v1(c_conn, c_state, (void *)val);
+ if (rc)
+ SDE_ERROR_CONN(c_conn, "invalid roi_v1, rc: %d\n", rc);
+ }
+
/* check for custom property handling */
if (!rc && c_conn->ops.set_property) {
rc = c_conn->ops.set_property(connector,
@@ -511,13 +658,7 @@ static int sde_connector_atomic_get_property(struct drm_connector *connector,
idx = msm_property_index(&c_conn->property_info, property);
if (idx == CONNECTOR_PROP_RETIRE_FENCE)
- /*
- * Set a fence offset if not a virtual connector, so that the
- * fence signals after one additional commit rather than at the
- * end of the current one.
- */
- rc = sde_fence_create(&c_conn->retire_fence, val,
- c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
+ rc = sde_fence_create(&c_conn->retire_fence, val, 0);
else
/* get cached property value */
rc = msm_property_atomic_get(&c_conn->property_info,
@@ -706,6 +847,7 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
struct sde_kms_info *info;
struct sde_connector *c_conn = NULL;
struct dsi_display *dsi_display;
+ struct msm_display_info display_info;
int rc;
if (!dev || !dev->dev_private || !encoder) {
@@ -838,6 +980,13 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
}
}
+ rc = sde_connector_get_info(&c_conn->base, &display_info);
+ if (!rc && display_info.roi_caps.enabled) {
+ msm_property_install_volatile_range(
+ &c_conn->property_info, "sde_drm_roi_v1", 0x0,
+ 0, ~0, 0, CONNECTOR_PROP_ROI_V1);
+ }
+
msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
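
Userspace passes the ROI list to the new "sde_drm_roi_v1" property as a pointer that the kernel reads back with copy_from_user() in _sde_connector_set_roi_v1() above (and in _sde_crtc_set_roi_v1() below). A hypothetical client-side sketch, assuming the uapi layout implied by that handling; SDE_MAX_ROI_V1 and the struct definition here are assumptions, not part of this patch.

#include <stdint.h>
#include <drm/drm.h>		/* struct drm_clip_rect */

#define SDE_MAX_ROI_V1	4	/* assumed uapi limit */

struct sde_drm_roi_v1 {		/* assumed uapi layout */
	uint32_t num_rects;
	struct drm_clip_rect roi[SDE_MAX_ROI_V1];
};

static void demo_fill_roi(struct sde_drm_roi_v1 *r)
{
	r->num_rects = 1;
	r->roi[0].x1 = 0;	/* must satisfy xstart_pix_align */
	r->roi[0].y1 = 0;
	r->roi[0].x2 = 1080;	/* width must satisfy min_width/width_pix_align */
	r->roi[0].y2 = 960;	/* partial-height update */
	/* The struct's user-space address (cast to a 64-bit property value)
	 * is then set on the connector/CRTC in the atomic commit; the driver
	 * validates it in _sde_connector_roi_v1_check_roi(). */
}
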
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 70d4952..601299e 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -150,6 +150,14 @@ struct sde_connector_ops {
int (*pre_kickoff)(struct drm_connector *connector,
void *display,
struct msm_display_kickoff_params *params);
+
+ /**
+ * clk_ctrl - perform clk enable/disable on the connector
+ * @handle: Pointer to clk handle
+ * @type: Type of clks
+	 * @state: State of clks
+ */
+ int (*clk_ctrl)(void *handle, u32 type, u32 state);
};
/**
@@ -273,7 +281,7 @@ struct sde_connector_state {
int mmu_id;
uint64_t property_values[CONNECTOR_PROP_COUNT];
- struct msm_roi_mapping rois;
+ struct msm_roi_list rois;
};
/**
@@ -366,6 +374,13 @@ int sde_connector_get_info(struct drm_connector *connector,
struct msm_display_info *info);
/**
+ * sde_connector_clk_ctrl - enables/disables the connector clks
+ * @connector: Pointer to drm connector object
+ * @enable: true/false to enable/disable
+ */
+void sde_connector_clk_ctrl(struct drm_connector *connector, bool enable);
+
+/**
* sde_connector_trigger_event - indicate that an event has occurred
* Any callbacks that have been registered against this event will
* be called from the same thread context.
@@ -425,5 +440,22 @@ int sde_connector_register_custom_event(struct sde_kms *kms,
*/
int sde_connector_pre_kickoff(struct drm_connector *connector);
+/**
+ * sde_connector_needs_offset - check whether the output fence offset should
+ * be adjusted based on the display type
+ * @connector: Pointer to drm connector object
+ * Returns: true if offset is required, false for all other cases.
+ */
+static inline bool sde_connector_needs_offset(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn;
+
+ if (!connector)
+ return false;
+
+ c_conn = to_sde_connector(connector);
+ return (c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
+}
+
#endif /* _SDE_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index a44dd68..6bae083 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -656,21 +656,344 @@ static void _sde_crtc_setup_dim_layer_cfg(struct drm_crtc *crtc,
}
}
+void sde_crtc_get_crtc_roi(struct drm_crtc_state *state,
+ const struct sde_rect **crtc_roi)
+{
+ struct sde_crtc_state *crtc_state;
+
+ if (!state || !crtc_roi)
+ return;
+
+ crtc_state = to_sde_crtc_state(state);
+ *crtc_roi = &crtc_state->crtc_roi;
+}
+
+static int _sde_crtc_set_roi_v1(struct drm_crtc_state *state,
+ void *usr_ptr)
+{
+ struct drm_crtc *crtc;
+ struct sde_crtc_state *cstate;
+ struct sde_drm_roi_v1 roi_v1;
+ int i;
+
+ if (!state) {
+ SDE_ERROR("invalid args\n");
+ return -EINVAL;
+ }
+
+ cstate = to_sde_crtc_state(state);
+ crtc = cstate->base.crtc;
+
+ memset(&cstate->user_roi_list, 0, sizeof(cstate->user_roi_list));
+
+ if (!usr_ptr) {
+ SDE_DEBUG("crtc%d: rois cleared\n", DRMID(crtc));
+ return 0;
+ }
+
+ if (copy_from_user(&roi_v1, usr_ptr, sizeof(roi_v1))) {
+ SDE_ERROR("crtc%d: failed to copy roi_v1 data\n", DRMID(crtc));
+ return -EINVAL;
+ }
+
+ SDE_DEBUG("crtc%d: num_rects %d\n", DRMID(crtc), roi_v1.num_rects);
+
+ if (roi_v1.num_rects == 0) {
+ SDE_DEBUG("crtc%d: rois cleared\n", DRMID(crtc));
+ return 0;
+ }
+
+ if (roi_v1.num_rects > SDE_MAX_ROI_V1) {
+ SDE_ERROR("crtc%d: too many rects specified: %d\n", DRMID(crtc),
+ roi_v1.num_rects);
+ return -EINVAL;
+ }
+
+ cstate->user_roi_list.num_rects = roi_v1.num_rects;
+ for (i = 0; i < roi_v1.num_rects; ++i) {
+ cstate->user_roi_list.roi[i] = roi_v1.roi[i];
+ SDE_DEBUG("crtc%d: roi%d: roi (%d,%d) (%d,%d)\n",
+ DRMID(crtc), i,
+ cstate->user_roi_list.roi[i].x1,
+ cstate->user_roi_list.roi[i].y1,
+ cstate->user_roi_list.roi[i].x2,
+ cstate->user_roi_list.roi[i].y2);
+ }
+
+ return 0;
+}
+
+static int _sde_crtc_set_crtc_roi(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *crtc_state;
+ struct sde_rect *crtc_roi;
+ struct drm_clip_rect crtc_clip, *user_rect;
+ int i, num_attached_conns = 0;
+
+ if (!crtc || !state)
+ return -EINVAL;
+
+ sde_crtc = to_sde_crtc(crtc);
+ crtc_state = to_sde_crtc_state(state);
+ crtc_roi = &crtc_state->crtc_roi;
+
+ /* init to invalid range maxes */
+ crtc_clip.x1 = ~0;
+ crtc_clip.y1 = ~0;
+ crtc_clip.x2 = 0;
+ crtc_clip.y2 = 0;
+
+ for_each_connector_in_state(state->state, conn, conn_state, i) {
+ struct sde_connector_state *sde_conn_state;
+
+ if (!conn_state || conn_state->crtc != crtc)
+ continue;
+
+ if (num_attached_conns) {
+ SDE_ERROR(
+ "crtc%d: unsupported: roi on crtc w/ >1 connectors\n",
+ DRMID(crtc));
+ return -EINVAL;
+ }
+ ++num_attached_conns;
+
+ sde_conn_state = to_sde_connector_state(conn_state);
+
+ if (memcmp(&sde_conn_state->rois, &crtc_state->user_roi_list,
+ sizeof(crtc_state->user_roi_list))) {
+ SDE_ERROR("%s: crtc -> conn roi scaling unsupported\n",
+ sde_crtc->name);
+ return -EINVAL;
+ }
+ }
+
+ /* aggregate all clipping rectangles together for overall crtc roi */
+ for (i = 0; i < crtc_state->user_roi_list.num_rects; i++) {
+ user_rect = &crtc_state->user_roi_list.roi[i];
+
+ crtc_clip.x1 = min(crtc_clip.x1, user_rect->x1);
+ crtc_clip.y1 = min(crtc_clip.y1, user_rect->y1);
+ crtc_clip.x2 = max(crtc_clip.x2, user_rect->x2);
+ crtc_clip.y2 = max(crtc_clip.y2, user_rect->y2);
+
+ SDE_DEBUG(
+ "%s: conn%d roi%d (%d,%d),(%d,%d) -> crtc (%d,%d),(%d,%d)\n",
+ sde_crtc->name, DRMID(crtc), i,
+ user_rect->x1, user_rect->y1,
+ user_rect->x2, user_rect->y2,
+ crtc_clip.x1, crtc_clip.y1,
+ crtc_clip.x2, crtc_clip.y2);
+
+ }
+
+ if (crtc_clip.x2 && crtc_clip.y2) {
+ crtc_roi->x = crtc_clip.x1;
+ crtc_roi->y = crtc_clip.y1;
+ crtc_roi->w = crtc_clip.x2 - crtc_clip.x1;
+ crtc_roi->h = crtc_clip.y2 - crtc_clip.y1;
+ } else {
+ crtc_roi->x = 0;
+ crtc_roi->y = 0;
+ crtc_roi->w = 0;
+ crtc_roi->h = 0;
+ }
+
+ SDE_DEBUG("%s: crtc roi (%d,%d,%d,%d)\n", sde_crtc->name,
+ crtc_roi->x, crtc_roi->y, crtc_roi->w, crtc_roi->h);
+
+ return 0;
+}
+
+static int _sde_crtc_set_lm_roi(struct drm_crtc *crtc,
+ struct drm_crtc_state *state, int lm_idx)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *crtc_state;
+ const struct sde_rect *crtc_roi;
+ const struct sde_rect *lm_bounds;
+ struct sde_rect *lm_roi;
+
+ if (!crtc || !state || lm_idx >= ARRAY_SIZE(crtc_state->lm_bounds))
+ return -EINVAL;
+
+ sde_crtc = to_sde_crtc(crtc);
+ crtc_state = to_sde_crtc_state(state);
+ crtc_roi = &crtc_state->crtc_roi;
+ lm_bounds = &crtc_state->lm_bounds[lm_idx];
+ lm_roi = &crtc_state->lm_roi[lm_idx];
+
+ if (!sde_kms_rect_is_null(crtc_roi)) {
+ sde_kms_rect_intersect(crtc_roi, lm_bounds, lm_roi);
+ if (sde_kms_rect_is_null(lm_roi)) {
+ SDE_ERROR("unsupported R/L only partial update\n");
+ return -EINVAL;
+ }
+ } else {
+ memcpy(lm_roi, lm_bounds, sizeof(*lm_roi));
+ }
+
+ SDE_DEBUG("%s: lm%d roi (%d,%d,%d,%d)\n", sde_crtc->name, lm_idx,
+ lm_roi->x, lm_roi->y, lm_roi->w, lm_roi->h);
+
+ return 0;
+}
+
+static int _sde_crtc_check_rois_centered_and_symmetric(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *crtc_state;
+ const struct sde_rect *roi_prv, *roi_cur;
+ int lm_idx;
+
+ if (!crtc || !state)
+ return -EINVAL;
+
+ /*
+ * On certain HW, ROIs must be centered on the split between LMs,
+ * and be of equal width.
+ */
+
+ sde_crtc = to_sde_crtc(crtc);
+ crtc_state = to_sde_crtc_state(state);
+
+ roi_prv = &crtc_state->lm_roi[0];
+ for (lm_idx = 1; lm_idx < sde_crtc->num_mixers; lm_idx++) {
+ roi_cur = &crtc_state->lm_roi[lm_idx];
+
+		/* check lm rois have equal width and are horizontally adjacent */
+ if (((roi_prv->x + roi_prv->w) != roi_cur->x) ||
+ (roi_prv->w != roi_cur->w)) {
+ SDE_ERROR("%s: roi lm%d x %d w %d lm%d x %d w %d\n",
+ sde_crtc->name,
+ lm_idx-1, roi_prv->x, roi_prv->w,
+ lm_idx, roi_cur->x, roi_cur->w);
+ return -EINVAL;
+ }
+ roi_prv = roi_cur;
+ }
+
+ return 0;
+}
+
+static int _sde_crtc_check_planes_within_crtc_roi(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *crtc_state;
+ const struct sde_rect *crtc_roi;
+ struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+
+ if (!crtc || !state)
+ return -EINVAL;
+
+ /*
+	 * Reject the commit if a plane's CRTC destination coordinates fall outside
+ * the partial CRTC ROI. LM output is determined via connector ROIs,
+ * if they are specified, not Plane CRTC ROIs.
+ */
+
+ sde_crtc = to_sde_crtc(crtc);
+ crtc_state = to_sde_crtc_state(state);
+ crtc_roi = &crtc_state->crtc_roi;
+
+ if (sde_kms_rect_is_null(crtc_roi))
+ return 0;
+
+ drm_atomic_crtc_state_for_each_plane(plane, state) {
+ struct sde_rect plane_roi, intersection;
+
+ pstate = drm_atomic_get_plane_state(state->state, plane);
+ if (IS_ERR_OR_NULL(pstate)) {
+ int rc = PTR_ERR(pstate);
+
+ SDE_ERROR("%s: failed to get plane%d state, %d\n",
+ sde_crtc->name, plane->base.id, rc);
+ return rc;
+ }
+
+ plane_roi.x = pstate->crtc_x;
+ plane_roi.y = pstate->crtc_y;
+ plane_roi.w = pstate->crtc_w;
+ plane_roi.h = pstate->crtc_h;
+ sde_kms_rect_intersect(crtc_roi, &plane_roi, &intersection);
+ if (!sde_kms_rect_is_equal(&plane_roi, &intersection)) {
+ SDE_ERROR(
+ "%s: plane%d crtc roi (%d,%d,%d,%d) outside crtc roi (%d,%d,%d,%d)\n",
+ sde_crtc->name, plane->base.id,
+ plane_roi.x, plane_roi.y,
+ plane_roi.w, plane_roi.h,
+ crtc_roi->x, crtc_roi->y,
+ crtc_roi->w, crtc_roi->h);
+ return -E2BIG;
+ }
+ }
+
+ return 0;
+}
+
+static int _sde_crtc_check_rois(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct sde_crtc *sde_crtc;
+ int lm_idx;
+ int rc;
+
+ if (!crtc || !state)
+ return -EINVAL;
+
+ sde_crtc = to_sde_crtc(crtc);
+
+ rc = _sde_crtc_set_crtc_roi(crtc, state);
+ if (rc)
+ return rc;
+
+ for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
+ rc = _sde_crtc_set_lm_roi(crtc, state, lm_idx);
+ if (rc)
+ return rc;
+ }
+
+ rc = _sde_crtc_check_rois_centered_and_symmetric(crtc, state);
+ if (rc)
+ return rc;
+
+ rc = _sde_crtc_check_planes_within_crtc_roi(crtc, state);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
static void _sde_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
struct sde_crtc *sde_crtc;
struct sde_crtc_state *crtc_state;
+ const struct sde_rect *lm_roi;
+ struct sde_hw_mixer *hw_lm;
int lm_idx, lm_horiz_position;
+ if (!crtc)
+ return;
+
sde_crtc = to_sde_crtc(crtc);
crtc_state = to_sde_crtc_state(crtc->state);
lm_horiz_position = 0;
for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
- const struct sde_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
- struct sde_hw_mixer *hw_lm = sde_crtc->mixers[lm_idx].hw_lm;
struct sde_hw_mixer_cfg cfg;
+ lm_roi = &crtc_state->lm_roi[lm_idx];
+ hw_lm = sde_crtc->mixers[lm_idx].hw_lm;
+
+ SDE_EVT32(DRMID(crtc_state->base.crtc), lm_idx,
+ lm_roi->x, lm_roi->y, lm_roi->w, lm_roi->h);
+
if (sde_kms_rect_is_null(lm_roi))
continue;
@@ -742,9 +1065,12 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
format = to_sde_format(msm_framebuffer_format(pstate->base.fb));
- SDE_EVT32(DRMID(plane), state->src_x, state->src_y,
- state->src_w >> 16, state->src_h >> 16, state->crtc_x,
- state->crtc_y, state->crtc_w, state->crtc_h);
+ SDE_EVT32(DRMID(crtc), DRMID(plane),
+ state->fb ? state->fb->base.id : -1,
+ state->src_x >> 16, state->src_y >> 16,
+ state->src_w >> 16, state->src_h >> 16,
+ state->crtc_x, state->crtc_y,
+ state->crtc_w, state->crtc_h);
for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
struct sde_rect intersect;
@@ -877,6 +1203,8 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
&sde_crtc->stage_cfg, i);
}
+
+ _sde_crtc_program_lm_output_roi(crtc);
}
void sde_crtc_prepare_commit(struct drm_crtc *crtc,
@@ -1329,14 +1657,18 @@ static void _sde_crtc_setup_lm_bounds(struct drm_crtc *crtc,
crtc_split_width = sde_crtc_mixer_width(sde_crtc, adj_mode);
for (i = 0; i < sde_crtc->num_mixers; i++) {
- struct sde_rect *lm_bound = &cstate->lm_bounds[i];
-
- lm_bound->x = crtc_split_width * i;
- lm_bound->y = 0;
- lm_bound->w = crtc_split_width;
- lm_bound->h = adj_mode->vdisplay;
- SDE_EVT32(DRMID(crtc), i, lm_bound->x, lm_bound->y,
- lm_bound->w, lm_bound->h);
+ cstate->lm_bounds[i].x = crtc_split_width * i;
+ cstate->lm_bounds[i].y = 0;
+ cstate->lm_bounds[i].w = crtc_split_width;
+ cstate->lm_bounds[i].h = adj_mode->vdisplay;
+ memcpy(&cstate->lm_roi[i], &cstate->lm_bounds[i],
+ sizeof(cstate->lm_roi[i]));
+ SDE_EVT32(DRMID(crtc), i,
+ cstate->lm_bounds[i].x, cstate->lm_bounds[i].y,
+ cstate->lm_bounds[i].w, cstate->lm_bounds[i].h);
+ SDE_DEBUG("%s: lm%d bnd&roi (%d,%d,%d,%d)\n", sde_crtc->name, i,
+ cstate->lm_roi[i].x, cstate->lm_roi[i].y,
+ cstate->lm_roi[i].w, cstate->lm_roi[i].h);
}
drm_mode_debug_printmodeline(adj_mode);
@@ -1366,10 +1698,10 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
sde_crtc = to_sde_crtc(crtc);
dev = crtc->dev;
- if (!sde_crtc->num_mixers)
+ if (!sde_crtc->num_mixers) {
_sde_crtc_setup_mixers(crtc);
-
- _sde_crtc_setup_lm_bounds(crtc, crtc->state);
+ _sde_crtc_setup_lm_bounds(crtc, crtc->state);
+ }
if (sde_crtc->event) {
WARN_ON(sde_crtc->event);
@@ -2117,6 +2449,11 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
}
}
+ rc = _sde_crtc_check_rois(crtc, state);
+ if (rc) {
+ SDE_ERROR("crtc%d failed roi check %d\n", crtc->base.id, rc);
+ goto end;
+ }
end:
_sde_crtc_rp_free_unused(&cstate->rp);
@@ -2243,6 +2580,9 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
"dim_layer_v1", 0x0, 0, ~0, 0, CRTC_PROP_DIM_LAYER_V1);
}
+ msm_property_install_volatile_range(&sde_crtc->property_info,
+ "sde_drm_roi_v1", 0x0, 0, ~0, 0, CRTC_PROP_ROI_V1);
+
sde_kms_info_reset(info);
sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion);
@@ -2315,6 +2655,9 @@ static int sde_crtc_atomic_set_property(struct drm_crtc *crtc,
case CRTC_PROP_DIM_LAYER_V1:
_sde_crtc_set_dim_layer_v1(cstate, (void *)val);
break;
+ case CRTC_PROP_ROI_V1:
+ ret = _sde_crtc_set_roi_v1(state, (void *)val);
+ break;
default:
/* nothing to do */
break;
@@ -2364,19 +2707,28 @@ static int sde_crtc_atomic_get_property(struct drm_crtc *crtc,
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
int i, ret = -EINVAL;
+	bool conn_offset = false;
if (!crtc || !state) {
SDE_ERROR("invalid argument(s)\n");
} else {
sde_crtc = to_sde_crtc(crtc);
cstate = to_sde_crtc_state(state);
+
+ for (i = 0; i < cstate->num_connectors; ++i) {
+ conn_offset = sde_connector_needs_offset(
+ cstate->connectors[i]);
+ if (conn_offset)
+ break;
+ }
+
i = msm_property_index(&sde_crtc->property_info, property);
if (i == CRTC_PROP_OUTPUT_FENCE) {
uint32_t offset = sde_crtc_get_property(cstate,
CRTC_PROP_OUTPUT_FENCE_OFFSET);
- ret = sde_fence_create(
- &sde_crtc->output_fence, val, offset);
+ ret = sde_fence_create(&sde_crtc->output_fence, val,
+ offset + conn_offset);
if (ret)
SDE_ERROR("fence create failed\n");
} else {
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 36231d4..7ad0955 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -250,6 +250,11 @@ struct sde_crtc_respool {
* @rsc_client : sde rsc client when mode is valid
* @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
* Origin top left of CRTC.
+ * @crtc_roi : Current CRTC ROI. Possibly sub-rectangle of mode.
+ * Origin top left of CRTC.
+ * @lm_roi : Current LM ROI, possibly sub-rectangle of mode.
+ * Origin top left of CRTC.
+ * @user_roi_list : List of user-requested ROIs, as set via the ROI property
* @property_values: Current crtc property values
* @input_fence_timeout_ns : Cached input fence timeout, in ns
* @property_blobs: Reference pointers for blob properties
@@ -270,6 +275,9 @@ struct sde_crtc_state {
bool rsc_update;
struct sde_rect lm_bounds[CRTC_DUAL_MIXERS];
+ struct sde_rect crtc_roi;
+ struct sde_rect lm_roi[CRTC_DUAL_MIXERS];
+ struct msm_roi_list user_roi_list;
uint64_t property_values[CRTC_PROP_COUNT];
uint64_t input_fence_timeout_ns;
@@ -433,4 +441,14 @@ void *sde_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag);
*/
void sde_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag);
+/**
+ * sde_crtc_get_crtc_roi - retrieve the crtc_roi from the given state object
+ * used to allow the planes to adjust their final lm out_xy value in the
+ * case of partial update
+ * @state: Pointer to crtc state
+ * @crtc_roi: Output pointer to crtc roi in the given state
+ */
+void sde_crtc_get_crtc_roi(struct drm_crtc_state *state,
+ const struct sde_rect **crtc_roi);
+
#endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 8c41b12..742ea20 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -284,6 +284,24 @@ void sde_encoder_helper_split_config(
}
}
+static void _sde_encoder_adjust_mode(struct drm_connector *connector,
+ struct drm_display_mode *adj_mode)
+{
+ struct drm_display_mode *cur_mode;
+
+ if (!connector || !adj_mode)
+ return;
+
+ list_for_each_entry(cur_mode, &connector->modes, head) {
+ if (cur_mode->vdisplay == adj_mode->vdisplay &&
+ cur_mode->hdisplay == adj_mode->hdisplay &&
+ cur_mode->vrefresh == adj_mode->vrefresh) {
+ adj_mode->private = cur_mode->private;
+ adj_mode->private_flags = cur_mode->private_flags;
+ }
+ }
+}
+
static int sde_encoder_virt_atomic_check(
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
@@ -312,6 +330,15 @@ static int sde_encoder_virt_atomic_check(
adj_mode = &crtc_state->adjusted_mode;
SDE_EVT32(DRMID(drm_enc));
+ /*
+ * display drivers may populate private fields of the drm display mode
+ * structure while registering possible modes of a connector with DRM.
+	 * These private fields are not carried back when DRM invokes the
+	 * mode_set callbacks, so look up the matching connector mode here
+	 * and copy its private fields into the given adjusted mode.
+ */
+ _sde_encoder_adjust_mode(conn_state->connector, adj_mode);
+
/* perform atomic check on the first physical encoder (master) */
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 11cca1f..cfa3b5e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -490,8 +490,8 @@ static uint32_t _sde_copy_formats(
return 0;
for (i = 0, cur_pos = dst_list_pos;
- (cur_pos < (dst_list_size - 1)) && src_list[i].fourcc_format
- && (i < src_list_size); ++i, ++cur_pos)
+ (cur_pos < (dst_list_size - 1)) && (i < src_list_size)
+ && src_list[i].fourcc_format; ++i, ++cur_pos)
dst_list[cur_pos] = src_list[i];
dst_list[cur_pos].fourcc_format = 0;
@@ -565,7 +565,7 @@ static int _validate_dt_entry(struct device_node *np,
rc = -EINVAL;
}
*off_count = 0;
- memset(prop_count, 0, sizeof(int *) * prop_size);
+ memset(prop_count, 0, sizeof(int) * prop_size);
return rc;
}
}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 2393e61..4a5479d 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -566,7 +566,8 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
.get_info = dsi_display_get_info,
.set_backlight = dsi_display_set_backlight,
.soft_reset = dsi_display_soft_reset,
- .pre_kickoff = dsi_conn_pre_kickoff
+ .pre_kickoff = dsi_conn_pre_kickoff,
+ .clk_ctrl = dsi_display_clk_ctrl
};
static const struct sde_connector_ops wb_ops = {
.post_init = sde_wb_connector_post_init,
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index bd6b302..8662207 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -2204,6 +2204,7 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
struct sde_rect src, dst;
+ const struct sde_rect *crtc_roi;
bool q16_data = true;
int idx;
@@ -2283,6 +2284,11 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
}
}
+	/* always re-program the output rects in the case of a partial update */
+ sde_crtc_get_crtc_roi(crtc->state, &crtc_roi);
+ if (!sde_kms_rect_is_null(crtc_roi))
+ pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+
if (pstate->dirty & SDE_PLANE_DIRTY_RECTS)
memset(&(psde->pipe_cfg), 0, sizeof(struct sde_hw_pipe_cfg));
@@ -2320,6 +2326,13 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
src.y &= ~0x1;
}
+ /*
+	 * adjust the sspp's layer mixer position to the active lm origin
+	 * when a partial update is in effect
+ */
+ dst.x -= crtc_roi->x;
+ dst.y -= crtc_roi->y;
+
psde->pipe_cfg.src_rect = src;
psde->pipe_cfg.dst_rect = dst;
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 62efe8e..9a68dbe 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -25,10 +25,44 @@
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/sde_io_util.h>
+#include <linux/sde_rsc.h>
#include "sde_power_handle.h"
#include "sde_trace.h"
+static void sde_power_event_trigger_locked(struct sde_power_handle *phandle,
+ u32 event_type)
+{
+ struct sde_power_event *event;
+
+ list_for_each_entry(event, &phandle->event_list, list) {
+ if (event->event_type & event_type)
+ event->cb_fnc(event_type, event->usr);
+ }
+}
+
+static int sde_power_rsc_update(struct sde_power_handle *phandle, bool enable)
+{
+ u32 rsc_state;
+
+ /* creates the rsc client on the first enable */
+ if (!phandle->rsc_client_init) {
+ phandle->rsc_client = sde_rsc_client_create(SDE_RSC_INDEX,
+ "sde_power_handle", false);
+ if (IS_ERR_OR_NULL(phandle->rsc_client)) {
+			pr_debug("sde rsc client create failed: %ld\n",
+ PTR_ERR(phandle->rsc_client));
+ phandle->rsc_client = NULL;
+ }
+ phandle->rsc_client_init = true;
+ }
+
+ rsc_state = enable ? SDE_RSC_CLK_STATE : SDE_RSC_IDLE_STATE;
+
+ return sde_rsc_client_state_update(phandle->rsc_client,
+ rsc_state, NULL, -1);
+}
+
struct sde_power_client *sde_power_client_create(
struct sde_power_handle *phandle, char *client_name)
{
@@ -48,6 +82,7 @@ struct sde_power_client *sde_power_client_create(
strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
client->usecase_ndx = VOTE_INDEX_DISABLE;
client->id = id;
+ client->active = true;
pr_debug("client %s created:%pK id :%d\n", client_name,
client, id);
id++;
@@ -62,6 +97,9 @@ void sde_power_client_destroy(struct sde_power_handle *phandle,
{
if (!client || !phandle) {
pr_err("reg bus vote: invalid client handle\n");
+ } else if (!client->active) {
+ pr_err("sde power deinit already done\n");
+ kfree(client);
} else {
pr_debug("bus vote client %s destroyed:%pK id:%u\n",
client->name, client, client->id);
@@ -661,6 +699,11 @@ int sde_power_resource_init(struct platform_device *pdev,
}
INIT_LIST_HEAD(&phandle->power_client_clist);
+ INIT_LIST_HEAD(&phandle->event_list);
+
+ phandle->rsc_client = NULL;
+ phandle->rsc_client_init = false;
+
mutex_init(&phandle->phandle_lock);
return rc;
@@ -672,10 +715,12 @@ int sde_power_resource_init(struct platform_device *pdev,
clk_err:
msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
vreg_err:
- devm_kfree(&pdev->dev, mp->vreg_config);
+ if (mp->vreg_config)
+ devm_kfree(&pdev->dev, mp->vreg_config);
mp->num_vreg = 0;
parse_vreg_err:
- devm_kfree(&pdev->dev, mp->clk_config);
+ if (mp->clk_config)
+ devm_kfree(&pdev->dev, mp->clk_config);
mp->num_clk = 0;
end:
return rc;
@@ -685,6 +730,8 @@ void sde_power_resource_deinit(struct platform_device *pdev,
struct sde_power_handle *phandle)
{
struct dss_module_power *mp;
+ struct sde_power_client *curr_client, *next_client;
+ struct sde_power_event *curr_event, *next_event;
if (!phandle || !pdev) {
pr_err("invalid input param\n");
@@ -692,6 +739,26 @@ void sde_power_resource_deinit(struct platform_device *pdev,
}
mp = &phandle->mp;
+ mutex_lock(&phandle->phandle_lock);
+ list_for_each_entry_safe(curr_client, next_client,
+ &phandle->power_client_clist, list) {
+ pr_err("cliend:%s-%d still registered with refcount:%d\n",
+ curr_client->name, curr_client->id,
+ curr_client->refcount);
+ curr_client->active = false;
+ list_del(&curr_client->list);
+ }
+
+ list_for_each_entry_safe(curr_event, next_event,
+ &phandle->event_list, list) {
+ pr_err("event:%d, client:%s still registered\n",
+ curr_event->event_type,
+ curr_event->client_name);
+ curr_event->active = false;
+ list_del(&curr_event->list);
+ }
+ mutex_unlock(&phandle->phandle_lock);
+
sde_power_data_bus_unregister(&phandle->data_bus_handle);
sde_power_reg_bus_unregister(phandle->reg_bus_hdl);
@@ -708,6 +775,9 @@ void sde_power_resource_deinit(struct platform_device *pdev,
mp->num_vreg = 0;
mp->num_clk = 0;
+
+ if (phandle->rsc_client)
+ sde_rsc_client_destroy(phandle->rsc_client);
}
int sde_power_resource_enable(struct sde_power_handle *phandle,
@@ -757,6 +827,9 @@ int sde_power_resource_enable(struct sde_power_handle *phandle,
goto end;
if (enable) {
+ sde_power_event_trigger_locked(phandle,
+ SDE_POWER_EVENT_PRE_ENABLE);
+
rc = sde_power_data_bus_update(&phandle->data_bus_handle,
enable);
if (rc) {
@@ -764,10 +837,13 @@ int sde_power_resource_enable(struct sde_power_handle *phandle,
goto data_bus_hdl_err;
}
- rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
- if (rc) {
- pr_err("failed to enable vregs rc=%d\n", rc);
- goto vreg_err;
+ if (!phandle->rsc_client_init) {
+ rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
+ enable);
+ if (rc) {
+ pr_err("failed to enable vregs rc=%d\n", rc);
+ goto vreg_err;
+ }
}
rc = sde_power_reg_bus_update(phandle->reg_bus_hdl,
@@ -777,20 +853,39 @@ int sde_power_resource_enable(struct sde_power_handle *phandle,
goto reg_bus_hdl_err;
}
+ rc = sde_power_rsc_update(phandle, true);
+ if (rc) {
+ pr_err("failed to update rsc\n");
+ goto rsc_err;
+ }
+
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
if (rc) {
pr_err("clock enable failed rc:%d\n", rc);
goto clk_err;
}
+
+ sde_power_event_trigger_locked(phandle,
+ SDE_POWER_EVENT_POST_ENABLE);
+
} else {
+ sde_power_event_trigger_locked(phandle,
+ SDE_POWER_EVENT_PRE_DISABLE);
+
msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+ sde_power_rsc_update(phandle, false);
+
sde_power_reg_bus_update(phandle->reg_bus_hdl,
max_usecase_ndx);
- msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
-
+ if (!phandle->rsc_client_init)
+ msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
+ enable);
sde_power_data_bus_update(&phandle->data_bus_handle, enable);
+
+ sde_power_event_trigger_locked(phandle,
+ SDE_POWER_EVENT_POST_DISABLE);
}
end:
@@ -798,9 +893,12 @@ int sde_power_resource_enable(struct sde_power_handle *phandle,
return rc;
clk_err:
+ sde_power_rsc_update(phandle, false);
+rsc_err:
sde_power_reg_bus_update(phandle->reg_bus_hdl, prev_usecase_ndx);
reg_bus_hdl_err:
- msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
+ if (!phandle->rsc_client_init)
+ msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
vreg_err:
sde_power_data_bus_update(&phandle->data_bus_handle, 0);
data_bus_hdl_err:
@@ -903,3 +1001,52 @@ struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle,
return clk;
}
+
+struct sde_power_event *sde_power_handle_register_event(
+ struct sde_power_handle *phandle,
+ u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+ void *usr, char *client_name)
+{
+ struct sde_power_event *event;
+
+ if (!phandle) {
+ pr_err("invalid power handle\n");
+ return ERR_PTR(-EINVAL);
+ } else if (!cb_fnc || !event_type) {
+ pr_err("no callback fnc or event type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ event = kzalloc(sizeof(struct sde_power_event), GFP_KERNEL);
+ if (!event)
+ return ERR_PTR(-ENOMEM);
+
+ event->event_type = event_type;
+ event->cb_fnc = cb_fnc;
+ event->usr = usr;
+ strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
+ event->active = true;
+
+ mutex_lock(&phandle->phandle_lock);
+ list_add(&event->list, &phandle->event_list);
+ mutex_unlock(&phandle->phandle_lock);
+
+ return event;
+}
+
+void sde_power_handle_unregister_event(
+ struct sde_power_handle *phandle,
+ struct sde_power_event *event)
+{
+ if (!phandle || !event) {
+ pr_err("invalid phandle or event\n");
+ } else if (!event->active) {
+ pr_err("power handle deinit already done\n");
+ kfree(event);
+ } else {
+ mutex_lock(&phandle->phandle_lock);
+ list_del_init(&event->list);
+ mutex_unlock(&phandle->phandle_lock);
+ kfree(event);
+ }
+}
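
The sde_power_rsc_update() helper added above creates the RSC client lazily on the first enable and never retries: if creation fails, the power handle simply keeps running without RSC votes. A minimal sketch of that once-only initialization idiom, assuming hypothetical stand-ins (demo_client_create()/demo_client_vote()) rather than the real sde_rsc API:

#include <linux/err.h>
#include <linux/printk.h>

struct demo_client;
struct demo_client *demo_client_create(void);			/* illustrative stand-in */
int demo_client_vote(struct demo_client *c, bool enable);	/* illustrative stand-in */

struct demo_handle {
	struct demo_client *client;
	bool client_init;
};

static int demo_update(struct demo_handle *h, bool enable)
{
	if (!h->client_init) {
		h->client = demo_client_create();
		if (IS_ERR_OR_NULL(h->client)) {
			pr_debug("client create failed\n");
			h->client = NULL;	/* carry on without the client */
		}
		h->client_init = true;		/* success or failure, never retried */
	}

	return demo_client_vote(h->client, enable);
}

Tracking the attempt in a separate client_init flag keeps a create path that may legitimately fail at boot from being re-run on every enable.
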
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 4e262a3..b26ef9f 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -23,6 +23,18 @@
#include <linux/sde_io_util.h>
+/* event will be triggered before power handler disable */
+#define SDE_POWER_EVENT_PRE_DISABLE 0x1
+
+/* event will be triggered after power handler disable */
+#define SDE_POWER_EVENT_POST_DISABLE 0x2
+
+/* event will be triggered before power handler enable */
+#define SDE_POWER_EVENT_PRE_ENABLE 0x4
+
+/* event will be triggered after power handler enable */
+#define SDE_POWER_EVENT_POST_ENABLE 0x8
+
/**
* mdss_bus_vote_type: register bus vote type
* VOTE_INDEX_DISABLE: removes the client vote
@@ -59,6 +71,7 @@ enum sde_power_handle_data_bus_client {
* @list: list to attach power handle master list
* @ab: arbitrated bandwidth for each bus client
* @ib: instantaneous bandwidth for each bus client
+ * @active: indicates whether the power client is still active
*/
struct sde_power_client {
char name[MAX_CLIENT_NAME_LEN];
@@ -68,6 +81,7 @@ struct sde_power_client {
struct list_head list;
u64 ab[SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
u64 ib[SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+ bool active;
};
/**
@@ -90,6 +104,24 @@ struct sde_power_data_bus_handle {
u32 ao_bw_uc_idx;
};
+/*
+ * struct sde_power_event - local event registration structure
+ * @client_name: name of the client registering
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback event trigger
+ * @event_type: refer to SDE_POWER_EVENT_*
+ * @list: list to attach event master list
+ * @active: indicates whether the event registration is still active
+ */
+struct sde_power_event {
+ char client_name[MAX_CLIENT_NAME_LEN];
+ void (*cb_fnc)(u32 event_type, void *usr);
+ void *usr;
+ u32 event_type;
+ struct list_head list;
+ bool active;
+};
+
/**
* struct sde_power_handle: power handle main struct
* @mp: module power for clock and regulator
@@ -99,6 +131,9 @@ struct sde_power_data_bus_handle {
* @usecase_ndx: current usecase index
* @reg_bus_hdl: current register bus handle
* @data_bus_handle: context structure for data bus control
+ * @event_list: current power handle event list
+ * @rsc_client: sde rsc client pointer
+ * @rsc_client_init: boolean to control rsc client create
*/
struct sde_power_handle {
struct dss_module_power mp;
@@ -108,6 +143,9 @@ struct sde_power_handle {
u32 current_usecase_ndx;
u32 reg_bus_hdl;
struct sde_power_data_bus_handle data_bus_handle;
+ struct list_head event_list;
+ struct sde_rsc_client *rsc_client;
+ bool rsc_client_init;
};
/**
@@ -226,4 +264,28 @@ int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
void sde_power_data_bus_bandwidth_ctrl(struct sde_power_handle *phandle,
struct sde_power_client *pclient, int enable);
+/**
+ * sde_power_handle_register_event - register a callback function for an event.
+ * Clients can register for multiple events with a single registration call.
+ * Any block with access to phandle can register for the event
+ * notification.
+ * @phandle: power handle containing the resources
+ * @event_type: event type to register; refer to SDE_POWER_EVENT_*
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback on event trigger
+ * @client_name: name of the client registering for the event
+ *
+ * Return: event pointer if success, or error code otherwise
+ */
+struct sde_power_event *sde_power_handle_register_event(
+ struct sde_power_handle *phandle,
+ u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+ void *usr, char *client_name);
+/**
+ * sde_power_handle_unregister_event - unregister callback for event(s)
+ * @phandle: power handle containing the resources
+ * @event: event pointer returned after power handle register
+ */
+void sde_power_handle_unregister_event(struct sde_power_handle *phandle,
+ struct sde_power_event *event);
+
#endif /* _SDE_POWER_HANDLE_H_ */
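
To make the event API above concrete, here is a hedged consumer sketch showing the intended register/unregister pairing; the callback, client name and wrapper functions are illustrative and not part of the patch. The "_locked" trigger helper suggests the callback runs with the power handle lock held, so it should stay short:

#include <linux/err.h>
#include "sde_power_handle.h"

static struct sde_power_event *demo_event;

static void demo_power_event_cb(u32 event_type, void *usr)
{
	if (event_type & SDE_POWER_EVENT_POST_ENABLE)
		pr_debug("power resources are up\n");
	else if (event_type & SDE_POWER_EVENT_PRE_DISABLE)
		pr_debug("power resources about to go down\n");
}

static int demo_register(struct sde_power_handle *phandle, void *usr)
{
	/* one registration may cover several events by OR-ing the masks */
	demo_event = sde_power_handle_register_event(phandle,
			SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_PRE_DISABLE,
			demo_power_event_cb, usr, "demo_client");

	return IS_ERR(demo_event) ? PTR_ERR(demo_event) : 0;
}

static void demo_unregister(struct sde_power_handle *phandle)
{
	sde_power_handle_unregister_event(phandle, demo_event);
}
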
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index c1b812a..d762904 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -388,7 +388,7 @@ static int sde_rsc_switch_to_idle(struct sde_rsc_priv *rsc)
static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
struct sde_rsc_cmd_config *config,
- struct sde_rsc_client *caller_client, bool wait_req)
+ struct sde_rsc_client *caller_client)
{
struct sde_rsc_client *client;
int rc = STATE_UPDATE_NOT_ALLOWED;
@@ -416,8 +416,8 @@ static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
if (rsc->hw_ops.state_update)
rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
- /* wait for vsync */
- if (!rc && wait_req)
+ /* wait for vsync for vid to cmd state switch */
+ if (!rc && (rsc->current_state == SDE_RSC_VID_STATE))
drm_wait_one_vblank(rsc->master_drm,
rsc->primary_client->crtc_id);
end:
@@ -436,13 +436,19 @@ static bool sde_rsc_switch_to_clk(struct sde_rsc_priv *rsc)
if (rsc->hw_ops.state_update)
rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CLK_STATE);
+
+ /* wait for vsync for cmd to clk state switch */
+ if (!rc && rsc->primary_client &&
+ (rsc->current_state == SDE_RSC_CMD_STATE))
+ drm_wait_one_vblank(rsc->master_drm,
+ rsc->primary_client->crtc_id);
end:
return rc;
}
static bool sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc,
struct sde_rsc_cmd_config *config,
- struct sde_rsc_client *caller_client, bool wait_req)
+ struct sde_rsc_client *caller_client)
{
int rc = 0;
@@ -454,8 +460,9 @@ static bool sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc,
if (rsc->hw_ops.state_update)
rc = rsc->hw_ops.state_update(rsc, SDE_RSC_VID_STATE);
- /* wait for vsync */
- if (!rc && rsc->primary_client && wait_req)
+ /* wait for vsync for cmd to vid state switch */
+ if (!rc && rsc->primary_client &&
+ (rsc->current_state == SDE_RSC_CMD_STATE))
drm_wait_one_vblank(rsc->master_drm,
rsc->primary_client->crtc_id);
return rc;
@@ -481,7 +488,6 @@ int sde_rsc_client_state_update(struct sde_rsc_client *caller_client,
{
int rc = 0;
struct sde_rsc_priv *rsc;
- bool wait_requested = false;
if (!caller_client) {
pr_err("invalid client for rsc state update\n");
@@ -512,11 +518,7 @@ int sde_rsc_client_state_update(struct sde_rsc_client *caller_client,
__builtin_return_address(0), rsc->current_state,
caller_client->name, state);
- /* only switch state needs vsync wait */
- wait_requested = (rsc->current_state == SDE_RSC_VID_STATE) ||
- (rsc->current_state == SDE_RSC_CMD_STATE);
-
- if (rsc->power_collapse)
+ if (rsc->current_state == SDE_RSC_IDLE_STATE)
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
switch (state) {
@@ -526,7 +528,7 @@ int sde_rsc_client_state_update(struct sde_rsc_client *caller_client,
/* video state client might be exiting; try cmd state switch */
if (rc == TRY_CMD_MODE_SWITCH) {
rc = sde_rsc_switch_to_cmd(rsc, NULL,
- rsc->primary_client, wait_requested);
+ rsc->primary_client);
if (!rc)
state = SDE_RSC_CMD_STATE;
@@ -539,13 +541,11 @@ int sde_rsc_client_state_update(struct sde_rsc_client *caller_client,
break;
case SDE_RSC_CMD_STATE:
- rc = sde_rsc_switch_to_cmd(rsc, config, caller_client,
- wait_requested);
+ rc = sde_rsc_switch_to_cmd(rsc, config, caller_client);
break;
case SDE_RSC_VID_STATE:
- rc = sde_rsc_switch_to_vid(rsc, config, caller_client,
- wait_requested);
+ rc = sde_rsc_switch_to_vid(rsc, config, caller_client);
break;
case SDE_RSC_CLK_STATE:
@@ -561,7 +561,7 @@ int sde_rsc_client_state_update(struct sde_rsc_client *caller_client,
rc = 0;
goto clk_disable;
} else if (rc) {
- pr_err("state update failed rc:%d\n", rc);
+ pr_debug("state:%d update failed rc:%d\n", state, rc);
goto clk_disable;
}
@@ -569,7 +569,7 @@ int sde_rsc_client_state_update(struct sde_rsc_client *caller_client,
rsc->current_state = state;
clk_disable:
- if (rsc->power_collapse)
+ if (rsc->current_state == SDE_RSC_IDLE_STATE)
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
end:
mutex_unlock(&rsc->client_lock);
@@ -615,14 +615,9 @@ int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
caller_client->name, ab_vote, ib_vote);
mutex_lock(&rsc->client_lock);
- if ((caller_client->current_state == SDE_RSC_IDLE_STATE) ||
- (rsc->current_state == SDE_RSC_IDLE_STATE)) {
-
- pr_err("invalid state: client state:%d rsc state:%d\n",
- caller_client->current_state, rsc->current_state);
- rc = -EINVAL;
- goto end;
- }
+ rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ if (rc)
+ goto clk_enable_fail;
if (rsc->hw_ops.is_amc_mode)
amc_mode = rsc->hw_ops.is_amc_mode(rsc);
@@ -644,14 +639,19 @@ int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
}
}
+ rpmh_invalidate(rsc->disp_rsc);
sde_power_data_bus_set_quota(&rsc->phandle, rsc->pclient,
SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, ab_vote, ib_vote);
+ rpmh_flush(rsc->disp_rsc);
if (rsc->hw_ops.tcs_use_ok)
rsc->hw_ops.tcs_use_ok(rsc);
end:
+ sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
+clk_enable_fail:
mutex_unlock(&rsc->client_lock);
+
return rc;
}
EXPORT_SYMBOL(sde_rsc_client_vote);
@@ -668,6 +668,10 @@ static int _sde_debugfs_status_show(struct seq_file *s, void *data)
rsc = s->private;
mutex_lock(&rsc->client_lock);
+ ret = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ if (ret)
+ goto end;
+
seq_printf(s, "rsc current state:%d\n", rsc->current_state);
seq_printf(s, "wraper backoff time(ns):%d\n",
rsc->timer_config.static_wakeup_time_ns);
@@ -691,17 +695,15 @@ static int _sde_debugfs_status_show(struct seq_file *s, void *data)
seq_printf(s, "\t client:%s state:%d\n",
client->name, client->current_state);
- sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
-
if (rsc->hw_ops.debug_show) {
ret = rsc->hw_ops.debug_show(s, rsc);
if (ret)
pr_err("sde rsc: hw debug failed ret:%d\n", ret);
}
-
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
- mutex_unlock(&rsc->client_lock);
+end:
+ mutex_unlock(&rsc->client_lock);
return 0;
}
@@ -722,20 +724,23 @@ static ssize_t _sde_debugfs_mode_ctrl_read(struct file *file, char __user *buf,
{
struct sde_rsc_priv *rsc = file->private_data;
char buffer[MAX_BUFFER_SIZE];
- int blen = 0;
+ int blen = 0, rc;
if (*ppos || !rsc || !rsc->hw_ops.mode_ctrl)
return 0;
mutex_lock(&rsc->client_lock);
- sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ if (rc)
+ goto end;
blen = rsc->hw_ops.mode_ctrl(rsc, MODE_READ, buffer,
MAX_BUFFER_SIZE, 0);
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
- mutex_unlock(&rsc->client_lock);
+end:
+ mutex_unlock(&rsc->client_lock);
if (blen < 0)
return 0;
@@ -752,6 +757,7 @@ static ssize_t _sde_debugfs_mode_ctrl_write(struct file *file,
struct sde_rsc_priv *rsc = file->private_data;
char *input, *mode;
u32 mode0_state = 0, mode1_state = 0, mode2_state = 0;
+ int rc;
if (!rsc || !rsc->hw_ops.mode_ctrl)
return 0;
@@ -767,7 +773,9 @@ static ssize_t _sde_debugfs_mode_ctrl_write(struct file *file,
input[count - 1] = '\0';
mutex_lock(&rsc->client_lock);
- sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ if (rc)
+ goto clk_enable_fail;
mode = strnstr(input, "mode0=", strlen("mode0="));
if (mode) {
@@ -794,9 +802,10 @@ static ssize_t _sde_debugfs_mode_ctrl_write(struct file *file,
end:
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
+clk_enable_fail:
mutex_unlock(&rsc->client_lock);
- pr_err("req: mode0:%d mode1:%d mode2:%d\n", mode0_state, mode1_state,
+ pr_info("req: mode0:%d mode1:%d mode2:%d\n", mode0_state, mode1_state,
mode2_state);
kfree(input);
return count;
@@ -814,20 +823,23 @@ static ssize_t _sde_debugfs_vsync_mode_read(struct file *file, char __user *buf,
{
struct sde_rsc_priv *rsc = file->private_data;
char buffer[MAX_BUFFER_SIZE];
- int blen = 0;
+ int blen = 0, rc;
if (*ppos || !rsc || !rsc->hw_ops.hw_vsync)
return 0;
mutex_lock(&rsc->client_lock);
- sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ if (rc)
+ goto end;
blen = rsc->hw_ops.hw_vsync(rsc, VSYNC_READ, buffer,
MAX_BUFFER_SIZE, 0);
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
- mutex_unlock(&rsc->client_lock);
+end:
+ mutex_unlock(&rsc->client_lock);
if (blen < 0)
return 0;
@@ -844,6 +856,7 @@ static ssize_t _sde_debugfs_vsync_mode_write(struct file *file,
struct sde_rsc_priv *rsc = file->private_data;
char *input, *vsync_mode;
u32 vsync_state = 0;
+ int rc;
if (!rsc || !rsc->hw_ops.hw_vsync)
return 0;
@@ -865,7 +878,9 @@ static ssize_t _sde_debugfs_vsync_mode_write(struct file *file,
}
mutex_lock(&rsc->client_lock);
- sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ if (rc)
+ goto end;
if (vsync_state)
rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL,
@@ -874,8 +889,9 @@ static ssize_t _sde_debugfs_vsync_mode_write(struct file *file,
rsc->hw_ops.hw_vsync(rsc, VSYNC_DISABLE, NULL, 0, 0);
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
- mutex_unlock(&rsc->client_lock);
+end:
+ mutex_unlock(&rsc->client_lock);
kfree(input);
return count;
}
@@ -930,6 +946,8 @@ static void sde_rsc_deinit(struct platform_device *pdev,
msm_dss_iounmap(&rsc->wrapper_io);
if (rsc->drv_io.base)
msm_dss_iounmap(&rsc->drv_io);
+ if (rsc->disp_rsc)
+ rpmh_release(rsc->disp_rsc);
if (rsc->pclient)
sde_power_client_destroy(&rsc->phandle, rsc->pclient);
@@ -1038,6 +1056,17 @@ static int sde_rsc_probe(struct platform_device *pdev)
goto sde_rsc_fail;
}
+ rsc->disp_rsc = rpmh_get_byname(pdev, "disp_rsc");
+ if (IS_ERR_OR_NULL(rsc->disp_rsc)) {
+ ret = PTR_ERR(rsc->disp_rsc);
+ rsc->disp_rsc = NULL;
+ pr_err("sde rsc:get display rsc failed ret:%d\n", ret);
+ goto sde_rsc_fail;
+ }
+ rpmh_invalidate(rsc->disp_rsc);
+ /* call flush to disable the disp rsc interrupt */
+ rpmh_flush(rsc->disp_rsc);
+
ret = msm_dss_ioremap_byname(pdev, &rsc->wrapper_io, "wrapper");
if (ret) {
pr_err("sde rsc: wrapper io data mapping failed ret=%d\n", ret);
@@ -1084,7 +1113,6 @@ static int sde_rsc_probe(struct platform_device *pdev)
snprintf(name, MAX_RSC_CLIENT_NAME_LEN, "%s%d", "sde_rsc", counter);
_sde_rsc_init_debugfs(rsc, name);
counter++;
- rsc->power_collapse = true;
ret = component_add(&pdev->dev, &sde_rsc_comp_ops);
if (ret)
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index de579c1..b63fbc6 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -36,6 +36,7 @@
#define SDE_RSCC_AMC_TCS_MODE_IRQ_STATUS_DRV0 0x1c00
#define SDE_RSCC_SOFT_WAKEUP_TIME_LO_DRV0 0xc04
+#define SDE_RSCC_SOFT_WAKEUP_TIME_HI_DRV0 0xc08
#define SDE_RSCC_MAX_IDLE_DURATION_DRV0 0xc0c
#define SDE_RSC_SOLVER_TIME_SLOT_TABLE_0_DRV0 0x1000
#define SDE_RSC_SOLVER_TIME_SLOT_TABLE_1_DRV0 0x1004
@@ -224,7 +225,9 @@ static int rsc_hw_solver_init(struct sde_rsc_priv *rsc)
pr_debug("rsc solver init\n");
dss_reg_w(&rsc->drv_io, SDE_RSCC_SOFT_WAKEUP_TIME_LO_DRV0,
- 0x7FFFFFFF, rsc->debug_mode);
+ 0xFFFFFFFF, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SOFT_WAKEUP_TIME_HI_DRV0,
+ 0xFFFFFFFF, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_MAX_IDLE_DURATION_DRV0,
0xEFFFFFFF, rsc->debug_mode);
@@ -308,6 +311,15 @@ int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc)
rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_PC);
+ /* update qtimers to high during clk & video mode state */
+ if ((rsc->current_state == SDE_RSC_VID_STATE) ||
+ (rsc->current_state == SDE_RSC_CLK_STATE)) {
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
+ 0xffffffff, rsc->debug_mode);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
+ 0xffffffff, rsc->debug_mode);
+ }
+
wrapper_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
rsc->debug_mode);
wrapper_status |= BIT(3);
@@ -357,8 +369,6 @@ int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc)
return 0;
end:
- regulator_set_mode(rsc->fs, REGULATOR_MODE_NORMAL);
-
rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE);
return rc;
@@ -378,8 +388,7 @@ int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc, enum sde_rsc_state state)
if ((state == SDE_RSC_VID_STATE) || (state == SDE_RSC_CLK_STATE)) {
reg = dss_reg_r(&rsc->wrapper_io,
SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
- reg |= BIT(8);
- reg &= ~(BIT(1) | BIT(0));
+ reg &= ~(BIT(8) | BIT(0));
dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
reg, rsc->debug_mode);
}
@@ -411,7 +420,7 @@ int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc, enum sde_rsc_state state)
rc = 0;
break;
}
- usleep_range(1, 2);
+ usleep_range(10, 100);
}
reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT,
@@ -419,14 +428,9 @@ int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc, enum sde_rsc_state state)
reg &= ~BIT(13);
dss_reg_w(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT,
reg, rsc->debug_mode);
-
if (rc)
pr_err("vdd reg is not enabled yet\n");
- rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_NORMAL);
- if (rc)
- pr_err("vdd reg normal mode set failed rc:%d\n", rc);
-
rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE);
return rc;
@@ -454,6 +458,9 @@ static int sde_rsc_state_update(struct sde_rsc_priv *rsc,
0x1, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SOLVER_OVERRIDE_CTRL_DRV0,
0x0, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io,
+ SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0, 0x7,
+ rsc->debug_mode);
reg = dss_reg_r(&rsc->wrapper_io,
SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
reg |= (BIT(0) | BIT(8));
@@ -477,8 +484,9 @@ static int sde_rsc_state_update(struct sde_rsc_priv *rsc,
reg &= ~(BIT(1) | BIT(0));
dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
reg, rsc->debug_mode);
- dss_reg_w(&rsc->drv_io, SDE_RSCC_SOLVER_OVERRIDE_CTRL_DRV0,
- 0x1, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io,
+ SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0, 0x5,
+ rsc->debug_mode);
/* make sure that solver mode is override */
wmb();
@@ -487,6 +495,17 @@ static int sde_rsc_state_update(struct sde_rsc_priv *rsc,
case SDE_RSC_CLK_STATE:
pr_debug("clk state handling\n");
+
+ reg = dss_reg_r(&rsc->wrapper_io,
+ SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
+ reg &= ~(BIT(8) | BIT(0));
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+ reg, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io,
+ SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0, 0x5,
+ rsc->debug_mode);
+ /* make sure that solver mode is disabled */
+ wmb();
break;
case SDE_RSC_IDLE_STATE:
diff --git a/drivers/gpu/drm/msm/sde_rsc_priv.h b/drivers/gpu/drm/msm/sde_rsc_priv.h
index 30810fe..b83a866 100644
--- a/drivers/gpu/drm/msm/sde_rsc_priv.h
+++ b/drivers/gpu/drm/msm/sde_rsc_priv.h
@@ -111,6 +111,7 @@ struct sde_rsc_timer_config {
* @pclient: module power client of phandle
* @fs: "MDSS GDSC" handle
*
+ * @disp_rsc: display rsc handle
* @drv_io: sde drv io data mapping
* @wrapper_io: wrapper io data mapping
*
@@ -141,6 +142,7 @@ struct sde_rsc_priv {
struct sde_power_client *pclient;
struct regulator *fs;
+ struct rpmh_client *disp_rsc;
struct dss_io_data drv_io;
struct dss_io_data wrapper_io;
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 28d93a9..69b639a 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -678,6 +678,7 @@
#define A6XX_GMU_DCVS_RETURN 0x1CBFF
#define A6XX_GMU_CM3_SYSRESET 0x1F800
#define A6XX_GMU_CM3_BOOT_CONFIG 0x1F801
+#define A6XX_GMU_CM3_FW_BUSY 0x1F81A
#define A6XX_GMU_CM3_FW_INIT_RESULT 0x1F81C
#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL 0x1F8C0
#define A6XX_GMU_PWR_COL_INTER_FRAME_HYST 0x1F8C1
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 6609357..3c3f99f 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -857,13 +857,14 @@ static int a6xx_hm_enable(struct adreno_device *adreno_dev)
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct gmu_device *gmu = &device->gmu;
- if (!IS_ERR_OR_NULL(gmu->gx_gdsc)) {
- ret = regulator_enable(gmu->gx_gdsc);
- if (ret) {
- dev_err(&gmu->pdev->dev,
- "Failed to turn on GPU HM HS\n");
- return ret;
- }
+ if (regulator_is_enabled(gmu->gx_gdsc))
+ return 0;
+
+ ret = regulator_enable(gmu->gx_gdsc);
+ if (ret) {
+ dev_err(&gmu->pdev->dev,
+ "Failed to turn on GPU HM HS\n");
+ return ret;
}
ret = clk_set_rate(pwr->grp_clks[0],
@@ -885,15 +886,15 @@ static int a6xx_hm_disable(struct adreno_device *adreno_dev)
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct gmu_device *gmu = &device->gmu;
+ if (!regulator_is_enabled(gmu->gx_gdsc))
+ return 0;
+
clk_disable_unprepare(pwr->grp_clks[0]);
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->num_pwrlevels - 1].
gpu_freq);
- if (IS_ERR_OR_NULL(gmu->gx_gdsc))
- return 0;
-
return regulator_disable(gmu->gx_gdsc);
}
@@ -1302,16 +1303,11 @@ static int a6xx_wait_for_gmu_idle(struct adreno_device *adreno_dev)
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct gmu_device *gmu = &device->gmu;
- if (timed_poll_check(device, A6XX_GMU_RPMH_POWER_STATE,
- gmu->idle_level, GMU_START_TIMEOUT, 0xf)) {
- dev_err(&gmu->pdev->dev,
- "GMU is not going to powerstate %d\n",
- gmu->idle_level);
- return -ETIMEDOUT;
- }
+ /* TODO: Remove this register write when firmware is updated */
+ kgsl_gmu_regwrite(device, A6XX_GMU_CM3_FW_BUSY, 0);
if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
- 0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
+ 0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
dev_err(&gmu->pdev->dev, "GMU is not idling\n");
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index b1f832f..2a1d352 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -137,11 +137,8 @@ static void sync_event_print(struct seq_file *s,
break;
}
case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
- char fence_str[128];
-
- kgsl_dump_fence(sync_event->handle,
- fence_str, sizeof(fence_str));
- seq_printf(s, "sync: [%pK] %s", sync_event->handle, fence_str);
+ seq_printf(s, "sync: [%pK] %s", sync_event->handle,
+ sync_event->fence_name);
break;
}
default:
@@ -241,6 +238,9 @@ static void cmdobj_print(struct seq_file *s,
static void drawobj_print(struct seq_file *s,
struct kgsl_drawobj *drawobj)
{
+ if (!kref_get_unless_zero(&drawobj->refcount))
+ return;
+
if (drawobj->type == SYNCOBJ_TYPE)
syncobj_print(s, SYNCOBJ(drawobj));
else if ((drawobj->type == CMDOBJ_TYPE) ||
@@ -251,6 +251,7 @@ static void drawobj_print(struct seq_file *s,
print_flags(s, drawobj_flags, ARRAY_SIZE(drawobj_flags),
drawobj->flags);
+ kgsl_drawobj_put(drawobj);
seq_puts(s, "\n");
}
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 9f4e185..f77d438 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -95,6 +95,9 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
goto stats;
}
+ if (!kref_get_unless_zero(&drawobj->refcount))
+ goto stats;
+
if (drawobj->type == SYNCOBJ_TYPE) {
struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
@@ -106,6 +109,8 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
kgsl_dump_syncpoints(device, syncobj);
}
}
+
+ kgsl_drawobj_put(drawobj);
}
stats:
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 2283096..15f68bf 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1955,7 +1955,7 @@ static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
}
handle = kgsl_sync_fence_async_wait(event.fd,
- gpuobj_free_fence_func, entry);
+ gpuobj_free_fence_func, entry, NULL, 0);
/* if handle is NULL the fence has already signaled */
if (handle == NULL)
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index 3a87e6e..bca3d57 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -44,7 +44,7 @@ static struct kmem_cache *memobjs_cache;
static struct kmem_cache *sparseobjs_cache;
-static void drawobj_destroy_object(struct kref *kref)
+void kgsl_drawobj_destroy_object(struct kref *kref)
{
struct kgsl_drawobj *drawobj = container_of(kref,
struct kgsl_drawobj, refcount);
@@ -68,12 +68,6 @@ static void drawobj_destroy_object(struct kref *kref)
}
}
-static inline void drawobj_put(struct kgsl_drawobj *drawobj)
-{
- if (drawobj)
- kref_put(&drawobj->refcount, drawobj_destroy_object);
-}
-
void kgsl_dump_syncpoints(struct kgsl_device *device,
struct kgsl_drawobj_sync *syncobj)
{
@@ -100,16 +94,11 @@ void kgsl_dump_syncpoints(struct kgsl_device *device,
retired);
break;
}
- case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
- char fence_str[128];
-
- kgsl_dump_fence(event->handle,
- fence_str, sizeof(fence_str));
- dev_err(device->dev,
- " fence: %s\n", fence_str);
+ case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
+ dev_err(device->dev, " fence: %s\n",
+ event->fence_name);
break;
}
- }
}
}
@@ -117,13 +106,23 @@ static void syncobj_timer(unsigned long data)
{
struct kgsl_device *device;
struct kgsl_drawobj_sync *syncobj = (struct kgsl_drawobj_sync *) data;
- struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ struct kgsl_drawobj *drawobj;
struct kgsl_drawobj_sync_event *event;
unsigned int i;
- if (syncobj == NULL || drawobj->context == NULL)
+ if (syncobj == NULL)
return;
+ drawobj = DRAWOBJ(syncobj);
+
+ if (!kref_get_unless_zero(&drawobj->refcount))
+ return;
+
+ if (drawobj->context == NULL) {
+ kgsl_drawobj_put(drawobj);
+ return;
+ }
+
device = drawobj->context->device;
dev_err(device->dev,
@@ -147,18 +146,14 @@ static void syncobj_timer(unsigned long data)
dev_err(device->dev, " [%d] TIMESTAMP %d:%d\n",
i, event->context->id, event->timestamp);
break;
- case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
- char fence_str[128];
-
- kgsl_dump_fence(event->handle,
- fence_str, sizeof(fence_str));
+ case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
dev_err(device->dev, " [%d] FENCE %s\n",
- i, fence_str);
+ i, event->fence_name);
break;
}
- }
}
+ kgsl_drawobj_put(drawobj);
dev_err(device->dev, "--gpu syncpoint deadlock print end--\n");
}
@@ -204,7 +199,7 @@ static void drawobj_sync_func(struct kgsl_device *device,
drawobj_sync_expire(device, event);
kgsl_context_put(event->context);
- drawobj_put(&event->syncobj->base);
+ kgsl_drawobj_put(&event->syncobj->base);
}
static inline void memobj_list_free(struct list_head *list)
@@ -265,7 +260,7 @@ static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
if (kgsl_sync_fence_async_cancel(event->handle))
- drawobj_put(drawobj);
+ kgsl_drawobj_put(drawobj);
break;
}
}
@@ -321,21 +316,19 @@ void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj)
else
return;
- drawobj_put(drawobj);
+ kgsl_drawobj_put(drawobj);
}
EXPORT_SYMBOL(kgsl_drawobj_destroy);
static void drawobj_sync_fence_func(void *priv)
{
struct kgsl_drawobj_sync_event *event = priv;
- char fence_str[128];
- kgsl_dump_fence(event->handle, fence_str, sizeof(fence_str));
- trace_syncpoint_fence_expire(event->syncobj, fence_str);
+ trace_syncpoint_fence_expire(event->syncobj, event->fence_name);
drawobj_sync_expire(event->device, event);
- drawobj_put(&event->syncobj->base);
+ kgsl_drawobj_put(&event->syncobj->base);
}
/* drawobj_add_sync_fence() - Add a new sync fence syncpoint
@@ -352,7 +345,6 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
struct kgsl_drawobj_sync_event *event;
unsigned int id;
- char fence_str[128];
kref_get(&drawobj->refcount);
@@ -369,7 +361,8 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
set_bit(event->id, &syncobj->pending);
event->handle = kgsl_sync_fence_async_wait(sync->fd,
- drawobj_sync_fence_func, event);
+ drawobj_sync_fence_func, event,
+ event->fence_name, sizeof(event->fence_name));
if (IS_ERR_OR_NULL(event->handle)) {
int ret = PTR_ERR(event->handle);
@@ -377,7 +370,7 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
clear_bit(event->id, &syncobj->pending);
event->handle = NULL;
- drawobj_put(drawobj);
+ kgsl_drawobj_put(drawobj);
/*
* If ret == 0 the fence was already signaled - print a trace
@@ -389,8 +382,7 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
return ret;
}
- kgsl_dump_fence(event->handle, fence_str, sizeof(fence_str));
- trace_syncpoint_fence(syncobj, fence_str);
+ trace_syncpoint_fence(syncobj, event->fence_name);
return 0;
}
@@ -457,7 +449,7 @@ static int drawobj_add_sync_timestamp(struct kgsl_device *device,
if (ret) {
clear_bit(event->id, &syncobj->pending);
- drawobj_put(drawobj);
+ kgsl_drawobj_put(drawobj);
} else {
trace_syncpoint_timestamp(syncobj, context, sync->timestamp);
}
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
index 5ec98ed..06eef7f 100644
--- a/drivers/gpu/msm/kgsl_drawobj.h
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -105,6 +105,8 @@ struct kgsl_drawobj_sync {
unsigned long timeout_jiffies;
};
+#define KGSL_FENCE_NAME_LEN 74
+
/**
* struct kgsl_drawobj_sync_event
* @id: identifier (position within the pending bitmap)
@@ -114,6 +116,7 @@ struct kgsl_drawobj_sync {
* register this event
* @timestamp: Pending timestamp for the event
* @handle: Pointer to a sync fence handle
+ * @fence_name: A fence name string to describe the fence
* @device: Pointer to the KGSL device
*/
struct kgsl_drawobj_sync_event {
@@ -123,6 +126,7 @@ struct kgsl_drawobj_sync_event {
struct kgsl_context *context;
unsigned int timestamp;
struct kgsl_sync_fence_cb *handle;
+ char fence_name[KGSL_FENCE_NAME_LEN];
struct kgsl_device *device;
};
@@ -206,6 +210,8 @@ void kgsl_dump_syncpoints(struct kgsl_device *device,
void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj);
+void kgsl_drawobj_destroy_object(struct kref *kref);
+
static inline bool kgsl_drawobj_events_pending(
struct kgsl_drawobj_sync *syncobj)
{
@@ -220,4 +226,11 @@ static inline bool kgsl_drawobj_event_pending(
return test_bit(bit, &syncobj->pending);
}
+
+static inline void kgsl_drawobj_put(struct kgsl_drawobj *drawobj)
+{
+ if (drawobj)
+ kref_put(&drawobj->refcount, kgsl_drawobj_destroy_object);
+}
+
#endif /* __KGSL_DRAWOBJ_H */
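
The adreno debugfs and drawctxt changes earlier in this patch wrap every dump-time access to a draw object in kref_get_unless_zero()/kgsl_drawobj_put(), so an object racing with destruction is simply skipped. A compact sketch of that guard (the printing helper itself is invented for illustration):

#include <linux/kref.h>
#include <linux/seq_file.h>
#include "kgsl_drawobj.h"

static void demo_print_drawobj(struct seq_file *s, struct kgsl_drawobj *drawobj)
{
	/* skip objects whose refcount has already dropped to zero */
	if (!kref_get_unless_zero(&drawobj->refcount))
		return;

	/* safe to dereference while the reference is held */
	seq_printf(s, "drawobj type:%d\n", drawobj->type);

	/* drop the reference; frees the object if this was the last holder */
	kgsl_drawobj_put(drawobj);
}
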
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 416085f..0c821cd 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1369,24 +1369,39 @@ int gmu_start(struct kgsl_device *device)
return ret;
}
+#define GMU_IDLE_TIMEOUT 10 /* ms */
+
/* Caller shall ensure GPU is ready for SLUMBER */
void gmu_stop(struct kgsl_device *device)
{
struct gmu_device *gmu = &device->gmu;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+ unsigned long t;
+ bool idle = false;
if (!test_bit(GMU_CLK_ON, &gmu->flags))
return;
- if (gpudev->wait_for_gmu_idle &&
- !gpudev->wait_for_gmu_idle(adreno_dev)) {
- dev_err(&gmu->pdev->dev, "Failure to stop gmu");
- return;
+ if (gpudev->hw_isidle) {
+ t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
+ while (!time_after(jiffies, t)) {
+ if (gpudev->hw_isidle(adreno_dev)) {
+ idle = true;
+ break;
+ }
+ cpu_relax();
+ }
}
gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
+ if (!idle || (gpudev->wait_for_gmu_idle &&
+ gpudev->wait_for_gmu_idle(adreno_dev))) {
+ dev_err(&gmu->pdev->dev, "Failure to stop GMU");
+ return;
+ }
+
/* Pending message in all queues are abandoned */
hfi_stop(gmu);
clear_bit(GMU_HFI_ON, &gmu->flags);
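
gmu_stop() now polls the hardware idle status itself against a jiffies deadline before notifying slumber. The same bounded-poll idiom, reduced to a sketch in which is_idle() stands in for gpudev->hw_isidle():

#include <linux/jiffies.h>

#define DEMO_IDLE_TIMEOUT_MS	10

static bool demo_wait_for_idle(bool (*is_idle)(void))
{
	unsigned long deadline = jiffies + msecs_to_jiffies(DEMO_IDLE_TIMEOUT_MS);

	while (!time_after(jiffies, deadline)) {
		if (is_idle())
			return true;
		cpu_relax();	/* short busy wait, matching the ~10ms budget */
	}

	return false;	/* caller decides whether to bail out or force the transition */
}
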
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index a2ca67c..7055eb7 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -146,7 +146,7 @@ enum gpu_idle_level {
GPU_HW_NAP = 0x4,
GPU_HW_MIN_VOLT = 0x5,
GPU_HW_MIN_DDR = 0x6,
- GPU_HW_SLUMBER = 0xF
+ GPU_HW_SLUMBER = 0x7
};
/**
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 9c078b6..973a2ff 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -431,8 +431,27 @@ static void kgsl_sync_fence_callback(struct fence *fence, struct fence_cb *cb)
kfree(kcb);
}
+static void kgsl_get_fence_name(struct fence *fence,
+ char *fence_name, int name_len)
+{
+ char *ptr = fence_name;
+ char *last = fence_name + name_len;
+
+ ptr += snprintf(ptr, last - ptr, "%s %s",
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence));
+
+ if ((ptr + 2) >= last)
+ return;
+
+ if (fence->ops->fence_value_str) {
+ ptr += snprintf(ptr, last - ptr, ": ");
+ fence->ops->fence_value_str(fence, ptr, last - ptr);
+ }
+}
+
struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
- void (*func)(void *priv), void *priv)
+ void (*func)(void *priv), void *priv, char *fence_name, int name_len)
{
struct kgsl_sync_fence_cb *kcb;
struct fence *fence;
@@ -453,6 +472,9 @@ struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
kcb->priv = priv;
kcb->func = func;
+ if (fence_name)
+ kgsl_get_fence_name(fence, fence_name, name_len);
+
/* if status then error or signaled */
status = fence_add_callback(fence, &kcb->fence_cb,
kgsl_sync_fence_callback);
@@ -789,43 +811,3 @@ static const struct fence_ops kgsl_syncsource_fence_ops = {
.release = kgsl_syncsource_fence_release,
};
-void kgsl_dump_fence(struct kgsl_sync_fence_cb *handle,
- char *fence_str, int len)
-{
- struct fence *fence;
- char *ptr = fence_str;
- char *last = fence_str + len;
-
- if (!handle || !handle->fence) {
- snprintf(fence_str, len, "NULL");
- return;
- }
-
- fence = handle->fence;
-
- ptr += snprintf(ptr, last - ptr, "%s %s",
- fence->ops->get_timeline_name(fence),
- fence->ops->get_driver_name(fence));
- if (ptr >= last)
- return;
-
- if (fence->ops->timeline_value_str &&
- fence->ops->fence_value_str) {
- char value[64];
- bool success;
-
- fence->ops->fence_value_str(fence, value, sizeof(value));
- success = !!strlen(value);
-
- if (success) {
- ptr += snprintf(ptr, last - ptr, ": %s", value);
- if (ptr >= last)
- return;
-
- fence->ops->timeline_value_str(fence, value,
- sizeof(value));
- ptr += snprintf(ptr, last - ptr, " / %s", value);
- }
- }
-}
-
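
With kgsl_dump_fence() removed, the fence description is captured once, at wait time, into a caller-supplied buffer. A hedged usage sketch against the new kgsl_sync_fence_async_wait() signature (the callback, fd handling and wrapper are illustrative); callers that do not need the name may pass NULL and 0, as the kgsl.c hunk above does:

#include <linux/err.h>
#include "kgsl_drawobj.h"
#include "kgsl_sync.h"

static void demo_fence_signaled(void *priv)
{
	/* priv is whatever was handed to kgsl_sync_fence_async_wait() */
}

static int demo_wait_on_fence(int fd, void *priv)
{
	char fence_name[KGSL_FENCE_NAME_LEN];
	struct kgsl_sync_fence_cb *handle;

	/* the name is filled in here, so it stays usable after the fence is released */
	handle = kgsl_sync_fence_async_wait(fd, demo_fence_signaled, priv,
					    fence_name, sizeof(fence_name));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return 0;	/* fence already signaled */

	pr_debug("waiting on fence: %s\n", fence_name);
	return 0;
}
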
diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h
index dc84c54..99fe0e1 100644
--- a/drivers/gpu/msm/kgsl_sync.h
+++ b/drivers/gpu/msm/kgsl_sync.h
@@ -91,7 +91,8 @@ void kgsl_sync_timeline_destroy(struct kgsl_context *context);
void kgsl_sync_timeline_put(struct kgsl_sync_timeline *ktimeline);
struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
- void (*func)(void *priv), void *priv);
+ void (*func)(void *priv), void *priv,
+ char *fence_name, int name_len);
int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_cb *kcb);
@@ -109,8 +110,8 @@ void kgsl_syncsource_put(struct kgsl_syncsource *syncsource);
void kgsl_syncsource_cleanup(struct kgsl_process_private *private,
struct kgsl_syncsource *syncsource);
-void kgsl_dump_fence(struct kgsl_sync_fence_cb *handle,
- char *fence_str, int len);
+void kgsl_dump_fence(struct kgsl_drawobj_sync_event *event,
+ char *fence_str, int len);
#else
static inline int kgsl_add_fence_event(struct kgsl_device *device,
@@ -134,8 +135,10 @@ static inline void kgsl_sync_timeline_put(struct kgsl_sync_timeline *ktimeline)
{
}
-static inline struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
- void (*func)(void *priv), void *priv)
+
+struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
+ void (*func)(void *priv), void *priv,
+ char *fence_name, int name_len)
{
return NULL;
}
@@ -185,8 +188,8 @@ static inline void kgsl_syncsource_cleanup(struct kgsl_process_private *private,
}
-void kgsl_dump_fence(struct kgsl_sync_fence_cb *handle,
- char *fence_str, int len)
+void kgsl_dump_fence(struct kgsl_drawobj_sync_event *event,
+ char *fence_str, int len)
{
}
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
index a2ce81a..8a57ed2 100644
--- a/drivers/hwtracing/coresight/coresight-cti.c
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,7 +19,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <linux/clk.h>
+#include <linux/amba/bus.h>
#include <linux/cpu_pm.h>
#include <linux/topology.h>
#include <linux/of.h>
@@ -379,7 +379,7 @@ int coresight_cti_map_trigin(struct coresight_cti *cti, int trig, int ch)
* within the mutex lock region in addition to within the spinlock.
*/
if (drvdata->refcnt == 0) {
- ret = clk_prepare_enable(drvdata->clk);
+ ret = pm_runtime_get_sync(drvdata->dev);
if (ret)
goto err1;
}
@@ -402,7 +402,7 @@ int coresight_cti_map_trigin(struct coresight_cti *cti, int trig, int ch)
* adjusting its value.
*/
if (drvdata->refcnt == 0)
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
err1:
cti_trigin_gpio_disable(drvdata);
err0:
@@ -463,7 +463,7 @@ int coresight_cti_map_trigout(struct coresight_cti *cti, int trig, int ch)
* within the mutex lock region in addition to within the spinlock.
*/
if (drvdata->refcnt == 0) {
- ret = clk_prepare_enable(drvdata->clk);
+ ret = pm_runtime_get_sync(drvdata->dev);
if (ret)
goto err1;
}
@@ -485,7 +485,7 @@ int coresight_cti_map_trigout(struct coresight_cti *cti, int trig, int ch)
* __cti_map_trigout so it is safe to check it against 0.
*/
if (drvdata->refcnt == 0)
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
err1:
cti_trigout_gpio_disable(drvdata);
err0:
@@ -563,7 +563,7 @@ void coresight_cti_unmap_trigin(struct coresight_cti *cti, int trig, int ch)
* within the mutex lock region in addition to within the spinlock.
*/
if (drvdata->refcnt == 0)
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
if (drvdata->gpio_trigin->trig == trig)
cti_trigin_gpio_disable(drvdata);
@@ -632,7 +632,7 @@ void coresight_cti_unmap_trigout(struct coresight_cti *cti, int trig, int ch)
* within the mutex lock region in addition to within the spinlock.
*/
if (drvdata->refcnt == 0)
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
if (drvdata->gpio_trigout->trig == trig)
cti_trigout_gpio_disable(drvdata);
@@ -1388,34 +1388,29 @@ static struct notifier_block cti_cpu_pm_notifier = {
.notifier_call = cti_cpu_pm_callback,
};
-static int cti_probe(struct platform_device *pdev)
+static int cti_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
int trig;
- struct device *dev = &pdev->dev;
+ struct device *dev = &adev->dev;
struct coresight_platform_data *pdata;
struct cti_drvdata *drvdata;
- struct resource *res;
struct coresight_desc *desc;
struct device_node *cpu_node;
- pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+ pdata = of_get_coresight_platform_data(dev, adev->dev.of_node);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- pdev->dev.platform_data = pdata;
+ adev->dev.platform_data = pdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
/* Store the driver data pointer for use in exported functions */
- drvdata->dev = &pdev->dev;
- platform_set_drvdata(pdev, drvdata);
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cti-base");
- if (!res)
- return -ENODEV;
-
- drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+ drvdata->base = devm_ioremap_resource(dev, &adev->res);
if (!drvdata->base)
return -ENOMEM;
@@ -1423,21 +1418,13 @@ static int cti_probe(struct platform_device *pdev)
mutex_init(&drvdata->mutex);
- drvdata->clk = devm_clk_get(dev, "core_clk");
- if (IS_ERR(drvdata->clk))
- return PTR_ERR(drvdata->clk);
-
- ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
- if (ret)
- return ret;
-
drvdata->gpio_trigin = devm_kzalloc(dev, sizeof(struct cti_pctrl),
GFP_KERNEL);
if (!drvdata->gpio_trigin)
return -ENOMEM;
drvdata->gpio_trigin->trig = -1;
- ret = of_property_read_u32(pdev->dev.of_node,
+ ret = of_property_read_u32(adev->dev.of_node,
"qcom,cti-gpio-trigin", &trig);
if (!ret)
drvdata->gpio_trigin->trig = trig;
@@ -1450,7 +1437,7 @@ static int cti_probe(struct platform_device *pdev)
return -ENOMEM;
drvdata->gpio_trigout->trig = -1;
- ret = of_property_read_u32(pdev->dev.of_node,
+ ret = of_property_read_u32(adev->dev.of_node,
"qcom,cti-gpio-trigout", &trig);
if (!ret)
drvdata->gpio_trigout->trig = trig;
@@ -1458,7 +1445,7 @@ static int cti_probe(struct platform_device *pdev)
return ret;
drvdata->cpu = -1;
- cpu_node = of_parse_phandle(pdev->dev.of_node, "cpu", 0);
+ cpu_node = of_parse_phandle(adev->dev.of_node, "cpu", 0);
if (cpu_node) {
drvdata->cpu = pdata ? pdata->cpu : -1;
if (drvdata->cpu == -1) {
@@ -1468,7 +1455,7 @@ static int cti_probe(struct platform_device *pdev)
}
if (!cti_save_disable)
- drvdata->cti_save = of_property_read_bool(pdev->dev.of_node,
+ drvdata->cti_save = of_property_read_bool(adev->dev.of_node,
"qcom,cti-save");
if (drvdata->cti_save) {
drvdata->state = devm_kzalloc(dev, sizeof(struct cti_state),
@@ -1476,18 +1463,18 @@ static int cti_probe(struct platform_device *pdev)
if (!drvdata->state)
return -ENOMEM;
- drvdata->cti_hwclk = of_property_read_bool(pdev->dev.of_node,
+ drvdata->cti_hwclk = of_property_read_bool(adev->dev.of_node,
"qcom,cti-hwclk");
}
if (drvdata->cti_save && !drvdata->cti_hwclk) {
- ret = clk_prepare_enable(drvdata->clk);
+ ret = pm_runtime_get_sync(drvdata->dev);
if (ret)
return ret;
}
mutex_lock(&cti_lock);
drvdata->cti.name = ((struct coresight_platform_data *)
- (pdev->dev.platform_data))->name;
+ (adev->dev.platform_data))->name;
list_add_tail(&drvdata->cti.link, &cti_list);
mutex_unlock(&cti_lock);
@@ -1497,8 +1484,8 @@ static int cti_probe(struct platform_device *pdev)
goto err;
}
desc->type = CORESIGHT_DEV_TYPE_NONE;
- desc->pdata = pdev->dev.platform_data;
- desc->dev = &pdev->dev;
+ desc->pdata = adev->dev.platform_data;
+ desc->dev = &adev->dev;
desc->groups = cti_attr_grps;
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev)) {
@@ -1511,56 +1498,35 @@ static int cti_probe(struct platform_device *pdev)
cpu_pm_register_notifier(&cti_cpu_pm_notifier);
registered++;
}
-
+ pm_runtime_put(&adev->dev);
dev_dbg(dev, "CTI initialized\n");
return 0;
err:
if (drvdata->cti_save && !drvdata->cti_hwclk)
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(&adev->dev);
return ret;
}
-static int cti_remove(struct platform_device *pdev)
-{
- struct cti_drvdata *drvdata = platform_get_drvdata(pdev);
-
- if (drvdata->cti_save) {
- registered--;
- if (!registered)
- cpu_pm_unregister_notifier(&cti_cpu_pm_notifier);
- }
- coresight_unregister(drvdata->csdev);
- if (drvdata->cti_save && !drvdata->cti_hwclk)
- clk_disable_unprepare(drvdata->clk);
- return 0;
-}
-
-static const struct of_device_id cti_match[] = {
- {.compatible = "arm,coresight-cti"},
- {}
+static struct amba_id cti_ids[] = {
+ {
+ .id = 0x0003b966,
+ .mask = 0x0003ffff,
+ .data = "CTI",
+ },
+ { 0, 0},
};
-static struct platform_driver cti_driver = {
- .probe = cti_probe,
- .remove = cti_remove,
- .driver = {
+static struct amba_driver cti_driver = {
+ .drv = {
.name = "coresight-cti",
.owner = THIS_MODULE,
- .of_match_table = cti_match,
+ .suppress_bind_attrs = true,
},
+ .probe = cti_probe,
+ .id_table = cti_ids,
};
-static int __init cti_init(void)
-{
- return platform_driver_register(&cti_driver);
-}
-module_init(cti_init);
-
-static void __exit cti_exit(void)
-{
- platform_driver_unregister(&cti_driver);
-}
-module_exit(cti_exit);
+builtin_amba_driver(cti_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight CTI driver");
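
The CTI driver above and the TPDA/TPDM drivers below follow the same conversion: the OF-matched platform_driver becomes an AMBA driver that matches on the peripheral ID and lets the AMBA bus and runtime PM manage apb_pclk. A stripped-down skeleton of that pattern (the ID/mask values are the CTI ones from this patch; everything else is illustrative):

#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

static int demo_probe(struct amba_device *adev, const struct amba_id *id)
{
	void __iomem *base;

	/* the AMBA core holds a clock/runtime PM reference around probe */
	base = devm_ioremap_resource(&adev->dev, &adev->res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... register the device with the coresight framework here ... */

	pm_runtime_put(&adev->dev);	/* drop the probe-time reference */
	return 0;
}

static struct amba_id demo_ids[] = {
	{ .id = 0x0003b966, .mask = 0x0003ffff },	/* CTI peripheral ID from this patch */
	{ 0, 0 },
};

static struct amba_driver demo_driver = {
	.drv = {
		.name = "coresight-demo",
		.suppress_bind_attrs = true,
	},
	.probe = demo_probe,
	.id_table = demo_ids,
};
builtin_amba_driver(demo_driver);

Matching on the peripheral ID rather than an OF compatible is what allows the "arm,primecell" compatible and apb_pclk clock rename seen in the binding changes at the top of this merge.
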
diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
index 0bd8b78..98547a9 100644
--- a/drivers/hwtracing/coresight/coresight-replicator-qcom.c
+++ b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
@@ -47,8 +47,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
{
struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
- pm_runtime_get_sync(drvdata->dev);
-
CS_UNLOCK(drvdata->base);
/*
@@ -85,7 +83,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
CS_LOCK(drvdata->base);
- pm_runtime_put(drvdata->dev);
dev_info(drvdata->dev, "REPLICATOR disabled\n");
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index f5018fc..10e8da4 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -436,8 +436,11 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
if (ret)
drvdata->size = SZ_1M;
+ if (of_property_read_bool(np, "arm,sg-enable"))
+ drvdata->memtype = TMC_ETR_MEM_TYPE_SG;
+ else
+ drvdata->memtype = TMC_ETR_MEM_TYPE_CONTIG;
drvdata->mem_size = drvdata->size;
- drvdata->memtype = TMC_ETR_MEM_TYPE_CONTIG;
drvdata->mem_type = drvdata->memtype;
} else {
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index c96087d..5d2d087 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,10 +14,10 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
-#include <linux/clk.h>
#include <linux/bitmap.h>
#include <linux/of.h>
#include <linux/coresight.h>
@@ -53,7 +53,6 @@ struct tpda_drvdata {
void __iomem *base;
struct device *dev;
struct coresight_device *csdev;
- struct clk *clk;
struct mutex lock;
bool enable;
uint32_t atid;
@@ -183,11 +182,6 @@ static void __tpda_enable(struct tpda_drvdata *drvdata, int port)
static int tpda_enable(struct coresight_device *csdev, int inport, int outport)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- int ret;
-
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
mutex_lock(&drvdata->lock);
__tpda_enable(drvdata, inport);
@@ -221,8 +215,6 @@ static void tpda_disable(struct coresight_device *csdev, int inport,
drvdata->enable = false;
mutex_unlock(&drvdata->lock);
- clk_disable_unprepare(drvdata->clk);
-
dev_info(drvdata->dev, "TPDA inport %d disabled\n", inport);
}
@@ -653,31 +645,27 @@ static void tpda_init_default_data(struct tpda_drvdata *drvdata)
drvdata->freq_ts = true;
}
-static int tpda_probe(struct platform_device *pdev)
+static int tpda_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
- struct device *dev = &pdev->dev;
+ struct device *dev = &adev->dev;
struct coresight_platform_data *pdata;
struct tpda_drvdata *drvdata;
- struct resource *res;
struct coresight_desc *desc;
- pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+ pdata = of_get_coresight_platform_data(dev, adev->dev.of_node);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- pdev->dev.platform_data = pdata;
+ adev->dev.platform_data = pdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->dev = &pdev->dev;
- platform_set_drvdata(pdev, drvdata);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tpda-base");
- if (!res)
- return -ENODEV;
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
- drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+ drvdata->base = devm_ioremap_resource(dev, &adev->res);
if (!drvdata->base)
return -ENOMEM;
@@ -687,22 +675,10 @@ static int tpda_probe(struct platform_device *pdev)
if (ret)
return ret;
- drvdata->clk = devm_clk_get(dev, "core_clk");
- if (IS_ERR(drvdata->clk))
- return PTR_ERR(drvdata->clk);
-
- ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
-
if (!coresight_authstatus_enabled(drvdata->base))
goto err;
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(&adev->dev);
tpda_init_default_data(drvdata);
@@ -712,8 +688,8 @@ static int tpda_probe(struct platform_device *pdev)
desc->type = CORESIGHT_DEV_TYPE_LINK;
desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
desc->ops = &tpda_cs_ops;
- desc->pdata = pdev->dev.platform_data;
- desc->dev = &pdev->dev;
+ desc->pdata = adev->dev.platform_data;
+ desc->dev = &adev->dev;
desc->groups = tpda_attr_grps;
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev))
@@ -722,44 +698,29 @@ static int tpda_probe(struct platform_device *pdev)
dev_dbg(drvdata->dev, "TPDA initialized\n");
return 0;
err:
- clk_disable_unprepare(drvdata->clk);
return -EPERM;
}
-static int tpda_remove(struct platform_device *pdev)
-{
- struct tpda_drvdata *drvdata = platform_get_drvdata(pdev);
-
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
-static const struct of_device_id tpda_match[] = {
- {.compatible = "qcom,coresight-tpda"},
- {}
+static struct amba_id tpda_ids[] = {
+ {
+ .id = 0x0003b969,
+ .mask = 0x0003ffff,
+ .data = "TPDA",
+ },
+ { 0, 0},
};
-static struct platform_driver tpda_driver = {
- .probe = tpda_probe,
- .remove = tpda_remove,
- .driver = {
+static struct amba_driver tpda_driver = {
+ .drv = {
.name = "coresight-tpda",
.owner = THIS_MODULE,
- .of_match_table = tpda_match,
+ .suppress_bind_attrs = true,
},
+ .probe = tpda_probe,
+ .id_table = tpda_ids,
};
-static int __init tpda_init(void)
-{
- return platform_driver_register(&tpda_driver);
-}
-module_init(tpda_init);
-
-static void __exit tpda_exit(void)
-{
- platform_driver_unregister(&tpda_driver);
-}
-module_exit(tpda_exit);
+builtin_amba_driver(tpda_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Aggregator driver");
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index 69511cd..36e3db2 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -13,11 +13,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
-#include <linux/clk.h>
#include <linux/bitmap.h>
#include <linux/of.h>
#include <linux/coresight.h>
@@ -246,7 +245,6 @@ struct tpdm_drvdata {
void __iomem *base;
struct device *dev;
struct coresight_device *csdev;
- struct clk *clk;
struct mutex lock;
bool enable;
bool clk_enable;
@@ -648,11 +646,6 @@ static int tpdm_enable(struct coresight_device *csdev,
struct perf_event *event, u32 mode)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- int ret;
-
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
mutex_lock(&drvdata->lock);
__tpdm_enable(drvdata);
@@ -732,8 +725,6 @@ static void tpdm_disable(struct coresight_device *csdev,
drvdata->enable = false;
mutex_unlock(&drvdata->lock);
- clk_disable_unprepare(drvdata->clk);
-
dev_info(drvdata->dev, "TPDM tracing disabled\n");
}
@@ -3939,57 +3930,40 @@ static void tpdm_init_default_data(struct tpdm_drvdata *drvdata)
drvdata->cmb->trig_ts = true;
}
-static int tpdm_probe(struct platform_device *pdev)
+static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret, i;
uint32_t pidr, devid;
- struct device *dev = &pdev->dev;
+ struct device *dev = &adev->dev;
struct coresight_platform_data *pdata;
struct tpdm_drvdata *drvdata;
- struct resource *res;
struct coresight_desc *desc;
static int traceid = TPDM_TRACE_ID_START;
uint32_t version;
- pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+ pdata = of_get_coresight_platform_data(dev, adev->dev.of_node);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- pdev->dev.platform_data = pdata;
+ adev->dev.platform_data = pdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->dev = &pdev->dev;
- platform_set_drvdata(pdev, drvdata);
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tpdm-base");
- if (!res)
- return -ENODEV;
-
- drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+ drvdata->base = devm_ioremap_resource(dev, &adev->res);
if (!drvdata->base)
return -ENOMEM;
- drvdata->clk_enable = of_property_read_bool(pdev->dev.of_node,
+ drvdata->clk_enable = of_property_read_bool(adev->dev.of_node,
"qcom,clk-enable");
- drvdata->msr_fix_req = of_property_read_bool(pdev->dev.of_node,
+ drvdata->msr_fix_req = of_property_read_bool(adev->dev.of_node,
"qcom,msr-fix-req");
mutex_init(&drvdata->lock);
- drvdata->clk = devm_clk_get(dev, "core_clk");
- if (IS_ERR(drvdata->clk))
- return PTR_ERR(drvdata->clk);
-
- ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
-
version = tpdm_readl(drvdata, CORESIGHT_PERIPHIDR2);
drvdata->version = BMVAL(version, 4, 7);
@@ -4017,7 +3991,7 @@ static int tpdm_probe(struct platform_device *pdev)
drvdata->bc_counters_avail = BMVAL(devid, 6, 10) + 1;
drvdata->tc_counters_avail = BMVAL(devid, 4, 5) + 1;
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(&adev->dev);
drvdata->traceid = traceid++;
@@ -4027,8 +4001,8 @@ static int tpdm_probe(struct platform_device *pdev)
desc->type = CORESIGHT_DEV_TYPE_SOURCE;
desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
desc->ops = &tpdm_cs_ops;
- desc->pdata = pdev->dev.platform_data;
- desc->dev = &pdev->dev;
+ desc->pdata = adev->dev.platform_data;
+ desc->dev = &adev->dev;
desc->groups = tpdm_attr_grps;
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev))
@@ -4042,40 +4016,26 @@ static int tpdm_probe(struct platform_device *pdev)
return 0;
}
-static int tpdm_remove(struct platform_device *pdev)
-{
- struct tpdm_drvdata *drvdata = platform_get_drvdata(pdev);
-
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
-static const struct of_device_id tpdm_match[] = {
- {.compatible = "qcom,coresight-tpdm"},
- {}
+static struct amba_id tpdm_ids[] = {
+ {
+ .id = 0x0003b968,
+ .mask = 0x0003ffff,
+ .data = "TPDM",
+ },
+ { 0, 0},
};
-static struct platform_driver tpdm_driver = {
- .probe = tpdm_probe,
- .remove = tpdm_remove,
- .driver = {
+static struct amba_driver tpdm_driver = {
+ .drv = {
.name = "coresight-tpdm",
.owner = THIS_MODULE,
- .of_match_table = tpdm_match,
+ .suppress_bind_attrs = true,
},
+ .probe = tpdm_probe,
+ .id_table = tpdm_ids,
};
-static int __init tpdm_init(void)
-{
- return platform_driver_register(&tpdm_driver);
-}
-module_init(tpdm_init);
-
-static void __exit tpdm_exit(void)
-{
- platform_driver_unregister(&tpdm_driver);
-}
-module_exit(tpdm_exit);
+builtin_amba_driver(tpdm_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Monitor driver");
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index db7d1d6..7826994 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1118,6 +1118,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
* Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
@@ -1524,6 +1525,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
},
},
{
+ /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"),
+ },
+ },
+ {
/* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1864e76..dd96670 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -4375,7 +4375,7 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
/* Attempt to register child devices */
ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
if (ret)
- return -EINVAL;
+ return -EPROBE_DEFER;
return 0;
}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index c90fbf0..261c125 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1750,3 +1750,23 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
+
+/*
+ * Return the ID associated with a PCI device.
+ */
+int iommu_fwspec_get_id(struct device *dev, u32 *id)
+{
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+ if (!fwspec)
+ return -EINVAL;
+
+ if (!dev_is_pci(dev))
+ return -EINVAL;
+
+ if (fwspec->num_ids != 1)
+ return -EINVAL;
+
+ *id = fwspec->ids[0];
+ return 0;
+}
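Note: iommu_fwspec_get_id() above only succeeds for a PCI device whose fwspec carries exactly one ID. A minimal caller sketch (the caller and its pdev pointer are hypothetical, not part of this patch):

	u32 sid;

	/* Hypothetical caller: look up the single stream ID of a PCI device. */
	if (iommu_fwspec_get_id(&pdev->dev, &sid))
		return -ENODEV;	/* no fwspec, not a PCI device, or multiple IDs */
	dev_dbg(&pdev->dev, "using stream ID 0x%x\n", sid);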
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index a9ddf0f..b31c0f1 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -15,6 +15,7 @@
#include <linux/atomic.h>
#include <linux/bitmap.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -86,6 +87,7 @@
#define TCS_TYPE_NR 4
#define TCS_MBOX_TOUT_MS 2000
#define MAX_POOL_SIZE (MAX_TCS_PER_TYPE * TCS_TYPE_NR)
+#define TCS_M_INIT 0xFFFF
struct tcs_drv;
@@ -97,10 +99,12 @@ struct tcs_response {
struct tasklet_struct tasklet;
struct delayed_work dwork;
int err;
+ int idx;
+ bool in_use;
};
struct tcs_response_pool {
- struct tcs_response *resp;
+ struct tcs_response resp[MAX_POOL_SIZE];
spinlock_t lock;
DECLARE_BITMAP(avail, MAX_POOL_SIZE);
};
@@ -116,8 +120,6 @@ struct tcs_mbox {
int ncpt; /* num cmds per tcs */
DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
spinlock_t tcs_lock; /* TCS type lock */
- spinlock_t tcs_m_lock[MAX_TCS_PER_TYPE];
- struct tcs_response *resp[MAX_TCS_PER_TYPE];
};
/* One per MBOX controller */
@@ -133,7 +135,9 @@ struct tcs_drv {
int num_tcs;
struct workqueue_struct *wq;
struct tcs_response_pool *resp_pool;
- atomic_t tcs_in_use[TCS_TYPE_NR * MAX_TCS_PER_TYPE];
+ atomic_t tcs_in_use[MAX_POOL_SIZE];
+ atomic_t tcs_send_count[MAX_POOL_SIZE];
+ atomic_t tcs_irq_count[MAX_POOL_SIZE];
};
static void tcs_notify_tx_done(unsigned long data);
@@ -148,16 +152,13 @@ static int tcs_response_pool_init(struct tcs_drv *drv)
if (!pool)
return -ENOMEM;
- pool->resp = devm_kzalloc(&drv->pdev->dev, sizeof(*pool->resp) *
- MAX_POOL_SIZE, GFP_KERNEL);
- if (!pool->resp)
- return -ENOMEM;
-
for (i = 0; i < MAX_POOL_SIZE; i++) {
tasklet_init(&pool->resp[i].tasklet, tcs_notify_tx_done,
(unsigned long) &pool->resp[i]);
- INIT_DELAYED_WORK(&pool->resp[i].dwork,
- tcs_notify_timeout);
+ INIT_DELAYED_WORK(&pool->resp[i].dwork, tcs_notify_timeout);
+ pool->resp[i].drv = drv;
+ pool->resp[i].idx = i;
+ pool->resp[i].m = TCS_M_INIT;
}
spin_lock_init(&pool->lock);
@@ -166,39 +167,59 @@ static int tcs_response_pool_init(struct tcs_drv *drv)
return 0;
}
-static struct tcs_response *get_response_from_pool(struct tcs_drv *drv)
+static struct tcs_response *setup_response(struct tcs_drv *drv,
+ struct tcs_mbox_msg *msg, struct mbox_chan *chan,
+ u32 m, int err)
{
struct tcs_response_pool *pool = drv->resp_pool;
struct tcs_response *resp = ERR_PTR(-ENOMEM);
- unsigned long flags;
int pos;
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock(&pool->lock);
pos = find_first_zero_bit(pool->avail, MAX_POOL_SIZE);
if (pos != MAX_POOL_SIZE) {
bitmap_set(pool->avail, pos, 1);
resp = &pool->resp[pos];
- memset(resp, 0, sizeof(*resp));
- tasklet_init(&resp->tasklet, tcs_notify_tx_done,
- (unsigned long) resp);
- INIT_DELAYED_WORK(&resp->dwork, tcs_notify_timeout);
- resp->drv = drv;
+ resp->chan = chan;
+ resp->msg = msg;
+ resp->m = m;
+ resp->err = err;
+ resp->in_use = false;
}
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock(&pool->lock);
return resp;
}
-static void free_response_to_pool(struct tcs_response *resp)
+static void free_response(struct tcs_response *resp)
{
struct tcs_response_pool *pool = resp->drv->resp_pool;
- unsigned long flags;
- int i;
- spin_lock_irqsave(&pool->lock, flags);
- i = resp - pool->resp;
- bitmap_clear(pool->avail, i, 1);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_lock(&pool->lock);
+ resp->err = -EINVAL;
+ bitmap_clear(pool->avail, resp->idx, 1);
+ spin_unlock(&pool->lock);
+}
+
+static inline struct tcs_response *get_response(struct tcs_drv *drv, u32 m)
+{
+ struct tcs_response_pool *pool = drv->resp_pool;
+ struct tcs_response *resp = NULL;
+ int pos = 0;
+
+ do {
+ pos = find_next_bit(pool->avail, MAX_POOL_SIZE, pos);
+ if (pos == MAX_POOL_SIZE)
+ break;
+ resp = &pool->resp[pos];
+ if (resp->m == m && !resp->in_use) {
+ resp->in_use = true;
+ break;
+ }
+ pos++;
+ } while (1);
+
+ return resp;
}
static inline u32 read_drv_config(void __iomem *base)
@@ -226,7 +247,7 @@ static inline void write_tcs_reg_sync(void __iomem *base, int reg, int m, int n,
write_tcs_reg(base, reg, m, n, data);
if (data == read_tcs_reg(base, reg, m, n))
break;
- cpu_relax();
+ udelay(1);
} while (1);
}
@@ -311,13 +332,6 @@ static inline struct tcs_mbox *get_tcs_for_msg(struct tcs_drv *drv,
return get_tcs_of_type(drv, type);
}
-static inline struct tcs_response *get_tcs_response(struct tcs_drv *drv, int m)
-{
- struct tcs_mbox *tcs = get_tcs_from_index(drv, m);
-
- return tcs ? tcs->resp[m - tcs->tcs_offset] : NULL;
-}
-
static inline void send_tcs_response(struct tcs_response *resp)
{
tasklet_schedule(&resp->tasklet);
@@ -340,7 +354,6 @@ static irqreturn_t tcs_irq_handler(int irq, void *p)
struct tcs_mbox *tcs;
struct tcs_response *resp;
struct tcs_cmd *cmd;
- u32 irq_clear = 0;
u32 data;
/* Know which TCSes were triggered */
@@ -350,8 +363,9 @@ static irqreturn_t tcs_irq_handler(int irq, void *p)
if (!(irq_status & BIT(m)))
continue;
- /* Find the TCS that triggered */
- resp = get_tcs_response(drv, m);
+ atomic_inc(&drv->tcs_irq_count[m]);
+
+ resp = get_response(drv, m);
if (!resp) {
pr_err("No resp request for TCS-%d\n", m);
continue;
@@ -397,18 +411,13 @@ static irqreturn_t tcs_irq_handler(int irq, void *p)
write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
}
- /* Notify the client that this request is completed. */
+ /* Clear the TCS IRQ status */
+ write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));
+
+ /* Clean up response object and notify mbox in tasklet */
send_tcs_response(resp);
- irq_clear |= BIT(m);
- }
- /* Clear the TCS IRQ status */
- write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, irq_clear);
-
- /* Mark the TCS as free */
- for (m = 0; irq_status >= BIT(m); m++) {
- if (!(irq_status & BIT(m)))
- continue;
+		/* Mark the TCS as free */
atomic_set(&drv->tcs_in_use[m], 0);
}
@@ -435,8 +444,8 @@ static void tcs_notify_tx_done(unsigned long data)
int err = resp->err;
int m = resp->m;
- free_response_to_pool(resp);
mbox_notify_tx_done(chan, msg, m, err);
+ free_response(resp);
}
/**
@@ -452,37 +461,66 @@ static void tcs_notify_timeout(struct work_struct *work)
struct tcs_mbox_msg *msg = resp->msg;
struct tcs_drv *drv = resp->drv;
int m = resp->m;
+ u32 irq_status;
+ struct tcs_mbox *tcs = get_tcs_from_index(drv, m);
+ bool pending = false;
+ int sent_count, irq_count;
+ int i;
+ unsigned long flags;
- /*
- * In case the RPMH resource fails to respond to the completion
- * request, the TCS would be blocked forever waiting on the response.
- * There is no way to recover from this case.
- */
+	/* Read while holding the lock to get a consistent state snapshot */
+ spin_lock_irqsave(&tcs->tcs_lock, flags);
+ irq_status = read_tcs_reg(drv->reg_base, TCS_DRV_IRQ_STATUS, 0, 0);
+ sent_count = atomic_read(&drv->tcs_send_count[m]);
+ irq_count = atomic_read(&drv->tcs_irq_count[m]);
+
if (!tcs_is_free(drv, m)) {
- bool pending = false;
struct tcs_cmd *cmd;
- int i;
u32 addr;
for (i = 0; i < msg->num_payload; i++) {
cmd = &msg->payload[i];
addr = read_tcs_reg(drv->reg_base, TCS_DRV_CMD_ADDR,
m, i);
- pending = (cmd->addr == addr);
- }
- if (pending) {
- pr_err("TCS-%d blocked waiting for RPMH to respond.\n",
- m);
- for (i = 0; i < msg->num_payload; i++)
- pr_err("Addr: 0x%x Data: 0x%x\n",
- msg->payload[i].addr,
- msg->payload[i].data);
- BUG();
+ pending |= (cmd->addr == addr);
}
}
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
- free_response_to_pool(resp);
+ if (pending) {
+ pr_err("TCS-%d waiting for response. (sent=%d recvd=%d ctrlr-sts=0x%x)\n",
+ m, sent_count, irq_count, irq_status & (u32)BIT(m));
+ for (i = 0; i < msg->num_payload; i++)
+ pr_err("Addr: 0x%x Data: 0x%x\n",
+ msg->payload[i].addr,
+ msg->payload[i].data);
+ /*
+ * In case the RPMH resource fails to respond to the
+ * completion request, the TCS would be blocked forever
+ * waiting on the response. There is no way to recover
+ * from such a case. But WARN() to investigate any false
+ * positives.
+ */
+ WARN_ON(irq_status & BIT(m));
+
+ /* Clear the TCS status register so we could try again */
+ write_tcs_reg(drv->reg_base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));
+
+		/* Bump the IRQ count so the sent/received counters stay balanced */
+ atomic_inc(&drv->tcs_irq_count[m]);
+
+ /*
+		 * If the request was fire-n-forget, then our controller
+		 * is OK, but the accelerator may be in a bad state.
+		 * Let the upper layers figure out what needs to be done
+		 * in such a case. Return an error code and carry on.
+ */
+ atomic_set(&drv->tcs_in_use[m], 0);
+ }
+
mbox_notify_tx_done(chan, msg, -1, -ETIMEDOUT);
+ free_response(resp);
}
static void __tcs_buffer_write(struct tcs_drv *drv, int d, int m, int n,
@@ -525,8 +563,6 @@ static void __tcs_buffer_write(struct tcs_drv *drv, int d, int m, int n,
write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, cmd_enable);
if (trigger) {
- /* Mark the TCS as busy */
- atomic_set(&drv->tcs_in_use[m], 1);
/* HW req: Clear the DRV_CONTROL and enable TCS again */
write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, 0);
write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, enable);
@@ -560,54 +596,45 @@ static bool tcs_drv_is_idle(struct mbox_controller *mbox)
return true;
}
-static void wait_for_req_inflight(struct tcs_drv *drv, struct tcs_mbox *tcs,
+static int check_for_req_inflight(struct tcs_drv *drv, struct tcs_mbox *tcs,
struct tcs_mbox_msg *msg)
{
- u32 curr_enabled;
+ u32 curr_enabled, addr;
int i, j, k;
- bool is_free;
+ void __iomem *base = drv->reg_base;
+ int m = tcs->tcs_offset;
- do {
- is_free = true;
- for (i = 1; i > tcs->tcs_mask; i = i << 1) {
- if (!(tcs->tcs_mask & i))
+ for (i = 0; i < tcs->num_tcs; i++, m++) {
+ if (tcs_is_free(drv, m))
+ continue;
+
+ curr_enabled = read_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0);
+ for (j = 0; j < curr_enabled; j++) {
+ if (!(curr_enabled & BIT(j)))
continue;
- if (tcs_is_free(drv, i))
- continue;
- curr_enabled = read_tcs_reg(drv->reg_base,
- TCS_DRV_CMD_ENABLE, i, 0);
- for (j = 0; j < msg->num_payload; j++) {
- for (k = 0; k < curr_enabled; k++) {
- if (!(curr_enabled & BIT(k)))
- continue;
- if (tcs->cmd_addr[k] ==
- msg->payload[j].addr) {
- is_free = false;
- goto retry;
- }
- }
+ addr = read_tcs_reg(base, TCS_DRV_CMD_ADDR, m, j);
+ for (k = 0; k < msg->num_payload; k++) {
+ if (addr == msg->payload[k].addr)
+ return -EBUSY;
}
}
-retry:
- if (!is_free)
- cpu_relax();
- } while (!is_free);
+ }
+
+ return 0;
}
static int find_free_tcs(struct tcs_mbox *tcs)
{
- int slot, m = 0;
+ int slot = -EBUSY;
+ int m = 0;
/* Loop until we find a free AMC */
- do {
+ for (m = 0; m < tcs->num_tcs; m++) {
if (tcs_is_free(tcs->drv, tcs->tcs_offset + m)) {
slot = m * tcs->ncpt;
break;
}
- if (++m >= tcs->num_tcs)
- m = 0;
- cpu_relax();
- } while (1);
+ }
return slot;
}
@@ -663,26 +690,6 @@ static int find_slots(struct tcs_mbox *tcs, struct tcs_mbox_msg *msg)
return (slot != MAX_TCS_SLOTS) ? slot : -ENOMEM;
}
-static struct tcs_response *setup_response(struct tcs_mbox *tcs,
- struct mbox_chan *chan, struct tcs_mbox_msg *msg, int m)
-{
- struct tcs_response *resp = get_response_from_pool(tcs->drv);
-
- if (IS_ERR(resp))
- return resp;
-
- if (m < tcs->tcs_offset)
- return ERR_PTR(-EINVAL);
-
- tcs->resp[m - tcs->tcs_offset] = resp;
- resp->msg = msg;
- resp->chan = chan;
- resp->m = m;
- resp->err = 0;
-
- return resp;
-}
-
static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
bool trigger)
{
@@ -690,21 +697,36 @@ static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
int d = drv->drv_id;
struct tcs_mbox *tcs;
- int i, slot, offset, m, n;
+ int i, slot, offset, m, n, ret;
struct tcs_response *resp = NULL;
+ unsigned long flags;
tcs = get_tcs_for_msg(drv, msg);
if (IS_ERR(tcs))
return PTR_ERR(tcs);
+ if (trigger)
+ resp = setup_response(drv, msg, chan, TCS_M_INIT, 0);
+
/* Identify the sequential slots that we can write to */
- spin_lock(&tcs->tcs_lock);
+ spin_lock_irqsave(&tcs->tcs_lock, flags);
slot = find_slots(tcs, msg);
if (slot < 0) {
dev_err(dev, "No TCS slot found.\n");
- spin_unlock(&tcs->tcs_lock);
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
+ if (resp)
+ free_response(resp);
return slot;
}
+
+ if (trigger) {
+ ret = check_for_req_inflight(drv, tcs, msg);
+ if (ret) {
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
+ return ret;
+ }
+ }
+
/* Mark the slots as in-use, before we unlock */
if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
bitmap_set(tcs->slots, slot, msg->num_payload);
@@ -713,27 +735,16 @@ static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
tcs->cmd_addr[slot + i] = msg->payload[i].addr;
- if (trigger)
- resp = setup_response(tcs, chan, msg,
- slot / tcs->ncpt + tcs->tcs_offset);
-
- spin_unlock(&tcs->tcs_lock);
-
- /*
- * Find the TCS corresponding to the slot and start writing.
- * Break down 'slot' into a 'n' position in the 'm'th TCS.
- */
offset = slot / tcs->ncpt;
m = offset + tcs->tcs_offset;
n = slot % tcs->ncpt;
- spin_lock(&tcs->tcs_m_lock[offset]);
+	/* Conflicting in-flight addresses were already rejected above */
if (trigger) {
- /* Block, if we have an address from the msg in flight */
- wait_for_req_inflight(drv, tcs, msg);
- /* If the TCS is busy there is nothing to do but spin wait */
- while (!tcs_is_free(drv, m))
- cpu_relax();
+ resp->m = m;
+ /* Mark the TCS as busy */
+ atomic_set(&drv->tcs_in_use[m], 1);
+ atomic_inc(&drv->tcs_send_count[m]);
}
/* Write to the TCS or AMC */
@@ -743,7 +754,7 @@ static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
if (trigger)
schedule_tcs_err_response(resp);
- spin_unlock(&tcs->tcs_m_lock[offset]);
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
return 0;
}
@@ -760,24 +771,21 @@ static int tcs_mbox_invalidate(struct mbox_chan *chan)
int m, i;
int inv_types[] = { WAKE_TCS, SLEEP_TCS };
int type = 0;
+ unsigned long flags;
do {
tcs = get_tcs_of_type(drv, inv_types[type]);
if (IS_ERR(tcs))
return PTR_ERR(tcs);
- spin_lock(&tcs->tcs_lock);
+ spin_lock_irqsave(&tcs->tcs_lock, flags);
for (i = 0; i < tcs->num_tcs; i++) {
m = i + tcs->tcs_offset;
- spin_lock(&tcs->tcs_m_lock[i]);
- while (!tcs_is_free(drv, m))
- cpu_relax();
__tcs_buffer_invalidate(drv->reg_base, m);
- spin_unlock(&tcs->tcs_m_lock[i]);
}
/* Mark the TCS as free */
bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
- spin_unlock(&tcs->tcs_lock);
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
} while (++type < ARRAY_SIZE(inv_types));
return 0;
@@ -799,6 +807,7 @@ static int chan_tcs_write(struct mbox_chan *chan, void *data)
struct tcs_mbox_msg *msg = data;
const struct device *dev = chan->cl->dev;
int ret = -EINVAL;
+ int count = 0;
if (!msg) {
dev_err(dev, "Payload error.\n");
@@ -835,17 +844,21 @@ static int chan_tcs_write(struct mbox_chan *chan, void *data)
tcs_mbox_invalidate(chan);
/* Post the message to the TCS and trigger */
- ret = tcs_mbox_write(chan, msg, true);
+ do {
+ ret = tcs_mbox_write(chan, msg, true);
+ if (ret == -EBUSY) {
+ ret = -EIO;
+ udelay(10);
+ } else
+ break;
+ } while (++count < 10);
tx_fail:
if (ret) {
struct tcs_drv *drv = container_of(chan->mbox,
- struct tcs_drv, mbox);
- struct tcs_response *resp = get_response_from_pool(drv);
-
- resp->chan = chan;
- resp->msg = msg;
- resp->err = ret;
+ struct tcs_drv, mbox);
+ struct tcs_response *resp = setup_response(
+ drv, msg, chan, TCS_M_INIT, ret);
dev_err(dev, "Error sending RPMH message %d\n", ret);
send_tcs_response(resp);
@@ -873,6 +886,7 @@ static int tcs_control_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg)
const struct device *dev = chan->cl->dev;
struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
struct tcs_mbox *tcs;
+ unsigned long flags;
tcs = get_tcs_of_type(drv, CONTROL_TCS);
if (IS_ERR(tcs))
@@ -883,9 +897,9 @@ static int tcs_control_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg)
return -EINVAL;
}
- spin_lock(&tcs->tcs_lock);
+ spin_lock_irqsave(&tcs->tcs_lock, flags);
__tcs_write_hidden(tcs->drv, drv->drv_id, msg);
- spin_unlock(&tcs->tcs_lock);
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
return 0;
}
@@ -1040,8 +1054,6 @@ static int tcs_drv_probe(struct platform_device *pdev)
tcs->ncpt = (tcs->type == CONTROL_TCS) ? TCS_HIDDEN_MAX_SLOTS
: ncpt;
spin_lock_init(&tcs->tcs_lock);
- for (j = 0; j < ARRAY_SIZE(tcs->tcs_m_lock); j++)
- spin_lock_init(&tcs->tcs_m_lock[j]);
if (tcs->num_tcs <= 0 || tcs->type == CONTROL_TCS)
continue;
@@ -1116,7 +1128,7 @@ static int tcs_drv_probe(struct platform_device *pdev)
return irq;
ret = devm_request_irq(&pdev->dev, irq, tcs_irq_handler,
- IRQF_ONESHOT | IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
"tcs_irq", drv);
if (ret)
return ret;
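Note on the response-pool rework in qti-tcs.c above: a response object is now allocated before a TCS is chosen, bound to a TCS index only once one is claimed, and looked up again by that index from the IRQ handler. A condensed sketch of the flow for a triggered request, using only names introduced or kept by this patch (error handling and the register writes omitted):

	resp = setup_response(drv, msg, chan, TCS_M_INIT, 0);	/* not yet bound */
	/* ... under tcs->tcs_lock: find_slots(), check_for_req_inflight() ... */
	resp->m = m;				/* bind to the claimed TCS index */
	atomic_set(&drv->tcs_in_use[m], 1);	/* mark that TCS busy */
	schedule_tcs_err_response(resp);	/* arm the timeout */
	/* tcs_irq_handler() later finds it via get_response(drv, m); the tasklet
	 * runs tcs_notify_tx_done() and free_response() returns it to the pool. */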
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
index 4f75a19..019a775 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
@@ -28,25 +28,34 @@
#endif
static struct cam_req_mgr_util_hdl_tbl *hdl_tbl;
-static struct mutex hdl_tbl_mutex = __MUTEX_INITIALIZER(hdl_tbl_mutex);
+static DEFINE_SPINLOCK(hdl_tbl_lock);
int cam_req_mgr_util_init(void)
{
int rc = 0;
int bitmap_size;
+ static struct cam_req_mgr_util_hdl_tbl *hdl_tbl_local;
- mutex_lock(&hdl_tbl_mutex);
if (hdl_tbl) {
rc = -EINVAL;
pr_err("Hdl_tbl is already present\n");
goto hdl_tbl_check_failed;
}
- hdl_tbl = kzalloc(sizeof(*hdl_tbl), GFP_KERNEL);
- if (!hdl_tbl) {
+ hdl_tbl_local = kzalloc(sizeof(*hdl_tbl), GFP_KERNEL);
+ if (!hdl_tbl_local) {
rc = -ENOMEM;
goto hdl_tbl_alloc_failed;
}
+ spin_lock_bh(&hdl_tbl_lock);
+ if (hdl_tbl) {
+ spin_unlock_bh(&hdl_tbl_lock);
+ rc = -EEXIST;
+ kfree(hdl_tbl_local);
+ goto hdl_tbl_check_failed;
+ }
+ hdl_tbl = hdl_tbl_local;
+ spin_unlock_bh(&hdl_tbl_lock);
bitmap_size = BITS_TO_LONGS(CAM_REQ_MGR_MAX_HANDLES) * sizeof(long);
hdl_tbl->bitmap = kzalloc(sizeof(bitmap_size), GFP_KERNEL);
@@ -55,7 +64,6 @@ int cam_req_mgr_util_init(void)
goto bitmap_alloc_fail;
}
hdl_tbl->bits = bitmap_size * BITS_PER_BYTE;
- mutex_unlock(&hdl_tbl_mutex);
return rc;
@@ -64,16 +72,15 @@ int cam_req_mgr_util_init(void)
hdl_tbl = NULL;
hdl_tbl_alloc_failed:
hdl_tbl_check_failed:
- mutex_unlock(&hdl_tbl_mutex);
return rc;
}
int cam_req_mgr_util_deinit(void)
{
- mutex_lock(&hdl_tbl_mutex);
+ spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
pr_err("Hdl tbl is NULL\n");
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
@@ -81,7 +88,7 @@ int cam_req_mgr_util_deinit(void)
hdl_tbl->bitmap = NULL;
kfree(hdl_tbl);
hdl_tbl = NULL;
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return 0;
}
@@ -90,10 +97,10 @@ int cam_req_mgr_util_free_hdls(void)
{
int i = 0;
- mutex_lock(&hdl_tbl_mutex);
+ spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
pr_err("Hdl tbl is NULL\n");
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
@@ -107,7 +114,7 @@ int cam_req_mgr_util_free_hdls(void)
}
}
bitmap_zero(hdl_tbl->bitmap, CAM_REQ_MGR_MAX_HANDLES);
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return 0;
}
@@ -132,17 +139,17 @@ int32_t cam_create_session_hdl(void *priv)
int rand = 0;
int32_t handle = 0;
- mutex_lock(&hdl_tbl_mutex);
+ spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
pr_err("Hdl tbl is NULL\n");
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
idx = cam_get_free_handle_index();
if (idx < 0) {
pr_err("Unable to create session handle\n");
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return idx;
}
@@ -154,7 +161,7 @@ int32_t cam_create_session_hdl(void *priv)
hdl_tbl->hdl[idx].state = HDL_ACTIVE;
hdl_tbl->hdl[idx].priv = priv;
hdl_tbl->hdl[idx].ops = NULL;
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return handle;
}
@@ -165,17 +172,17 @@ int32_t cam_create_device_hdl(struct cam_create_dev_hdl *hdl_data)
int rand = 0;
int32_t handle;
- mutex_lock(&hdl_tbl_mutex);
+ spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
pr_err("Hdl tbl is NULL\n");
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
idx = cam_get_free_handle_index();
if (idx < 0) {
pr_err("Unable to create device handle\n");
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return idx;
}
@@ -187,7 +194,7 @@ int32_t cam_create_device_hdl(struct cam_create_dev_hdl *hdl_data)
hdl_tbl->hdl[idx].state = HDL_ACTIVE;
hdl_tbl->hdl[idx].priv = hdl_data->priv;
hdl_tbl->hdl[idx].ops = hdl_data->ops;
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return handle;
}
@@ -198,7 +205,7 @@ void *cam_get_device_priv(int32_t dev_hdl)
int type;
void *priv;
- mutex_lock(&hdl_tbl_mutex);
+ spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
pr_err("Hdl tbl is NULL\n");
goto device_priv_fail;
@@ -227,12 +234,12 @@ void *cam_get_device_priv(int32_t dev_hdl)
}
priv = hdl_tbl->hdl[idx].priv;
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return priv;
device_priv_fail:
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return NULL;
}
@@ -242,7 +249,7 @@ void *cam_get_device_ops(int32_t dev_hdl)
int type;
void *ops;
- mutex_lock(&hdl_tbl_mutex);
+ spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
pr_err("Hdl tbl is NULL\n");
goto device_ops_fail;
@@ -271,12 +278,12 @@ void *cam_get_device_ops(int32_t dev_hdl)
}
ops = hdl_tbl->hdl[idx].ops;
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return ops;
device_ops_fail:
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return NULL;
}
@@ -285,7 +292,7 @@ static int cam_destroy_hdl(int32_t dev_hdl, int dev_hdl_type)
int idx;
int type;
- mutex_lock(&hdl_tbl_mutex);
+ spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
pr_err("Hdl tbl is NULL\n");
goto destroy_hdl_fail;
@@ -315,12 +322,12 @@ static int cam_destroy_hdl(int32_t dev_hdl, int dev_hdl_type)
hdl_tbl->hdl[idx].state = HDL_FREE;
clear_bit(idx, hdl_tbl->bitmap);
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return 0;
destroy_hdl_fail:
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
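Note on the hdl_tbl locking change above: with the mutex replaced by a spinlock, the GFP_KERNEL allocation in cam_req_mgr_util_init() may no longer happen under the lock, so the table is allocated first and the "already present" check is repeated once the lock is held. A generic sketch of that allocate-then-recheck pattern (the names below are placeholders, not taken from this driver):

	new = kzalloc(sizeof(*new), GFP_KERNEL);	/* may sleep; done unlocked */
	if (!new)
		return -ENOMEM;

	spin_lock_bh(&tbl_lock);
	if (tbl) {			/* a racing initializer won */
		spin_unlock_bh(&tbl_lock);
		kfree(new);
		return -EEXIST;
	}
	tbl = new;
	spin_unlock_bh(&tbl_lock);
	return 0;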
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index e327723..08466b1 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -104,14 +104,14 @@ static void cam_req_mgr_process_workq(struct work_struct *w)
workq = (struct cam_req_mgr_core_workq *)
container_of(w, struct cam_req_mgr_core_workq, work);
- spin_lock(&workq->task.lock);
list_for_each_entry_safe(task, task_save,
&workq->task.process_head, entry) {
atomic_sub(1, &workq->task.pending_cnt);
+ spin_lock(&workq->task.lock);
list_del_init(&task->entry);
+ spin_unlock(&workq->task.lock);
cam_req_mgr_process_task(task);
}
- spin_unlock(&workq->task.lock);
CRM_DBG("processed task %p free_cnt %d",
task, atomic_read(&workq->task.free_cnt));
}
@@ -138,7 +138,6 @@ int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task)
goto end;
}
- spin_lock(&workq->task.lock);
if (task->cancel == 1) {
cam_req_mgr_workq_put_task(task);
CRM_WARN("task aborted and queued back to pool");
@@ -146,12 +145,14 @@ int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task)
spin_unlock(&workq->task.lock);
goto end;
}
+ spin_lock(&workq->task.lock);
list_add_tail(&task->entry,
&workq->task.process_head);
+ spin_unlock(&workq->task.lock);
atomic_add(1, &workq->task.pending_cnt);
CRM_DBG("enq task %p pending_cnt %d",
task, atomic_read(&workq->task.pending_cnt));
- spin_unlock(&workq->task.lock);
+
queue_work(workq->job, &workq->work);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index a0b53bb..3bf6ce0 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -140,6 +140,18 @@ struct sde_smmu_client {
int domain;
};
+/*
+ * struct sde_rot_debug_bus: rotator debugbus header structure
+ * @wr_addr: write address for debugbus controller
+ * @block_id: rotator debugbus block id
+ * @test_id: rotator debugbus test id
+ */
+struct sde_rot_debug_bus {
+ u32 wr_addr;
+ u32 block_id;
+ u32 test_id;
+};
+
struct sde_rot_vbif_debug_bus {
u32 disable_bus_addr;
u32 block_bus_addr;
@@ -191,6 +203,8 @@ struct sde_rot_data_type {
struct sde_rot_vbif_debug_bus *nrt_vbif_dbg_bus;
u32 nrt_vbif_dbg_bus_size;
+ struct sde_rot_debug_bus *rot_dbg_bus;
+ u32 rot_dbg_bus_size;
struct sde_rot_regdump *regdump;
u32 regdump_size;
@@ -199,6 +213,8 @@ struct sde_rot_data_type {
int sec_cam_en;
struct ion_client *iclient;
+
+ bool clk_always_on;
};
int sde_rotator_base_init(struct sde_rot_data_type **pmdata,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index e56c70a..e9ff67c 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -30,6 +30,7 @@
#define SDE_EVTLOG_DEFAULT_PANIC 1
#define SDE_EVTLOG_DEFAULT_REGDUMP SDE_ROT_DBG_DUMP_IN_MEM
#define SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP SDE_ROT_DBG_DUMP_IN_MEM
+#define SDE_EVTLOG_DEFAULT_ROT_DBGBUSDUMP SDE_ROT_DBG_DUMP_IN_MEM
/*
* evtlog will print this number of entries when it is called through
@@ -53,6 +54,8 @@
#define GROUP_BYTES 4
#define ROW_BYTES 16
+#define SDE_ROT_TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
+
static DEFINE_SPINLOCK(sde_rot_xlock);
/*
@@ -86,11 +89,14 @@ struct tlog {
* @panic_on_err - boolean indicates issue panic after EVTLOG dump
* @enable_reg_dump - control in-log/memory dump for rotator registers
* @enable_vbif_dbgbus_dump - control in-log/memory dump for VBIF debug bus
+ * @enable_rot_dbgbus_dump - control in-log/memory dump for rotator debug bus
* @evtlog_dump_work - schedule work strucutre for timeout handler
* @work_dump_reg - storage for register dump control in schedule work
* @work_panic - storage for panic control in schedule work
* @work_vbif_dbgbus - storage for VBIF debug bus control in schedule work
+ * @work_rot_dbgbus - storage for rotator debug bus control in schedule work
* @nrt_vbif_dbgbus_dump - memory buffer for VBIF debug bus dumping
+ * @rot_dbgbus_dump - memory buffer for rotator debug bus dumping
* @reg_dump_array - memory buffer for rotator registers dumping
*/
struct sde_rot_dbg_evtlog {
@@ -103,14 +109,88 @@ struct sde_rot_dbg_evtlog {
u32 panic_on_err;
u32 enable_reg_dump;
u32 enable_vbif_dbgbus_dump;
+ u32 enable_rot_dbgbus_dump;
struct work_struct evtlog_dump_work;
bool work_dump_reg;
bool work_panic;
bool work_vbif_dbgbus;
+ bool work_rot_dbgbus;
u32 *nrt_vbif_dbgbus_dump; /* address for the nrt vbif debug bus dump */
+ u32 *rot_dbgbus_dump;
u32 *reg_dump_array[SDE_ROT_DEBUG_BASE_MAX];
} sde_rot_dbg_evtlog;
+static void sde_rot_dump_debug_bus(u32 bus_dump_flag, u32 **dump_mem)
+{
+ struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+ bool in_log, in_mem;
+ u32 *dump_addr = NULL;
+ u32 status = 0;
+ struct sde_rot_debug_bus *head;
+ phys_addr_t phys = 0;
+ int i;
+ u32 offset;
+ void __iomem *base;
+
+ in_log = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
+ in_mem = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);
+ base = mdata->sde_io.base;
+
+ if (!base || !mdata->rot_dbg_bus || !mdata->rot_dbg_bus_size)
+ return;
+
+ pr_info("======== SDE Rotator Debug bus DUMP =========\n");
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
+ mdata->rot_dbg_bus_size * 4 * sizeof(u32),
+ &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
+ __func__, dump_addr,
+ dump_addr + (u32)mdata->rot_dbg_bus_size * 16);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: allocation fails\n");
+ }
+ }
+
+ sde_smmu_ctrl(1);
+
+ for (i = 0; i < mdata->rot_dbg_bus_size; i++) {
+ head = mdata->rot_dbg_bus + i;
+ writel_relaxed(SDE_ROT_TEST_MASK(head->block_id, head->test_id),
+ base + head->wr_addr);
+ wmb(); /* make sure test bits were written */
+
+ offset = head->wr_addr + 0x4;
+
+ status = readl_relaxed(base + offset);
+
+ if (in_log)
+ pr_err("waddr=0x%x blk=%d tst=%d val=0x%x\n",
+ head->wr_addr, head->block_id, head->test_id,
+ status);
+
+ if (dump_addr && in_mem) {
+ dump_addr[i*4] = head->wr_addr;
+ dump_addr[i*4 + 1] = head->block_id;
+ dump_addr[i*4 + 2] = head->test_id;
+ dump_addr[i*4 + 3] = status;
+ }
+
+ /* Disable debug bus once we are done */
+ writel_relaxed(0, base + head->wr_addr);
+ }
+
+ sde_smmu_ctrl(0);
+
+ pr_info("========End Debug bus=========\n");
+}
+
/*
* sde_rot_evtlog_is_enabled - helper function for checking EVTLOG
* enable/disable
@@ -518,18 +598,26 @@ static ssize_t sde_rot_evtlog_dump_write(struct file *file,
* @dump_vbif_debug_bus: boolean indicates VBIF debug bus dump
*/
static void sde_rot_evtlog_dump_helper(bool dead, const char *panic_name,
- bool dump_rot, bool dump_vbif_debug_bus)
+ bool dump_rot, bool dump_vbif_debug_bus, bool dump_rot_debug_bus)
{
sde_rot_evtlog_dump_all();
- if (dump_rot)
- sde_rot_dump_reg_all();
+ if (dump_rot_debug_bus)
+ sde_rot_dump_debug_bus(
+ sde_rot_dbg_evtlog.enable_rot_dbgbus_dump,
+ &sde_rot_dbg_evtlog.rot_dbgbus_dump);
if (dump_vbif_debug_bus)
sde_rot_dump_vbif_debug_bus(
sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump,
&sde_rot_dbg_evtlog.nrt_vbif_dbgbus_dump);
+ /*
+	 * Rotator registers are always dumped last
+ */
+ if (dump_rot)
+ sde_rot_dump_reg_all();
+
if (dead)
panic(panic_name);
}
@@ -544,7 +632,8 @@ static void sde_rot_evtlog_debug_work(struct work_struct *work)
sde_rot_dbg_evtlog.work_panic,
"evtlog_workitem",
sde_rot_dbg_evtlog.work_dump_reg,
- sde_rot_dbg_evtlog.work_vbif_dbgbus);
+ sde_rot_dbg_evtlog.work_vbif_dbgbus,
+ sde_rot_dbg_evtlog.work_rot_dbgbus);
}
/*
@@ -569,6 +658,7 @@ void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...)
bool dead = false;
bool dump_rot = false;
bool dump_vbif_dbgbus = false;
+ bool dump_rot_dbgbus = false;
char *blk_name = NULL;
va_list args;
@@ -590,6 +680,9 @@ void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...)
if (!strcmp(blk_name, "vbif_dbg_bus"))
dump_vbif_dbgbus = true;
+ if (!strcmp(blk_name, "rot_dbg_bus"))
+ dump_rot_dbgbus = true;
+
if (!strcmp(blk_name, "panic"))
dead = true;
}
@@ -600,10 +693,11 @@ void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...)
sde_rot_dbg_evtlog.work_panic = dead;
sde_rot_dbg_evtlog.work_dump_reg = dump_rot;
sde_rot_dbg_evtlog.work_vbif_dbgbus = dump_vbif_dbgbus;
+ sde_rot_dbg_evtlog.work_rot_dbgbus = dump_rot_dbgbus;
schedule_work(&sde_rot_dbg_evtlog.evtlog_dump_work);
} else {
sde_rot_evtlog_dump_helper(dead, name, dump_rot,
- dump_vbif_dbgbus);
+ dump_vbif_dbgbus, dump_rot_dbgbus);
}
}
@@ -836,6 +930,13 @@ static int sde_rotator_base_create_debugfs(
return -EINVAL;
}
+ mdata->clk_always_on = false;
+ if (!debugfs_create_bool("clk_always_on", 0644,
+ debugfs_root, &mdata->clk_always_on)) {
+ SDEROT_WARN("failed to create debugfs clk_always_on\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -919,12 +1020,16 @@ static int sde_rotator_evtlog_create_debugfs(
&sde_rot_dbg_evtlog.enable_reg_dump);
debugfs_create_u32("vbif_dbgbus_dump", 0644, sde_rot_dbg_evtlog.evtlog,
&sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump);
+ debugfs_create_u32("rot_dbgbus_dump", 0644, sde_rot_dbg_evtlog.evtlog,
+ &sde_rot_dbg_evtlog.enable_rot_dbgbus_dump);
sde_rot_dbg_evtlog.evtlog_enable = SDE_EVTLOG_DEFAULT_ENABLE;
sde_rot_dbg_evtlog.panic_on_err = SDE_EVTLOG_DEFAULT_PANIC;
sde_rot_dbg_evtlog.enable_reg_dump = SDE_EVTLOG_DEFAULT_REGDUMP;
sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump =
SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP;
+ sde_rot_dbg_evtlog.enable_rot_dbgbus_dump =
+ SDE_EVTLOG_DEFAULT_ROT_DBGBUSDUMP;
pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n",
sde_rot_dbg_evtlog.evtlog_enable,
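Note on the rotator debug-bus dump added above: each test point is selected by writing SDE_ROT_TEST_MASK(block_id, test_id) = (id << 4) | (tp << 1) | BIT(0) to head->wr_addr, and the captured value is read back from head->wr_addr + 0x4 before the register is cleared again. Worked example: block_id = 4 (ROT_FETCH_0 in rot_dbgbus_r3[]) with test_id = 2 selects (4 << 4) | (2 << 1) | 1 = 0x45.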
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index a152573..8f2746d 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -385,11 +385,95 @@ static u32 sde_hw_rotator_v4_outpixfmts[] = {
};
static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
- {0x214, 0x21c, 16, 1, 0x10}, /* arb clients */
+ {0x214, 0x21c, 16, 1, 0x200}, /* arb clients main */
{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
{0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
};
+static struct sde_rot_debug_bus rot_dbgbus_r3[] = {
+ /*
+ * rottop - 0xA8850
+ */
+ /* REGDMA */
+ { 0XA8850, 0, 0 },
+ { 0XA8850, 0, 1 },
+ { 0XA8850, 0, 2 },
+ { 0XA8850, 0, 3 },
+ { 0XA8850, 0, 4 },
+
+ /* ROT_WB */
+ { 0XA8850, 1, 0 },
+ { 0XA8850, 1, 1 },
+ { 0XA8850, 1, 2 },
+ { 0XA8850, 1, 3 },
+ { 0XA8850, 1, 4 },
+ { 0XA8850, 1, 5 },
+ { 0XA8850, 1, 6 },
+ { 0XA8850, 1, 7 },
+
+ /* UBWC_DEC */
+ { 0XA8850, 2, 0 },
+
+ /* UBWC_ENC */
+ { 0XA8850, 3, 0 },
+
+ /* ROT_FETCH_0 */
+ { 0XA8850, 4, 0 },
+ { 0XA8850, 4, 1 },
+ { 0XA8850, 4, 2 },
+ { 0XA8850, 4, 3 },
+ { 0XA8850, 4, 4 },
+ { 0XA8850, 4, 5 },
+ { 0XA8850, 4, 6 },
+ { 0XA8850, 4, 7 },
+
+ /* ROT_FETCH_1 */
+ { 0XA8850, 5, 0 },
+ { 0XA8850, 5, 1 },
+ { 0XA8850, 5, 2 },
+ { 0XA8850, 5, 3 },
+ { 0XA8850, 5, 4 },
+ { 0XA8850, 5, 5 },
+ { 0XA8850, 5, 6 },
+ { 0XA8850, 5, 7 },
+
+ /* ROT_FETCH_2 */
+ { 0XA8850, 6, 0 },
+ { 0XA8850, 6, 1 },
+ { 0XA8850, 6, 2 },
+ { 0XA8850, 6, 3 },
+ { 0XA8850, 6, 4 },
+ { 0XA8850, 6, 5 },
+ { 0XA8850, 6, 6 },
+ { 0XA8850, 6, 7 },
+
+ /* ROT_FETCH_3 */
+ { 0XA8850, 7, 0 },
+ { 0XA8850, 7, 1 },
+ { 0XA8850, 7, 2 },
+ { 0XA8850, 7, 3 },
+ { 0XA8850, 7, 4 },
+ { 0XA8850, 7, 5 },
+ { 0XA8850, 7, 6 },
+ { 0XA8850, 7, 7 },
+
+ /* ROT_FETCH_4 */
+ { 0XA8850, 8, 0 },
+ { 0XA8850, 8, 1 },
+ { 0XA8850, 8, 2 },
+ { 0XA8850, 8, 3 },
+ { 0XA8850, 8, 4 },
+ { 0XA8850, 8, 5 },
+ { 0XA8850, 8, 6 },
+ { 0XA8850, 8, 7 },
+
+ /* ROT_UNPACK_0*/
+ { 0XA8850, 9, 0 },
+ { 0XA8850, 9, 1 },
+ { 0XA8850, 9, 2 },
+ { 0XA8850, 9, 3 },
+};
+
static struct sde_rot_regdump sde_rot_r3_regdump[] = {
{ "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
{ "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
@@ -1430,7 +1514,8 @@ static u32 sde_hw_rotator_wait_done_regdma(
sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
if (status & ROT_ERROR_BIT)
- SDEROT_EVTLOG_TOUT_HANDLER("rot", "vbif_dbg_bus", "panic");
+ SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
+ "vbif_dbg_bus", "panic");
return sts;
}
@@ -1614,8 +1699,8 @@ void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
SDEROT_ERR(
"Mismatch SWTS with HWTS: swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
swts, hwts, regdmasts, rotsts);
- SDEROT_EVTLOG_TOUT_HANDLER("rot", "vbif_dbg_bus",
- "panic");
+ SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
+ "vbif_dbg_bus", "panic");
}
/* Turn off rotator clock after checking rotator registers */
@@ -2134,6 +2219,17 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
BIT(XIN_WRITEBACK));
+ /*
+	 * For debug purposes, disable clock gating, i.e. keep clocks always on
+ */
+ if (mdata->clk_always_on) {
+ SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
+ SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
+ SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
+ 0xFFFF);
+ SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
+ }
+
return 0;
error:
@@ -2260,6 +2356,9 @@ static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
mdata->nrt_vbif_dbg_bus_size =
ARRAY_SIZE(nrt_vbif_dbg_bus_r3);
+ mdata->rot_dbg_bus = rot_dbgbus_r3;
+ mdata->rot_dbg_bus_size = ARRAY_SIZE(rot_dbgbus_r3);
+
mdata->regdump = sde_rot_r3_regdump;
mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 0fa3262..ac6ded0 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1115,6 +1115,14 @@ int create_pkt_cmd_session_set_property(
pkt->size += sizeof(u32) * 2;
break;
}
+ case HAL_PARAM_SECURE:
+ {
+ create_pkt_enable(pkt->rg_property_data,
+ HFI_PROPERTY_PARAM_SECURE_SESSION,
+ ((struct hal_enable *)pdata)->enable);
+ pkt->size += sizeof(u32) * 2;
+ break;
+ }
case HAL_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER:
{
create_pkt_enable(pkt->rg_property_data,
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 85e721f..3d3b7e9 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -778,7 +778,10 @@ int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
msm_dcvs_try_enable(inst);
break;
case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
+ property_id = HAL_PARAM_SECURE;
inst->flags |= VIDC_SECURE;
+ property_val = !!(inst->flags & VIDC_SECURE);
+ pdata = &property_val;
dprintk(VIDC_DBG, "Setting secure mode to: %d\n",
!!(inst->flags & VIDC_SECURE));
break;
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index c102687..e198d8e 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1501,6 +1501,9 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
inst->flags |= VIDC_SECURE;
+ property_id = HAL_PARAM_SECURE;
+ property_val = !!(inst->flags & VIDC_SECURE);
+ pdata = &property_val;
dprintk(VIDC_INFO, "Setting secure mode to: %d\n",
!!(inst->flags & VIDC_SECURE));
break;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index c042a4a..1a1078d 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1965,6 +1965,8 @@ static int venus_hfi_core_release(void *dev)
if (device->res->pm_qos_latency_us &&
pm_qos_request_active(&device->qos))
pm_qos_remove_request(&device->qos);
+
+ __resume(device);
__set_state(device, VENUS_STATE_DEINIT);
__unload_fw(device);
@@ -4054,6 +4056,8 @@ static void __unload_fw(struct venus_hfi_device *device)
__venus_power_off(device);
device->resources.fw.cookie = NULL;
__deinit_resources(device);
+
+ dprintk(VIDC_PROF, "Firmware unloaded successfully\n");
}
static int venus_hfi_get_fw_info(void *dev, struct hal_fw_info *fw_info)
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index b0beeec..f8e0a6a 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -223,6 +223,7 @@ enum hal_property {
HAL_PARAM_VENC_IFRAMESIZE_TYPE,
HAL_PARAM_VIDEO_CORES_USAGE,
HAL_PARAM_VIDEO_WORK_MODE,
+ HAL_PARAM_SECURE,
};
enum hal_domain {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 04d7d01..d6c4bcb 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -218,6 +218,8 @@ struct hfi_buffer_info {
(HFI_PROPERTY_PARAM_COMMON_START + 0x00E)
#define HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED \
(HFI_PROPERTY_PARAM_COMMON_START + 0x010)
+#define HFI_PROPERTY_PARAM_SECURE_SESSION \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x011)
#define HFI_PROPERTY_PARAM_WORK_MODE \
(HFI_PROPERTY_PARAM_COMMON_START + 0x015)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index bcc296b..d8e9599 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2200,6 +2200,17 @@ static int mmc_blk_err_check(struct mmc_card *card,
int need_retune = card->host->need_retune;
int ecc_err = 0, gen_err = 0;
+ if (card->host->sdr104_wa && mmc_card_sd(card) &&
+ (card->host->ios.timing == MMC_TIMING_UHS_SDR104) &&
+ !card->sdr104_blocked &&
+ (brq->data.error == -EILSEQ ||
+ brq->data.error == -EIO ||
+ brq->data.error == -ETIMEDOUT ||
+ brq->cmd.error == -EILSEQ ||
+ brq->cmd.error == -EIO ||
+ brq->cmd.error == -ETIMEDOUT))
+ card->err_in_sdr104 = true;
+
/*
* sbc.error indicates a problem with the set block count
* command. No data will have been transferred.
@@ -3640,6 +3651,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
struct mmc_async_req *areq;
const u8 packed_nr = 2;
u8 reqs = 0;
+ bool reset = false;
#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
unsigned long waitfor = jiffies;
#endif
@@ -3685,6 +3697,26 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
mmc_queue_bounce_post(mq_rq);
+ if (card->err_in_sdr104) {
+ /*
+			 * Data CRC/timeout errors will manifest as CMD/DATA
+			 * ERR, but we would like to retry these too. No harm
+			 * is done if this fails multiple times; we reduce the
+			 * bus speed and retry the same request anyway.
+			 * If that also fails, we don't override this status.
+ */
+ if (status == MMC_BLK_ABORT ||
+ status == MMC_BLK_CMD_ERR ||
+ status == MMC_BLK_DATA_ERR ||
+ status == MMC_BLK_RETRY)
+ /* reset on all of these errors and retry */
+ reset = true;
+
+ status = MMC_BLK_RETRY;
+ card->err_in_sdr104 = false;
+ }
+
switch (status) {
case MMC_BLK_SUCCESS:
case MMC_BLK_PARTIAL:
@@ -3725,8 +3757,32 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
break;
case MMC_BLK_RETRY:
retune_retry_done = brq->retune_retry_done;
- if (retry++ < MMC_BLK_MAX_RETRIES)
+ if (retry++ < MMC_BLK_MAX_RETRIES) {
break;
+ } else if (reset) {
+ reset = false;
+ /*
+				 * If we exhaust all the retries due to
+				 * CRC/timeout errors in SDR104 mode with UHS SD
+				 * cards, re-configure the card in SDR50
+				 * bus-speed mode.
+				 * All subsequent re-inits of this card will be
+				 * in SDR50 mode, unless it is removed and
+				 * re-inserted. A newly inserted UHS SD card may
+				 * start in SDR104 mode again if the card
+				 * supports it.
+ */
+ pr_err("%s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
+ req->rq_disk->disk_name);
+ mmc_host_clear_sdr104(card->host);
+ mmc_suspend_clk_scaling(card->host);
+ mmc_blk_reset(md, card->host, type);
+ /* SDR104 mode is blocked from now on */
+ card->sdr104_blocked = true;
+ /* retry 5 times again */
+ retry = 0;
+ break;
+ }
/* Fall through */
case MMC_BLK_ABORT:
if (!mmc_blk_reset(md, card->host, type) &&
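Note on the SDR104 fallback above: once the retries are exhausted, the host's SDR104 capability is dropped so that every subsequent re-initialization of this card negotiates SDR50/DDR50, until the card is removed and re-inserted. The mmc_host_clear_sdr104()/mmc_host_set_sdr104() helpers are not part of this hunk; a plausible shape, assuming they only toggle the host capability bit, would be:

	/* Assumed implementation, not taken from this patch. */
	static inline void mmc_host_clear_sdr104(struct mmc_host *host)
	{
		host->caps &= ~MMC_CAP_UHS_SDR104;
	}

	static inline void mmc_host_set_sdr104(struct mmc_host *host)
	{
		host->caps |= MMC_CAP_UHS_SDR104;
	}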
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 9987859..1397d03 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -617,12 +617,25 @@ static int mmc_devfreq_create_freq_table(struct mmc_host *host)
}
out:
- clk_scaling->devfreq_profile.freq_table = (unsigned long *)clk_scaling->freq_table;
+	/*
+	 * devfreq requires an unsigned long freq_table, while the
+	 * freq_table in clk_scaling is u32. Allocate a separate buffer
+	 * for it here and release it when exiting clock scaling.
+ */
+ clk_scaling->devfreq_profile.freq_table = kzalloc(
+ clk_scaling->freq_table_sz *
+ sizeof(*(clk_scaling->devfreq_profile.freq_table)),
+ GFP_KERNEL);
+ if (!clk_scaling->devfreq_profile.freq_table)
+ return -ENOMEM;
clk_scaling->devfreq_profile.max_state = clk_scaling->freq_table_sz;
- for (i = 0; i < clk_scaling->freq_table_sz; i++)
+ for (i = 0; i < clk_scaling->freq_table_sz; i++) {
+ clk_scaling->devfreq_profile.freq_table[i] =
+ clk_scaling->freq_table[i];
pr_debug("%s: freq[%d] = %u\n",
mmc_hostname(host), i, clk_scaling->freq_table[i]);
+ }
return 0;
}
@@ -858,6 +871,8 @@ int mmc_exit_clk_scaling(struct mmc_host *host)
return err;
}
+ kfree(host->clk_scaling.devfreq_profile.freq_table);
+
host->clk_scaling.devfreq = NULL;
atomic_set(&host->clk_scaling.devfreq_abort, 1);
pr_debug("%s: devfreq was removed\n", mmc_hostname(host));
@@ -4175,6 +4190,10 @@ int _mmc_detect_card_removed(struct mmc_host *host)
if (ret) {
mmc_card_set_removed(host->card);
+ if (host->card->sdr104_blocked) {
+ mmc_host_set_sdr104(host);
+ host->card->sdr104_blocked = false;
+ }
pr_debug("%s: card remove detected\n", mmc_hostname(host));
}
@@ -4281,7 +4300,7 @@ void mmc_rescan(struct work_struct *work)
mmc_release_host(host);
goto out;
}
-+ mmc_rescan_try_freq(host, host->f_min);
+ mmc_rescan_try_freq(host, host->f_min);
mmc_release_host(host);
out:
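Note on the freq_table change above: struct devfreq_dev_profile expects an unsigned long *freq_table while clk_scaling stores u32 frequencies, hence the separate allocation and the element-by-element widening copy (freed again in mmc_exit_clk_scaling()). A standalone sketch of the conversion, with freqs[]/nr_freqs as placeholder inputs:

	unsigned long *tbl;
	int i;

	tbl = kcalloc(nr_freqs, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;
	for (i = 0; i < nr_freqs; i++)
		tbl[i] = freqs[i];	/* u32 -> unsigned long */
	profile->freq_table = tbl;
	profile->max_state = nr_freqs;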
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 3f39058..2adf42c 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -80,7 +80,6 @@ void mmc_init_context_info(struct mmc_host *host);
extern bool mmc_can_scale_clk(struct mmc_host *host);
extern int mmc_init_clk_scaling(struct mmc_host *host);
-extern int mmc_suspend_clk_scaling(struct mmc_host *host);
extern int mmc_resume_clk_scaling(struct mmc_host *host);
extern int mmc_exit_clk_scaling(struct mmc_host *host);
extern unsigned long mmc_get_max_frequency(struct mmc_host *host);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 7f66ad3..7112f9f 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -432,26 +432,26 @@ static void sd_update_bus_speed_mode(struct mmc_card *card)
if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104) &&
(card->host->f_max > UHS_SDR104_MIN_DTR)) {
- card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
- } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50) &&
- (card->host->f_max > UHS_DDR50_MIN_DTR)) {
- card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
+ card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR50) &&
(card->host->f_max > UHS_SDR50_MIN_DTR)) {
- card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+ card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+ } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50) &&
+ (card->host->f_max > UHS_DDR50_MIN_DTR)) {
+ card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25) &&
(card->host->f_max > UHS_SDR25_MIN_DTR)) {
- card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
+ card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR12)) {
- card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+ card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
}
}
@@ -1313,6 +1313,8 @@ static int _mmc_sd_resume(struct mmc_host *host)
#endif
mmc_card_clr_suspended(host->card);
+ if (host->card->sdr104_blocked)
+ goto out;
err = mmc_resume_clk_scaling(host);
if (err) {
pr_err("%s: %s: fail to resume clock scaling (%d)\n",
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 7123ef9..445fc47 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -830,6 +830,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
switch (uhs) {
case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_DDR50:
pinctrl = imx_data->pins_100mhz;
break;
case MMC_TIMING_UHS_SDR104:
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 9650085..fe62b69 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1939,6 +1939,8 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
if (of_get_property(np, "qcom,core_3_0v_support", NULL))
pdata->core_3_0v_support = true;
+ pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
+
return pdata;
out:
return NULL;
@@ -4422,6 +4424,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
if (msm_host->pdata->nonhotplug)
msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
+ msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
+
init_completion(&msm_host->pwr_irq_completion);
if (gpio_is_valid(msm_host->pdata->status_gpio)) {
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 533b241..53b1953 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -151,6 +151,7 @@ struct sdhci_msm_pltfm_data {
unsigned char sup_ice_clk_cnt;
struct sdhci_msm_pm_qos_data pm_qos_data;
bool core_3_0v_support;
+ bool sdr104_wa;
};
struct sdhci_msm_bus_vote {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 744e520..53a6ae8 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3096,7 +3096,10 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
mmc_hostname(host->mmc), intmask,
host->data->error, ktime_to_ms(ktime_sub(
ktime_get(), host->data_start_time)));
- sdhci_dumpregs(host);
+
+ if (!host->mmc->sdr104_wa ||
+ (host->mmc->ios.timing != MMC_TIMING_UHS_SDR104))
+ sdhci_dumpregs(host);
}
sdhci_finish_data(host);
} else {
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 0134ba3..3971256 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -148,11 +148,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
return err;
}
- if (bytes == 0) {
- err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
- if (err)
- return err;
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+ if (bytes == 0) {
err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
diff --git a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
index 61de231..3c89a73 100644
--- a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
+++ b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
@@ -210,6 +210,7 @@ void wcnss_prealloc_check_memory_leak(void)
#else
void wcnss_prealloc_check_memory_leak(void) {}
#endif
+EXPORT_SYMBOL(wcnss_prealloc_check_memory_leak);
int wcnss_pre_alloc_reset(void)
{
@@ -225,6 +226,7 @@ int wcnss_pre_alloc_reset(void)
return n;
}
+EXPORT_SYMBOL(wcnss_pre_alloc_reset);
static int __init wcnss_pre_alloc_init(void)
{
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 8b3216cd..6523cb0 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -6457,7 +6457,6 @@ static int msm_pcie_probe(struct platform_device *pdev)
}
dev_set_drvdata(&msm_pcie_dev[rc_idx].pdev->dev, &msm_pcie_dev[rc_idx]);
- msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
msm_pcie_dev[rc_idx].pdev);
@@ -6508,6 +6507,8 @@ static int msm_pcie_probe(struct platform_device *pdev)
goto decrease_rc_num;
}
+ msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
+
msm_pcie_dev[rc_idx].drv_ready = true;
if (msm_pcie_dev[rc_idx].boot_option &
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index d45fa51..a37947b 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -126,6 +126,7 @@ const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
__stringify(IPA_CLIENT_Q6_DECOMP_PROD),
__stringify(IPA_CLIENT_Q6_DECOMP2_PROD),
__stringify(IPA_CLIENT_UC_USB_PROD),
+ __stringify(IPA_CLIENT_ETHERNET_PROD),
/* Below PROD client type is only for test purpose */
__stringify(IPA_CLIENT_TEST_PROD),
@@ -164,6 +165,7 @@ const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
__stringify(IPA_CLIENT_Q6_DECOMP_CONS),
__stringify(IPA_CLIENT_Q6_DECOMP2_CONS),
__stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS),
+ __stringify(IPA_CLIENT_ETHERNET_CONS),
/* Below CONS client type is only for test purpose */
__stringify(IPA_CLIENT_TEST_CONS),
__stringify(IPA_CLIENT_TEST1_CONS),
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 15bb7b4..af4d4c8 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -1584,6 +1584,7 @@ static int ipa_init_smem_region(int memory_region_size,
struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
struct ipa_desc desc;
struct ipa_mem_buffer mem;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc;
if (memory_region_size == 0)
@@ -1603,7 +1604,7 @@ static int ipa_init_smem_region(int memory_region_size,
memset(mem.base, 0, mem.size);
cmd = kzalloc(sizeof(*cmd),
- GFP_KERNEL);
+ flag);
if (cmd == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
@@ -2166,6 +2167,7 @@ int _ipa_init_sram_v2(void)
struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
struct ipa_desc desc = {0};
struct ipa_mem_buffer mem;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc = 0;
phys_addr = ipa_ctx->ipa_wrapper_base +
@@ -2203,7 +2205,7 @@ int _ipa_init_sram_v2(void)
}
memset(mem.base, 0, mem.size);
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), flag);
if (cmd == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
@@ -2314,6 +2316,7 @@ int _ipa_init_hdr_v2(void)
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_hdr_init_local *cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc = 0;
mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
@@ -2325,7 +2328,7 @@ int _ipa_init_hdr_v2(void)
}
memset(mem.base, 0, mem.size);
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), flag);
if (cmd == NULL) {
IPAERR("Failed to alloc header init command object\n");
rc = -ENOMEM;
@@ -2360,6 +2363,7 @@ int _ipa_init_hdr_v2_5(void)
struct ipa_mem_buffer mem;
struct ipa_hdr_init_local *cmd = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
@@ -2370,7 +2374,7 @@ int _ipa_init_hdr_v2_5(void)
}
memset(mem.base, 0, mem.size);
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), flag);
if (cmd == NULL) {
IPAERR("Failed to alloc header init command object\n");
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
@@ -2411,7 +2415,7 @@ int _ipa_init_hdr_v2_5(void)
memset(mem.base, 0, mem.size);
memset(&desc, 0, sizeof(desc));
- dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_KERNEL);
+ dma_cmd = kzalloc(sizeof(*dma_cmd), flag);
if (dma_cmd == NULL) {
IPAERR("Failed to alloc immediate command object\n");
dma_free_coherent(ipa_ctx->pdev,
@@ -2462,6 +2466,7 @@ int _ipa_init_rt4_v2(void)
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v4_routing_init *v4_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u32 *entry;
int i;
int rc = 0;
@@ -2486,7 +2491,7 @@ int _ipa_init_rt4_v2(void)
entry++;
}
- v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL);
+ v4_cmd = kzalloc(sizeof(*v4_cmd), flag);
if (v4_cmd == NULL) {
IPAERR("Failed to alloc v4 routing init command object\n");
rc = -ENOMEM;
@@ -2522,6 +2527,7 @@ int _ipa_init_rt6_v2(void)
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v6_routing_init *v6_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u32 *entry;
int i;
int rc = 0;
@@ -2546,7 +2552,7 @@ int _ipa_init_rt6_v2(void)
entry++;
}
- v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL);
+ v6_cmd = kzalloc(sizeof(*v6_cmd), flag);
if (v6_cmd == NULL) {
IPAERR("Failed to alloc v6 routing init command object\n");
rc = -ENOMEM;
@@ -2582,6 +2588,7 @@ int _ipa_init_flt4_v2(void)
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v4_filter_init *v4_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u32 *entry;
int i;
int rc = 0;
@@ -2604,7 +2611,7 @@ int _ipa_init_flt4_v2(void)
entry++;
}
- v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL);
+ v4_cmd = kzalloc(sizeof(*v4_cmd), flag);
if (v4_cmd == NULL) {
IPAERR("Failed to alloc v4 fliter init command object\n");
rc = -ENOMEM;
@@ -2640,6 +2647,7 @@ int _ipa_init_flt6_v2(void)
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v6_filter_init *v6_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u32 *entry;
int i;
int rc = 0;
@@ -2662,7 +2670,7 @@ int _ipa_init_flt6_v2(void)
entry++;
}
- v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL);
+ v6_cmd = kzalloc(sizeof(*v6_cmd), flag);
if (v6_cmd == NULL) {
IPAERR("Failed to alloc v6 fliter init command object\n");
rc = -ENOMEM;
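
The IPA v2 hunks above repeat one pattern: build the kzalloc() flags from a GFP_KERNEL base (GFP_ATOMIC in the header-commit paths further down) and OR in GFP_DMA only when the context's use_dma_zone flag is set. A small standalone sketch of that flag composition, using placeholder flag values rather than the kernel's real GFP encodings:

/*
 * Sketch of the conditional allocation-flag pattern: start from a base
 * flag set and add a DMA-zone restriction only when the hardware needs it.
 */
#include <stdbool.h>
#include <stdio.h>

#define SK_GFP_KERNEL	0x01u	/* placeholder, not the kernel value */
#define SK_GFP_ATOMIC	0x02u	/* placeholder, not the kernel value */
#define SK_GFP_DMA	0x04u	/* placeholder, not the kernel value */

static unsigned int ipa_alloc_flags(bool use_dma_zone, bool atomic_ctx)
{
	unsigned int flag = atomic_ctx ? SK_GFP_ATOMIC : SK_GFP_KERNEL;

	/* restrict the allocation to the DMA zone only when required */
	if (use_dma_zone)
		flag |= SK_GFP_DMA;
	return flag;
}

int main(void)
{
	printf("process ctx, dma zone: 0x%x\n", ipa_alloc_flags(true, false));
	printf("atomic ctx, no dma   : 0x%x\n", ipa_alloc_flags(false, true));
	return 0;
}
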
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 3dca3e6..a822f66 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -322,8 +322,8 @@ int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
dma_address = desc->dma_address;
tx_pkt->no_unmap_dma = true;
}
- if (!dma_address) {
- IPAERR("failed to DMA wrap\n");
+ if (dma_mapping_error(ipa_ctx->pdev, dma_address)) {
+ IPAERR("dma_map_single failed\n");
goto fail_dma_map;
}
@@ -445,7 +445,7 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
}
dma_addr = dma_map_single(ipa_ctx->pdev,
transfer.iovec, size, DMA_TO_DEVICE);
- if (!dma_addr) {
+ if (dma_mapping_error(ipa_ctx->pdev, dma_addr)) {
IPAERR("dma_map_single failed for sps xfr buff\n");
kfree(transfer.iovec);
return -EFAULT;
@@ -493,6 +493,15 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
tx_pkt->mem.base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
+
+ if (dma_mapping_error(ipa_ctx->pdev,
+ tx_pkt->mem.phys_base)) {
+ IPAERR("dma_map_single ");
+ IPAERR("failed\n");
+ fail_dma_wrap = 1;
+ goto failure;
+ }
+
} else {
tx_pkt->mem.phys_base = desc[i].dma_address;
tx_pkt->no_unmap_dma = true;
@@ -1873,8 +1882,8 @@ static void ipa_wq_repl_rx(struct work_struct *work)
rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
sys->rx_buff_sz,
DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa_ctx->pdev,
+ rx_pkt->data.dma_addr)) {
pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
__func__, (void *)rx_pkt->data.dma_addr,
ptr, sys);
@@ -2029,18 +2038,20 @@ static void ipa_alloc_wlan_rx_common_cache(u32 size)
ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa_ctx->pdev,
+ rx_pkt->data.dma_addr)) {
IPAERR("dma_map_single failure %p for %p\n",
(void *)rx_pkt->data.dma_addr, ptr);
goto fail_dma_mapping;
}
+ spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
list_add_tail(&rx_pkt->link,
&ipa_ctx->wc_memb.wlan_comm_desc_list);
rx_len_cached = ++ipa_ctx->wc_memb.wlan_comm_total_cnt;
ipa_ctx->wc_memb.wlan_comm_free_cnt++;
+ spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
}
@@ -2101,8 +2112,8 @@ static void ipa_replenish_rx_cache(struct ipa_sys_context *sys)
rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
sys->rx_buff_sz,
DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa_ctx->pdev,
+ rx_pkt->data.dma_addr)) {
IPAERR("dma_map_single failure %p for %p\n",
(void *)rx_pkt->data.dma_addr, ptr);
goto fail_dma_mapping;
@@ -2159,9 +2170,10 @@ static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys)
ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev,
ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0)
+ if (dma_mapping_error(ipa_ctx->pdev, rx_pkt->data.dma_addr)) {
+ IPAERR("dma_map_single failure for rx_pkt\n");
goto fail_dma_mapping;
+ }
list_add_tail(&rx_pkt->link, &sys->head_desc_list);
rx_len_cached = ++sys->len;
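
The ipa_dp.c hunks above replace ad-hoc checks of the returned bus address (0 or ~0) with dma_mapping_error(), since a valid bus address may legitimately be 0 and the error cookie is platform-defined. A standalone sketch of the idea; map_single() and mapping_error() are stand-ins for the kernel's dma_map_single()/dma_mapping_error(), not their real implementations.

/*
 * Sketch: only the mapping layer knows how a failed mapping is encoded,
 * so callers ask it instead of comparing against magic address values.
 */
#include <stdint.h>
#include <stdio.h>

#define MAP_ERROR_COOKIE ((uint64_t)~0ull)	/* placeholder error value */

static uint64_t map_single(const void *cpu_addr)
{
	/* pretend the IOMMU rejected the buffer */
	(void)cpu_addr;
	return MAP_ERROR_COOKIE;
}

static int mapping_error(uint64_t bus_addr)
{
	/* the mapping layer, not the caller, owns the error encoding */
	return bus_addr == MAP_ERROR_COOKIE;
}

int main(void)
{
	char buf[64];
	uint64_t bus = map_single(buf);

	if (mapping_error(bus)) {
		fprintf(stderr, "dma map failed\n");
		return 1;
	}
	return 0;
}
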
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 25f8923..046f77f 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -268,6 +268,7 @@ int __ipa_commit_hdr_v2(void)
struct ipa_mem_buffer mem;
struct ipa_hdr_init_system *cmd = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL;
+ gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc = -EFAULT;
if (ipa_generate_hdr_hw_tbl(&mem)) {
@@ -281,7 +282,7 @@ int __ipa_commit_hdr_v2(void)
IPA_MEM_PART(apps_hdr_size));
goto fail_send_cmd;
} else {
- dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_ATOMIC);
+ dma_cmd = kzalloc(sizeof(*dma_cmd), flag);
if (dma_cmd == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -303,7 +304,7 @@ int __ipa_commit_hdr_v2(void)
IPA_MEM_PART(apps_hdr_size_ddr));
goto fail_send_cmd;
} else {
- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), flag);
if (cmd == NULL) {
IPAERR("fail to alloc hdr init cmd\n");
rc = -ENOMEM;
@@ -359,6 +360,7 @@ int __ipa_commit_hdr_v2_5(void)
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_hdr = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_ctx = NULL;
struct ipa_register_write *reg_write_cmd = NULL;
+ gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc = -EFAULT;
u32 proc_ctx_size;
u32 proc_ctx_ofst;
@@ -383,7 +385,7 @@ int __ipa_commit_hdr_v2_5(void)
IPA_MEM_PART(apps_hdr_size));
goto fail_send_cmd1;
} else {
- dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), GFP_ATOMIC);
+ dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), flag);
if (dma_cmd_hdr == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -406,7 +408,7 @@ int __ipa_commit_hdr_v2_5(void)
goto fail_send_cmd1;
} else {
hdr_init_cmd = kzalloc(sizeof(*hdr_init_cmd),
- GFP_ATOMIC);
+ flag);
if (hdr_init_cmd == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -431,7 +433,7 @@ int __ipa_commit_hdr_v2_5(void)
goto fail_send_cmd1;
} else {
dma_cmd_ctx = kzalloc(sizeof(*dma_cmd_ctx),
- GFP_ATOMIC);
+ flag);
if (dma_cmd_ctx == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -456,7 +458,7 @@ int __ipa_commit_hdr_v2_5(void)
goto fail_send_cmd1;
} else {
reg_write_cmd = kzalloc(sizeof(*reg_write_cmd),
- GFP_ATOMIC);
+ flag);
if (reg_write_cmd == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -722,6 +724,11 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
entry->hdr,
entry->hdr_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(ipa_ctx->pdev,
+ entry->phys_base)) {
+ IPAERR("dma_map_single failure for entry\n");
+ goto fail_dma_mapping;
+ }
}
} else {
entry->is_hdr_proc_ctx = false;
@@ -798,6 +805,8 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
list_del(&entry->link);
dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
entry->hdr_len, DMA_TO_DEVICE);
+fail_dma_mapping:
+ entry->is_hdr_proc_ctx = false;
bad_hdr_len:
entry->cookie = 0;
kmem_cache_free(ipa_ctx->hdr_cache, entry);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 84849a2..21fdec0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -695,6 +695,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)
struct ipa_mem_buffer head;
struct ipa_hw_imm_cmd_dma_shared_mem *cmd1 = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *cmd2 = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u16 avail;
u32 num_modem_rt_index;
int rc = 0;
@@ -745,7 +746,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)
}
cmd1 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
- GFP_KERNEL);
+ flag);
if (cmd1 == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
@@ -762,7 +763,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)
if (lcl) {
cmd2 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
- GFP_KERNEL);
+ flag);
if (cmd2 == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 3937cfe..9d25e4a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -3509,6 +3509,7 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
if (!gsi_channel_props.ring_base_vaddr) {
IPAERR("fail to dma alloc %u bytes\n",
gsi_channel_props.ring_len);
+ result = -ENOMEM;
goto fail_alloc_channel_ring;
}
gsi_channel_props.ring_base_addr = dma_addr;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 2ca33d8..3af4486 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1246,7 +1246,9 @@ struct ipa3_plat_drv_res {
* Order and type of members should not be changed without a suitable change
* to DTS file or the code that reads it.
*
- * IPA v3.0 SRAM memory layout:
+ * IPA SRAM memory layout:
+ * +-------------------------+
+ * | UC MEM |
* +-------------------------+
* | UC INFO |
* +-------------------------+
@@ -1314,10 +1316,14 @@ struct ipa3_plat_drv_res {
* +-------------------------+
* | CANARY |
* +-------------------------+
+ * | CANARY |
+ * +-------------------------+
* | MODEM MEM |
* +-------------------------+
* | CANARY |
* +-------------------------+
+ * | UC EVENT RING | From IPA 3.5
+ * +-------------------------+
*/
struct ipa3_mem_partition {
u32 ofst_start;
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index efcc4e8..9f04957 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -28,6 +28,7 @@
#include <asm/cacheflush.h>
#include <asm/system_misc.h>
+#include <asm/memory.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/restart.h>
@@ -57,11 +58,17 @@ static int download_mode = 1;
#ifdef CONFIG_QCOM_DLOAD_MODE
#define EDL_MODE_PROP "qcom,msm-imem-emergency_download_mode"
#define DL_MODE_PROP "qcom,msm-imem-download_mode"
+#ifdef CONFIG_RANDOMIZE_BASE
+#define KASLR_OFFSET_PROP "qcom,msm-imem-kaslr_offset"
+#endif
static int in_panic;
static void *dload_mode_addr;
static bool dload_mode_enabled;
static void *emergency_dload_mode_addr;
+#ifdef CONFIG_RANDOMIZE_BASE
+static void *kaslr_imem_addr;
+#endif
static bool scm_dload_supported;
static int dload_set(const char *val, struct kernel_param *kp);
@@ -420,6 +427,27 @@ static int msm_restart_probe(struct platform_device *pdev)
pr_err("unable to map imem EDLOAD mode offset\n");
}
+#ifdef CONFIG_RANDOMIZE_BASE
+#define KASLR_OFFSET_BIT_MASK 0x00000000FFFFFFFF
+ np = of_find_compatible_node(NULL, NULL, KASLR_OFFSET_PROP);
+ if (!np) {
+ pr_err("unable to find DT imem KASLR_OFFSET node\n");
+ } else {
+ kaslr_imem_addr = of_iomap(np, 0);
+ if (!kaslr_imem_addr)
+ pr_err("unable to map imem KASLR offset\n");
+ }
+
+ if (kaslr_imem_addr && scm_is_secure_device()) {
+ __raw_writel(0xdead4ead, kaslr_imem_addr);
+ __raw_writel(KASLR_OFFSET_BIT_MASK &
+ (kimage_vaddr - KIMAGE_VADDR), kaslr_imem_addr + 4);
+ __raw_writel(KASLR_OFFSET_BIT_MASK &
+ ((kimage_vaddr - KIMAGE_VADDR) >> 32),
+ kaslr_imem_addr + 8);
+ iounmap(kaslr_imem_addr);
+ }
+#endif
#endif
np = of_find_compatible_node(NULL, NULL,
"qcom,msm-imem-restart_reason");
@@ -484,4 +512,4 @@ static int __init msm_restart_init(void)
{
return platform_driver_register(&msm_restart_driver);
}
-device_initcall(msm_restart_init);
+pure_initcall(msm_restart_init);
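
The msm-poweroff hunk above publishes the KASLR offset to IMEM as a magic word followed by the low and high 32-bit halves of the offset. A runnable sketch of the same packing arithmetic; the writel-style helper and the example offset value are illustrative only.

/*
 * Sketch: split a 64-bit KASLR offset into magic + low32 + high32 words,
 * mirroring the three __raw_writel() calls in the hunk above.
 */
#include <stdint.h>
#include <stdio.h>

#define KASLR_MAGIC		0xdead4eadu
#define KASLR_OFFSET_MASK	0x00000000FFFFFFFFull

static void write_word(uint32_t *imem, int idx, uint32_t val)
{
	imem[idx] = val;	/* stands in for __raw_writel() */
}

int main(void)
{
	uint32_t imem[3] = { 0 };
	uint64_t kaslr_offset = 0x0000001234567000ull;	/* example offset */

	write_word(imem, 0, KASLR_MAGIC);
	write_word(imem, 1, (uint32_t)(KASLR_OFFSET_MASK & kaslr_offset));
	write_word(imem, 2, (uint32_t)(KASLR_OFFSET_MASK & (kaslr_offset >> 32)));

	printf("imem: %08x %08x %08x\n", imem[0], imem[1], imem[2]);
	return 0;
}
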
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index b985ecd..54bef52 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -434,23 +434,28 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
return rc;
}
- split_fcc(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
- pval.intval = slave_fcc_ua;
- rc = power_supply_set_property(chip->pl_psy,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
- if (rc < 0) {
- pr_err("Couldn't set parallel fcc, rc=%d\n", rc);
- return rc;
- }
+ if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
+ split_fcc(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
- chip->slave_fcc_ua = slave_fcc_ua;
+ pval.intval = slave_fcc_ua;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set parallel fcc, rc=%d\n", rc);
+ return rc;
+ }
- pval.intval = master_fcc_ua;
- rc = power_supply_set_property(chip->main_psy,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
- if (rc < 0) {
- pr_err("Could not set main fcc, rc=%d\n", rc);
- return rc;
+ chip->slave_fcc_ua = slave_fcc_ua;
+
+ pval.intval = master_fcc_ua;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Could not set main fcc, rc=%d\n", rc);
+ return rc;
+ }
}
pl_dbg(chip, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
@@ -511,13 +516,14 @@ static int pl_fv_vote_callback(struct votable *votable, void *data,
return 0;
}
-#define ICL_STEP_UV 25000
+#define ICL_STEP_UA 25000
static int usb_icl_vote_callback(struct votable *votable, void *data,
int icl_ua, const char *client)
{
int rc;
struct pl_data *chip = data;
union power_supply_propval pval = {0, };
+ bool rerun_aicl = false;
if (!chip->main_psy)
return 0;
@@ -543,22 +549,28 @@ static int usb_icl_vote_callback(struct votable *votable, void *data,
}
/* rerun AICL if new ICL is above settled ICL */
- if (icl_ua > pval.intval) {
- /* set a lower ICL */
- pval.intval = max(pval.intval - ICL_STEP_UV, ICL_STEP_UV);
- power_supply_set_property(chip->main_psy,
- POWER_SUPPLY_PROP_CURRENT_MAX,
- &pval);
- /* wait for ICL change */
- msleep(100);
+ if (icl_ua > pval.intval)
+ rerun_aicl = true;
- pval.intval = icl_ua;
+ if (rerun_aicl) {
+ /* set a lower ICL */
+ pval.intval = max(pval.intval - ICL_STEP_UA, ICL_STEP_UA);
power_supply_set_property(chip->main_psy,
POWER_SUPPLY_PROP_CURRENT_MAX,
&pval);
/* wait for ICL change */
msleep(100);
}
+
+ /* set the effective ICL */
+ pval.intval = icl_ua;
+ power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ &pval);
+ if (rerun_aicl)
+ /* wait for ICL change */
+ msleep(100);
+
vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, false, 0);
return 0;
@@ -678,9 +690,6 @@ static bool is_main_available(struct pl_data *chip)
chip->main_psy = power_supply_get_by_name("main");
- if (chip->main_psy)
- rerun_election(chip->usb_icl_votable);
-
return !!chip->main_psy;
}
@@ -859,7 +868,18 @@ static void status_change_work(struct work_struct *work)
struct pl_data *chip = container_of(work,
struct pl_data, status_change_work);
- if (!is_main_available(chip))
+ if (!chip->main_psy && is_main_available(chip)) {
+ /*
+ * re-run election for FCC/FV/ICL once main_psy
+ * is available to ensure all votes are reflected
+ * on hardware
+ */
+ rerun_election(chip->usb_icl_votable);
+ rerun_election(chip->fcc_votable);
+ rerun_election(chip->fv_votable);
+ }
+
+ if (!chip->main_psy)
return;
if (!is_batt_available(chip))
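
The usb_icl_vote_callback rework above separates the optional AICL nudge (step the settled ICL down by one step and wait) from the unconditional write of the requested ICL. A standalone sketch of that sequence; set_icl() and wait_for_icl() stand in for the power-supply property write and the driver's 100 ms settle delay.

/*
 * Sketch of the reworked AICL-rerun flow: nudge AICL only when the new
 * limit exceeds the settled one, then always program the effective ICL.
 */
#include <stdio.h>

#define ICL_STEP_UA 25000

static void set_icl(int ua)	{ printf("ICL <- %d uA\n", ua); }
static void wait_for_icl(void)	{ /* msleep(100) in the driver */ }

static void apply_icl(int settled_ua, int requested_ua)
{
	int rerun_aicl = requested_ua > settled_ua;

	if (rerun_aicl) {
		int lower = settled_ua - ICL_STEP_UA;

		/* never step below a single ICL step */
		set_icl(lower > ICL_STEP_UA ? lower : ICL_STEP_UA);
		wait_for_icl();
	}

	set_icl(requested_ua);	/* the effective limit is always written */
	if (rerun_aicl)
		wait_for_icl();
}

int main(void)
{
	apply_icl(500000, 900000);	/* needs the AICL nudge */
	apply_icl(1500000, 900000);	/* direct write only */
	return 0;
}
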
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index ca551a5a..5983b5c 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -128,11 +128,6 @@ enum fg_irq_index {
FG_IRQ_MAX,
};
-/* WA flags */
-enum {
- DELTA_SOC_IRQ_WA = BIT(0),
-};
-
/*
* List of FG_SRAM parameters. Please add a parameter only if it is an entry
* that will be used either to configure an entity (e.g. termination current)
@@ -152,6 +147,7 @@ enum fg_sram_param_id {
FG_SRAM_CC_SOC,
FG_SRAM_CC_SOC_SW,
FG_SRAM_ACT_BATT_CAP,
+ FG_SRAM_TIMEBASE,
/* Entries below here are configurable during initialization */
FG_SRAM_CUTOFF_VOLT,
FG_SRAM_EMPTY_VOLT,
@@ -212,6 +208,7 @@ struct fg_alg_flag {
enum wa_flags {
PMI8998_V1_REV_WA = BIT(0),
+ PM660_TSMC_OSC_WA = BIT(1),
};
enum slope_limit_status {
@@ -325,6 +322,23 @@ static const struct fg_pt fg_ln_table[] = {
{ 128000, 4852 },
};
+/* each tuple is - <temperature in degC, Timebase> */
+static const struct fg_pt fg_tsmc_osc_table[] = {
+ { -20, 395064 },
+ { -10, 398114 },
+ { 0, 401669 },
+ { 10, 404641 },
+ { 20, 408856 },
+ { 25, 412449 },
+ { 30, 416532 },
+ { 40, 420289 },
+ { 50, 425020 },
+ { 60, 430160 },
+ { 70, 434175 },
+ { 80, 439475 },
+ { 90, 444992 },
+};
+
struct fg_chip {
struct device *dev;
struct pmic_revid_data *pmic_rev_id;
@@ -336,6 +350,7 @@ struct fg_chip {
struct power_supply *dc_psy;
struct power_supply *parallel_psy;
struct iio_channel *batt_id_chan;
+ struct iio_channel *die_temp_chan;
struct fg_memif *sram;
struct fg_irq_info *irqs;
struct votable *awake_votable;
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index b42a65d..806460f 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -47,6 +47,7 @@
#define ESR_UPD_TIGHT_LOW_TEMP_OFFSET 2
#define ESR_UPD_BROAD_LOW_TEMP_OFFSET 3
#define KI_COEFF_MED_DISCHG_WORD 9
+#define TIMEBASE_OFFSET 1
#define KI_COEFF_MED_DISCHG_OFFSET 3
#define KI_COEFF_HI_DISCHG_WORD 10
#define KI_COEFF_HI_DISCHG_OFFSET 0
@@ -261,6 +262,8 @@ static struct fg_sram_param pmi8998_v2_sram_params[] = {
fg_decode_cc_soc),
PARAM(ACT_BATT_CAP, ACT_BATT_CAP_BKUP_WORD, ACT_BATT_CAP_BKUP_OFFSET, 2,
1, 1, 0, NULL, fg_decode_default),
+ PARAM(TIMEBASE, KI_COEFF_MED_DISCHG_WORD, TIMEBASE_OFFSET, 2, 1000,
+ 61000, 0, fg_encode_default, NULL),
/* Entries below here are configurable during initialization */
PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
244141, 0, fg_encode_voltage, NULL),
@@ -1967,7 +1970,7 @@ static int fg_esr_fcc_config(struct fg_chip *chip)
{
union power_supply_propval prop = {0, };
int rc;
- bool parallel_en = false;
+ bool parallel_en = false, qnovo_en = false;
if (is_parallel_charger_available(chip)) {
rc = power_supply_get_property(chip->parallel_psy,
@@ -1980,19 +1983,25 @@ static int fg_esr_fcc_config(struct fg_chip *chip)
parallel_en = prop.intval;
}
- fg_dbg(chip, FG_POWER_SUPPLY, "charge_status: %d parallel_en: %d esr_fcc_ctrl_en: %d\n",
- chip->charge_status, parallel_en, chip->esr_fcc_ctrl_en);
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE, &prop);
+ if (!rc)
+ qnovo_en = prop.intval;
+
+ fg_dbg(chip, FG_POWER_SUPPLY, "chg_sts: %d par_en: %d qnov_en: %d esr_fcc_ctrl_en: %d\n",
+ chip->charge_status, parallel_en, qnovo_en,
+ chip->esr_fcc_ctrl_en);
if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
- parallel_en) {
+ (parallel_en || qnovo_en)) {
if (chip->esr_fcc_ctrl_en)
return 0;
/*
- * When parallel charging is enabled, configure ESR FCC to
- * 300mA to trigger an ESR pulse. Without this, FG can ask
- * the main charger to increase FCC when it is supposed to
- * decrease it.
+ * When parallel charging or Qnovo is enabled, configure ESR
+ * FCC to 300mA to trigger an ESR pulse. Without this, FG can
+ * request the main charger to increase FCC when it is supposed
+ * to decrease it.
*/
rc = fg_masked_write(chip, BATT_INFO_ESR_FAST_CRG_CFG(chip),
ESR_FAST_CRG_IVAL_MASK |
@@ -2011,8 +2020,8 @@ static int fg_esr_fcc_config(struct fg_chip *chip)
/*
* If we're here, then it means either the device is not in
- * charging state or parallel charging is disabled. Disable
- * ESR fast charge current control in SW.
+ * charging state or parallel charging / Qnovo is disabled.
+ * Disable ESR fast charge current control in SW.
*/
rc = fg_masked_write(chip, BATT_INFO_ESR_FAST_CRG_CFG(chip),
ESR_FAST_CRG_CTL_EN_BIT, 0);
@@ -3365,6 +3374,40 @@ static int fg_memif_init(struct fg_chip *chip)
return fg_ima_init(chip);
}
+static int fg_adjust_timebase(struct fg_chip *chip)
+{
+ int rc = 0, die_temp;
+ s32 time_base = 0;
+ u8 buf[2] = {0};
+
+ if ((chip->wa_flags & PM660_TSMC_OSC_WA) && chip->die_temp_chan) {
+ rc = iio_read_channel_processed(chip->die_temp_chan, &die_temp);
+ if (rc < 0) {
+ pr_err("Error in reading die_temp, rc:%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_lerp(fg_tsmc_osc_table, ARRAY_SIZE(fg_tsmc_osc_table),
+ die_temp / 1000, &time_base);
+ if (rc < 0) {
+ pr_err("Error to lookup fg_tsmc_osc_table rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_encode(chip->sp, FG_SRAM_TIMEBASE, time_base, buf);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_TIMEBASE].addr_word,
+ chip->sp[FG_SRAM_TIMEBASE].addr_byte, buf,
+ chip->sp[FG_SRAM_TIMEBASE].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing timebase, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
/* INTERRUPT HANDLERS STAY HERE */
static irqreturn_t fg_mem_xcp_irq_handler(int irq, void *data)
@@ -3486,6 +3529,10 @@ static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data)
chip->health = prop.intval;
if (chip->last_batt_temp != batt_temp) {
+ rc = fg_adjust_timebase(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting timebase, rc=%d\n", rc);
+
chip->last_batt_temp = batt_temp;
power_supply_changed(chip->batt_psy);
}
@@ -3555,6 +3602,10 @@ static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data)
if (rc < 0)
pr_err("Error in validating ESR, rc=%d\n", rc);
+ rc = fg_adjust_timebase(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting timebase, rc=%d\n", rc);
+
if (batt_psy_initialized(chip))
power_supply_changed(chip->batt_psy);
@@ -3897,6 +3948,8 @@ static int fg_parse_dt(struct fg_chip *chip)
chip->sp = pmi8998_v2_sram_params;
chip->alg_flags = pmi8998_v2_alg_flags;
chip->use_ima_single_mode = true;
+ if (chip->pmic_rev_id->fab_id == PM660_FAB_ID_TSMC)
+ chip->wa_flags |= PM660_TSMC_OSC_WA;
break;
default:
return -EINVAL;
@@ -4238,6 +4291,21 @@ static int fg_gen3_probe(struct platform_device *pdev)
return rc;
}
+ rc = of_property_match_string(chip->dev->of_node,
+ "io-channel-names", "rradc_die_temp");
+ if (rc >= 0) {
+ chip->die_temp_chan = iio_channel_get(chip->dev,
+ "rradc_die_temp");
+ if (IS_ERR(chip->die_temp_chan)) {
+ if (PTR_ERR(chip->die_temp_chan) != -EPROBE_DEFER)
+ pr_err("rradc_die_temp unavailable %ld\n",
+ PTR_ERR(chip->die_temp_chan));
+ rc = PTR_ERR(chip->die_temp_chan);
+ chip->die_temp_chan = NULL;
+ return rc;
+ }
+ }
+
chip->awake_votable = create_votable("FG_WS", VOTE_SET_ANY, fg_awake_cb,
chip);
if (IS_ERR(chip->awake_votable)) {
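
The fg_adjust_timebase() addition above converts die temperature to a timebase value by linear interpolation over fg_tsmc_osc_table. A standalone sketch of that lookup; the table entries are copied from the fg-core.h hunk, while struct fg_pt and lerp() here are local stand-ins shaped like the driver's fg_pt/fg_lerp.

/*
 * Sketch: clamp to the table ends, otherwise interpolate linearly between
 * the two surrounding (degC, timebase) points.
 */
#include <stdio.h>

struct fg_pt { int x; int y; };

static const struct fg_pt tsmc_osc_table[] = {
	{ -20, 395064 }, { -10, 398114 }, {   0, 401669 }, {  10, 404641 },
	{  20, 408856 }, {  25, 412449 }, {  30, 416532 }, {  40, 420289 },
	{  50, 425020 }, {  60, 430160 }, {  70, 434175 }, {  80, 439475 },
	{  90, 444992 },
};

static int lerp(const struct fg_pt *pts, int len, int x)
{
	int i;

	if (x <= pts[0].x)
		return pts[0].y;
	if (x >= pts[len - 1].x)
		return pts[len - 1].y;

	for (i = 1; i < len; i++) {
		if (x <= pts[i].x)
			return pts[i - 1].y +
			       (pts[i].y - pts[i - 1].y) *
			       (x - pts[i - 1].x) / (pts[i].x - pts[i - 1].x);
	}
	return pts[len - 1].y;
}

int main(void)
{
	int die_temp_mdeg = 27500;	/* example: 27.5 degC from the ADC */
	int len = sizeof(tsmc_osc_table) / sizeof(tsmc_osc_table[0]);
	int tb = lerp(tsmc_osc_table, len, die_temp_mdeg / 1000);

	printf("timebase at %d degC: %d\n", die_temp_mdeg / 1000, tb);
	return 0;
}
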
diff --git a/drivers/power/supply/qcom/qpnp-qnovo.c b/drivers/power/supply/qcom/qpnp-qnovo.c
index c74dc89..eb97eb0 100644
--- a/drivers/power/supply/qcom/qpnp-qnovo.c
+++ b/drivers/power/supply/qcom/qpnp-qnovo.c
@@ -89,7 +89,16 @@
#define QNOVO_STRM_CTRL 0xA8
#define QNOVO_IADC_OFFSET_OVR_VAL 0xA9
#define QNOVO_IADC_OFFSET_OVR 0xAA
+
#define QNOVO_DISABLE_CHARGING 0xAB
+#define ERR_SWITCHER_DISABLED BIT(7)
+#define ERR_JEITA_SOFT_CONDITION BIT(6)
+#define ERR_BAT_OV BIT(5)
+#define ERR_CV_MODE BIT(4)
+#define ERR_BATTERY_MISSING BIT(3)
+#define ERR_SAFETY_TIMER_EXPIRED BIT(2)
+#define ERR_CHARGING_DISABLED BIT(1)
+#define ERR_JEITA_HARD_CONDITION BIT(0)
#define QNOVO_TR_IADC_OFFSET_0 0xF1
#define QNOVO_TR_IADC_OFFSET_1 0xF2
@@ -1107,24 +1116,28 @@ static int qnovo_update_status(struct qnovo *chip)
{
u8 val = 0;
int rc;
- bool charging;
+ bool ok_to_qnovo;
bool changed = false;
rc = qnovo_read(chip, QNOVO_ERROR_STS2, &val, 1);
if (rc < 0) {
pr_err("Couldn't read error sts rc = %d\n", rc);
- charging = false;
+ ok_to_qnovo = false;
} else {
- charging = !(val & QNOVO_ERROR_CHARGING_DISABLED);
+ /*
+ * For CV mode keep qnovo enabled, userspace is expected to
+ * disable it after few runs
+ */
+ ok_to_qnovo = (val == ERR_CV_MODE || val == 0) ? true : false;
}
- if (chip->ok_to_qnovo ^ charging) {
+ if (chip->ok_to_qnovo ^ ok_to_qnovo) {
- vote(chip->disable_votable, OK_TO_QNOVO_VOTER, !charging, 0);
- if (!charging)
+ vote(chip->disable_votable, OK_TO_QNOVO_VOTER, !ok_to_qnovo, 0);
+ if (!ok_to_qnovo)
vote(chip->disable_votable, USER_VOTER, true, 0);
- chip->ok_to_qnovo = charging;
+ chip->ok_to_qnovo = ok_to_qnovo;
changed = true;
}
@@ -1247,6 +1260,16 @@ static int qnovo_hw_init(struct qnovo *chip)
chip->v_gain_mega = 1000000000 + (s64)(s8)vadc_gain * GAIN_LSB_FACTOR;
chip->v_gain_mega = div_s64(chip->v_gain_mega, 1000);
+ /* allow charger error conditions to disable qnovo, CV mode excluded */
+ val = ERR_SWITCHER_DISABLED | ERR_JEITA_SOFT_CONDITION | ERR_BAT_OV |
+ ERR_BATTERY_MISSING | ERR_SAFETY_TIMER_EXPIRED |
+ ERR_CHARGING_DISABLED | ERR_JEITA_HARD_CONDITION;
+ rc = qnovo_write(chip, QNOVO_DISABLE_CHARGING, &val, 1);
+ if (rc < 0) {
+ pr_err("Couldn't write QNOVO_DISABLE_CHARGING rc = %d\n", rc);
+ return rc;
+ }
+
return 0;
}
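
The qpnp-qnovo change above writes a QNOVO_DISABLE_CHARGING mask that lets every charger error condition except CV mode disable Qnovo, and treats the error status as OK only when it is clean or reports CV mode alone. A standalone sketch of both decisions, with the bit values copied from the hunk and local helper names as stand-ins:

/*
 * Sketch: compose the disable mask (everything but CV mode) and decide
 * ok_to_qnovo from the raw error status byte.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

#define ERR_SWITCHER_DISABLED		BIT(7)
#define ERR_JEITA_SOFT_CONDITION	BIT(6)
#define ERR_BAT_OV			BIT(5)
#define ERR_CV_MODE			BIT(4)
#define ERR_BATTERY_MISSING		BIT(3)
#define ERR_SAFETY_TIMER_EXPIRED	BIT(2)
#define ERR_CHARGING_DISABLED		BIT(1)
#define ERR_JEITA_HARD_CONDITION	BIT(0)

/* conditions that may disable Qnovo: everything except CV mode */
static uint8_t qnovo_disable_mask(void)
{
	return ERR_SWITCHER_DISABLED | ERR_JEITA_SOFT_CONDITION | ERR_BAT_OV |
	       ERR_BATTERY_MISSING | ERR_SAFETY_TIMER_EXPIRED |
	       ERR_CHARGING_DISABLED | ERR_JEITA_HARD_CONDITION;
}

static bool ok_to_qnovo(uint8_t error_sts)
{
	return error_sts == 0 || error_sts == ERR_CV_MODE;
}

int main(void)
{
	printf("disable mask: 0x%02x\n", (unsigned int)qnovo_disable_mask());
	printf("CV mode only -> ok=%d\n", ok_to_qnovo(ERR_CV_MODE));
	printf("batt missing -> ok=%d\n", ok_to_qnovo(ERR_BATTERY_MISSING));
	return 0;
}
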
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 8fd45f18..e802fbd 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -244,6 +244,8 @@ struct smb_dt_props {
int boost_threshold_ua;
int fv_uv;
int wipower_max_uw;
+ int min_freq_khz;
+ int max_freq_khz;
u32 step_soc_threshold[STEP_CHARGING_MAX_STEPS - 1];
s32 step_cc_delta[STEP_CHARGING_MAX_STEPS];
struct device_node *revid_dev_node;
@@ -338,6 +340,18 @@ static int smb2_parse_dt(struct smb2 *chip)
if (rc < 0)
chip->dt.boost_threshold_ua = MICRO_P1A;
+ rc = of_property_read_u32(node,
+ "qcom,min-freq-khz",
+ &chip->dt.min_freq_khz);
+ if (rc < 0)
+ chip->dt.min_freq_khz = -EINVAL;
+
+ rc = of_property_read_u32(node,
+ "qcom,max-freq-khz",
+ &chip->dt.max_freq_khz);
+ if (rc < 0)
+ chip->dt.max_freq_khz = -EINVAL;
+
rc = of_property_read_u32(node, "qcom,wipower-max-uw",
&chip->dt.wipower_max_uw);
if (rc < 0)
@@ -526,6 +540,12 @@ static int smb2_usb_set_prop(struct power_supply *psy,
struct smb_charger *chg = &chip->chg;
int rc = 0;
+ mutex_lock(&chg->lock);
+ if (!chg->typec_present) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+
switch (psp) {
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
rc = smblib_set_prop_usb_voltage_min(chg, val);
@@ -564,6 +584,8 @@ static int smb2_usb_set_prop(struct power_supply *psy,
break;
}
+unlock:
+ mutex_unlock(&chg->lock);
return rc;
}
@@ -1336,10 +1358,12 @@ static int smb2_configure_typec(struct smb_charger *chg)
return rc;
}
- /* disable try.SINK mode */
- rc = smblib_masked_write(chg, TYPE_C_CFG_3_REG, EN_TRYSINK_MODE_BIT, 0);
+ /* disable try.SINK mode and legacy cable IRQs */
+ rc = smblib_masked_write(chg, TYPE_C_CFG_3_REG, EN_TRYSINK_MODE_BIT |
+ TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN_BIT |
+ TYPEC_LEGACY_CABLE_INT_EN_BIT, 0);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't set TRYSINK_MODE rc=%d\n", rc);
+ dev_err(chg->dev, "Couldn't set Type-C config rc=%d\n", rc);
return rc;
}
@@ -1438,6 +1462,16 @@ static int smb2_init_hw(struct smb2 *chip)
smblib_get_charge_param(chg, &chg->param.dc_icl,
&chip->dt.dc_icl_ua);
+ if (chip->dt.min_freq_khz > 0) {
+ chg->param.freq_buck.min_u = chip->dt.min_freq_khz;
+ chg->param.freq_boost.min_u = chip->dt.min_freq_khz;
+ }
+
+ if (chip->dt.max_freq_khz > 0) {
+ chg->param.freq_buck.max_u = chip->dt.max_freq_khz;
+ chg->param.freq_boost.max_u = chip->dt.max_freq_khz;
+ }
+
/* set a slower soft start setting for OTG */
rc = smblib_masked_write(chg, DC_ENG_SSUPPLY_CFG2_REG,
ENG_SSUPPLY_IVREF_OTG_SS_MASK, OTG_SS_SLOW);
@@ -1485,6 +1519,8 @@ static int smb2_init_hw(struct smb2 *chip)
DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
true, 0);
+ vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+ true, 0);
vote(chg->hvdcp_disable_votable_indirect, DEFAULT_VOTER,
chip->dt.hvdcp_disable, 0);
vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER,
@@ -2022,6 +2058,16 @@ static int smb2_request_interrupts(struct smb2 *chip)
return rc;
}
+static void smb2_disable_interrupts(struct smb_charger *chg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(smb2_irqs); i++) {
+ if (smb2_irqs[i].irq > 0)
+ disable_irq(smb2_irqs[i].irq);
+ }
+}
+
#if defined(CONFIG_DEBUG_FS)
static int force_batt_psy_update_write(void *data, u64 val)
@@ -2233,7 +2279,7 @@ static int smb2_probe(struct platform_device *pdev)
rc = smblib_get_prop_batt_health(chg, &val);
if (rc < 0) {
pr_err("Couldn't get batt health rc=%d\n", rc);
- goto cleanup;
+ val.intval = POWER_SUPPLY_HEALTH_UNKNOWN;
}
batt_health = val.intval;
@@ -2284,6 +2330,9 @@ static void smb2_shutdown(struct platform_device *pdev)
struct smb2 *chip = platform_get_drvdata(pdev);
struct smb_charger *chg = &chip->chg;
+ /* disable all interrupts */
+ smb2_disable_interrupts(chg);
+
/* configure power role for UFP */
smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
TYPEC_POWER_ROLE_CMD_MASK, UFP_EN_CMD_BIT);
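
The qpnp-smb2 hunks above add optional qcom,min-freq-khz / qcom,max-freq-khz properties: the parser stores -EINVAL when a property is absent and init narrows the buck/boost frequency limits only for positive values. A standalone sketch of that parse-then-clamp flow; read_u32_prop() stands in for of_property_read_u32() and the param struct only mirrors the min_u/max_u fields.

/*
 * Sketch: absent DT properties leave the defaults untouched, present ones
 * override the parameter range.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct param_range { int min_u; int max_u; };

/* pretend the DT provides only a max frequency */
static int read_u32_prop(const char *name, int *out)
{
	if (!strcmp(name, "qcom,max-freq-khz")) {
		*out = 1600;
		return 0;
	}
	return -EINVAL;
}

int main(void)
{
	struct param_range freq_buck = { .min_u = 600, .max_u = 2000 };
	int min_khz, max_khz;

	if (read_u32_prop("qcom,min-freq-khz", &min_khz) < 0)
		min_khz = -EINVAL;
	if (read_u32_prop("qcom,max-freq-khz", &max_khz) < 0)
		max_khz = -EINVAL;

	if (min_khz > 0)
		freq_buck.min_u = min_khz;
	if (max_khz > 0)
		freq_buck.max_u = max_khz;

	printf("buck freq range: %d..%d kHz\n", freq_buck.min_u, freq_buck.max_u);
	return 0;
}
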
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index c8deedd..7d5a8bd 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -542,30 +542,6 @@ static void smblib_rerun_apsd(struct smb_charger *chg)
smblib_err(chg, "Couldn't re-run APSD rc=%d\n", rc);
}
-static int try_rerun_apsd_for_hvdcp(struct smb_charger *chg)
-{
- const struct apsd_result *apsd_result;
-
- /*
- * PD_INACTIVE_VOTER on hvdcp_disable_votable indicates whether
- * apsd rerun was tried earlier
- */
- if (get_client_vote(chg->hvdcp_disable_votable_indirect,
- PD_INACTIVE_VOTER)) {
- vote(chg->hvdcp_disable_votable_indirect,
- PD_INACTIVE_VOTER, false, 0);
- /* ensure hvdcp is enabled */
- if (!get_effective_result(
- chg->hvdcp_disable_votable_indirect)) {
- apsd_result = smblib_get_apsd_result(chg);
- if (apsd_result->bit & (QC_2P0_BIT | QC_3P0_BIT)) {
- smblib_rerun_apsd(chg);
- }
- }
- }
- return 0;
-}
-
static const struct apsd_result *smblib_update_usb_type(struct smb_charger *chg)
{
const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
@@ -684,6 +660,7 @@ static void smblib_uusb_removal(struct smb_charger *chg)
chg->voltage_max_uv = MICRO_5V;
chg->usb_icl_delta_ua = 0;
chg->pulse_cnt = 0;
+ chg->uusb_apsd_rerun_done = false;
/* clear USB ICL vote for USB_PSY_VOTER */
rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
@@ -751,6 +728,7 @@ int smblib_rerun_apsd_if_required(struct smb_charger *chg)
rc);
}
+ chg->uusb_apsd_rerun_done = true;
smblib_rerun_apsd(chg);
return 0;
@@ -1020,6 +998,7 @@ static int smblib_hvdcp_enable_vote_callback(struct votable *votable,
struct smb_charger *chg = data;
int rc;
u8 val = HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT;
+ u8 stat;
/* vote to enable/disable HW autonomous INOV */
vote(chg->hvdcp_hw_inov_dis_votable, client, !hvdcp_enable, 0);
@@ -1041,6 +1020,16 @@ static int smblib_hvdcp_enable_vote_callback(struct votable *votable,
return rc;
}
+ rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read APSD status rc=%d\n", rc);
+ return rc;
+ }
+
+ /* re-run APSD if HVDCP was detected */
+ if (stat & QC_CHARGER_BIT)
+ smblib_rerun_apsd(chg);
+
return 0;
}
@@ -1134,6 +1123,22 @@ static int smblib_usb_irq_enable_vote_callback(struct votable *votable,
return 0;
}
+static int smblib_typec_irq_disable_vote_callback(struct votable *votable,
+ void *data, int disable, const char *client)
+{
+ struct smb_charger *chg = data;
+
+ if (!chg->irq_info[TYPE_C_CHANGE_IRQ].irq)
+ return 0;
+
+ if (disable)
+ disable_irq_nosync(chg->irq_info[TYPE_C_CHANGE_IRQ].irq);
+ else
+ enable_irq(chg->irq_info[TYPE_C_CHANGE_IRQ].irq);
+
+ return 0;
+}
+
/*******************
* VCONN REGULATOR *
* *****************/
@@ -1142,7 +1147,7 @@ static int smblib_usb_irq_enable_vote_callback(struct votable *votable,
static int _smblib_vconn_regulator_enable(struct regulator_dev *rdev)
{
struct smb_charger *chg = rdev_get_drvdata(rdev);
- u8 otg_stat, stat4;
+ u8 otg_stat, val;
int rc = 0, i;
if (!chg->external_vconn) {
@@ -1173,17 +1178,12 @@ static int _smblib_vconn_regulator_enable(struct regulator_dev *rdev)
* VCONN_EN_ORIENTATION is overloaded with overriding the CC pin used
* for Vconn, and it should be set with reverse polarity of CC_OUT.
*/
- rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat4);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
- return rc;
- }
-
smblib_dbg(chg, PR_OTG, "enabling VCONN\n");
- stat4 = stat4 & CC_ORIENTATION_BIT ? 0 : VCONN_EN_ORIENTATION_BIT;
+ val = chg->typec_status[3] &
+ CC_ORIENTATION_BIT ? 0 : VCONN_EN_ORIENTATION_BIT;
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
VCONN_EN_VALUE_BIT | VCONN_EN_ORIENTATION_BIT,
- VCONN_EN_VALUE_BIT | stat4);
+ VCONN_EN_VALUE_BIT | val);
if (rc < 0) {
smblib_err(chg, "Couldn't enable vconn setting rc=%d\n", rc);
return rc;
@@ -1531,6 +1531,21 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
break;
}
+ if (val->intval != POWER_SUPPLY_STATUS_CHARGING)
+ return 0;
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_7_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ stat &= ENABLE_TRICKLE_BIT | ENABLE_PRE_CHARGING_BIT |
+ ENABLE_FAST_CHARGING_BIT | ENABLE_FULLON_MODE_BIT;
+ if (!stat)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
return 0;
}
@@ -2131,23 +2146,13 @@ int smblib_get_prop_charger_temp_max(struct smb_charger *chg,
int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
union power_supply_propval *val)
{
- int rc = 0;
- u8 stat;
-
- rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
- return rc;
- }
- smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n",
- stat);
-
- if (stat & CC_ATTACHED_BIT)
- val->intval = (bool)(stat & CC_ORIENTATION_BIT) + 1;
+ if (chg->typec_status[3] & CC_ATTACHED_BIT)
+ val->intval =
+ (bool)(chg->typec_status[3] & CC_ORIENTATION_BIT) + 1;
else
val->intval = 0;
- return rc;
+ return 0;
}
static const char * const smblib_typec_mode_name[] = {
@@ -2165,17 +2170,7 @@ static const char * const smblib_typec_mode_name[] = {
static int smblib_get_prop_ufp_mode(struct smb_charger *chg)
{
- int rc;
- u8 stat;
-
- rc = smblib_read(chg, TYPE_C_STATUS_1_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_1 rc=%d\n", rc);
- return POWER_SUPPLY_TYPEC_NONE;
- }
- smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_1 = 0x%02x\n", stat);
-
- switch (stat) {
+ switch (chg->typec_status[0]) {
case 0:
return POWER_SUPPLY_TYPEC_NONE;
case UFP_TYPEC_RDSTD_BIT:
@@ -2193,17 +2188,7 @@ static int smblib_get_prop_ufp_mode(struct smb_charger *chg)
static int smblib_get_prop_dfp_mode(struct smb_charger *chg)
{
- int rc;
- u8 stat;
-
- rc = smblib_read(chg, TYPE_C_STATUS_2_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_2 rc=%d\n", rc);
- return POWER_SUPPLY_TYPEC_NONE;
- }
- smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_2 = 0x%02x\n", stat);
-
- switch (stat & DFP_TYPEC_MASK) {
+ switch (chg->typec_status[1] & DFP_TYPEC_MASK) {
case DFP_RA_RA_BIT:
return POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER;
case DFP_RD_RD_BIT:
@@ -2224,28 +2209,17 @@ static int smblib_get_prop_dfp_mode(struct smb_charger *chg)
int smblib_get_prop_typec_mode(struct smb_charger *chg,
union power_supply_propval *val)
{
- int rc;
- u8 stat;
-
- rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ if (!(chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT)) {
val->intval = POWER_SUPPLY_TYPEC_NONE;
- return rc;
- }
- smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", stat);
-
- if (!(stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT)) {
- val->intval = POWER_SUPPLY_TYPEC_NONE;
- return rc;
+ return 0;
}
- if (stat & UFP_DFP_MODE_STATUS_BIT)
+ if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
val->intval = smblib_get_prop_dfp_mode(chg);
else
val->intval = smblib_get_prop_ufp_mode(chg);
- return rc;
+ return 0;
}
int smblib_get_prop_typec_power_role(struct smb_charger *chg,
@@ -2337,16 +2311,7 @@ int smblib_get_prop_input_voltage_settled(struct smb_charger *chg,
int smblib_get_prop_pd_in_hard_reset(struct smb_charger *chg,
union power_supply_propval *val)
{
- int rc;
- u8 ctrl;
-
- rc = smblib_read(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, &ctrl);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG rc=%d\n",
- rc);
- return rc;
- }
- val->intval = ctrl & EXIT_SNK_BASED_ON_CC_BIT;
+ val->intval = chg->pd_hard_reset;
return 0;
}
@@ -2542,53 +2507,60 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
const union power_supply_propval *val)
{
int rc;
- u8 stat = 0;
- bool cc_debounced;
- bool orientation;
- bool pd_active = val->intval;
+ bool orientation, cc_debounced, sink_attached, hvdcp;
+ u8 stat;
- if (!get_effective_result(chg->pd_allowed_votable)) {
- smblib_err(chg, "PD is not allowed\n");
+ if (!get_effective_result(chg->pd_allowed_votable))
return -EINVAL;
- }
- vote(chg->apsd_disable_votable, PD_VOTER, pd_active, 0);
- vote(chg->pd_allowed_votable, PD_VOTER, pd_active, 0);
- vote(chg->usb_irq_enable_votable, PD_VOTER, pd_active, 0);
-
- /*
- * VCONN_EN_ORIENTATION_BIT controls whether to use CC1 or CC2 line
- * when TYPEC_SPARE_CFG_BIT (CC pin selection s/w override) is set
- * or when VCONN_EN_VALUE_BIT is set.
- */
- rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ rc = smblib_read(chg, APSD_STATUS_REG, &stat);
if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ smblib_err(chg, "Couldn't read APSD status rc=%d\n", rc);
return rc;
}
- if (pd_active) {
- orientation = stat & CC_ORIENTATION_BIT;
+ cc_debounced = (bool)
+ (chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
+ sink_attached = (bool)
+ (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT);
+ hvdcp = stat & QC_CHARGER_BIT;
+
+ chg->pd_active = val->intval;
+ if (chg->pd_active) {
+ vote(chg->apsd_disable_votable, PD_VOTER, true, 0);
+ vote(chg->pd_allowed_votable, PD_VOTER, true, 0);
+ vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0);
+
+ /*
+ * VCONN_EN_ORIENTATION_BIT controls whether to use CC1 or CC2
+ * line when TYPEC_SPARE_CFG_BIT (CC pin selection s/w override)
+ * is set or when VCONN_EN_VALUE_BIT is set.
+ */
+ orientation = chg->typec_status[3] & CC_ORIENTATION_BIT;
rc = smblib_masked_write(chg,
TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
VCONN_EN_ORIENTATION_BIT,
orientation ? 0 : VCONN_EN_ORIENTATION_BIT);
- if (rc < 0) {
+ if (rc < 0)
smblib_err(chg,
"Couldn't enable vconn on CC line rc=%d\n", rc);
- return rc;
- }
+
+ /* SW controlled CC_OUT */
+ rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+ TYPEC_SPARE_CFG_BIT, TYPEC_SPARE_CFG_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable SW cc_out rc=%d\n",
+ rc);
+
/*
* Enforce 500mA for PD until the real vote comes in later.
* It is guaranteed that pd_active is set prior to
* pd_current_max
*/
rc = vote(chg->usb_icl_votable, PD_VOTER, true, USBIN_500MA);
- if (rc < 0) {
+ if (rc < 0)
smblib_err(chg, "Couldn't vote for USB ICL rc=%d\n",
- rc);
- return rc;
- }
+ rc);
/* since PD was found the cable must be non-legacy */
vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
@@ -2596,36 +2568,40 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
/* clear USB ICL vote for DCP_VOTER */
rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
if (rc < 0)
- smblib_err(chg,
- "Couldn't un-vote DCP from USB ICL rc=%d\n",
- rc);
+ smblib_err(chg, "Couldn't un-vote DCP from USB ICL rc=%d\n",
+ rc);
/* remove USB_PSY_VOTER */
rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
- if (rc < 0) {
+ if (rc < 0)
smblib_err(chg, "Couldn't unvote USB_PSY rc=%d\n", rc);
- return rc;
- }
+ } else {
+ vote(chg->apsd_disable_votable, PD_VOTER, false, 0);
+ vote(chg->pd_allowed_votable, PD_VOTER, true, 0);
+ vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0);
+ vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
+ false, 0);
+
+ /* HW controlled CC_OUT */
+ rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+ TYPEC_SPARE_CFG_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable HW cc_out rc=%d\n",
+ rc);
+
+ /*
+ * This WA should only run for HVDCP. Non-legacy SDP/CDP could
+ * draw more, but this WA will remove Rd causing VBUS to drop,
+ * and data could be interrupted. Non-legacy DCP could also draw
+ * more, but it may impact compliance.
+ */
+ if (!chg->typec_legacy_valid && cc_debounced &&
+ !sink_attached && hvdcp)
+ schedule_work(&chg->legacy_detection_work);
}
- /* CC pin selection s/w override in PD session; h/w otherwise. */
- rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
- TYPEC_SPARE_CFG_BIT,
- pd_active ? TYPEC_SPARE_CFG_BIT : 0);
- if (rc < 0) {
- smblib_err(chg, "Couldn't change cc_out ctrl to %s rc=%d\n",
- pd_active ? "SW" : "HW", rc);
- return rc;
- }
-
- cc_debounced = (bool)(stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
- if (!pd_active && cc_debounced)
- try_rerun_apsd_for_hvdcp(chg);
-
- chg->pd_active = pd_active;
smblib_update_usb_type(chg);
power_supply_changed(chg->usb_psy);
-
return rc;
}
@@ -2728,88 +2704,70 @@ static struct reg_info cc2_detach_settings[] = {
static int smblib_cc2_sink_removal_enter(struct smb_charger *chg)
{
- int rc = 0;
- union power_supply_propval cc2_val = {0, };
+ int rc, ccout, ufp_mode;
+ u8 stat;
if ((chg->wa_flags & TYPEC_CC2_REMOVAL_WA_BIT) == 0)
- return rc;
+ return 0;
- if (chg->cc2_sink_detach_flag != CC2_SINK_NONE)
- return rc;
+ if (chg->cc2_detach_wa_active)
+ return 0;
- rc = smblib_get_prop_typec_cc_orientation(chg, &cc2_val);
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
if (rc < 0) {
- smblib_err(chg, "Couldn't get cc orientation rc=%d\n", rc);
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
return rc;
}
- if (cc2_val.intval == 1)
- return rc;
+ ccout = (stat & CC_ATTACHED_BIT) ?
+ (!!(stat & CC_ORIENTATION_BIT) + 1) : 0;
+ ufp_mode = (stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT) ?
+ !(stat & UFP_DFP_MODE_STATUS_BIT) : 0;
- rc = smblib_get_prop_typec_mode(chg, &cc2_val);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc);
- return rc;
- }
+ if (ccout != 2)
+ return 0;
- switch (cc2_val.intval) {
- case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
- smblib_reg_block_update(chg, cc2_detach_settings);
- chg->cc2_sink_detach_flag = CC2_SINK_STD;
- schedule_work(&chg->rdstd_cc2_detach_work);
- break;
- case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
- case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
- chg->cc2_sink_detach_flag = CC2_SINK_MEDIUM_HIGH;
- break;
- default:
- break;
- }
+ if (!ufp_mode)
+ return 0;
+ chg->cc2_detach_wa_active = true;
+ /* The CC2 removal WA will cause a type-c-change IRQ storm */
+ smblib_reg_block_update(chg, cc2_detach_settings);
+ schedule_work(&chg->rdstd_cc2_detach_work);
return rc;
}
static int smblib_cc2_sink_removal_exit(struct smb_charger *chg)
{
- int rc = 0;
-
if ((chg->wa_flags & TYPEC_CC2_REMOVAL_WA_BIT) == 0)
- return rc;
+ return 0;
- if (chg->cc2_sink_detach_flag == CC2_SINK_STD) {
- cancel_work_sync(&chg->rdstd_cc2_detach_work);
- smblib_reg_block_restore(chg, cc2_detach_settings);
- }
+ if (!chg->cc2_detach_wa_active)
+ return 0;
- chg->cc2_sink_detach_flag = CC2_SINK_NONE;
-
- return rc;
+ chg->cc2_detach_wa_active = false;
+ cancel_work_sync(&chg->rdstd_cc2_detach_work);
+ smblib_reg_block_restore(chg, cc2_detach_settings);
+ return 0;
}
int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
const union power_supply_propval *val)
{
- int rc;
+ int rc = 0;
+ if (chg->pd_hard_reset == val->intval)
+ return rc;
+
+ chg->pd_hard_reset = val->intval;
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
- EXIT_SNK_BASED_ON_CC_BIT,
- (val->intval) ? EXIT_SNK_BASED_ON_CC_BIT : 0);
- if (rc < 0) {
- smblib_err(chg, "Could not set EXIT_SNK_BASED_ON_CC rc=%d\n",
+ EXIT_SNK_BASED_ON_CC_BIT,
+ (chg->pd_hard_reset) ? EXIT_SNK_BASED_ON_CC_BIT : 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set EXIT_SNK_BASED_ON_CC rc=%d\n",
rc);
- return rc;
- }
- vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, val->intval, 0);
-
- if (val->intval)
- rc = smblib_cc2_sink_removal_enter(chg);
- else
- rc = smblib_cc2_sink_removal_exit(chg);
-
- if (rc < 0) {
- smblib_err(chg, "Could not detect cc2 removal rc=%d\n", rc);
- return rc;
- }
+ vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER,
+ chg->pd_hard_reset, 0);
return rc;
}
@@ -3116,25 +3074,43 @@ irqreturn_t smblib_handle_usbin_uv(int irq, void *data)
return IRQ_HANDLED;
}
-#define PL_DELAY_MS 30000
-irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
+static void smblib_micro_usb_plugin(struct smb_charger *chg, bool vbus_rising)
{
- struct smb_irq_data *irq_data = data;
- struct smb_charger *chg = irq_data->parent_data;
+ if (vbus_rising) {
+ /* use the typec flag even though its not typec */
+ chg->typec_present = 1;
+ } else {
+ chg->typec_present = 0;
+ smblib_update_usb_type(chg);
+ extcon_set_cable_state_(chg->extcon, EXTCON_USB, false);
+ smblib_uusb_removal(chg);
+ }
+}
+
+static void smblib_typec_usb_plugin(struct smb_charger *chg, bool vbus_rising)
+{
+ if (vbus_rising)
+ smblib_cc2_sink_removal_exit(chg);
+ else
+ smblib_cc2_sink_removal_enter(chg);
+}
+
+#define PL_DELAY_MS 30000
+void smblib_usb_plugin_locked(struct smb_charger *chg)
+{
int rc;
u8 stat;
bool vbus_rising;
rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
- return IRQ_HANDLED;
+ smblib_err(chg, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+ return;
}
vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
- smblib_set_opt_freq_buck(chg,
- vbus_rising ? chg->chg_freq.freq_5V :
- chg->chg_freq.freq_removal);
+ smblib_set_opt_freq_buck(chg, vbus_rising ? chg->chg_freq.freq_5V :
+ chg->chg_freq.freq_removal);
/* fetch the DPDM regulator */
if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
@@ -3171,17 +3147,26 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
rc);
}
-
- if (chg->micro_usb_mode) {
- smblib_update_usb_type(chg);
- extcon_set_cable_state_(chg->extcon, EXTCON_USB, false);
- smblib_uusb_removal(chg);
- }
}
+ if (chg->micro_usb_mode)
+ smblib_micro_usb_plugin(chg, vbus_rising);
+ else
+ smblib_typec_usb_plugin(chg, vbus_rising);
+
power_supply_changed(chg->usb_psy);
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s %s\n",
- irq_data->name, vbus_rising ? "attached" : "detached");
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: usbin-plugin %s\n",
+ vbus_rising ? "attached" : "detached");
+}
+
+irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+
+ mutex_lock(&chg->lock);
+ smblib_usb_plugin_locked(chg);
+ mutex_unlock(&chg->lock);
return IRQ_HANDLED;
}
@@ -3350,9 +3335,6 @@ static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg,
if (rising) {
vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
false, 0);
- if (get_effective_result(chg->pd_disallowed_votable_indirect))
- /* could be a legacy cable, try doing hvdcp */
- try_rerun_apsd_for_hvdcp(chg);
/* enable HDC and ICL irq for QC2/3 charger */
if (qc_charger)
@@ -3387,6 +3369,10 @@ static void smblib_handle_hvdcp_detect_done(struct smb_charger *chg,
static void smblib_force_legacy_icl(struct smb_charger *chg, int pst)
{
+ /* while PD is active it should have complete ICL control */
+ if (chg->pd_active)
+ return;
+
switch (pst) {
case POWER_SUPPLY_TYPE_USB:
/*
@@ -3426,7 +3412,7 @@ static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
apsd_result = smblib_update_usb_type(chg);
- if (!chg->pd_active)
+ if (!chg->typec_legacy_valid)
smblib_force_legacy_icl(chg, apsd_result->pst);
switch (apsd_result->bit) {
@@ -3472,6 +3458,17 @@ irqreturn_t smblib_handle_usb_source_change(int irq, void *data)
}
smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+ if (chg->micro_usb_mode && (stat & APSD_DTC_STATUS_DONE_BIT)
+ && !chg->uusb_apsd_rerun_done) {
+ /*
+ * Force re-run APSD to handle slow insertion related
+ * charger-mis-detection.
+ */
+ chg->uusb_apsd_rerun_done = true;
+ smblib_rerun_apsd(chg);
+ return IRQ_HANDLED;
+ }
+
smblib_handle_apsd_done(chg,
(bool)(stat & APSD_DTC_STATUS_DONE_BIT));
@@ -3505,71 +3502,6 @@ irqreturn_t smblib_handle_usb_source_change(int irq, void *data)
return IRQ_HANDLED;
}
-static void typec_source_removal(struct smb_charger *chg)
-{
- int rc;
-
- /* reset legacy unknown vote */
- vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
-
- /* reset both usbin current and voltage votes */
- vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
- vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
-
- cancel_delayed_work_sync(&chg->hvdcp_detect_work);
-
- if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
- /* re-enable AUTH_IRQ_EN_CFG_BIT */
- rc = smblib_masked_write(chg,
- USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
- AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
- if (rc < 0)
- smblib_err(chg,
- "Couldn't enable QC auth setting rc=%d\n", rc);
- }
-
- /* reconfigure allowed voltage for HVDCP */
- rc = smblib_set_adapter_allowance(chg,
- USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
- if (rc < 0)
- smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
- rc);
-
- chg->voltage_min_uv = MICRO_5V;
- chg->voltage_max_uv = MICRO_5V;
-
- /* clear USB ICL vote for PD_VOTER */
- rc = vote(chg->usb_icl_votable, PD_VOTER, false, 0);
- if (rc < 0)
- smblib_err(chg, "Couldn't un-vote PD from USB ICL rc=%d\n", rc);
-
- /* clear USB ICL vote for USB_PSY_VOTER */
- rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
- if (rc < 0)
- smblib_err(chg,
- "Couldn't un-vote USB_PSY from USB ICL rc=%d\n", rc);
-
- /* clear USB ICL vote for DCP_VOTER */
- rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
- if (rc < 0)
- smblib_err(chg,
- "Couldn't un-vote DCP from USB ICL rc=%d\n", rc);
-
-}
-
-static void typec_source_insertion(struct smb_charger *chg)
-{
- /*
- * at any time we want LEGACY_UNKNOWN, PD, or USB_PSY to be voting for
- * ICL, so vote LEGACY_UNKNOWN here if none of the above three have
- * casted their votes
- */
- if (!is_client_vote_enabled(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER)
- && !is_client_vote_enabled(chg->usb_icl_votable, PD_VOTER)
- && !is_client_vote_enabled(chg->usb_icl_votable, USB_PSY_VOTER))
- vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000);
-}
-
static void typec_sink_insertion(struct smb_charger *chg)
{
/* when a sink is inserted we should not wait on hvdcp timeout to
@@ -3590,30 +3522,50 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
{
int rc;
- cancel_delayed_work_sync(&chg->pl_enable_work);
- vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
- vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+ chg->cc2_detach_wa_active = false;
+ /* reset APSD voters */
+ vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0);
+ vote(chg->apsd_disable_votable, PD_VOTER, false, 0);
+
+ cancel_delayed_work_sync(&chg->pl_enable_work);
+ cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+
+ /* reset input current limit voters */
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000);
+ vote(chg->usb_icl_votable, PD_VOTER, false, 0);
+ vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+ vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
+ vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+
+ /* reset hvdcp voters */
+ vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
+ vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER, true, 0);
+
+ /* reset power delivery voters */
+ vote(chg->pd_allowed_votable, PD_VOTER, false, 0);
vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, true, 0);
vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER, true, 0);
+
+ /* reset usb irq voters */
vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
vote(chg->usb_irq_enable_votable, QC_VOTER, false, 0);
- /* reset votes from vbus_cc_short */
- vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
- true, 0);
- vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
- true, 0);
- /*
- * cable could be removed during hard reset, remove its vote to
- * disable apsd
- */
- vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0);
+ /* reset parallel voters */
+ vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+ vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+ vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+ vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
chg->vconn_attempts = 0;
chg->otg_attempts = 0;
chg->pulse_cnt = 0;
chg->usb_icl_delta_ua = 0;
+ chg->voltage_min_uv = MICRO_5V;
+ chg->voltage_max_uv = MICRO_5V;
+ chg->pd_active = 0;
+ chg->pd_hard_reset = 0;
+ chg->typec_legacy_valid = false;
/* enable APSD CC trigger for next insertion */
rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
@@ -3621,15 +3573,48 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
if (rc < 0)
smblib_err(chg, "Couldn't enable APSD_START_ON_CC rc=%d\n", rc);
- smblib_update_usb_type(chg);
- typec_source_removal(chg);
+ if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+ /* re-enable AUTH_IRQ_EN_CFG_BIT */
+ rc = smblib_masked_write(chg,
+ USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+ AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't enable QC auth setting rc=%d\n", rc);
+ }
+
+ /* reconfigure allowed voltage for HVDCP */
+ rc = smblib_set_adapter_allowance(chg,
+ USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
+ rc);
+
+ /* enable DRP */
+ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ TYPEC_POWER_ROLE_CMD_MASK, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable DRP rc=%d\n", rc);
+
+ /* HW controlled CC_OUT */
+ rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+ TYPEC_SPARE_CFG_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable HW cc_out rc=%d\n", rc);
+
+ /* restore crude sensor */
+ rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't restore crude sensor rc=%d\n", rc);
+
typec_sink_removal(chg);
+ smblib_update_usb_type(chg);
}
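
Editor's note: the removal path above is essentially a bulk reset of the driver's votables. As a quick illustration of the voting pattern (a minimal userspace sketch, assuming a min-aggregating ICL votable; the names and the aggregation rule are illustrative, not the driver's exact implementation):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct client_vote { const char *name; bool enabled; int value_ua; };

/* Effective ICL = minimum of all enabled votes, or -1 if nobody votes. */
static int effective_icl(const struct client_vote *v, int n)
{
	int i, best = INT_MAX;
	bool any = false;

	for (i = 0; i < n; i++) {
		if (!v[i].enabled)
			continue;
		any = true;
		if (v[i].value_ua < best)
			best = v[i].value_ua;
	}
	return any ? best : -1;
}

int main(void)
{
	/* State right after smblib_handle_typec_removal(): only the
	 * LEGACY_UNKNOWN_VOTER 100 mA vote is left standing. */
	struct client_vote votes[] = {
		{ "LEGACY_UNKNOWN_VOTER", true,  100000 },
		{ "PD_VOTER",             false, 0 },
		{ "USB_PSY_VOTER",        false, 0 },
		{ "DCP_VOTER",            false, 0 },
	};

	printf("effective ICL = %d uA\n", effective_icl(votes, 4));
	return 0;
}
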
static void smblib_handle_typec_insertion(struct smb_charger *chg,
- bool sink_attached, bool legacy_cable)
+ bool sink_attached)
{
- int rp, rc;
+ int rc;
vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, false, 0);
@@ -3639,59 +3624,36 @@ static void smblib_handle_typec_insertion(struct smb_charger *chg,
smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
rc);
- if (sink_attached) {
- typec_source_removal(chg);
+ if (sink_attached)
typec_sink_insertion(chg);
- } else {
- typec_source_insertion(chg);
+ else
typec_sink_removal(chg);
- }
-
- rp = smblib_get_prop_ufp_mode(chg);
- if (rp == POWER_SUPPLY_TYPEC_SOURCE_HIGH
- || rp == POWER_SUPPLY_TYPEC_NON_COMPLIANT) {
- smblib_dbg(chg, PR_MISC, "VBUS & CC could be shorted; keeping HVDCP disabled\n");
- vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
- true, 0);
- } else {
- vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
- false, 0);
- }
}
static void smblib_handle_typec_debounce_done(struct smb_charger *chg,
- bool rising, bool sink_attached, bool legacy_cable)
+ bool rising, bool sink_attached)
{
int rc;
union power_supply_propval pval = {0, };
- if (rising)
- smblib_handle_typec_insertion(chg, sink_attached, legacy_cable);
- else
- smblib_handle_typec_removal(chg);
+ if (rising) {
+ if (!chg->typec_present) {
+ chg->typec_present = true;
+ smblib_dbg(chg, PR_MISC, "TypeC insertion\n");
+ smblib_handle_typec_insertion(chg, sink_attached);
+ }
+ } else {
+ if (chg->typec_present) {
+ chg->typec_present = false;
+ smblib_dbg(chg, PR_MISC, "TypeC removal\n");
+ smblib_handle_typec_removal(chg);
+ }
+ }
rc = smblib_get_prop_typec_mode(chg, &pval);
if (rc < 0)
smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc);
- /*
- * HW BUG - after cable is removed, medium or high rd reading
- * falls to std. Use it for signal of typec cc detachment in
- * software WA.
- */
- if (chg->cc2_sink_detach_flag == CC2_SINK_MEDIUM_HIGH
- && pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
-
- chg->cc2_sink_detach_flag = CC2_SINK_WA_DONE;
-
- rc = smblib_masked_write(chg,
- TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
- EXIT_SNK_BASED_ON_CC_BIT, 0);
- if (rc < 0)
- smblib_err(chg, "Couldn't get prop typec mode rc=%d\n",
- rc);
- }
-
smblib_dbg(chg, PR_INTERRUPT, "IRQ: debounce-done %s; Type-C %s detected\n",
rising ? "rising" : "falling",
smblib_typec_mode_name[pval.intval]);
@@ -3717,50 +3679,54 @@ irqreturn_t smblib_handle_usb_typec_change_for_uusb(struct smb_charger *chg)
return IRQ_HANDLED;
}
+static void smblib_usb_typec_change(struct smb_charger *chg)
+{
+ int rc;
+ bool debounce_done, sink_attached;
+
+ rc = smblib_multibyte_read(chg, TYPE_C_STATUS_1_REG,
+ chg->typec_status, 5);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't cache USB Type-C status rc=%d\n", rc);
+ return;
+ }
+
+ debounce_done =
+ (bool)(chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
+ sink_attached =
+ (bool)(chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT);
+
+ smblib_handle_typec_debounce_done(chg, debounce_done, sink_attached);
+
+ if (chg->typec_status[3] & TYPEC_VBUS_ERROR_STATUS_BIT)
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: vbus-error\n");
+
+ if (chg->typec_status[3] & TYPEC_VCONN_OVERCURR_STATUS_BIT)
+ schedule_work(&chg->vconn_oc_work);
+
+ power_supply_changed(chg->usb_psy);
+}
+
irqreturn_t smblib_handle_usb_typec_change(int irq, void *data)
{
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
- int rc;
- u8 stat4, stat5;
- bool debounce_done, sink_attached, legacy_cable;
- if (chg->micro_usb_mode)
- return smblib_handle_usb_typec_change_for_uusb(chg);
-
- /* WA - not when PD hard_reset WIP on cc2 in sink mode */
- if (chg->cc2_sink_detach_flag == CC2_SINK_STD)
- return IRQ_HANDLED;
-
- rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat4);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ if (chg->micro_usb_mode) {
+ smblib_handle_usb_typec_change_for_uusb(chg);
return IRQ_HANDLED;
}
- rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc);
+ if (chg->cc2_detach_wa_active || chg->typec_en_dis_active) {
+ smblib_dbg(chg, PR_INTERRUPT, "Ignoring since %s active\n",
+ chg->cc2_detach_wa_active ?
+ "cc2_detach_wa" : "typec_en_dis");
return IRQ_HANDLED;
}
- debounce_done = (bool)(stat4 & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
- sink_attached = (bool)(stat4 & UFP_DFP_MODE_STATUS_BIT);
- legacy_cable = (bool)(stat5 & TYPEC_LEGACY_CABLE_STATUS_BIT);
-
- smblib_handle_typec_debounce_done(chg,
- debounce_done, sink_attached, legacy_cable);
-
- if (stat4 & TYPEC_VBUS_ERROR_STATUS_BIT)
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s vbus-error\n",
- irq_data->name);
-
- if (stat4 & TYPEC_VCONN_OVERCURR_STATUS_BIT)
- schedule_work(&chg->vconn_oc_work);
-
- power_supply_changed(chg->usb_psy);
- smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", stat4);
- smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_5 = 0x%02x\n", stat5);
+ mutex_lock(&chg->lock);
+ smblib_usb_typec_change(chg);
+ mutex_unlock(&chg->lock);
return IRQ_HANDLED;
}
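
Editor's note: with this restructuring the handler caches all five Type-C status bytes once and decodes bits out of that single snapshot, so insertion/removal decisions cannot observe two different hardware states. A minimal sketch of the decode step (the bit positions below are placeholders; the real masks come from smb-reg.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEBOUNCE_DONE_BIT	(1u << 0)	/* placeholder position */
#define SINK_ATTACHED_BIT	(1u << 1)	/* placeholder position */

/* Decode one consistent snapshot of TYPE_C_STATUS_1..5. */
static void decode_typec_status(const uint8_t status[5])
{
	bool debounce_done = status[3] & DEBOUNCE_DONE_BIT;
	bool sink_attached = status[3] & SINK_ATTACHED_BIT;

	printf("debounce_done=%d sink_attached=%d\n",
	       debounce_done, sink_attached);
}

int main(void)
{
	uint8_t snapshot[5] = { 0, 0, 0, DEBOUNCE_DONE_BIT, 0 };

	decode_typec_status(snapshot);
	return 0;
}
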
@@ -3788,7 +3754,7 @@ irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data)
{
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
- int rc;
+ int rc, usb_icl;
u8 stat;
if (!(chg->wa_flags & BOOST_BACK_WA))
@@ -3800,8 +3766,9 @@ irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data)
return IRQ_HANDLED;
}
- if ((stat & USE_USBIN_BIT) &&
- get_effective_result(chg->usb_icl_votable) < USBIN_25MA)
+ /* skip suspending input if it's already suspended by some other voter */
+ usb_icl = get_effective_result(chg->usb_icl_votable);
+ if ((stat & USE_USBIN_BIT) && usb_icl >= 0 && usb_icl < USBIN_25MA)
return IRQ_HANDLED;
if (stat & USE_DCIN_BIT)
@@ -3839,12 +3806,7 @@ static void smblib_hvdcp_detect_work(struct work_struct *work)
vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
false, 0);
- if (get_effective_result(chg->pd_disallowed_votable_indirect))
- /* pd is still disabled, try hvdcp */
- try_rerun_apsd_for_hvdcp(chg);
- else
- /* notify pd now that pd is allowed */
- power_supply_changed(chg->usb_psy);
+ power_supply_changed(chg->usb_psy);
}
static void bms_update_work(struct work_struct *work)
@@ -3885,11 +3847,13 @@ static void clear_hdc_work(struct work_struct *work)
static void rdstd_cc2_detach_work(struct work_struct *work)
{
int rc;
- u8 stat;
- struct smb_irq_data irq_data = {NULL, "cc2-removal-workaround"};
+ u8 stat4, stat5;
struct smb_charger *chg = container_of(work, struct smb_charger,
rdstd_cc2_detach_work);
+ if (!chg->cc2_detach_wa_active)
+ return;
+
/*
* WA steps -
* 1. Enable both UFP and DFP, wait for 10ms.
@@ -3897,7 +3861,7 @@ static void rdstd_cc2_detach_work(struct work_struct *work)
* 3. Removal detected if both TYPEC_DEBOUNCE_DONE_STATUS
* and TIMER_STAGE bits are gone, otherwise repeat all by
* work rescheduling.
- * Note, work will be cancelled when pd_hard_reset is 0.
+ * Note, work will be cancelled when USB_PLUGIN rises.
*/
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
@@ -3920,30 +3884,35 @@ static void rdstd_cc2_detach_work(struct work_struct *work)
usleep_range(30000, 31000);
- rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat4);
if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
- rc);
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
return;
}
- if (stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT)
- goto rerun;
- rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat);
+ rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5);
if (rc < 0) {
smblib_err(chg,
"Couldn't read TYPE_C_STATUS_5_REG rc=%d\n", rc);
return;
}
- if (stat & TIMER_STAGE_2_BIT)
+
+ if ((stat4 & TYPEC_DEBOUNCE_DONE_STATUS_BIT)
+ || (stat5 & TIMER_STAGE_2_BIT)) {
+ smblib_dbg(chg, PR_MISC, "rerunning DD=%d TS2BIT=%d\n",
+ (int)(stat4 & TYPEC_DEBOUNCE_DONE_STATUS_BIT),
+ (int)(stat5 & TIMER_STAGE_2_BIT));
goto rerun;
+ }
- /* Bingo, cc2 removal detected */
+ smblib_dbg(chg, PR_MISC, "Bingo CC2 Removal detected\n");
+ chg->cc2_detach_wa_active = false;
+ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ EXIT_SNK_BASED_ON_CC_BIT, 0);
smblib_reg_block_restore(chg, cc2_detach_settings);
- chg->cc2_sink_detach_flag = CC2_SINK_WA_DONE;
- irq_data.parent_data = chg;
- smblib_handle_usb_typec_change(0, &irq_data);
-
+ mutex_lock(&chg->lock);
+ smblib_usb_typec_change(chg);
+ mutex_unlock(&chg->lock);
return;
rerun:
@@ -4166,6 +4135,56 @@ static void smblib_pl_enable_work(struct work_struct *work)
vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
}
+static void smblib_legacy_detection_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ legacy_detection_work);
+ int rc;
+ u8 stat;
+ bool legacy, rp_high;
+
+ mutex_lock(&chg->lock);
+ chg->typec_en_dis_active = 1;
+ smblib_dbg(chg, PR_MISC, "running legacy unknown workaround\n");
+ rc = smblib_masked_write(chg,
+ TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ TYPEC_DISABLE_CMD_BIT,
+ TYPEC_DISABLE_CMD_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable type-c rc=%d\n", rc);
+
+ /* wait for the adapter to turn off VBUS */
+ msleep(500);
+
+ rc = smblib_masked_write(chg,
+ TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ TYPEC_DISABLE_CMD_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable type-c rc=%d\n", rc);
+
+ /* wait for type-c detection to complete */
+ msleep(100);
+
+ rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read typec stat5 rc = %d\n", rc);
+ goto unlock;
+ }
+
+ chg->typec_legacy_valid = true;
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+ legacy = stat & TYPEC_LEGACY_CABLE_STATUS_BIT;
+ rp_high = smblib_get_prop_ufp_mode(chg) ==
+ POWER_SUPPLY_TYPEC_SOURCE_HIGH;
+ if (!legacy || !rp_high)
+ vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+ false, 0);
+
+unlock:
+ chg->typec_en_dis_active = 0;
+ mutex_unlock(&chg->lock);
+}
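
Editor's note: the legacy-cable workaround briefly disables and re-enables Type-C detection, and the typec_en_dis_active flag tells the change-interrupt handler to back off while the work holds chg->lock for the actual state changes. A compact userspace model of that gate (pthread-based sketch, deliberately simplified; like the driver, the flag is checked outside the lock):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool en_dis_active;

/* Stand-in for smblib_handle_usb_typec_change(). */
static void typec_change_irq(void)
{
	if (en_dis_active) {		/* cheap early bail, as in the driver */
		printf("Ignoring since typec_en_dis active\n");
		return;
	}
	pthread_mutex_lock(&lock);
	printf("handling Type-C change\n");
	pthread_mutex_unlock(&lock);
}

/* Stand-in for smblib_legacy_detection_work(). */
static void *legacy_detection(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&lock);
	en_dis_active = true;
	usleep(100 * 1000);	/* ...toggle Type-C, wait for re-detection... */
	en_dis_active = false;
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, legacy_detection, NULL);
	usleep(10 * 1000);	/* the interrupt fires while the workaround runs */
	typec_change_irq();	/* prints the "Ignoring" message */
	pthread_join(t, NULL);
	return 0;
}
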
+
static int smblib_create_votables(struct smb_charger *chg)
{
int rc = 0;
@@ -4298,6 +4317,15 @@ static int smblib_create_votables(struct smb_charger *chg)
return rc;
}
+ chg->typec_irq_disable_votable = create_votable("TYPEC_IRQ_DISABLE",
+ VOTE_SET_ANY,
+ smblib_typec_irq_disable_vote_callback,
+ chg);
+ if (IS_ERR(chg->typec_irq_disable_votable)) {
+ rc = PTR_ERR(chg->typec_irq_disable_votable);
+ return rc;
+ }
+
return rc;
}
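
Editor's note: the new TYPEC_IRQ_DISABLE votable uses VOTE_SET_ANY semantics, so the interrupt stays disabled as long as at least one client (for example CC2_WA_VOTER) votes for it. A minimal sketch of that aggregation rule; the callback prototype mentioned in the comment is an assumed shape, not copied from the driver:

#include <stdbool.h>
#include <stdio.h>

/*
 * VOTE_SET_ANY aggregation: the effective result is true as soon as a
 * single client votes true.  When the result changes, the votable core
 * invokes the driver callback (something along the lines of
 * smblib_typec_irq_disable_vote_callback(votable, data, disable, client))
 * to actually mask or unmask the interrupt.
 */
static bool set_any(const bool votes[], int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (votes[i])
			return true;
	return false;
}

int main(void)
{
	bool votes[] = { false, true };	/* e.g. CC2_WA_VOTER asserted */

	printf("typec change irq %s\n",
	       set_any(votes, 2) ? "disabled" : "enabled");
	return 0;
}
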
@@ -4323,6 +4351,8 @@ static void smblib_destroy_votables(struct smb_charger *chg)
destroy_votable(chg->apsd_disable_votable);
if (chg->hvdcp_hw_inov_dis_votable)
destroy_votable(chg->hvdcp_hw_inov_dis_votable);
+ if (chg->typec_irq_disable_votable)
+ destroy_votable(chg->typec_irq_disable_votable);
}
static void smblib_iio_deinit(struct smb_charger *chg)
@@ -4343,6 +4373,7 @@ int smblib_init(struct smb_charger *chg)
{
int rc = 0;
+ mutex_init(&chg->lock);
mutex_init(&chg->write_lock);
mutex_init(&chg->otg_oc_lock);
INIT_WORK(&chg->bms_update_work, bms_update_work);
@@ -4355,6 +4386,7 @@ int smblib_init(struct smb_charger *chg)
INIT_DELAYED_WORK(&chg->otg_ss_done_work, smblib_otg_ss_done_work);
INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
+ INIT_WORK(&chg->legacy_detection_work, smblib_legacy_detection_work);
chg->fake_capacity = -EINVAL;
switch (chg->mode) {
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 49b9d3d..b0d84f0 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -61,6 +61,7 @@ enum print_reason {
#define SW_QC3_VOTER "SW_QC3_VOTER"
#define AICL_RERUN_VOTER "AICL_RERUN_VOTER"
#define LEGACY_UNKNOWN_VOTER "LEGACY_UNKNOWN_VOTER"
+#define CC2_WA_VOTER "CC2_WA_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
@@ -71,13 +72,6 @@ enum smb_mode {
NUM_MODES,
};
-enum cc2_sink_type {
- CC2_SINK_NONE = 0,
- CC2_SINK_STD,
- CC2_SINK_MEDIUM_HIGH,
- CC2_SINK_WA_DONE,
-};
-
enum {
QC_CHARGER_DETECTION_WA_BIT = BIT(0),
BOOST_BACK_WA = BIT(1),
@@ -236,6 +230,7 @@ struct smb_charger {
int smb_version;
/* locks */
+ struct mutex lock;
struct mutex write_lock;
struct mutex ps_change_lock;
struct mutex otg_oc_lock;
@@ -276,6 +271,7 @@ struct smb_charger {
struct votable *apsd_disable_votable;
struct votable *hvdcp_hw_inov_dis_votable;
struct votable *usb_irq_enable_votable;
+ struct votable *typec_irq_disable_votable;
/* work */
struct work_struct bms_update_work;
@@ -289,6 +285,7 @@ struct smb_charger {
struct delayed_work otg_ss_done_work;
struct delayed_work icl_change_work;
struct delayed_work pl_enable_work;
+ struct work_struct legacy_detection_work;
/* cached status */
int voltage_min_uv;
@@ -312,10 +309,16 @@ struct smb_charger {
int vconn_attempts;
int default_icl_ua;
int otg_cl_ua;
+ bool uusb_apsd_rerun_done;
+ bool pd_hard_reset;
+ bool typec_present;
+ u8 typec_status[5];
+ bool typec_legacy_valid;
/* workaround flag */
u32 wa_flags;
- enum cc2_sink_type cc2_sink_detach_flag;
+ bool cc2_detach_wa_active;
+ bool typec_en_dis_active;
int boost_current_ua;
int temp_speed_reading_count;
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index 167666a..3f260a4 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -1025,4 +1025,14 @@ enum {
/* CHGR FREQ Peripheral registers */
#define FREQ_CLK_DIV_REG (CHGR_FREQ_BASE + 0x50)
+/* SMB1355 specific registers */
+#define SMB1355_TEMP_COMP_STATUS_REG (MISC_BASE + 0x07)
+#define SKIN_TEMP_RST_HOT_BIT BIT(6)
+#define SKIN_TEMP_UB_HOT_BIT BIT(5)
+#define SKIN_TEMP_LB_HOT_BIT BIT(4)
+#define DIE_TEMP_TSD_HOT_BIT BIT(3)
+#define DIE_TEMP_RST_HOT_BIT BIT(2)
+#define DIE_TEMP_UB_HOT_BIT BIT(1)
+#define DIE_TEMP_LB_HOT_BIT BIT(0)
+
#endif /* __SMB2_CHARGER_REG_H */
diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c
index 0d1f2a6..b92a482 100644
--- a/drivers/power/supply/qcom/smb1351-charger.c
+++ b/drivers/power/supply/qcom/smb1351-charger.c
@@ -1655,7 +1655,7 @@ static int smb1351_parallel_get_property(struct power_supply *psy,
switch (prop) {
case POWER_SUPPLY_PROP_CHARGING_ENABLED:
- val->intval = !chip->usb_suspended_status;
+ val->intval = !chip->parallel_charger_suspended;
break;
case POWER_SUPPLY_PROP_CURRENT_MAX:
if (!chip->parallel_charger_suspended)
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 83374bb..a29871b 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -104,6 +104,8 @@ struct smb138x {
struct smb_dt_props dt;
struct power_supply *parallel_psy;
u32 wa_flags;
+ struct pmic_revid_data *pmic_rev_id;
+ char *name;
};
static int __debug_mask;
@@ -167,6 +169,14 @@ static int smb138x_parse_dt(struct smb138x *chip)
if (rc < 0)
chip->dt.pl_mode = POWER_SUPPLY_PL_USBMID_USBMID;
+ /* check that smb1355 is configured to run in mid-mid mode */
+ if (chip->pmic_rev_id->pmic_subtype == SMB1355_SUBTYPE
+ && chip->dt.pl_mode != POWER_SUPPLY_PL_USBMID_USBMID) {
+ pr_err("SMB1355 can only run in MID-MID mode, got mode = %d\n",
+ chip->dt.pl_mode);
+ return -EINVAL;
+ }
+
chip->dt.suspend_input = of_property_read_bool(node,
"qcom,suspend-input");
@@ -479,6 +489,30 @@ static int smb138x_init_batt_psy(struct smb138x *chip)
* PARALLEL PSY REGISTRATION *
*****************************/
+static int smb1355_get_prop_connector_health(struct smb138x *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ u8 temp;
+ int rc;
+
+ rc = smblib_read(chg, SMB1355_TEMP_COMP_STATUS_REG, &temp);
+ if (rc < 0) {
+ pr_err("Couldn't read comp stat reg rc = %d\n", rc);
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ if (temp & SKIN_TEMP_RST_HOT_BIT)
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+
+ if (temp & SKIN_TEMP_UB_HOT_BIT)
+ return POWER_SUPPLY_HEALTH_HOT;
+
+ if (temp & SKIN_TEMP_LB_HOT_BIT)
+ return POWER_SUPPLY_HEALTH_WARM;
+
+ return POWER_SUPPLY_HEALTH_COOL;
+}
+
static int smb138x_get_prop_connector_health(struct smb138x *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -536,16 +570,32 @@ static enum power_supply_property smb138x_parallel_props[] = {
POWER_SUPPLY_PROP_PIN_ENABLED,
POWER_SUPPLY_PROP_INPUT_SUSPEND,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
- POWER_SUPPLY_PROP_CURRENT_MAX,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
- POWER_SUPPLY_PROP_CURRENT_NOW,
- POWER_SUPPLY_PROP_CHARGER_TEMP,
- POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_PARALLEL_MODE,
POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
POWER_SUPPLY_PROP_SET_SHIP_MODE,
+ POWER_SUPPLY_PROP_CHARGER_TEMP,
+ POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static enum power_supply_property smb1355_parallel_props[] = {
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_PIN_ENABLED,
+ POWER_SUPPLY_PROP_INPUT_SUSPEND,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_PARALLEL_MODE,
+ POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+ POWER_SUPPLY_PROP_SET_SHIP_MODE,
+ POWER_SUPPLY_PROP_CHARGER_TEMP,
+ POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
};
static int smb138x_parallel_get_prop(struct power_supply *psy,
@@ -583,14 +633,6 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
else
val->intval = 0;
break;
- case POWER_SUPPLY_PROP_CURRENT_MAX:
- if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
- || (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
- rc = smblib_get_charge_param(chg, &chg->param.usb_icl,
- &val->intval);
- else
- val->intval = 0;
- break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
break;
@@ -598,28 +640,46 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
rc = smblib_get_charge_param(chg, &chg->param.fcc,
&val->intval);
break;
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- rc = smblib_get_prop_slave_current_now(chg, val);
- break;
- case POWER_SUPPLY_PROP_CHARGER_TEMP:
- rc = smb138x_get_prop_charger_temp(chip, val);
- break;
- case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
- rc = smblib_get_prop_charger_temp_max(chg, val);
- break;
case POWER_SUPPLY_PROP_MODEL_NAME:
- val->strval = "smb138x";
+ val->strval = chip->name;
break;
case POWER_SUPPLY_PROP_PARALLEL_MODE:
val->intval = chip->dt.pl_mode;
break;
case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
- val->intval = smb138x_get_prop_connector_health(chip);
+ if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
+ val->intval = smb138x_get_prop_connector_health(chip);
+ else
+ val->intval = smb1355_get_prop_connector_health(chip);
break;
case POWER_SUPPLY_PROP_SET_SHIP_MODE:
/* Not in ship mode as long as device is active */
val->intval = 0;
break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP:
+ if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
+ rc = smb138x_get_prop_charger_temp(chip, val);
+ else
+ rc = smblib_get_prop_charger_temp(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+ rc = smblib_get_prop_charger_temp_max(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
+ rc = smblib_get_prop_slave_current_now(chg, val);
+ else
+ rc = -ENODATA;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ if ((chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
+ && ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ || (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)))
+ rc = smblib_get_charge_param(chg, &chg->param.usb_icl,
+ &val->intval);
+ else
+ rc = -ENODATA;
+ break;
default:
pr_err("parallel power supply get prop %d not supported\n",
prop);
@@ -703,7 +763,7 @@ static int smb138x_parallel_prop_is_writeable(struct power_supply *psy,
return 0;
}
-static const struct power_supply_desc parallel_psy_desc = {
+static struct power_supply_desc parallel_psy_desc = {
.name = "parallel",
.type = POWER_SUPPLY_TYPE_PARALLEL,
.properties = smb138x_parallel_props,
@@ -731,6 +791,28 @@ static int smb138x_init_parallel_psy(struct smb138x *chip)
return 0;
}
+static int smb1355_init_parallel_psy(struct smb138x *chip)
+{
+ struct power_supply_config parallel_cfg = {};
+ struct smb_charger *chg = &chip->chg;
+
+ parallel_cfg.drv_data = chip;
+ parallel_cfg.of_node = chg->dev->of_node;
+
+ /* change to smb1355's property list */
+ parallel_psy_desc.properties = smb1355_parallel_props;
+ parallel_psy_desc.num_properties = ARRAY_SIZE(smb1355_parallel_props);
+ chip->parallel_psy = devm_power_supply_register(chg->dev,
+ &parallel_psy_desc,
+ &parallel_cfg);
+ if (IS_ERR(chip->parallel_psy)) {
+ pr_err("Couldn't register parallel power supply\n");
+ return PTR_ERR(chip->parallel_psy);
+ }
+
+ return 0;
+}
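
Editor's note: dropping the const qualifier from parallel_psy_desc (see the hunk above) is what lets smb1355_init_parallel_psy() swap in the SMB1355 property list before registration. A hypothetical alternative, sketched with only the descriptor fields that appear in this diff, would be one descriptor per subtype; the remaining callbacks would still have to be shared with the SMB1381 path:

/* Hypothetical per-subtype descriptor; not the approach taken above. */
static const struct power_supply_desc smb1355_parallel_psy_desc = {
	.name		= "parallel",
	.type		= POWER_SUPPLY_TYPE_PARALLEL,
	.properties	= smb1355_parallel_props,
	.num_properties	= ARRAY_SIZE(smb1355_parallel_props),
	/* .get_property etc. would be copied from parallel_psy_desc */
};
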
+
/******************************
* VBUS REGULATOR REGISTRATION *
******************************/
@@ -1050,7 +1132,6 @@ static int smb138x_init_hw(struct smb138x *chip)
static int smb138x_setup_wa_flags(struct smb138x *chip)
{
- struct pmic_revid_data *pmic_rev_id;
struct device_node *revid_dev_node;
revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
@@ -1060,8 +1141,8 @@ static int smb138x_setup_wa_flags(struct smb138x *chip)
return -EINVAL;
}
- pmic_rev_id = get_revid_data(revid_dev_node);
- if (IS_ERR_OR_NULL(pmic_rev_id)) {
+ chip->pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR_OR_NULL(chip->pmic_rev_id)) {
/*
* the revid peripheral must be registered, any failure
* here only indicates that the rev-id module has not
@@ -1070,14 +1151,14 @@ static int smb138x_setup_wa_flags(struct smb138x *chip)
return -EPROBE_DEFER;
}
- switch (pmic_rev_id->pmic_subtype) {
+ switch (chip->pmic_rev_id->pmic_subtype) {
case SMB1381_SUBTYPE:
- if (pmic_rev_id->rev4 < 2) /* SMB1381 rev 1.0 */
+ if (chip->pmic_rev_id->rev4 < 2) /* SMB1381 rev 1.0 */
chip->wa_flags |= OOB_COMP_WA_BIT;
break;
default:
pr_err("PMIC subtype %d not supported\n",
- pmic_rev_id->pmic_subtype);
+ chip->pmic_rev_id->pmic_subtype);
return -EINVAL;
}
@@ -1375,6 +1456,7 @@ static int smb138x_master_probe(struct smb138x *chip)
chg->param = v1_params;
+ chip->name = "smb1381";
rc = smblib_init(chg);
if (rc < 0) {
pr_err("Couldn't initialize smblib rc=%d\n", rc);
@@ -1435,7 +1517,7 @@ static int smb138x_master_probe(struct smb138x *chip)
return rc;
}
-static int smb138x_slave_probe(struct smb138x *chip)
+static int smb1355_slave_probe(struct smb138x *chip)
{
struct smb_charger *chg = &chip->chg;
int rc = 0;
@@ -1448,6 +1530,55 @@ static int smb138x_slave_probe(struct smb138x *chip)
goto cleanup;
}
+ rc = smb138x_parse_dt(chip);
+ if (rc < 0) {
+ pr_err("Couldn't parse device tree rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = smb138x_init_slave_hw(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize hardware rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = smb1355_init_parallel_psy(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize parallel psy rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = smb138x_determine_initial_slave_status(chip);
+ if (rc < 0) {
+ pr_err("Couldn't determine initial status rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = smb138x_request_interrupts(chip);
+ if (rc < 0) {
+ pr_err("Couldn't request interrupts rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ smblib_deinit(chg);
+ return rc;
+}
+
+static int smb1381_slave_probe(struct smb138x *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ chg->param = v1_params;
+
+ rc = smblib_init(chg);
+ if (rc < 0) {
+ pr_err("Couldn't initialize smblib rc=%d\n", rc);
+ goto cleanup;
+ }
chg->iio.temp_max_chan = iio_channel_get(chg->dev, "charger_temp_max");
if (IS_ERR(chg->iio.temp_max_chan)) {
rc = PTR_ERR(chg->iio.temp_max_chan);
@@ -1515,25 +1646,71 @@ static int smb138x_slave_probe(struct smb138x *chip)
goto cleanup;
}
- return rc;
+ return 0;
cleanup:
smblib_deinit(chg);
- if (chip->parallel_psy)
- power_supply_unregister(chip->parallel_psy);
- if (chg->vbus_vreg && chg->vbus_vreg->rdev)
- regulator_unregister(chg->vbus_vreg->rdev);
return rc;
}
+static int slave_probe(struct smb138x *chip)
+{
+ struct device_node *revid_dev_node;
+ int rc = 0;
+
+ revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ pr_err("Missing qcom,pmic-revid property\n");
+ return -EINVAL;
+ }
+
+ chip->pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR_OR_NULL(chip->pmic_rev_id)) {
+ /*
+ * the revid peripheral must be registered, any failure
+ * here only indicates that the rev-id module has not
+ * probed yet.
+ */
+ return -EPROBE_DEFER;
+ }
+
+ switch (chip->pmic_rev_id->pmic_subtype) {
+ case SMB1355_SUBTYPE:
+ chip->name = "smb1355";
+ rc = smb1355_slave_probe(chip);
+ break;
+ case SMB1381_SUBTYPE:
+ chip->name = "smb1381";
+ rc = smb1381_slave_probe(chip);
+ break;
+ default:
+ pr_err("Unsupported pmic subtype = 0x%02x\n",
+ chip->pmic_rev_id->pmic_subtype);
+ rc = -EINVAL;
+ }
+
+ if (rc < 0) {
+ if (rc != -EPROBE_DEFER)
+ pr_err("Couldn't probe SMB138X rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
static const struct of_device_id match_table[] = {
{
- .compatible = "qcom,smb138x-charger",
- .data = (void *) PARALLEL_MASTER
+ .compatible = "qcom,smb138x-charger",
+ .data = (void *) PARALLEL_MASTER,
},
{
- .compatible = "qcom,smb138x-parallel-slave",
- .data = (void *) PARALLEL_SLAVE
+ .compatible = "qcom,smb138x-parallel-slave",
+ .data = (void *) PARALLEL_SLAVE,
+ },
+ {
+ .compatible = "qcom,smb1355-parallel-slave",
+ .data = (void *) PARALLEL_SLAVE,
},
{ },
};
@@ -1580,7 +1757,7 @@ static int smb138x_probe(struct platform_device *pdev)
rc = smb138x_master_probe(chip);
break;
case PARALLEL_SLAVE:
- rc = smb138x_slave_probe(chip);
+ rc = slave_probe(chip);
break;
default:
pr_err("Couldn't find a matching mode %d\n", chip->chg.mode);
@@ -1594,7 +1771,8 @@ static int smb138x_probe(struct platform_device *pdev)
goto cleanup;
}
- pr_info("SMB138X probed successfully mode=%d\n", chip->chg.mode);
+ pr_info("%s probed successfully mode=%d pl_mode = %d\n",
+ chip->name, chip->chg.mode, chip->dt.pl_mode);
return rc;
cleanup:
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 0b35caa..b759776 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -48,11 +48,6 @@
#include <soc/qcom/socinfo.h>
#include <soc/qcom/ramdump.h>
-#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
-#include <net/cnss_prealloc.h>
-#endif
-
-
#include "wlan_firmware_service_v01.h"
#ifdef CONFIG_ICNSS_DEBUG
@@ -1968,8 +1963,6 @@ static int icnss_call_driver_probe(struct icnss_priv *priv)
if (ret < 0) {
icnss_pr_err("Driver probe failed: %d, state: 0x%lx\n",
ret, priv->state);
- wcnss_prealloc_check_memory_leak();
- wcnss_pre_alloc_reset();
goto out;
}
@@ -2099,8 +2092,6 @@ static int icnss_driver_event_register_driver(void *data)
if (ret) {
icnss_pr_err("Driver probe failed: %d, state: 0x%lx\n",
ret, penv->state);
- wcnss_prealloc_check_memory_leak();
- wcnss_pre_alloc_reset();
goto power_off;
}
@@ -2126,8 +2117,6 @@ static int icnss_driver_event_unregister_driver(void *data)
penv->ops->remove(&penv->pdev->dev);
clear_bit(ICNSS_DRIVER_PROBED, &penv->state);
- wcnss_prealloc_check_memory_leak();
- wcnss_pre_alloc_reset();
penv->ops = NULL;
@@ -2152,8 +2141,6 @@ static int icnss_call_driver_remove(struct icnss_priv *priv)
penv->ops->remove(&priv->pdev->dev);
clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
- wcnss_prealloc_check_memory_leak();
- wcnss_pre_alloc_reset();
icnss_hw_power_off(penv);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 1635bab..e30c159 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -88,8 +88,9 @@ static struct rpmh_msg *get_msg_from_pool(struct rpmh_client *rc)
struct rpmh_mbox *rpm = rc->rpmh;
struct rpmh_msg *msg = NULL;
int pos;
+ unsigned long flags;
- spin_lock(&rpm->lock);
+ spin_lock_irqsave(&rpm->lock, flags);
pos = find_first_zero_bit(rpm->fast_req, RPMH_MAX_FAST_RES);
if (pos != RPMH_MAX_FAST_RES) {
bitmap_set(rpm->fast_req, pos, 1);
@@ -98,7 +99,7 @@ static struct rpmh_msg *get_msg_from_pool(struct rpmh_client *rc)
msg->bit = pos;
msg->rc = rc;
}
- spin_unlock(&rpm->lock);
+ spin_unlock_irqrestore(&rpm->lock, flags);
return msg;
}
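
Editor's note: the rpmh changes in this hunk and the ones that follow apply one rule: rpm->lock is also taken from the mailbox TX-done path, which can run in interrupt context, so every process-context acquisition must save and disable local interrupts. A minimal kernel-style sketch of the pattern, under that assumption:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void process_context_side(void)
{
	unsigned long flags;

	/* Disable local IRQs while holding a lock the IRQ path also takes,
	 * otherwise the interrupt could deadlock on the same CPU. */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... touch state shared with the tx_done callback ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}
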
@@ -108,7 +109,7 @@ static void rpmh_rx_cb(struct mbox_client *cl, void *msg)
struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
atomic_dec(rpm_msg->wait_count);
- wake_up_interruptible(rpm_msg->waitq);
+ wake_up(rpm_msg->waitq);
}
static void rpmh_tx_done(struct mbox_client *cl, void *msg, int r)
@@ -117,6 +118,7 @@ static void rpmh_tx_done(struct mbox_client *cl, void *msg, int r)
struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
atomic_t *wc = rpm_msg->wait_count;
wait_queue_head_t *waitq = rpm_msg->waitq;
+ unsigned long flags;
rpm_msg->err = r;
@@ -143,15 +145,15 @@ static void rpmh_tx_done(struct mbox_client *cl, void *msg, int r)
/* If we allocated the pool, set it as available */
if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
- spin_lock(&rpm->lock);
+ spin_lock_irqsave(&rpm->lock, flags);
bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
- spin_unlock(&rpm->lock);
+ spin_unlock_irqrestore(&rpm->lock, flags);
}
/* Signal the blocking thread we are done */
if (waitq) {
atomic_dec(wc);
- wake_up_interruptible(waitq);
+ wake_up(waitq);
}
}
@@ -174,8 +176,9 @@ static struct rpmh_req *cache_rpm_request(struct rpmh_client *rc,
{
struct rpmh_req *req;
struct rpmh_mbox *rpm = rc->rpmh;
+ unsigned long flags;
- spin_lock(&rpm->lock);
+ spin_lock_irqsave(&rpm->lock, flags);
req = __find_req(rc, cmd->addr);
if (req)
goto existing;
@@ -210,7 +213,7 @@ static struct rpmh_req *cache_rpm_request(struct rpmh_client *rc,
unlock:
rpm->dirty = true;
- spin_unlock(&rpm->lock);
+ spin_unlock_irqrestore(&rpm->lock, flags);
return req;
}
@@ -329,9 +332,7 @@ int rpmh_write_single(struct rpmh_client *rc, enum rpmh_state state,
if (ret < 0)
return ret;
- ret = wait_event_interruptible(waitq, atomic_read(&wait_count) == 0);
- if (ret)
- return ret;
+ wait_event(waitq, atomic_read(&wait_count) == 0);
return rpm_msg.err;
}
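
Editor's note: switching from wait_event_interruptible() to wait_event() means a signal can no longer return control to the caller while the mailbox still owns the on-stack message. The same handshake could also be written with the generic completion API; a comparison sketch only (the driver keeps the waitqueue/atomic pair so a single counter can cover several messages in the passthru path):

#include <linux/completion.h>

static DECLARE_COMPLETION(tx_done);

/* Called from the mailbox TX-done callback. */
static void demo_tx_done(void)
{
	complete(&tx_done);
}

static int demo_send_and_wait(void)
{
	reinit_completion(&tx_done);
	/* ... mbox_send_message() ... */
	wait_for_completion(&tx_done);	/* uninterruptible, like wait_event() */
	return 0;
}
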
@@ -423,13 +424,11 @@ int rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
rpm_msg.msg.num_payload = n;
ret = __rpmh_write(rc, state, &rpm_msg);
- if (ret < 0)
- return ret;
-
- ret = wait_event_interruptible(waitq, atomic_read(&wait_count) == 0);
if (ret)
return ret;
+ wait_event(waitq, atomic_read(&wait_count) == 0);
+
return rpm_msg.err;
}
EXPORT_SYMBOL(rpmh_write);
@@ -494,8 +493,6 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i]);
if (IS_ERR_OR_NULL(rpm_msg[i]))
return PTR_ERR(rpm_msg[i]);
- rpm_msg[i]->waitq = &waitq;
- rpm_msg[i]->wait_count = &wait_count;
cmd += n[i];
}
@@ -504,16 +501,18 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
might_sleep();
atomic_set(&wait_count, count);
for (i = 0; i < count; i++) {
+ rpm_msg[i]->waitq = &waitq;
+ rpm_msg[i]->wait_count = &wait_count;
/* Bypass caching and write to mailbox directly */
ret = mbox_send_message(rc->chan, &rpm_msg[i]->msg);
if (ret < 0)
return ret;
}
- return wait_event_interruptible(waitq,
- atomic_read(&wait_count) == 0);
+ wait_event(waitq, atomic_read(&wait_count) == 0);
} else {
/* Send Sleep requests to the controller, expect no response */
for (i = 0; i < count; i++) {
+ rpm_msg[i]->waitq = NULL;
ret = mbox_send_controller_data(rc->chan,
&rpm_msg[i]->msg);
/* Clean up our call by spoofing tx_done */
@@ -521,6 +520,8 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
}
return 0;
}
+
+ return 0;
}
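
Editor's note: in the passthru path the waitqueue and counter are now attached only to active-state messages; sleep-state messages carry waitq = NULL because their tx_done is spoofed locally and must not touch the caller's on-stack state. A tiny userspace model of that rule (illustrative only):

#include <stdio.h>

struct msg { int *pending; };		/* NULL => nobody is waiting */

static void tx_done(struct msg *m)
{
	if (m->pending && --(*m->pending) == 0)
		printf("waiter released\n");
}

int main(void)
{
	int pending = 1;
	struct msg active = { &pending };	/* real TX interrupt expected */
	struct msg sleep  = { NULL };		/* spoofed completion */

	tx_done(&sleep);	/* harmless: no counter attached */
	tx_done(&active);	/* prints "waiter released" */
	return 0;
}
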
EXPORT_SYMBOL(rpmh_write_passthru);
@@ -568,6 +569,7 @@ int rpmh_invalidate(struct rpmh_client *rc)
{
DEFINE_RPMH_MSG_ONSTACK(rc, 0, NULL, NULL, rpm_msg);
struct rpmh_mbox *rpm;
+ unsigned long flags;
if (IS_ERR_OR_NULL(rc))
return -EINVAL;
@@ -579,9 +581,9 @@ int rpmh_invalidate(struct rpmh_client *rc)
rpm_msg.msg.invalidate = true;
rpm_msg.msg.is_complete = false;
- spin_lock(&rpm->lock);
+ spin_lock_irqsave(&rpm->lock, flags);
rpm->dirty = true;
- spin_unlock(&rpm->lock);
+ spin_unlock_irqrestore(&rpm->lock, flags);
return mbox_send_controller_data(rc->chan, &rpm_msg.msg);
}
@@ -623,9 +625,7 @@ int rpmh_read(struct rpmh_client *rc, u32 addr, u32 *resp)
return ret;
/* Wait until the response is received from RPMH */
- ret = wait_event_interruptible(waitq, atomic_read(&wait_count) == 0);
- if (ret)
- return ret;
+ wait_event(waitq, atomic_read(&wait_count) == 0);
/* Read the data back from the tcs_mbox_msg structure */
*resp = rpm_msg.cmd[0].data;
@@ -671,6 +671,7 @@ int rpmh_flush(struct rpmh_client *rc)
struct rpmh_req *p;
struct rpmh_mbox *rpm = rc->rpmh;
int ret;
+ unsigned long flags;
if (IS_ERR_OR_NULL(rc))
return -EINVAL;
@@ -681,13 +682,13 @@ int rpmh_flush(struct rpmh_client *rc)
if (!mbox_controller_is_idle(rc->chan))
return -EBUSY;
- spin_lock(&rpm->lock);
+ spin_lock_irqsave(&rpm->lock, flags);
if (!rpm->dirty) {
pr_debug("Skipping flush, TCS has latest data.\n");
- spin_unlock(&rpm->lock);
+ spin_unlock_irqrestore(&rpm->lock, flags);
return 0;
}
- spin_unlock(&rpm->lock);
+ spin_unlock_irqrestore(&rpm->lock, flags);
/*
* Nobody else should be calling this function other than sleep,
@@ -708,9 +709,9 @@ int rpmh_flush(struct rpmh_client *rc)
return ret;
}
- spin_lock(&rpm->lock);
+ spin_lock_irqsave(&rpm->lock, flags);
rpm->dirty = false;
- spin_unlock(&rpm->lock);
+ spin_unlock_irqrestore(&rpm->lock, flags);
return 0;
}
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index f3d6209..7a784aa 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,6 +30,7 @@
#include <soc/qcom/scm.h>
#include <soc/qcom/memory_dump.h>
#include <soc/qcom/watchdog.h>
+#include <linux/dma-mapping.h>
#define MODULE_NAME "msm_watchdog"
#define WDT0_ACCSCSSNBARK_INT 0
@@ -49,6 +50,7 @@
#define SCM_SET_REGSAVE_CMD 0x2
#define SCM_SVC_SEC_WDOG_DIS 0x7
#define MAX_CPU_CTX_SIZE 2048
+#define MAX_CPU_SCANDUMP_SIZE 0x10000
static struct msm_watchdog_data *wdog_data;
@@ -557,6 +559,49 @@ static void configure_bark_dump(struct msm_watchdog_data *wdog_dd)
return;
}
+static void configure_scandump(struct msm_watchdog_data *wdog_dd)
+{
+ int ret;
+ struct msm_dump_entry dump_entry;
+ struct msm_dump_data *cpu_data;
+ int cpu;
+ static dma_addr_t dump_addr;
+ static void *dump_vaddr;
+
+ for_each_cpu(cpu, cpu_present_mask) {
+ cpu_data = devm_kzalloc(wdog_dd->dev,
+ sizeof(struct msm_dump_data),
+ GFP_KERNEL);
+ if (!cpu_data)
+ continue;
+
+ dump_vaddr = (void *) dma_alloc_coherent(wdog_dd->dev,
+ MAX_CPU_SCANDUMP_SIZE,
+ &dump_addr,
+ GFP_KERNEL);
+ if (!dump_vaddr) {
+ dev_err(wdog_dd->dev, "Couldn't get memory for dump\n");
+ continue;
+ }
+ memset(dump_vaddr, 0x0, MAX_CPU_SCANDUMP_SIZE);
+
+ cpu_data->addr = dump_addr;
+ cpu_data->len = MAX_CPU_SCANDUMP_SIZE;
+ dump_entry.id = MSM_DUMP_DATA_SCANDUMP_PER_CPU + cpu;
+ dump_entry.addr = virt_to_phys(cpu_data);
+ ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
+ &dump_entry);
+ if (ret) {
+ dev_err(wdog_dd->dev, "Dump setup failed, id = %d\n",
+ MSM_DUMP_DATA_SCANDUMP_PER_CPU + cpu);
+ dma_free_coherent(wdog_dd->dev, MAX_CPU_SCANDUMP_SIZE,
+ dump_vaddr,
+ dump_addr);
+ devm_kfree(wdog_dd->dev, cpu_data);
+ }
+ }
+}
+
static int init_watchdog_sysfs(struct msm_watchdog_data *wdog_dd)
{
int error = 0;
@@ -617,6 +662,7 @@ static void init_watchdog_data(struct msm_watchdog_data *wdog_dd)
delay_time = msecs_to_jiffies(wdog_dd->pet_time);
wdog_dd->min_slack_ticks = UINT_MAX;
wdog_dd->min_slack_ns = ULLONG_MAX;
+ configure_scandump(wdog_dd);
configure_bark_dump(wdog_dd);
timeout = (wdog_dd->bark_time * WDT_HZ)/1000;
__raw_writel(timeout, wdog_dd->base + WDT0_BARK_TIME);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 203287f..94661cf 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -930,7 +930,6 @@ struct cifs_tcon {
bool use_persistent:1; /* use persistent instead of durable handles */
#ifdef CONFIG_CIFS_SMB2
bool print:1; /* set if connection to printer share */
- bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
__le32 capabilities;
__u32 share_flags;
__u32 maximal_access;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index fc537c2..87b87e0 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -1015,6 +1015,15 @@ cifs_dir_needs_close(struct cifsFileInfo *cfile)
return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
}
+static bool
+cifs_can_echo(struct TCP_Server_Info *server)
+{
+ if (server->tcpStatus == CifsGood)
+ return true;
+
+ return false;
+}
+
struct smb_version_operations smb1_operations = {
.send_cancel = send_nt_cancel,
.compare_fids = cifs_compare_fids,
@@ -1049,6 +1058,7 @@ struct smb_version_operations smb1_operations = {
.get_dfs_refer = CIFSGetDFSRefer,
.qfs_tcon = cifs_qfs_tcon,
.is_path_accessible = cifs_is_path_accessible,
+ .can_echo = cifs_can_echo,
.query_path_info = cifs_query_path_info,
.query_file_info = cifs_query_file_info,
.get_srv_inum = cifs_get_srv_inum,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 7080dac..8021853 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1084,9 +1084,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
else
return -EIO;
- if (tcon && tcon->bad_network_name)
- return -ENOENT;
-
if ((tcon && tcon->seal) &&
((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
cifs_dbg(VFS, "encryption requested but no server support");
@@ -1188,8 +1185,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
tcon_error_exit:
if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
- if (tcon)
- tcon->bad_network_name = true;
}
goto tcon_exit;
}
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index ca16c5d..87ab02e 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -622,6 +622,11 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
return err;
lock_2_inodes(dir, inode);
+
+ /* Handle O_TMPFILE corner case, it is allowed to link a O_TMPFILE. */
+ if (inode->i_nlink == 0)
+ ubifs_delete_orphan(c, inode->i_ino);
+
inc_nlink(inode);
ihold(inode);
inode->i_ctime = ubifs_current_time(inode);
@@ -641,6 +646,8 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
dir->i_size -= sz_change;
dir_ui->ui_size = dir->i_size;
drop_nlink(inode);
+ if (inode->i_nlink == 0)
+ ubifs_add_orphan(c, inode->i_ino);
unlock_2_inodes(dir, inode);
ubifs_release_budget(c, &req);
iput(inode);
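
Editor's note: the new ubifs_link() logic covers the O_TMPFILE corner case: such an inode starts life with i_nlink == 0 and sits on the orphan list, so linking it in must delete the orphan, and the error path re-adds it if the link is rolled back. From userspace, the sequence that exercises this looks roughly like the following sketch (paths are illustrative; on a ubifs mount the linkat() below is the call that reaches ubifs_link() with i_nlink == 0):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	int fd = open("/tmp", O_TMPFILE | O_WRONLY, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
	/* Give the anonymous inode a name, raising i_nlink from 0 to 1. */
	if (linkat(AT_FDCWD, path, AT_FDCWD, "/tmp/now-visible",
		   AT_SYMLINK_FOLLOW) < 0)
		perror("linkat");
	close(fd);
	return 0;
}
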
@@ -1088,9 +1095,6 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
struct timespec time;
unsigned int uninitialized_var(saved_nlink);
- if (flags & ~RENAME_NOREPLACE)
- return -EINVAL;
-
/*
* Budget request settings: deletion direntry, new direntry, removing
* the old inode, and changing old and new parent directory inodes.
diff --git a/include/dt-bindings/clock/qcom,camcc-sdm845.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h
index e169172..0d9d9f6 100644
--- a/include/dt-bindings/clock/qcom,camcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h
@@ -34,71 +34,70 @@
#define CAM_CC_CSIPHY0_CLK 17
#define CAM_CC_CSIPHY1_CLK 18
#define CAM_CC_CSIPHY2_CLK 19
-#define CAM_CC_DEBUG_CLK 20
-#define CAM_CC_FAST_AHB_CLK_SRC 21
-#define CAM_CC_FD_CORE_CLK 22
-#define CAM_CC_FD_CORE_CLK_SRC 23
-#define CAM_CC_FD_CORE_UAR_CLK 24
-#define CAM_CC_ICP_APB_CLK 25
-#define CAM_CC_ICP_ATB_CLK 26
-#define CAM_CC_ICP_CLK 27
-#define CAM_CC_ICP_CLK_SRC 28
-#define CAM_CC_ICP_CTI_CLK 29
-#define CAM_CC_ICP_TS_CLK 30
-#define CAM_CC_IFE_0_AXI_CLK 31
-#define CAM_CC_IFE_0_CLK 32
-#define CAM_CC_IFE_0_CLK_SRC 33
-#define CAM_CC_IFE_0_CPHY_RX_CLK 34
-#define CAM_CC_IFE_0_CSID_CLK 35
-#define CAM_CC_IFE_0_CSID_CLK_SRC 36
-#define CAM_CC_IFE_0_DSP_CLK 37
-#define CAM_CC_IFE_1_AXI_CLK 38
-#define CAM_CC_IFE_1_CLK 39
-#define CAM_CC_IFE_1_CLK_SRC 40
-#define CAM_CC_IFE_1_CPHY_RX_CLK 41
-#define CAM_CC_IFE_1_CSID_CLK 42
-#define CAM_CC_IFE_1_CSID_CLK_SRC 43
-#define CAM_CC_IFE_1_DSP_CLK 44
-#define CAM_CC_IFE_LITE_CLK 45
-#define CAM_CC_IFE_LITE_CLK_SRC 46
-#define CAM_CC_IFE_LITE_CPHY_RX_CLK 47
-#define CAM_CC_IFE_LITE_CSID_CLK 48
-#define CAM_CC_IFE_LITE_CSID_CLK_SRC 49
-#define CAM_CC_IPE_0_AHB_CLK 50
-#define CAM_CC_IPE_0_AREG_CLK 51
-#define CAM_CC_IPE_0_AXI_CLK 52
-#define CAM_CC_IPE_0_CLK 53
-#define CAM_CC_IPE_0_CLK_SRC 54
-#define CAM_CC_IPE_1_AHB_CLK 55
-#define CAM_CC_IPE_1_AREG_CLK 56
-#define CAM_CC_IPE_1_AXI_CLK 57
-#define CAM_CC_IPE_1_CLK 58
-#define CAM_CC_IPE_1_CLK_SRC 59
-#define CAM_CC_JPEG_CLK 60
-#define CAM_CC_JPEG_CLK_SRC 61
-#define CAM_CC_LRME_CLK 62
-#define CAM_CC_LRME_CLK_SRC 63
-#define CAM_CC_MCLK0_CLK 64
-#define CAM_CC_MCLK0_CLK_SRC 65
-#define CAM_CC_MCLK1_CLK 66
-#define CAM_CC_MCLK1_CLK_SRC 67
-#define CAM_CC_MCLK2_CLK 68
-#define CAM_CC_MCLK2_CLK_SRC 69
-#define CAM_CC_MCLK3_CLK 70
-#define CAM_CC_MCLK3_CLK_SRC 71
-#define CAM_CC_PLL0 72
-#define CAM_CC_PLL0_OUT_EVEN 73
-#define CAM_CC_PLL1 74
-#define CAM_CC_PLL1_OUT_EVEN 75
-#define CAM_CC_PLL2 76
-#define CAM_CC_PLL2_OUT_EVEN 77
-#define CAM_CC_PLL2_OUT_ODD 78
-#define CAM_CC_PLL3 79
-#define CAM_CC_PLL3_OUT_EVEN 80
-#define CAM_CC_PLL_TEST_CLK 81
-#define CAM_CC_SLOW_AHB_CLK_SRC 82
-#define CAM_CC_SOC_AHB_CLK 83
-#define CAM_CC_SYS_TMR_CLK 84
+#define CAM_CC_FAST_AHB_CLK_SRC 20
+#define CAM_CC_FD_CORE_CLK 21
+#define CAM_CC_FD_CORE_CLK_SRC 22
+#define CAM_CC_FD_CORE_UAR_CLK 23
+#define CAM_CC_ICP_APB_CLK 24
+#define CAM_CC_ICP_ATB_CLK 25
+#define CAM_CC_ICP_CLK 26
+#define CAM_CC_ICP_CLK_SRC 27
+#define CAM_CC_ICP_CTI_CLK 28
+#define CAM_CC_ICP_TS_CLK 29
+#define CAM_CC_IFE_0_AXI_CLK 30
+#define CAM_CC_IFE_0_CLK 31
+#define CAM_CC_IFE_0_CLK_SRC 32
+#define CAM_CC_IFE_0_CPHY_RX_CLK 33
+#define CAM_CC_IFE_0_CSID_CLK 34
+#define CAM_CC_IFE_0_CSID_CLK_SRC 35
+#define CAM_CC_IFE_0_DSP_CLK 36
+#define CAM_CC_IFE_1_AXI_CLK 37
+#define CAM_CC_IFE_1_CLK 38
+#define CAM_CC_IFE_1_CLK_SRC 39
+#define CAM_CC_IFE_1_CPHY_RX_CLK 40
+#define CAM_CC_IFE_1_CSID_CLK 41
+#define CAM_CC_IFE_1_CSID_CLK_SRC 42
+#define CAM_CC_IFE_1_DSP_CLK 43
+#define CAM_CC_IFE_LITE_CLK 44
+#define CAM_CC_IFE_LITE_CLK_SRC 45
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 46
+#define CAM_CC_IFE_LITE_CSID_CLK 47
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 48
+#define CAM_CC_IPE_0_AHB_CLK 49
+#define CAM_CC_IPE_0_AREG_CLK 50
+#define CAM_CC_IPE_0_AXI_CLK 51
+#define CAM_CC_IPE_0_CLK 52
+#define CAM_CC_IPE_0_CLK_SRC 53
+#define CAM_CC_IPE_1_AHB_CLK 54
+#define CAM_CC_IPE_1_AREG_CLK 55
+#define CAM_CC_IPE_1_AXI_CLK 56
+#define CAM_CC_IPE_1_CLK 57
+#define CAM_CC_IPE_1_CLK_SRC 58
+#define CAM_CC_JPEG_CLK 59
+#define CAM_CC_JPEG_CLK_SRC 60
+#define CAM_CC_LRME_CLK 61
+#define CAM_CC_LRME_CLK_SRC 62
+#define CAM_CC_MCLK0_CLK 63
+#define CAM_CC_MCLK0_CLK_SRC 64
+#define CAM_CC_MCLK1_CLK 65
+#define CAM_CC_MCLK1_CLK_SRC 66
+#define CAM_CC_MCLK2_CLK 67
+#define CAM_CC_MCLK2_CLK_SRC 68
+#define CAM_CC_MCLK3_CLK 69
+#define CAM_CC_MCLK3_CLK_SRC 70
+#define CAM_CC_PLL0 71
+#define CAM_CC_PLL0_OUT_EVEN 72
+#define CAM_CC_PLL1 73
+#define CAM_CC_PLL1_OUT_EVEN 74
+#define CAM_CC_PLL2 75
+#define CAM_CC_PLL2_OUT_EVEN 76
+#define CAM_CC_PLL2_OUT_ODD 77
+#define CAM_CC_PLL3 78
+#define CAM_CC_PLL3_OUT_EVEN 79
+#define CAM_CC_PLL_TEST_CLK 80
+#define CAM_CC_SLOW_AHB_CLK_SRC 81
+#define CAM_CC_SOC_AHB_CLK 82
+#define CAM_CC_SYS_TMR_CLK 83
#define TITAN_CAM_CC_BPS_BCR 0
#define TITAN_CAM_CC_CAMNOC_BCR 1
diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
index b1988e4..24dd11e 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -14,49 +14,48 @@
#ifndef _DT_BINDINGS_CLK_MSM_DISP_CC_SDM845_H
#define _DT_BINDINGS_CLK_MSM_DISP_CC_SDM845_H
-#define DISP_CC_DEBUG_CLK 0
-#define DISP_CC_MDSS_AHB_CLK 1
-#define DISP_CC_MDSS_AXI_CLK 2
-#define DISP_CC_MDSS_BYTE0_CLK 3
-#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
-#define DISP_CC_MDSS_BYTE0_INTF_CLK 5
-#define DISP_CC_MDSS_BYTE1_CLK 6
-#define DISP_CC_MDSS_BYTE1_CLK_SRC 7
-#define DISP_CC_MDSS_BYTE1_INTF_CLK 8
-#define DISP_CC_MDSS_DP_AUX_CLK 9
-#define DISP_CC_MDSS_DP_AUX_CLK_SRC 10
-#define DISP_CC_MDSS_DP_CRYPTO_CLK 11
-#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 12
-#define DISP_CC_MDSS_DP_LINK_CLK 13
-#define DISP_CC_MDSS_DP_LINK_CLK_SRC 14
-#define DISP_CC_MDSS_DP_LINK_INTF_CLK 15
-#define DISP_CC_MDSS_DP_PIXEL1_CLK 16
-#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 17
-#define DISP_CC_MDSS_DP_PIXEL_CLK 18
-#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 19
-#define DISP_CC_MDSS_ESC0_CLK 20
-#define DISP_CC_MDSS_ESC0_CLK_SRC 21
-#define DISP_CC_MDSS_ESC1_CLK 22
-#define DISP_CC_MDSS_ESC1_CLK_SRC 23
-#define DISP_CC_MDSS_MDP_CLK 24
-#define DISP_CC_MDSS_MDP_CLK_SRC 25
-#define DISP_CC_MDSS_MDP_LUT_CLK 26
-#define DISP_CC_MDSS_PCLK0_CLK 27
-#define DISP_CC_MDSS_PCLK0_CLK_SRC 28
-#define DISP_CC_MDSS_PCLK1_CLK 29
-#define DISP_CC_MDSS_PCLK1_CLK_SRC 30
-#define DISP_CC_MDSS_QDSS_AT_CLK 31
-#define DISP_CC_MDSS_QDSS_TSCTR_DIV8_CLK 32
-#define DISP_CC_MDSS_ROT_CLK 33
-#define DISP_CC_MDSS_ROT_CLK_SRC 34
-#define DISP_CC_MDSS_RSCC_AHB_CLK 35
-#define DISP_CC_MDSS_RSCC_VSYNC_CLK 36
-#define DISP_CC_MDSS_VSYNC_CLK 37
-#define DISP_CC_MDSS_VSYNC_CLK_SRC 38
-#define DISP_CC_PLL0 39
-#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 40
-#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 41
-#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 42
+#define DISP_CC_MDSS_AHB_CLK 0
+#define DISP_CC_MDSS_AXI_CLK 1
+#define DISP_CC_MDSS_BYTE0_CLK 2
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 4
+#define DISP_CC_MDSS_BYTE1_CLK 5
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 7
+#define DISP_CC_MDSS_DP_AUX_CLK 8
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 9
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 10
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 11
+#define DISP_CC_MDSS_DP_LINK_CLK 12
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 13
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 14
+#define DISP_CC_MDSS_DP_PIXEL1_CLK 15
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 16
+#define DISP_CC_MDSS_DP_PIXEL_CLK 17
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 18
+#define DISP_CC_MDSS_ESC0_CLK 19
+#define DISP_CC_MDSS_ESC0_CLK_SRC 20
+#define DISP_CC_MDSS_ESC1_CLK 21
+#define DISP_CC_MDSS_ESC1_CLK_SRC 22
+#define DISP_CC_MDSS_MDP_CLK 23
+#define DISP_CC_MDSS_MDP_CLK_SRC 24
+#define DISP_CC_MDSS_MDP_LUT_CLK 25
+#define DISP_CC_MDSS_PCLK0_CLK 26
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 27
+#define DISP_CC_MDSS_PCLK1_CLK 28
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 29
+#define DISP_CC_MDSS_QDSS_AT_CLK 30
+#define DISP_CC_MDSS_QDSS_TSCTR_DIV8_CLK 31
+#define DISP_CC_MDSS_ROT_CLK 32
+#define DISP_CC_MDSS_ROT_CLK_SRC 33
+#define DISP_CC_MDSS_RSCC_AHB_CLK 34
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 35
+#define DISP_CC_MDSS_VSYNC_CLK 36
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 37
+#define DISP_CC_PLL0 38
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 39
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 40
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 41
#define DISP_CC_MDSS_CORE_BCR 0
#define DISP_CC_MDSS_GCC_CLOCKS_BCR 1
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index f7a6978..73a8c0b 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -35,161 +35,158 @@
#define GCC_CPUSS_GNOC_CLK 17
#define GCC_CPUSS_RBCPR_CLK 18
#define GCC_CPUSS_RBCPR_CLK_SRC 19
-#define GCC_CXO_TX1_CLKREF_CLK 20
-#define GCC_DDRSS_GPU_AXI_CLK 21
-#define GCC_DISP_AHB_CLK 22
-#define GCC_DISP_AXI_CLK 23
-#define GCC_DISP_GPLL0_CLK_SRC 24
-#define GCC_DISP_GPLL0_DIV_CLK_SRC 25
-#define GCC_DISP_XO_CLK 26
-#define GCC_GP1_CLK 27
-#define GCC_GP1_CLK_SRC 28
-#define GCC_GP2_CLK 29
-#define GCC_GP2_CLK_SRC 30
-#define GCC_GP3_CLK 31
-#define GCC_GP3_CLK_SRC 32
-#define GCC_GPU_CFG_AHB_CLK 33
-#define GCC_GPU_GPLL0_CLK_SRC 34
-#define GCC_GPU_GPLL0_DIV_CLK_SRC 35
-#define GCC_GPU_MEMNOC_GFX_CLK 36
-#define GCC_GPU_SNOC_DVM_GFX_CLK 37
-#define GCC_MSS_AXIS2_CLK 38
-#define GCC_MSS_CFG_AHB_CLK 39
-#define GCC_MSS_GPLL0_DIV_CLK_SRC 40
-#define GCC_MSS_MFAB_AXIS_CLK 41
-#define GCC_MSS_Q6_MEMNOC_AXI_CLK 42
-#define GCC_MSS_SNOC_AXI_CLK 43
-#define GCC_PCIE_0_AUX_CLK 44
-#define GCC_PCIE_0_AUX_CLK_SRC 45
-#define GCC_PCIE_0_CFG_AHB_CLK 46
-#define GCC_PCIE_0_CLKREF_CLK 47
-#define GCC_PCIE_0_MSTR_AXI_CLK 48
-#define GCC_PCIE_0_PIPE_CLK 49
-#define GCC_PCIE_0_SLV_AXI_CLK 50
-#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 51
-#define GCC_PCIE_1_AUX_CLK 52
-#define GCC_PCIE_1_AUX_CLK_SRC 53
-#define GCC_PCIE_1_CFG_AHB_CLK 54
-#define GCC_PCIE_1_CLKREF_CLK 55
-#define GCC_PCIE_1_MSTR_AXI_CLK 56
-#define GCC_PCIE_1_PIPE_CLK 57
-#define GCC_PCIE_1_SLV_AXI_CLK 58
-#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 59
-#define GCC_PCIE_PHY_AUX_CLK 60
-#define GCC_PCIE_PHY_REFGEN_CLK 61
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC 62
-#define GCC_PDM2_CLK 63
-#define GCC_PDM2_CLK_SRC 64
-#define GCC_PDM_AHB_CLK 65
-#define GCC_PDM_XO4_CLK 66
-#define GCC_PRNG_AHB_CLK 67
-#define GCC_QMIP_CAMERA_AHB_CLK 68
-#define GCC_QMIP_DISP_AHB_CLK 69
-#define GCC_QMIP_VIDEO_AHB_CLK 70
-#define GCC_QUPV3_WRAP0_S0_CLK 71
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC 72
-#define GCC_QUPV3_WRAP0_S1_CLK 73
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC 74
-#define GCC_QUPV3_WRAP0_S2_CLK 75
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC 76
-#define GCC_QUPV3_WRAP0_S3_CLK 77
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC 78
-#define GCC_QUPV3_WRAP0_S4_CLK 79
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC 80
-#define GCC_QUPV3_WRAP0_S5_CLK 81
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC 82
-#define GCC_QUPV3_WRAP0_S6_CLK 83
-#define GCC_QUPV3_WRAP0_S6_CLK_SRC 84
-#define GCC_QUPV3_WRAP0_S7_CLK 85
-#define GCC_QUPV3_WRAP0_S7_CLK_SRC 86
-#define GCC_QUPV3_WRAP1_S0_CLK 87
-#define GCC_QUPV3_WRAP1_S0_CLK_SRC 88
-#define GCC_QUPV3_WRAP1_S1_CLK 89
-#define GCC_QUPV3_WRAP1_S1_CLK_SRC 90
-#define GCC_QUPV3_WRAP1_S2_CLK 91
-#define GCC_QUPV3_WRAP1_S2_CLK_SRC 92
-#define GCC_QUPV3_WRAP1_S3_CLK 93
-#define GCC_QUPV3_WRAP1_S3_CLK_SRC 94
-#define GCC_QUPV3_WRAP1_S4_CLK 95
-#define GCC_QUPV3_WRAP1_S4_CLK_SRC 96
-#define GCC_QUPV3_WRAP1_S5_CLK 97
-#define GCC_QUPV3_WRAP1_S5_CLK_SRC 98
-#define GCC_QUPV3_WRAP1_S6_CLK 99
-#define GCC_QUPV3_WRAP1_S6_CLK_SRC 100
-#define GCC_QUPV3_WRAP1_S7_CLK 101
-#define GCC_QUPV3_WRAP1_S7_CLK_SRC 102
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK 103
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK 104
-#define GCC_QUPV3_WRAP_1_M_AHB_CLK 105
-#define GCC_QUPV3_WRAP_1_S_AHB_CLK 106
-#define GCC_RX1_USB2_CLKREF_CLK 107
-#define GCC_RX2_QLINK_CLKREF_CLK 108
-#define GCC_SDCC2_AHB_CLK 109
-#define GCC_SDCC2_APPS_CLK 110
-#define GCC_SDCC2_APPS_CLK_SRC 111
-#define GCC_SDCC4_AHB_CLK 112
-#define GCC_SDCC4_APPS_CLK 113
-#define GCC_SDCC4_APPS_CLK_SRC 114
-#define GCC_SYS_NOC_CPUSS_AHB_CLK 115
-#define GCC_TSIF_AHB_CLK 116
-#define GCC_TSIF_INACTIVITY_TIMERS_CLK 117
-#define GCC_TSIF_REF_CLK 118
-#define GCC_TSIF_REF_CLK_SRC 119
-#define GCC_UFS_CARD_AHB_CLK 120
-#define GCC_UFS_CARD_AXI_CLK 121
-#define GCC_UFS_CARD_AXI_CLK_SRC 122
-#define GCC_UFS_CARD_CLKREF_CLK 123
-#define GCC_UFS_CARD_ICE_CORE_CLK 124
-#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 125
-#define GCC_UFS_CARD_PHY_AUX_CLK 126
-#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 127
-#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 128
-#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 129
-#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 130
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK 131
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 132
-#define GCC_UFS_MEM_CLKREF_CLK 133
-#define GCC_UFS_PHY_AHB_CLK 134
-#define GCC_UFS_PHY_AXI_CLK 135
-#define GCC_UFS_PHY_AXI_CLK_SRC 136
-#define GCC_UFS_PHY_ICE_CORE_CLK 137
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 138
-#define GCC_UFS_PHY_PHY_AUX_CLK 139
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 140
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 141
-#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 142
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 143
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK 144
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 145
-#define GCC_USB30_PRIM_MASTER_CLK 146
-#define GCC_USB30_PRIM_MASTER_CLK_SRC 147
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK 148
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 149
-#define GCC_USB30_PRIM_SLEEP_CLK 150
-#define GCC_USB30_SEC_MASTER_CLK 151
-#define GCC_USB30_SEC_MASTER_CLK_SRC 152
-#define GCC_USB30_SEC_MOCK_UTMI_CLK 153
-#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 154
-#define GCC_USB30_SEC_SLEEP_CLK 155
-#define GCC_USB3_PRIM_CLKREF_CLK 156
-#define GCC_USB3_PRIM_PHY_AUX_CLK 157
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 158
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 159
-#define GCC_USB3_PRIM_PHY_PIPE_CLK 160
-#define GCC_USB3_SEC_CLKREF_CLK 161
-#define GCC_USB3_SEC_PHY_AUX_CLK 162
-#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 163
-#define GCC_USB3_SEC_PHY_COM_AUX_CLK 164
-#define GCC_USB3_SEC_PHY_PIPE_CLK 165
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK 166
-#define GCC_VIDEO_AHB_CLK 167
-#define GCC_VIDEO_AXI_CLK 168
-#define GCC_VIDEO_XO_CLK 169
-#define GPLL0 170
-#define GPLL0_OUT_EVEN 171
-#define GPLL0_OUT_MAIN 172
-#define GPLL1 173
-#define GPLL1_OUT_MAIN 174
+#define GCC_DDRSS_GPU_AXI_CLK 20
+#define GCC_DISP_AHB_CLK 21
+#define GCC_DISP_AXI_CLK 22
+#define GCC_DISP_GPLL0_CLK_SRC 23
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 24
+#define GCC_DISP_XO_CLK 25
+#define GCC_GP1_CLK 26
+#define GCC_GP1_CLK_SRC 27
+#define GCC_GP2_CLK 28
+#define GCC_GP2_CLK_SRC 29
+#define GCC_GP3_CLK 30
+#define GCC_GP3_CLK_SRC 31
+#define GCC_GPU_CFG_AHB_CLK 32
+#define GCC_GPU_GPLL0_CLK_SRC 33
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 34
+#define GCC_GPU_MEMNOC_GFX_CLK 35
+#define GCC_GPU_SNOC_DVM_GFX_CLK 36
+#define GCC_MSS_AXIS2_CLK 37
+#define GCC_MSS_CFG_AHB_CLK 38
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 39
+#define GCC_MSS_MFAB_AXIS_CLK 40
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 41
+#define GCC_MSS_SNOC_AXI_CLK 42
+#define GCC_PCIE_0_AUX_CLK 43
+#define GCC_PCIE_0_AUX_CLK_SRC 44
+#define GCC_PCIE_0_CFG_AHB_CLK 45
+#define GCC_PCIE_0_CLKREF_CLK 46
+#define GCC_PCIE_0_MSTR_AXI_CLK 47
+#define GCC_PCIE_0_PIPE_CLK 48
+#define GCC_PCIE_0_SLV_AXI_CLK 49
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 50
+#define GCC_PCIE_1_AUX_CLK 51
+#define GCC_PCIE_1_AUX_CLK_SRC 52
+#define GCC_PCIE_1_CFG_AHB_CLK 53
+#define GCC_PCIE_1_CLKREF_CLK 54
+#define GCC_PCIE_1_MSTR_AXI_CLK 55
+#define GCC_PCIE_1_PIPE_CLK 56
+#define GCC_PCIE_1_SLV_AXI_CLK 57
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 58
+#define GCC_PCIE_PHY_AUX_CLK 59
+#define GCC_PCIE_PHY_REFGEN_CLK 60
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 61
+#define GCC_PDM2_CLK 62
+#define GCC_PDM2_CLK_SRC 63
+#define GCC_PDM_AHB_CLK 64
+#define GCC_PDM_XO4_CLK 65
+#define GCC_PRNG_AHB_CLK 66
+#define GCC_QMIP_CAMERA_AHB_CLK 67
+#define GCC_QMIP_DISP_AHB_CLK 68
+#define GCC_QMIP_VIDEO_AHB_CLK 69
+#define GCC_QUPV3_WRAP0_S0_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S1_CLK 72
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S2_CLK 74
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S3_CLK 76
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S4_CLK 78
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S5_CLK 80
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S6_CLK 82
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 83
+#define GCC_QUPV3_WRAP0_S7_CLK 84
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S0_CLK 86
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S1_CLK 88
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S2_CLK 90
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 91
+#define GCC_QUPV3_WRAP1_S3_CLK 92
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_S4_CLK 94
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 95
+#define GCC_QUPV3_WRAP1_S5_CLK 96
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S6_CLK 98
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 99
+#define GCC_QUPV3_WRAP1_S7_CLK 100
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 101
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 102
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 103
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 104
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 105
+#define GCC_SDCC2_AHB_CLK 106
+#define GCC_SDCC2_APPS_CLK 107
+#define GCC_SDCC2_APPS_CLK_SRC 108
+#define GCC_SDCC4_AHB_CLK 109
+#define GCC_SDCC4_APPS_CLK 110
+#define GCC_SDCC4_APPS_CLK_SRC 111
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 112
+#define GCC_TSIF_AHB_CLK 113
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 114
+#define GCC_TSIF_REF_CLK 115
+#define GCC_TSIF_REF_CLK_SRC 116
+#define GCC_UFS_CARD_AHB_CLK 117
+#define GCC_UFS_CARD_AXI_CLK 118
+#define GCC_UFS_CARD_AXI_CLK_SRC 119
+#define GCC_UFS_CARD_CLKREF_CLK 120
+#define GCC_UFS_CARD_ICE_CORE_CLK 121
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 122
+#define GCC_UFS_CARD_PHY_AUX_CLK 123
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 124
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 125
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 126
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 127
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 128
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 129
+#define GCC_UFS_MEM_CLKREF_CLK 130
+#define GCC_UFS_PHY_AHB_CLK 131
+#define GCC_UFS_PHY_AXI_CLK 132
+#define GCC_UFS_PHY_AXI_CLK_SRC 133
+#define GCC_UFS_PHY_ICE_CORE_CLK 134
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 135
+#define GCC_UFS_PHY_PHY_AUX_CLK 136
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 137
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 138
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 139
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 140
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 141
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 142
+#define GCC_USB30_PRIM_MASTER_CLK 143
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 144
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 145
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 146
+#define GCC_USB30_PRIM_SLEEP_CLK 147
+#define GCC_USB30_SEC_MASTER_CLK 148
+#define GCC_USB30_SEC_MASTER_CLK_SRC 149
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 150
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 151
+#define GCC_USB30_SEC_SLEEP_CLK 152
+#define GCC_USB3_PRIM_CLKREF_CLK 153
+#define GCC_USB3_PRIM_PHY_AUX_CLK 154
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 155
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 156
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 157
+#define GCC_USB3_SEC_CLKREF_CLK 158
+#define GCC_USB3_SEC_PHY_AUX_CLK 159
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 160
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 161
+#define GCC_USB3_SEC_PHY_PIPE_CLK 162
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 163
+#define GCC_VIDEO_AHB_CLK 164
+#define GCC_VIDEO_AXI_CLK 165
+#define GCC_VIDEO_XO_CLK 166
+#define GPLL0 167
+#define GPLL0_OUT_EVEN 168
+#define GPLL0_OUT_MAIN 169
+#define GPLL1 170
+#define GPLL1_OUT_MAIN 171
/* GCC reset clocks */
#define GCC_GPU_BCR 0
@@ -217,6 +214,8 @@
#define GCC_USB3PHY_PHY_SEC_BCR 22
#define GCC_USB3_DP_PHY_SEC_BCR 23
#define GCC_USB_PHY_CFG_AHB2PHY_BCR 24
+#define GCC_PCIE_0_PHY_BCR 25
+#define GCC_PCIE_1_PHY_BCR 26
/* Dummy clocks for rate measurement */
#define MEASURE_ONLY_SNOC_CLK 0
diff --git a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
new file mode 100644
index 0000000..6243588
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_SDX24_H
+#define _DT_BINDINGS_CLK_MSM_GCC_SDX24_H
+
+/* GCC clock registers */
+#define GCC_BLSP1_AHB_CLK 0
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 1
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 2
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 3
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 4
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 5
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 6
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 7
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 8
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 9
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 10
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 11
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 12
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 13
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 14
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 15
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 16
+#define GCC_BLSP1_SLEEP_CLK 17
+#define GCC_BLSP1_UART1_APPS_CLK 18
+#define GCC_BLSP1_UART1_APPS_CLK_SRC 19
+#define GCC_BLSP1_UART2_APPS_CLK 20
+#define GCC_BLSP1_UART2_APPS_CLK_SRC 21
+#define GCC_BLSP1_UART3_APPS_CLK 22
+#define GCC_BLSP1_UART3_APPS_CLK_SRC 23
+#define GCC_BLSP1_UART4_APPS_CLK 24
+#define GCC_BLSP1_UART4_APPS_CLK_SRC 25
+#define GCC_BOOT_ROM_AHB_CLK 26
+#define GCC_CE1_AHB_CLK 27
+#define GCC_CE1_AXI_CLK 28
+#define GCC_CE1_CLK 29
+#define GCC_CPUSS_AHB_CLK 30
+#define GCC_CPUSS_AHB_CLK_SRC 31
+#define GCC_CPUSS_GNOC_CLK 32
+#define GCC_CPUSS_GPLL0_CLK_SRC 33
+#define GCC_CPUSS_RBCPR_CLK 34
+#define GCC_CPUSS_RBCPR_CLK_SRC 35
+#define GCC_GP1_CLK 36
+#define GCC_GP1_CLK_SRC 37
+#define GCC_GP2_CLK 38
+#define GCC_GP2_CLK_SRC 39
+#define GCC_GP3_CLK 40
+#define GCC_GP3_CLK_SRC 41
+#define GCC_MSS_CFG_AHB_CLK 42
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 43
+#define GCC_MSS_SNOC_AXI_CLK 44
+#define GCC_PCIE_AUX_CLK 45
+#define GCC_PCIE_AUX_PHY_CLK_SRC 46
+#define GCC_PCIE_CFG_AHB_CLK 47
+#define GCC_PCIE_MSTR_AXI_CLK 48
+#define GCC_PCIE_PHY_REFGEN_CLK 49
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 50
+#define GCC_PCIE_PIPE_CLK 51
+#define GCC_PCIE_SLEEP_CLK 52
+#define GCC_PCIE_SLV_AXI_CLK 53
+#define GCC_PCIE_SLV_Q2A_AXI_CLK 54
+#define GCC_PDM2_CLK 55
+#define GCC_PDM2_CLK_SRC 56
+#define GCC_PDM_AHB_CLK 57
+#define GCC_PDM_XO4_CLK 58
+#define GCC_PRNG_AHB_CLK 59
+#define GCC_SDCC1_AHB_CLK 60
+#define GCC_SDCC1_APPS_CLK 61
+#define GCC_SDCC1_APPS_CLK_SRC 62
+#define GCC_SPMI_FETCHER_AHB_CLK 63
+#define GCC_SPMI_FETCHER_CLK 64
+#define GCC_SPMI_FETCHER_CLK_SRC 65
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 66
+#define GCC_USB30_MASTER_CLK 67
+#define GCC_USB30_MASTER_CLK_SRC 68
+#define GCC_USB30_MOCK_UTMI_CLK 69
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 70
+#define GCC_USB30_SLEEP_CLK 71
+#define GCC_USB3_PHY_AUX_CLK 72
+#define GCC_USB3_PHY_AUX_CLK_SRC 73
+#define GCC_USB3_PHY_PIPE_CLK 74
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 75
+#define GCC_XO_DIV4_CLK 76
+#define GPLL0 77
+#define GPLL0_OUT_EVEN 78
+
+/* GDSCs */
+#define PCIE_GDSC 0
+#define USB30_GDSC 1
+
+/* CPU clocks */
+#define CLOCK_A7SS 0
+
+/* GCC reset clocks */
+#define GCC_BLSP1_QUP1_BCR 0
+#define GCC_BLSP1_QUP2_BCR 1
+#define GCC_BLSP1_QUP3_BCR 2
+#define GCC_BLSP1_QUP4_BCR 3
+#define GCC_BLSP1_UART2_BCR 4
+#define GCC_BLSP1_UART3_BCR 5
+#define GCC_BLSP1_UART4_BCR 6
+#define GCC_CE1_BCR 7
+#define GCC_PCIE_BCR 8
+#define GCC_PCIE_PHY_BCR 9
+#define GCC_PDM_BCR 10
+#define GCC_PRNG_BCR 11
+#define GCC_SDCC1_BCR 12
+#define GCC_SPMI_FETCHER_BCR 13
+#define GCC_USB30_BCR 14
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 15
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm845.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
index 13de1e1..c43a9f8 100644
--- a/include/dt-bindings/clock/qcom,gpucc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
@@ -17,37 +17,36 @@
/* GPUCC clock registers */
#define GPU_CC_ACD_AHB_CLK 0
#define GPU_CC_ACD_CXO_CLK 1
-#define GPU_CC_AHB_CLK 2
+#define GPU_CC_AHB_CLK 2
#define GPU_CC_CRC_AHB_CLK 3
#define GPU_CC_CX_APB_CLK 4
#define GPU_CC_CX_GMU_CLK 5
#define GPU_CC_CX_QDSS_AT_CLK 6
#define GPU_CC_CX_QDSS_TRIG_CLK 7
-#define GPU_CC_CX_QDSS_TSCTR_CLK 8
-#define GPU_CC_CX_SNOC_DVM_CLK 9
+#define GPU_CC_CX_QDSS_TSCTR_CLK 8
+#define GPU_CC_CX_SNOC_DVM_CLK 9
#define GPU_CC_CXO_AON_CLK 10
-#define GPU_CC_CXO_CLK 11
-#define GPU_CC_DEBUG_CLK 12
-#define GPU_CC_GX_CXO_CLK 13
-#define GPU_CC_GX_GMU_CLK 14
-#define GPU_CC_GX_QDSS_TSCTR_CLK 15
-#define GPU_CC_GX_VSENSE_CLK 16
-#define GPU_CC_PLL0_OUT_MAIN 17
-#define GPU_CC_PLL0_OUT_ODD 18
-#define GPU_CC_PLL0_OUT_TEST 19
-#define GPU_CC_PLL1 20
-#define GPU_CC_PLL1_OUT_EVEN 21
-#define GPU_CC_PLL1_OUT_MAIN 22
-#define GPU_CC_PLL1_OUT_ODD 23
-#define GPU_CC_PLL1_OUT_TEST 24
-#define GPU_CC_PLL_TEST_CLK 25
-#define GPU_CC_RBCPR_AHB_CLK 26
-#define GPU_CC_RBCPR_CLK 27
-#define GPU_CC_RBCPR_CLK_SRC 28
-#define GPU_CC_SLEEP_CLK 29
-#define GPU_CC_GMU_CLK_SRC 30
-#define GPU_CC_CX_GFX3D_CLK 31
-#define GPU_CC_CX_GFX3D_SLV_CLK 32
+#define GPU_CC_CXO_CLK 11
+#define GPU_CC_GX_CXO_CLK 12
+#define GPU_CC_GX_GMU_CLK 13
+#define GPU_CC_GX_QDSS_TSCTR_CLK 14
+#define GPU_CC_GX_VSENSE_CLK 15
+#define GPU_CC_PLL0_OUT_MAIN 16
+#define GPU_CC_PLL0_OUT_ODD 17
+#define GPU_CC_PLL0_OUT_TEST 18
+#define GPU_CC_PLL1 19
+#define GPU_CC_PLL1_OUT_EVEN 20
+#define GPU_CC_PLL1_OUT_MAIN 21
+#define GPU_CC_PLL1_OUT_ODD 22
+#define GPU_CC_PLL1_OUT_TEST 23
+#define GPU_CC_PLL_TEST_CLK 24
+#define GPU_CC_RBCPR_AHB_CLK 25
+#define GPU_CC_RBCPR_CLK 26
+#define GPU_CC_RBCPR_CLK_SRC 27
+#define GPU_CC_SLEEP_CLK 28
+#define GPU_CC_GMU_CLK_SRC 29
+#define GPU_CC_CX_GFX3D_CLK 30
+#define GPU_CC_CX_GFX3D_SLV_CLK 31
/* GPUCC reset clock registers */
#define GPUCC_GPU_CC_ACD_BCR 0
@@ -63,5 +62,5 @@
#define GPU_CC_PLL0 0
#define GPU_CC_PLL0_OUT_EVEN 1
#define GPU_CC_GX_GFX3D_CLK_SRC 2
-#define GPU_CC_GX_GFX3D_CLK 3
+#define GPU_CC_GX_GFX3D_CLK 3
#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-sdm845.h b/include/dt-bindings/clock/qcom,videocc-sdm845.h
index 723d2e0..b362852d 100644
--- a/include/dt-bindings/clock/qcom,videocc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,videocc-sdm845.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,18 +16,17 @@
#define VIDEO_CC_APB_CLK 0
#define VIDEO_CC_AT_CLK 1
-#define VIDEO_CC_DEBUG_CLK 2
-#define VIDEO_CC_QDSS_TRIG_CLK 3
-#define VIDEO_CC_QDSS_TSCTR_DIV8_CLK 4
-#define VIDEO_CC_VCODEC0_AXI_CLK 5
-#define VIDEO_CC_VCODEC0_CORE_CLK 6
-#define VIDEO_CC_VCODEC1_AXI_CLK 7
-#define VIDEO_CC_VCODEC1_CORE_CLK 8
-#define VIDEO_CC_VENUS_AHB_CLK 9
-#define VIDEO_CC_VENUS_CLK_SRC 10
-#define VIDEO_CC_VENUS_CTL_AXI_CLK 11
-#define VIDEO_CC_VENUS_CTL_CORE_CLK 12
-#define VIDEO_PLL0 13
+#define VIDEO_CC_QDSS_TRIG_CLK 2
+#define VIDEO_CC_QDSS_TSCTR_DIV8_CLK 3
+#define VIDEO_CC_VCODEC0_AXI_CLK 4
+#define VIDEO_CC_VCODEC0_CORE_CLK 5
+#define VIDEO_CC_VCODEC1_AXI_CLK 6
+#define VIDEO_CC_VCODEC1_CORE_CLK 7
+#define VIDEO_CC_VENUS_AHB_CLK 8
+#define VIDEO_CC_VENUS_CLK_SRC 9
+#define VIDEO_CC_VENUS_CTL_AXI_CLK 10
+#define VIDEO_CC_VENUS_CTL_CORE_CLK 11
+#define VIDEO_PLL0 12
#define VIDEO_CC_INTERFACE_BCR 0
#define VIDEO_CC_VCODEC0_BCR 1
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 5d3a4cd..1f6892c 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -435,6 +435,7 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
+int iommu_fwspec_get_id(struct device *dev, u32 *id);
#else /* CONFIG_IOMMU_API */
@@ -705,6 +706,11 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
return -ENODEV;
}
+static inline int iommu_fwspec_get_id(struct device *dev, u32 *id)
+{
+ return -ENODEV;
+}
+
#endif /* CONFIG_IOMMU_API */
#endif /* __LINUX_IOMMU_H */
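The hunk above only declares iommu_fwspec_get_id() and its !CONFIG_IOMMU_API stub. A minimal, hypothetical consumer might look like the sketch below; the helper name and warning text are assumptions for illustration, not part of this patch.

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical helper: fetch the first fwspec ID (e.g. an SMMU stream ID)
 * recorded for this device; -ENODEV is returned when IOMMU support is
 * compiled out, per the stub added above. */
static int example_get_stream_id(struct device *dev, u32 *sid)
{
	int ret;

	ret = iommu_fwspec_get_id(dev, sid);
	if (ret)
		dev_warn(dev, "no fwspec ID available (%d)\n", ret);

	return ret;
}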
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d0ec667..72dd7ba 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -439,6 +439,8 @@ struct mmc_card {
u8 *cached_ext_csd;
bool cmdq_init;
struct mmc_bkops_info bkops;
+ bool err_in_sdr104;
+ bool sdr104_blocked;
};
/*
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 5d5aff1..959414b 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -179,6 +179,7 @@ extern int __mmc_switch_cmdq_mode(struct mmc_command *cmd, u8 set, u8 index,
extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
extern int mmc_set_auto_bkops(struct mmc_card *card, bool enable);
+extern int mmc_suspend_clk_scaling(struct mmc_host *host);
#define MMC_ERASE_ARG 0x00000000
#define MMC_SECURE_ERASE_ARG 0x80000000
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index ba1e826..ecfc173 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -587,6 +587,8 @@ struct mmc_host {
struct io_latency_state io_lat_s;
#endif
+ bool sdr104_wa;
+
/*
* Set to 1 to just stop the SDCLK to the card without
* actually disabling the clock from it's source.
@@ -751,6 +753,16 @@ static inline int mmc_host_uhs(struct mmc_host *host)
MMC_CAP_UHS_DDR50);
}
+static inline void mmc_host_clear_sdr104(struct mmc_host *host)
+{
+ host->caps &= ~MMC_CAP_UHS_SDR104;
+}
+
+static inline void mmc_host_set_sdr104(struct mmc_host *host)
+{
+ host->caps |= MMC_CAP_UHS_SDR104;
+}
+
static inline int mmc_host_packed_wr(struct mmc_host *host)
{
return host->caps2 & MMC_CAP2_PACKED_WR;
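Taken together, the new host->sdr104_wa flag, the per-card err_in_sdr104/sdr104_blocked flags and the mmc_host_clear_sdr104()/mmc_host_set_sdr104() helpers suggest a workaround that temporarily drops SDR104 after an error and restores it once the card recovers. A hedged sketch of how the core might wire these pieces together follows; the function names and call sites are assumptions, not part of this patch.

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

/* Hypothetical error path: remember the SDR104 failure and fall back. */
static void example_sdr104_error_path(struct mmc_host *host,
				      struct mmc_card *card, int err)
{
	/* Only act if the platform opted in to the workaround. */
	if (!host->sdr104_wa || !err)
		return;

	card->err_in_sdr104 = true;

	/* Drop the SDR104 capability so the next (re)init settles lower. */
	mmc_host_clear_sdr104(host);
	card->sdr104_blocked = true;
}

/* Hypothetical recovery path: allow SDR104 to be negotiated again. */
static void example_sdr104_restore(struct mmc_host *host,
				   struct mmc_card *card)
{
	if (card->sdr104_blocked) {
		mmc_host_set_sdr104(host);
		card->sdr104_blocked = false;
		card->err_in_sdr104 = false;
	}
}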
diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h
index a0e2283..7fca674 100644
--- a/include/linux/qpnp/qpnp-revid.h
+++ b/include/linux/qpnp/qpnp-revid.h
@@ -208,6 +208,12 @@
#define PM660_V1P1_REV3 0x01
#define PM660_V1P1_REV4 0x01
+/* PM660L REV_ID */
+#define PM660L_V1P1_REV1 0x00
+#define PM660L_V1P1_REV2 0x00
+#define PM660L_V1P1_REV3 0x01
+#define PM660L_V1P1_REV4 0x01
+
/* PMI8998 FAB_ID */
#define PMI8998_FAB_ID_SMIC 0x11
#define PMI8998_FAB_ID_GF 0x30
@@ -229,6 +235,9 @@
/* SMB1381 */
#define SMB1381_SUBTYPE 0x17
+/* SMB1355 */
+#define SMB1355_SUBTYPE 0x1C
+
struct pmic_revid_data {
u8 rev1;
u8 rev2;
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 8ec7c30..931b494 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -260,27 +260,6 @@ static inline int scsi_is_wlun(u64 lun)
#define SCSI_INQ_PQ_NOT_CON 0x01
#define SCSI_INQ_PQ_NOT_CAP 0x03
-
-/*
- * Here are some scsi specific ioctl commands which are sometimes useful.
- *
- * Note that include/linux/cdrom.h also defines IOCTL 0x5300 - 0x5395
- */
-
-/* Used to obtain PUN and LUN info. Conflicts with CDROMAUDIOBUFSIZ */
-#define SCSI_IOCTL_GET_IDLUN 0x5382
-
-/* 0x5383 and 0x5384 were used for SCSI_IOCTL_TAGGED_{ENABLE,DISABLE} */
-
-/* Used to obtain the host number of a device. */
-#define SCSI_IOCTL_PROBE_HOST 0x5385
-
-/* Used to obtain the bus number for a device */
-#define SCSI_IOCTL_GET_BUS_NUMBER 0x5386
-
-/* Used to obtain the PCI location of a device */
-#define SCSI_IOCTL_GET_PCI 0x5387
-
/* Pull a u32 out of a SCSI message (using BE SCSI conventions) */
static inline __u32 scsi_to_u32(__u8 *ptr)
{
diff --git a/include/soc/qcom/memory_dump.h b/include/soc/qcom/memory_dump.h
index a7b87aa..dbae8e8 100644
--- a/include/soc/qcom/memory_dump.h
+++ b/include/soc/qcom/memory_dump.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -62,7 +62,7 @@ static inline uint32_t msm_dump_table_version(void)
#define MSM_DUMP_MINOR(val) (val & 0xFFFFF)
-#define MAX_NUM_ENTRIES 0x120
+#define MAX_NUM_ENTRIES 0x140
enum msm_dump_data_ids {
MSM_DUMP_DATA_CPU_CTX = 0x00,
@@ -82,10 +82,12 @@ enum msm_dump_data_ids {
MSM_DUMP_DATA_VSENSE = 0xE9,
MSM_DUMP_DATA_RPM = 0xEA,
MSM_DUMP_DATA_SCANDUMP = 0xEB,
+ MSM_DUMP_DATA_RPMH = 0xEC,
MSM_DUMP_DATA_TMC_ETF = 0xF0,
MSM_DUMP_DATA_TMC_REG = 0x100,
MSM_DUMP_DATA_LOG_BUF = 0x110,
MSM_DUMP_DATA_LOG_BUF_FIRST_IDX = 0x111,
+ MSM_DUMP_DATA_SCANDUMP_PER_CPU = 0x130,
MSM_DUMP_DATA_MAX = MAX_NUM_ENTRIES,
};
diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h
index b1bf6aa..74034c6 100644
--- a/include/uapi/drm/sde_drm.h
+++ b/include/uapi/drm/sde_drm.h
@@ -344,4 +344,16 @@ struct sde_drm_wb_cfg {
uint64_t modes;
};
+#define SDE_MAX_ROI_V1 4
+
+/**
+ * struct sde_drm_roi_v1 - list of regions of interest for a drm object
+ * @num_rects: number of valid rectangles in the roi array
+ * @roi: list of roi rectangles
+ */
+struct sde_drm_roi_v1 {
+ uint32_t num_rects;
+ struct drm_clip_rect roi[SDE_MAX_ROI_V1];
+};
+
#endif /* _SDE_DRM_H_ */
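A hedged userspace sketch of filling the new sde_drm_roi_v1 structure for a single partial-update rectangle. Only the struct layout and SDE_MAX_ROI_V1 come from the header above; the include paths and the way the resulting blob is handed to the driver are assumptions.

#include <string.h>
#include <drm/drm.h>
#include <drm/sde_drm.h>	/* install path may differ per sysroot */

/* Fill one ROI rectangle; up to SDE_MAX_ROI_V1 (4) rects are allowed. */
static void example_fill_roi(struct sde_drm_roi_v1 *roi,
			     unsigned short x1, unsigned short y1,
			     unsigned short x2, unsigned short y2)
{
	memset(roi, 0, sizeof(*roi));
	roi->num_rects = 1;
	roi->roi[0].x1 = x1;
	roi->roi[0].y1 = y1;
	roi->roi[0].x2 = x2;
	roi->roi[0].y2 = y2;
}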
diff --git a/include/uapi/scsi/scsi_ioctl.h b/include/uapi/scsi/scsi_ioctl.h
index 516c581a..d9ce5cc 100644
--- a/include/uapi/scsi/scsi_ioctl.h
+++ b/include/uapi/scsi/scsi_ioctl.h
@@ -17,9 +17,25 @@
#define SCSI_REMOVAL_PREVENT 1
#define SCSI_REMOVAL_ALLOW 0
-#ifdef __KERNEL__
+/*
+ * Here are some scsi specific ioctl commands which are sometimes useful.
+ *
+ * Note that include/linux/cdrom.h also defines IOCTL 0x5300 - 0x5395
+ */
-struct scsi_device;
+/* Used to obtain PUN and LUN info. Conflicts with CDROMAUDIOBUFSIZ */
+#define SCSI_IOCTL_GET_IDLUN 0x5382
+
+/* 0x5383 and 0x5384 were used for SCSI_IOCTL_TAGGED_{ENABLE,DISABLE} */
+
+/* Used to obtain the host number of a device. */
+#define SCSI_IOCTL_PROBE_HOST 0x5385
+
+/* Used to obtain the bus number for a device */
+#define SCSI_IOCTL_GET_BUS_NUMBER 0x5386
+
+/* Used to obtain the PCI location of a device */
+#define SCSI_IOCTL_GET_PCI 0x5387
/*
* Structures used for scsi_ioctl et al.
@@ -42,9 +58,11 @@ typedef struct scsi_fctargaddress {
unsigned char host_wwn[8]; // include NULL term.
} Scsi_FCTargAddress;
+#ifdef __KERNEL__
+struct scsi_device;
+
int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev,
int cmd, bool ndelay);
extern int scsi_ioctl(struct scsi_device *, int, void __user *);
-
#endif /* __KERNEL__ */
#endif /* _SCSI_IOCTL_H */
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 9d1ed58..6670008 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2877,7 +2877,7 @@ static int cgroup_procs_write_permission(struct task_struct *task,
if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
!uid_eq(cred->euid, tcred->uid) &&
!uid_eq(cred->euid, tcred->suid) &&
- !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
+ !ns_capable(tcred->user_ns, CAP_SYS_NICE))
ret = -EACCES;
if (!ret && cgroup_on_dfl(dst_cgrp)) {
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 69e0689..27d96e2 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -12,11 +12,14 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpufreq.h>
+#include <linux/kthread.h>
#include <linux/slab.h>
#include <trace/events/power.h>
#include "sched.h"
+#define SUGOV_KTHREAD_PRIORITY 50
+
struct sugov_tunables {
struct gov_attr_set attr_set;
unsigned int rate_limit_us;
@@ -32,11 +35,14 @@ struct sugov_policy {
u64 last_freq_update_time;
s64 freq_update_delay_ns;
unsigned int next_freq;
+ unsigned int cached_raw_freq;
/* The next fields are only needed if fast switch cannot be used. */
struct irq_work irq_work;
- struct work_struct work;
+ struct kthread_work work;
struct mutex work_lock;
+ struct kthread_worker worker;
+ struct task_struct *thread;
bool work_in_progress;
bool need_freq_update;
@@ -46,7 +52,6 @@ struct sugov_cpu {
struct update_util_data update_util;
struct sugov_policy *sg_policy;
- unsigned int cached_raw_freq;
unsigned long iowait_boost;
unsigned long iowait_boost_max;
u64 last_update;
@@ -110,7 +115,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
/**
* get_next_freq - Compute a new frequency for a given cpufreq policy.
- * @sg_cpu: schedutil cpu object to compute the new frequency for.
+ * @sg_policy: schedutil policy object to compute the new frequency for.
* @util: Current CPU utilization.
* @max: CPU capacity.
*
@@ -130,19 +135,18 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
* next_freq (as calculated above) is returned, subject to policy min/max and
* cpufreq driver limitations.
*/
-static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util,
- unsigned long max)
+static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+ unsigned long util, unsigned long max)
{
- struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
unsigned int freq = arch_scale_freq_invariant() ?
policy->cpuinfo.max_freq : policy->cur;
freq = (freq + (freq >> 2)) * util / max;
- if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+ if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
return sg_policy->next_freq;
- sg_cpu->cached_raw_freq = freq;
+ sg_policy->cached_raw_freq = freq;
return cpufreq_driver_resolve_freq(policy, freq);
}
@@ -207,7 +211,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
} else {
sugov_get_util(&util, &max);
sugov_iowait_boost(sg_cpu, &util, &max);
- next_f = get_next_freq(sg_cpu, util, max);
+ next_f = get_next_freq(sg_policy, util, max);
}
sugov_update_commit(sg_policy, time, next_f);
}
@@ -261,7 +265,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
sugov_iowait_boost(j_sg_cpu, &util, &max);
}
- return get_next_freq(sg_cpu, util, max);
+ return get_next_freq(sg_policy, util, max);
}
static void sugov_update_shared(struct update_util_data *hook, u64 time,
@@ -291,7 +295,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
raw_spin_unlock(&sg_policy->update_lock);
}
-static void sugov_work(struct work_struct *work)
+static void sugov_work(struct kthread_work *work)
{
struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
@@ -308,7 +312,21 @@ static void sugov_irq_work(struct irq_work *irq_work)
struct sugov_policy *sg_policy;
sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
- schedule_work_on(smp_processor_id(), &sg_policy->work);
+
+ /*
+ * For RT and deadline tasks, the schedutil governor shoots the
+ * frequency to maximum. Special care must be taken to ensure that this
+ * kthread doesn't result in the same behavior.
+ *
+ * This is (mostly) guaranteed by the work_in_progress flag. The flag is
+ * updated only at the end of the sugov_work() function and before that
+ * the schedutil governor rejects all other frequency scaling requests.
+ *
+ * There is a very rare case though, where the RT thread yields right
+ * after the work_in_progress flag is cleared. The effects of that are
+ * neglected for now.
+ */
+ kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}
/************************** sysfs interface ************************/
@@ -371,19 +389,64 @@ static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
return NULL;
sg_policy->policy = policy;
- init_irq_work(&sg_policy->irq_work, sugov_irq_work);
- INIT_WORK(&sg_policy->work, sugov_work);
- mutex_init(&sg_policy->work_lock);
raw_spin_lock_init(&sg_policy->update_lock);
return sg_policy;
}
static void sugov_policy_free(struct sugov_policy *sg_policy)
{
- mutex_destroy(&sg_policy->work_lock);
kfree(sg_policy);
}
+static int sugov_kthread_create(struct sugov_policy *sg_policy)
+{
+ struct task_struct *thread;
+ struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
+ struct cpufreq_policy *policy = sg_policy->policy;
+ int ret;
+
+ /* kthread only required for slow path */
+ if (policy->fast_switch_enabled)
+ return 0;
+
+ kthread_init_work(&sg_policy->work, sugov_work);
+ kthread_init_worker(&sg_policy->worker);
+ thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
+ "sugov:%d",
+ cpumask_first(policy->related_cpus));
+ if (IS_ERR(thread)) {
+ pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
+ return PTR_ERR(thread);
+ }
+
+	ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
+ if (ret) {
+ kthread_stop(thread);
+ pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
+ return ret;
+ }
+
+ sg_policy->thread = thread;
+ kthread_bind_mask(thread, policy->related_cpus);
+ init_irq_work(&sg_policy->irq_work, sugov_irq_work);
+ mutex_init(&sg_policy->work_lock);
+
+ wake_up_process(thread);
+
+ return 0;
+}
+
+static void sugov_kthread_stop(struct sugov_policy *sg_policy)
+{
+ /* kthread only required for slow path */
+ if (sg_policy->policy->fast_switch_enabled)
+ return;
+
+ kthread_flush_worker(&sg_policy->worker);
+ kthread_stop(sg_policy->thread);
+ mutex_destroy(&sg_policy->work_lock);
+}
+
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
struct sugov_tunables *tunables;
@@ -416,16 +479,24 @@ static int sugov_init(struct cpufreq_policy *policy)
if (policy->governor_data)
return -EBUSY;
+ cpufreq_enable_fast_switch(policy);
+
sg_policy = sugov_policy_alloc(policy);
- if (!sg_policy)
- return -ENOMEM;
+ if (!sg_policy) {
+ ret = -ENOMEM;
+ goto disable_fast_switch;
+ }
+
+ ret = sugov_kthread_create(sg_policy);
+ if (ret)
+ goto free_sg_policy;
mutex_lock(&global_tunables_lock);
if (global_tunables) {
if (WARN_ON(have_governor_per_policy())) {
ret = -EINVAL;
- goto free_sg_policy;
+ goto stop_kthread;
}
policy->governor_data = sg_policy;
sg_policy->tunables = global_tunables;
@@ -437,7 +508,7 @@ static int sugov_init(struct cpufreq_policy *policy)
tunables = sugov_tunables_alloc(sg_policy);
if (!tunables) {
ret = -ENOMEM;
- goto free_sg_policy;
+ goto stop_kthread;
}
tunables->rate_limit_us = LATENCY_MULTIPLIER;
@@ -454,20 +525,25 @@ static int sugov_init(struct cpufreq_policy *policy)
if (ret)
goto fail;
- out:
+out:
mutex_unlock(&global_tunables_lock);
-
- cpufreq_enable_fast_switch(policy);
return 0;
- fail:
+fail:
policy->governor_data = NULL;
sugov_tunables_free(tunables);
- free_sg_policy:
+stop_kthread:
+ sugov_kthread_stop(sg_policy);
+
+free_sg_policy:
mutex_unlock(&global_tunables_lock);
sugov_policy_free(sg_policy);
+
+disable_fast_switch:
+ cpufreq_disable_fast_switch(policy);
+
pr_err("initialization failed (error %d)\n", ret);
return ret;
}
@@ -478,8 +554,6 @@ static void sugov_exit(struct cpufreq_policy *policy)
struct sugov_tunables *tunables = sg_policy->tunables;
unsigned int count;
- cpufreq_disable_fast_switch(policy);
-
mutex_lock(&global_tunables_lock);
count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
@@ -489,7 +563,9 @@ static void sugov_exit(struct cpufreq_policy *policy)
mutex_unlock(&global_tunables_lock);
+ sugov_kthread_stop(sg_policy);
sugov_policy_free(sg_policy);
+ cpufreq_disable_fast_switch(policy);
}
static int sugov_start(struct cpufreq_policy *policy)
@@ -502,25 +578,19 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_policy->next_freq = UINT_MAX;
sg_policy->work_in_progress = false;
sg_policy->need_freq_update = false;
+ sg_policy->cached_raw_freq = 0;
for_each_cpu(cpu, policy->cpus) {
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
+ memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu->sg_policy = sg_policy;
- if (policy_is_shared(policy)) {
- sg_cpu->util = 0;
- sg_cpu->max = 0;
- sg_cpu->flags = SCHED_CPUFREQ_RT;
- sg_cpu->last_update = 0;
- sg_cpu->cached_raw_freq = 0;
- sg_cpu->iowait_boost = 0;
- sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
- cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
- sugov_update_shared);
- } else {
- cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
- sugov_update_single);
- }
+ sg_cpu->flags = SCHED_CPUFREQ_RT;
+ sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+ cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+ policy_is_shared(policy) ?
+ sugov_update_shared :
+ sugov_update_single);
}
return 0;
}
@@ -535,8 +605,10 @@ static void sugov_stop(struct cpufreq_policy *policy)
synchronize_sched();
- irq_work_sync(&sg_policy->irq_work);
- cancel_work_sync(&sg_policy->work);
+ if (!policy->fast_switch_enabled) {
+ irq_work_sync(&sg_policy->irq_work);
+ kthread_cancel_work_sync(&sg_policy->work);
+ }
}
static void sugov_limits(struct cpufreq_policy *policy)
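The schedutil change above replaces the system-workqueue slow path with a dedicated SCHED_FIFO kthread_worker fed from irq_work, so that frequency updates issued on behalf of RT/deadline tasks are not starved by ordinary workqueue items. Below is a minimal sketch of that pattern under the same 4.9 kthread_worker API; the example_* names are illustrative and the priority value simply mirrors the patch.

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/irq_work.h>

struct example_ctx {
	struct irq_work		irq_work;
	struct kthread_work	work;
	struct kthread_worker	worker;
	struct task_struct	*thread;
};

/* Slow-path action: runs in process context on the RT worker thread. */
static void example_work_fn(struct kthread_work *work)
{
}

/* irq_work callback: punt to the kthread_worker instead of schedule_work(). */
static void example_irq_work_fn(struct irq_work *irq_work)
{
	struct example_ctx *ctx =
		container_of(irq_work, struct example_ctx, irq_work);

	kthread_queue_work(&ctx->worker, &ctx->work);
}

static int example_ctx_init(struct example_ctx *ctx)
{
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
	int ret;

	kthread_init_work(&ctx->work, example_work_fn);
	kthread_init_worker(&ctx->worker);
	ctx->thread = kthread_create(kthread_worker_fn, &ctx->worker,
				     "example_worker");
	if (IS_ERR(ctx->thread))
		return PTR_ERR(ctx->thread);

	ret = sched_setscheduler_nocheck(ctx->thread, SCHED_FIFO, &param);
	if (ret) {
		kthread_stop(ctx->thread);
		return ret;
	}

	init_irq_work(&ctx->irq_work, example_irq_work_fn);
	wake_up_process(ctx->thread);
	return 0;
}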
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f30847a..f5c016e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3435,11 +3435,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ struct buffer_page *reader;
+ struct buffer_page *head_page;
+ struct buffer_page *commit_page;
+ unsigned commit;
cpu_buffer = iter->cpu_buffer;
- return iter->head_page == cpu_buffer->commit_page &&
- iter->head == rb_commit_index(cpu_buffer);
+ /* Remember, trace recording is off when iterator is in use */
+ reader = cpu_buffer->reader_page;
+ head_page = cpu_buffer->head_page;
+ commit_page = cpu_buffer->commit_page;
+ commit = rb_page_commit(commit_page);
+
+ return ((iter->head_page == commit_page && iter->head == commit) ||
+ (iter->head_page == reader && commit_page == head_page &&
+ head_page->read == commit &&
+ iter->head == rb_page_commit(cpu_buffer->reader_page)));
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 63c6d28..ebf9498 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6576,11 +6576,13 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
return ret;
out_reg:
+ ret = alloc_snapshot(&global_trace);
+ if (ret < 0)
+ goto out;
+
ret = register_ftrace_function_probe(glob, ops, count);
- if (ret >= 0)
- alloc_snapshot(&global_trace);
-
+ out:
return ret < 0 ? ret : 0;
}
diff --git a/mm/Kconfig b/mm/Kconfig
index 0183305..eb10c90 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -720,3 +720,13 @@
bool
config ARCH_HAS_PKEYS
bool
+
+config FORCE_ALLOC_FROM_DMA_ZONE
+ bool "Force certain memory allocators to always return ZONE_DMA memory"
+ depends on ZONE_DMA
+ help
+ Ensure certain memory allocators always return memory from ZONE_DMA.
+ This option helps ensure that clients who require ZONE_DMA memory are
+ always using ZONE_DMA memory.
+
+ If unsure, say "n".
diff --git a/mm/migrate.c b/mm/migrate.c
index f49de3cf..435f674 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -183,9 +183,9 @@ void putback_movable_pages(struct list_head *l)
unlock_page(page);
put_page(page);
} else {
- putback_lru_page(page);
dec_node_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
+ putback_lru_page(page);
}
}
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9d3f6d3..b4d398b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2595,16 +2595,23 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
sc->nr_scanned - nr_scanned,
node_lru_pages);
+ /*
+ * Record the subtree's reclaim efficiency. The reclaimed
+ * pages from slab is excluded here because the corresponding
+ * scanned pages is not accounted. Moreover, freeing a page
+ * by slab shrinking depends on each slab's object population,
+ * making the cost model (i.e. scan:free) different from that
+ * of LRU.
+ */
+ vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
+ sc->nr_scanned - nr_scanned,
+ sc->nr_reclaimed - nr_reclaimed);
+
if (reclaim_state) {
sc->nr_reclaimed += reclaim_state->reclaimed_slab;
reclaim_state->reclaimed_slab = 0;
}
- /* Record the subtree's reclaim efficiency */
- vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
- sc->nr_scanned - nr_scanned,
- sc->nr_reclaimed - nr_reclaimed);
-
if (sc->nr_reclaimed - nr_reclaimed)
reclaimable = true;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f0f462c..8de6707 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -209,6 +209,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
u8 *data;
bool pfmemalloc;
+ if (IS_ENABLED(CONFIG_FORCE_ALLOC_FROM_DMA_ZONE))
+ gfp_mask |= GFP_DMA;
+
cache = (flags & SKB_ALLOC_FCLONE)
? skbuff_fclone_cache : skbuff_head_cache;
@@ -367,6 +370,9 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
unsigned long flags;
void *data;
+ if (IS_ENABLED(CONFIG_FORCE_ALLOC_FROM_DMA_ZONE))
+ gfp_mask |= GFP_DMA;
+
local_irq_save(flags);
nc = this_cpu_ptr(&netdev_alloc_cache);
data = __alloc_page_frag(nc, fragsz, gfp_mask);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index a697ddf..acaaf61 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -208,6 +208,51 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
return len;
}
+static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb,
+ int rtap_vendor_space)
+{
+ struct {
+ struct ieee80211_hdr_3addr hdr;
+ u8 category;
+ u8 action_code;
+ } __packed action;
+
+ if (!sdata)
+ return;
+
+ BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
+
+ if (skb->len < rtap_vendor_space + sizeof(action) +
+ VHT_MUMIMO_GROUPS_DATA_LEN)
+ return;
+
+ if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
+ return;
+
+ skb_copy_bits(skb, rtap_vendor_space, &action, sizeof(action));
+
+ if (!ieee80211_is_action(action.hdr.frame_control))
+ return;
+
+ if (action.category != WLAN_CATEGORY_VHT)
+ return;
+
+ if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
+ return;
+
+ if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
+ return;
+
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+ skb_queue_tail(&sdata->skb_queue, skb);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+}
+
/*
* ieee80211_add_rx_radiotap_header - add radiotap header
*
@@ -515,7 +560,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
struct net_device *prev_dev = NULL;
int present_fcs_len = 0;
unsigned int rtap_vendor_space = 0;
- struct ieee80211_mgmt *mgmt;
struct ieee80211_sub_if_data *monitor_sdata =
rcu_dereference(local->monitor_sdata);
@@ -553,6 +597,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
return remove_monitor_info(local, origskb, rtap_vendor_space);
}
+ ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space);
+
/* room for the radiotap header based on driver features */
rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
needed_headroom = rt_hdrlen - rtap_vendor_space;
@@ -618,23 +664,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
ieee80211_rx_stats(sdata->dev, skb->len);
}
- mgmt = (void *)skb->data;
- if (monitor_sdata &&
- skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN &&
- ieee80211_is_action(mgmt->frame_control) &&
- mgmt->u.action.category == WLAN_CATEGORY_VHT &&
- mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT &&
- is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) &&
- ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) {
- struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC);
-
- if (mu_skb) {
- mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
- skb_queue_tail(&monitor_sdata->skb_queue, mu_skb);
- ieee80211_queue_work(&local->hw, &monitor_sdata->work);
- }
- }
-
if (prev_dev) {
skb->dev = prev_dev;
netif_receive_skb(skb);
@@ -3617,6 +3646,27 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
!ether_addr_equal(bssid, hdr->addr1))
return false;
}
+
+ /*
+ * 802.11-2016 Table 9-26 says that for data frames, A1 must be
+ * the BSSID - we've checked that already but may have accepted
+ * the wildcard (ff:ff:ff:ff:ff:ff).
+ *
+ * It also says:
+ * The BSSID of the Data frame is determined as follows:
+ * a) If the STA is contained within an AP or is associated
+ * with an AP, the BSSID is the address currently in use
+ * by the STA contained in the AP.
+ *
+ * So we should not accept data frames with an address that's
+ * multicast.
+ *
+ * Accepting it also opens a security problem because stations
+ * could encrypt it with the GTK and inject traffic that way.
+ */
+ if (ieee80211_is_data(hdr->frame_control) && multicast)
+ return false;
+
return true;
case NL80211_IFTYPE_WDS:
if (bssid || !ieee80211_is_data(hdr->frame_control))
diff --git a/scripts/build-all.py b/scripts/build-all.py
index d36e96f..bd468cd 100755
--- a/scripts/build-all.py
+++ b/scripts/build-all.py
@@ -307,10 +307,12 @@
r'qsd*_defconfig',
r'mpq*_defconfig',
r'sdm[0-9]*_defconfig',
+ r'sdx*_defconfig',
)
arch64_pats = (
r'msm*_defconfig',
r'sdm[0-9]*_defconfig',
+ r'sdx*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
diff --git a/security/keys/gc.c b/security/keys/gc.c
index addf060..9cb4fe4 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -46,7 +46,7 @@ static unsigned long key_gc_flags;
* immediately unlinked.
*/
struct key_type key_type_dead = {
- .name = "dead",
+ .name = ".dead",
};
/*
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index d580ad0..dbbfd77 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -271,7 +271,8 @@ long keyctl_get_keyring_ID(key_serial_t id, int create)
* Create and join an anonymous session keyring or join a named session
* keyring, creating it if necessary. A named session keyring must have Search
* permission for it to be joined. Session keyrings without this permit will
- * be skipped over.
+ * be skipped over. It is not permitted for userspace to create or join
+ * keyrings whose names begin with a dot.
*
* If successful, the ID of the joined session keyring will be returned.
*/
@@ -288,12 +289,16 @@ long keyctl_join_session_keyring(const char __user *_name)
ret = PTR_ERR(name);
goto error;
}
+
+ ret = -EPERM;
+ if (name[0] == '.')
+ goto error_name;
}
/* join the session */
ret = join_session_keyring(name);
+error_name:
kfree(name);
-
error:
return ret;
}
@@ -1251,8 +1256,8 @@ long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error,
* Read or set the default keyring in which request_key() will cache keys and
* return the old setting.
*
- * If a process keyring is specified then this will be created if it doesn't
- * yet exist. The old setting will be returned if successful.
+ * If a thread or process keyring is specified then it will be created if it
+ * doesn't yet exist. The old setting will be returned if successful.
*/
long keyctl_set_reqkey_keyring(int reqkey_defl)
{
@@ -1277,11 +1282,8 @@ long keyctl_set_reqkey_keyring(int reqkey_defl)
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
ret = install_process_keyring_to_cred(new);
- if (ret < 0) {
- if (ret != -EEXIST)
- goto error;
- ret = 0;
- }
+ if (ret < 0)
+ goto error;
goto set;
case KEY_REQKEY_DEFL_DEFAULT:
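With the keyctl_join_session_keyring() change above, joining a session keyring whose name starts with a dot now fails with EPERM from userspace. A small hedged demonstration using libkeyutils; the specific keyring names are illustrative only.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <keyutils.h>

int main(void)
{
	/* Dot-prefixed names are reserved and now rejected with EPERM. */
	if (keyctl_join_session_keyring(".dead") < 0)
		printf("dot-named keyring rejected: %s\n", strerror(errno));

	/* Ordinary named session keyrings still work as before. */
	if (keyctl_join_session_keyring("example-session") >= 0)
		printf("ordinary named keyring joined\n");

	return 0;
}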
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 40a8852..45536c6 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -127,13 +127,18 @@ int install_user_keyrings(void)
}
/*
- * Install a fresh thread keyring directly to new credentials. This keyring is
- * allowed to overrun the quota.
+ * Install a thread keyring to the given credentials struct if it didn't have
+ * one already. This is allowed to overrun the quota.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
*/
int install_thread_keyring_to_cred(struct cred *new)
{
struct key *keyring;
+ if (new->thread_keyring)
+ return 0;
+
keyring = keyring_alloc("_tid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN,
@@ -146,7 +151,9 @@ int install_thread_keyring_to_cred(struct cred *new)
}
/*
- * Install a fresh thread keyring, discarding the old one.
+ * Install a thread keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
*/
static int install_thread_keyring(void)
{
@@ -157,8 +164,6 @@ static int install_thread_keyring(void)
if (!new)
return -ENOMEM;
- BUG_ON(new->thread_keyring);
-
ret = install_thread_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
@@ -169,17 +174,17 @@ static int install_thread_keyring(void)
}
/*
- * Install a process keyring directly to a credentials struct.
+ * Install a process keyring to the given credentials struct if it didn't have
+ * one already. This is allowed to overrun the quota.
*
- * Returns -EEXIST if there was already a process keyring, 0 if one installed,
- * and other value on any other error
+ * Return: 0 if a process keyring is now present; -errno on failure.
*/
int install_process_keyring_to_cred(struct cred *new)
{
struct key *keyring;
if (new->process_keyring)
- return -EEXIST;
+ return 0;
keyring = keyring_alloc("_pid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
@@ -193,11 +198,9 @@ int install_process_keyring_to_cred(struct cred *new)
}
/*
- * Make sure a process keyring is installed for the current process. The
- * existing process keyring is not replaced.
+ * Install a process keyring to the current task if it didn't have one already.
*
- * Returns 0 if there is a process keyring by the end of this function, some
- * error otherwise.
+ * Return: 0 if a process keyring is now present; -errno on failure.
*/
static int install_process_keyring(void)
{
@@ -211,14 +214,18 @@ static int install_process_keyring(void)
ret = install_process_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
- return ret != -EEXIST ? ret : 0;
+ return ret;
}
return commit_creds(new);
}
/*
- * Install a session keyring directly to a credentials struct.
+ * Install the given keyring as the session keyring of the given credentials
+ * struct, replacing the existing one if any. If the given keyring is NULL,
+ * then install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
*/
int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
{
@@ -253,8 +260,11 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
}
/*
- * Install a session keyring, discarding the old one. If a keyring is not
- * supplied, an empty one is invented.
+ * Install the given keyring as the session keyring of the current task,
+ * replacing the existing one if any. If the given keyring is NULL, then
+ * install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
*/
static int install_session_keyring(struct key *keyring)
{
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index 51c27b7..b75ba98 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -5247,7 +5247,6 @@ static struct snd_soc_dai_link msm_tasha_fe_dai_links[] = {
.platform_name = "msm-pcm-hostless",
.dynamic = 1,
.dpcm_playback = 1,
- .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
index 826f566..654806e 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
@@ -153,27 +153,27 @@ static int msm_pcm_open(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct msm_voice *voice;
- if (!strcmp("VoLTE", substream->pcm->id)) {
+ if (!strncmp("VoLTE", substream->pcm->id, 5)) {
voice = &voice_info[VOLTE_SESSION_INDEX];
pr_debug("%s: Open VoLTE Substream Id=%s\n",
__func__, substream->pcm->id);
- } else if (!strcmp("Voice2", substream->pcm->id)) {
+ } else if (!strncmp("Voice2", substream->pcm->id, 6)) {
voice = &voice_info[VOICE2_SESSION_INDEX];
pr_debug("%s: Open Voice2 Substream Id=%s\n",
__func__, substream->pcm->id);
- } else if (!strcmp("QCHAT", substream->pcm->id)) {
+ } else if (!strncmp("QCHAT", substream->pcm->id, 5)) {
voice = &voice_info[QCHAT_SESSION_INDEX];
pr_debug("%s: Open QCHAT Substream Id=%s\n",
__func__, substream->pcm->id);
- } else if (!strcmp("VoWLAN", substream->pcm->id)) {
+ } else if (!strncmp("VoWLAN", substream->pcm->id, 6)) {
voice = &voice_info[VOWLAN_SESSION_INDEX];
pr_debug("%s: Open VoWLAN Substream Id=%s\n",
__func__, substream->pcm->id);
- } else if (!strcmp("VoiceMMode1", substream->pcm->id)) {
+ } else if (!strncmp("VoiceMMode1", substream->pcm->id, 11)) {
voice = &voice_info[VOICEMMODE1_INDEX];
pr_debug("%s: Open VoiceMMode1 Substream Id=%s\n",
__func__, substream->pcm->id);
- } else if (!strcmp("VoiceMMode2", substream->pcm->id)) {
+ } else if (!strncmp("VoiceMMode2", substream->pcm->id, 11)) {
voice = &voice_info[VOICEMMODE2_INDEX];
pr_debug("%s: Open VoiceMMode2 Substream Id=%s\n",
__func__, substream->pcm->id);
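The switch from strcmp() to strncmp() above makes the stream lookup a prefix match, presumably because substream->pcm->id can carry extra characters after the stream name. A tiny hedged illustration; the exact suffix shown is an assumption.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *pcm_id = "VoiceMMode1 (*)";	/* hypothetical id string */

	/* Exact match fails once the id has a suffix ... */
	printf("strcmp matches:  %d\n", strcmp("VoiceMMode1", pcm_id) == 0);
	/* ... while a prefix match over the stream name still succeeds. */
	printf("strncmp matches: %d\n",
	       strncmp("VoiceMMode1", pcm_id, 11) == 0);

	return 0;
}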
diff --git a/sound/soc/msm/sdm660-ext-dai-links.c b/sound/soc/msm/sdm660-ext-dai-links.c
index f64074d..1c03d8c 100644
--- a/sound/soc/msm/sdm660-ext-dai-links.c
+++ b/sound/soc/msm/sdm660-ext-dai-links.c
@@ -335,7 +335,6 @@ static struct snd_soc_dai_link msm_ext_tasha_fe_dai[] = {
.platform_name = "msm-pcm-hostless",
.dynamic = 1,
.dpcm_playback = 1,
- .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c
index 96f3f85..2b8c9c7 100644
--- a/sound/soc/msm/sdm845.c
+++ b/sound/soc/msm/sdm845.c
@@ -397,7 +397,8 @@ static char const *usb_sample_rate_text[] = {"KHZ_8", "KHZ_11P025",
"KHZ_88P2", "KHZ_96", "KHZ_176P4",
"KHZ_192", "KHZ_352P8", "KHZ_384"};
static char const *ext_disp_sample_rate_text[] = {"KHZ_48", "KHZ_96",
- "KHZ_192"};
+ "KHZ_192", "KHZ_32", "KHZ_44P1",
+ "KHZ_88P2", "KHZ_176P4" };
static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four",
"Five", "Six", "Seven", "Eight"};
static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE", "S32_LE"};
@@ -1466,6 +1467,22 @@ static int ext_disp_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
return idx;
switch (ext_disp_rx_cfg[idx].sample_rate) {
+ case SAMPLING_RATE_176P4KHZ:
+ sample_rate_val = 6;
+ break;
+
+ case SAMPLING_RATE_88P2KHZ:
+ sample_rate_val = 5;
+ break;
+
+ case SAMPLING_RATE_44P1KHZ:
+ sample_rate_val = 4;
+ break;
+
+ case SAMPLING_RATE_32KHZ:
+ sample_rate_val = 3;
+ break;
+
case SAMPLING_RATE_192KHZ:
sample_rate_val = 2;
break;
@@ -1496,6 +1513,18 @@ static int ext_disp_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
return idx;
switch (ucontrol->value.integer.value[0]) {
+ case 6:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_176P4KHZ;
+ break;
+ case 5:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_88P2KHZ;
+ break;
+ case 4:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_44P1KHZ;
+ break;
+ case 3:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_32KHZ;
+ break;
case 2:
ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_192KHZ;
break;