Merge "ASoC: wcd934x: Fix sequence for efuse sensing"
diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
index c467327..54bbf25 100644
--- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt
+++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
@@ -32,6 +32,8 @@
- clocks : List of phandle and clock specifier pairs
- clock-names : List of clock input name strings sorted in the same
order as the clocks property.
+- qcom,keep_radio_on_during_sleep: Boolean flag to indicate whether to suspend to D3hot
+ instead of turning off the device
Example:
wil6210: qcom,wil6210 {
@@ -56,5 +58,6 @@
clocks = <&clock_gcc clk_rf_clk3>,
<&clock_gcc clk_rf_clk3_pin>;
clock-names = "rf_clk3_clk", "rf_clk3_pin_clk";
+ qcom,keep_radio_on_during_sleep;
};
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index b0eed20..5cf2cb8 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -132,8 +132,6 @@
power collapse feature available or not.
- qcom,sde-has-mixer-gc: Boolean property to indicate if mixer has gamma correction
feature available or not.
-- qcom,sde-has-cdp: Boolean property to indicate if cdp feature is
- available or not.
- qcom,sde-sspp-clk-ctrl: Array of offsets describing clk control
offsets for dynamic clock gating. 1st value
in the array represents offset of the control
@@ -148,10 +146,6 @@
control register. Number of offsets defined should
match the number of offsets defined in
property: qcom,sde-sspp-off.
-- qcom,sde-sspp-danger-lut: A 3 cell property, with a format of <linear, tile, nrt>,
- indicating the danger luts on sspp.
-- qcom,sde-sspp-safe-lut: A 3 cell property, with a format of <linear, tile, nrt>,
- indicating the safe luts on sspp.
- qcom,sde-sspp-excl-rect: Array of u32 values indicating exclusion rectangle
support on each sspp.
- qcom,sde-sspp-smart-dma-priority: Array of u32 values indicating hw pipe
@@ -240,6 +234,8 @@
of (pps, OT limit), where pps is pixel per second and
OT limit is the write limit to apply if the given
pps is not exceeded.
+- qcom,sde-vbif-memtype-0: Array of u32 vbif memory type settings, group 0
+- qcom,sde-vbif-memtype-1: Array of u32 vbif memory type settings, group 1
- qcom,sde-wb-id: Array of writeback ids corresponding to the
offsets defined in property: qcom,sde-wb-off.
- qcom,sde-wb-clk-ctrl: Array of 2 cell property describing clk control
@@ -301,6 +297,31 @@
priority for realtime clients.
- qcom,sde-vbif-qos-nrt-remap: This array is used to program vbif qos remapper register
priority for non-realtime clients.
+- qcom,sde-danger-lut: A 4 cell property, with a format of <linear,
+ tile, nrt, cwb>,
+ indicating the danger luts on sspp.
+- qcom,sde-safe-lut: A 4 cell property, with a format of <linear,
+ tile, nrt, cwb>,
+ indicating the safe luts on sspp.
+- qcom,sde-qos-lut-linear: Array of 3 cell property, with a format of
+ <fill level, lut hi, lut lo> in ascending fill level
+ indicating the qos luts for linear format on sspp.
+ Zero fill level on the last entry identifies the default lut.
+- qcom,sde-qos-lut-macrotile: Array of 3 cell property, with a format of
+ <fill level, lut hi, lut lo> in ascending fill level
+ indicating the qos luts for macrotile format on sspp.
+ Zero fill level on the last entry identifies the default lut.
+- qcom,sde-qos-lut-nrt: Array of 3 cell property, with a format of
+ <fill level, lut hi, lut lo> in ascending fill level
+			indicating the qos luts for nrt (e.g. wfd) on sspp.
+ Zero fill level on the last entry identifies the default lut.
+- qcom,sde-qos-lut-cwb: Array of 3 cell property, with a format of
+ <fill level, lut hi, lut lo> in ascending fill level
+ indicating the qos luts for cwb on sspp.
+ Zero fill level on the last entry identifies the default lut.
+- qcom,sde-cdp-setting: Array of 2 cell property, with a format of
+ <read enable, write enable> for cdp use cases in
+ order of <real_time>, and <non_real_time>.
Bus Scaling Subnodes:
- qcom,sde-reg-bus: Property to provide Bus scaling for register access for
@@ -429,7 +450,6 @@
qcom,sde-ubwc-static = <0x100>;
qcom,sde-ubwc-swizzle = <0>;
qcom,sde-panic-per-pipe;
- qcom,sde-has-cdp;
qcom,sde-has-src-split;
qcom,sde-has-dim-layer;
qcom,sde-sspp-src-size = <0x100>;
@@ -471,8 +491,35 @@
qcom,sde-wb-id = <2>;
qcom,sde-wb-clk-ctrl = <0x2bc 16>;
- qcom,sde-sspp-danger-lut = <0x000f 0xffff 0x0000>;
- qcom,sde-sspp-safe-lut = <0xfffc 0xff00 0xffff>;
+ qcom,sde-danger-lut = <0x0000000f 0x0000ffff 0x00000000
+ 0x00000000>;
+ qcom,sde-safe-lut = <0xfffc 0xff00 0xffff 0xffff>;
+ qcom,sde-qos-lut-linear =
+ <4 0x00000000 0x00000357>,
+ <5 0x00000000 0x00003357>,
+ <6 0x00000000 0x00023357>,
+ <7 0x00000000 0x00223357>,
+ <8 0x00000000 0x02223357>,
+ <9 0x00000000 0x22223357>,
+ <10 0x00000002 0x22223357>,
+ <11 0x00000022 0x22223357>,
+ <12 0x00000222 0x22223357>,
+ <13 0x00002222 0x22223357>,
+ <14 0x00012222 0x22223357>,
+ <0 0x00112222 0x22223357>;
+ qcom,sde-qos-lut-macrotile =
+ <10 0x00000003 0x44556677>,
+ <11 0x00000033 0x44556677>,
+ <12 0x00000233 0x44556677>,
+ <13 0x00002233 0x44556677>,
+ <14 0x00012233 0x44556677>,
+ <0 0x00112233 0x44556677>;
+ qcom,sde-qos-lut-nrt =
+ <0 0x00000000 0x00000000>;
+ qcom,sde-qos-lut-cwb =
+ <0 0x75300000 0x00000000>;
+
+ qcom,sde-cdp-setting = <1 1>, <1 0>;
qcom,sde-vbif-off = <0 0>;
qcom,sde-vbif-id = <0 1>;
@@ -482,6 +529,8 @@
<124416000 4>, <248832000 16>;
qcom,sde-vbif-dynamic-ot-wr-limit = <62208000 2>,
<124416000 4>, <248832000 16>;
+ qcom,sde-vbif-memtype-0 = <3 3 3 3 3 3 3 3>;
+ qcom,sde-vbif-memtype-1 = <3 3 3 3 3 3>;
qcom,sde-dram-channels = <2>;
qcom,sde-num-nrt-paths = <1>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-pll.txt b/Documentation/devicetree/bindings/fb/mdss-pll.txt
index d0d7fff..59fa6a0 100644
--- a/Documentation/devicetree/bindings/fb/mdss-pll.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-pll.txt
@@ -16,7 +16,7 @@
"qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_hdmi_pll_8996_v3_1p8",
"qcom,mdss_edp_pll_8996_v3", "qcom,mdss_edp_pll_8996_v3_1p8",
"qcom,mdss_dsi_pll_10nm", "qcom,mdss_dp_pll_8998",
- "qcom,mdss_hdmi_pll_8998"
+ "qcom,mdss_hdmi_pll_8998", "qcom,mdss_dp_pll_10nm".
- cell-index: Specifies the controller used
- reg: offset and length of the register set for the device.
- reg-names : names to refer to register sets related to this device
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 375eaf2..1394fd3 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -74,14 +74,6 @@
address size faults are due to a fundamental programming
error from which we don't care about recovering anyways.
-- qcom,skip-init : Disable resetting configuration for all context banks
- during device reset. This is useful for targets where
- some context banks are dedicated to other execution
- environments outside of Linux and those other EEs are
- programming their own stream match tables, SCTLR, etc.
- Without setting this option we will trample on their
- configuration.
-
- qcom,dynamic : Allow dynamic domains to be attached. This is only
useful if the upstream hardware is capable of switching
between multiple domains within a single context bank.
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
index 12d32ec..0123682 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -145,22 +145,30 @@
- qcom,fg-esr-timer-charging
Usage: optional
- Value type: <u32>
+ Value type: <prop-encoded-array>
Definition: Number of cycles between ESR pulses while the battery is
- charging.
+ charging. Array of 2 elements if specified.
+ Element 0 - Retry value for timer
+ Element 1 - Maximum value for timer
- qcom,fg-esr-timer-awake
Usage: optional
- Value type: <u32>
+ Value type: <prop-encoded-array>
Definition: Number of cycles between ESR pulses while the system is
- awake and the battery is discharging.
+ awake and the battery is discharging. Array of 2 elements
+ if specified.
+ Element 0 - Retry value for timer
+ Element 1 - Maximum value for timer
- qcom,fg-esr-timer-asleep
Usage: optional
- Value type: <u32>
+ Value type: <prop-encoded-array>
Definition: Number of cycles between ESR pulses while the system is
asleep and the battery is discharging. This option requires
- qcom,fg-esr-timer-awake to be defined.
+ qcom,fg-esr-timer-awake to be defined. Array of 2 elements
+ if specified.
+ Element 0 - Retry value for timer
+ Element 1 - Maximum value for timer
- qcom,fg-esr-pulse-thresh-ma
Usage: optional
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index e0ab31f..3a09b28 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -2256,8 +2256,8 @@
- qcom,wcn-btfm : Property to specify if WCN BT/FM chip is used for the target
- qcom,msm-mbhc-usbc-audio-supported : Property to specify if analog audio feature is
enabled or not.
-- qcom,usbc-analog-en1_gpio : EN1 GPIO to enable USB type-C analog audio
-- qcom,usbc-analog-en2_n_gpio : EN2 GPIO to enable USB type-C analog audio
+- qcom,usbc-analog-en1-gpio : EN1 GPIO to enable USB type-C analog audio
+- qcom,usbc-analog-en2-gpio : EN2 GPIO to enable USB type-C analog audio
- qcom,usbc-analog-force_detect_gpio : Force detect GPIO to enable USB type-C analog audio
Example:
@@ -2333,8 +2333,8 @@
qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrLeft",
"SpkrRight", "SpkrLeft";
qcom,msm-mbhc-usbc-audio-supported = <1>;
- qcom,usbc-analog-en1_gpio = <&wcd_usbc_analog_en1_gpio>;
- qcom,usbc-analog-en2_n_gpio = <&wcd_usbc_analog_en2n_gpio>;
+ qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
+ qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
qcom,usbc-analog-force_detect_gpio = <&wcd_usbc_analog_f_gpio>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index ea89751..0c2ae5f 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -63,7 +63,6 @@
<0x150c2000 0x20>;
reg-names = "base", "tcu-base";
#iommu-cells = <2>;
- qcom,skip-init;
qcom,use-3-lvl-tables;
#global-interrupts = <1>;
#size-cells = <1>;
@@ -330,9 +329,19 @@
apps_iommu_test_device {
compatible = "iommu-debug-test";
/*
- * This SID belongs to QUP1-GSI. We can't use a fake SID for
+ * This SID belongs to TSIF. We can't use a fake SID for
* the apps_smmu device.
*/
- iommus = <&apps_smmu 0x16 0>;
+ iommus = <&apps_smmu 0x20 0>;
+ };
+
+ apps_iommu_coherent_test_device {
+ compatible = "iommu-debug-test";
+ /*
+ * This SID belongs to QUP1-DMA. We can't use a fake SID for
+ * the apps_smmu device.
+ */
+ iommus = <&apps_smmu 0x3 0>;
+ dma-coherent;
};
};
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 886e792..660dac5 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -211,7 +211,7 @@
compatible = "qcom,qpnp-pdphy";
reg = <0x1700 0x100>;
vdd-pdphy-supply = <&pm8998_l24>;
- vbus-supply = <&smb2_vbus>;
+ vbus-supply = <&ext_5v_boost>;
vconn-supply = <&smb2_vconn>;
interrupts = <0x2 0x17 0x0 IRQ_TYPE_EDGE_RISING>,
<0x2 0x17 0x1 IRQ_TYPE_EDGE_RISING>,
@@ -270,8 +270,9 @@
io-channels = <&pmi8998_rradc 0>;
io-channel-names = "rradc_batt_id";
qcom,rradc-base = <0x4500>;
- qcom,fg-esr-timer-awake = <96>;
- qcom,fg-esr-timer-asleep = <256>;
+ qcom,fg-esr-timer-awake = <96 96>;
+ qcom,fg-esr-timer-asleep = <256 256>;
+ qcom,fg-esr-timer-charging = <0 96>;
qcom,cycle-counter-en;
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
new file mode 100644
index 0000000..61ef7ff
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,ion {
+ compatible = "qcom,msm-ion";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ system_heap: qcom,ion-heap@25 {
+ reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
+ };
+
+ qcom,ion-heap@22 { /* ADSP HEAP */
+ reg = <22>;
+ memory-region = <&adsp_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+
+ qcom,ion-heap@27 { /* QSEECOM HEAP */
+ reg = <27>;
+ memory-region = <&qseecom_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+
+ qcom,ion-heap@13 { /* SPSS HEAP */
+ reg = <13>;
+ memory-region = <&sp_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+
+ qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
+ reg = <10>;
+ memory-region = <&secure_display_memory>;
+ qcom,ion-heap-type = "HYP_CMA";
+ };
+
+ qcom,ion-heap@9 {
+ reg = <9>;
+ qcom,ion-heap-type = "SYSTEM_SECURE";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 2cbb990..7bef48d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -279,9 +279,113 @@
#address-cells = <2>;
#size-cells = <2>;
ranges;
+
+ removed_regions: removed_regions@85700000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x85700000 0 0x3800000>;
+ };
+
+ pil_camera_mem: camera_region@8ab00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x8ab00000 0 0x500000>;
+ };
+
+ pil_modem_mem: modem_region@8b000000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x8b000000 0 0x7e00000>;
+ };
+
+ pil_video_mem: pil_video_region@92e00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x92e00000 0 0x500000>;
+ };
+
+ pil_cdsp_mem: cdsp_regions@93300000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x93300000 0 0x600000>;
+ };
+
+ pil_mba_mem: pil_mba_region@0x93900000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x93900000 0 0x200000>;
+ };
+
+ pil_adsp_mem: pil_adsp_region@93b00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x93b00000 0 0x1e00000>;
+ };
+
+ pil_ipa_fw_mem: pil_ipa_fw_region@95900000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x95900000 0 0x10000>;
+ };
+
+ pil_ipa_gsi_mem: pil_ipa_gsi_region@95910000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x95910000 0 0x5000>;
+ };
+
+ pil_gpu_mem: pil_gpu_region@95915000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x95915000 0 0x1000>;
+ };
+
+ adsp_mem: adsp_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xffffffff>;
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0xc00000>;
+ };
+
+ qseecom_mem: qseecom_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xffffffff>;
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0x1400000>;
+ };
+
+ sp_mem: sp_region { /* SPSS-HLOS ION shared mem */
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0x800000>;
+ };
+
+ secure_display_memory: secure_display_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xffffffff>;
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0x5c00000>;
+ };
+
+ /* global autoconfigured region for contiguous allocations */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xffffffff>;
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0x2000000>;
+ linux,cma-default;
+ };
};
};
+#include "sdm670-ion.dtsi"
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
index f27b9da..4b7a680 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -27,7 +27,7 @@
};
&mdss_mdp {
- connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
+ connectors = <&sde_rscc &sde_wb>;
};
&dsi_sharp_4k_dsc_video {
@@ -35,7 +35,7 @@
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,platform-te-gpio = <&tlmm 10 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
index 4627e60..fcf6ad1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -27,7 +27,7 @@
};
&mdss_mdp {
- connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
+ connectors = <&sde_rscc &sde_wb>;
};
&dsi_sharp_4k_dsc_video {
@@ -35,7 +35,7 @@
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,platform-te-gpio = <&tlmm 10 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
index fcc09a0..709c89d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
@@ -78,6 +78,9 @@
qcom,hph-en0-gpio = <&tavil_hph_en0>;
qcom,hph-en1-gpio = <&tavil_hph_en1>;
qcom,tavil-mclk-clk-freq = <9600000>;
+
+ qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
+
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&compr>,
@@ -136,6 +139,18 @@
<&wsa881x_0213>, <&wsa881x_0214>;
qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
"SpkrLeft", "SpkrRight";
+
+ qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_usbc_analog_en2_active>;
+ pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
+ };
+
+ wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl@49 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_usbc_analog_en1_active>;
+ pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
};
wcd9xxx_intc: wcd9xxx-irq {
@@ -169,6 +184,14 @@
qocm,wcd-dsp-glink {
compatible = "qcom,wcd-dsp-glink";
};
+
+ qcom,wcd-dsp-mgr {
+ compatible = "qcom,wcd-dsp-mgr";
+ qcom,wdsp-components = <&wcd934x_cdc 0>,
+ <&wcd_spi_0 1>,
+ <&glink_spi_xprt_wdsp 2>;
+ qcom,img-filename = "cpe_9340";
+ };
};
&slim_aud {
@@ -229,5 +252,13 @@
qcom,cdc-mad-dmic-rate = <600000>;
qcom,wdsp-cmpnt-dev-name = "tavil_codec";
+
+ wcd_spi_0: wcd_spi {
+ compatible = "qcom,wcd-spi-v2";
+ qcom,master-bus-num = <0>;
+ qcom,chip-select = <0>;
+ qcom,max-frequency = <9600000>;
+ qcom,mem-base-addr = <0x100000>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index b1c91bf..e26f888 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -25,12 +25,14 @@
<0x1380000 0x40000>,
<0x1740000 0x40000>,
<0x1620000 0x40000>,
+ <0x1620000 0x40000>,
<0x1620000 0x40000>;
reg-names = "aggre1_noc-base", "aggre2_noc-base",
"config_noc-base", "dc_noc-base",
"gladiator_noc-base", "mc_virt-base", "mem_noc-base",
- "mmss_noc-base", "system_noc-base", "ipa_virt-base";
+ "mmss_noc-base", "system_noc-base", "ipa_virt-base",
+ "camnoc_virt-base";
mbox-names = "apps_rsc", "disp_rsc";
mboxes = <&apps_rsc 0 &disp_rsc 0>;
@@ -368,6 +370,15 @@
clocks = <>;
};
+ fab_camnoc_virt: fab-camnoc_virt {
+ cell-id = <MSM_BUS_FAB_CAMNOC_VIRT>;
+ label = "fab-camnoc_virt";
+ qcom,fab-dev;
+ qcom,base-name = "camnoc_virt-base";
+ qcom,bypass-qos-prg;
+ clocks = <>;
+ };
+
fab_config_noc: fab-config_noc {
cell-id = <MSM_BUS_FAB_CONFIG_NOC>;
label = "fab-config_noc";
@@ -654,6 +665,33 @@
qcom,bus-dev = <&fab_aggre2_noc>;
};
+ mas_qxm_camnoc_hf0_uncomp: mas-qxm-camnoc-hf0-uncomp {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP>;
+ label = "mas-qxm-camnoc-hf0-uncomp";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qns_camnoc_uncomp>;
+ qcom,bus-dev = <&fab_camnoc_virt>;
+ };
+
+ mas_qxm_camnoc_hf1_uncomp: mas-qxm-camnoc-hf1-uncomp {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP>;
+ label = "mas-qxm-camnoc-hf1-uncomp";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qns_camnoc_uncomp>;
+ qcom,bus-dev = <&fab_camnoc_virt>;
+ };
+
+ mas_qxm_camnoc_sf_uncomp: mas-qxm-camnoc-sf-uncomp {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_SF_UNCOMP>;
+ label = "mas-qxm-camnoc-sf-uncomp";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qns_camnoc_uncomp>;
+ qcom,bus-dev = <&fab_camnoc_virt>;
+ };
+
mas_qhm_spdm: mas-qhm-spdm {
cell-id = <MSM_BUS_MASTER_SPDM>;
label = "mas-qhm-spdm";
@@ -900,12 +938,23 @@
qcom,bus-dev = <&fab_mmss_noc>;
};
- mas_qxm_camnoc_hf: mas-qxm-camnoc-hf {
- cell-id = <MSM_BUS_MASTER_CAMNOC_HF>;
- label = "mas-qxm-camnoc-hf";
+ mas_qxm_camnoc_hf0: mas-qxm-camnoc-hf0 {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_HF0>;
+ label = "mas-qxm-camnoc-hf0";
qcom,buswidth = <32>;
- qcom,agg-ports = <2>;
- qcom,qport = <1 2>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <1>;
+ qcom,connections = <&slv_qns_mem_noc_hf>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ qcom,bcms = <&bcm_mm1>;
+ };
+
+ mas_qxm_camnoc_hf1: mas-qxm-camnoc-hf1 {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_HF1>;
+ label = "mas-qxm-camnoc-hf1";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <2>;
qcom,connections = <&slv_qns_mem_noc_hf>;
qcom,bus-dev = <&fab_mmss_noc>;
qcom,bcms = <&bcm_mm1>;
@@ -1193,6 +1242,15 @@
qcom,bcms = <&bcm_sn11>;
};
+ slv_qns_camnoc_uncomp:slv-qns-camnoc-uncomp {
+ cell-id = <MSM_BUS_SLAVE_CAMNOC_UNCOMP>;
+ label = "slv-qns-camnoc-uncomp";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_camnoc_virt>;
+ qcom,bcms = <&bcm_mm1>;
+ };
+
slv_qhs_a1_noc_cfg:slv-qhs-a1-noc-cfg {
cell-id = <MSM_BUS_SLAVE_A1NOC_CFG>;
label = "slv-qhs-a1-noc-cfg";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index cb20e0f..aaef335 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -389,10 +389,10 @@
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>,
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ <MSM_BUS_MASTER_CAMNOC_HF0
+ MSM_BUS_SLAVE_EBI_CH0 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_HF0
+ MSM_BUS_SLAVE_EBI_CH0 0 0>;
};
qcom,axi-port-camnoc {
qcom,msm-bus,name = "cam_hf_1_camnoc";
@@ -400,10 +400,10 @@
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>,
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ <MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
};
};
qcom,axi-port2 {
@@ -414,21 +414,21 @@
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>,
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ <MSM_BUS_MASTER_CAMNOC_HF1
+ MSM_BUS_SLAVE_EBI_CH0 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_HF1
+ MSM_BUS_SLAVE_EBI_CH0 0 0>;
};
qcom,axi-port-camnoc {
- qcom,msm-bus,name = "cam_hf_1_camnoc";
+ qcom,msm-bus,name = "cam_hf_2_camnoc";
qcom,msm-bus-vector-dyn-vote;
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>,
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ <MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
};
};
qcom,axi-port3 {
@@ -439,10 +439,10 @@
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
- <MSM_BUS_MASTER_CAMNOC_SF
- MSM_BUS_SLAVE_EBI_CH0 0 0>,
- <MSM_BUS_MASTER_CAMNOC_SF
- MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ <MSM_BUS_MASTER_CAMNOC_SF
+ MSM_BUS_SLAVE_EBI_CH0 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_SF
+ MSM_BUS_SLAVE_EBI_CH0 0 0>;
};
qcom,axi-port-camnoc {
qcom,msm-bus,name = "cam_sf_1_camnoc";
@@ -450,10 +450,10 @@
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
- <MSM_BUS_MASTER_CAMNOC_SF
- MSM_BUS_SLAVE_EBI_CH0 0 0>,
- <MSM_BUS_MASTER_CAMNOC_SF
- MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ <MSM_BUS_MASTER_CAMNOC_SF_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_SF_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
index fff9160..ef964ae 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
@@ -13,8 +13,13 @@
/dts-v1/;
/plugin/;
-#include "sdm845.dtsi"
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
#include "sdm845-cdp.dtsi"
+#include "sdm845-qupv3.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM845 v1 CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index 7038d48..5e370d6 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -237,7 +237,7 @@
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
};
@@ -247,7 +247,29 @@
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_nt35597_truly_dsc_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_nt35597_truly_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
};
@@ -522,3 +544,7 @@
&wil6210 {
status = "ok";
};
+
+&ext_5v_boost {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index 97573ea..e32ec6e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -780,7 +780,8 @@
};
tpdm_lpass: tpdm@6844000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6844000 0x1000>;
reg-names = "tpdm-base";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
index 79fa580..548bd49 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
@@ -13,8 +13,13 @@
/dts-v1/;
/plugin/;
-#include "sdm845.dtsi"
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
#include "sdm845-mtp.dtsi"
+#include "sdm845-qupv3.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM845 v1 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 521fd6b3..b5c471f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -89,7 +89,7 @@
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
};
@@ -99,9 +99,31 @@
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_nt35597_truly_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_nt35597_truly_dsc_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
};
&dsi_sim_vid {
@@ -299,6 +321,10 @@
status = "okay";
};
+&ext_5v_boost {
+ status = "ok";
+};
+
&usb_qmp_phy {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index f534891..9946a25 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -299,6 +299,63 @@
};
};
+ /* USB C analog configuration */
+ wcd_usbc_analog_en1 {
+ wcd_usbc_analog_en1_idle: wcd_usbc_ana_en1_idle {
+ mux {
+ pins = "gpio49";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio49";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+
+ wcd_usbc_analog_en1_active: wcd_usbc_ana_en1_active {
+ mux {
+ pins = "gpio49";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio49";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ wcd_usbc_analog_en2 {
+ wcd_usbc_analog_en2_idle: wcd_usbc_ana_en2_idle {
+ mux {
+ pins = "gpio51";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio51";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+
+ wcd_usbc_analog_en2_active: wcd_usbc_ana_en2_active {
+ mux {
+ pins = "gpio51";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio51";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
pri_aux_pcm_clk {
pri_aux_pcm_clk_sleep: pri_aux_pcm_clk_sleep {
mux {
@@ -2839,4 +2896,13 @@
power-source = <0>;
};
};
+
+ usb2_ext_5v_boost {
+ usb2_ext_5v_boost_default: usb2_ext_5v_boost_default {
+ pins = "gpio10";
+ function = "normal";
+ output-low;
+ power-source = <0>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 3bf1ea4..bba95a3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -105,6 +105,41 @@
qcom,wsa-max-devs = <1>;
qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
+
+ qcom,msm-mbhc-usbc-audio-supported = <1>;
+
+ qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_usbc_analog_en2_active>;
+ pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
+ };
+};
+
+&wcd934x_cdc {
+ wcd: wcd_pinctrl@5 {
+ us_euro_sw_wcd_active: us_euro_sw_wcd_active {
+ mux {
+ pins = "gpio1";
+ };
+
+ config {
+ pins = "gpio1";
+ /delete-property/ output-high;
+ bias-high-impedance;
+ };
+ };
+
+ us_euro_sw_wcd_sleep: us_euro_sw_wcd_sleep {
+ mux {
+ pins = "gpio1";
+ };
+
+ config {
+ pins = "gpio1";
+ /delete-property/ output-low;
+ bias-high-impedance;
+ };
+ };
};
};
@@ -169,7 +204,7 @@
};
&mdss_mdp {
- connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
+ connectors = <&sde_rscc &sde_wb>;
};
&dsi_sharp_4k_dsc_video {
@@ -177,7 +212,7 @@
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,platform-te-gpio = <&tlmm 10 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
@@ -191,3 +226,7 @@
&wil6210 {
status = "ok";
};
+
+&ext_5v_boost {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 19b8744..7befe3b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
/* Stub regulators */
@@ -1260,8 +1261,12 @@
pm8005_s1_level: regulator-s1-level {
regulator-name = "pm8005_s1_level";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ regulator-min-microvolt
+ = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ regulator-max-microvolt
+ = <RPMH_REGULATOR_LEVEL_MAX>;
+ qcom,init-voltage-level
+ = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
};
};
@@ -1290,13 +1295,21 @@
qcom,init-voltage = <600000>;
};
};
+
+ ext_5v_boost: ext_5v_boost {
+ status = "disabled";
+ compatible = "regulator-fixed";
+ regulator-name = "ext_5v_boost";
+ gpio = <&pmi8998_gpios 10 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ regulator-enable-ramp-delay = <1600>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_ext_5v_boost_default>;
+ };
};
&pmi8998_charger {
- smb2_vbus: qcom,smb2-vbus {
- regulator-name = "smb2-vbus";
- };
-
smb2_vconn: qcom,smb2-vconn {
regulator-name = "smb2-vconn";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 255c0b3..1500bb5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -248,10 +248,10 @@
label = "dsi_nt35597_truly_dsc_cmd_display";
qcom,display-type = "primary";
- qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
- qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
- clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
- <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+ qcom,dsi-ctrl = <&mdss_dsi1>;
+ qcom,dsi-phy = <&mdss_dsi_phy1>;
+ clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
+ <&mdss_dsi1_pll PCLK_MUX_1_CLK>;
clock-names = "src_byte_clk", "src_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
@@ -272,10 +272,10 @@
label = "dsi_nt35597_truly_dsc_video_display";
qcom,display-type = "primary";
- qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
- qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
- clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
- <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+ qcom,dsi-ctrl = <&mdss_dsi1>;
+ qcom,dsi-phy = <&mdss_dsi_phy1>;
+ clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
+ <&mdss_dsi1_pll PCLK_MUX_1_CLK>;
clock-names = "src_byte_clk", "src_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
@@ -368,10 +368,106 @@
cell-index = <0>;
label = "wb_display";
};
+
+ sde_dp: qcom,dp_display@0{
+ cell-index = <0>;
+ compatible = "qcom,dp-display";
+
+ gdsc-supply = <&mdss_core_gdsc>;
+ vdda-1p2-supply = <&pm8998_l26>;
+ vdda-0p9-supply = <&pm8998_l1>;
+
+ reg = <0xae90000 0xa84>,
+ <0x88eaa00 0x200>,
+ <0x88ea200 0x200>,
+ <0x88ea600 0x200>,
+ <0xaf02000 0x1a0>,
+ <0x780000 0x621c>,
+ <0x88ea030 0x10>,
+ <0x0aee1000 0x034>;
+ reg-names = "dp_ctrl", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
+ "dp_mmss_cc", "qfprom_physical", "dp_pll",
+ "hdcp_physical";
+
+ interrupt-parent = <&mdss_mdp>;
+ interrupts = <12 0>;
+
+ clocks = <&clock_dispcc DISP_CC_MDSS_DP_AUX_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>,
+ <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+ <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
+ <&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>;
+ clock-names = "core_aux_clk", "core_usb_ref_clk_src",
+ "core_usb_ref_clk", "core_usb_cfg_ahb_clk",
+ "core_usb_pipe_clk", "ctrl_link_clk",
+ "ctrl_link_iface_clk", "ctrl_crypto_clk",
+ "ctrl_pixel_clk", "pixel_clk_rcg", "pixel_parent";
+
+ qcom,dp-usbpd-detection = <&pmi8998_pdphy>;
+
+ qcom,aux-cfg-settings = [00 13 04 00 0a 26 0a 03 bb 03];
+
+ qcom,core-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,core-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+
+ qcom,ctrl-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ctrl-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdda-1p2";
+ qcom,supply-min-voltage = <1200000>;
+ qcom,supply-max-voltage = <1200000>;
+ qcom,supply-enable-load = <21800>;
+ qcom,supply-disable-load = <4>;
+ };
+ };
+
+ qcom,phy-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,phy-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdda-0p9";
+ qcom,supply-min-voltage = <880000>;
+ qcom,supply-max-voltage = <880000>;
+ qcom,supply-enable-load = <36000>;
+ qcom,supply-disable-load = <32>;
+ };
+ };
+ };
+};
+
+&sde_dp {
+ pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
+ pinctrl-0 = <&sde_dp_aux_active &sde_dp_usbplug_cc_active>;
+ pinctrl-1 = <&sde_dp_aux_suspend &sde_dp_usbplug_cc_suspend>;
+ qcom,aux-en-gpio = <&tlmm 43 0>;
+ qcom,aux-sel-gpio = <&tlmm 51 0>;
+ qcom,usbplug-cc-gpio = <&tlmm 38 0>;
};
&mdss_mdp {
- connectors = <&sde_rscc &sde_wb &dsi_dual_nt35597_truly_cmd_display>;
+ connectors = <&sde_rscc &sde_wb &sde_dp>;
};
&dsi_dual_nt35597_truly_video {
@@ -396,7 +492,8 @@
qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 05 03 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0b>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
- qcom,display-topology = <2 2 2>;
+ qcom,display-topology = <1 1 1>,
+ <2 2 1>;
qcom,default-topology-index = <0>;
};
@@ -404,7 +501,8 @@
qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 05 03 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0b>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
- qcom,display-topology = <2 2 2>;
+ qcom,display-topology = <1 1 1>,
+ <2 2 1>;
qcom,default-topology-index = <0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
index 168f2a9..b9eac3c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
@@ -64,4 +64,46 @@
};
};
};
+
+ mdss_dp_pll: qcom,mdss_dp_pll@c011000 {
+ compatible = "qcom,mdss_dp_pll_10nm";
+ label = "MDSS DP PLL";
+ cell-index = <0>;
+ #clock-cells = <1>;
+
+ reg = <0x088ea000 0x200>,
+ <0x088eaa00 0x200>,
+ <0x088ea200 0x200>,
+ <0x088ea600 0x200>,
+ <0xaf03000 0x8>;
+ reg-names = "pll_base", "phy_base", "ln_tx0_base",
+ "ln_tx1_base", "gdsc_base";
+
+ gdsc-supply = <&mdss_core_gdsc>;
+
+ clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>,
+ <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+ <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+ clock-names = "iface_clk", "ref_clk_src", "ref_clk",
+ "cfg_ahb_clk", "pipe_clk";
+ clock-rate = <0>;
+
+ qcom,platform-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,platform-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+
+ };
+ };
+
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 68f710a..2a29283 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -136,10 +136,40 @@
qcom,sde-vbif-off = <0>;
qcom,sde-vbif-size = <0x1040>;
qcom,sde-vbif-id = <0>;
+ qcom,sde-vbif-memtype-0 = <3 3 3 3 3 3 3 3>;
+ qcom,sde-vbif-memtype-1 = <3 3 3 3 3 3>;
qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>;
qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>;
+ qcom,sde-danger-lut = <0x0000000f 0x0000ffff 0x00000000
+ 0x00000000>;
+ qcom,sde-safe-lut = <0xfffc 0xff00 0xffff 0xffff>;
+ qcom,sde-qos-lut-linear =
+ <4 0x00000000 0x00000357>,
+ <5 0x00000000 0x00003357>,
+ <6 0x00000000 0x00023357>,
+ <7 0x00000000 0x00223357>,
+ <8 0x00000000 0x02223357>,
+ <9 0x00000000 0x22223357>,
+ <10 0x00000002 0x22223357>,
+ <11 0x00000022 0x22223357>,
+ <12 0x00000222 0x22223357>,
+ <13 0x00002222 0x22223357>,
+ <14 0x00012222 0x22223357>,
+ <0 0x00112222 0x22223357>;
+ qcom,sde-qos-lut-macrotile =
+ <10 0x00000003 0x44556677>,
+ <11 0x00000033 0x44556677>,
+ <12 0x00000233 0x44556677>,
+ <13 0x00002233 0x44556677>,
+ <14 0x00012233 0x44556677>,
+ <0 0x00112233 0x44556677>;
+ qcom,sde-qos-lut-nrt =
+ <0 0x00000000 0x00000000>;
+ qcom,sde-qos-lut-cwb =
+ <0 0x75300000 0x00000000>;
+
qcom,sde-inline-rotator = <&mdss_rotator 0>;
qcom,sde-reg-dma-off = <0>;
@@ -264,8 +294,19 @@
interrupt-parent = <&mdss_mdp>;
interrupts = <2 0>;
+ /* Offline rotator QoS setting */
qcom,mdss-rot-vbif-qos-setting = <3 3 3 3 3 3 3 3>;
qcom,mdss-rot-cdp-setting = <1 1>;
+ qcom,mdss-rot-qos-lut = <0x0 0x0 0x0 0x0>;
+ qcom,mdss-rot-danger-lut = <0x0 0x0>;
+ qcom,mdss-rot-safe-lut = <0x0000ffff 0x0000ffff>;
+
+ /* Inline rotator QoS Setting */
+ /* setting default register values for RD - qos/danger/safe */
+ qcom,mdss-inline-rot-qos-lut = <0x44556677 0x00112233
+ 0x44556677 0x00112233>;
+ qcom,mdss-inline-rot-danger-lut = <0x0055aaff 0x0000ffff>;
+ qcom,mdss-inline-rot-safe-lut = <0x0000f000 0x0000ff00>;
qcom,mdss-default-ot-rd-limit = <32>;
qcom,mdss-default-ot-wr-limit = <32>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index 4fe9282..af12224 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -27,6 +27,10 @@
qcom,max-secure-instances = <5>;
qcom,max-hw-load = <2563200>; /* Full 4k @ 60 + 1080p @ 60 */
+ /* LLCC Info */
+ cache-slice-names = "vidsc0", "vidsc1";
+ cache-slices = <&llcc 2>, <&llcc 3>;
+
/* Supply */
venus-supply = <&venus_gdsc>;
venus-core0-supply = <&vcodec0_gdsc>;
@@ -91,6 +95,14 @@
qcom,bus-governor = "performance";
qcom,bus-range-kbps = <1000 1000>;
};
+ venus_bus_llcc {
+ compatible = "qcom,msm-vidc,bus";
+ label = "venus-llcc";
+ qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+ qcom,bus-slave = <MSM_BUS_SLAVE_LLCC>;
+ qcom,bus-governor = "performance";
+ qcom,bus-range-kbps = <17000 125700>;
+ };
/* MMUs */
non_secure_cb {
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 843e326..c89a05e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -459,6 +459,22 @@
compatible = "simple-bus";
};
+ firmware: firmware {
+ android {
+ compatible = "android,firmware";
+ fstab {
+ compatible = "android,fstab";
+ vendor {
+ compatible = "android,vendor";
+ dev = "/dev/block/platform/soc/1d84000.ufshc/by-name/vendor";
+ type = "ext4";
+ mnt_flags = "ro,barrier=1,discard";
+ fsmgr_flags = "wait,slotselect";
+ };
+ };
+ };
+ };
+
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
@@ -755,63 +771,6 @@
};
};
- msm_cpufreq: qcom,msm-cpufreq {
- compatible = "qcom,msm-cpufreq";
- clock-names = "cpu0_clk", "cpu4_clk";
- clocks = <&clock_cpucc CPU0_PWRCL_CLK>,
- <&clock_cpucc CPU4_PERFCL_CLK>;
-
- qcom,governor-per-policy;
-
- qcom,cpufreq-table-0 =
- < 300000 >,
- < 422400 >,
- < 499200 >,
- < 576000 >,
- < 652800 >,
- < 748800 >,
- < 825600 >,
- < 902400 >,
- < 979200 >,
- < 1056000 >,
- < 1132800 >,
- < 1209600 >,
- < 1286400 >,
- < 1363200 >,
- < 1440000 >,
- < 1516800 >,
- < 1593600 >,
- < 1651200 >,
- < 1708800 >;
-
- qcom,cpufreq-table-4 =
- < 300000 >,
- < 422400 >,
- < 499200 >,
- < 576000 >,
- < 652800 >,
- < 729600 >,
- < 806400 >,
- < 883200 >,
- < 960000 >,
- < 1036800 >,
- < 1113600 >,
- < 1190400 >,
- < 1267200 >,
- < 1344000 >,
- < 1420800 >,
- < 1497600 >,
- < 1574400 >,
- < 1651200 >,
- < 1728000 >,
- < 1804800 >,
- < 1881600 >,
- < 1958400 >,
- < 2035200 >,
- < 2092800 >,
- < 2208000 >;
- };
-
cpubw: qcom,cpubw {
compatible = "qcom,devbw";
governor = "performance";
@@ -2160,19 +2119,19 @@
};
qcom,llcc1_d_cache {
qcom,dump-node = <&LLCC_1>;
- qcom,dump-id = <0x121>;
+ qcom,dump-id = <0x140>;
};
qcom,llcc2_d_cache {
qcom,dump-node = <&LLCC_2>;
- qcom,dump-id = <0x122>;
+ qcom,dump-id = <0x141>;
};
qcom,llcc3_d_cache {
qcom,dump-node = <&LLCC_3>;
- qcom,dump-id = <0x123>;
+ qcom,dump-id = <0x142>;
};
qcom,llcc4_d_cache {
qcom,dump-node = <&LLCC_4>;
- qcom,dump-id = <0x124>;
+ qcom,dump-id = <0x143>;
};
qcom,l1_tlb_dump0 {
qcom,dump-node = <&L1_TLB_0>;
@@ -4203,10 +4162,12 @@
};
&vcodec0_gdsc {
+ qcom,support-hw-trigger;
status = "ok";
};
&vcodec1_gdsc {
+ qcom,support-hw-trigger;
status = "ok";
};
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 3f2ce31..9102df7 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1381,24 +1381,14 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
goto bail;
}
- PERF(fl->profile, fl->perf.invargs,
- if (!fl->sctx->smmu.coherent) {
+ if (!fl->sctx->smmu.coherent)
inv_args_pre(ctx);
- if (mode == FASTRPC_MODE_SERIAL)
- inv_args(ctx);
- }
- PERF_END);
-
PERF(fl->profile, fl->perf.link,
VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
PERF_END);
if (err)
goto bail;
- PERF(fl->profile, fl->perf.invargs,
- if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
- inv_args(ctx);
- PERF_END);
wait:
if (kernel)
wait_for_completion(&ctx->work);
@@ -1408,6 +1398,12 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
if (err)
goto bail;
}
+
+ PERF(fl->profile, fl->perf.invargs,
+ if (!fl->sctx->smmu.coherent)
+ inv_args(ctx);
+ PERF_END);
+
VERIFY(err, 0 == (err = ctx->retval));
if (err)
goto bail;
@@ -1790,11 +1786,9 @@ void fastrpc_glink_notify_state(void *handle, const void *priv,
link->port_state = FASTRPC_LINK_DISCONNECTED;
break;
case GLINK_REMOTE_DISCONNECTED:
- if (me->channel[cid].chan &&
- link->link_state == FASTRPC_LINK_STATE_UP) {
+ if (me->channel[cid].chan) {
fastrpc_glink_close(me->channel[cid].chan, cid);
me->channel[cid].chan = 0;
- link->port_state = FASTRPC_LINK_DISCONNECTED;
}
break;
default:
@@ -1960,10 +1954,9 @@ static int fastrpc_glink_open(int cid)
if (err)
goto bail;
- if (link->port_state == FASTRPC_LINK_CONNECTED ||
- link->port_state == FASTRPC_LINK_CONNECTING) {
+ VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
+ if (err)
goto bail;
- }
link->port_state = FASTRPC_LINK_CONNECTING;
cfg->priv = (void *)(uintptr_t)cid;
@@ -2113,7 +2106,9 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
fl->ssrcount = me->channel[cid].ssrcount;
if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
(me->channel[cid].chan == 0)) {
- fastrpc_glink_register(cid, me);
+ VERIFY(err, 0 == fastrpc_glink_register(cid, me));
+ if (err)
+ goto bail;
VERIFY(err, 0 == fastrpc_glink_open(cid));
if (err)
goto bail;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 1f0c111..89201e2 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -557,7 +557,7 @@ static int clk_update_vdd(struct clk_vdd_class *vdd_class)
pr_debug("Set Voltage level Min %d, Max %d\n", uv[new_base + i],
uv[max_lvl + i]);
rc = regulator_set_voltage(r[i], uv[new_base + i],
- uv[max_lvl + i]);
+ vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
if (rc)
goto set_voltage_fail;
@@ -578,11 +578,13 @@ static int clk_update_vdd(struct clk_vdd_class *vdd_class)
return rc;
enable_disable_fail:
- regulator_set_voltage(r[i], uv[cur_base + i], uv[max_lvl + i]);
+ regulator_set_voltage(r[i], uv[cur_base + i],
+ vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
set_voltage_fail:
for (i--; i >= 0; i--) {
- regulator_set_voltage(r[i], uv[cur_base + i], uv[max_lvl + i]);
+ regulator_set_voltage(r[i], uv[cur_base + i],
+ vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
regulator_disable(r[i]);
else if (level == 0)
@@ -693,6 +695,9 @@ static int clk_vdd_class_init(struct clk_vdd_class *vdd)
{
struct clk_handoff_vdd *v;
+ if (vdd->skip_handoff)
+ return 0;
+
list_for_each_entry(v, &clk_handoff_vdd_list, list) {
if (v->vdd_class == vdd)
return 0;
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index adbabea..03d3ab9 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -87,7 +87,7 @@ static const char * const cam_cc_parent_names_1[] = {
};
static struct pll_vco fabia_vco[] = {
- { 250000000, 2000000000, 0 },
+ { 249600000, 2000000000, 0 },
{ 125000000, 1000000000, 1 },
};
@@ -278,6 +278,7 @@ static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
};
static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
@@ -316,7 +317,6 @@ static const struct freq_tbl ftbl_cam_cc_cci_clk_src[] = {
{ }
};
-
static struct clk_rcg2 cam_cc_cci_clk_src = {
.cmd_rcgr = 0xb0d8,
.mnd_width = 8,
@@ -341,7 +341,7 @@ static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
F(320000000, P_CAM_CC_PLL2_OUT_ODD, 3, 0, 0),
- F(384000000, P_CAM_CC_PLL2_OUT_ODD, 2.5, 0, 0),
+ F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
{ }
};
@@ -430,6 +430,7 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
};
static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
@@ -490,13 +491,22 @@ static struct clk_rcg2 cam_cc_fd_core_clk_src = {
},
};
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 cam_cc_icp_clk_src = {
.cmd_rcgr = 0xb088,
.mnd_width = 0,
.hid_width = 5,
.enable_safe_config = true,
.parent_map = cam_cc_parent_map_0,
- .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "cam_cc_icp_clk_src",
.parent_names = cam_cc_parent_names_0,
@@ -513,6 +523,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
};
static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
@@ -544,6 +555,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
};
static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
@@ -655,6 +667,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
};
static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
@@ -733,6 +746,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
};
static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
F(384000000, P_CAM_CC_PLL2_OUT_ODD, 2.5, 0, 0),
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 3a0677f..2902f87 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -33,6 +33,8 @@
#include <linux/regmap.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
#include <soc/qcom/scm.h>
#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
@@ -42,6 +44,7 @@
#include "clk-voter.h"
#include "clk-debug.h"
+#define OSM_INIT_RATE 300000000UL
#define OSM_TABLE_SIZE 40
#define SINGLE_CORE 1
#define MAX_CLUSTER_CNT 3
@@ -236,7 +239,6 @@ struct clk_osm {
unsigned long pbases[NUM_BASES];
spinlock_t lock;
- u32 cpu_reg_mask;
u32 num_entries;
u32 cluster_num;
u32 core_num;
@@ -738,7 +740,6 @@ static struct clk_init_data osm_clks_init[] = {
static struct clk_osm l3_clk = {
.cluster_num = 0,
- .cpu_reg_mask = 0x0,
.hw.init = &osm_clks_init[0],
};
@@ -747,7 +748,6 @@ static DEFINE_CLK_VOTER(l3_cluster1_vote_clk, l3_clk, 0);
static struct clk_osm pwrcl_clk = {
.cluster_num = 1,
- .cpu_reg_mask = 0x300,
.hw.init = &osm_clks_init[1],
};
@@ -804,7 +804,6 @@ static struct clk_osm cpu3_pwrcl_clk = {
static struct clk_osm perfcl_clk = {
.cluster_num = 2,
- .cpu_reg_mask = 0x700,
.hw.init = &osm_clks_init[2],
};
@@ -888,67 +887,232 @@ static struct clk_osm *logical_cpu_to_clk(int cpu)
const u32 *cell;
u64 hwid;
static struct clk_osm *cpu_clk_map[NR_CPUS];
+ struct clk_osm *clk_cpu_map[] = {
+ &cpu0_pwrcl_clk,
+ &cpu1_pwrcl_clk,
+ &cpu2_pwrcl_clk,
+ &cpu3_pwrcl_clk,
+ &cpu4_perfcl_clk,
+ &cpu5_perfcl_clk,
+ &cpu6_perfcl_clk,
+ &cpu7_perfcl_clk,
+ };
- if (cpu_clk_map[cpu])
- return cpu_clk_map[cpu];
+ if (!cpu_clk_map[cpu]) {
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (!cpu_node)
+ return NULL;
- cpu_node = of_get_cpu_node(cpu, NULL);
- if (!cpu_node)
- goto fail;
-
- cell = of_get_property(cpu_node, "reg", NULL);
- if (!cell) {
- pr_err("%s: missing reg property\n", cpu_node->full_name);
- goto fail;
- }
-
- hwid = of_read_number(cell, of_n_addr_cells(cpu_node));
- if ((hwid | pwrcl_clk.cpu_reg_mask) == pwrcl_clk.cpu_reg_mask) {
- switch (cpu) {
- case 0:
- cpu_clk_map[cpu] = &cpu0_pwrcl_clk;
- break;
- case 1:
- cpu_clk_map[cpu] = &cpu1_pwrcl_clk;
- break;
- case 2:
- cpu_clk_map[cpu] = &cpu2_pwrcl_clk;
- break;
- case 3:
- cpu_clk_map[cpu] = &cpu3_pwrcl_clk;
- break;
- default:
- pr_err("unsupported CPU number for power cluster\n");
+ cell = of_get_property(cpu_node, "reg", NULL);
+ if (!cell) {
+ pr_err("%s: missing reg property\n",
+ cpu_node->full_name);
+ of_node_put(cpu_node);
return NULL;
}
- return cpu_clk_map[cpu];
- }
- if ((hwid | perfcl_clk.cpu_reg_mask) == perfcl_clk.cpu_reg_mask) {
- switch (cpu) {
- case 4:
- cpu_clk_map[cpu] = &cpu4_perfcl_clk;
- break;
- case 5:
- cpu_clk_map[cpu] = &cpu5_perfcl_clk;
- break;
- case 6:
- cpu_clk_map[cpu] = &cpu6_perfcl_clk;
- break;
- case 7:
- cpu_clk_map[cpu] = &cpu7_perfcl_clk;
- break;
- default:
- pr_err("unsupported CPU number for perf cluster\n");
+ hwid = of_read_number(cell, of_n_addr_cells(cpu_node));
+ hwid = (hwid >> 8) & 0xff;
+ of_node_put(cpu_node);
+ if (hwid >= ARRAY_SIZE(clk_cpu_map)) {
+ pr_err("unsupported CPU number - %d (hw_id - %llu)\n",
+ cpu, hwid);
return NULL;
}
- return cpu_clk_map[cpu];
+
+ cpu_clk_map[cpu] = clk_cpu_map[hwid];
}
-fail:
- return NULL;
+ return cpu_clk_map[cpu];
}
+static struct clk_osm *osm_configure_policy(struct cpufreq_policy *policy)
+{
+ int cpu;
+ struct clk_hw *parent, *c_parent;
+ struct clk_osm *first;
+ struct clk_osm *c, *n;
+
+ c = logical_cpu_to_clk(policy->cpu);
+ if (!c)
+ return NULL;
+
+ c_parent = clk_hw_get_parent(&c->hw);
+ if (!c_parent)
+ return NULL;
+
+ /*
+ * Don't put any other CPUs into the policy if we're doing
+ * per_core_dcvs
+ */
+ if (to_clk_osm(c_parent)->per_core_dcvs)
+ return c;
+
+ first = c;
+ /* Find CPUs that share the same clock domain */
+ for_each_possible_cpu(cpu) {
+ n = logical_cpu_to_clk(cpu);
+ if (!n)
+ continue;
+
+ parent = clk_hw_get_parent(&n->hw);
+ if (!parent)
+ return NULL;
+ if (parent != c_parent)
+ continue;
+
+ cpumask_set_cpu(cpu, policy->cpus);
+ if (n->core_num == 0)
+ first = n;
+ }
+
+ return first;
+}
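osm_configure_policy() walks every possible CPU and pulls each CPU whose OSM clock shares the same parent into the policy's cpumask, unless the parent has per_core_dcvs set, in which case every CPU keeps its own policy. A small standalone sketch of the resulting grouping follows; the 4+4 split (CPUs 0-3 on pwrcl, 4-7 on perfcl) is assumed for illustration only and is not stated in this hunk.

#include <stdio.h>

/* Hypothetical cluster map: parent clock domain per logical CPU. */
static const int cpu_domain[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* Collect the CPUs sharing policy_cpu's domain into a bitmask. */
static unsigned int related_cpus(int policy_cpu, int per_core_dcvs)
{
	unsigned int mask = 0;

	if (per_core_dcvs)
		return 1u << policy_cpu;	/* one CPU per policy */

	for (int cpu = 0; cpu < 8; cpu++)
		if (cpu_domain[cpu] == cpu_domain[policy_cpu])
			mask |= 1u << cpu;
	return mask;
}

int main(void)
{
	printf("policy for CPU5, shared DCVS:   0x%02x\n", related_cpus(5, 0));
	printf("policy for CPU5, per-core DCVS: 0x%02x\n", related_cpus(5, 1));
	return 0;
}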
+
+static void
+osm_set_index(struct clk_osm *c, unsigned int index, unsigned int num)
+{
+ clk_osm_write_reg(c, index, DCVS_PERF_STATE_DESIRED_REG(num), OSM_BASE);
+
+ /* Make sure the write goes through before proceeding */
+ clk_osm_mb(c, OSM_BASE);
+}
+
+static int
+osm_cpufreq_target_index(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct clk_osm *c = policy->driver_data;
+
+ osm_set_index(c, index, c->core_num);
+ return 0;
+}
+
+static unsigned int osm_cpufreq_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+ struct clk_osm *c;
+ u32 index;
+
+ if (!policy)
+ return 0;
+
+ c = policy->driver_data;
+ index = clk_osm_read_reg(c, DCVS_PERF_STATE_DESIRED_REG(c->core_num));
+
+ return policy->freq_table[index].frequency;
+}
+
+static int osm_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *table;
+ struct clk_osm *c, *parent;
+ struct clk_hw *p_hw;
+ int ret;
+ unsigned int i;
+ unsigned int xo_kHz;
+
+ c = osm_configure_policy(policy);
+ if (!c) {
+ pr_err("no clock for CPU%d\n", policy->cpu);
+ return -ENODEV;
+ }
+
+ p_hw = clk_hw_get_parent(&c->hw);
+ if (!p_hw) {
+ pr_err("no parent clock for CPU%d\n", policy->cpu);
+ return -ENODEV;
+ }
+
+ parent = to_clk_osm(p_hw);
+ c->vbases[OSM_BASE] = parent->vbases[OSM_BASE];
+
+ p_hw = clk_hw_get_parent(p_hw);
+ if (!p_hw) {
+ pr_err("no xo clock for CPU%d\n", policy->cpu);
+ return -ENODEV;
+ }
+ xo_kHz = clk_hw_get_rate(p_hw) / 1000;
+
+ table = kcalloc(OSM_TABLE_SIZE + 1, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ for (i = 0; i < OSM_TABLE_SIZE; i++) {
+ u32 data, src, div, lval, core_count;
+
+ data = clk_osm_read_reg(c, FREQ_REG + i * OSM_REG_SIZE);
+ src = (data & GENMASK(31, 30)) >> 30;
+ div = (data & GENMASK(29, 28)) >> 28;
+ lval = data & GENMASK(7, 0);
+ core_count = CORE_COUNT_VAL(data);
+
+ if (!src)
+ table[i].frequency = OSM_INIT_RATE / 1000;
+ else
+ table[i].frequency = xo_kHz * lval;
+ table[i].driver_data = table[i].frequency;
+
+ if (core_count != MAX_CORE_COUNT)
+ table[i].frequency = CPUFREQ_ENTRY_INVALID;
+
+ /* Two consecutive identical frequencies mark the end of the table */
+ if (i > 0 && table[i - 1].driver_data == table[i].driver_data) {
+ struct cpufreq_frequency_table *prev = &table[i - 1];
+
+ if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
+ prev->flags = CPUFREQ_BOOST_FREQ;
+ prev->frequency = prev->driver_data;
+ }
+
+ break;
+ }
+ }
+ table[i].frequency = CPUFREQ_TABLE_END;
+
+ ret = cpufreq_table_validate_and_show(policy, table);
+ if (ret) {
+ pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+ goto err;
+ }
+
+ policy->driver_data = c;
+
+ clk_osm_enable(&parent->hw);
+ udelay(300);
+
+ return 0;
+
+err:
+ kfree(table);
+ return ret;
+}
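Each OSM LUT entry read from FREQ_REG in osm_cpufreq_cpu_init() packs the clock source in bits [31:30], a divider field in bits [29:28] (read but not used in this hunk) and the PLL L-value in bits [7:0]; the CPU frequency is the XO rate times lval, except for source 0, which maps to the fixed 300 MHz init rate. A minimal user-space sketch of that decode follows; the core-count check and invalid-entry handling are left out because CORE_COUNT_VAL's bit layout is not shown here, and the sample LUT word is illustrative only.

#include <stdio.h>
#include <stdint.h>

#define OSM_INIT_RATE_KHZ	300000		/* 300 MHz, as in the patch */

/* Decode one OSM LUT word into a frequency in kHz. */
static unsigned int lut_to_khz(uint32_t data, unsigned int xo_khz)
{
	unsigned int src  = (data >> 30) & 0x3;		/* bits [31:30] */
	unsigned int lval =  data        & 0xff;	/* bits [7:0]   */

	return src ? xo_khz * lval : OSM_INIT_RATE_KHZ;
}

int main(void)
{
	unsigned int xo_khz = 19200;	/* typical 19.2 MHz XO */

	/* lval 0x50 = 80 -> 80 * 19200 kHz = 1.536 GHz (illustrative) */
	printf("%u kHz\n", lut_to_khz(0x40000050, xo_khz));
	/* src == 0 -> init rate */
	printf("%u kHz\n", lut_to_khz(0x00000050, xo_khz));
	return 0;
}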
+
+static int osm_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ kfree(policy->freq_table);
+ policy->freq_table = NULL;
+ return 0;
+}
+
+static struct freq_attr *osm_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ &cpufreq_freq_attr_scaling_boost_freqs,
+ NULL
+};
+
+static struct cpufreq_driver qcom_osm_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = osm_cpufreq_target_index,
+ .get = osm_cpufreq_get,
+ .init = osm_cpufreq_cpu_init,
+ .exit = osm_cpufreq_cpu_exit,
+ .name = "osm-cpufreq",
+ .attr = osm_cpufreq_attr,
+ .boost_enabled = true,
+};
+
static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec)
{
u64 temp;
@@ -2916,16 +3080,13 @@ static int clk_osm_acd_init(struct clk_osm *c)
return 0;
}
-static unsigned long init_rate = 300000000;
-
static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
{
- int rc = 0, cpu, i;
+ int rc = 0, i;
int pvs_ver = 0;
u32 pte_efuse, val;
int num_clks = ARRAY_SIZE(osm_qcom_clk_hws);
struct clk *ext_xo_clk, *clk;
- struct clk_osm *c, *parent;
struct device *dev = &pdev->dev;
struct clk_onecell_data *clk_data;
char l3speedbinstr[] = "qcom,l3-speedbin0-v0";
@@ -3233,7 +3394,7 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
get_online_cpus();
/* Set the L3 clock to run off GPLL0 and enable OSM for the domain */
- rc = clk_set_rate(l3_clk.hw.clk, init_rate);
+ rc = clk_set_rate(l3_clk.hw.clk, OSM_INIT_RATE);
if (rc) {
dev_err(&pdev->dev, "Unable to set init rate on L3 cluster, rc=%d\n",
rc);
@@ -3243,43 +3404,12 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
"Failed to enable clock for L3\n");
udelay(300);
- /* Set CPU clocks to run off GPLL0 and enable OSM for both domains */
- for_each_online_cpu(cpu) {
- c = logical_cpu_to_clk(cpu);
- if (!c) {
- pr_err("no clock device for CPU=%d\n", cpu);
- return -EINVAL;
- }
-
- parent = to_clk_osm(clk_hw_get_parent(&c->hw));
- if (!parent->per_core_dcvs) {
- if (cpu >= 0 && cpu <= 3)
- c = logical_cpu_to_clk(0);
- else if (cpu >= 4 && cpu <= 7)
- c = logical_cpu_to_clk(4);
- if (!c)
- return -EINVAL;
- }
-
- rc = clk_set_rate(c->hw.clk, init_rate);
- if (rc) {
- dev_err(&pdev->dev, "Unable to set init rate on %s, rc=%d\n",
- clk_hw_get_name(&parent->hw), rc);
- goto provider_err;
- }
- WARN(clk_prepare_enable(c->hw.clk),
- "Failed to enable OSM for %s\n",
- clk_hw_get_name(&parent->hw));
- udelay(300);
+ /* Configure default rate to lowest frequency */
+ for (i = 0; i < MAX_CORE_COUNT; i++) {
+ osm_set_index(&pwrcl_clk, 0, i);
+ osm_set_index(&perfcl_clk, 0, i);
}
- /*
- * Add always-on votes for the CPU cluster clocks since we do not want
- * to re-enable OSM at any point.
- */
- clk_prepare_enable(pwrcl_clk.hw.clk);
- clk_prepare_enable(perfcl_clk.hw.clk);
-
populate_opp_table(pdev);
populate_debugfs_dir(&l3_clk);
populate_debugfs_dir(&pwrcl_clk);
@@ -3287,18 +3417,24 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
register_cpu_cycle_counter_cb(&cb);
- pr_info("OSM driver inited\n");
put_online_cpus();
+ rc = cpufreq_register_driver(&qcom_osm_cpufreq_driver);
+ if (rc)
+ goto provider_err;
+
+ pr_info("OSM CPUFreq driver inited\n");
return 0;
+
provider_err:
if (clk_data)
devm_kfree(&pdev->dev, clk_data->clks);
clk_err:
devm_kfree(&pdev->dev, clk_data);
exit:
- dev_err(&pdev->dev, "OSM driver failed to initialize, rc=%d\n", rc);
- panic("Unable to Setup OSM");
+ dev_err(&pdev->dev, "OSM CPUFreq driver failed to initialize, rc=%d\n",
+ rc);
+ panic("Unable to Setup OSM CPUFreq");
}
static const struct of_device_id match_table[] = {
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 2742ab3..4e0711d 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -197,7 +197,7 @@ static struct clk_dummy measure_only_ipa_2x_clk = {
};
static struct pll_vco fabia_vco[] = {
- { 250000000, 2000000000, 0 },
+ { 249600000, 2000000000, 0 },
{ 125000000, 1000000000, 1 },
};
@@ -790,8 +790,8 @@ static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
F(400000, P_BI_TCXO, 12, 1, 4),
F(9600000, P_BI_TCXO, 2, 0, 0),
F(19200000, P_BI_TCXO, 1, 0, 0),
- F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
- F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
{ }
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index a5548e0..f2fa577 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -564,6 +564,9 @@ static int gpu_cc_gfx_sdm845_probe(struct platform_device *pdev)
return PTR_ERR(vdd_gfx.regulator[0]);
}
+ /* Avoid turning on the rail during clock registration */
+ vdd_gfx.skip_handoff = true;
+
clk_fabia_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
ret = qcom_cc_really_probe(pdev, &gpu_cc_gfx_sdm845_desc, regmap);
diff --git a/drivers/clk/qcom/mdss/Makefile b/drivers/clk/qcom/mdss/Makefile
index d183393..87feee6 100644
--- a/drivers/clk/qcom/mdss/Makefile
+++ b/drivers/clk/qcom/mdss/Makefile
@@ -1,3 +1,6 @@
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll-util.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-10nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-10nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-10nm-util.o
+
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c
new file mode 100644
index 0000000..eb2092a
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c
@@ -0,0 +1,766 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/usb/usbpd.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-10nm.h"
+
+#define DP_PHY_REVISION_ID0 0x0000
+#define DP_PHY_REVISION_ID1 0x0004
+#define DP_PHY_REVISION_ID2 0x0008
+#define DP_PHY_REVISION_ID3 0x000C
+
+#define DP_PHY_CFG 0x0010
+#define DP_PHY_PD_CTL 0x0018
+#define DP_PHY_MODE 0x001C
+
+#define DP_PHY_AUX_CFG0 0x0020
+#define DP_PHY_AUX_CFG1 0x0024
+#define DP_PHY_AUX_CFG2 0x0028
+#define DP_PHY_AUX_CFG3 0x002C
+#define DP_PHY_AUX_CFG4 0x0030
+#define DP_PHY_AUX_CFG5 0x0034
+#define DP_PHY_AUX_CFG6 0x0038
+#define DP_PHY_AUX_CFG7 0x003C
+#define DP_PHY_AUX_CFG8 0x0040
+#define DP_PHY_AUX_CFG9 0x0044
+#define DP_PHY_AUX_INTERRUPT_MASK 0x0048
+#define DP_PHY_AUX_INTERRUPT_CLEAR 0x004C
+#define DP_PHY_AUX_BIST_CFG 0x0050
+
+#define DP_PHY_VCO_DIV 0x0064
+#define DP_PHY_TX0_TX1_LANE_CTL 0x006C
+#define DP_PHY_TX2_TX3_LANE_CTL 0x0088
+
+#define DP_PHY_SPARE0 0x00AC
+#define DP_PHY_STATUS 0x00C0
+
+/* Tx registers */
+#define TXn_BIST_MODE_LANENO 0x0000
+#define TXn_CLKBUF_ENABLE 0x0008
+#define TXn_TX_EMP_POST1_LVL 0x000C
+
+#define TXn_TX_DRV_LVL 0x001C
+
+#define TXn_RESET_TSYNC_EN 0x0024
+#define TXn_PRE_STALL_LDO_BOOST_EN 0x0028
+#define TXn_TX_BAND 0x002C
+#define TXn_SLEW_CNTL 0x0030
+#define TXn_INTERFACE_SELECT 0x0034
+
+#define TXn_RES_CODE_LANE_TX 0x003C
+#define TXn_RES_CODE_LANE_RX 0x0040
+#define TXn_RES_CODE_LANE_OFFSET_TX 0x0044
+#define TXn_RES_CODE_LANE_OFFSET_RX 0x0048
+
+#define TXn_DEBUG_BUS_SEL 0x0058
+#define TXn_TRANSCEIVER_BIAS_EN 0x005C
+#define TXn_HIGHZ_DRVR_EN 0x0060
+#define TXn_TX_POL_INV 0x0064
+#define TXn_PARRATE_REC_DETECT_IDLE_EN 0x0068
+
+#define TXn_LANE_MODE_1 0x008C
+
+#define TXn_TRAN_DRVR_EMP_EN 0x00C0
+#define TXn_TX_INTERFACE_MODE 0x00C4
+
+#define TXn_VMODE_CTRL1 0x00F0
+
+/* PLL register offset */
+#define QSERDES_COM_ATB_SEL1 0x0000
+#define QSERDES_COM_ATB_SEL2 0x0004
+#define QSERDES_COM_FREQ_UPDATE 0x0008
+#define QSERDES_COM_BG_TIMER 0x000C
+#define QSERDES_COM_SSC_EN_CENTER 0x0010
+#define QSERDES_COM_SSC_ADJ_PER1 0x0014
+#define QSERDES_COM_SSC_ADJ_PER2 0x0018
+#define QSERDES_COM_SSC_PER1 0x001C
+#define QSERDES_COM_SSC_PER2 0x0020
+#define QSERDES_COM_SSC_STEP_SIZE1 0x0024
+#define QSERDES_COM_SSC_STEP_SIZE2 0x0028
+#define QSERDES_COM_POST_DIV 0x002C
+#define QSERDES_COM_POST_DIV_MUX 0x0030
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x0034
+#define QSERDES_COM_CLK_ENABLE1 0x0038
+#define QSERDES_COM_SYS_CLK_CTRL 0x003C
+#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x0040
+#define QSERDES_COM_PLL_EN 0x0044
+#define QSERDES_COM_PLL_IVCO 0x0048
+#define QSERDES_COM_CMN_IETRIM 0x004C
+#define QSERDES_COM_CMN_IPTRIM 0x0050
+
+#define QSERDES_COM_CP_CTRL_MODE0 0x0060
+#define QSERDES_COM_CP_CTRL_MODE1 0x0064
+#define QSERDES_COM_PLL_RCTRL_MODE0 0x0068
+#define QSERDES_COM_PLL_RCTRL_MODE1 0x006C
+#define QSERDES_COM_PLL_CCTRL_MODE0 0x0070
+#define QSERDES_COM_PLL_CCTRL_MODE1 0x0074
+#define QSERDES_COM_PLL_CNTRL 0x0078
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x007C
+#define QSERDES_COM_SYSCLK_EN_SEL 0x0080
+#define QSERDES_COM_CML_SYSCLK_SEL 0x0084
+#define QSERDES_COM_RESETSM_CNTRL 0x0088
+#define QSERDES_COM_RESETSM_CNTRL2 0x008C
+#define QSERDES_COM_LOCK_CMP_EN 0x0090
+#define QSERDES_COM_LOCK_CMP_CFG 0x0094
+#define QSERDES_COM_LOCK_CMP1_MODE0 0x0098
+#define QSERDES_COM_LOCK_CMP2_MODE0 0x009C
+#define QSERDES_COM_LOCK_CMP3_MODE0 0x00A0
+
+#define QSERDES_COM_DEC_START_MODE0 0x00B0
+#define QSERDES_COM_DEC_START_MODE1 0x00B4
+#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x00B8
+#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x00BC
+#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x00C0
+#define QSERDES_COM_DIV_FRAC_START1_MODE1 0x00C4
+#define QSERDES_COM_DIV_FRAC_START2_MODE1 0x00C8
+#define QSERDES_COM_DIV_FRAC_START3_MODE1 0x00CC
+#define QSERDES_COM_INTEGLOOP_INITVAL 0x00D0
+#define QSERDES_COM_INTEGLOOP_EN 0x00D4
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x00D8
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x00DC
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x00E0
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x00E4
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x00E8
+#define QSERDES_COM_VCO_TUNE_CTRL 0x00EC
+#define QSERDES_COM_VCO_TUNE_MAP 0x00F0
+
+#define QSERDES_COM_CMN_STATUS 0x0124
+#define QSERDES_COM_RESET_SM_STATUS 0x0128
+
+#define QSERDES_COM_CLK_SEL 0x0138
+#define QSERDES_COM_HSCLK_SEL 0x013C
+
+#define QSERDES_COM_CORECLK_DIV_MODE0 0x0148
+
+#define QSERDES_COM_SW_RESET 0x0150
+#define QSERDES_COM_CORE_CLK_EN 0x0154
+#define QSERDES_COM_C_READY_STATUS 0x0158
+#define QSERDES_COM_CMN_CONFIG 0x015C
+
+#define QSERDES_COM_SVS_MODE_CLK_SEL 0x0164
+
+#define DP_PHY_PLL_POLL_SLEEP_US 500
+#define DP_PHY_PLL_POLL_TIMEOUT_US 10000
+
+#define DP_VCO_RATE_8100MHZDIV1000 8100000UL
+#define DP_VCO_RATE_9720MHZDIV1000 9720000UL
+#define DP_VCO_RATE_10800MHZDIV1000 10800000UL
+
+int dp_mux_set_parent_10nm(void *context, unsigned int reg, unsigned int val)
+{
+ struct mdss_pll_resources *dp_res = context;
+ int rc;
+ u32 auxclk_div;
+
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("Failed to enable mdss DP PLL resources\n");
+ return rc;
+ }
+
+ auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+ auxclk_div &= ~0x03; /* bits 0 to 1 */
+
+ if (val == 0) /* mux parent index = 0 */
+ auxclk_div |= 1;
+ else if (val == 1) /* mux parent index = 1 */
+ auxclk_div |= 2;
+ else if (val == 2) /* mux parent index = 2 */
+ auxclk_div |= 0;
+
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_VCO_DIV, auxclk_div);
+ /* Make sure the PHY register writes are done */
+ wmb();
+ pr_debug("%s: mux=%d auxclk_div=%x\n", __func__, val, auxclk_div);
+
+ mdss_pll_resource_enable(dp_res, false);
+
+ return 0;
+}
+
+int dp_mux_get_parent_10nm(void *context, unsigned int reg, unsigned int *val)
+{
+ int rc;
+ u32 auxclk_div = 0;
+ struct mdss_pll_resources *dp_res = context;
+
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("Failed to enable dp_res resources\n");
+ return rc;
+ }
+
+ auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+ auxclk_div &= 0x03;
+
+ if (auxclk_div == 1) /* Default divider */
+ *val = 0;
+ else if (auxclk_div == 2)
+ *val = 1;
+ else if (auxclk_div == 0)
+ *val = 2;
+
+ mdss_pll_resource_enable(dp_res, false);
+
+ pr_debug("%s: auxclk_div=%d, val=%d\n", __func__, auxclk_div, *val);
+
+ return 0;
+}
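dp_mux_set_parent_10nm() and dp_mux_get_parent_10nm() translate between the clock framework's mux parent index and the two-bit field in DP_PHY_VCO_DIV: parent 0 writes 1, parent 1 writes 2 and parent 2 writes 0, with the get path applying the inverse mapping. A standalone sketch of that translation, not part of the patch:

#include <stdio.h>

/* parent index -> DP_PHY_VCO_DIV[1:0] field, mirroring the set path */
static unsigned int parent_to_field(unsigned int parent)
{
	static const unsigned int field[] = { 1, 2, 0 };

	return parent < 3 ? field[parent] : 0;
}

/* DP_PHY_VCO_DIV[1:0] field -> parent index, mirroring the get path */
static unsigned int field_to_parent(unsigned int field)
{
	static const unsigned int parent[] = { 2, 0, 1 };

	return (field & 0x3) < 3 ? parent[field & 0x3] : 0;
}

int main(void)
{
	for (unsigned int p = 0; p < 3; p++)
		printf("parent %u -> field %u -> parent %u\n",
		       p, parent_to_field(p), field_to_parent(parent_to_field(p)));
	return 0;
}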
+
+static int dp_vco_pll_init_db_10nm(struct dp_pll_db *pdb,
+ unsigned long rate)
+{
+ struct mdss_pll_resources *dp_res = pdb->pll;
+ u32 spare_value = 0;
+
+ spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+ pdb->lane_cnt = spare_value & 0x0F;
+ pdb->orientation = (spare_value & 0xF0) >> 4;
+
+ pr_debug("%s: spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
+ __func__, spare_value, pdb->lane_cnt, pdb->orientation);
+
+ switch (rate) {
+ case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
+ pr_debug("%s: VCO rate: %ld\n", __func__,
+ DP_VCO_RATE_9720MHZDIV1000);
+ pdb->hsclk_sel = 0x0c;
+ pdb->dec_start_mode0 = 0x69;
+ pdb->div_frac_start1_mode0 = 0x00;
+ pdb->div_frac_start2_mode0 = 0x80;
+ pdb->div_frac_start3_mode0 = 0x07;
+ pdb->integloop_gain0_mode0 = 0x3f;
+ pdb->integloop_gain1_mode0 = 0x00;
+ pdb->vco_tune_map = 0x00;
+ pdb->lock_cmp1_mode0 = 0x6f;
+ pdb->lock_cmp2_mode0 = 0x08;
+ pdb->lock_cmp3_mode0 = 0x00;
+ pdb->phy_vco_div = 0x1;
+ pdb->lock_cmp_en = 0x00;
+ break;
+ case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
+ pr_debug("%s: VCO rate: %ld\n", __func__,
+ DP_VCO_RATE_10800MHZDIV1000);
+ pdb->hsclk_sel = 0x04;
+ pdb->dec_start_mode0 = 0x69;
+ pdb->div_frac_start1_mode0 = 0x00;
+ pdb->div_frac_start2_mode0 = 0x80;
+ pdb->div_frac_start3_mode0 = 0x07;
+ pdb->integloop_gain0_mode0 = 0x3f;
+ pdb->integloop_gain1_mode0 = 0x00;
+ pdb->vco_tune_map = 0x00;
+ pdb->lock_cmp1_mode0 = 0x0f;
+ pdb->lock_cmp2_mode0 = 0x0e;
+ pdb->lock_cmp3_mode0 = 0x00;
+ pdb->phy_vco_div = 0x1;
+ pdb->lock_cmp_en = 0x00;
+ break;
+ case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
+ pr_debug("%s: VCO rate: %ld\n", __func__,
+ DP_VCO_RATE_10800MHZDIV1000);
+ pdb->hsclk_sel = 0x00;
+ pdb->dec_start_mode0 = 0x8c;
+ pdb->div_frac_start1_mode0 = 0x00;
+ pdb->div_frac_start2_mode0 = 0x00;
+ pdb->div_frac_start3_mode0 = 0x0a;
+ pdb->integloop_gain0_mode0 = 0x3f;
+ pdb->integloop_gain1_mode0 = 0x00;
+ pdb->vco_tune_map = 0x00;
+ pdb->lock_cmp1_mode0 = 0x1f;
+ pdb->lock_cmp2_mode0 = 0x1c;
+ pdb->lock_cmp3_mode0 = 0x00;
+ pdb->phy_vco_div = 0x2;
+ pdb->lock_cmp_en = 0x00;
+ break;
+ case DP_VCO_HSCLK_RATE_8100MHZDIV1000:
+ pr_debug("%s: VCO rate: %ld\n", __func__,
+ DP_VCO_RATE_8100MHZDIV1000);
+ pdb->hsclk_sel = 0x03;
+ pdb->dec_start_mode0 = 0x69;
+ pdb->div_frac_start1_mode0 = 0x00;
+ pdb->div_frac_start2_mode0 = 0x80;
+ pdb->div_frac_start3_mode0 = 0x07;
+ pdb->integloop_gain0_mode0 = 0x3f;
+ pdb->integloop_gain1_mode0 = 0x00;
+ pdb->vco_tune_map = 0x00;
+ pdb->lock_cmp1_mode0 = 0x2f;
+ pdb->lock_cmp2_mode0 = 0x2a;
+ pdb->lock_cmp3_mode0 = 0x00;
+ pdb->phy_vco_div = 0x0;
+ pdb->lock_cmp_en = 0x08;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int dp_config_vco_rate_10nm(struct dp_pll_vco_clk *vco,
+ unsigned long rate)
+{
+ u32 res = 0;
+ struct mdss_pll_resources *dp_res = vco->priv;
+ struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
+
+ res = dp_vco_pll_init_db_10nm(pdb, rate);
+ if (res) {
+ pr_err("VCO Init DB failed\n");
+ return res;
+ }
+
+ if (pdb->lane_cnt != 4) {
+ if (pdb->orientation == ORIENTATION_CC2)
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x6d);
+ else
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x75);
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x7d);
+ }
+
+ /* Make sure the PHY register writes are done */
+ wmb();
+
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x02);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_ENABLE1, 0x0e);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_SEL, 0x30);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CMN_CONFIG, 0x02);
+
+ /* Different for each clock rate */
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_HSCLK_SEL, pdb->hsclk_sel);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DEC_START_MODE0, pdb->dec_start_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START1_MODE0, pdb->div_frac_start1_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START2_MODE0, pdb->div_frac_start2_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START3_MODE0, pdb->div_frac_start3_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_INTEGLOOP_GAIN0_MODE0, pdb->integloop_gain0_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_INTEGLOOP_GAIN1_MODE0, pdb->integloop_gain1_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_VCO_TUNE_MAP, pdb->vco_tune_map);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP1_MODE0, pdb->lock_cmp1_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP2_MODE0, pdb->lock_cmp2_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP3_MODE0, pdb->lock_cmp3_mode0);
+ /* Make sure the PLL register writes are done */
+ wmb();
+
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BG_TIMER, 0x0a);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORECLK_DIV_MODE0, 0x0a);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_VCO_TUNE_CTRL, 0x00);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORE_CLK_EN, 0x1f);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_IVCO, 0x07);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP_EN, pdb->lock_cmp_en);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CP_CTRL_MODE0, 0x06);
+ /* Make sure the PHY register writes are done */
+ wmb();
+
+ if (pdb->orientation == ORIENTATION_CC2)
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x4c);
+ else
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x5c);
+ /* Make sure the PLL register writes are done */
+ wmb();
+
+ /* TX Lane configuration */
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_TX0_TX1_LANE_CTL, 0x05);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_TX2_TX3_LANE_CTL, 0x05);
+
+ /* TX-0 register configuration */
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_VMODE_CTRL1, 0x40);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_INTERFACE_SELECT, 0x3d);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_CLKBUF_ENABLE, 0x0f);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RESET_TSYNC_EN, 0x03);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRAN_DRVR_EMP_EN, 0x03);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+ TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_INTERFACE_MODE, 0x00);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_BAND, 0x4);
+
+ /* TX-1 register configuration */
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_VMODE_CTRL1, 0x40);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_INTERFACE_SELECT, 0x3d);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_CLKBUF_ENABLE, 0x0f);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RESET_TSYNC_EN, 0x03);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRAN_DRVR_EMP_EN, 0x03);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+ TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_INTERFACE_MODE, 0x00);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_BAND, 0x4);
+ /* Make sure the PHY register writes are done */
+ wmb();
+
+ /* dependent on the vco frequency */
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_VCO_DIV, pdb->phy_vco_div);
+
+ return res;
+}
+
+static bool dp_10nm_pll_lock_status(struct mdss_pll_resources *dp_res)
+{
+ u32 status;
+ bool pll_locked;
+
+ /* poll for PLL lock status */
+ if (readl_poll_timeout_atomic((dp_res->pll_base +
+ QSERDES_COM_C_READY_STATUS),
+ status,
+ ((status & BIT(0)) > 0),
+ DP_PHY_PLL_POLL_SLEEP_US,
+ DP_PHY_PLL_POLL_TIMEOUT_US)) {
+ pr_err("%s: C_READY status is not high. Status=%x\n",
+ __func__, status);
+ pll_locked = false;
+ } else {
+ pll_locked = true;
+ }
+
+ return pll_locked;
+}
+
+static bool dp_10nm_phy_rdy_status(struct mdss_pll_resources *dp_res)
+{
+ u32 status;
+ bool phy_ready = true;
+
+ /* poll for PHY ready status */
+ if (readl_poll_timeout_atomic((dp_res->phy_base +
+ DP_PHY_STATUS),
+ status,
+ ((status & (BIT(1))) > 0),
+ DP_PHY_PLL_POLL_SLEEP_US,
+ DP_PHY_PLL_POLL_TIMEOUT_US)) {
+ pr_err("%s: Phy_ready is not high. Status=%x\n",
+ __func__, status);
+ phy_ready = false;
+ }
+
+ return phy_ready;
+}
+
+static int dp_pll_enable_10nm(struct clk_hw *hw)
+{
+ int rc = 0;
+ struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+ struct mdss_pll_resources *dp_res = vco->priv;
+ struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
+ u32 bias_en, drvr_en;
+
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG2, 0x04);
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x05);
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x09);
+ wmb(); /* Make sure the PHY register writes are done */
+
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_RESETSM_CNTRL, 0x20);
+ wmb(); /* Make sure the PLL register writes are done */
+
+ if (!dp_10nm_pll_lock_status(dp_res)) {
+ rc = -EINVAL;
+ goto lock_err;
+ }
+
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
+ /* Make sure the PHY register writes are done */
+ wmb();
+ /* poll for PHY ready status */
+ if (!dp_10nm_phy_rdy_status(dp_res)) {
+ rc = -EINVAL;
+ goto lock_err;
+ }
+
+ pr_debug("%s: PLL is locked\n", __func__);
+
+ if (pdb->lane_cnt == 1) {
+ bias_en = 0x3e;
+ drvr_en = 0x13;
+ } else {
+ bias_en = 0x3f;
+ drvr_en = 0x10;
+ }
+
+ if (pdb->lane_cnt != 4) {
+ if (pdb->orientation == ORIENTATION_CC1) {
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+ TXn_HIGHZ_DRVR_EN, drvr_en);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+ TXn_TRANSCEIVER_BIAS_EN, bias_en);
+ } else {
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+ TXn_HIGHZ_DRVR_EN, drvr_en);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+ TXn_TRANSCEIVER_BIAS_EN, bias_en);
+ }
+ } else {
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_HIGHZ_DRVR_EN, drvr_en);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+ TXn_TRANSCEIVER_BIAS_EN, bias_en);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_HIGHZ_DRVR_EN, drvr_en);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+ TXn_TRANSCEIVER_BIAS_EN, bias_en);
+ }
+
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_POL_INV, 0x0a);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_POL_INV, 0x0a);
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x18);
+ udelay(2000);
+
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
+
+ /*
+ * Make sure all the register writes are completed before
+ * doing any other operation
+ */
+ wmb();
+
+ /* poll for PHY ready status */
+ if (!dp_10nm_phy_rdy_status(dp_res)) {
+ rc = -EINVAL;
+ goto lock_err;
+ }
+
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_DRV_LVL, 0x38);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_DRV_LVL, 0x38);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_EMP_POST1_LVL, 0x20);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_EMP_POST1_LVL, 0x20);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x06);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x06);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x07);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x07);
+ /* Make sure the PHY register writes are done */
+ wmb();
+
+lock_err:
+ return rc;
+}
+
+static int dp_pll_disable_10nm(struct clk_hw *hw)
+{
+ int rc = 0;
+ struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+ struct mdss_pll_resources *dp_res = vco->priv;
+
+ /* Assert DP PHY power down */
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x2);
+ /*
+ * Make sure all the register writes to disable PLL are
+ * completed before doing any other operation
+ */
+ wmb();
+
+ return rc;
+}
+
+
+int dp_vco_prepare_10nm(struct clk_hw *hw)
+{
+ int rc = 0;
+ struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+ struct mdss_pll_resources *dp_res = vco->priv;
+
+ pr_debug("rate=%ld\n", vco->rate);
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("Failed to enable mdss DP pll resources\n");
+ goto error;
+ }
+
+ if ((dp_res->vco_cached_rate != 0)
+ && (dp_res->vco_cached_rate == vco->rate)) {
+ rc = vco->hw.init->ops->set_rate(hw,
+ dp_res->vco_cached_rate, dp_res->vco_cached_rate);
+ if (rc) {
+ pr_err("index=%d vco_set_rate failed. rc=%d\n",
+ rc, dp_res->index);
+ mdss_pll_resource_enable(dp_res, false);
+ goto error;
+ }
+ }
+
+ rc = dp_pll_enable_10nm(hw);
+ if (rc) {
+ mdss_pll_resource_enable(dp_res, false);
+ pr_err("ndx=%d failed to enable dp pll\n",
+ dp_res->index);
+ goto error;
+ }
+
+ mdss_pll_resource_enable(dp_res, false);
+error:
+ return rc;
+}
+
+void dp_vco_unprepare_10nm(struct clk_hw *hw)
+{
+ struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+ struct mdss_pll_resources *dp_res = vco->priv;
+
+ if (!dp_res) {
+ pr_err("Invalid input parameter\n");
+ return;
+ }
+
+ if (!dp_res->pll_on &&
+ mdss_pll_resource_enable(dp_res, true)) {
+ pr_err("pll resource can't be enabled\n");
+ return;
+ }
+ dp_res->vco_cached_rate = vco->rate;
+ dp_pll_disable_10nm(hw);
+
+ dp_res->handoff_resources = false;
+ mdss_pll_resource_enable(dp_res, false);
+ dp_res->pll_on = false;
+}
+
+int dp_vco_set_rate_10nm(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+ struct mdss_pll_resources *dp_res = vco->priv;
+ int rc;
+
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("pll resource can't be enabled\n");
+ return rc;
+ }
+
+ pr_debug("DP lane CLK rate=%ld\n", rate);
+
+ rc = dp_config_vco_rate_10nm(vco, rate);
+ if (rc)
+ pr_err("%s: Failed to set clk rate\n", __func__);
+
+ mdss_pll_resource_enable(dp_res, false);
+
+ vco->rate = rate;
+
+ return 0;
+}
+
+unsigned long dp_vco_recalc_rate_10nm(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+ int rc;
+ u32 div, hsclk_div, link_clk_div = 0;
+ u64 vco_rate;
+ struct mdss_pll_resources *dp_res = vco->priv;
+
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("Failed to enable mdss DP pll=%d\n", dp_res->index);
+ return rc;
+ }
+
+ div = MDSS_PLL_REG_R(dp_res->pll_base, QSERDES_COM_HSCLK_SEL);
+ div &= 0x0f;
+
+ if (div == 12)
+ hsclk_div = 6; /* Default */
+ else if (div == 4)
+ hsclk_div = 4;
+ else if (div == 0)
+ hsclk_div = 2;
+ else if (div == 3)
+ hsclk_div = 1;
+ else {
+ pr_debug("unknown divider. forcing to default\n");
+ hsclk_div = 5;
+ }
+
+ div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_AUX_CFG2);
+ div >>= 2;
+
+ if ((div & 0x3) == 0)
+ link_clk_div = 5;
+ else if ((div & 0x3) == 1)
+ link_clk_div = 10;
+ else if ((div & 0x3) == 2)
+ link_clk_div = 20;
+ else
+ pr_err("%s: unsupported div. Phy_mode: %d\n", __func__, div);
+
+ if (link_clk_div == 20) {
+ vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+ } else {
+ if (hsclk_div == 6)
+ vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
+ else if (hsclk_div == 4)
+ vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+ else if (hsclk_div == 2)
+ vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
+ else
+ vco_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000;
+ }
+
+ pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+ mdss_pll_resource_enable(dp_res, false);
+
+ dp_res->vco_cached_rate = vco->rate = vco_rate;
+ return (unsigned long)vco_rate;
+}
+
+long dp_vco_round_rate_10nm(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long rrate = rate;
+ struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+
+ if (rate <= vco->min_rate)
+ rrate = vco->min_rate;
+ else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
+ rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+ else if (rate <= DP_VCO_HSCLK_RATE_5400MHZDIV1000)
+ rrate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
+ else
+ rrate = vco->max_rate;
+
+ pr_debug("%s: rrate=%ld\n", __func__, rrate);
+
+ *parent_rate = rrate;
+ return rrate;
+}
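dp_vco_round_rate_10nm() snaps a requested rate up to the nearest supported DP link rate: anything at or below the minimum becomes 1.62 GHz, then 2.7 GHz, then 5.4 GHz, and everything above that the 8.1 GHz maximum. A quick standalone check of that snapping, in the same kHz-style units the driver uses (the min/max values are taken from dp_vco_clk later in this patch):

#include <stdio.h>

#define RATE_1620	1620000UL	/* DP_VCO_HSCLK_RATE_1620MHZDIV1000 */
#define RATE_2700	2700000UL
#define RATE_5400	5400000UL
#define RATE_8100	8100000UL

static unsigned long dp_round_rate(unsigned long rate)
{
	if (rate <= RATE_1620)
		return RATE_1620;
	else if (rate <= RATE_2700)
		return RATE_2700;
	else if (rate <= RATE_5400)
		return RATE_5400;
	return RATE_8100;
}

int main(void)
{
	unsigned long req[] = { 1000000, 2000000, 2700000, 6000000 };

	for (unsigned int i = 0; i < 4; i++)
		printf("%lu -> %lu\n", req[i], dp_round_rate(req[i]));
	return 0;
}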
+
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c
new file mode 100644
index 0000000..e30ef82
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c
@@ -0,0 +1,310 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Display Port PLL driver block diagram for branch clocks
+ *
+ * +------------------------------+
+ * | DP_VCO_CLK |
+ * | |
+ * | +-------------------+ |
+ * | | (DP PLL/VCO) | |
+ * | +---------+---------+ |
+ * | v |
+ * | +----------+-----------+ |
+ * | | hsclk_divsel_clk_src | |
+ * | +----------+-----------+ |
+ * +------------------------------+
+ * |
+ * +------------<---------v------------>----------+
+ * | |
+ * +-----v------------+ |
+ * | dp_link_clk_src | |
+ * | divsel_ten | |
+ * +---------+--------+ |
+ * | |
+ * | |
+ * v v
+ * Input to DISPCC block |
+ * for link clk, crypto clk |
+ * and interface clock |
+ * |
+ * |
+ * +--------<------------+-----------------+---<---+
+ * | | |
+ * +-------v------+ +--------v-----+ +--------v------+
+ * | vco_divided | | vco_divided | | vco_divided |
+ * | _clk_src | | _clk_src | | _clk_src |
+ * | | | | | |
+ * |divsel_six | | divsel_two | | divsel_four |
+ * +-------+------+ +-----+--------+ +--------+------+
+ * | | |
+ * v------->----------v-------------<------v
+ * |
+ * +----------+---------+
+ * | vco_divided_clk |
+ * | _src_mux |
+ * +---------+----------+
+ * |
+ * v
+ * Input to DISPCC block
+ * for DP pixel clock
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-10nm.h"
+
+static struct dp_pll_db dp_pdb;
+static struct clk_ops mux_clk_ops;
+
+static struct regmap_config dp_pll_10nm_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x910,
+};
+
+static struct regmap_bus dp_pixel_mux_regmap_ops = {
+ .reg_write = dp_mux_set_parent_10nm,
+ .reg_read = dp_mux_get_parent_10nm,
+};
+
+/* Op structures */
+static const struct clk_ops dp_10nm_vco_clk_ops = {
+ .recalc_rate = dp_vco_recalc_rate_10nm,
+ .set_rate = dp_vco_set_rate_10nm,
+ .round_rate = dp_vco_round_rate_10nm,
+ .prepare = dp_vco_prepare_10nm,
+ .unprepare = dp_vco_unprepare_10nm,
+};
+
+static struct dp_pll_vco_clk dp_vco_clk = {
+ .min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
+ .max_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "dp_vco_clk",
+ .parent_names = (const char *[]){ "xo_board" },
+ .num_parents = 1,
+ .ops = &dp_10nm_vco_clk_ops,
+ },
+};
+
+static struct clk_fixed_factor dp_link_clk_divsel_ten = {
+ .div = 10,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dp_link_clk_divsel_ten",
+ .parent_names =
+ (const char *[]){ "dp_vco_clk" },
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dp_vco_divsel_two_clk_src = {
+ .div = 2,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dp_vco_divsel_two_clk_src",
+ .parent_names =
+ (const char *[]){ "dp_vco_clk" },
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dp_vco_divsel_four_clk_src = {
+ .div = 4,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dp_vco_divsel_four_clk_src",
+ .parent_names =
+ (const char *[]){ "dp_vco_clk" },
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dp_vco_divsel_six_clk_src = {
+ .div = 6,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dp_vco_divsel_six_clk_src",
+ .parent_names =
+ (const char *[]){ "dp_vco_clk" },
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+
+static int clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ int ret = 0;
+
+ ret = __clk_mux_determine_rate_closest(hw, req);
+ if (ret)
+ return ret;
+
+ /* Set the new parent of mux if there is a new valid parent */
+ if (hw->clk && req->best_parent_hw->clk)
+ clk_set_parent(hw->clk, req->best_parent_hw->clk);
+
+ return 0;
+}
+
+static unsigned long mux_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk *div_clk = NULL, *vco_clk = NULL;
+ struct dp_pll_vco_clk *vco = NULL;
+
+ div_clk = clk_get_parent(hw->clk);
+ if (!div_clk)
+ return 0;
+
+ vco_clk = clk_get_parent(div_clk);
+ if (!vco_clk)
+ return 0;
+
+ vco = to_dp_vco_hw(__clk_get_hw(vco_clk));
+ if (!vco)
+ return 0;
+
+ if (vco->rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000)
+ return (vco->rate / 6);
+ else if (vco->rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
+ return (vco->rate / 4);
+ else
+ return (vco->rate / 2);
+}
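mux_recalc_rate() picks the divider so the divided VCO output lands on the pixel-clock source rate: /6 at 8.1 GHz, /4 at 5.4 GHz and /2 otherwise. That makes the HBR3, HBR2 and HBR link rates all resolve to the same 1.35 GHz source, while the RBR case (1.62 GHz VCO) yields 810 MHz. A quick check of the arithmetic in the driver's kHz-style units, not part of the patch:

#include <stdio.h>

static unsigned long divided_rate(unsigned long vco)
{
	if (vco == 8100000UL)
		return vco / 6;
	else if (vco == 5400000UL)
		return vco / 4;
	return vco / 2;
}

int main(void)
{
	unsigned long rates[] = { 8100000UL, 5400000UL, 2700000UL, 1620000UL };

	for (unsigned int i = 0; i < 4; i++)
		printf("VCO %lu -> divided %lu\n", rates[i], divided_rate(rates[i]));
	return 0;
}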
+
+static struct clk_regmap_mux dp_vco_divided_clk_src_mux = {
+ .reg = 0x64,
+ .shift = 0,
+ .width = 2,
+
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dp_vco_divided_clk_src_mux",
+ .parent_names =
+ (const char *[]){"dp_vco_divsel_two_clk_src",
+ "dp_vco_divsel_four_clk_src",
+ "dp_vco_divsel_six_clk_src"},
+ .num_parents = 3,
+ .ops = &mux_clk_ops,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ },
+ },
+};
+
+static struct clk_hw *mdss_dp_pllcc_10nm[] = {
+ [DP_VCO_CLK] = &dp_vco_clk.hw,
+ [DP_LINK_CLK_DIVSEL_TEN] = &dp_link_clk_divsel_ten.hw,
+ [DP_VCO_DIVIDED_TWO_CLK_SRC] = &dp_vco_divsel_two_clk_src.hw,
+ [DP_VCO_DIVIDED_FOUR_CLK_SRC] = &dp_vco_divsel_four_clk_src.hw,
+ [DP_VCO_DIVIDED_SIX_CLK_SRC] = &dp_vco_divsel_six_clk_src.hw,
+ [DP_VCO_DIVIDED_CLK_SRC_MUX] = &dp_vco_divided_clk_src_mux.clkr.hw,
+};
+
+int dp_pll_clock_register_10nm(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ int rc = -ENOTSUPP, i = 0;
+ struct clk_onecell_data *clk_data;
+ struct clk *clk;
+ struct regmap *regmap;
+ int num_clks = ARRAY_SIZE(mdss_dp_pllcc_10nm);
+
+ if (!pdev || !pdev->dev.of_node) {
+ pr_err("Invalid input parameters\n");
+ return -EINVAL;
+ }
+
+ if (!pll_res || !pll_res->pll_base || !pll_res->phy_base ||
+ !pll_res->ln_tx0_base || !pll_res->ln_tx1_base) {
+ pr_err("%s: Invalid input parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
+ sizeof(struct clk *)), GFP_KERNEL);
+ if (!clk_data->clks) {
+ devm_kfree(&pdev->dev, clk_data);
+ return -ENOMEM;
+ }
+ clk_data->clk_num = num_clks;
+
+ pll_res->priv = &dp_pdb;
+ dp_pdb.pll = pll_res;
+
+ /* Set client data for vco, mux and div clocks */
+ regmap = devm_regmap_init(&pdev->dev, &dp_pixel_mux_regmap_ops,
+ pll_res, &dp_pll_10nm_cfg);
+ dp_vco_divided_clk_src_mux.clkr.regmap = regmap;
+ mux_clk_ops = clk_regmap_mux_closest_ops;
+ mux_clk_ops.determine_rate = clk_mux_determine_rate;
+ mux_clk_ops.recalc_rate = mux_recalc_rate;
+
+ dp_vco_clk.priv = pll_res;
+
+ for (i = DP_VCO_CLK; i <= DP_VCO_DIVIDED_CLK_SRC_MUX; i++) {
+ pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
+ clk = devm_clk_register(&pdev->dev,
+ mdss_dp_pllcc_10nm[i]);
+ if (IS_ERR(clk)) {
+ pr_err("clk registration failed for DP: %d\n",
+ pll_res->index);
+ rc = -EINVAL;
+ goto clk_reg_fail;
+ }
+ clk_data->clks[i] = clk;
+ }
+
+ rc = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, clk_data);
+ if (rc) {
+ pr_err("%s: Clock register failed rc=%d\n", __func__, rc);
+ rc = -EPROBE_DEFER;
+ } else {
+ pr_debug("%s SUCCESS\n", __func__);
+ }
+ return rc;
+clk_reg_fail:
+ devm_kfree(&pdev->dev, clk_data->clks);
+ devm_kfree(&pdev->dev, clk_data);
+ return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h
new file mode 100644
index 0000000..c3b5635
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_DP_PLL_10NM_H
+#define __MDSS_DP_PLL_10NM_H
+
+#define DP_VCO_HSCLK_RATE_1620MHZDIV1000 1620000UL
+#define DP_VCO_HSCLK_RATE_2700MHZDIV1000 2700000UL
+#define DP_VCO_HSCLK_RATE_5400MHZDIV1000 5400000UL
+#define DP_VCO_HSCLK_RATE_8100MHZDIV1000 8100000UL
+
+struct dp_pll_db {
+ struct mdss_pll_resources *pll;
+
+ /* lane and orientation settings */
+ u8 lane_cnt;
+ u8 orientation;
+
+ /* COM PHY settings */
+ u32 hsclk_sel;
+ u32 dec_start_mode0;
+ u32 div_frac_start1_mode0;
+ u32 div_frac_start2_mode0;
+ u32 div_frac_start3_mode0;
+ u32 integloop_gain0_mode0;
+ u32 integloop_gain1_mode0;
+ u32 vco_tune_map;
+ u32 lock_cmp1_mode0;
+ u32 lock_cmp2_mode0;
+ u32 lock_cmp3_mode0;
+ u32 lock_cmp_en;
+
+ /* PHY vco divider */
+ u32 phy_vco_div;
+};
+
+int dp_vco_set_rate_10nm(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+unsigned long dp_vco_recalc_rate_10nm(struct clk_hw *hw,
+ unsigned long parent_rate);
+long dp_vco_round_rate_10nm(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate);
+int dp_vco_prepare_10nm(struct clk_hw *hw);
+void dp_vco_unprepare_10nm(struct clk_hw *hw);
+int dp_mux_set_parent_10nm(void *context,
+ unsigned int reg, unsigned int val);
+int dp_mux_get_parent_10nm(void *context,
+ unsigned int reg, unsigned int *val);
+#endif /* __MDSS_DP_PLL_10NM_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-8998-util.c b/drivers/clk/qcom/mdss/mdss-dp-pll-8998-util.c
deleted file mode 100644
index a3ed8a8..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-8998-util.c
+++ /dev/null
@@ -1,774 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clock-generic.h>
-
-#include "mdss-pll.h"
-#include "mdss-dp-pll.h"
-#include "mdss-dp-pll-8998.h"
-
-int link2xclk_divsel_set_div(struct div_clk *clk, int div)
-{
- int rc;
- u32 link2xclk_div_tx0, link2xclk_div_tx1;
- u32 phy_mode;
- struct mdss_pll_resources *dp_res = clk->priv;
-
- rc = mdss_pll_resource_enable(dp_res, true);
- if (rc) {
- pr_err("Failed to enable mdss DP PLL resources\n");
- return rc;
- }
-
- link2xclk_div_tx0 = MDSS_PLL_REG_R(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_BAND);
- link2xclk_div_tx1 = MDSS_PLL_REG_R(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_BAND);
-
- link2xclk_div_tx0 &= ~0x07; /* bits 0 to 2 */
- link2xclk_div_tx1 &= ~0x07; /* bits 0 to 2 */
-
- /* Configure TX band Mux */
- link2xclk_div_tx0 |= 0x4;
- link2xclk_div_tx1 |= 0x4;
-
- /*configure DP PHY MODE */
- phy_mode = 0x58;
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_BAND,
- link2xclk_div_tx0);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_BAND,
- link2xclk_div_tx1);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_MODE, phy_mode);
- /* Make sure the PHY register writes are done */
- wmb();
-
- pr_debug("%s: div=%d link2xclk_div_tx0=%x, link2xclk_div_tx1=%x\n",
- __func__, div, link2xclk_div_tx0, link2xclk_div_tx1);
-
- mdss_pll_resource_enable(dp_res, false);
-
- return rc;
-}
-
-int link2xclk_divsel_get_div(struct div_clk *clk)
-{
- int rc;
- u32 div = 0, phy_mode;
- struct mdss_pll_resources *dp_res = clk->priv;
-
- rc = mdss_pll_resource_enable(dp_res, true);
- if (rc) {
- pr_err("Failed to enable dp_res resources\n");
- return rc;
- }
-
- phy_mode = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_MODE);
-
- if (phy_mode & 0x48)
- pr_err("%s: DP PAR Rate not correct\n", __func__);
-
- if ((phy_mode & 0x3) == 1)
- div = 10;
- else if ((phy_mode & 0x3) == 0)
- div = 5;
- else
- pr_err("%s: unsupported div: %d\n", __func__, phy_mode);
-
- mdss_pll_resource_enable(dp_res, false);
- pr_debug("%s: phy_mode=%d, div=%d\n", __func__,
- phy_mode, div);
-
- return div;
-}
-
-int vco_divided_clk_set_div(struct div_clk *clk, int div)
-{
- int rc;
- u32 auxclk_div;
- struct mdss_pll_resources *dp_res = clk->priv;
-
- rc = mdss_pll_resource_enable(dp_res, true);
- if (rc) {
- pr_err("Failed to enable mdss DP PLL resources\n");
- return rc;
- }
-
- auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
- auxclk_div &= ~0x03; /* bits 0 to 1 */
-
- auxclk_div |= 1; /* Default divider */
-
- if (div == 4)
- auxclk_div |= 2;
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_VCO_DIV, auxclk_div);
- /* Make sure the PHY registers writes are done */
- wmb();
- pr_debug("%s: div=%d auxclk_div=%x\n", __func__, div, auxclk_div);
-
- mdss_pll_resource_enable(dp_res, false);
-
- return rc;
-}
-
-
-enum handoff vco_divided_clk_handoff(struct clk *c)
-{
- /*
- * Since cont-splash is not enabled, disable handoff
- * for vco_divider_clk.
- */
- return HANDOFF_DISABLED_CLK;
-}
-
-int vco_divided_clk_get_div(struct div_clk *clk)
-{
- int rc;
- u32 div, auxclk_div;
- struct mdss_pll_resources *dp_res = clk->priv;
-
- rc = mdss_pll_resource_enable(dp_res, true);
- if (rc) {
- pr_err("Failed to enable dp_res resources\n");
- return rc;
- }
-
- auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
- auxclk_div &= 0x03;
-
- div = 2; /* Default divider */
- if (auxclk_div == 2)
- div = 4;
-
- mdss_pll_resource_enable(dp_res, false);
-
- pr_debug("%s: auxclk_div=%d, div=%d\n", __func__, auxclk_div, div);
-
- return div;
-}
-
-int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
-{
- u32 res = 0;
- struct mdss_pll_resources *dp_res = vco->priv;
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_PD_CTL, 0x3d);
- /* Make sure the PHY register writes are done */
- wmb();
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_SYSCLK_EN_SEL, 0x37);
-
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CLK_ENABLE1, 0x0e);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CLK_SEL, 0x30);
-
- /* Different for each clock rates */
- if (rate == DP_VCO_HSCLK_RATE_1620MHZDIV1000) {
- pr_debug("%s: VCO rate: %ld\n", __func__,
- DP_VCO_RATE_8100MHZDIV1000);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_SYS_CLK_CTRL, 0x02);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_HSCLK_SEL, 0x2c);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP_EN, 0x04);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DEC_START_MODE0, 0x69);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START2_MODE0, 0x80);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START3_MODE0, 0x07);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CMN_CONFIG, 0x42);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP1_MODE0, 0xbf);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP2_MODE0, 0x21);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
- } else if (rate == DP_VCO_HSCLK_RATE_2700MHZDIV1000) {
- pr_debug("%s: VCO rate: %ld\n", __func__,
- DP_VCO_RATE_8100MHZDIV1000);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_SYS_CLK_CTRL, 0x06);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_HSCLK_SEL, 0x84);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP_EN, 0x08);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DEC_START_MODE0, 0x69);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START2_MODE0, 0x80);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START3_MODE0, 0x07);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CMN_CONFIG, 0x02);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP1_MODE0, 0x3f);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP2_MODE0, 0x38);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
- } else if (rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000) {
- pr_debug("%s: VCO rate: %ld\n", __func__,
- DP_VCO_RATE_10800MHZDIV1000);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_SYS_CLK_CTRL, 0x06);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_HSCLK_SEL, 0x80);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP_EN, 0x08);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DEC_START_MODE0, 0x8c);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START3_MODE0, 0xa0);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CMN_CONFIG, 0x12);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP1_MODE0, 0x7f);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP2_MODE0, 0x70);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
- } else {
- pr_err("%s: unsupported rate: %ld\n", __func__, rate);
- return -EINVAL;
- }
- /* Make sure the PLL register writes are done */
- wmb();
-
- if ((rate == DP_VCO_HSCLK_RATE_1620MHZDIV1000)
- || (rate == DP_VCO_HSCLK_RATE_2700MHZDIV1000)) {
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_VCO_DIV, 0x1);
- } else {
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_VCO_DIV, 0x2);
- }
- /* Make sure the PHY register writes are done */
- wmb();
-
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_VCO_TUNE_MAP, 0x00);
-
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_BG_TIMER, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_BG_TIMER, 0x0a);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CORECLK_DIV_MODE0, 0x05);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_VCO_TUNE_CTRL, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CP_CTRL_MODE0, 0x06);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_PLL_IVCO, 0x07);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x37);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CORE_CLK_EN, 0x0f);
-
- /* Make sure the PLL register writes are done */
- wmb();
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_MODE, 0x58);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_TX0_TX1_LANE_CTL, 0x05);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_TX2_TX3_LANE_CTL, 0x05);
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x1a);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x1a);
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_VMODE_CTRL1,
- 0x40);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_VMODE_CTRL1,
- 0x40);
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN,
- 0x30);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN,
- 0x30);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_INTERFACE_SELECT,
- 0x3d);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_INTERFACE_SELECT,
- 0x3d);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
- 0x0f);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
- 0x0f);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_RESET_TSYNC_EN,
- 0x03);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_RESET_TSYNC_EN,
- 0x03);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TRAN_DRVR_EMP_EN,
- 0x03);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TRAN_DRVR_EMP_EN,
- 0x03);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN,
- 0x00);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN,
- 0x00);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_INTERFACE_MODE,
- 0x00);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_INTERFACE_MODE,
- 0x00);
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_BAND,
- 0x4);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_BAND,
- 0x4);
- /* Make sure the PHY register writes are done */
- wmb();
- return res;
-}
-
-static bool dp_pll_lock_status(struct mdss_pll_resources *dp_res)
-{
- u32 status;
- bool pll_locked;
-
- /* poll for PLL ready status */
- if (readl_poll_timeout_atomic((dp_res->pll_base +
- QSERDES_COM_C_READY_STATUS),
- status,
- ((status & BIT(0)) > 0),
- DP_PLL_POLL_SLEEP_US,
- DP_PLL_POLL_TIMEOUT_US)) {
- pr_err("%s: C_READY status is not high. Status=%x\n",
- __func__, status);
- pll_locked = false;
- } else if (readl_poll_timeout_atomic((dp_res->pll_base +
- DP_PHY_STATUS),
- status,
- ((status & BIT(1)) > 0),
- DP_PLL_POLL_SLEEP_US,
- DP_PLL_POLL_TIMEOUT_US)) {
- pr_err("%s: Phy_ready is not high. Status=%x\n",
- __func__, status);
- pll_locked = false;
- } else {
- pll_locked = true;
- }
-
- return pll_locked;
-}
-
-
-static int dp_pll_enable(struct clk *c)
-{
- int rc = 0;
- u32 status;
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
- struct mdss_pll_resources *dp_res = vco->priv;
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x01);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x05);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x01);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x09);
- /* Make sure the PHY register writes are done */
- wmb();
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_RESETSM_CNTRL, 0x20);
- /* Make sure the PLL register writes are done */
- wmb();
- /* poll for PLL ready status */
- if (readl_poll_timeout_atomic((dp_res->pll_base +
- QSERDES_COM_C_READY_STATUS),
- status,
- ((status & BIT(0)) > 0),
- DP_PLL_POLL_SLEEP_US,
- DP_PLL_POLL_TIMEOUT_US)) {
- pr_err("%s: C_READY status is not high. Status=%x\n",
- __func__, status);
- rc = -EINVAL;
- goto lock_err;
- }
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x19);
- /* Make sure the PHY register writes are done */
- wmb();
- /* poll for PHY ready status */
- if (readl_poll_timeout_atomic((dp_res->phy_base +
- DP_PHY_STATUS),
- status,
- ((status & BIT(1)) > 0),
- DP_PLL_POLL_SLEEP_US,
- DP_PLL_POLL_TIMEOUT_US)) {
- pr_err("%s: Phy_ready is not high. Status=%x\n",
- __func__, status);
- rc = -EINVAL;
- goto lock_err;
- }
-
- pr_debug("%s: PLL is locked\n", __func__);
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x3f);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
- 0x10);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x3f);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
- 0x10);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_POL_INV,
- 0x0a);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_POL_INV,
- 0x0a);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x18);
- udelay(2000);
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x19);
-
- /*
- * Make sure all the register writes are completed before
- * doing any other operation
- */
- wmb();
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_LANE_MODE_1,
- 0xf6);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_LANE_MODE_1,
- 0xf6);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
- 0x1f);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
- 0x1f);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
- 0x0f);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
- 0x0f);
- /*
- * Make sure all the register writes are completed before
- * doing any other operation
- */
- wmb();
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x09);
- udelay(2000);
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x19);
- udelay(2000);
- /* poll for PHY ready status */
- if (readl_poll_timeout_atomic((dp_res->phy_base +
- DP_PHY_STATUS),
- status,
- ((status & BIT(1)) > 0),
- DP_PLL_POLL_SLEEP_US,
- DP_PLL_POLL_TIMEOUT_US)) {
- pr_err("%s: Lane_mode: Phy_ready is not high. Status=%x\n",
- __func__, status);
- rc = -EINVAL;
- goto lock_err;
- }
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_DRV_LVL,
- 0x2a);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_DRV_LVL,
- 0x2a);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_EMP_POST1_LVL,
- 0x20);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_EMP_POST1_LVL,
- 0x20);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX,
- 0x11);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX,
- 0x11);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX,
- 0x11);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX,
- 0x11);
- /* Make sure the PHY register writes are done */
- wmb();
-
-lock_err:
- return rc;
-}
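
The enable sequence above alternates banks of register writes with wmb() so each programming step is posted before the next step or status poll. A rough sketch of that write-then-barrier idiom, assuming MDSS_PLL_REG_W wraps a relaxed MMIO write; the offsets and values here are placeholders only:

    #include <linux/io.h>

    static void phy_cfg_sequence(void __iomem *phy_base)
    {
        writel_relaxed(0x01, phy_base + 0x10);  /* hypothetical CFG step 1 */
        writel_relaxed(0x05, phy_base + 0x10);  /* hypothetical CFG step 2 */
        wmb();  /* make the writes visible before polling the status bit */
    }
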
-
-static int dp_pll_disable(struct clk *c)
-{
- int rc = 0;
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
- struct mdss_pll_resources *dp_res = vco->priv;
-
- /* Assert DP PHY power down */
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_PD_CTL, 0x2);
- /*
- * Make sure all the register writes to disable PLL are
- * completed before doing any other operation
- */
- wmb();
-
- return rc;
-}
-
-
-int dp_vco_prepare(struct clk *c)
-{
- int rc = 0;
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
- struct mdss_pll_resources *dp_pll_res = vco->priv;
-
- DEV_DBG("rate=%ld\n", vco->rate);
- rc = mdss_pll_resource_enable(dp_pll_res, true);
- if (rc) {
- pr_err("Failed to enable mdss DP pll resources\n");
- goto error;
- }
-
- rc = dp_pll_enable(c);
- if (rc) {
- mdss_pll_resource_enable(dp_pll_res, false);
- pr_err("ndx=%d failed to enable dsi pll\n",
- dp_pll_res->index);
- goto error;
- }
-
- mdss_pll_resource_enable(dp_pll_res, false);
-error:
- return rc;
-}
-
-void dp_vco_unprepare(struct clk *c)
-{
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
- struct mdss_pll_resources *io = vco->priv;
-
- if (!io) {
- DEV_ERR("Invalid input parameter\n");
- return;
- }
-
- if (!io->pll_on &&
- mdss_pll_resource_enable(io, true)) {
- DEV_ERR("pll resource can't be enabled\n");
- return;
- }
- dp_pll_disable(c);
-
- io->handoff_resources = false;
- mdss_pll_resource_enable(io, false);
- io->pll_on = false;
-}
-
-int dp_vco_set_rate(struct clk *c, unsigned long rate)
-{
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
- struct mdss_pll_resources *io = vco->priv;
- int rc;
-
- rc = mdss_pll_resource_enable(io, true);
- if (rc) {
- DEV_ERR("pll resource can't be enabled\n");
- return rc;
- }
-
- DEV_DBG("DP lane CLK rate=%ld\n", rate);
-
- rc = dp_config_vco_rate(vco, rate);
- if (rc)
- DEV_ERR("%s: Failed to set clk rate\n", __func__);
-
- mdss_pll_resource_enable(io, false);
-
- vco->rate = rate;
-
- return 0;
-}
-
-unsigned long dp_vco_get_rate(struct clk *c)
-{
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
- int rc;
- u32 div, hsclk_div, link2xclk_div;
- u64 vco_rate;
- struct mdss_pll_resources *pll = vco->priv;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable mdss DP pll=%d\n", pll->index);
- return rc;
- }
-
- div = MDSS_PLL_REG_R(pll->pll_base, QSERDES_COM_HSCLK_SEL);
- div &= 0x0f;
-
- if (div == 12)
- hsclk_div = 5; /* Default */
- else if (div == 4)
- hsclk_div = 3;
- else if (div == 0)
- hsclk_div = 2;
- else {
- pr_debug("unknown divider. forcing to default\n");
- hsclk_div = 5;
- }
-
- div = MDSS_PLL_REG_R(pll->phy_base, DP_PHY_MODE);
-
- if (div & 0x58)
- pr_err("%s: DP PAR Rate not correct\n", __func__);
-
- if ((div & 0x3) == 1)
- link2xclk_div = 10;
- else if ((div & 0x3) == 0)
- link2xclk_div = 5;
- else
- pr_err("%s: unsupported div. Phy_mode: %d\n", __func__, div);
-
- if (link2xclk_div == 10) {
- vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
- } else {
- if (hsclk_div == 5)
- vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
- else if (hsclk_div == 3)
- vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
- else
- vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
- }
-
- pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
-
- mdss_pll_resource_enable(pll, false);
-
- return (unsigned long)vco_rate;
-}
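
dp_vco_get_rate() above reconstructs the VCO rate from two fields: the low nibble of QSERDES_COM_HSCLK_SEL (high-speed clock divider) and the low bits of DP_PHY_MODE (2x-link divider). The mapping, condensed from the removed code purely as a reading aid:

    /* hsclk_sel 12 -> div 5, 4 -> div 3, 0 -> div 2 (anything else defaults to 5);
     * phy_mode[1:0] 1 -> link2x div 10, 0 -> link2x div 5.
     */
    static unsigned long dp_vco_rate_khz(u32 hsclk_div, u32 link2xclk_div)
    {
        if (link2xclk_div == 10)
            return 2700000;        /* HBR via the div-10 path */
        if (hsclk_div == 5)
            return 1620000;        /* RBR */
        if (hsclk_div == 3)
            return 2700000;        /* HBR */
        return 5400000;            /* HBR2 */
    }
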
-
-long dp_vco_round_rate(struct clk *c, unsigned long rate)
-{
- unsigned long rrate = rate;
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-
- if (rate <= vco->min_rate)
- rrate = vco->min_rate;
- else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
- rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
- else
- rrate = vco->max_rate;
-
- pr_debug("%s: rrate=%ld\n", __func__, rrate);
-
- return rrate;
-}
-
-enum handoff dp_vco_handoff(struct clk *c)
-{
- enum handoff ret = HANDOFF_DISABLED_CLK;
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
- struct mdss_pll_resources *io = vco->priv;
-
- if (mdss_pll_resource_enable(io, true)) {
- DEV_ERR("pll resource can't be enabled\n");
- return ret;
- }
-
- if (dp_pll_lock_status(io)) {
- io->pll_on = true;
- c->rate = dp_vco_get_rate(c);
- io->handoff_resources = true;
- ret = HANDOFF_ENABLED_CLK;
- } else {
- io->handoff_resources = false;
- mdss_pll_resource_enable(io, false);
- DEV_DBG("%s: PLL not locked\n", __func__);
- }
-
- DEV_DBG("done, ret=%d\n", ret);
- return ret;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.c b/drivers/clk/qcom/mdss/mdss-dp-pll-8998.c
deleted file mode 100644
index 6a49d15..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-/*
- ***************************************************************************
- ******** Display Port PLL driver block diagram for branch clocks **********
- ***************************************************************************
-
- +--------------------------+
- | DP_VCO_CLK |
- | |
- | +-------------------+ |
- | | (DP PLL/VCO) | |
- | +---------+---------+ |
- | v |
- | +----------+-----------+ |
- | | hsclk_divsel_clk_src | |
- | +----------+-----------+ |
- +--------------------------+
- |
- v
- +------------<------------|------------>-------------+
- | | |
-+----------v----------+ +----------v----------+ +----------v----------+
-| dp_link_2x_clk | | vco_divided_clk_src | | vco_divided_clk_src |
-| divsel_five | | | | |
-v----------+----------v | divsel_two | | divsel_four |
- | +----------+----------+ +----------+----------+
- | | |
- v v v
- | +---------------------+ |
- Input to MMSSCC block | | (aux_clk_ops) | |
- for link clk, crypto clk +--> vco_divided_clk <-+
- and interface clock | _src_mux |
- +----------+----------+
- |
- v
- Input to MMSSCC block
- for DP pixel clock
-
- ******************************************************************************
- */
-
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8998.h>
-
-#include "mdss-pll.h"
-#include "mdss-dp-pll.h"
-#include "mdss-dp-pll-8998.h"
-
-static const struct clk_ops clk_ops_vco_divided_clk_src_c;
-static const struct clk_ops clk_ops_link_2x_clk_div_c;
-static const struct clk_ops clk_ops_gen_mux_dp;
-
-static struct clk_div_ops link2xclk_divsel_ops = {
- .set_div = link2xclk_divsel_set_div,
- .get_div = link2xclk_divsel_get_div,
-};
-
-static struct clk_div_ops vco_divided_clk_ops = {
- .set_div = vco_divided_clk_set_div,
- .get_div = vco_divided_clk_get_div,
-};
-
-static const struct clk_ops dp_8998_vco_clk_ops = {
- .set_rate = dp_vco_set_rate,
- .round_rate = dp_vco_round_rate,
- .prepare = dp_vco_prepare,
- .unprepare = dp_vco_unprepare,
- .handoff = dp_vco_handoff,
-};
-
-static struct clk_mux_ops mdss_mux_ops = {
- .set_mux_sel = mdss_set_mux_sel,
- .get_mux_sel = mdss_get_mux_sel,
-};
-
-static struct dp_pll_vco_clk dp_vco_clk = {
- .min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
- .max_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000,
- .c = {
- .dbg_name = "dp_vco_clk",
- .ops = &dp_8998_vco_clk_ops,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dp_vco_clk.c),
- },
-};
-
-static struct div_clk dp_link_2x_clk_divsel_five = {
- .data = {
- .div = 5,
- .min_div = 5,
- .max_div = 5,
- },
- .ops = &link2xclk_divsel_ops,
- .c = {
- .parent = &dp_vco_clk.c,
- .dbg_name = "dp_link_2x_clk_divsel_five",
- .ops = &clk_ops_link_2x_clk_div_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dp_link_2x_clk_divsel_five.c),
- },
-};
-
-static struct div_clk vco_divsel_four_clk_src = {
- .data = {
- .div = 4,
- .min_div = 4,
- .max_div = 4,
- },
- .ops = &vco_divided_clk_ops,
- .c = {
- .parent = &dp_vco_clk.c,
- .dbg_name = "vco_divsel_four_clk_src",
- .ops = &clk_ops_vco_divided_clk_src_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(vco_divsel_four_clk_src.c),
- },
-};
-
-static struct div_clk vco_divsel_two_clk_src = {
- .data = {
- .div = 2,
- .min_div = 2,
- .max_div = 2,
- },
- .ops = &vco_divided_clk_ops,
- .c = {
- .parent = &dp_vco_clk.c,
- .dbg_name = "vco_divsel_two_clk_src",
- .ops = &clk_ops_vco_divided_clk_src_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(vco_divsel_two_clk_src.c),
- },
-};
-
-static struct mux_clk vco_divided_clk_src_mux = {
- .num_parents = 2,
- .parents = (struct clk_src[]) {
- {&vco_divsel_two_clk_src.c, 0},
- {&vco_divsel_four_clk_src.c, 1},
- },
- .ops = &mdss_mux_ops,
- .c = {
- .parent = &vco_divsel_two_clk_src.c,
- .dbg_name = "vco_divided_clk_src_mux",
- .ops = &clk_ops_gen_mux_dp,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(vco_divided_clk_src_mux.c),
- }
-};
-
-static struct clk_lookup dp_pllcc_8998[] = {
- CLK_LIST(dp_vco_clk),
- CLK_LIST(dp_link_2x_clk_divsel_five),
- CLK_LIST(vco_divsel_four_clk_src),
- CLK_LIST(vco_divsel_two_clk_src),
- CLK_LIST(vco_divided_clk_src_mux),
-};
-
-int dp_pll_clock_register_8998(struct platform_device *pdev,
- struct mdss_pll_resources *pll_res)
-{
- int rc = -ENOTSUPP;
-
- if (!pll_res || !pll_res->pll_base || !pll_res->phy_base) {
- DEV_ERR("%s: Invalid input parameters\n", __func__);
- return -EINVAL;
- }
-
- /* Set client data for vco, mux and div clocks */
- dp_vco_clk.priv = pll_res;
- vco_divided_clk_src_mux.priv = pll_res;
- vco_divsel_two_clk_src.priv = pll_res;
- vco_divsel_four_clk_src.priv = pll_res;
- dp_link_2x_clk_divsel_five.priv = pll_res;
-
- clk_ops_link_2x_clk_div_c = clk_ops_div;
- clk_ops_link_2x_clk_div_c.prepare = mdss_pll_div_prepare;
-
- /*
- * Set the ops for the divider in the pixel clock tree to the
- * slave_div to ensure that a set rate on this divider clock will not
- * be propagated to its parent. This is needed to ensure that when we set
- * the rate for the pixel clock, the vco is not reconfigured
- */
- clk_ops_vco_divided_clk_src_c = clk_ops_slave_div;
- clk_ops_vco_divided_clk_src_c.prepare = mdss_pll_div_prepare;
- clk_ops_vco_divided_clk_src_c.handoff = vco_divided_clk_handoff;
-
- clk_ops_gen_mux_dp = clk_ops_gen_mux;
- clk_ops_gen_mux_dp.get_rate = parent_get_rate;
-
- /* We can select different clock ops for future versions */
- dp_vco_clk.c.ops = &dp_8998_vco_clk_ops;
-
- rc = of_msm_clock_register(pdev->dev.of_node, dp_pllcc_8998,
- ARRAY_SIZE(dp_pllcc_8998));
- if (rc) {
- DEV_ERR("%s: Clock register failed rc=%d\n", __func__, rc);
- rc = -EPROBE_DEFER;
- } else {
- DEV_DBG("%s SUCCESS\n", __func__);
- }
-
- return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.h b/drivers/clk/qcom/mdss/mdss-dp-pll-8998.h
deleted file mode 100644
index 11d5ddc..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __MDSS_DP_PLL_8998_H
-#define __MDSS_DP_PLL_8998_H
-
-#define DP_PHY_REVISION_ID0 0x0000
-#define DP_PHY_REVISION_ID1 0x0004
-#define DP_PHY_REVISION_ID2 0x0008
-#define DP_PHY_REVISION_ID3 0x000C
-
-#define DP_PHY_CFG 0x0010
-#define DP_PHY_PD_CTL 0x0014
-#define DP_PHY_MODE 0x0018
-
-#define DP_PHY_AUX_CFG0 0x001C
-#define DP_PHY_AUX_CFG1 0x0020
-#define DP_PHY_AUX_CFG2 0x0024
-#define DP_PHY_AUX_CFG3 0x0028
-#define DP_PHY_AUX_CFG4 0x002C
-#define DP_PHY_AUX_CFG5 0x0030
-#define DP_PHY_AUX_CFG6 0x0034
-#define DP_PHY_AUX_CFG7 0x0038
-#define DP_PHY_AUX_CFG8 0x003C
-#define DP_PHY_AUX_CFG9 0x0040
-#define DP_PHY_AUX_INTERRUPT_MASK 0x0044
-#define DP_PHY_AUX_INTERRUPT_CLEAR 0x0048
-#define DP_PHY_AUX_BIST_CFG 0x004C
-
-#define DP_PHY_VCO_DIV 0x0064
-#define DP_PHY_TX0_TX1_LANE_CTL 0x0068
-
-#define DP_PHY_TX2_TX3_LANE_CTL 0x0084
-#define DP_PHY_STATUS 0x00BC
-
-/* Tx registers */
-#define QSERDES_TX0_OFFSET 0x0400
-#define QSERDES_TX1_OFFSET 0x0800
-
-#define TXn_BIST_MODE_LANENO 0x0000
-#define TXn_CLKBUF_ENABLE 0x0008
-#define TXn_TX_EMP_POST1_LVL 0x000C
-
-#define TXn_TX_DRV_LVL 0x001C
-
-#define TXn_RESET_TSYNC_EN 0x0024
-#define TXn_PRE_STALL_LDO_BOOST_EN 0x0028
-#define TXn_TX_BAND 0x002C
-#define TXn_SLEW_CNTL 0x0030
-#define TXn_INTERFACE_SELECT 0x0034
-
-#define TXn_RES_CODE_LANE_TX 0x003C
-#define TXn_RES_CODE_LANE_RX 0x0040
-#define TXn_RES_CODE_LANE_OFFSET_TX 0x0044
-#define TXn_RES_CODE_LANE_OFFSET_RX 0x0048
-
-#define TXn_DEBUG_BUS_SEL 0x0058
-#define TXn_TRANSCEIVER_BIAS_EN 0x005C
-#define TXn_HIGHZ_DRVR_EN 0x0060
-#define TXn_TX_POL_INV 0x0064
-#define TXn_PARRATE_REC_DETECT_IDLE_EN 0x0068
-
-#define TXn_LANE_MODE_1 0x008C
-
-#define TXn_TRAN_DRVR_EMP_EN 0x00C0
-#define TXn_TX_INTERFACE_MODE 0x00C4
-
-#define TXn_VMODE_CTRL1 0x00F0
-
-
-/* PLL register offset */
-#define QSERDES_COM_ATB_SEL1 0x0000
-#define QSERDES_COM_ATB_SEL2 0x0004
-#define QSERDES_COM_FREQ_UPDATE 0x0008
-#define QSERDES_COM_BG_TIMER 0x000C
-#define QSERDES_COM_SSC_EN_CENTER 0x0010
-#define QSERDES_COM_SSC_ADJ_PER1 0x0014
-#define QSERDES_COM_SSC_ADJ_PER2 0x0018
-#define QSERDES_COM_SSC_PER1 0x001C
-#define QSERDES_COM_SSC_PER2 0x0020
-#define QSERDES_COM_SSC_STEP_SIZE1 0x0024
-#define QSERDES_COM_SSC_STEP_SIZE2 0x0028
-#define QSERDES_COM_POST_DIV 0x002C
-#define QSERDES_COM_POST_DIV_MUX 0x0030
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x0034
-#define QSERDES_COM_CLK_ENABLE1 0x0038
-#define QSERDES_COM_SYS_CLK_CTRL 0x003C
-#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x0040
-#define QSERDES_COM_PLL_EN 0x0044
-#define QSERDES_COM_PLL_IVCO 0x0048
-#define QSERDES_COM_CMN_IETRIM 0x004C
-#define QSERDES_COM_CMN_IPTRIM 0x0050
-
-#define QSERDES_COM_CP_CTRL_MODE0 0x0060
-#define QSERDES_COM_CP_CTRL_MODE1 0x0064
-#define QSERDES_COM_PLL_RCTRL_MODE0 0x0068
-#define QSERDES_COM_PLL_RCTRL_MODE1 0x006C
-#define QSERDES_COM_PLL_CCTRL_MODE0 0x0070
-#define QSERDES_COM_PLL_CCTRL_MODE1 0x0074
-#define QSERDES_COM_PLL_CNTRL 0x0078
-#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x007C
-#define QSERDES_COM_SYSCLK_EN_SEL 0x0080
-#define QSERDES_COM_CML_SYSCLK_SEL 0x0084
-#define QSERDES_COM_RESETSM_CNTRL 0x0088
-#define QSERDES_COM_RESETSM_CNTRL2 0x008C
-#define QSERDES_COM_LOCK_CMP_EN 0x0090
-#define QSERDES_COM_LOCK_CMP_CFG 0x0094
-#define QSERDES_COM_LOCK_CMP1_MODE0 0x0098
-#define QSERDES_COM_LOCK_CMP2_MODE0 0x009C
-#define QSERDES_COM_LOCK_CMP3_MODE0 0x00A0
-
-#define QSERDES_COM_DEC_START_MODE0 0x00B0
-#define QSERDES_COM_DEC_START_MODE1 0x00B4
-#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x00B8
-#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x00BC
-#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x00C0
-#define QSERDES_COM_DIV_FRAC_START1_MODE1 0x00C4
-#define QSERDES_COM_DIV_FRAC_START2_MODE1 0x00C8
-#define QSERDES_COM_DIV_FRAC_START3_MODE1 0x00CC
-#define QSERDES_COM_INTEGLOOP_INITVAL 0x00D0
-#define QSERDES_COM_INTEGLOOP_EN 0x00D4
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x00D8
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x00DC
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x00E0
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x00E4
-#define QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x00E8
-#define QSERDES_COM_VCO_TUNE_CTRL 0x00EC
-#define QSERDES_COM_VCO_TUNE_MAP 0x00F0
-
-#define QSERDES_COM_CMN_STATUS 0x0124
-#define QSERDES_COM_RESET_SM_STATUS 0x0128
-
-#define QSERDES_COM_CLK_SEL 0x0138
-#define QSERDES_COM_HSCLK_SEL 0x013C
-
-#define QSERDES_COM_CORECLK_DIV_MODE0 0x0148
-
-#define QSERDES_COM_SW_RESET 0x0150
-#define QSERDES_COM_CORE_CLK_EN 0x0154
-#define QSERDES_COM_C_READY_STATUS 0x0158
-#define QSERDES_COM_CMN_CONFIG 0x015C
-
-#define QSERDES_COM_SVS_MODE_CLK_SEL 0x0164
-
-#define DP_PLL_POLL_SLEEP_US 500
-#define DP_PLL_POLL_TIMEOUT_US 10000
-
-#define DP_VCO_RATE_8100MHZDIV1000 8100000UL
-#define DP_VCO_RATE_10800MHZDIV1000 10800000UL
-
-#define DP_VCO_HSCLK_RATE_1620MHZDIV1000 1620000UL
-#define DP_VCO_HSCLK_RATE_2700MHZDIV1000 2700000UL
-#define DP_VCO_HSCLK_RATE_5400MHZDIV1000 5400000UL
-
-int dp_vco_set_rate(struct clk *c, unsigned long rate);
-unsigned long dp_vco_get_rate(struct clk *c);
-long dp_vco_round_rate(struct clk *c, unsigned long rate);
-enum handoff dp_vco_handoff(struct clk *c);
-enum handoff vco_divided_clk_handoff(struct clk *c);
-int dp_vco_prepare(struct clk *c);
-void dp_vco_unprepare(struct clk *c);
-int hsclk_divsel_set_div(struct div_clk *clk, int div);
-int hsclk_divsel_get_div(struct div_clk *clk);
-int link2xclk_divsel_set_div(struct div_clk *clk, int div);
-int link2xclk_divsel_get_div(struct div_clk *clk);
-int vco_divided_clk_set_div(struct div_clk *clk, int div);
-int vco_divided_clk_get_div(struct div_clk *clk);
-
-#endif /* __MDSS_DP_PLL_8998_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll.h b/drivers/clk/qcom/mdss/mdss-dp-pll.h
index 2805ff9..2b1d70e 100644
--- a/drivers/clk/qcom/mdss/mdss-dp-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,21 +15,19 @@
#define __MDSS_DP_PLL_H
struct dp_pll_vco_clk {
+ struct clk_hw hw;
unsigned long rate; /* current vco rate */
u64 min_rate; /* min vco rate */
u64 max_rate; /* max vco rate */
void *priv;
-
- struct clk c;
};
-static inline struct dp_pll_vco_clk *mdss_dp_to_vco_clk(struct clk *clk)
+static inline struct dp_pll_vco_clk *to_dp_vco_hw(struct clk_hw *hw)
{
- return container_of(clk, struct dp_pll_vco_clk, c);
+ return container_of(hw, struct dp_pll_vco_clk, hw);
}
-int dp_pll_clock_register_8998(struct platform_device *pdev,
+int dp_pll_clock_register_10nm(struct platform_device *pdev,
struct mdss_pll_resources *pll_res);
-
#endif /* __MDSS_DP_PLL_H */
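
The header now embeds a common-clock-framework struct clk_hw instead of the legacy struct clk, so ops callbacks recover the driver object through container_of() via to_dp_vco_hw(). A minimal sketch of how a CCF op would use the new helper; the recalc_rate body is an assumption for illustration, not code from this patch:

    #include <linux/clk-provider.h>

    static unsigned long dp_vco_recalc_rate(struct clk_hw *hw,
                                            unsigned long parent_rate)
    {
        struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);

        return vco->rate;  /* rate last cached by the set_rate path */
    }
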
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
index 7f82fda..e292ef8 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.c
+++ b/drivers/clk/qcom/mdss/mdss-pll.c
@@ -21,6 +21,7 @@
#include <linux/iopoll.h>
#include "mdss-pll.h"
#include "mdss-dsi-pll.h"
+#include "mdss-dp-pll.h"
int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
{
@@ -126,6 +127,8 @@ static int mdss_pll_resource_parse(struct platform_device *pdev,
if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_10nm"))
pll_res->pll_interface_type = MDSS_DSI_PLL_10NM;
+ else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_10nm"))
+ pll_res->pll_interface_type = MDSS_DP_PLL_10NM;
else
goto err;
@@ -151,6 +154,9 @@ static int mdss_pll_clock_register(struct platform_device *pdev,
case MDSS_DSI_PLL_10NM:
rc = dsi_pll_clock_register_10nm(pdev, pll_res);
break;
+ case MDSS_DP_PLL_10NM:
+ rc = dp_pll_clock_register_10nm(pdev, pll_res);
+ break;
case MDSS_UNKNOWN_PLL:
default:
rc = -EINVAL;
@@ -171,6 +177,7 @@ static int mdss_pll_probe(struct platform_device *pdev)
const char *label;
struct resource *pll_base_reg;
struct resource *phy_base_reg;
+ struct resource *tx0_base_reg, *tx1_base_reg;
struct resource *dynamic_pll_base_reg;
struct resource *gdsc_base_reg;
struct mdss_pll_resources *pll_res;
@@ -272,6 +279,30 @@ static int mdss_pll_probe(struct platform_device *pdev)
}
}
+ tx0_base_reg = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "ln_tx0_base");
+ if (tx0_base_reg) {
+ pll_res->ln_tx0_base = ioremap(tx0_base_reg->start,
+ resource_size(tx0_base_reg));
+ if (!pll_res->ln_tx0_base) {
+ pr_err("Unable to remap Lane TX0 base resources\n");
+ rc = -ENOMEM;
+ goto tx0_io_error;
+ }
+ }
+
+ tx1_base_reg = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "ln_tx1_base");
+ if (tx1_base_reg) {
+ pll_res->ln_tx1_base = ioremap(tx1_base_reg->start,
+ resource_size(tx1_base_reg));
+ if (!pll_res->ln_tx1_base) {
+ pr_err("Unable to remap Lane TX1 base resources\n");
+ rc = -ENOMEM;
+ goto tx1_io_error;
+ }
+ }
+
gdsc_base_reg = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "gdsc_base");
if (!gdsc_base_reg) {
@@ -309,6 +340,12 @@ static int mdss_pll_probe(struct platform_device *pdev)
if (pll_res->gdsc_base)
iounmap(pll_res->gdsc_base);
gdsc_io_error:
+ if (pll_res->ln_tx1_base)
+ iounmap(pll_res->ln_tx1_base);
+tx1_io_error:
+ if (pll_res->ln_tx0_base)
+ iounmap(pll_res->ln_tx0_base);
+tx0_io_error:
if (pll_res->dyn_pll_base)
iounmap(pll_res->dyn_pll_base);
dyn_pll_io_error:
@@ -347,6 +384,7 @@ static int mdss_pll_remove(struct platform_device *pdev)
static const struct of_device_id mdss_pll_dt_match[] = {
{.compatible = "qcom,mdss_dsi_pll_10nm"},
+ {.compatible = "qcom,mdss_dp_pll_10nm"},
{}
};
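
The probe hunk earlier in this file maps the two optional lane-TX regions by name and unwinds through dedicated labels when a later mapping fails. The general optional-resource pattern, with the resource name taken from the patch and the label purely illustrative:

    #include <linux/io.h>
    #include <linux/platform_device.h>

    struct resource *res;
    void __iomem *base = NULL;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ln_tx0_base");
    if (res) {                        /* the region is optional */
        base = ioremap(res->start, resource_size(res));
        if (!base)
            goto tx0_io_error;        /* unmap everything mapped so far */
    }

A devm_ioremap()-based variant would drop the explicit unmaps, but this tree keeps the manual goto-based cleanup.
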
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index ee91e11..033462d 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -37,6 +37,7 @@
enum {
MDSS_DSI_PLL_10NM,
+ MDSS_DP_PLL_10NM,
MDSS_UNKNOWN_PLL,
};
@@ -81,6 +82,8 @@ struct mdss_pll_resources {
*/
void __iomem *pll_base;
void __iomem *phy_base;
+ void __iomem *ln_tx0_base;
+ void __iomem *ln_tx1_base;
void __iomem *gdsc_base;
void __iomem *dyn_pll_base;
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 8af73ac..d9ebe11 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -4877,15 +4877,12 @@ static int _qce_suspend(void *handle)
if (handle == NULL)
return -ENODEV;
- qce_enable_clk(pce_dev);
-
sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
sps_disconnect(sps_pipe_info);
sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
sps_disconnect(sps_pipe_info);
- qce_disable_clk(pce_dev);
return 0;
}
@@ -4899,8 +4896,6 @@ static int _qce_resume(void *handle)
if (handle == NULL)
return -ENODEV;
- qce_enable_clk(pce_dev);
-
sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
sps_connect_info = &pce_dev->ce_bam_info.consumer.connect;
memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
@@ -4923,7 +4918,6 @@ static int _qce_resume(void *handle)
if (rc)
pr_err("Producer callback registration failed rc = %d\n", rc);
- qce_disable_clk(pce_dev);
return rc;
}
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index ff64631..56fbb94 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -5371,8 +5371,11 @@ static int _qcrypto_suspend(struct platform_device *pdev, pm_message_t state)
spin_unlock_irqrestore(&cp->lock, flags);
if (ret)
return ret;
- if (qce_pm_table.suspend)
+ if (qce_pm_table.suspend) {
+ qcrypto_ce_set_bus(pengine, true);
qce_pm_table.suspend(pengine->qce);
+ qcrypto_ce_set_bus(pengine, false);
+ }
return 0;
}
@@ -5393,9 +5396,11 @@ static int _qcrypto_resume(struct platform_device *pdev)
spin_lock_irqsave(&cp->lock, flags);
if (pengine->bw_state == BUS_SUSPENDED) {
spin_unlock_irqrestore(&cp->lock, flags);
- if (qce_pm_table.resume)
+ if (qce_pm_table.resume) {
+ qcrypto_ce_set_bus(pengine, true);
qce_pm_table.resume(pengine->qce);
-
+ qcrypto_ce_set_bus(pengine, false);
+ }
spin_lock_irqsave(&cp->lock, flags);
pengine->bw_state = BUS_NO_BANDWIDTH;
pengine->active_seq++;
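
Together with the qce50.c hunk above, this moves clock and bus management out of the qce suspend/resume callbacks and into qcrypto, which votes the bus up only for the duration of the call. The resulting call pattern (error handling elided):

    /* Inside _qcrypto_suspend()/_qcrypto_resume() after this patch. */
    qcrypto_ce_set_bus(pengine, true);    /* vote bus/clocks on       */
    qce_pm_table.suspend(pengine->qce);   /* BAM pipes torn down here */
    qcrypto_ce_set_bus(pengine, false);   /* drop the vote again      */
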
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
index 43d8fef..e8bfff2 100644
--- a/drivers/devfreq/governor_msm_adreno_tz.c
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -548,10 +548,6 @@ static int tz_handler(struct devfreq *devfreq, unsigned int event, void *data)
(devfreq->profile),
struct msm_adreno_extended_profile,
profile);
- if (devfreq == NULL) {
- pr_err(TAG "NULL defvreq passed to tz_handler\n");
- return -EFAULT;
- }
switch (event) {
case DEVFREQ_GOV_START:
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index a79a9c9..70581e2 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -20,14 +20,8 @@
#define DP_AUX_ENUM_STR(x) #x
-struct aux_buf {
- u8 *start; /* buffer start addr */
- u8 *end; /* buffer end addr */
- u8 *data; /* data pointer */
- u32 size; /* size of buffer */
- u32 len; /* data length */
- u8 trans_num; /* transaction number */
- enum aux_tx_mode tx_mode;
+enum {
+ DP_AUX_DATA_INDEX_WRITE = BIT(31),
};
struct dp_aux_private {
@@ -38,15 +32,12 @@ struct dp_aux_private {
struct mutex mutex;
struct completion comp;
- struct aux_cmd *cmds;
- struct aux_buf txp;
- struct aux_buf rxp;
-
u32 aux_error_num;
bool cmd_busy;
+ bool native;
+ bool read;
- u8 txbuf[256];
- u8 rxbuf[256];
+ struct drm_dp_aux drm_aux;
};
static char *dp_aux_get_error(u32 aux_error)
@@ -69,159 +60,104 @@ static char *dp_aux_get_error(u32 aux_error)
}
}
-static void dp_aux_buf_init(struct aux_buf *buf, u8 *data, u32 size)
+static u32 dp_aux_write(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *msg)
{
- buf->start = data;
- buf->size = size;
- buf->data = buf->start;
- buf->end = buf->start + buf->size;
- buf->len = 0;
- buf->trans_num = 0;
- buf->tx_mode = AUX_NATIVE;
-}
+ u32 data[4], reg, len;
+ u8 *msgdata = msg->buffer;
+ int const aux_cmd_fifo_len = 128;
+ int i = 0;
-static void dp_aux_buf_set(struct dp_aux_private *aux)
-{
- init_completion(&aux->comp);
- aux->cmd_busy = false;
- mutex_init(&aux->mutex);
-
- dp_aux_buf_init(&aux->txp, aux->txbuf, sizeof(aux->txbuf));
- dp_aux_buf_init(&aux->rxp, aux->rxbuf, sizeof(aux->rxbuf));
-}
-
-static void dp_aux_buf_reset(struct aux_buf *buf)
-{
- buf->data = buf->start;
- buf->len = 0;
- buf->trans_num = 0;
- buf->tx_mode = AUX_NATIVE;
-
- memset(buf->start, 0x0, 256);
-}
-
-static void dp_aux_buf_push(struct aux_buf *buf, u32 len)
-{
- buf->data += len;
- buf->len += len;
-}
-
-static u32 dp_aux_buf_trailing(struct aux_buf *buf)
-{
- return (u32)(buf->end - buf->data);
-}
-
-static u32 dp_aux_add_cmd(struct aux_buf *buf, struct aux_cmd *cmd)
-{
- u8 data;
- u8 *bp, *cp;
- u32 i, len;
-
- if (cmd->ex_mode == AUX_READ)
+ if (aux->read)
len = 4;
else
- len = cmd->len + 4;
-
- if (dp_aux_buf_trailing(buf) < len) {
- pr_err("buf trailing error\n");
- return 0;
- }
+ len = msg->size + 4;
/*
* cmd fifo only has depth of 144 bytes
* limit buf length to 128 bytes here
*/
- if ((buf->len + len) > 128) {
+ if (len > aux_cmd_fifo_len) {
pr_err("buf len error\n");
return 0;
}
- bp = buf->data;
- data = cmd->addr >> 16;
- data &= 0x0f; /* 4 addr bits */
+ /* Pack cmd and write to HW */
+ data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
+ if (aux->read)
+ data[0] |= BIT(4); /* R/W */
- if (cmd->ex_mode == AUX_READ)
- data |= BIT(4);
+ data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */
+ data[2] = msg->address & 0xff; /* addr[7:0] */
+ data[3] = (msg->size - 1) & 0xff; /* len[7:0] */
- *bp++ = data;
- *bp++ = cmd->addr >> 8;
- *bp++ = cmd->addr;
- *bp++ = cmd->len - 1;
-
- if (cmd->ex_mode == AUX_WRITE) {
- cp = cmd->buf;
-
- for (i = 0; i < cmd->len; i++)
- *bp++ = *cp++;
- }
-
- dp_aux_buf_push(buf, len);
-
- buf->tx_mode = cmd->tx_mode;
-
- buf->trans_num++;
-
- return cmd->len - 1;
-}
-
-static u32 dp_aux_cmd_fifo_tx(struct dp_aux_private *aux)
-{
- u8 *dp;
- u32 data, len, cnt;
- struct aux_buf *tp = &aux->txp;
-
- len = tp->len;
- if (len == 0) {
- pr_err("invalid len\n");
- return 0;
- }
-
- cnt = 0;
- dp = tp->start;
-
- while (cnt < len) {
- data = *dp;
- data <<= 8;
- data &= 0x00ff00;
- if (cnt == 0)
- data |= BIT(31);
-
- aux->catalog->data = data;
+ for (i = 0; i < len; i++) {
+ reg = (i < 4) ? data[i] : msgdata[i - 4];
+ reg = ((reg) << 8) & 0x0000ff00; /* index = 0, write */
+ if (i == 0)
+ reg |= DP_AUX_DATA_INDEX_WRITE;
+ aux->catalog->data = reg;
aux->catalog->write_data(aux->catalog);
-
- cnt++;
- dp++;
}
- data = (tp->trans_num - 1);
- if (tp->tx_mode == AUX_I2C) {
- data |= BIT(8); /* I2C */
- data |= BIT(10); /* NO SEND ADDR */
- data |= BIT(11); /* NO SEND STOP */
- }
+ reg = 0; /* Transaction number == 1 */
+ if (!aux->native) /* i2c */
+ reg |= (BIT(8) | BIT(10) | BIT(11));
- data |= BIT(9); /* GO */
- aux->catalog->data = data;
+ reg |= BIT(9);
+ aux->catalog->data = reg;
aux->catalog->write_trans(aux->catalog);
- return tp->len;
+ return len;
}
-static u32 dp_cmd_fifo_rx(struct dp_aux_private *aux, u32 len)
+static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ u32 ret = 0, len = 0, timeout;
+ int const aux_timeout_ms = HZ/4;
+
+ reinit_completion(&aux->comp);
+
+ len = dp_aux_write(aux, msg);
+ if (len == 0) {
+ pr_err("DP AUX write failed\n");
+ return -EINVAL;
+ }
+
+ timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms);
+ if (!timeout) {
+ pr_err("aux write timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ pr_debug("aux status %s\n",
+ dp_aux_get_error(aux->aux_error_num));
+
+ if (aux->aux_error_num == DP_AUX_ERR_NONE)
+ ret = len;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *msg)
{
u32 data;
u8 *dp;
u32 i;
- struct aux_buf *rp = &aux->rxp;
+ u32 len = msg->size;
data = 0;
- data |= BIT(31); /* INDEX_WRITE */
+ data |= DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
data |= BIT(0); /* read */
aux->catalog->data = data;
aux->catalog->write_data(aux->catalog);
- dp = rp->data;
+ dp = msg->buffer;
/* discard first byte */
data = aux->catalog->read_data(aux->catalog);
@@ -230,9 +166,6 @@ static u32 dp_cmd_fifo_rx(struct dp_aux_private *aux, u32 len)
data = aux->catalog->read_data(aux->catalog);
*dp++ = (u8)((data >> 8) & 0xff);
}
-
- rp->len = len;
- return len;
}
static void dp_aux_native_handler(struct dp_aux_private *aux)
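
The rewritten TX path packs a 4-byte AUX command header ahead of any payload before pushing the bytes into the command FIFO one register write at a time. The header packing on its own, mirroring the data[0..3] math in dp_aux_write():

    #include <linux/bitops.h>
    #include <linux/types.h>

    static void pack_aux_header(u8 hdr[4], u32 addr, size_t len, bool read)
    {
        hdr[0] = (addr >> 16) & 0xf;     /* addr[19:16] */
        if (read)
            hdr[0] |= BIT(4);            /* R/W: set for reads */
        hdr[1] = (addr >> 8) & 0xff;     /* addr[15:8] */
        hdr[2] = addr & 0xff;            /* addr[7:0] */
        hdr[3] = (len - 1) & 0xff;       /* transfer length - 1 */
    }
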
@@ -292,219 +225,76 @@ static void dp_aux_isr(struct dp_aux *dp_aux)
if (!aux->cmd_busy)
return;
- if (aux->cmds->tx_mode == AUX_NATIVE)
+ if (aux->native)
dp_aux_native_handler(aux);
else
dp_aux_i2c_handler(aux);
}
-
-
-static int dp_aux_write(struct dp_aux_private *aux)
+/*
+ * This function does the real work of processing an AUX transaction.
+ * It calls the aux_reset() function to reset the AUX channel
+ * if the wait times out.
+ */
+static ssize_t dp_aux_transfer(struct drm_dp_aux *drm_aux,
+ struct drm_dp_aux_msg *msg)
{
- struct aux_cmd *cm;
- struct aux_buf *tp;
- u32 len, ret, timeout;
+ ssize_t ret;
+ int const aux_cmd_native_max = 16;
+ int const aux_cmd_i2c_max = 128;
+ struct dp_aux_private *aux = container_of(drm_aux,
+ struct dp_aux_private, drm_aux);
mutex_lock(&aux->mutex);
- tp = &aux->txp;
- dp_aux_buf_reset(tp);
-
- cm = aux->cmds;
- while (cm) {
- ret = dp_aux_add_cmd(tp, cm);
- if (ret <= 0)
- break;
-
- if (!cm->next)
- break;
- cm++;
- }
-
- reinit_completion(&aux->comp);
+ aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
+ aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
aux->cmd_busy = true;
- len = dp_aux_cmd_fifo_tx(aux);
-
- timeout = wait_for_completion_timeout(&aux->comp, HZ/4);
- if (!timeout)
- pr_err("aux write timeout\n");
-
- pr_debug("aux status %s\n",
- dp_aux_get_error(aux->aux_error_num));
-
- if (aux->aux_error_num == DP_AUX_ERR_NONE)
- ret = len;
- else
- ret = aux->aux_error_num;
-
- aux->cmd_busy = false;
- mutex_unlock(&aux->mutex);
- return ret;
-}
-
-static int dp_aux_read(struct dp_aux_private *aux)
-{
- struct aux_cmd *cm;
- struct aux_buf *tp, *rp;
- u32 len, ret, timeout;
-
- mutex_lock(&aux->mutex);
-
- tp = &aux->txp;
- rp = &aux->rxp;
-
- dp_aux_buf_reset(tp);
- dp_aux_buf_reset(rp);
-
- cm = aux->cmds;
- len = 0;
-
- while (cm) {
- ret = dp_aux_add_cmd(tp, cm);
- len += cm->len;
-
- if (ret <= 0)
- break;
-
- if (!cm->next)
- break;
- cm++;
+ /* Ignore address only message */
+ if ((msg->size == 0) || (msg->buffer == NULL)) {
+ msg->reply = aux->native ?
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+ ret = msg->size;
+ goto unlock_exit;
}
- reinit_completion(&aux->comp);
- aux->cmd_busy = true;
+ /* msg sanity check */
+ if ((aux->native && (msg->size > aux_cmd_native_max)) ||
+ (msg->size > aux_cmd_i2c_max)) {
+ pr_err("%s: invalid msg: size(%zu), request(%x)\n",
+ __func__, msg->size, msg->request);
+ ret = -EINVAL;
+ goto unlock_exit;
+ }
- dp_aux_cmd_fifo_tx(aux);
+ ret = dp_aux_cmd_fifo_tx(aux, msg);
+ if (ret < 0) {
+ aux->catalog->reset(aux->catalog); /* reset aux */
+ goto unlock_exit;
+ }
- timeout = wait_for_completion_timeout(&aux->comp, HZ/4);
- if (!timeout)
- pr_err("aux read timeout\n");
+ if (aux->aux_error_num == DP_AUX_ERR_NONE) {
+ if (aux->read)
+ dp_aux_cmd_fifo_rx(aux, msg);
- pr_debug("aux status %s\n",
- dp_aux_get_error(aux->aux_error_num));
+ msg->reply = aux->native ?
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+ } else {
+ /* Reply defer to retry */
+ msg->reply = aux->native ?
+ DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
+ }
- if (aux->aux_error_num == DP_AUX_ERR_NONE)
- ret = dp_cmd_fifo_rx(aux, len);
- else
- ret = aux->aux_error_num;
+ /* Return requested size for success or retry */
+ ret = msg->size;
- aux->cmds->buf = rp->data;
+unlock_exit:
aux->cmd_busy = false;
-
mutex_unlock(&aux->mutex);
-
return ret;
}
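
The request masking at the top of dp_aux_transfer() works because of how the DRM AUX request codes are laid out: bit 3 separates native from I2C transactions and bit 0 separates reads from writes, with DP_AUX_I2C_MOT as an extra flag on I2C requests. A small sketch of the same decode using the drm_dp_helper constants; treat the exact mask values as an assumption if this tree's header diverges from upstream:

    #include <drm/drm_dp_helper.h>

    static void decode_aux_request(const struct drm_dp_aux_msg *msg,
                                   bool *native, bool *read)
    {
        u8 req = msg->request & ~DP_AUX_I2C_MOT;  /* drop the MOT flag */

        *native = !!(req & DP_AUX_NATIVE_WRITE);  /* bit 3: native vs i2c */
        *read   = !!(req & DP_AUX_I2C_READ);      /* bit 0: read vs write */
    }
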
-static int dp_aux_write_ex(struct dp_aux *dp_aux, u32 addr, u32 len,
- enum aux_tx_mode mode, u8 *buf)
-{
- struct aux_cmd cmd = {0};
- struct dp_aux_private *aux;
-
- if (!dp_aux || !len) {
- pr_err("invalid input\n");
- return -EINVAL;
- }
-
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
- cmd.ex_mode = AUX_WRITE;
- cmd.tx_mode = mode;
- cmd.addr = addr;
- cmd.len = len;
- cmd.buf = buf;
-
- aux->cmds = &cmd;
-
- return dp_aux_write(aux);
-}
-
-static int dp_aux_read_ex(struct dp_aux *dp_aux, u32 addr, u32 len,
- enum aux_tx_mode mode, u8 **buf)
-{
- int rc = 0;
- struct aux_cmd cmd = {0};
- struct dp_aux_private *aux;
-
- if (!dp_aux || !len) {
- pr_err("invalid input\n");
- rc = -EINVAL;
- goto end;
- }
-
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
- cmd.ex_mode = AUX_READ;
- cmd.tx_mode = mode;
- cmd.addr = addr;
- cmd.len = len;
-
- aux->cmds = &cmd;
-
- rc = dp_aux_read(aux);
- if (rc <= 0) {
- rc = -EINVAL;
- goto end;
- }
-
- *buf = cmd.buf;
-end:
- return rc;
-}
-
-static int dp_aux_process(struct dp_aux *dp_aux, struct aux_cmd *cmds)
-{
- struct dp_aux_private *aux;
-
- if (!dp_aux || !cmds) {
- pr_err("invalid input\n");
- return -EINVAL;
- }
-
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
- aux->cmds = cmds;
-
- if (cmds->ex_mode == AUX_READ)
- return dp_aux_read(aux);
- else
- return dp_aux_write(aux);
-}
-
-static bool dp_aux_ready(struct dp_aux *dp_aux)
-{
- u8 data = 0;
- int count, ret;
- struct dp_aux_private *aux;
-
- if (!dp_aux) {
- pr_err("invalid input\n");
- goto error;
- }
-
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
- for (count = 5; count; count--) {
- ret = dp_aux_write_ex(dp_aux, 0x50, 1, AUX_I2C, &data);
- if (ret >= 0)
- break;
-
- msleep(100);
- }
-
- if (count <= 0) {
- pr_err("aux chan NOT ready\n");
- goto error;
- }
-
- return true;
-error:
- return false;
-}
-
static void dp_aux_init(struct dp_aux *dp_aux, u32 *aux_cfg)
{
struct dp_aux_private *aux;
@@ -535,6 +325,45 @@ static void dp_aux_deinit(struct dp_aux *dp_aux)
aux->catalog->enable(aux->catalog, false);
}
+static int dp_aux_register(struct dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+ int ret = 0;
+
+ if (!dp_aux) {
+ pr_err("invalid input\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ aux->drm_aux.name = "sde_dp_aux";
+ aux->drm_aux.dev = aux->dev;
+ aux->drm_aux.transfer = dp_aux_transfer;
+ ret = drm_dp_aux_register(&aux->drm_aux);
+ if (ret) {
+ pr_err("%s: failed to register drm aux: %d\n", __func__, ret);
+ goto exit;
+ }
+ dp_aux->drm_aux = &aux->drm_aux;
+exit:
+ return ret;
+}
+
+static void dp_aux_deregister(struct dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ if (!dp_aux) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ drm_dp_aux_unregister(&aux->drm_aux);
+}
+
struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog)
{
int rc = 0;
@@ -553,21 +382,19 @@ struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog)
goto error;
}
+ init_completion(&aux->comp);
+ aux->cmd_busy = false;
+ mutex_init(&aux->mutex);
+
aux->dev = dev;
-
- dp_aux_buf_set(aux);
-
aux->catalog = catalog;
-
dp_aux = &aux->dp_aux;
- dp_aux->process = dp_aux_process;
- dp_aux->read = dp_aux_read_ex;
- dp_aux->write = dp_aux_write_ex;
- dp_aux->ready = dp_aux_ready;
dp_aux->isr = dp_aux_isr;
dp_aux->init = dp_aux_init;
dp_aux->deinit = dp_aux_deinit;
+ dp_aux->drm_aux_register = dp_aux_register;
+ dp_aux->drm_aux_deregister = dp_aux_deregister;
return dp_aux;
error:
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 0603c15..f08c12b 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -16,6 +16,7 @@
#define _DP_AUX_H_
#include "dp_catalog.h"
+#include "drm_dp_helper.h"
enum dp_aux_error {
DP_AUX_ERR_NONE = 0,
@@ -26,32 +27,10 @@ enum dp_aux_error {
DP_AUX_ERR_NACK_DEFER = -5,
};
-enum aux_tx_mode {
- AUX_NATIVE,
- AUX_I2C,
-};
-
-enum aux_exe_mode {
- AUX_WRITE,
- AUX_READ,
-};
-
-struct aux_cmd {
- enum aux_exe_mode ex_mode;
- enum aux_tx_mode tx_mode;
- u32 addr;
- u32 len;
- u8 *buf;
- bool next;
-};
-
struct dp_aux {
- int (*process)(struct dp_aux *aux, struct aux_cmd *cmd);
- int (*write)(struct dp_aux *aux, u32 addr, u32 len,
- enum aux_tx_mode mode, u8 *buf);
- int (*read)(struct dp_aux *aux, u32 addr, u32 len,
- enum aux_tx_mode mode, u8 **buf);
- bool (*ready)(struct dp_aux *aux);
+ struct drm_dp_aux *drm_aux;
+ int (*drm_aux_register)(struct dp_aux *aux);
+ void (*drm_aux_deregister)(struct dp_aux *aux);
void (*isr)(struct dp_aux *aux);
void (*init)(struct dp_aux *aux, u32 *aux_cfg);
void (*deinit)(struct dp_aux *aux);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index ca55d16..9361b52 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -177,8 +177,6 @@
#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN (0x004)
-#define EDID_START_ADDRESS 0x50
-
/* DP MMSS_CC registers */
#define MMSS_DP_LINK_CMD_RCGR (0x0138)
#define MMSS_DP_LINK_CFG_RCGR (0x013C)
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 56f6052..888c511 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -20,14 +20,9 @@
#include "dp_ctrl.h"
-#define DP_LINK_RATE_MULTIPLIER 27000000
#define DP_KHZ_TO_HZ 1000
#define DP_CRYPTO_CLK_RATE_KHZ 180000
-/* sink power state */
-#define SINK_POWER_ON 1
-#define SINK_POWER_OFF 2
-
#define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0)
#define DP_CTRL_INTR_IDLE_PATTERN_SENT BIT(3)
@@ -103,14 +98,6 @@ static void dp_ctrl_video_ready(struct dp_ctrl_private *ctrl)
complete(&ctrl->video_comp);
}
-static void dp_ctrl_set_sink_power_state(struct dp_ctrl_private *ctrl,
- u8 power_state)
-{
- const int len = 1;
-
- ctrl->aux->write(ctrl->aux, 0x600, len, AUX_NATIVE, &power_state);
-}
-
static void dp_ctrl_state_ctrl(struct dp_ctrl_private *ctrl, u32 state)
{
ctrl->catalog->state_ctrl(ctrl->catalog, state);
@@ -128,7 +115,7 @@ static void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
- dp_ctrl_set_sink_power_state(ctrl, SINK_POWER_OFF);
+ drm_dp_link_power_down(ctrl->aux->drm_aux, &ctrl->panel->dp_link);
reinit_completion(&ctrl->idle_comp);
dp_ctrl_state_ctrl(ctrl, ST_PUSH_IDLE);
@@ -143,12 +130,13 @@ static void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
{
u32 config = 0, tbd;
+ u8 *dpcd = ctrl->panel->dpcd;
config |= (2 << 13); /* Default-> LSCLK DIV: 1/4 LCLK */
config |= (0 << 11); /* RGB */
/* Scrambler reset enable */
- if (ctrl->panel->dpcd.scrambler_reset)
+ if (dpcd[DP_EDP_CONFIGURATION_CAP] & DP_ALTERNATE_SCRAMBLER_RESET_CAP)
config |= (1 << 10);
tbd = ctrl->link->get_test_bits_depth(ctrl->link,
@@ -158,7 +146,7 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
/* Num of Lanes */
config |= ((ctrl->link->lane_count - 1) << 4);
- if (ctrl->panel->dpcd.enhanced_frame)
+ if (drm_dp_enhanced_frame_cap(dpcd))
config |= 0x40;
config |= 0x04; /* progressive video */
@@ -327,7 +315,7 @@ static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
even_distribution = 0;
min_hblank = 0;
- lclk = link_rate * DP_LINK_RATE_MULTIPLIER;
+ lclk = drm_dp_bw_code_to_link_rate(link_rate) * DP_KHZ_TO_HZ;
pr_debug("pclk=%lld, active_width=%d, h_blank=%d\n",
pclk, lwidth, h_blank);
@@ -763,7 +751,7 @@ static int dp_ctrl_update_sink_vx_px(struct dp_ctrl_private *ctrl,
buf[i] = voltage_level | pre_emphasis_level | max_level_reached;
pr_debug("p|v=0x%x\n", voltage_level | pre_emphasis_level);
- return ctrl->aux->write(ctrl->aux, 0x103, 4, AUX_NATIVE, buf);
+ return drm_dp_dpcd_write(ctrl->aux->drm_aux, 0x103, buf, 4);
}
static void dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
@@ -778,25 +766,6 @@ static void dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
dp_ctrl_update_sink_vx_px(ctrl, link->v_level, link->p_level);
}
-static void dp_ctrl_cap_lane_rate_set(struct dp_ctrl_private *ctrl)
-{
- u8 buf[4];
- struct dp_panel_dpcd *cap;
-
- cap = &ctrl->panel->dpcd;
-
- pr_debug("bw=%x lane=%d\n", ctrl->link->link_rate,
- ctrl->link->lane_count);
-
- buf[0] = ctrl->link->link_rate;
- buf[1] = ctrl->link->lane_count;
-
- if (cap->enhanced_frame)
- buf[1] |= 0x80;
-
- ctrl->aux->write(ctrl->aux, 0x100, 2, AUX_NATIVE, buf);
-}
-
static void dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
u8 pattern)
{
@@ -805,33 +774,39 @@ static void dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
pr_debug("pattern=%x\n", pattern);
buf[0] = pattern;
- ctrl->aux->write(ctrl->aux, 0x102, 1, AUX_NATIVE, buf);
+ drm_dp_dpcd_write(ctrl->aux->drm_aux, DP_TRAINING_PATTERN_SET, buf, 1);
}
static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl)
{
- int tries, old_v_level;
- int ret = 0;
- int usleep_time;
+ int tries, old_v_level, ret = 0, len = 0;
+ u8 link_status[DP_LINK_STATUS_SIZE];
int const maximum_retries = 5;
dp_ctrl_state_ctrl(ctrl, 0);
-
/* Make sure to clear the current pattern before starting a new one */
wmb();
ctrl->catalog->set_pattern(ctrl->catalog, 0x01);
- dp_ctrl_cap_lane_rate_set(ctrl);
- dp_ctrl_train_pattern_set(ctrl, 0x21); /* train_1 */
+ dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
+ DP_RECOVERED_CLOCK_OUT_EN); /* train_1 */
dp_ctrl_update_vx_px(ctrl);
tries = 0;
old_v_level = ctrl->link->v_level;
while (1) {
- usleep_time = ctrl->panel->dpcd.training_read_interval;
- usleep_range(usleep_time, usleep_time * 2);
+ drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd);
- if (ctrl->link->clock_recovery(ctrl->link)) {
+ len = drm_dp_dpcd_read_link_status(ctrl->aux->drm_aux,
+ link_status);
+ if (len < DP_LINK_STATUS_SIZE) {
+ pr_err("[%s]: DP link status read failed\n", __func__);
+ ret = -1;
+ break;
+ }
+
+ if (drm_dp_clock_recovery_ok(link_status,
+ ctrl->link->lane_count)) {
ret = 0;
break;
}
@@ -852,8 +827,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl)
old_v_level = ctrl->link->v_level;
}
- ctrl->link->adjust_levels(ctrl->link);
-
+ ctrl->link->adjust_levels(ctrl->link, link_status);
dp_ctrl_update_vx_px(ctrl);
}
@@ -869,15 +843,15 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
switch (ctrl->link->link_rate) {
case DP_LINK_RATE_810:
- ctrl->link->link_rate = DP_LINK_RATE_540;
+ ctrl->link->link_rate = DP_LINK_BW_5_4;
break;
- case DP_LINK_RATE_540:
- ctrl->link->link_rate = DP_LINK_RATE_270;
+ case DP_LINK_BW_5_4:
+ ctrl->link->link_rate = DP_LINK_BW_2_7;
break;
- case DP_LINK_RATE_270:
- ctrl->link->link_rate = DP_LINK_RATE_162;
+ case DP_LINK_BW_2_7:
+ ctrl->link->link_rate = DP_LINK_BW_1_62;
break;
- case DP_LINK_RATE_162:
+ case DP_LINK_BW_1_62:
default:
ret = -EINVAL;
break;
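
The down-shift table now walks the standard DPCD bandwidth codes rather than driver-private rate enums; the codes map to the link rate through a fixed 27 MHz multiplier, which is also why the clock setup further down can pass drm_dp_bw_code_to_link_rate() straight to the clock framework. For example:

    #include <drm/drm_dp_helper.h>

    /* drm_dp_bw_code_to_link_rate() returns bw_code * 27000, in kHz:
     *   DP_LINK_BW_1_62 (0x06) -> 162000
     *   DP_LINK_BW_2_7  (0x0a) -> 270000
     *   DP_LINK_BW_5_4  (0x14) -> 540000
     */
    u32 rate_khz = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);  /* 270000 */
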
@@ -890,36 +864,38 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
{
- int usleep_time;
-
dp_ctrl_train_pattern_set(ctrl, 0);
-
- usleep_time = ctrl->panel->dpcd.training_read_interval;
- usleep_range(usleep_time, usleep_time * 2);
+ drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
}
static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
{
- int tries = 0;
- int ret = 0;
- int usleep_time;
+ int tries = 0, ret = 0, len = 0;
char pattern;
int const maximum_retries = 5;
+ u8 link_status[DP_LINK_STATUS_SIZE];
- if (ctrl->panel->dpcd.flags & DPCD_TPS3)
- pattern = 0x03;
+ if (drm_dp_tps3_supported(ctrl->panel->dpcd))
+ pattern = DP_TRAINING_PATTERN_3;
else
- pattern = 0x02;
+ pattern = DP_TRAINING_PATTERN_2;
dp_ctrl_update_vx_px(ctrl);
ctrl->catalog->set_pattern(ctrl->catalog, pattern);
- dp_ctrl_train_pattern_set(ctrl, pattern | 0x20);
+ dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
do {
- usleep_time = ctrl->panel->dpcd.training_read_interval;
- usleep_range(usleep_time, usleep_time * 2);
+ drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
- if (ctrl->link->channel_equalization(ctrl->link)) {
+ len = drm_dp_dpcd_read_link_status(ctrl->aux->drm_aux,
+ link_status);
+ if (len < DP_LINK_STATUS_SIZE) {
+ pr_err("[%s]: DP link status read failed\n", __func__);
+ ret = -1;
+ break;
+ }
+
+ if (drm_dp_channel_eq_ok(link_status, ctrl->link->lane_count)) {
ret = 0;
break;
}
@@ -930,8 +906,7 @@ static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
}
tries++;
- ctrl->link->adjust_levels(ctrl->link);
-
+ ctrl->link->adjust_levels(ctrl->link, link_status);
dp_ctrl_update_vx_px(ctrl);
} while (1);
@@ -941,12 +916,7 @@ static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl)
{
int ret = 0;
-
- ret = ctrl->aux->ready(ctrl->aux);
- if (!ret) {
- pr_err("aux chan NOT ready\n");
- return ret;
- }
+ struct drm_dp_link dp_link;
ctrl->link->p_level = 0;
ctrl->link->v_level = 0;
@@ -954,6 +924,11 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl)
dp_ctrl_config_ctrl(ctrl);
dp_ctrl_state_ctrl(ctrl, 0);
+ dp_link.num_lanes = ctrl->link->lane_count;
+ dp_link.rate = ctrl->link->link_rate;
+ dp_link.capabilities = ctrl->panel->dp_link.capabilities;
+ drm_dp_link_configure(ctrl->aux->drm_aux, &dp_link);
+
ret = dp_ctrl_link_train_1(ctrl);
if (ret < 0) {
if (!dp_ctrl_link_rate_down_shift(ctrl)) {
@@ -1007,7 +982,7 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, bool train)
ctrl->catalog->mainlink_ctrl(ctrl->catalog, true);
- dp_ctrl_set_sink_power_state(ctrl, SINK_POWER_ON);
+ drm_dp_link_power_up(ctrl->aux->drm_aux, &ctrl->panel->dp_link);
if (ctrl->link->phy_pattern_requested(ctrl->link))
goto end;
@@ -1065,8 +1040,7 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
ctrl->power->set_pixel_clk_parent(ctrl->power);
dp_ctrl_set_clock_rate(ctrl, "ctrl_link_clk",
- (ctrl->link->link_rate * DP_LINK_RATE_MULTIPLIER) /
- DP_KHZ_TO_HZ);
+ drm_dp_bw_code_to_link_rate(ctrl->link->link_rate));
dp_ctrl_set_clock_rate(ctrl, "ctrl_crypto_clk", DP_CRYPTO_CLK_RATE_KHZ);
@@ -1208,7 +1182,7 @@ static int dp_ctrl_on_hpd(struct dp_ctrl_private *ctrl)
ctrl->catalog->hpd_config(ctrl->catalog, true);
ctrl->link->link_rate = ctrl->panel->get_link_rate(ctrl->panel);
- ctrl->link->lane_count = ctrl->panel->dpcd.max_lane_count;
+ ctrl->link->lane_count = ctrl->panel->dp_link.num_lanes;
ctrl->pixel_rate = ctrl->panel->pinfo.pixel_clk_khz;
pr_debug("link_rate=%d, lane_count=%d, pixel_rate=%d\n",
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 850acbf..d3f6bca 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -195,6 +195,18 @@ static int dp_display_bind(struct device *dev, struct device *master,
goto end;
}
+ rc = dp->aux->drm_aux_register(dp->aux);
+ if (rc) {
+ pr_err("DRM DP AUX register failed\n");
+ goto end;
+ }
+
+ rc = dp->panel->sde_edid_register(dp->panel);
+ if (rc) {
+ pr_err("DRM DP EDID register failed\n");
+ goto end;
+ }
+
rc = dp->power->power_client_init(dp->power, &priv->phandle);
if (rc) {
pr_err("Power client create failed\n");
@@ -227,6 +239,10 @@ static void dp_display_unbind(struct device *dev, struct device *master,
(void)dp->power->power_client_deinit(dp->power);
+ (void) dp->panel->sde_edid_deregister(dp->panel);
+
+ (void) dp->aux->drm_aux_deregister(dp->aux);
+
(void)dp_display_debugfs_deinit(dp);
mutex_unlock(&dp->lock);
@@ -245,9 +261,8 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
if (rc)
goto end;
- rc = dp->panel->read_edid(dp->panel);
- if (rc)
- goto end;
+ sde_get_edid(dp->dp_display.connector, &dp->aux->drm_aux->ddc,
+ (void **)&dp->panel->edid_ctrl);
return 0;
end:
@@ -256,6 +271,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
static int dp_display_process_hpd_low(struct dp_display_private *dp)
{
+ dp->dp_display.is_connected = false;
return 0;
}
@@ -290,6 +306,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
if (dp->usbpd->hpd_high)
dp_display_process_hpd_high(dp);
+ dp->dp_display.is_connected = true;
mutex_unlock(&dp->lock);
end:
@@ -315,6 +332,7 @@ static int dp_display_usbpd_disconnect_cb(struct device *dev)
}
mutex_lock(&dp->lock);
+ dp->dp_display.is_connected = false;
disable_irq(dp->irq);
mutex_unlock(&dp->lock);
@@ -573,33 +591,17 @@ static int dp_display_validate_mode(struct dp_display *dp,
return 0;
}
-static int dp_display_get_modes(struct dp_display *dp,
- struct dp_display_mode *modes, u32 *count)
+static int dp_display_get_modes(struct dp_display *dp)
{
- *count = 1;
+ int ret = 0;
+ struct dp_display_private *dp_display;
- if (modes) {
- modes->timing.h_active = 1920;
- modes->timing.v_active = 1080;
- modes->timing.h_back_porch = 148;
- modes->timing.h_front_porch = 88;
- modes->timing.h_sync_width = 44;
- modes->timing.h_active_low = 0;
- modes->timing.v_back_porch = 36;
- modes->timing.v_front_porch = 4;
- modes->timing.v_sync_width = 5;
- modes->timing.v_active_low = 0;
- modes->timing.h_skew = 0;
- modes->timing.refresh_rate = 60;
- modes->timing.pixel_clk_khz = 148500;
- }
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
- return 0;
-}
+ ret = _sde_edid_update_modes(dp->connector,
+ dp_display->panel->edid_ctrl);
-static int dp_display_detect(struct dp_display *dp)
-{
- return 0;
+ return ret;
}
static int dp_display_probe(struct platform_device *pdev)
@@ -637,7 +639,6 @@ static int dp_display_probe(struct platform_device *pdev)
g_dp_display->set_mode = dp_display_set_mode;
g_dp_display->validate_mode = dp_display_validate_mode;
g_dp_display->get_modes = dp_display_get_modes;
- g_dp_display->detect = dp_display_detect;
g_dp_display->prepare = dp_display_prepare;
g_dp_display->unprepare = dp_display_unprepare;
g_dp_display->request_irq = dp_request_irq;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index e684854..877287a 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -27,6 +27,8 @@ struct dp_display_mode {
struct dp_display {
struct drm_device *drm_dev;
struct dp_bridge *bridge;
+ struct drm_connector *connector;
+ bool is_connected;
int (*enable)(struct dp_display *dp_display);
int (*post_enable)(struct dp_display *dp_display);
@@ -38,11 +40,7 @@ struct dp_display {
struct dp_display_mode *mode);
int (*validate_mode)(struct dp_display *dp_display,
struct dp_display_mode *mode);
- int (*get_modes)(struct dp_display *dp_display,
- struct dp_display_mode *modes, u32 *count);
-
- int (*detect)(struct dp_display *dp_display);
-
+ int (*get_modes)(struct dp_display *dp_display);
int (*prepare)(struct dp_display *dp_display);
int (*unprepare)(struct dp_display *dp_display);
int (*request_irq)(struct dp_display *dp_display);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 0f6e36f..78c04c4 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -26,8 +26,10 @@
#define to_dp_bridge(x) container_of((x), struct dp_bridge, base)
static void convert_to_dp_mode(const struct drm_display_mode *drm_mode,
- struct dp_display_mode *dp_mode)
+ struct dp_display_mode *dp_mode, struct dp_display *dp)
{
+ const u32 num_components = 3;
+
memset(dp_mode, 0, sizeof(*dp_mode));
dp_mode->timing.h_active = drm_mode->hdisplay;
@@ -45,6 +47,7 @@ static void convert_to_dp_mode(const struct drm_display_mode *drm_mode,
dp_mode->timing.v_front_porch = drm_mode->vsync_start -
drm_mode->vdisplay;
+ dp_mode->timing.bpp = dp->connector->display_info.bpc * num_components;
dp_mode->timing.refresh_rate = drm_mode->vrefresh;
@@ -235,7 +238,7 @@ static void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
dp = bridge->display;
memset(&bridge->dp_mode, 0x0, sizeof(struct dp_display_mode));
- convert_to_dp_mode(adjusted_mode, &bridge->dp_mode);
+ convert_to_dp_mode(adjusted_mode, &bridge->dp_mode, dp);
}
static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge,
@@ -257,7 +260,7 @@ static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge,
bridge = to_dp_bridge(drm_bridge);
dp = bridge->display;
- convert_to_dp_mode(mode, &dp_mode);
+ convert_to_dp_mode(mode, &dp_mode, dp);
rc = dp->validate_mode(dp, &dp_mode);
if (rc) {
@@ -289,6 +292,7 @@ int dp_connector_post_init(struct drm_connector *connector,
if (!info || !dp_display)
return -EINVAL;
+ dp_display->connector = connector;
return 0;
}
@@ -315,7 +319,7 @@ int dp_connector_get_topology(const struct drm_display_mode *drm_mode,
int dp_connector_get_info(struct msm_display_info *info, void *data)
{
- struct dsi_display *display = data;
+ struct dp_display *display = data;
if (!info || !display) {
pr_err("invalid params\n");
@@ -326,17 +330,10 @@ int dp_connector_get_info(struct msm_display_info *info, void *data)
info->num_of_h_tiles = 1;
info->h_tile_instance[0] = 0;
-
- info->is_connected = true;
- info->frame_rate = 60;
- info->width_mm = 160;
- info->height_mm = 90;
- info->max_width = 1920;
- info->max_height = 1080;
- info->vtotal = 1125;
- info->is_primary = true;
+ info->is_connected = display->is_connected;
info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
- info->capabilities |= MSM_DISPLAY_CAP_VID_MODE;
+ info->capabilities = MSM_DISPLAY_CAP_VID_MODE | MSM_DISPLAY_CAP_EDID |
+ MSM_DISPLAY_CAP_HOT_PLUG;
return 0;
}
@@ -375,60 +372,23 @@ enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
int dp_connector_get_modes(struct drm_connector *connector,
void *display)
{
- u32 count = 0;
- u32 size = 0;
- struct dp_display_mode *modes;
- struct drm_display_mode drm_mode;
+ int rc = 0;
struct dp_display *dp;
- int rc, i;
- if (!connector || !display || sde_connector_get_panel(connector))
- goto end;
+ if (!connector || !display)
+ return -EINVAL;
dp = display;
-
- rc = dp->get_modes(dp, NULL, &count);
- if (rc) {
- pr_err("failed to get num of modes, rc=%d\n", rc);
- goto end;
+ /* pluggable case assumes EDID is read when HPD is asserted */
+ if (dp->is_connected) {
+ rc = dp->get_modes(dp);
+ if (!rc)
+ pr_err("failed to get DP sink modes, rc=%d\n", rc);
+ } else {
+ pr_err("No sink connected\n");
}
- size = count * sizeof(*modes);
- modes = kzalloc(size, GFP_KERNEL);
- if (!modes) {
- count = 0;
- goto end;
- }
-
- rc = dp->get_modes(dp, modes, &count);
- if (rc) {
- pr_err("failed to get modes, rc=%d\n", rc);
- count = 0;
- goto error;
- }
-
- for (i = 0; i < count; i++) {
- struct drm_display_mode *m;
-
- memset(&drm_mode, 0x0, sizeof(drm_mode));
- convert_to_drm_mode(&modes[i], &drm_mode);
- m = drm_mode_duplicate(connector->dev, &drm_mode);
- if (!m) {
- pr_err("failed to add mode %ux%u\n",
- drm_mode.hdisplay,
- drm_mode.vdisplay);
- count = -ENOMEM;
- goto error;
- }
- m->width_mm = connector->display_info.width_mm;
- m->height_mm = connector->display_info.height_mm;
- drm_mode_probed_add(connector, m);
- }
-error:
- kfree(modes);
-end:
- pr_debug("MODE COUNT =%d\n\n", count);
- return count;
+ return 0;
}
int dp_drm_bridge_init(void *data, struct drm_encoder *encoder)
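For context, a minimal sketch of the bpp derivation introduced above: the timing bpp now follows the sink's EDID-reported bit depth instead of a fixed 24 bpp. Here connector is assumed to be a valid drm_connector whose EDID has already been parsed:

	u32 bpc = connector->display_info.bpc;	/* per-component depth from EDID */
	u32 bpp = bpc * num_components;		/* RGB, 3 components: 8 bpc -> 24 bpp */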
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index e9955a9..741acfca 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -114,18 +114,6 @@ struct dp_link_sink_count {
bool cp_ready;
};
-struct dp_link_status {
- u8 lane_01_status;
- u8 lane_23_status;
- u8 interlane_align_done;
- u8 downstream_port_status_changed;
- u8 link_status_updated;
- u8 port_0_in_sync;
- u8 port_1_in_sync;
- u8 req_voltage_swing[4];
- u8 req_pre_emphasis[4];
-};
-
struct dp_link_private {
struct device *dev;
struct dp_aux *aux;
@@ -133,7 +121,7 @@ struct dp_link_private {
struct dp_link_request request;
struct dp_link_sink_count sink_count;
- struct dp_link_status link_status;
+ u8 link_status[DP_LINK_STATUS_SIZE];
};
/**
@@ -232,13 +220,12 @@ static int dp_link_get_period(struct dp_link_private *link, int const addr)
int ret = 0;
u8 *bp;
u8 data;
- int rlen;
u32 const param_len = 0x1;
u32 const max_audio_period = 0xA;
/* TEST_AUDIO_PERIOD_CH_XX */
- rlen = link->aux->read(link->aux, addr, param_len, AUX_NATIVE, &bp);
- if (rlen < param_len) {
+ if (drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp,
+ param_len) < param_len) {
pr_err("failed to read test_audio_period (0x%x)\n", addr);
ret = -EINVAL;
goto exit;
@@ -350,8 +337,8 @@ static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
int const max_audio_pattern_type = 0x1;
/* Read the requested audio pattern type (Byte 0x272). */
- rlen = link->aux->read(link->aux, test_audio_pattern_type_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux,
+ test_audio_pattern_type_addr, &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read link audio mode data\n");
ret = -EINVAL;
@@ -387,8 +374,8 @@ static int dp_link_parse_audio_mode(struct dp_link_private *link)
int channel_count = 0x0;
/* Read the requested audio mode (Byte 0x271). */
- rlen = link->aux->read(link->aux, test_audio_mode_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, test_audio_mode_addr,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read link audio mode data\n");
ret = -EINVAL;
@@ -555,7 +542,7 @@ static int dp_link_parse_timing_params1(struct dp_link_private *link,
return -EINVAL;
/* Read the requested video link pattern (Byte 0x221). */
- rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
if (rlen < len) {
pr_err("failed to read 0x%x\n", addr);
return -EINVAL;
@@ -576,7 +563,7 @@ static int dp_link_parse_timing_params2(struct dp_link_private *link,
return -EINVAL;
/* Read the requested video link pattern (Byte 0x221). */
- rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
if (rlen < len) {
pr_err("failed to read 0x%x\n", addr);
return -EINVAL;
@@ -596,7 +583,7 @@ static int dp_link_parse_timing_params3(struct dp_link_private *link,
int rlen;
/* Read the requested video link pattern (Byte 0x221). */
- rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
if (rlen < 1) {
pr_err("failed to read 0x%x\n", addr);
return -EINVAL;
@@ -625,8 +612,8 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
int const test_misc_addr = 0x232;
/* Read the requested video link pattern (Byte 0x221). */
- rlen = link->aux->read(link->aux, test_video_pattern_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, test_video_pattern_addr,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read link video pattern\n");
ret = -EINVAL;
@@ -647,8 +634,8 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
link->request.test_video_pattern));
/* Read the requested color bit depth and dynamic range (Byte 0x232) */
- rlen = link->aux->read(link->aux, test_misc_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, test_misc_addr,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read link bit depth\n");
ret = -EINVAL;
@@ -780,9 +767,9 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
*/
static bool dp_link_is_link_rate_valid(u32 link_rate)
{
- return ((link_rate == DP_LINK_RATE_162) ||
- (link_rate == DP_LINK_RATE_270) ||
- (link_rate == DP_LINK_RATE_540) ||
+ return ((link_rate == DP_LINK_BW_1_62) ||
+ (link_rate == DP_LINK_BW_2_7) ||
+ (link_rate == DP_LINK_BW_5_4) ||
(link_rate == DP_LINK_RATE_810));
}
@@ -814,12 +801,10 @@ static int dp_link_parse_link_training_params(struct dp_link_private *link)
int ret = 0;
int rlen;
int const param_len = 0x1;
- int const test_link_rate_addr = 0x219;
- int const test_lane_count_addr = 0x220;
/* Read the requested link rate (Byte 0x219). */
- rlen = link->aux->read(link->aux, test_link_rate_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_LINK_RATE,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read link rate\n");
ret = -EINVAL;
@@ -837,8 +822,8 @@ static int dp_link_parse_link_training_params(struct dp_link_private *link)
pr_debug("link rate = 0x%x\n", link->request.test_link_rate);
/* Read the requested lane count (Byte 0x220). */
- rlen = link->aux->read(link->aux, test_lane_count_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_LANE_COUNT,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read lane count\n");
ret = -EINVAL;
@@ -890,8 +875,8 @@ static int dp_link_parse_phy_test_params(struct dp_link_private *link)
int const phy_test_pattern_addr = 0x248;
int ret = 0;
- rlen = link->aux->read(link->aux, phy_test_pattern_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, phy_test_pattern_addr,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read phy link pattern\n");
ret = -EINVAL;
@@ -965,16 +950,14 @@ static int dp_link_parse_request(struct dp_link_private *link)
u8 data;
int rlen;
u32 const param_len = 0x1;
- u32 const device_service_irq_addr = 0x201;
- u32 const test_request_addr = 0x218;
u8 buf[4];
/**
* Read the device service IRQ vector (Byte 0x201) to determine
* whether an automated link has been requested by the sink.
*/
- rlen = link->aux->read(link->aux, device_service_irq_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR, &bp, param_len);
if (rlen < param_len) {
pr_err("aux read failed\n");
ret = -EINVAL;
@@ -994,8 +977,8 @@ static int dp_link_parse_request(struct dp_link_private *link)
* Read the link request byte (Byte 0x218) to determine what type
* of automated link has been requested by the sink.
*/
- rlen = link->aux->read(link->aux, test_request_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_REQUEST,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("aux read failed\n");
ret = -EINVAL;
@@ -1033,7 +1016,7 @@ static int dp_link_parse_request(struct dp_link_private *link)
end:
/* clear the link request IRQ */
buf[0] = 1;
- link->aux->write(link->aux, test_request_addr, 1, AUX_NATIVE, buf);
+ drm_dp_dpcd_write(link->aux->drm_aux, DP_TEST_REQUEST, buf, 1);
/**
* Send a TEST_ACK if all link parameters are valid, otherwise send
@@ -1060,10 +1043,9 @@ static void dp_link_parse_sink_count(struct dp_link_private *link)
u8 data;
int rlen;
int const param_len = 0x1;
- int const sink_count_addr = 0x200;
- rlen = link->aux->read(link->aux, sink_count_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_SINK_COUNT,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read sink count\n");
return;
@@ -1080,67 +1062,16 @@ static void dp_link_parse_sink_count(struct dp_link_private *link)
link->sink_count.count, link->sink_count.cp_ready);
}
-static int dp_link_link_status_read(struct dp_link_private *link)
-{
- u8 *bp;
- u8 data;
- int rlen, ret = 0;
- int const addr = 0x202;
- int const len = 6;
- struct dp_link_status *sp;
-
- rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
- if (rlen < len) {
- pr_err("edp aux read failed\n");
- ret = -EINVAL;
- goto error;
- }
-
- sp = &link->link_status;
-
- data = *bp++; /* byte 0x202 */
- sp->lane_01_status = data; /* lane 0, 1 */
-
- data = *bp++; /* byte 0x203 */
- sp->lane_23_status = data; /* lane 2, 3 */
-
- data = *bp++; /* byte 0x204 */
- sp->interlane_align_done = (data & BIT(0));
- sp->downstream_port_status_changed = (data & BIT(6));
- sp->link_status_updated = (data & BIT(7));
-
- data = *bp++; /* byte 0x205 */
- sp->port_0_in_sync = (data & BIT(0));
- sp->port_1_in_sync = (data & BIT(1));
-
- data = *bp++; /* byte 0x206 */
- sp->req_voltage_swing[0] = data & 0x03;
- data >>= 2;
- sp->req_pre_emphasis[0] = data & 0x03;
- data >>= 2;
- sp->req_voltage_swing[1] = data & 0x03;
- data >>= 2;
- sp->req_pre_emphasis[1] = data & 0x03;
-
- data = *bp++; /* byte 0x207 */
- sp->req_voltage_swing[2] = data & 0x03;
- data >>= 2;
- sp->req_pre_emphasis[2] = data & 0x03;
- data >>= 2;
- sp->req_voltage_swing[3] = data & 0x03;
- data >>= 2;
- sp->req_pre_emphasis[3] = data & 0x03;
-
- return 0;
-error:
- return ret;
-}
-
static void dp_link_parse_sink_status_field(struct dp_link_private *link)
{
+ int len = 0;
+
dp_link_parse_sink_count(link);
dp_link_parse_request(link);
- dp_link_link_status_read(link);
+ len = drm_dp_dpcd_read_link_status(link->aux->drm_aux,
+ link->link_status);
+ if (len < DP_LINK_STATUS_SIZE)
+ pr_err("DP link status read failed\n");
}
static bool dp_link_is_link_training_requested(struct dp_link_private *link)
@@ -1196,7 +1127,7 @@ static int dp_link_parse_vx_px(struct dp_link_private *link)
pr_debug("\n");
- rlen = link->aux->read(link->aux, addr1, param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr1, &bp, param_len);
if (rlen < param_len) {
pr_err("failed reading lanes 0/1\n");
ret = -EINVAL;
@@ -1217,7 +1148,7 @@ static int dp_link_parse_vx_px(struct dp_link_private *link)
p1 = data & 0x3;
data = data >> 2;
- rlen = link->aux->read(link->aux, addr2, param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr2, &bp, param_len);
if (rlen < param_len) {
pr_err("failed reading lanes 2/3\n");
ret = -EINVAL;
@@ -1294,76 +1225,6 @@ static int dp_link_process_phy_test_pattern_request(
return 0;
}
-static bool dp_link_is_link_status_updated(struct dp_link_private *link)
-{
- return link->link_status.link_status_updated;
-}
-
-static bool dp_link_channel_eq_done(struct dp_link_private *link)
-{
- u32 mask, data;
- struct dp_link *dp_link = &link->dp_link;
-
- pr_debug("\n");
-
- dp_link_link_status_read(link);
-
- if (!link->link_status.interlane_align_done) { /* not align */
- pr_err("interlane align failed\n");
- return 0;
- }
-
- if (dp_link->lane_count == 1) {
- mask = 0x7;
- data = link->link_status.lane_01_status;
- } else if (dp_link->lane_count == 2) {
- mask = 0x77;
- data = link->link_status.lane_01_status;
- } else {
- mask = 0x7777;
- data = link->link_status.lane_23_status;
- data <<= 8;
- data |= link->link_status.lane_01_status;
- }
-
- data &= mask;
- pr_debug("data=%x mask=%x\n", data, mask);
-
- if (data == mask)/* all done */
- return true;
-
- return false;
-}
-
-static bool dp_link_clock_recovery_done(struct dp_link_private *link)
-{
- u32 mask, data;
- struct dp_link *dp_link = &link->dp_link;
-
- dp_link_link_status_read(link);
-
- if (dp_link->lane_count == 1) {
- mask = 0x01; /* lane 0 */
- data = link->link_status.lane_01_status;
- } else if (dp_link->lane_count == 2) {
- mask = 0x011; /*B lane 0, 1 */
- data = link->link_status.lane_01_status;
- } else {
- mask = 0x01111; /*B lane 0, 1 */
- data = link->link_status.lane_23_status;
- data <<= 8;
- data |= link->link_status.lane_01_status;
- }
-
- data &= mask;
- pr_debug("data=%x mask=%x\n", data, mask);
-
- if (data == mask) /* all done */
- return true;
-
- return false;
-}
-
/**
* dp_link_process_link_status_update() - processes link status updates
* @link: Display Port link module data
@@ -1377,21 +1238,25 @@ static bool dp_link_clock_recovery_done(struct dp_link_private *link)
*/
static int dp_link_process_link_status_update(struct dp_link_private *link)
{
- if (!dp_link_is_link_status_updated(link) ||
- (dp_link_channel_eq_done(link) &&
- dp_link_clock_recovery_done(link)))
+ if (!(link->link_status[2] & BIT(7)) || /* link status updated */
+ (drm_dp_clock_recovery_ok(link->link_status,
+ link->dp_link.lane_count) &&
+ drm_dp_channel_eq_ok(link->link_status,
+ link->dp_link.lane_count)))
return -EINVAL;
pr_debug("channel_eq_done = %d, clock_recovery_done = %d\n",
- dp_link_channel_eq_done(link),
- dp_link_clock_recovery_done(link));
+ drm_dp_channel_eq_ok(link->link_status,
+ link->dp_link.lane_count),
+ drm_dp_clock_recovery_ok(link->link_status,
+ link->dp_link.lane_count));
return 0;
}
static bool dp_link_is_ds_port_status_changed(struct dp_link_private *link)
{
- return link->link_status.downstream_port_status_changed;
+ return (link->link_status[2] & BIT(6)); /* port status changed */
}
/**
@@ -1562,37 +1427,6 @@ static int dp_link_process_request(struct dp_link *dp_link)
return ret;
}
-static u8 *dp_link_get_voltage_swing(struct dp_link *dp_link)
-
-{
- struct dp_link_private *link;
-
- if (!dp_link) {
- pr_err("invalid input\n");
- return ERR_PTR(-EINVAL);
- }
-
- link = container_of(dp_link, struct dp_link_private, dp_link);
-
- return link->link_status.req_voltage_swing;
-}
-
-static u8 *dp_link_get_pre_emphasis(struct dp_link *dp_link)
-
-{
- struct dp_link_private *link;
-
-
- if (!dp_link) {
- pr_err("invalid input\n");
- return ERR_PTR(-EINVAL);
- }
-
- link = container_of(dp_link, struct dp_link_private, dp_link);
-
- return link->link_status.req_pre_emphasis;
-}
-
static int dp_link_get_colorimetry_config(struct dp_link *dp_link)
{
u32 cc;
@@ -1625,38 +1459,11 @@ static int dp_link_get_colorimetry_config(struct dp_link *dp_link)
return cc;
}
-static bool dp_link_clock_recovery(struct dp_link *dp_link)
-{
- struct dp_link_private *link;
-
- if (!dp_link) {
- pr_err("invalid input\n");
- return -EINVAL;
- }
-
- link = container_of(dp_link, struct dp_link_private, dp_link);
-
- return dp_link_clock_recovery_done(link);
-}
-
-static bool dp_link_channel_equalization(struct dp_link *dp_link)
-{
- struct dp_link_private *link;
-
- if (!dp_link) {
- pr_err("invalid input\n");
- return -EINVAL;
- }
-
- link = container_of(dp_link, struct dp_link_private, dp_link);
-
- return dp_link_channel_eq_done(link);
-}
-
-static int dp_link_adjust_levels(struct dp_link *dp_link)
+static int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
{
int i;
int max = 0;
+ u8 data;
struct dp_link_private *link;
if (!dp_link) {
@@ -1668,24 +1475,24 @@ static int dp_link_adjust_levels(struct dp_link *dp_link)
/* use the max level across lanes */
for (i = 0; i < dp_link->lane_count; i++) {
- pr_debug("lane=%d req_voltage_swing=%d\n",
- i, link->link_status.req_voltage_swing[i]);
- if (max < link->link_status.req_voltage_swing[i])
- max = link->link_status.req_voltage_swing[i];
+ data = drm_dp_get_adjust_request_voltage(link_status, i);
+ pr_debug("lane=%d req_voltage_swing=%d\n", i, data);
+ if (max < data)
+ max = data;
}
- dp_link->v_level = max;
+ dp_link->v_level = max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
/* use the max level across lanes */
max = 0;
for (i = 0; i < dp_link->lane_count; i++) {
- pr_debug("lane=%d req_pre_emphasis=%d\n",
- i, link->link_status.req_pre_emphasis[i]);
- if (max < link->link_status.req_pre_emphasis[i])
- max = link->link_status.req_pre_emphasis[i];
+ data = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+ pr_debug("lane=%d req_pre_emphasis=%d\n", i, data);
+ if (max < data)
+ max = data;
}
- dp_link->p_level = max;
+ dp_link->p_level = max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
/**
* Adjust the voltage swing and pre-emphasis level combination to within
@@ -1781,12 +1588,8 @@ struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux)
dp_link = &link->dp_link;
dp_link->process_request = dp_link_process_request;
- dp_link->get_voltage_swing = dp_link_get_voltage_swing;
dp_link->get_test_bits_depth = dp_link_get_test_bits_depth;
- dp_link->get_pre_emphasis = dp_link_get_pre_emphasis;
dp_link->get_colorimetry_config = dp_link_get_colorimetry_config;
- dp_link->clock_recovery = dp_link_clock_recovery;
- dp_link->channel_equalization = dp_link_channel_equalization;
dp_link->adjust_levels = dp_link_adjust_levels;
dp_link->send_psm_request = dp_link_send_psm_request;
dp_link->phy_pattern_requested = dp_link_phy_pattern_requested;
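As a rough sketch of the drm_dp_* helper pattern these hunks switch to, assuming a valid struct drm_dp_aux *drm_aux and a negotiated lane_count (both placeholders here, not part of the patch):

	u8 link_status[DP_LINK_STATUS_SIZE];
	u8 sink_count;

	if (drm_dp_dpcd_readb(drm_aux, DP_SINK_COUNT, &sink_count) < 1)
		return -EIO;

	if (drm_dp_dpcd_read_link_status(drm_aux, link_status) < DP_LINK_STATUS_SIZE)
		return -EIO;

	/* retrain when clock recovery or channel EQ has been lost */
	if (!drm_dp_clock_recovery_ok(link_status, lane_count) ||
	    !drm_dp_channel_eq_ok(link_status, lane_count))
		return -EAGAIN;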
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index de10e9a..26249d6 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -54,15 +54,11 @@ struct dp_link {
u32 v_level;
u32 p_level;
- u8 *(*get_voltage_swing)(struct dp_link *dp_link);
- u8 *(*get_pre_emphasis)(struct dp_link *dp_link);
u32 (*get_test_bits_depth)(struct dp_link *dp_link, u32 bpp);
int (*process_request)(struct dp_link *dp_link);
int (*get_colorimetry_config)(struct dp_link *dp_link);
- int (*adjust_levels)(struct dp_link *dp_link);
+ int (*adjust_levels)(struct dp_link *dp_link, u8 *link_status);
int (*send_psm_request)(struct dp_link *dp_link, bool req);
- bool (*clock_recovery)(struct dp_link *dp_link);
- bool (*channel_equalization)(struct dp_link *dp_link);
bool (*phy_pattern_requested)(struct dp_link *dp_link);
};
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index f9616c4..fed1dbb 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -16,7 +16,9 @@
#include "dp_panel.h"
-#define DP_LINK_RATE_MULTIPLIER 27000000
+enum {
+ DP_LINK_RATE_MULTIPLIER = 27000000,
+};
struct dp_panel_private {
struct device *dev;
@@ -27,13 +29,10 @@ struct dp_panel_private {
static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
{
- u8 *bp;
- u8 data;
- u32 const addr = 0x0;
- u32 const len = 16;
int rlen, rc = 0;
struct dp_panel_private *panel;
- struct dp_panel_dpcd *cap;
+ struct drm_dp_link *dp_link;
+ u8 major = 0, minor = 0;
if (!dp_panel) {
pr_err("invalid input\n");
@@ -41,236 +40,38 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
goto end;
}
- cap = &dp_panel->dpcd;
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ dp_link = &dp_panel->dp_link;
- rlen = panel->aux->read(panel->aux, addr, len, AUX_NATIVE, &bp);
- if (rlen != len) {
+ rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_DPCD_REV,
+ dp_panel->dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+ if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
pr_err("dpcd read failed, rlen=%d\n", rlen);
rc = -EINVAL;
goto end;
}
- memset(cap, 0, sizeof(*cap));
+ dp_link->revision = dp_panel->dpcd[DP_DPCD_REV];
- data = *bp++; /* byte 0 */
- cap->major = (data >> 4) & 0x0f;
- cap->minor = data & 0x0f;
- pr_debug("version: %d.%d\n", cap->major, cap->minor);
+ major = (dp_link->revision >> 4) & 0x0f;
+ minor = dp_link->revision & 0x0f;
+ pr_debug("version: %d.%d\n", major, minor);
- data = *bp++; /* byte 1 */
- /* 162, 270, 540, 810 MB, symbol rate, NOT bit rate */
- cap->max_link_rate = data;
- pr_debug("link_rate=%d\n", cap->max_link_rate);
+ dp_link->rate =
+ drm_dp_bw_code_to_link_rate(dp_panel->dpcd[DP_MAX_LINK_RATE]);
+ pr_debug("link_rate=%d\n", dp_link->rate);
- data = *bp++; /* byte 2 */
- if (data & BIT(7))
- cap->enhanced_frame++;
+ dp_link->num_lanes = dp_panel->dpcd[DP_MAX_LANE_COUNT] &
+ DP_MAX_LANE_COUNT_MASK;
+ pr_debug("lane_count=%d\n", dp_link->num_lanes);
- if (data & 0x40) {
- cap->flags |= DPCD_TPS3;
- pr_debug("pattern 3 supported\n");
- } else {
- pr_debug("pattern 3 not supported\n");
- }
+ if (dp_panel->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+ dp_link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
- data &= 0x0f;
- cap->max_lane_count = data;
- pr_debug("lane_count=%d\n", cap->max_lane_count);
-
- data = *bp++; /* byte 3 */
- if (data & BIT(0)) {
- cap->flags |= DPCD_MAX_DOWNSPREAD_0_5;
- pr_debug("max_downspread\n");
- }
-
- if (data & BIT(6)) {
- cap->flags |= DPCD_NO_AUX_HANDSHAKE;
- pr_debug("NO Link Training\n");
- }
-
- data = *bp++; /* byte 4 */
- cap->num_rx_port = (data & BIT(0)) + 1;
- pr_debug("rx_ports=%d", cap->num_rx_port);
-
- data = *bp++; /* Byte 5: DOWN_STREAM_PORT_PRESENT */
- cap->downstream_port.dfp_present = data & BIT(0);
- cap->downstream_port.dfp_type = data & 0x6;
- cap->downstream_port.format_conversion = data & BIT(3);
- cap->downstream_port.detailed_cap_info_available = data & BIT(4);
- pr_debug("dfp_present = %d, dfp_type = %d\n",
- cap->downstream_port.dfp_present,
- cap->downstream_port.dfp_type);
- pr_debug("format_conversion = %d, detailed_cap_info_available = %d\n",
- cap->downstream_port.format_conversion,
- cap->downstream_port.detailed_cap_info_available);
-
- bp += 1; /* Skip Byte 6 */
- rlen -= 1;
-
- data = *bp++; /* Byte 7: DOWN_STREAM_PORT_COUNT */
- cap->downstream_port.dfp_count = data & 0x7;
- cap->downstream_port.msa_timing_par_ignored = data & BIT(6);
- cap->downstream_port.oui_support = data & BIT(7);
- pr_debug("dfp_count = %d, msa_timing_par_ignored = %d\n",
- cap->downstream_port.dfp_count,
- cap->downstream_port.msa_timing_par_ignored);
- pr_debug("oui_support = %d\n", cap->downstream_port.oui_support);
-
- data = *bp++; /* byte 8 */
- if (data & BIT(1)) {
- cap->flags |= DPCD_PORT_0_EDID_PRESENTED;
- pr_debug("edid presented\n");
- }
-
- data = *bp++; /* byte 9 */
- cap->rx_port0_buf_size = (data + 1) * 32;
- pr_debug("lane_buf_size=%d\n", cap->rx_port0_buf_size);
-
- bp += 2; /* skip 10, 11 port1 capability */
- rlen -= 2;
-
- data = *bp++; /* byte 12 */
- cap->i2c_speed_ctrl = data;
- if (cap->i2c_speed_ctrl > 0)
- pr_debug("i2c_rate=%d", cap->i2c_speed_ctrl);
-
- data = *bp++; /* byte 13 */
- cap->scrambler_reset = data & BIT(0);
- pr_debug("scrambler_reset=%d\n", cap->scrambler_reset);
-
- if (data & BIT(1))
- cap->enhanced_frame++;
-
- pr_debug("enhanced_framing=%d\n", cap->enhanced_frame);
-
- data = *bp++; /* byte 14 */
- if (data == 0)
- cap->training_read_interval = 4000; /* us */
- else
- cap->training_read_interval = 4000 * data; /* us */
- pr_debug("training_interval=%d\n", cap->training_read_interval);
end:
return rc;
}
-/*
- * edid standard header bytes
- */
-static u8 edid_hdr[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
-
-static bool dp_panel_is_edid_header_valid(u8 *buf)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(edid_hdr); i++) {
- if (buf[i] != edid_hdr[i])
- return false;
- }
-
- return true;
-}
-
-static int dp_panel_validate_edid(u8 *bp, int len)
-{
- int i;
- u8 csum = 0;
- u32 const size = 128;
-
- if (len < size) {
- pr_err("Error: len=%x\n", len);
- return -EINVAL;
- }
-
- for (i = 0; i < size; i++)
- csum += *bp++;
-
- if (csum != 0) {
- pr_err("error: csum=0x%x\n", csum);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int dp_panel_read_edid(struct dp_panel *dp_panel)
-{
- u8 *edid_buf;
- u32 checksum = 0;
- int rlen, ret = 0;
- int edid_blk = 0, blk_num = 0, retries = 10;
- u32 const segment_addr = 0x30;
- bool edid_parsing_done = false;
- struct dp_panel_private *panel;
-
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
- ret = panel->aux->ready(panel->aux);
- if (!ret) {
- pr_err("aux chan NOT ready\n");
- goto end;
- }
-
- do {
- u8 segment;
-
-
- /*
- * Write the segment first.
- * Segment = 0, for blocks 0 and 1
- * Segment = 1, for blocks 2 and 3
- * Segment = 2, for blocks 3 and 4
- * and so on ...
- */
- segment = blk_num >> 1;
-
- panel->aux->write(panel->aux, segment_addr, 1, AUX_I2C,
- &segment);
-
- rlen = panel->aux->read(panel->aux, EDID_START_ADDRESS +
- (blk_num * EDID_BLOCK_SIZE),
- EDID_BLOCK_SIZE, AUX_I2C, &edid_buf);
- if (rlen != EDID_BLOCK_SIZE) {
- pr_err("invalid edid len: %d\n", rlen);
- continue;
- }
-
- pr_debug("=== EDID data ===\n");
- print_hex_dump(KERN_DEBUG, "EDID: ", DUMP_PREFIX_NONE, 16, 1,
- edid_buf, EDID_BLOCK_SIZE, false);
-
- pr_debug("blk_num=%d, rlen=%d\n", blk_num, rlen);
-
- if (dp_panel_is_edid_header_valid(edid_buf)) {
- ret = dp_panel_validate_edid(edid_buf, rlen);
- if (ret) {
- pr_err("corrupt edid block detected\n");
- goto end;
- }
-
- if (edid_parsing_done) {
- blk_num++;
- continue;
- }
-
- dp_panel->edid.ext_block_cnt = edid_buf[0x7E];
- edid_parsing_done = true;
- checksum = edid_buf[rlen - 1];
- } else {
- edid_blk++;
- blk_num++;
- }
-
- memcpy(dp_panel->edid.buf + (edid_blk * EDID_BLOCK_SIZE),
- edid_buf, EDID_BLOCK_SIZE);
-
- if (edid_blk == dp_panel->edid.ext_block_cnt)
- goto end;
- } while (retries--);
-end:
- return ret;
-}
-
static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
{
int rc = 0;
@@ -334,6 +135,36 @@ static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
return rc;
}
+static int dp_panel_edid_register(struct dp_panel *dp_panel)
+{
+ int rc = 0;
+
+ if (!dp_panel) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ dp_panel->edid_ctrl = sde_edid_init();
+ if (!dp_panel->edid_ctrl) {
+ pr_err("sde edid init for DP failed\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+end:
+ return rc;
+}
+
+static void dp_panel_edid_deregister(struct dp_panel *dp_panel)
+{
+ if (!dp_panel) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ sde_edid_deinit((void **)&dp_panel->edid_ctrl);
+}
+
static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
{
int rc = 0;
@@ -350,12 +181,12 @@ static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
return rc;
}
-static u8 dp_panel_get_link_rate(struct dp_panel *dp_panel)
+static u32 dp_panel_get_link_rate(struct dp_panel *dp_panel)
{
const u32 encoding_factx10 = 8;
const u32 ln_to_link_ratio = 10;
u32 min_link_rate, reminder = 0;
- u8 calc_link_rate = 0, lane_cnt;
+ u32 calc_link_rate = 0, lane_cnt, max_rate = 0;
struct dp_panel_private *panel;
struct dp_panel_info *pinfo;
@@ -366,11 +197,10 @@ static u8 dp_panel_get_link_rate(struct dp_panel *dp_panel)
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
- lane_cnt = dp_panel->dpcd.max_lane_count;
+ lane_cnt = dp_panel->dp_link.num_lanes;
+ max_rate = drm_dp_link_rate_to_bw_code(dp_panel->dp_link.rate);
pinfo = &dp_panel->pinfo;
- pinfo->bpp = 24;
-
/*
* The max pixel clock supported is 675Mhz. The
* current calculations below will make sure
@@ -393,12 +223,12 @@ static u8 dp_panel_get_link_rate(struct dp_panel *dp_panel)
min_link_rate += 1;
pr_debug("min_link_rate = %d\n", min_link_rate);
- if (min_link_rate <= DP_LINK_RATE_162)
- calc_link_rate = DP_LINK_RATE_162;
- else if (min_link_rate <= DP_LINK_RATE_270)
- calc_link_rate = DP_LINK_RATE_270;
- else if (min_link_rate <= DP_LINK_RATE_540)
- calc_link_rate = DP_LINK_RATE_540;
+ if (min_link_rate <= DP_LINK_BW_1_62)
+ calc_link_rate = DP_LINK_BW_1_62;
+ else if (min_link_rate <= DP_LINK_BW_2_7)
+ calc_link_rate = DP_LINK_BW_2_7;
+ else if (min_link_rate <= DP_LINK_BW_5_4)
+ calc_link_rate = DP_LINK_BW_5_4;
else if (min_link_rate <= DP_LINK_RATE_810)
calc_link_rate = DP_LINK_RATE_810;
else {
@@ -407,8 +237,8 @@ static u8 dp_panel_get_link_rate(struct dp_panel *dp_panel)
calc_link_rate = DP_LINK_RATE_810;
}
- if (calc_link_rate > dp_panel->dpcd.max_link_rate)
- calc_link_rate = dp_panel->dpcd.max_link_rate;
+ if (calc_link_rate > max_rate)
+ calc_link_rate = max_rate;
pr_debug("calc_link_rate = 0x%x\n", calc_link_rate);
end:
@@ -440,12 +270,10 @@ struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
dp_panel = &panel->dp_panel;
- dp_panel->edid.buf = devm_kzalloc(dev,
- sizeof(EDID_BLOCK_SIZE) * 4, GFP_KERNEL);
-
+ dp_panel->sde_edid_register = dp_panel_edid_register;
+ dp_panel->sde_edid_deregister = dp_panel_edid_deregister;
dp_panel->init_info = dp_panel_init_panel_info;
dp_panel->timing_cfg = dp_panel_timing_cfg;
- dp_panel->read_edid = dp_panel_read_edid;
dp_panel->read_dpcd = dp_panel_read_dpcd;
dp_panel->get_link_rate = dp_panel_get_link_rate;
@@ -463,6 +291,5 @@ void dp_panel_put(struct dp_panel *dp_panel)
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
- devm_kfree(panel->dev, dp_panel->edid.buf);
devm_kfree(panel->dev, panel);
}
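A condensed sketch of the new capability-parsing path using only generic DRM DP helpers; drm_aux, rate, lanes and enhanced_framing are placeholder names for this illustration, not part of the patch:

	u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
	u32 rate, lanes;
	bool enhanced_framing;
	int rlen;

	rlen = drm_dp_dpcd_read(drm_aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
	if (rlen < (int)sizeof(dpcd))
		return -EINVAL;

	rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);	/* kHz */
	lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
	enhanced_framing = !!(dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);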
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 5c145eb..5852c70 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -16,66 +16,9 @@
#define _DP_PANEL_H_
#include "dp_aux.h"
+#include "sde_edid_parser.h"
-#define DPCD_ENHANCED_FRAME BIT(0)
-#define DPCD_TPS3 BIT(1)
-#define DPCD_MAX_DOWNSPREAD_0_5 BIT(2)
-#define DPCD_NO_AUX_HANDSHAKE BIT(3)
-#define DPCD_PORT_0_EDID_PRESENTED BIT(4)
-
-#define EDID_START_ADDRESS 0x50
-#define EDID_BLOCK_SIZE 0x80
-
-
-#define DP_LINK_RATE_162 6 /* 1.62G = 270M * 6 */
-#define DP_LINK_RATE_270 10 /* 2.70G = 270M * 10 */
-#define DP_LINK_RATE_540 20 /* 5.40G = 270M * 20 */
#define DP_LINK_RATE_810 30 /* 8.10G = 270M * 30 */
-#define DP_LINK_RATE_MAX DP_LINK_RATE_810
-
-struct downstream_port_config {
- /* Byte 02205h */
- bool dfp_present;
- u32 dfp_type;
- bool format_conversion;
- bool detailed_cap_info_available;
- /* Byte 02207h */
- u32 dfp_count;
- bool msa_timing_par_ignored;
- bool oui_support;
-};
-
-struct dp_panel_dpcd {
- u8 major;
- u8 minor;
- u8 max_lane_count;
- u8 num_rx_port;
- u8 i2c_speed_ctrl;
- u8 scrambler_reset;
- u8 enhanced_frame;
- u32 max_link_rate; /* 162, 270 and 540 Mb, divided by 10 */
- u32 flags;
- u32 rx_port0_buf_size;
- u32 training_read_interval;/* us */
- struct downstream_port_config downstream_port;
-};
-
-struct dp_panel_edid {
- u8 *buf;
- u8 id_name[4];
- u8 id_product;
- u8 version;
- u8 revision;
- u8 video_intf; /* dp == 0x5 */
- u8 color_depth; /* 6, 8, 10, 12 and 14 bits */
- u8 color_format; /* RGB 4:4:4, YCrCb 4:4:4, Ycrcb 4:2:2 */
- u8 dpm; /* display power management */
- u8 sync_digital; /* 1 = digital */
- u8 sync_separate; /* 1 = separate */
- u8 vsync_pol; /* 0 = negative, 1 = positive */
- u8 hsync_pol; /* 0 = negative, 1 = positive */
- u8 ext_block_cnt;
-};
struct dp_panel_info {
u32 h_active;
@@ -95,17 +38,21 @@ struct dp_panel_info {
};
struct dp_panel {
- struct dp_panel_dpcd dpcd;
- struct dp_panel_edid edid;
+ /* dpcd raw data */
+ u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
+ struct drm_dp_link dp_link;
+
+ struct sde_edid_ctrl *edid_ctrl;
struct dp_panel_info pinfo;
u32 vic;
+ int (*sde_edid_register)(struct dp_panel *dp_panel);
+ void (*sde_edid_deregister)(struct dp_panel *dp_panel);
int (*init_info)(struct dp_panel *dp_panel);
int (*timing_cfg)(struct dp_panel *dp_panel);
- int (*read_edid)(struct dp_panel *dp_panel);
int (*read_dpcd)(struct dp_panel *dp_panel);
- u8 (*get_link_rate)(struct dp_panel *dp_panel);
+ u32 (*get_link_rate)(struct dp_panel *dp_panel);
};
struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 39b797e..da7a7c0 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -879,14 +879,12 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
int rc = 0;
struct mipi_dsi_packet packet;
struct dsi_ctrl_cmd_dma_fifo_info cmd;
+ struct dsi_ctrl_cmd_dma_info cmd_mem;
u32 hw_flags = 0;
u32 length = 0;
u8 *buffer = NULL;
-
- if (!(flags & DSI_CTRL_CMD_FIFO_STORE)) {
- pr_err("Memory DMA is not supported, use FIFO\n");
- goto error;
- }
+ u32 cnt = 0;
+ u8 *cmdbuf;
rc = mipi_dsi_create_packet(&packet, msg);
if (rc) {
@@ -894,7 +892,32 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
goto error;
}
- if (flags & DSI_CTRL_CMD_FIFO_STORE) {
+ if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
+ rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl,
+ &packet,
+ &buffer,
+ &length);
+
+ if (rc) {
+ pr_err("[%s] failed to copy message, rc=%d\n",
+ dsi_ctrl->name, rc);
+ goto error;
+ }
+
+ cmd_mem.offset = dsi_ctrl->cmd_buffer_iova;
+ cmd_mem.length = length;
+ cmd_mem.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ?
+ true : false;
+ cmd_mem.is_master = (flags & DSI_CTRL_CMD_BROADCAST_MASTER) ?
+ true : false;
+ cmd_mem.use_lpm = (msg->flags & MIPI_DSI_MSG_USE_LPM) ?
+ true : false;
+
+ cmdbuf = (u8 *)(dsi_ctrl->vaddr);
+ for (cnt = 0; cnt < length; cnt++)
+ cmdbuf[cnt] = buffer[cnt];
+
+ } else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl,
&packet,
&buffer,
@@ -920,10 +943,15 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER))
reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
- if (flags & DSI_CTRL_CMD_FIFO_STORE)
+ if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
+ dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw,
+ &cmd_mem,
+ hw_flags);
+ } else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
&cmd,
hw_flags);
+ }
if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
u32 retry = 10;
@@ -2171,14 +2199,14 @@ int dsi_ctrl_set_vid_engine_state(struct dsi_ctrl *dsi_ctrl,
}
/**
- * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
- * @dsi_ctrl: DSI controller handle.
- * @enable: enable/disable ULPS.
- *
- * ULPS can be enabled/disabled after DSI host engine is turned on.
- *
- * Return: error code.
- */
+ * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: enable/disable ULPS.
+ *
+ * ULPS can be enabled/disabled after DSI host engine is turned on.
+ *
+ * Return: error code.
+ */
int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable)
{
int rc = 0;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index f89cb68..7f36fde 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -33,12 +33,15 @@
* @DSI_CTRL_CMD_DEFER_TRIGGER: Defer the command trigger to later.
* @DSI_CTRL_CMD_FIFO_STORE: Use FIFO for command transfer in place of
* reading data from memory.
+ * @DSI_CTRL_CMD_FETCH_MEMORY: Fetch command from memory through AXI bus
+ * and transfer it.
*/
#define DSI_CTRL_CMD_READ 0x1
#define DSI_CTRL_CMD_BROADCAST 0x2
#define DSI_CTRL_CMD_BROADCAST_MASTER 0x4
#define DSI_CTRL_CMD_DEFER_TRIGGER 0x8
#define DSI_CTRL_CMD_FIFO_STORE 0x10
+#define DSI_CTRL_CMD_FETCH_MEMORY 0x20
/**
* enum dsi_power_state - defines power states for dsi controller.
@@ -188,6 +191,8 @@ struct dsi_ctrl_interrupts {
* @roi: Partial update region of interest.
* Origin is top left of this CTRL.
* @tx_cmd_buf: Tx command buffer.
+ * @cmd_buffer_iova: cmd buffer mapped address.
+ * @vaddr: CPU virtual address of cmd buffer.
* @cmd_buffer_size: Size of command buffer.
* @debugfs_root: Root for debugfs entries.
*/
@@ -221,6 +226,8 @@ struct dsi_ctrl {
/* Command tx and rx */
struct drm_gem_object *tx_cmd_buf;
u32 cmd_buffer_size;
+ u32 cmd_buffer_iova;
+ void *vaddr;
/* Debug Information */
struct dentry *debugfs_root;
@@ -377,14 +384,14 @@ int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl);
int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl);
/**
- * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
- * @dsi_ctrl: DSI controller handle.
- * @enable: enable/disable ULPS.
- *
- * ULPS can be enabled/disabled after DSI host engine is turned on.
- *
- * Return: error code.
- */
+ * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: enable/disable ULPS.
+ *
+ * ULPS can be enabled/disabled after DSI host engine is turned on.
+ *
+ * Return: error code.
+ */
int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable);
/**
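To illustrate the new flag, a sketch of how a caller requests the memory-fetch DMA path instead of the FIFO path; ctrl, msg, broadcast and rc stand in for the caller's context:

	u32 flags = DSI_CTRL_CMD_FETCH_MEMORY;	/* DMA from the mapped command buffer */

	if (broadcast)
		flags |= DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_DEFER_TRIGGER;

	rc = dsi_ctrl_cmd_transfer(ctrl, msg, flags);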
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index c2cf2cb..133dc93 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/of.h>
+#include <linux/err.h>
#include "msm_drv.h"
#include "dsi_display.h"
@@ -27,10 +28,14 @@
#include "dsi_pwr.h"
#define to_dsi_display(x) container_of(x, struct dsi_display, host)
+#define INT_BASE_10 10
static DEFINE_MUTEX(dsi_display_list_lock);
static LIST_HEAD(dsi_display_list);
-
+static char dsi_display_primary[MAX_CMDLINE_PARAM_LEN];
+static char dsi_display_secondary[MAX_CMDLINE_PARAM_LEN];
+static struct dsi_display_boot_param boot_displays[MAX_DSI_ACTIVE_DISPLAY];
+static struct device_node *default_active_node;
static const struct of_device_id dsi_display_dt_match[] = {
{.compatible = "qcom,dsi-display"},
{}
@@ -553,6 +558,184 @@ static int dsi_display_ctrl_power_off(struct dsi_display *display)
return rc;
}
+static int dsi_display_parse_cmdline_topology(unsigned int display_type)
+{
+ char *str = NULL;
+ int top_index = -1;
+
+ if (display_type >= MAX_DSI_ACTIVE_DISPLAY) {
+ pr_err("display_type=%d not supported\n", display_type);
+ return -EINVAL;
+ }
+ if (display_type == DSI_PRIMARY)
+ str = strnstr(dsi_display_primary,
+ ":config", strlen(dsi_display_primary));
+ else
+ str = strnstr(dsi_display_secondary,
+ ":config", strlen(dsi_display_secondary));
+ if (!str)
+ return -EINVAL;
+
+ if (kstrtol(str + strlen(":config"), INT_BASE_10,
+ (unsigned long *)&top_index))
+ return -EINVAL;
+
+ return top_index;
+}
+
+/**
+ * dsi_display_name_compare()- compare whether DSI display name matches.
+ * @node: Pointer to device node structure
+ * @display_name: Name of display to validate
+ * @index: Index of the boot display entry to compare against
+ *
+ * Return: returns a bool specifying whether given display is active
+ */
+static bool dsi_display_name_compare(struct device_node *node,
+ const char *display_name, int index)
+{
+ if (index >= MAX_DSI_ACTIVE_DISPLAY) {
+ pr_err("Invalid Index\n");
+ return false;
+ }
+
+ if (boot_displays[index].boot_disp_en) {
+ if (!(strcmp(&boot_displays[index].name[0], display_name))) {
+ boot_displays[index].node = node;
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ * dsi_display_parse_boot_display_selection()- Parse DSI boot display name
+ *
+ * Return: returns error status
+ */
+static int dsi_display_parse_boot_display_selection(void)
+{
+ char *pos = NULL;
+ char disp_buf[MAX_CMDLINE_PARAM_LEN] = {'\0'};
+ int i, j, num_displays;
+
+ if (strlen(dsi_display_primary) == 0)
+ return -EINVAL;
+
+ if ((strlen(dsi_display_secondary) > 0))
+ num_displays = MAX_DSI_ACTIVE_DISPLAY;
+ else {
+ /*
+ * Initialize secondary dsi variables
+ * for the scenario where dsi_display1
+ * is null but dsi_display0 is valid
+ */
+
+ /* Max number of displays will be one, i.e. only the primary */
+ num_displays = 1;
+ boot_displays[DSI_SECONDARY].is_primary = false;
+ boot_displays[DSI_SECONDARY].name[0] = '\0';
+ }
+
+ for (i = 0; i < num_displays; i++) {
+ boot_displays[i].is_primary = false;
+ if (i == DSI_PRIMARY) {
+ strlcpy(disp_buf, &dsi_display_primary[0],
+ sizeof(dsi_display_primary));
+ pos = strnstr(disp_buf, ":",
+ sizeof(dsi_display_primary));
+ } else {
+ strlcpy(disp_buf, &dsi_display_secondary[0],
+ sizeof(dsi_display_secondary));
+ pos = strnstr(disp_buf, ":",
+ sizeof(dsi_display_secondary));
+ }
+ /* Use ':' as a delimiter to retrieve the display name */
+ if (!pos) {
+ pr_debug("display name[%s]is not valid\n", disp_buf);
+ continue;
+ }
+
+ for (j = 0; (disp_buf + j) < pos; j++)
+ boot_displays[i].name[j] = *(disp_buf + j);
+ boot_displays[i].name[j] = '\0';
+
+ if (i == DSI_PRIMARY) {
+ boot_displays[i].is_primary = true;
+ /* Currently, secondary DSI display is not supported */
+ boot_displays[i].boot_disp_en = true;
+ }
+ }
+ return 0;
+}
+
+/**
+ * validate_dsi_display_selection()- validate boot DSI display selection
+ *
+ * Return: returns true when both displays have unique configurations
+ */
+static bool validate_dsi_display_selection(void)
+{
+ int i, j;
+ int rc = 0;
+ int phy_count = 0;
+ int ctrl_count = 0;
+ int index = 0;
+ bool ctrl_flags[MAX_DSI_ACTIVE_DISPLAY] = {false, false};
+ bool phy_flags[MAX_DSI_ACTIVE_DISPLAY] = {false, false};
+ struct device_node *node, *ctrl_node, *phy_node;
+
+ for (i = 0; i < MAX_DSI_ACTIVE_DISPLAY; i++) {
+ node = boot_displays[i].node;
+ ctrl_count = of_count_phandle_with_args(node, "qcom,dsi-ctrl",
+ NULL);
+
+ for (j = 0; j < ctrl_count; j++) {
+ ctrl_node = of_parse_phandle(node, "qcom,dsi-ctrl", j);
+ rc = of_property_read_u32(ctrl_node, "cell-index",
+ &index);
+ of_node_put(ctrl_node);
+ if (rc) {
+ pr_err("cell index not set for ctrl_nodes\n");
+ return false;
+ }
+ if (ctrl_flags[index])
+ return false;
+ ctrl_flags[index] = true;
+ }
+
+ phy_count = of_count_phandle_with_args(node, "qcom,dsi-phy",
+ NULL);
+ for (j = 0; j < phy_count; j++) {
+ phy_node = of_parse_phandle(node, "qcom,dsi-phy", j);
+ rc = of_property_read_u32(phy_node, "cell-index",
+ &index);
+ of_node_put(phy_node);
+ if (rc) {
+ pr_err("cell index not set phy_nodes\n");
+ return false;
+ }
+ if (phy_flags[index])
+ return false;
+ phy_flags[index] = true;
+ }
+ }
+ return true;
+}
+
+struct device_node *dsi_display_get_boot_display(int index)
+{
+
+ pr_err("index = %d\n", index);
+
+ if (boot_displays[index].node)
+ return boot_displays[index].node;
+ else if ((index == (MAX_DSI_ACTIVE_DISPLAY - 1))
+ && (default_active_node))
+ return default_active_node;
+ else
+ return NULL;
+}
+
static int dsi_display_phy_power_on(struct dsi_display *display)
{
int rc = 0;
@@ -1004,9 +1187,9 @@ static int dsi_display_broadcast_cmd(struct dsi_display *display,
int i;
m_flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_BROADCAST_MASTER |
- DSI_CTRL_CMD_DEFER_TRIGGER | DSI_CTRL_CMD_FIFO_STORE);
+ DSI_CTRL_CMD_DEFER_TRIGGER | DSI_CTRL_CMD_FETCH_MEMORY);
flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_DEFER_TRIGGER |
- DSI_CTRL_CMD_FIFO_STORE);
+ DSI_CTRL_CMD_FETCH_MEMORY);
/*
* 1. Setup commands in FIFO
@@ -1101,8 +1284,8 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct dsi_display *display = to_dsi_display(host);
-
- int rc = 0;
+ struct dsi_display_ctrl *display_ctrl;
+ int rc = 0, cnt = 0;
if (!host || !msg) {
pr_err("Invalid params\n");
@@ -1131,6 +1314,44 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
goto error_disable_clks;
}
+ if (display->tx_cmd_buf == NULL) {
+ mutex_lock(&display->drm_dev->struct_mutex);
+ display->tx_cmd_buf = msm_gem_new(display->drm_dev,
+ SZ_4K,
+ MSM_BO_UNCACHED);
+ mutex_unlock(&display->drm_dev->struct_mutex);
+
+ display->cmd_buffer_size = SZ_4K;
+
+ if ((display->tx_cmd_buf) == NULL) {
+ pr_err("value of display->tx_cmd_buf is NULL");
+ goto error_disable_cmd_engine;
+ }
+ rc = msm_gem_get_iova(display->tx_cmd_buf, 0,
+ &(display->cmd_buffer_iova));
+ if (rc) {
+ pr_err("failed to get the iova rc %d\n", rc);
+ goto free_gem;
+ }
+
+ display->vaddr =
+ (void *) msm_gem_get_vaddr(display->tx_cmd_buf);
+
+ if (IS_ERR_OR_NULL(display->vaddr)) {
+ pr_err("failed to get va rc %d\n", rc);
+ rc = -EINVAL;
+ goto put_iova;
+ }
+
+ for (cnt = 0; cnt < display->ctrl_count; cnt++) {
+ display_ctrl = &display->ctrl[cnt];
+ display_ctrl->ctrl->cmd_buffer_size = SZ_4K;
+ display_ctrl->ctrl->cmd_buffer_iova =
+ display->cmd_buffer_iova;
+ display_ctrl->ctrl->vaddr = display->vaddr;
+ }
+ }
+
if (display->ctrl_count > 1 && !(msg->flags & MIPI_DSI_MSG_UNICAST)) {
rc = dsi_display_broadcast_cmd(display, msg);
if (rc) {
@@ -1143,13 +1364,19 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
msg->ctrl : 0;
rc = dsi_ctrl_cmd_transfer(display->ctrl[ctrl_idx].ctrl, msg,
- DSI_CTRL_CMD_FIFO_STORE);
+ DSI_CTRL_CMD_FETCH_MEMORY);
if (rc) {
pr_err("[%s] cmd transfer failed, rc=%d\n",
display->name, rc);
goto error_disable_cmd_engine;
}
}
+ return rc;
+
+put_iova:
+ msm_gem_put_iova(display->tx_cmd_buf, 0);
+free_gem:
+ msm_gem_free_object(display->tx_cmd_buf);
error_disable_cmd_engine:
(void)dsi_display_cmd_engine_disable(display);
error_disable_clks:
@@ -1754,7 +1981,8 @@ static int dsi_display_res_init(struct dsi_display *display)
}
}
- display->panel = dsi_panel_get(&display->pdev->dev, display->panel_of);
+ display->panel = dsi_panel_get(&display->pdev->dev, display->panel_of,
+ display->cmdline_topology);
if (IS_ERR_OR_NULL(display->panel)) {
rc = PTR_ERR(display->panel);
pr_err("failed to get panel, rc=%d\n", rc);
@@ -2419,6 +2647,7 @@ static int dsi_display_bind(struct device *dev,
goto error_panel_deinit;
}
+ pr_info("Successfully bind display panel '%s'\n", display->name);
display->drm_dev = drm;
goto error;
@@ -2516,6 +2745,9 @@ int dsi_display_dev_probe(struct platform_device *pdev)
{
int rc = 0;
struct dsi_display *display;
+ static bool display_from_cmdline, boot_displays_parsed;
+ static bool comp_add_success;
+ static struct device_node *primary_np, *secondary_np;
if (!pdev || !pdev->dev.of_node) {
pr_err("pdev not found\n");
@@ -2528,9 +2760,66 @@ int dsi_display_dev_probe(struct platform_device *pdev)
display->name = of_get_property(pdev->dev.of_node, "label", NULL);
- display->is_active = of_property_read_bool(pdev->dev.of_node,
- "qcom,dsi-display-active");
+ if (!boot_displays_parsed) {
+ boot_displays[DSI_PRIMARY].boot_disp_en = false;
+ boot_displays[DSI_SECONDARY].boot_disp_en = false;
+ if (dsi_display_parse_boot_display_selection())
+ pr_debug("Display Boot param not valid/available\n");
+ boot_displays_parsed = true;
+ }
+
+ /* Initialize cmdline_topology to use default topology */
+ display->cmdline_topology = -1;
+ if ((!display_from_cmdline) &&
+ (boot_displays[DSI_PRIMARY].boot_disp_en)) {
+ display->is_active = dsi_display_name_compare(pdev->dev.of_node,
+ display->name, DSI_PRIMARY);
+ if (display->is_active) {
+ if (comp_add_success) {
+ (void)_dsi_display_dev_deinit(main_display);
+ component_del(&main_display->pdev->dev,
+ &dsi_display_comp_ops);
+ comp_add_success = false;
+ default_active_node = NULL;
+ pr_debug("removed the existing comp ops\n");
+ }
+ /*
+ * Need to add component for
+ * the secondary DSI display
+ * when more than one DSI display
+ * is supported.
+ */
+ pr_debug("cmdline primary dsi: %s\n",
+ display->name);
+ display_from_cmdline = true;
+ display->cmdline_topology =
+ dsi_display_parse_cmdline_topology(DSI_PRIMARY);
+ primary_np = pdev->dev.of_node;
+ }
+ }
+
+ if (boot_displays[DSI_SECONDARY].boot_disp_en) {
+ if (!secondary_np) {
+ if (dsi_display_name_compare(pdev->dev.of_node,
+ display->name, DSI_SECONDARY)) {
+ pr_debug("cmdline secondary dsi: %s\n",
+ display->name);
+ secondary_np = pdev->dev.of_node;
+ if (primary_np) {
+ if (validate_dsi_display_selection()) {
+ display->is_active = true;
+ display->cmdline_topology =
+ dsi_display_parse_cmdline_topology
+ (DSI_SECONDARY);
+ } else {
+ boot_displays[DSI_SECONDARY]
+ .boot_disp_en = false;
+ }
+ }
+ }
+ }
+ }
display->display_type = of_get_property(pdev->dev.of_node,
"qcom,display-type", NULL);
if (!display->display_type)
@@ -2543,6 +2832,10 @@ int dsi_display_dev_probe(struct platform_device *pdev)
list_add(&display->list, &dsi_display_list);
mutex_unlock(&dsi_display_list_lock);
+ if (!display_from_cmdline)
+ display->is_active = of_property_read_bool(pdev->dev.of_node,
+ "qcom,dsi-display-active");
+
if (display->is_active) {
main_display = display;
rc = _dsi_display_dev_init(display);
@@ -2554,6 +2847,11 @@ int dsi_display_dev_probe(struct platform_device *pdev)
rc = component_add(&pdev->dev, &dsi_display_comp_ops);
if (rc)
pr_err("component add failed, rc=%d\n", rc);
+
+ comp_add_success = true;
+ pr_debug("Component_add success: %s\n", display->name);
+ if (!display_from_cmdline)
+ default_active_node = pdev->dev.of_node;
}
return rc;
}
@@ -2736,6 +3034,7 @@ int dsi_display_get_info(struct msm_display_info *info, void *disp)
goto error;
}
+ memset(info, 0, sizeof(struct msm_display_info));
info->intf_type = DRM_MODE_CONNECTOR_DSI;
timing = &display->panel->mode.timing;
@@ -3405,6 +3704,13 @@ static void __exit dsi_display_unregister(void)
dsi_ctrl_drv_unregister();
dsi_phy_drv_unregister();
}
-
+module_param_string(dsi_display0, dsi_display_primary, MAX_CMDLINE_PARAM_LEN,
+ 0600);
+MODULE_PARM_DESC(dsi_display0,
+ "msm_drm.dsi_display0=<display node>:<configX> where <display node> is 'primary dsi display node name' and <configX> where x represents index in the topology list");
+module_param_string(dsi_display1, dsi_display_secondary, MAX_CMDLINE_PARAM_LEN,
+ 0600);
+MODULE_PARM_DESC(dsi_display1,
+ "msm_drm.dsi_display1=<display node>:<configX> where <display node> is 'secondary dsi display node name' and <configX> where x represents index in the topology list");
module_init(dsi_display_register);
module_exit(dsi_display_unregister);
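For reference, a sketch of the boot parameter format these module parameters accept; the panel label below is hypothetical, and :config0 selects index 0 of that panel's qcom,display-topology list (dsi_display_parse_cmdline_topology() parses the digits after ":config" in base 10):

	msm_drm.dsi_display0=dsi_sim_video_panel_display:config0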
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index d2bc7d8..9aa3113 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -30,6 +30,7 @@
#define MAX_DSI_CTRLS_PER_DISPLAY 2
#define DSI_CLIENT_NAME_SIZE 20
+#define MAX_CMDLINE_PARAM_LEN 512
/*
* DSI Validate Mode modifiers
* @DSI_VALIDATE_FLAG_ALLOW_ADJUST: Allow mode validation to also do fixup
@@ -37,6 +38,18 @@
#define DSI_VALIDATE_FLAG_ALLOW_ADJUST 0x1
/**
+ * enum dsi_display_selection_type - enumerates DSI display selection types
+ * @DSI_PRIMARY: Primary DSI display selected from module parameter
+ * @DSI_SECONDARY: Secondary DSI display selected from module parameter
+ * @MAX_DSI_ACTIVE_DISPLAY: Maximum active displays that can be selected
+ */
+enum dsi_display_selection_type {
+ DSI_PRIMARY = 0,
+ DSI_SECONDARY,
+ MAX_DSI_ACTIVE_DISPLAY,
+};
+
+/**
* enum dsi_display_type - enumerates DSI display types
* @DSI_DISPLAY_SINGLE: A panel connected on a single DSI interface.
* @DSI_DISPLAY_EXT_BRIDGE: A bridge is connected between panel and DSI host.
@@ -78,6 +91,22 @@ struct dsi_display_ctrl {
bool phy_enabled;
};
+/**
+ * struct dsi_display_boot_param - defines DSI boot display selection
+ * @name: Name of DSI display selected as a boot param.
+ * @boot_disp_en: Bool to indicate dtsi availability of display node.
+ * @is_primary: Bool to indicate whether current display is primary display.
+ * @length: Length of DSI display name.
+ * @node: Device node of the selected boot display.
+ * @cmdline_topology: Display topology shared from kernel command line.
+ */
+struct dsi_display_boot_param {
+ char name[MAX_CMDLINE_PARAM_LEN];
+ bool boot_disp_en;
+ bool is_primary;
+ int length;
+ struct device_node *node;
+ int cmdline_topology;
+};
/**
* struct dsi_display_clk_info - dsi display clock source information
@@ -113,6 +142,7 @@ struct dsi_display_clk_info {
* @config: DSI host configuration information.
* @lane_map: Lane mapping between DSI host and Panel.
* @num_of_modes: Number of modes supported by display.
+ * @cmdline_topology: Display topology shared from kernel command line.
* @is_tpg_enabled: TPG state.
* @ulps_enabled: ulps state.
* @clamp_enabled: clamp state.
@@ -151,10 +181,15 @@ struct dsi_display {
struct dsi_host_config config;
struct dsi_lane_map lane_map;
u32 num_of_modes;
+ int cmdline_topology;
bool is_tpg_enabled;
bool ulps_enabled;
bool clamp_enabled;
bool phy_idle_power_off;
+ struct drm_gem_object *tx_cmd_buf;
+ u32 cmd_buffer_size;
+ u32 cmd_buffer_iova;
+ void *vaddr;
struct mipi_dsi_host host;
struct dsi_bridge *bridge;
@@ -189,8 +224,16 @@ int dsi_display_get_active_displays(void **display_array,
u32 max_display_count);
/**
+ * dsi_display_get_boot_display()- get DSI boot display name
+ * @index: index of display selection
+ *
+ * Return: returns the display node pointer
+ */
+struct device_node *dsi_display_get_boot_display(int index);
+
+/**
* dsi_display_get_display_by_name()- finds display by name
- * @index: name of the display.
+ * @name: name of the display.
*
* Return: handle to the display or error code.
*/
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 37ed411..4e09cfb 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -20,6 +20,7 @@
#include "msm_kms.h"
#include "sde_connector.h"
#include "dsi_drm.h"
+#include "sde_trace.h"
#define to_dsi_bridge(x) container_of((x), struct dsi_bridge, base)
#define to_dsi_state(x) container_of((x), struct dsi_connector_state, base)
@@ -134,19 +135,24 @@ static void dsi_bridge_pre_enable(struct drm_bridge *bridge)
return;
}
+ SDE_ATRACE_BEGIN("dsi_bridge_pre_enable");
rc = dsi_display_prepare(c_bridge->display);
if (rc) {
pr_err("[%d] DSI display prepare failed, rc=%d\n",
c_bridge->id, rc);
+ SDE_ATRACE_END("dsi_bridge_pre_enable");
return;
}
+ SDE_ATRACE_BEGIN("dsi_display_enable");
rc = dsi_display_enable(c_bridge->display);
if (rc) {
pr_err("[%d] DSI display enable failed, rc=%d\n",
c_bridge->id, rc);
(void)dsi_display_unprepare(c_bridge->display);
}
+ SDE_ATRACE_END("dsi_display_enable");
+ SDE_ATRACE_END("dsi_bridge_pre_enable");
}
static void dsi_bridge_enable(struct drm_bridge *bridge)
@@ -197,19 +203,25 @@ static void dsi_bridge_post_disable(struct drm_bridge *bridge)
return;
}
+ SDE_ATRACE_BEGIN("dsi_bridge_post_disable");
+ SDE_ATRACE_BEGIN("dsi_display_disable");
rc = dsi_display_disable(c_bridge->display);
if (rc) {
pr_err("[%d] DSI display disable failed, rc=%d\n",
c_bridge->id, rc);
+ SDE_ATRACE_END("dsi_display_disable");
return;
}
+ SDE_ATRACE_END("dsi_display_disable");
rc = dsi_display_unprepare(c_bridge->display);
if (rc) {
pr_err("[%d] DSI display unprepare failed, rc=%d\n",
c_bridge->id, rc);
+ SDE_ATRACE_END("dsi_bridge_post_disable");
return;
}
+ SDE_ATRACE_END("dsi_bridge_post_disable");
}
static void dsi_bridge_mode_set(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index dcb787b..b8bf7a8 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -22,9 +22,6 @@
#include "dsi_panel.h"
#include "dsi_ctrl_hw.h"
-#define MAX_CMDLINE_PARAM_LEN 256
-static char display_config[MAX_CMDLINE_PARAM_LEN];
-
/**
* topology is currently defined by a set of following 3 values:
* 1. num of layer mixers
@@ -32,7 +29,6 @@ static char display_config[MAX_CMDLINE_PARAM_LEN];
* 3. num of interfaces
*/
#define TOPOLOGY_SET_LEN 3
-#define INT_BASE_10 10
#define MAX_TOPOLOGY 5
#define DSI_PANEL_DEFAULT_LABEL "Default dsi panel"
@@ -2078,31 +2074,9 @@ static int dsi_panel_parse_hdr_config(struct dsi_panel *panel,
return 0;
}
-static int dsi_get_cmdline_top_override(void)
-{
- char *str = display_config;
- int top_index = -1;
-
- /*
- * This module need to be updated with needed cmd line argument parsing
- * for other dsi parameters.
- */
- if (strlcat(str, "\0", sizeof(str)) > sizeof(str))
- return -EINVAL;
-
- str = strnstr(display_config, "config", strlen(display_config));
- if (!str)
- return -EINVAL;
-
- if (kstrtol(str + strlen("config"), INT_BASE_10,
- (unsigned long *)&top_index))
- return -EINVAL;
-
- return top_index;
-}
-
static int dsi_panel_parse_topology(struct dsi_panel *panel,
- struct device_node *of_node)
+ struct device_node *of_node,
+ int topology_override)
{
struct msm_display_topology *topology;
u32 top_count, top_sel, *array = NULL;
@@ -2143,12 +2117,13 @@ static int dsi_panel_parse_topology(struct dsi_panel *panel,
top->num_intf = array[i * TOPOLOGY_SET_LEN + 2];
};
- top_sel = dsi_get_cmdline_top_override();
- if (top_sel >= 0 && top_sel < top_count) {
- pr_info("overidden topology: lm: %d comp_enc:%d intf: %d\n",
- topology[top_sel].num_lm,
- topology[top_sel].num_enc,
- topology[top_sel].num_intf);
+ if (topology_override >= 0 && topology_override < top_count) {
+ pr_info("override topology: cfg:%d lm:%d comp_enc:%d intf:%d\n",
+ topology_override,
+ topology[topology_override].num_lm,
+ topology[topology_override].num_enc,
+ topology[topology_override].num_intf);
+ top_sel = topology_override;
goto parse_done;
}
@@ -2266,7 +2241,8 @@ static int dsi_panel_parse_partial_update_caps(struct dsi_panel *panel,
}
struct dsi_panel *dsi_panel_get(struct device *parent,
- struct device_node *of_node)
+ struct device_node *of_node,
+ int topology_override)
{
struct dsi_panel *panel;
const char *data;
@@ -2323,7 +2299,7 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
DSI_V_TOTAL(&panel->mode.timing) *
panel->mode.timing.refresh_rate) / 1000;
- rc = dsi_panel_parse_topology(panel, of_node);
+ rc = dsi_panel_parse_topology(panel, of_node, topology_override);
if (rc) {
pr_err("failed to parse panel topology, rc=%d\n", rc);
goto error;
@@ -2970,6 +2946,3 @@ int dsi_panel_post_unprepare(struct dsi_panel *panel)
mutex_unlock(&panel->panel_lock);
return rc;
}
-
-module_param_string(display_param, display_config, MAX_CMDLINE_PARAM_LEN, 0600);
-MODULE_PARM_DESC(display_param, "format: configx - x indexes the selected topology from the display topology list. Index 0 corresponds to the first topology in the list");
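With the module parameter removed, the "config<N>" token now has to be parsed by whoever calls dsi_panel_get(). A minimal sketch of such a caller-side parser is below; it is illustrative only, assumes the boot string is available to the display layer, and the function name is hypothetical rather than part of this patch.

static int example_parse_topology_override(const char *boot_str)
{
        unsigned long index;
        const char *cfg;

        if (!boot_str)
                return -1;

        /* look for the "config<N>" token in the display boot argument */
        cfg = strnstr(boot_str, "config", strlen(boot_str));
        if (!cfg)
                return -1;              /* no override requested */

        if (kstrtoul(cfg + strlen("config"), 10, &index))
                return -1;

        /* passed into dsi_panel_get() as topology_override */
        return (int)index;
}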
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index f254af5..3569b5b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -205,7 +205,8 @@ static inline bool dsi_panel_initialized(struct dsi_panel *panel)
}
struct dsi_panel *dsi_panel_get(struct device *parent,
- struct device_node *of_node);
+ struct device_node *of_node,
+ int topology_override);
void dsi_panel_put(struct dsi_panel *panel);
int dsi_panel_drv_init(struct dsi_panel *panel, struct mipi_dsi_host *host);
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index ff6802e..efeea31 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -20,6 +20,7 @@
#include "msm_kms.h"
#include "msm_gem.h"
#include "msm_fence.h"
+#include "sde_trace.h"
struct msm_commit {
struct drm_device *dev;
@@ -96,6 +97,7 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
struct drm_crtc_state *old_crtc_state;
int i;
+ SDE_ATRACE_BEGIN("msm_disable");
for_each_connector_in_state(old_state, connector, old_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
@@ -177,6 +179,7 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
else
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
+ SDE_ATRACE_END("msm_disable");
}
static void
@@ -286,6 +289,7 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
int bridge_enable_count = 0;
int i;
+ SDE_ATRACE_BEGIN("msm_enable");
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
@@ -352,8 +356,10 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
}
/* If no bridges were pre_enabled, skip iterating over them again */
- if (bridge_enable_count == 0)
+ if (bridge_enable_count == 0) {
+ SDE_ATRACE_END("msm_enable");
return;
+ }
for_each_connector_in_state(old_state, connector, old_conn_state, i) {
struct drm_encoder *encoder;
@@ -373,6 +379,7 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_bridge_enable(encoder->bridge);
}
+ SDE_ATRACE_END("msm_enable");
}
/* The (potentially) asynchronous part of the commit. At this point
@@ -430,7 +437,9 @@ static void _msm_drm_commit_work_cb(struct kthread_work *work)
commit = container_of(work, struct msm_commit, commit_work);
+ SDE_ATRACE_BEGIN("complete_commit");
complete_commit(commit);
+ SDE_ATRACE_END("complete_commit");
}
static struct msm_commit *commit_init(struct drm_atomic_state *state)
@@ -512,9 +521,12 @@ int msm_atomic_commit(struct drm_device *dev,
struct drm_plane_state *plane_state;
int i, ret;
+ SDE_ATRACE_BEGIN("atomic_commit");
ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
+ if (ret) {
+ SDE_ATRACE_END("atomic_commit");
return ret;
+ }
c = commit_init(state);
if (!c) {
@@ -592,14 +604,17 @@ int msm_atomic_commit(struct drm_device *dev,
commit_destroy(c);
goto error;
}
+ SDE_ATRACE_END("atomic_commit");
return 0;
}
complete_commit(c);
+ SDE_ATRACE_END("atomic_commit");
return 0;
error:
drm_atomic_helper_cleanup_planes(dev, state);
+ SDE_ATRACE_END("atomic_commit");
return ret;
}
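The new trace markers must stay balanced on every exit path, which is why each early return above is preceded by a matching SDE_ATRACE_END(). An alternative, sketched below purely for illustration (not part of this patch), is to funnel error paths through a single exit label so the begin/end pair lives in one place.

static int example_traced_commit(struct drm_device *dev,
                                 struct drm_atomic_state *state)
{
        int ret;

        SDE_ATRACE_BEGIN("atomic_commit");

        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret)
                goto out;

        /* ... remaining commit steps ... */
out:
        SDE_ATRACE_END("atomic_commit");
        return ret;
}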
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a3a9142..962087c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -45,6 +45,7 @@
#include "msm_gpu.h"
#include "msm_kms.h"
#include "sde_wb.h"
+#include "dsi_display.h"
/*
* MSM driver version:
@@ -58,12 +59,68 @@
#define TEARDOWN_DEADLOCK_RETRY_MAX 5
+static void msm_drm_helper_hotplug_event(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ char *event_string;
+ char const *connector_name;
+ char *envp[2];
+
+ if (!dev) {
+ DRM_ERROR("hotplug_event failed, invalid input\n");
+ return;
+ }
+
+ if (!dev->mode_config.poll_enabled)
+ return;
+
+ event_string = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!event_string) {
+ DRM_ERROR("failed to allocate event string\n");
+ return;
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(connector, dev) {
+ /* Only handle HPD capable connectors. */
+ if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ connector->status = connector->funcs->detect(connector, false);
+
+ if (connector->name)
+ connector_name = connector->name;
+ else
+ connector_name = "unknown";
+
+ snprintf(event_string, SZ_4K, "name=%s status=%s\n",
+ connector_name,
+ drm_get_connector_status_name(connector->status));
+ DRM_DEBUG("generating hotplug event [%s]\n", event_string);
+ envp[0] = event_string;
+ envp[1] = NULL;
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
+ envp);
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ kfree(event_string);
+}
+
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_private *priv = NULL;
+
+ if (!dev) {
+ DRM_ERROR("output_poll_changed failed, invalid input\n");
+ return;
+ }
+
+ priv = dev->dev_private;
if (priv->fbdev)
drm_fb_helper_hotplug_event(priv->fbdev);
+ else
+ msm_drm_helper_hotplug_event(dev);
}
int msm_atomic_check(struct drm_device *dev,
@@ -1764,15 +1821,26 @@ static int add_display_components(struct device *dev,
struct component_match **matchptr)
{
struct device *mdp_dev = NULL;
+ struct device_node *node;
+ const char *name;
int ret;
if (of_device_is_compatible(dev->of_node, "qcom,sde-kms")) {
struct device_node *np = dev->of_node;
unsigned int i;
- for (i = 0; ; i++) {
- struct device_node *node;
+ for (i = 0; i < MAX_DSI_ACTIVE_DISPLAY; i++) {
+ node = dsi_display_get_boot_display(i);
+ if (node != NULL) {
+ name = of_get_property(node, "label", NULL);
+ component_match_add(dev, matchptr, compare_of,
+ node);
+ pr_debug("Added component = %s\n", name);
+ }
+ }
+
+ for (i = 0; ; i++) {
node = of_parse_phandle(np, "connectors", i);
if (!node)
break;
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index 448a1e7..f42e510 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -482,6 +482,9 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
update_bus = 1;
update_clk = 1;
}
+ trace_sde_perf_crtc_update(crtc->base.id, new->bw_ctl,
+ new->core_clk_rate, stop_req,
+ update_bus, update_clk);
if (update_bus)
_sde_core_perf_crtc_update_bus(kms, crtc);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 7d0fad0..23f23b0 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -36,6 +36,7 @@
#include "sde_connector.h"
#include "sde_power_handle.h"
#include "sde_core_perf.h"
+#include "sde_trace.h"
struct sde_crtc_irq_info {
struct sde_irq_callback irq;
@@ -381,7 +382,7 @@ static void *_sde_crtc_rp_get(struct sde_crtc_respool *rp, u32 type, u64 tag)
if (rp->ops.get)
val = rp->ops.get(NULL, type, -1);
if (IS_ERR_OR_NULL(val)) {
- SDE_ERROR("crtc%d.%u failed to get res:0x%x//\n",
+ SDE_DEBUG("crtc%d.%u failed to get res:0x%x//\n",
crtc->base.id, rp->sequence_id, type);
return NULL;
}
@@ -1111,12 +1112,11 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct sde_hw_stage_cfg *stage_cfg;
struct sde_rect plane_crtc_roi;
- u32 flush_mask = 0;
+ u32 flush_mask, flush_sbuf, flush_tmp;
uint32_t lm_idx = LEFT_MIXER, stage_idx;
bool bg_alpha_enable[CRTC_DUAL_MIXERS] = {false};
int zpos_cnt[CRTC_DUAL_MIXERS][SDE_STAGE_MAX + 1] = { {0} };
int i;
- bool sbuf_mode = false;
u32 prefill = 0;
if (!sde_crtc || !mixer) {
@@ -1128,6 +1128,10 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
lm = mixer->hw_lm;
stage_cfg = &sde_crtc->stage_cfg;
cstate = to_sde_crtc_state(crtc->state);
+ flush_sbuf = 0x0;
+
+ cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE;
+ cstate->sbuf_prefill_line = 0;
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
@@ -1143,10 +1147,16 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
fb = state->fb;
if (sde_plane_is_sbuf_mode(plane, &prefill))
- sbuf_mode = true;
+ cstate->sbuf_cfg.rot_op_mode =
+ SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
+ if (prefill > cstate->sbuf_prefill_line)
+ cstate->sbuf_prefill_line = prefill;
- sde_plane_get_ctl_flush(plane, ctl, &flush_mask);
+ sde_plane_get_ctl_flush(plane, ctl, &flush_mask, &flush_tmp);
+ /* persist rotator flush bit(s) for one more commit */
+ flush_mask |= cstate->sbuf_flush_mask | flush_tmp;
+ flush_sbuf |= flush_tmp;
SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
crtc->base.id,
@@ -1162,7 +1172,8 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
state->src_x >> 16, state->src_y >> 16,
state->src_w >> 16, state->src_h >> 16,
state->crtc_x, state->crtc_y,
- state->crtc_w, state->crtc_h);
+ state->crtc_w, state->crtc_h,
+ cstate->sbuf_cfg.rot_op_mode);
for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
struct sde_rect intersect;
@@ -1207,6 +1218,8 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
}
}
+ cstate->sbuf_flush_mask = flush_sbuf;
+
if (lm && lm->ops.setup_dim_layer) {
cstate = to_sde_crtc_state(crtc->state);
for (i = 0; i < cstate->num_dim_layers; i++)
@@ -1214,20 +1227,8 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
mixer, &cstate->dim_layer[i]);
}
- if (ctl->ops.setup_sbuf_cfg) {
- cstate = to_sde_crtc_state(crtc->state);
- if (!sbuf_mode) {
- cstate->sbuf_cfg.rot_op_mode =
- SDE_CTL_ROT_OP_MODE_OFFLINE;
- cstate->sbuf_prefill_line = 0;
- } else {
- cstate->sbuf_cfg.rot_op_mode =
- SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
- cstate->sbuf_prefill_line = prefill;
- }
-
+ if (ctl->ops.setup_sbuf_cfg)
ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
- }
_sde_crtc_program_lm_output_roi(crtc);
}
@@ -1490,6 +1491,7 @@ static void sde_crtc_frame_event_work(struct kthread_work *work)
struct sde_crtc_state *cstate;
struct sde_kms *sde_kms;
unsigned long flags;
+ bool disable_inprogress = false;
if (!work) {
SDE_ERROR("invalid work handle\n");
@@ -1515,6 +1517,9 @@ static void sde_crtc_frame_event_work(struct kthread_work *work)
SDE_DEBUG("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
ktime_to_ns(fevent->ts));
+ disable_inprogress = fevent->event &
+ SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
+ fevent->event &= ~SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
(fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR) ||
@@ -1528,9 +1533,6 @@ static void sde_crtc_frame_event_work(struct kthread_work *work)
atomic_read(&sde_crtc->frame_pending));
SDE_EVT32(DRMID(crtc), fevent->event,
SDE_EVTLOG_FUNC_CASE1);
-
- /* don't propagate unexpected frame done events */
- return;
} else if (atomic_dec_return(&sde_crtc->frame_pending) == 0) {
/* release bandwidth and other resources */
SDE_DEBUG("crtc%d ts:%lld last pending\n",
@@ -1538,13 +1540,15 @@ static void sde_crtc_frame_event_work(struct kthread_work *work)
ktime_to_ns(fevent->ts));
SDE_EVT32(DRMID(crtc), fevent->event,
SDE_EVTLOG_FUNC_CASE2);
- sde_core_perf_crtc_release_bw(crtc);
+ if (!disable_inprogress)
+ sde_core_perf_crtc_release_bw(crtc);
} else {
SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
SDE_EVTLOG_FUNC_CASE3);
}
- if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE)
+ if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE &&
+ !disable_inprogress)
sde_core_perf_crtc_update(crtc, 0, false);
} else {
SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
@@ -1580,7 +1584,7 @@ static void sde_crtc_frame_event_cb(void *data, u32 event)
pipe_id = drm_crtc_index(crtc);
SDE_DEBUG("crtc%d\n", crtc->base.id);
- SDE_EVT32_VERBOSE(DRMID(crtc));
+ SDE_EVT32_VERBOSE(DRMID(crtc), event);
spin_lock_irqsave(&sde_crtc->spin_lock, flags);
fevent = list_first_entry_or_null(&sde_crtc->frame_event_list,
@@ -1599,7 +1603,11 @@ static void sde_crtc_frame_event_cb(void *data, u32 event)
fevent->event = event;
fevent->crtc = crtc;
fevent->ts = ktime_get();
- kthread_queue_work(&priv->disp_thread[pipe_id].worker, &fevent->work);
+ if (event & SDE_ENCODER_FRAME_EVENT_DURING_DISABLE)
+ sde_crtc_frame_event_work(&fevent->work);
+ else
+ kthread_queue_work(&priv->disp_thread[pipe_id].worker,
+ &fevent->work);
}
void sde_crtc_complete_commit(struct drm_crtc *crtc,
@@ -1739,6 +1747,7 @@ static void _sde_crtc_wait_for_fences(struct drm_crtc *crtc)
* if its fence has timed out. Call input fence wait multiple times if
* fence wait is interrupted due to interrupt call.
*/
+ SDE_ATRACE_BEGIN("plane_wait_input_fence");
drm_atomic_crtc_for_each_plane(plane, crtc) {
do {
kt_wait = ktime_sub(kt_end, ktime_get());
@@ -1750,6 +1759,7 @@ static void _sde_crtc_wait_for_fences(struct drm_crtc *crtc)
rc = sde_plane_wait_input_fence(plane, wait_ms);
} while (wait_ms && rc == -ERESTARTSYS);
}
+ SDE_ATRACE_END("plane_wait_input_fence");
}
static void _sde_crtc_setup_mixer_for_encoder(
@@ -2076,6 +2086,7 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
if (unlikely(!sde_crtc->num_mixers))
return;
+ SDE_ATRACE_BEGIN("crtc_commit");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct sde_encoder_kickoff_params params = { 0 };
@@ -2097,7 +2108,7 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
SDE_ERROR("crtc%d invalid frame pending\n",
crtc->base.id);
SDE_EVT32(DRMID(crtc), 0);
- return;
+ goto end;
} else if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
/* acquire bandwidth and other resources */
SDE_DEBUG("crtc%d first commit\n", crtc->base.id);
@@ -2113,6 +2124,9 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
sde_encoder_kickoff(encoder);
}
+end:
+ SDE_ATRACE_END("crtc_commit");
+ return;
}
/**
@@ -2415,8 +2429,7 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
if (atomic_read(&sde_crtc->frame_pending)) {
/* release bandwidth and other resources */
- SDE_ERROR("crtc%d invalid frame pending\n",
- crtc->base.id);
+ SDE_ERROR("crtc%d invalid frame pending\n", crtc->base.id);
SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->frame_pending),
SDE_EVTLOG_FUNC_CASE2);
sde_core_perf_crtc_release_bw(crtc);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 4b3c814..2cf30a9 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -272,6 +272,7 @@ struct sde_crtc_respool {
* @new_perf: new performance state
* @sbuf_cfg: stream buffer configuration
* @sbuf_prefill_line: number of line for inline rotator prefetch
+ * @sbuf_flush_mask: flush mask for inline rotator
*/
struct sde_crtc_state {
struct drm_crtc_state base;
@@ -298,7 +299,8 @@ struct sde_crtc_state {
struct sde_core_perf_params cur_perf;
struct sde_core_perf_params new_perf;
struct sde_ctl_sbuf_cfg sbuf_cfg;
- u64 sbuf_prefill_line;
+ u32 sbuf_prefill_line;
+ u32 sbuf_flush_mask;
struct sde_crtc_respool rp;
};
@@ -433,10 +435,14 @@ static inline bool sde_crtc_is_enabled(struct drm_crtc *crtc)
*/
static inline u32 sde_crtc_get_inline_prefill(struct drm_crtc *crtc)
{
+ struct sde_crtc_state *cstate;
+
if (!crtc || !crtc->state)
return 0;
- return to_sde_crtc_state(crtc->state)->sbuf_prefill_line;
+ cstate = to_sde_crtc_state(crtc->state);
+ return cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE ?
+ cstate->sbuf_prefill_line : 0;
}
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 3d48a17..5ccd385 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -35,6 +35,7 @@
#include "sde_power_handle.h"
#include "sde_hw_dsc.h"
#include "sde_crtc.h"
+#include "sde_trace.h"
#define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
@@ -142,7 +143,6 @@ enum sde_enc_rc_states {
* Bit0 = phys_encs[0] etc.
* @crtc_frame_event_cb: callback handler for frame event
* @crtc_frame_event_cb_data: callback handler private data
- * @crtc_frame_event: callback event
* @frame_done_timeout: frame done timeout in Hz
* @frame_done_timer: watchdog timer for frame done event
* @rsc_client: rsc client pointer
@@ -160,6 +160,7 @@ enum sde_enc_rc_states {
* @rsc_cfg: rsc configuration
* @cur_conn_roi: current connector roi
* @prv_conn_roi: previous connector roi to optimize if unchanged
+ * @disable_inprogress: sde encoder disable is in progress.
*/
struct sde_encoder_virt {
struct drm_encoder base;
@@ -184,7 +185,6 @@ struct sde_encoder_virt {
DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
void (*crtc_frame_event_cb)(void *, u32 event);
void *crtc_frame_event_cb_data;
- u32 crtc_frame_event;
atomic_t frame_done_timeout;
struct timer_list frame_done_timer;
@@ -204,6 +204,7 @@ struct sde_encoder_virt {
struct sde_encoder_rsc_config rsc_cfg;
struct sde_rect cur_conn_roi;
struct sde_rect prv_conn_roi;
+ bool disable_inprogress;
};
#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -1456,6 +1457,7 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
SDE_EVT32(DRMID(drm_enc));
sde_enc->cur_master = NULL;
+ sde_enc->disable_inprogress = false;
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
@@ -1514,6 +1516,7 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
priv = drm_enc->dev->dev_private;
sde_kms = to_sde_kms(priv->kms);
+ sde_enc->disable_inprogress = true;
SDE_EVT32(DRMID(drm_enc));
@@ -1580,6 +1583,7 @@ static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc,
if (!drm_enc || !phy_enc)
return;
+ SDE_ATRACE_BEGIN("encoder_vblank_callback");
sde_enc = to_sde_encoder_virt(drm_enc);
spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
@@ -1588,6 +1592,7 @@ static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc,
spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
atomic_inc(&phy_enc->vsync_cnt);
+ SDE_ATRACE_END("encoder_vblank_callback");
}
static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc,
@@ -1596,8 +1601,10 @@ static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc,
if (!phy_enc)
return;
+ SDE_ATRACE_BEGIN("encoder_underrun_callback");
atomic_inc(&phy_enc->underrun_cnt);
SDE_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt));
+ SDE_ATRACE_END("encoder_underrun_callback");
}
void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
@@ -1664,7 +1671,6 @@ static void sde_encoder_frame_done_callback(
for (i = 0; i < sde_enc->num_phys_encs; i++)
if (sde_enc->phys_encs[i] == ready_phys) {
clear_bit(i, sde_enc->frame_busy_mask);
- sde_enc->crtc_frame_event |= event;
SDE_EVT32_VERBOSE(DRMID(drm_enc), i,
sde_enc->frame_busy_mask[0]);
}
@@ -1676,10 +1682,12 @@ static void sde_encoder_frame_done_callback(
sde_encoder_resource_control(drm_enc,
SDE_ENC_RC_EVENT_FRAME_DONE);
+ if (sde_enc->disable_inprogress)
+ event |= SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
+
if (sde_enc->crtc_frame_event_cb)
sde_enc->crtc_frame_event_cb(
- sde_enc->crtc_frame_event_cb_data,
- sde_enc->crtc_frame_event);
+ sde_enc->crtc_frame_event_cb_data, event);
}
}
@@ -1861,7 +1869,6 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
}
pending_flush = 0x0;
- sde_enc->crtc_frame_event = 0;
/* update pending counts and trigger kickoff ctl flush atomically */
spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
@@ -2133,6 +2140,7 @@ void sde_encoder_kickoff(struct drm_encoder *drm_enc)
SDE_ERROR("invalid encoder\n");
return;
}
+ SDE_ATRACE_BEGIN("encoder_kickoff");
sde_enc = to_sde_encoder_virt(drm_enc);
SDE_DEBUG_ENC(sde_enc, "\n");
@@ -2152,6 +2160,7 @@ void sde_encoder_kickoff(struct drm_encoder *drm_enc)
if (phys && phys->ops.handle_post_kickoff)
phys->ops.handle_post_kickoff(phys);
}
+ SDE_ATRACE_END("encoder_kickoff");
}
int sde_encoder_helper_hw_release(struct sde_encoder_phys *phys_enc,
@@ -2679,6 +2688,7 @@ static void sde_encoder_frame_done_timeout(unsigned long data)
struct drm_encoder *drm_enc = (struct drm_encoder *) data;
struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
struct msm_drm_private *priv;
+ u32 event;
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
SDE_ERROR("invalid parameters\n");
@@ -2696,13 +2706,14 @@ static void sde_encoder_frame_done_timeout(unsigned long data)
return;
}
- SDE_EVT32(DRMID(drm_enc), 2, sde_enc->crtc_frame_event);
- SDE_ERROR_ENC(sde_enc, "frame done timeout, frame_event %d\n",
- sde_enc->crtc_frame_event);
+ SDE_ERROR_ENC(sde_enc, "frame done timeout\n");
- sde_enc->crtc_frame_event_cb(sde_enc->crtc_frame_event_cb_data,
- sde_enc->crtc_frame_event |
- SDE_ENCODER_FRAME_EVENT_ERROR);
+ event = SDE_ENCODER_FRAME_EVENT_ERROR;
+ if (sde_enc->disable_inprogress)
+ event |= SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
+
+ SDE_EVT32(DRMID(drm_enc), event);
+ sde_enc->crtc_frame_event_cb(sde_enc->crtc_frame_event_cb_data, event);
}
static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 6ef245b..d3a9bb4 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -27,6 +27,7 @@
#define SDE_ENCODER_FRAME_EVENT_DONE BIT(0)
#define SDE_ENCODER_FRAME_EVENT_ERROR BIT(1)
#define SDE_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
+#define SDE_ENCODER_FRAME_EVENT_DURING_DISABLE BIT(3)
/**
* Encoder functions and data types
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 3d6dc32..c2ef28d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -294,6 +294,7 @@ struct sde_encoder_phys_cmd {
* @bypass_irqreg: Bypass irq register/unregister if non-zero
* @wbdone_complete: for wbdone irq synchronization
* @wb_cfg: Writeback hardware configuration
+ * @cdp_cfg: Writeback CDP configuration
* @intf_cfg: Interface hardware configuration
* @wb_roi: Writeback region-of-interest
* @wb_fmt: Writeback pixel format
@@ -315,6 +316,7 @@ struct sde_encoder_phys_wb {
u32 bypass_irqreg;
struct completion wbdone_complete;
struct sde_hw_wb_cfg wb_cfg;
+ struct sde_hw_wb_cdp_cfg cdp_cfg;
struct sde_hw_intf_cfg intf_cfg;
struct sde_rect wb_roi;
const struct sde_format *wb_fmt;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 5cb84b4..488f5c0 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -222,7 +222,7 @@ static void programmable_fetch_config(struct sde_encoder_phys *phys_enc,
* @rot_fetch_lines: number of line to prefill, or 0 to disable
*/
static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
- u64 rot_fetch_lines)
+ u32 rot_fetch_lines)
{
struct sde_encoder_phys_vid *vid_enc =
to_sde_encoder_phys_vid(phys_enc);
@@ -232,9 +232,12 @@ static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
u32 horiz_total = 0;
u32 vert_total = 0;
u32 rot_fetch_start_vsync_counter = 0;
+ u32 flush_mask = 0;
unsigned long lock_flags;
- if (!phys_enc || !vid_enc->hw_intf ||
+ if (!phys_enc || !vid_enc->hw_intf || !phys_enc->hw_ctl ||
+ !phys_enc->hw_ctl->ops.get_bitmask_intf ||
+ !phys_enc->hw_ctl->ops.update_pending_flush ||
!vid_enc->hw_intf->ops.setup_rot_start)
return;
@@ -253,9 +256,14 @@ static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
}
SDE_DEBUG_VIDENC(vid_enc,
- "rot_fetch_lines %llu rot_fetch_start_vsync_counter %u\n",
+ "rot_fetch_lines %u rot_fetch_start_vsync_counter %u\n",
rot_fetch_lines, rot_fetch_start_vsync_counter);
+ phys_enc->hw_ctl->ops.get_bitmask_intf(
+ phys_enc->hw_ctl, &flush_mask, vid_enc->hw_intf->idx);
+ phys_enc->hw_ctl->ops.update_pending_flush(
+ phys_enc->hw_ctl, flush_mask);
+
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
vid_enc->hw_intf->ops.setup_rot_start(vid_enc->hw_intf, &f);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 385c610..1657b9b 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -248,16 +248,18 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
struct sde_hw_wb *hw_wb;
struct sde_hw_wb_cfg *wb_cfg;
+ struct sde_hw_wb_cdp_cfg *cdp_cfg;
const struct msm_format *format;
int ret, mmu_id;
- if (!phys_enc) {
+ if (!phys_enc || !phys_enc->sde_kms || !phys_enc->sde_kms->catalog) {
SDE_ERROR("invalid encoder\n");
return;
}
hw_wb = wb_enc->hw_wb;
wb_cfg = &wb_enc->wb_cfg;
+ cdp_cfg = &wb_enc->cdp_cfg;
memset(wb_cfg, 0, sizeof(struct sde_hw_wb_cfg));
wb_cfg->intf_mode = phys_enc->intf_mode;
@@ -325,6 +327,21 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
if (hw_wb->ops.setup_outformat)
hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
+ if (hw_wb->ops.setup_cdp) {
+ memset(cdp_cfg, 0, sizeof(struct sde_hw_wb_cdp_cfg));
+
+ cdp_cfg->enable = phys_enc->sde_kms->catalog->perf.cdp_cfg
+ [SDE_PERF_CDP_USAGE_NRT].wr_enable;
+ cdp_cfg->ubwc_meta_enable =
+ SDE_FORMAT_IS_UBWC(wb_cfg->dest.format);
+ cdp_cfg->tile_amortize_enable =
+ SDE_FORMAT_IS_UBWC(wb_cfg->dest.format) ||
+ SDE_FORMAT_IS_TILE(wb_cfg->dest.format);
+ cdp_cfg->preload_ahead = SDE_WB_CDP_PRELOAD_AHEAD_64;
+
+ hw_wb->ops.setup_cdp(hw_wb, cdp_cfg);
+ }
+
if (hw_wb->ops.setup_outaddress)
hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 30e63da..306bb86 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -79,7 +79,6 @@
/* maximum XIN halt timeout in usec */
#define VBIF_XIN_HALT_TIMEOUT 0x4000
-#define DEFAULT_CREQ_LUT_NRT 0x0
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
/* access property value based on prop_type and hardware index */
@@ -137,7 +136,6 @@ enum sde_prop {
QSEED_TYPE,
CSC_TYPE,
PANIC_PER_PIPE,
- CDP,
SRC_SPLIT,
DIM_LAYER,
SMART_DMA_REV,
@@ -160,6 +158,13 @@ enum {
PERF_DOWNSCALING_PREFILL_LINES,
PERF_XTRA_PREFILL_LINES,
PERF_AMORTIZABLE_THRESHOLD,
+ PERF_DANGER_LUT,
+ PERF_SAFE_LUT,
+ PERF_QOS_LUT_LINEAR,
+ PERF_QOS_LUT_MACROTILE,
+ PERF_QOS_LUT_NRT,
+ PERF_QOS_LUT_CWB,
+ PERF_CDP_SETTING,
PERF_PROP_MAX,
};
@@ -170,8 +175,6 @@ enum {
SSPP_XIN,
SSPP_CLK_CTRL,
SSPP_CLK_STATUS,
- SSPP_DANGER,
- SSPP_SAFE,
SSPP_SCALE_SIZE,
SSPP_VIG_BLOCKS,
SSPP_RGB_BLOCKS,
@@ -287,6 +290,8 @@ enum {
VBIF_DYNAMIC_OT_WR_LIMIT,
VBIF_QOS_RT_REMAP,
VBIF_QOS_NRT_REMAP,
+ VBIF_MEMTYPE_0,
+ VBIF_MEMTYPE_1,
VBIF_PROP_MAX,
};
@@ -346,7 +351,6 @@ static struct sde_prop_type sde_prop[] = {
{QSEED_TYPE, "qcom,sde-qseed-type", false, PROP_TYPE_STRING},
{CSC_TYPE, "qcom,sde-csc-type", false, PROP_TYPE_STRING},
{PANIC_PER_PIPE, "qcom,sde-panic-per-pipe", false, PROP_TYPE_BOOL},
- {CDP, "qcom,sde-has-cdp", false, PROP_TYPE_BOOL},
{SRC_SPLIT, "qcom,sde-has-src-split", false, PROP_TYPE_BOOL},
{DIM_LAYER, "qcom,sde-has-dim-layer", false, PROP_TYPE_BOOL},
{SMART_DMA_REV, "qcom,sde-smart-dma-rev", false, PROP_TYPE_STRING},
@@ -378,6 +382,18 @@ static struct sde_prop_type sde_perf_prop[] = {
false, PROP_TYPE_U32},
{PERF_AMORTIZABLE_THRESHOLD, "qcom,sde-amortizable-threshold",
false, PROP_TYPE_U32},
+ {PERF_DANGER_LUT, "qcom,sde-danger-lut", false, PROP_TYPE_U32_ARRAY},
+ {PERF_SAFE_LUT, "qcom,sde-safe-lut", false, PROP_TYPE_U32_ARRAY},
+ {PERF_QOS_LUT_LINEAR, "qcom,sde-qos-lut-linear", false,
+ PROP_TYPE_U32_ARRAY},
+ {PERF_QOS_LUT_MACROTILE, "qcom,sde-qos-lut-macrotile", false,
+ PROP_TYPE_U32_ARRAY},
+ {PERF_QOS_LUT_NRT, "qcom,sde-qos-lut-nrt", false,
+ PROP_TYPE_U32_ARRAY},
+ {PERF_QOS_LUT_CWB, "qcom,sde-qos-lut-cwb", false,
+ PROP_TYPE_U32_ARRAY},
+ {PERF_CDP_SETTING, "qcom,sde-cdp-setting", false,
+ PROP_TYPE_U32_ARRAY},
};
static struct sde_prop_type sspp_prop[] = {
@@ -389,8 +405,6 @@ static struct sde_prop_type sspp_prop[] = {
PROP_TYPE_BIT_OFFSET_ARRAY},
{SSPP_CLK_STATUS, "qcom,sde-sspp-clk-status", false,
PROP_TYPE_BIT_OFFSET_ARRAY},
- {SSPP_DANGER, "qcom,sde-sspp-danger-lut", false, PROP_TYPE_U32_ARRAY},
- {SSPP_SAFE, "qcom,sde-sspp-safe-lut", false, PROP_TYPE_U32_ARRAY},
{SSPP_SCALE_SIZE, "qcom,sde-sspp-scale-size", false, PROP_TYPE_U32},
{SSPP_VIG_BLOCKS, "qcom,sde-sspp-vig-blocks", false, PROP_TYPE_NODE},
{SSPP_RGB_BLOCKS, "qcom,sde-sspp-rgb-blocks", false, PROP_TYPE_NODE},
@@ -518,6 +532,8 @@ static struct sde_prop_type vbif_prop[] = {
PROP_TYPE_U32_ARRAY},
{VBIF_QOS_NRT_REMAP, "qcom,sde-vbif-qos-nrt-remap", false,
PROP_TYPE_U32_ARRAY},
+ {VBIF_MEMTYPE_0, "qcom,sde-vbif-memtype-0", false, PROP_TYPE_U32_ARRAY},
+ {VBIF_MEMTYPE_1, "qcom,sde-vbif-memtype-1", false, PROP_TYPE_U32_ARRAY},
};
static struct sde_prop_type reg_dma_prop[REG_DMA_PROP_MAX] = {
@@ -792,6 +808,8 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count;
sspp->type = SSPP_TYPE_VIG;
set_bit(SDE_SSPP_QOS, &sspp->features);
+ if (sde_cfg->vbif_qos_nlvl == 8)
+ set_bit(SDE_SSPP_QOS_8LVL, &sspp->features);
(*vig_count)++;
if (!prop_value)
@@ -884,6 +902,8 @@ static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + *rgb_count;
sspp->type = SSPP_TYPE_RGB;
set_bit(SDE_SSPP_QOS, &sspp->features);
+ if (sde_cfg->vbif_qos_nlvl == 8)
+ set_bit(SDE_SSPP_QOS_8LVL, &sspp->features);
(*rgb_count)++;
if (!prop_value)
@@ -954,6 +974,8 @@ static void _sde_sspp_setup_dma(struct sde_mdss_cfg *sde_cfg,
sspp->id - SSPP_VIG0);
sspp->type = SSPP_TYPE_DMA;
set_bit(SDE_SSPP_QOS, &sspp->features);
+ if (sde_cfg->vbif_qos_nlvl == 8)
+ set_bit(SDE_SSPP_QOS_8LVL, &sspp->features);
(*dma_count)++;
}
@@ -970,7 +992,6 @@ static int sde_sspp_parse_dt(struct device_node *np,
struct sde_sspp_cfg *sspp;
struct sde_sspp_sub_blks *sblk;
u32 vig_count = 0, dma_count = 0, rgb_count = 0, cursor_count = 0;
- u32 danger_count = 0, safe_count = 0;
struct device_node *snp = NULL;
prop_value = kzalloc(SSPP_PROP_MAX *
@@ -985,16 +1006,6 @@ static int sde_sspp_parse_dt(struct device_node *np,
if (rc)
goto end;
- rc = _validate_dt_entry(np, &sspp_prop[SSPP_DANGER], 1,
- &prop_count[SSPP_DANGER], &danger_count);
- if (rc)
- goto end;
-
- rc = _validate_dt_entry(np, &sspp_prop[SSPP_SAFE], 1,
- &prop_count[SSPP_SAFE], &safe_count);
- if (rc)
- goto end;
-
rc = _read_dt_entry(np, sspp_prop, ARRAY_SIZE(sspp_prop), prop_count,
prop_exists, prop_value);
if (rc)
@@ -1055,6 +1066,9 @@ static int sde_sspp_parse_dt(struct device_node *np,
set_bit(SDE_SSPP_SRC, &sspp->features);
+ if (sde_cfg->has_cdp)
+ set_bit(SDE_SSPP_CDP, &sspp->features);
+
if (sde_cfg->ts_prefill_rev == 1) {
set_bit(SDE_SSPP_TS_PREFILL, &sspp->features);
} else if (sde_cfg->ts_prefill_rev == 2) {
@@ -1099,19 +1113,6 @@ static int sde_sspp_parse_dt(struct device_node *np,
sblk->maxvdeciexp = MAX_VERT_DECIMATION;
sspp->xin_id = PROP_VALUE_ACCESS(prop_value, SSPP_XIN, i);
- sblk->danger_lut_linear =
- PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 0);
- sblk->danger_lut_tile =
- PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 1);
- sblk->danger_lut_nrt =
- PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 2);
- sblk->safe_lut_linear =
- PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 0);
- sblk->safe_lut_tile =
- PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 1);
- sblk->safe_lut_nrt =
- PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 2);
- sblk->creq_lut_nrt = DEFAULT_CREQ_LUT_NRT;
sblk->pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE;
sblk->src_blk.len = PROP_VALUE_ACCESS(prop_value, SSPP_SIZE, 0);
@@ -1134,15 +1135,8 @@ static int sde_sspp_parse_dt(struct device_node *np,
}
SDE_DEBUG(
- "xin:%d danger:%x/%x/%x safe:%x/%x/%x creq:%x ram:%d clk%d:%x/%d\n",
+ "xin:%d ram:%d clk%d:%x/%d\n",
sspp->xin_id,
- sblk->danger_lut_linear,
- sblk->danger_lut_tile,
- sblk->danger_lut_nrt,
- sblk->safe_lut_linear,
- sblk->safe_lut_tile,
- sblk->safe_lut_nrt,
- sblk->creq_lut_nrt,
sblk->pixel_ram_size,
sspp->clk_ctrl,
sde_cfg->mdp[0].clk_ctrls[sspp->clk_ctrl].reg_off,
@@ -1514,6 +1508,13 @@ static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
set_bit(SDE_WB_TRAFFIC_SHAPER, &wb->features);
set_bit(SDE_WB_YUV_CONFIG, &wb->features);
+ if (sde_cfg->has_cdp)
+ set_bit(SDE_WB_CDP, &wb->features);
+
+ set_bit(SDE_WB_QOS, &wb->features);
+ if (sde_cfg->vbif_qos_nlvl == 8)
+ set_bit(SDE_WB_QOS_8LVL, &wb->features);
+
if (sde_cfg->has_wb_ubwc)
set_bit(SDE_WB_UBWC, &wb->features);
@@ -1980,6 +1981,16 @@ static int sde_vbif_parse_dt(struct device_node *np,
if (rc)
goto end;
+ rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_0], 1,
+ &prop_count[VBIF_MEMTYPE_0], NULL);
+ if (rc)
+ goto end;
+
+ rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_1], 1,
+ &prop_count[VBIF_MEMTYPE_1], NULL);
+ if (rc)
+ goto end;
+
sde_cfg->vbif_count = off_count;
rc = _read_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop), prop_count,
@@ -2128,6 +2139,19 @@ static int sde_vbif_parse_dt(struct device_node *np,
if (vbif->qos_rt_tbl.npriority_lvl ||
vbif->qos_nrt_tbl.npriority_lvl)
set_bit(SDE_VBIF_QOS_REMAP, &vbif->features);
+
+ vbif->memtype_count = prop_count[VBIF_MEMTYPE_0] +
+ prop_count[VBIF_MEMTYPE_1];
+ if (vbif->memtype_count > MAX_XIN_COUNT) {
+ vbif->memtype_count = 0;
+ SDE_ERROR("too many memtype defs, ignoring entries\n");
+ }
+ for (j = 0, k = 0; j < prop_count[VBIF_MEMTYPE_0]; j++)
+ vbif->memtype[k++] = PROP_VALUE_ACCESS(
+ prop_value, VBIF_MEMTYPE_0, j);
+ for (j = 0; j < prop_count[VBIF_MEMTYPE_1]; j++)
+ vbif->memtype[k++] = PROP_VALUE_ACCESS(
+ prop_value, VBIF_MEMTYPE_1, j);
}
end:
@@ -2380,6 +2404,7 @@ static int sde_perf_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
struct sde_prop_value *prop_value = NULL;
bool prop_exists[PERF_PROP_MAX];
const char *str = NULL;
+ int j, k;
if (!cfg) {
SDE_ERROR("invalid argument\n");
@@ -2399,6 +2424,41 @@ static int sde_perf_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
if (rc)
goto freeprop;
+ rc = _validate_dt_entry(np, &sde_perf_prop[PERF_DANGER_LUT], 1,
+ &prop_count[PERF_DANGER_LUT], NULL);
+ if (rc)
+ goto freeprop;
+
+ rc = _validate_dt_entry(np, &sde_perf_prop[PERF_SAFE_LUT], 1,
+ &prop_count[PERF_SAFE_LUT], NULL);
+ if (rc)
+ goto freeprop;
+
+ rc = _validate_dt_entry(np, &sde_perf_prop[PERF_QOS_LUT_LINEAR], 1,
+ &prop_count[PERF_QOS_LUT_LINEAR], NULL);
+ if (rc)
+ goto freeprop;
+
+ rc = _validate_dt_entry(np, &sde_perf_prop[PERF_QOS_LUT_MACROTILE], 1,
+ &prop_count[PERF_QOS_LUT_MACROTILE], NULL);
+ if (rc)
+ goto freeprop;
+
+ rc = _validate_dt_entry(np, &sde_perf_prop[PERF_QOS_LUT_NRT], 1,
+ &prop_count[PERF_QOS_LUT_NRT], NULL);
+ if (rc)
+ goto freeprop;
+
+ rc = _validate_dt_entry(np, &sde_perf_prop[PERF_QOS_LUT_CWB], 1,
+ &prop_count[PERF_QOS_LUT_CWB], NULL);
+ if (rc)
+ goto freeprop;
+
+ rc = _validate_dt_entry(np, &sde_perf_prop[PERF_CDP_SETTING], 1,
+ &prop_count[PERF_CDP_SETTING], NULL);
+ if (rc)
+ goto freeprop;
+
rc = _read_dt_entry(np, sde_perf_prop, ARRAY_SIZE(sde_perf_prop),
prop_count, prop_exists, prop_value);
if (rc)
@@ -2472,6 +2532,93 @@ static int sde_perf_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
PERF_AMORTIZABLE_THRESHOLD, 0) :
DEFAULT_AMORTIZABLE_THRESHOLD;
+ if (prop_exists[PERF_DANGER_LUT] && prop_count[PERF_DANGER_LUT] <=
+ SDE_QOS_LUT_USAGE_MAX) {
+ for (j = 0; j < prop_count[PERF_DANGER_LUT]; j++) {
+ cfg->perf.danger_lut_tbl[j] =
+ PROP_VALUE_ACCESS(prop_value,
+ PERF_DANGER_LUT, j);
+ SDE_DEBUG("danger usage:%d lut:0x%x\n",
+ j, cfg->perf.danger_lut_tbl[j]);
+ }
+ }
+
+ if (prop_exists[PERF_SAFE_LUT] && prop_count[PERF_SAFE_LUT] <=
+ SDE_QOS_LUT_USAGE_MAX) {
+ for (j = 0; j < prop_count[PERF_SAFE_LUT]; j++) {
+ cfg->perf.safe_lut_tbl[j] =
+ PROP_VALUE_ACCESS(prop_value,
+ PERF_SAFE_LUT, j);
+ SDE_DEBUG("safe usage:%d lut:0x%x\n",
+ j, cfg->perf.safe_lut_tbl[j]);
+ }
+ }
+
+ for (j = 0; j < SDE_QOS_LUT_USAGE_MAX; j++) {
+ static const u32 prop_key[SDE_QOS_LUT_USAGE_MAX] = {
+ [SDE_QOS_LUT_USAGE_LINEAR] =
+ PERF_QOS_LUT_LINEAR,
+ [SDE_QOS_LUT_USAGE_MACROTILE] =
+ PERF_QOS_LUT_MACROTILE,
+ [SDE_QOS_LUT_USAGE_NRT] =
+ PERF_QOS_LUT_NRT,
+ [SDE_QOS_LUT_USAGE_CWB] =
+ PERF_QOS_LUT_CWB,
+ };
+ const u32 entry_size = 3;
+ int m, count;
+ int key = prop_key[j];
+
+ if (!prop_exists[key])
+ continue;
+
+ count = prop_count[key] / entry_size;
+
+ cfg->perf.qos_lut_tbl[j].entries = kcalloc(count,
+ sizeof(struct sde_qos_lut_entry), GFP_KERNEL);
+ if (!cfg->perf.qos_lut_tbl[j].entries) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ for (k = 0, m = 0; k < count; k++, m += entry_size) {
+ u64 lut_hi, lut_lo;
+
+ cfg->perf.qos_lut_tbl[j].entries[k].fl =
+ PROP_VALUE_ACCESS(prop_value, key, m);
+ lut_hi = PROP_VALUE_ACCESS(prop_value, key, m + 1);
+ lut_lo = PROP_VALUE_ACCESS(prop_value, key, m + 2);
+ cfg->perf.qos_lut_tbl[j].entries[k].lut =
+ (lut_hi << 32) | lut_lo;
+ SDE_DEBUG("usage:%d.%d fl:%d lut:0x%llx\n",
+ j, k,
+ cfg->perf.qos_lut_tbl[j].entries[k].fl,
+ cfg->perf.qos_lut_tbl[j].entries[k].lut);
+ }
+ cfg->perf.qos_lut_tbl[j].nentry = count;
+ }
+
+ if (prop_exists[PERF_CDP_SETTING]) {
+ const u32 prop_size = 2;
+ u32 count = prop_count[PERF_CDP_SETTING] / prop_size;
+
+ count = min_t(u32, count, SDE_PERF_CDP_USAGE_MAX);
+
+ for (j = 0; j < count; j++) {
+ cfg->perf.cdp_cfg[j].rd_enable =
+ PROP_VALUE_ACCESS(prop_value,
+ PERF_CDP_SETTING, j * prop_size);
+ cfg->perf.cdp_cfg[j].wr_enable =
+ PROP_VALUE_ACCESS(prop_value,
+ PERF_CDP_SETTING, j * prop_size + 1);
+ SDE_DEBUG("cdp usage:%d rd:%d wr:%d\n",
+ j, cfg->perf.cdp_cfg[j].rd_enable,
+ cfg->perf.cdp_cfg[j].wr_enable);
+ }
+
+ cfg->has_cdp = true;
+ }
+
freeprop:
kfree(prop_value);
end:
@@ -2639,6 +2786,9 @@ void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
kfree(sde_cfg->vbif[i].qos_nrt_tbl.priority_lvl);
}
+ for (i = 0; i < SDE_QOS_LUT_USAGE_MAX; i++)
+ kfree(sde_cfg->perf.qos_lut_tbl[i].entries);
+
kfree(sde_cfg->dma_formats);
kfree(sde_cfg->cursor_formats);
kfree(sde_cfg->vig_formats);
@@ -2670,6 +2820,10 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
if (rc)
goto end;
+ rc = sde_perf_parse_dt(np, sde_cfg);
+ if (rc)
+ goto end;
+
rc = sde_rot_parse_dt(np, sde_cfg);
if (rc)
goto end;
@@ -2720,10 +2874,6 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
if (rc)
goto end;
- rc = sde_perf_parse_dt(np, sde_cfg);
- if (rc)
- goto end;
-
return sde_cfg;
end:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index e24192b..beff43c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -60,6 +60,8 @@
#define SDE_COLOR_PROCESS_MAJOR(version) (((version) & 0xFFFF0000) >> 16)
#define SDE_COLOR_PROCESS_MINOR(version) ((version) & 0xFFFF)
+#define MAX_XIN_COUNT 16
+
/**
* Supported UBWC feature versions
*/
@@ -79,7 +81,6 @@ enum {
* @SDE_MDP_UBWC_1_0, This chipsets supports Universal Bandwidth
* compression initial revision
* @SDE_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5
- * @SDE_MDP_CDP, Client driven prefetch
* @SDE_MDP_MAX Maximum value
*/
@@ -89,7 +90,6 @@ enum {
SDE_MDP_BWC,
SDE_MDP_UBWC_1_0,
SDE_MDP_UBWC_1_5,
- SDE_MDP_CDP,
SDE_MDP_MAX
};
@@ -107,12 +107,14 @@ enum {
* @SDE_SSPP_PCC, Color correction support
* @SDE_SSPP_CURSOR, SSPP can be used as a cursor layer
* @SDE_SSPP_QOS, SSPP support QoS control, danger/safe/creq
+ * @SDE_SSPP_QOS_8LVL, SSPP support 8-level QoS control
* @SDE_SSPP_EXCL_RECT, SSPP supports exclusion rect
* @SDE_SSPP_SMART_DMA_V1, SmartDMA 1.0 support
* @SDE_SSPP_SMART_DMA_V2, SmartDMA 2.0 support
* @SDE_SSPP_SBUF, SSPP support inline stream buffer
* @SDE_SSPP_TS_PREFILL Supports prefill with traffic shaper
* @SDE_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @SDE_SSPP_CDP Supports client driven prefetch
* @SDE_SSPP_MAX maximum value
*/
enum {
@@ -128,12 +130,14 @@ enum {
SDE_SSPP_PCC,
SDE_SSPP_CURSOR,
SDE_SSPP_QOS,
+ SDE_SSPP_QOS_8LVL,
SDE_SSPP_EXCL_RECT,
SDE_SSPP_SMART_DMA_V1,
SDE_SSPP_SMART_DMA_V2,
SDE_SSPP_SBUF,
SDE_SSPP_TS_PREFILL,
SDE_SSPP_TS_PREFILL_REC1,
+ SDE_SSPP_CDP,
SDE_SSPP_MAX
};
@@ -241,6 +245,9 @@ enum {
* @SDE_WB_PIPE_ALPHA Writeback supports pipe alpha
* @SDE_WB_XY_ROI_OFFSET Writeback supports x/y-offset of out ROI in
* the destination image
+ * @SDE_WB_QOS, Writeback supports QoS control, danger/safe/creq
+ * @SDE_WB_QOS_8LVL, Writeback supports 8-level QoS control
+ * @SDE_WB_CDP Writeback supports client driven prefetch
* @SDE_WB_MAX maximum value
*/
enum {
@@ -256,6 +263,9 @@ enum {
SDE_WB_YUV_CONFIG,
SDE_WB_PIPE_ALPHA,
SDE_WB_XY_ROI_OFFSET,
+ SDE_WB_QOS,
+ SDE_WB_QOS_8LVL,
+ SDE_WB_CDP,
SDE_WB_MAX
};
@@ -344,17 +354,41 @@ struct sde_format_extended {
};
/**
+ * enum sde_qos_lut_usage - define QoS LUT use cases
+ */
+enum sde_qos_lut_usage {
+ SDE_QOS_LUT_USAGE_LINEAR,
+ SDE_QOS_LUT_USAGE_MACROTILE,
+ SDE_QOS_LUT_USAGE_NRT,
+ SDE_QOS_LUT_USAGE_CWB,
+ SDE_QOS_LUT_USAGE_MAX,
+};
+
+/**
+ * struct sde_qos_lut_entry - define QoS LUT table entry
+ * @fl: fill level, or zero on last entry to indicate default lut
+ * @lut: lut to use if equal to or less than fill level
+ */
+struct sde_qos_lut_entry {
+ u32 fl;
+ u64 lut;
+};
+
+/**
+ * struct sde_qos_lut_tbl - define QoS LUT table
+ * @nentry: number of entries in this table
+ * @entries: Pointer to table entries
+ */
+struct sde_qos_lut_tbl {
+ u32 nentry;
+ struct sde_qos_lut_entry *entries;
+};
+
+/**
* struct sde_sspp_sub_blks : SSPP sub-blocks
* @maxdwnscale: max downscale ratio supported(without DECIMATION)
* @maxupscale: maxupscale ratio supported
* @maxwidth: max pixelwidth supported by this pipe
- * @danger_lut_linear: LUT to generate danger signals for linear format
- * @safe_lut_linear: LUT to generate safe signals for linear format
- * @danger_lut_tile: LUT to generate danger signals for tile format
- * @safe_lut_tile: LUT to generate safe signals for tile format
- * @danger_lut_nrt: LUT to generate danger signals for non-realtime use case
- * @safe_lut_nrt: LUT to generate safe signals for non-realtime use case
- * @creq_lut_nrt: LUT to generate creq signals for non-realtime use case
* @creq_vblank: creq priority during vertical blanking
* @danger_vblank: danger priority during vertical blanking
* @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
@@ -371,13 +405,6 @@ struct sde_format_extended {
*/
struct sde_sspp_sub_blks {
u32 maxlinewidth;
- u32 danger_lut_linear;
- u32 safe_lut_linear;
- u32 danger_lut_tile;
- u32 safe_lut_tile;
- u32 danger_lut_nrt;
- u32 safe_lut_nrt;
- u32 creq_lut_nrt;
u32 creq_vblank;
u32 danger_vblank;
u32 pixel_ram_size;
@@ -680,6 +707,8 @@ struct sde_vbif_qos_tbl {
* @dynamic_ot_wr_tbl dynamic OT write configuration table
* @qos_rt_tbl real-time QoS priority table
* @qos_nrt_tbl non-real-time QoS priority table
+ * @memtype_count number of defined memtypes
+ * @memtype array of xin memtype definitions
*/
struct sde_vbif_cfg {
SDE_HW_BLK_INFO;
@@ -690,6 +719,8 @@ struct sde_vbif_cfg {
struct sde_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
struct sde_vbif_qos_tbl qos_rt_tbl;
struct sde_vbif_qos_tbl qos_nrt_tbl;
+ u32 memtype_count;
+ u32 memtype[MAX_XIN_COUNT];
};
/**
* struct sde_reg_dma_cfg - information of lut dma blocks
@@ -706,6 +737,27 @@ struct sde_reg_dma_cfg {
};
/**
+ * Define CDP use cases
+ * @SDE_PERF_CDP_USAGE_RT: real-time use cases
+ * @SDE_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
+ */
+enum {
+ SDE_PERF_CDP_USAGE_RT,
+ SDE_PERF_CDP_USAGE_NRT,
+ SDE_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct sde_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct sde_perf_cdp_cfg {
+ bool rd_enable;
+ bool wr_enable;
+};
+
+/**
* struct sde_perf_cfg - performance control settings
* @max_bw_low low threshold of maximum bandwidth (kbps)
* @max_bw_high high threshold of maximum bandwidth (kbps)
@@ -722,6 +774,10 @@ struct sde_reg_dma_cfg {
* @downscaling_prefill_lines downscaling latency in lines
* @amortizable_theshold minimum y position for traffic shaping prefill
* @min_prefill_lines minimum pipeline latency in lines
+ * @safe_lut_tbl: LUT tables for safe signals
+ * @danger_lut_tbl: LUT tables for danger signals
+ * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg cdp use case configurations
*/
struct sde_perf_cfg {
u32 max_bw_low;
@@ -739,6 +795,10 @@ struct sde_perf_cfg {
u32 downscaling_prefill_lines;
u32 amortizable_threshold;
u32 min_prefill_lines;
+ u32 safe_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
+ u32 danger_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
+ struct sde_qos_lut_tbl qos_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
+ struct sde_perf_cdp_cfg cdp_cfg[SDE_PERF_CDP_USAGE_MAX];
};
/**
@@ -756,7 +816,7 @@ struct sde_perf_cfg {
* @csc_type csc or csc_10bit support.
* @smart_dma_rev Supported version of SmartDMA feature.
* @has_src_split source split feature status
- * @has_cdp Client driver prefetch feature status
+ * @has_cdp Client driven prefetch feature status
* @has_wb_ubwc UBWC feature supported on WB
* @ubwc_version UBWC feature version (0x0 for not supported)
* @has_sbuf indicate if stream buffer is available
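For reference, the sketch below shows one way a consumer could resolve a LUT from the new sde_qos_lut_tbl, following the semantics documented for sde_qos_lut_entry above (entries in ascending fill level, with a zero fill level on the last entry acting as the default). The helper name is hypothetical and the lookup policy is an assumption, not part of this patch.

static u64 example_get_qos_lut(const struct sde_qos_lut_tbl *tbl, u32 total_fl)
{
        u32 i;

        if (!tbl || !tbl->nentry)
                return 0;

        for (i = 0; i < tbl->nentry; i++)
                if (!tbl->entries[i].fl || total_fl <= tbl->entries[i].fl)
                        return tbl->entries[i].lut;

        return 0;       /* table has no default entry */
}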
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index 24f16c6..53a48c8 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -395,37 +395,32 @@ static const struct sde_irq_type sde_irq_map[] = {
SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
- /* irq_idx: 68-71 */
+ /* irq_idx: 72-75 */
{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, 2},
{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, 2},
{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
- /* irq_idx: 72-75 */
+ /* irq_idx: 76-79 */
{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, 2},
{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
- /* irq_idx: 76-79 */
+ /* irq_idx: 80-83 */
{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, 2},
{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
- /* irq_idx: 80-83 */
+ /* irq_idx: 84-87 */
{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, 2},
{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, 2},
{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
- /* irq_idx: 84-87 */
- { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
- { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
- { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
- { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
/* irq_idx: 88-91 */
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
@@ -986,7 +981,7 @@ static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
sde_intr_set[reg_idx].status_off) &
sde_irq_map[irq_idx].irq_mask;
if (intr_status && clear)
- SDE_REG_WRITE(&intr->hw, sde_intr_set[irq_idx].clr_off,
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
intr_status);
spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index 694d267..bb9f9c0 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -70,6 +70,8 @@
#define SSPP_QOS_CTRL 0x6C
#define SSPP_DECIMATION_CONFIG 0xB4
#define SSPP_SRC_ADDR_SW_STATUS 0x70
+#define SSPP_CREQ_LUT_0 0x74
+#define SSPP_CREQ_LUT_1 0x78
#define SSPP_SW_PIX_EXT_C0_LR 0x100
#define SSPP_SW_PIX_EXT_C0_TB 0x104
#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
@@ -80,6 +82,7 @@
#define SSPP_SW_PIX_EXT_C3_TB 0x124
#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128
#define SSPP_TRAFFIC_SHAPER 0x130
+#define SSPP_CDP_CNTL 0x134
#define SSPP_UBWC_ERROR_STATUS 0x138
#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150
#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
@@ -982,7 +985,13 @@ static void sde_hw_sspp_setup_creq_lut(struct sde_hw_pipe *ctx,
if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
return;
- SDE_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+ if (ctx->cap && test_bit(SDE_SSPP_QOS_8LVL, &ctx->cap->features)) {
+ SDE_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+ SDE_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
+ cfg->creq_lut >> 32);
+ } else {
+ SDE_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+ }
}
static void sde_hw_sspp_setup_qos_ctrl(struct sde_hw_pipe *ctx,
@@ -1094,6 +1103,30 @@ static void sde_hw_sspp_setup_ts_prefill(struct sde_hw_pipe *ctx,
SDE_REG_WRITE(&ctx->hw, ts_prefill_offset, ts_count);
}
+static void sde_hw_sspp_setup_cdp(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cdp_cfg *cfg)
+{
+ u32 idx;
+ u32 cdp_cntl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+ return;
+
+ if (cfg->enable)
+ cdp_cntl |= BIT(0);
+ if (cfg->ubwc_meta_enable)
+ cdp_cntl |= BIT(1);
+ if (cfg->tile_amortize_enable)
+ cdp_cntl |= BIT(2);
+ if (cfg->preload_ahead == SDE_SSPP_CDP_PRELOAD_AHEAD_64)
+ cdp_cntl |= BIT(3);
+
+ SDE_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+}
+
static void _setup_layer_ops(struct sde_hw_pipe *c,
unsigned long features)
{
@@ -1155,6 +1188,9 @@ static void _setup_layer_ops(struct sde_hw_pipe *c,
c->ops.setup_sys_cache = sde_hw_sspp_setup_sys_cache;
c->ops.get_sbuf_status = sde_hw_sspp_get_sbuf_status;
}
+
+ if (test_bit(SDE_SSPP_CDP, &features))
+ c->ops.setup_cdp = sde_hw_sspp_setup_cdp;
}
static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index 010b363..d52c0e5 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -300,7 +300,7 @@ struct sde_hw_pipe_cfg {
struct sde_hw_pipe_qos_cfg {
u32 danger_lut;
u32 safe_lut;
- u32 creq_lut;
+ u64 creq_lut;
u32 creq_vblank;
u32 danger_vblank;
bool vblank_en;
@@ -308,6 +308,30 @@ struct sde_hw_pipe_qos_cfg {
};
/**
+ * enum CDP preload ahead address size
+ */
+enum {
+ SDE_SSPP_CDP_PRELOAD_AHEAD_32,
+ SDE_SSPP_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct sde_hw_pipe_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ * SDE_SSPP_CDP_PRELOAD_AHEAD_32,
+ * SDE_SSPP_CDP_PRELOAD_AHEAD_64
+ */
+struct sde_hw_pipe_cdp_cfg {
+ bool enable;
+ bool ubwc_meta_enable;
+ bool tile_amortize_enable;
+ u32 preload_ahead;
+};
+
+/**
* enum system cache rotation operation mode
*/
enum {
@@ -574,6 +598,14 @@ struct sde_hw_sspp_ops {
void (*setup_ts_prefill)(struct sde_hw_pipe *ctx,
struct sde_hw_pipe_ts_cfg *cfg,
enum sde_sspp_multirect_index index);
+
+ /**
+ * setup_cdp - setup client driven prefetch
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to cdp configuration
+ */
+ void (*setup_cdp)(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cdp_cfg *cfg);
};
/**
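The writeback encoder change earlier in this patch shows how a CDP config is derived from the catalog for the write path; a read-pipe analogue could look like the sketch below. Whether sde_plane wires it up exactly this way (and uses the RT use-case entry) is an assumption, and the function name is hypothetical.

static void example_plane_setup_cdp(struct sde_hw_pipe *pipe,
                struct sde_mdss_cfg *catalog, const struct sde_format *fmt)
{
        struct sde_hw_pipe_cdp_cfg cdp_cfg = { 0 };

        if (!pipe || !catalog || !fmt || !pipe->ops.setup_cdp)
                return;

        /* assumed: read-pipe enable comes from the RT CDP use-case entry */
        cdp_cfg.enable = catalog->perf.cdp_cfg[SDE_PERF_CDP_USAGE_RT].rd_enable;
        cdp_cfg.ubwc_meta_enable = SDE_FORMAT_IS_UBWC(fmt);
        cdp_cfg.tile_amortize_enable = SDE_FORMAT_IS_UBWC(fmt) ||
                        SDE_FORMAT_IS_TILE(fmt);
        cdp_cfg.preload_ahead = SDE_SSPP_CDP_PRELOAD_AHEAD_64;

        pipe->ops.setup_cdp(pipe, &cdp_cfg);
}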
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
index 9b9763a..b5c273a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
@@ -31,11 +31,43 @@
#define VBIF_IN_WR_LIM_CONF2 0x00C8
#define VBIF_OUT_RD_LIM_CONF0 0x00D0
#define VBIF_OUT_WR_LIM_CONF0 0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164
#define VBIF_XIN_HALT_CTRL0 0x0200
#define VBIF_XIN_HALT_CTRL1 0x0204
#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590
+static void sde_hw_set_mem_type(struct sde_hw_vbif *vbif,
+ u32 xin_id, u32 value)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 reg_off;
+ u32 bit_off;
+ u32 reg_val;
+
+ /*
+ * Assume 4 bits per field and 8 fields per 32-bit register, so at
+ * most 16 fields are spread across the two registers
+ */
+ if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+ return;
+
+ c = &vbif->hw;
+
+ if (xin_id >= 8) {
+ xin_id -= 8;
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+ } else {
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+ }
+ bit_off = (xin_id & 0x7) * 4;
+ reg_val = SDE_REG_READ(c, reg_off);
+ reg_val &= ~(0x7 << bit_off);
+ reg_val |= (value & 0x7) << bit_off;
+ SDE_REG_WRITE(c, reg_off, reg_val);
+}
+
static void sde_hw_set_limit_conf(struct sde_hw_vbif *vbif,
u32 xin_id, bool rd, u32 limit)
{
@@ -144,6 +176,7 @@ static void _setup_vbif_ops(struct sde_hw_vbif_ops *ops,
ops->get_halt_ctrl = sde_hw_get_halt_ctrl;
if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
ops->set_qos_remap = sde_hw_set_qos_remap;
+ ops->set_mem_type = sde_hw_set_mem_type;
}
static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif,
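The new set_mem_type op is registered unconditionally above; the per-xin values come from the qcom,sde-vbif-memtype-0/1 tables parsed into the catalog. A sketch of how a caller might program them is below; it assumes the sde_hw_vbif handle carries a ->cap pointer to its sde_vbif_cfg entry, and the function name is hypothetical.

static void example_init_memtypes(struct sde_hw_vbif *vbif)
{
        u32 i;

        if (!vbif || !vbif->cap || !vbif->ops.set_mem_type)
                return;

        for (i = 0; i < vbif->cap->memtype_count; i++)
                vbif->ops.set_mem_type(vbif, i, vbif->cap->memtype[i]);
}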
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
index c67738b..80a9e5a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
@@ -71,6 +71,15 @@ struct sde_hw_vbif_ops {
*/
void (*set_qos_remap)(struct sde_hw_vbif *vbif,
u32 xin_id, u32 level, u32 remap_level);
+
+ /**
+ * set_mem_type - set memory type
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @value: memory type value
+ */
+ void (*set_mem_type)(struct sde_hw_vbif *vbif,
+ u32 xin_id, u32 value);
};
struct sde_hw_vbif {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
index 98aff0f..378b904 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
@@ -41,13 +41,21 @@
#define WB_N16_INIT_PHASE_Y_C12 0x06C
#define WB_OUT_SIZE 0x074
#define WB_ALPHA_X_VALUE 0x078
+#define WB_DANGER_LUT 0x084
+#define WB_SAFE_LUT 0x088
+#define WB_QOS_CTRL 0x090
+#define WB_CREQ_LUT_0 0x098
+#define WB_CREQ_LUT_1 0x09C
#define WB_UBWC_STATIC_CTRL 0x144
#define WB_CSC_BASE 0x260
#define WB_DST_ADDR_SW_STATUS 0x2B0
-#define WB_CDP_CTRL 0x2B4
+#define WB_CDP_CNTL 0x2B4
#define WB_OUT_IMAGE_SIZE 0x2C0
#define WB_OUT_XY 0x2C4
+/* WB_QOS_CTRL */
+#define WB_QOS_CTRL_DANGER_SAFE_EN BIT(0)
+
static struct sde_wb_cfg *_wb_offset(enum sde_wb wb,
struct sde_mdss_cfg *m,
void __iomem *addr,
@@ -88,7 +96,6 @@ static void sde_hw_wb_setup_format(struct sde_hw_wb *ctx,
u32 write_config = 0;
u32 opmode = 0;
u32 dst_addr_sw = 0;
- u32 cdp_settings = 0x0;
chroma_samp = fmt->chroma_sample;
@@ -157,18 +164,6 @@ static void sde_hw_wb_setup_format(struct sde_hw_wb *ctx,
SDE_REG_WRITE(c, WB_OUT_SIZE, outsize);
SDE_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
SDE_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
-
- /* Enable CDP */
- cdp_settings = BIT(0);
-
- if (!SDE_FORMAT_IS_LINEAR(fmt))
- cdp_settings |= BIT(1);
-
- /* Enable 64 transactions if line mode*/
- if (data->intf_mode == INTF_MODE_WB_LINE)
- cdp_settings |= BIT(3);
-
- SDE_REG_WRITE(c, WB_CDP_CTRL, cdp_settings);
}
static void sde_hw_wb_roi(struct sde_hw_wb *ctx, struct sde_hw_wb_cfg *wb)
@@ -185,6 +180,68 @@ static void sde_hw_wb_roi(struct sde_hw_wb *ctx, struct sde_hw_wb_cfg *wb)
SDE_REG_WRITE(c, WB_OUT_SIZE, out_size);
}
+static void sde_hw_wb_setup_danger_safe_lut(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_qos_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+ if (!ctx || !cfg)
+ return;
+
+ SDE_REG_WRITE(c, WB_DANGER_LUT, cfg->danger_lut);
+ SDE_REG_WRITE(c, WB_SAFE_LUT, cfg->safe_lut);
+}
+
+static void sde_hw_wb_setup_creq_lut(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_qos_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+ if (!ctx || !cfg)
+ return;
+
+ if (ctx->caps && test_bit(SDE_WB_QOS_8LVL, &ctx->caps->features)) {
+ SDE_REG_WRITE(c, WB_CREQ_LUT_0, cfg->creq_lut);
+ SDE_REG_WRITE(c, WB_CREQ_LUT_1, cfg->creq_lut >> 32);
+ }
+}
+
+static void sde_hw_wb_setup_qos_ctrl(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_qos_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 qos_ctrl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ if (cfg->danger_safe_en)
+ qos_ctrl |= WB_QOS_CTRL_DANGER_SAFE_EN;
+
+ SDE_REG_WRITE(c, WB_QOS_CTRL, qos_ctrl);
+}
+
+static void sde_hw_wb_setup_cdp(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cdp_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 cdp_cntl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ c = &ctx->hw;
+
+ if (cfg->enable)
+ cdp_cntl |= BIT(0);
+ if (cfg->ubwc_meta_enable)
+ cdp_cntl |= BIT(1);
+ if (cfg->preload_ahead == SDE_WB_CDP_PRELOAD_AHEAD_64)
+ cdp_cntl |= BIT(3);
+
+ SDE_REG_WRITE(c, WB_CDP_CNTL, cdp_cntl);
+}
+
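As a quick check of the bit layout sde_hw_wb_setup_cdp programs into WB_CDP_CNTL (BIT(0) enable, BIT(1) UBWC metadata preload, BIT(3) 64-request preload-ahead; tile_amortize_enable is carried in the config but not consumed by this writeback hook), a small userspace sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cdp_cfg {
	bool enable;
	bool ubwc_meta_enable;
	unsigned int preload_ahead;	/* 0 = 32 requests, 1 = 64 requests */
};

static uint32_t cdp_cntl_value(const struct cdp_cfg *cfg)
{
	uint32_t v = 0;

	if (cfg->enable)
		v |= 1u << 0;		/* CDP enable */
	if (cfg->ubwc_meta_enable)
		v |= 1u << 1;		/* preload UBWC metadata */
	if (cfg->preload_ahead == 1)
		v |= 1u << 3;		/* preload 64 requests ahead */
	return v;
}

int main(void)
{
	struct cdp_cfg cfg = { .enable = true, .ubwc_meta_enable = true,
			       .preload_ahead = 1 };

	printf("WB_CDP_CNTL=0x%x\n", cdp_cntl_value(&cfg));	/* prints 0xb */
	return 0;
}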
static void _setup_wb_ops(struct sde_hw_wb_ops *ops,
unsigned long features)
{
@@ -193,6 +250,16 @@ static void _setup_wb_ops(struct sde_hw_wb_ops *ops,
if (test_bit(SDE_WB_XY_ROI_OFFSET, &features))
ops->setup_roi = sde_hw_wb_roi;
+
+ if (test_bit(SDE_WB_QOS, &features)) {
+ ops->setup_danger_safe_lut =
+ sde_hw_wb_setup_danger_safe_lut;
+ ops->setup_creq_lut = sde_hw_wb_setup_creq_lut;
+ ops->setup_qos_ctrl = sde_hw_wb_setup_qos_ctrl;
+ }
+
+ if (test_bit(SDE_WB_CDP, &features))
+ ops->setup_cdp = sde_hw_wb_setup_cdp;
}
struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.h b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
index 9d17fb3..ca3c386 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
@@ -29,6 +29,44 @@ struct sde_hw_wb_cfg {
};
/**
+ * enum: CDP preload-ahead address size
+ */
+enum {
+ SDE_WB_CDP_PRELOAD_AHEAD_32,
+ SDE_WB_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct sde_hw_wb_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ * SDE_WB_CDP_PRELOAD_AHEAD_32,
+ * SDE_WB_CDP_PRELOAD_AHEAD_64
+ */
+struct sde_hw_wb_cdp_cfg {
+ bool enable;
+ bool ubwc_meta_enable;
+ bool tile_amortize_enable;
+ u32 preload_ahead;
+};
+
+/**
+ * struct sde_hw_wb_qos_cfg : Writeback pipe QoS configuration
+ * @danger_lut: LUT for generating the danger level based on fill level
+ * @safe_lut: LUT for generating the safe level based on fill level
+ * @creq_lut: LUT for generating the creq level based on fill level
+ * @danger_safe_en: enable danger safe generation
+ */
+struct sde_hw_wb_qos_cfg {
+ u32 danger_lut;
+ u32 safe_lut;
+ u64 creq_lut;
+ bool danger_safe_en;
+};
+
+/**
*
* struct sde_hw_wb_ops : Interface to the wb Hw driver functions
* Assumption is these functions will be called after clocks are enabled
@@ -57,6 +95,38 @@ struct sde_hw_wb_ops {
void (*setup_roi)(struct sde_hw_wb *ctx,
struct sde_hw_wb_cfg *wb);
+
+ /**
+ * setup_danger_safe_lut - setup danger safe LUTs
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ */
+ void (*setup_danger_safe_lut)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_qos_cfg *cfg);
+
+ /**
+ * setup_creq_lut - setup CREQ LUT
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ */
+ void (*setup_creq_lut)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_qos_cfg *cfg);
+
+ /**
+ * setup_qos_ctrl - setup QoS control
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ */
+ void (*setup_qos_ctrl)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_qos_cfg *cfg);
+
+ /**
+ * setup_cdp - setup CDP
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe CDP configuration
+ */
+ void (*setup_cdp)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cdp_cfg *cfg);
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index a7d6ecf..c783ab0 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -1238,6 +1238,10 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
sde_hw_intr_destroy(sde_kms->hw_intr);
sde_kms->hw_intr = NULL;
+ if (sde_kms->power_event)
+ sde_power_handle_unregister_event(
+ &priv->phandle, sde_kms->power_event);
+
_sde_kms_release_displays(sde_kms);
/* safe to call these more than once during shutdown */
@@ -1443,6 +1447,16 @@ static void __iomem *_sde_kms_ioremap(struct platform_device *pdev,
return ptr;
}
+static void sde_kms_handle_power_event(u32 event_type, void *usr)
+{
+ struct sde_kms *sde_kms = usr;
+
+ if (!sde_kms)
+ return;
+
+ if (event_type == SDE_POWER_EVENT_POST_ENABLE)
+ sde_vbif_init_memtypes(sde_kms);
+}
static int sde_kms_hw_init(struct msm_kms *kms)
{
@@ -1660,6 +1674,14 @@ static int sde_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
+ /*
+ * Handle (re)initializations during power enable
+ */
+ sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
+ sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
+ SDE_POWER_EVENT_POST_ENABLE,
+ sde_kms_handle_power_event, sde_kms, "kms");
+
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
return 0;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index d20af9f..f73cb21 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -159,6 +159,7 @@ struct sde_kms {
struct sde_power_client *core_client;
struct ion_client *iclient;
+ struct sde_power_event *power_event;
/* directory entry for debugfs */
struct dentry *debugfs_danger;
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 463c84e..d63fec1 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -99,6 +99,7 @@ enum sde_plane_qos {
* @csc_cfg: Decoded user configuration for csc
* @csc_usr_ptr: Points to csc_cfg if valid user config available
* @csc_ptr: Points to sde_csc_cfg structure to use for current
+ * @mplane_list: List of multirect planes of the same pipe
* @catalog: Points to sde catalog structure
* @sbuf_mode: force stream buffer mode if set
* @sbuf_writeback: force stream buffer writeback if set
@@ -126,6 +127,7 @@ struct sde_plane {
bool is_error;
bool is_rt_pipe;
bool is_virtual;
+ struct list_head mplane_list;
struct sde_mdss_cfg *catalog;
u32 sbuf_mode;
u32 sbuf_writeback;
@@ -222,93 +224,89 @@ static bool sde_plane_crtc_enabled(struct drm_plane_state *state)
static inline int _sde_plane_calc_fill_level(struct drm_plane *plane,
const struct sde_format *fmt, u32 src_width)
{
- struct sde_plane *psde;
+ struct sde_plane *psde, *tmp;
+ struct sde_plane_state *pstate;
+ struct sde_plane_rot_state *rstate;
u32 fixed_buff_size;
u32 total_fl;
+ u32 hflip_bytes;
- if (!plane || !fmt) {
+ if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
SDE_ERROR("invalid arguments\n");
return 0;
}
psde = to_sde_plane(plane);
+ pstate = to_sde_plane_state(plane->state);
+ rstate = &pstate->rot;
fixed_buff_size = psde->pipe_sblk->pixel_ram_size;
+ list_for_each_entry(tmp, &psde->mplane_list, mplane_list) {
+ if (!sde_plane_enabled(tmp->base.state))
+ continue;
+ SDE_DEBUG("plane%d/%d src_width:%d/%d\n",
+ psde->base.base.id, tmp->base.base.id,
+ src_width, tmp->pipe_cfg.src_rect.w);
+ src_width = max_t(u32, src_width, tmp->pipe_cfg.src_rect.w);
+ }
+
+ if ((rstate->out_rotation & DRM_REFLECT_X) &&
+ SDE_FORMAT_IS_LINEAR(fmt))
+ hflip_bytes = (src_width + 32) * fmt->bpp;
+ else
+ hflip_bytes = 0;
+
if (fmt->fetch_planes == SDE_PLANE_PSEUDO_PLANAR) {
if (fmt->chroma_sample == SDE_CHROMA_420) {
/* NV12 */
- total_fl = (fixed_buff_size / 2) /
+ total_fl = (fixed_buff_size / 2 - hflip_bytes) /
((src_width + 32) * fmt->bpp);
} else {
/* non NV12 */
- total_fl = (fixed_buff_size) /
- ((src_width + 32) * fmt->bpp);
+ total_fl = (fixed_buff_size / 2 - hflip_bytes) /
+ ((src_width + 32) * fmt->bpp * 2);
}
} else {
- total_fl = (fixed_buff_size * 2) /
- ((src_width + 32) * fmt->bpp);
+ if (pstate->multirect_mode == SDE_SSPP_MULTIRECT_PARALLEL) {
+ total_fl = (fixed_buff_size / 2 - hflip_bytes) /
+ ((src_width + 32) * fmt->bpp * 2);
+ } else {
+ total_fl = (fixed_buff_size - hflip_bytes) /
+ ((src_width + 32) * fmt->bpp * 2);
+ }
}
- SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
+ SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u hf:%d fl:%u\n",
plane->base.id, psde->pipe - SSPP_VIG0,
(char *)&fmt->base.pixel_format,
- src_width, total_fl);
+ src_width, hflip_bytes, total_fl);
return total_fl;
}
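The reworked fill-level math above can be exercised outside the driver. Below is a hedged userspace rework: pixel_ram_size, bpp and the mode flags are illustrative stand-ins for catalog/format data, and src_width is assumed to already be the maximum across the multirect plane list (which the list walk above folds in).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t calc_fill_level(uint32_t pixel_ram_size, uint32_t src_width,
				uint32_t bpp, bool pseudo_planar, bool nv12,
				bool parallel_multirect, bool hflip_linear)
{
	/* horizontal flip on linear formats costs one extra line of fetch */
	uint32_t hflip_bytes = hflip_linear ? (src_width + 32) * bpp : 0;

	if (pseudo_planar) {
		if (nv12)	/* chroma 420 */
			return (pixel_ram_size / 2 - hflip_bytes) /
				((src_width + 32) * bpp);
		return (pixel_ram_size / 2 - hflip_bytes) /
			((src_width + 32) * bpp * 2);
	}

	if (parallel_multirect)		/* two rects share the pixel RAM */
		return (pixel_ram_size / 2 - hflip_bytes) /
			((src_width + 32) * bpp * 2);

	return (pixel_ram_size - hflip_bytes) / ((src_width + 32) * bpp * 2);
}

int main(void)
{
	/* 50KB pixel RAM, 1920-wide NV12 source, illustrative bpp of 1 */
	printf("fl=%u\n",
	       calc_fill_level(50 * 1024, 1920, 1, true, true, false, false));
	return 0;	/* prints fl=13 */
}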
/**
- * _sde_plane_get_qos_lut_linear - get linear LUT mapping
+ * _sde_plane_get_qos_lut - get LUT mapping based on fill level
+ * @tbl: Pointer to LUT table
* @total_fl: fill level
* Return: LUT setting corresponding to the fill level
*/
-static inline u32 _sde_plane_get_qos_lut_linear(u32 total_fl)
+static u64 _sde_plane_get_qos_lut(const struct sde_qos_lut_tbl *tbl,
+ u32 total_fl)
{
- u32 qos_lut;
+ int i;
- if (total_fl <= 4)
- qos_lut = 0x1B;
- else if (total_fl <= 5)
- qos_lut = 0x5B;
- else if (total_fl <= 6)
- qos_lut = 0x15B;
- else if (total_fl <= 7)
- qos_lut = 0x55B;
- else if (total_fl <= 8)
- qos_lut = 0x155B;
- else if (total_fl <= 9)
- qos_lut = 0x555B;
- else if (total_fl <= 10)
- qos_lut = 0x1555B;
- else if (total_fl <= 11)
- qos_lut = 0x5555B;
- else if (total_fl <= 12)
- qos_lut = 0x15555B;
- else
- qos_lut = 0x55555B;
+ if (!tbl || !tbl->nentry || !tbl->entries)
+ return 0;
- return qos_lut;
-}
+ for (i = 0; i < tbl->nentry; i++)
+ if (total_fl <= tbl->entries[i].fl)
+ return tbl->entries[i].lut;
-/**
- * _sde_plane_get_qos_lut_macrotile - get macrotile LUT mapping
- * @total_fl: fill level
- * Return: LUT setting corresponding to the fill level
- */
-static inline u32 _sde_plane_get_qos_lut_macrotile(u32 total_fl)
-{
- u32 qos_lut;
+ /* if last fl is zero, use as default */
+ if (!tbl->entries[i-1].fl)
+ return tbl->entries[i-1].lut;
- if (total_fl <= 10)
- qos_lut = 0x1AAff;
- else if (total_fl <= 11)
- qos_lut = 0x5AAFF;
- else if (total_fl <= 12)
- qos_lut = 0x15AAFF;
- else
- qos_lut = 0x55AAFF;
-
- return qos_lut;
+ return 0;
}
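_sde_plane_get_qos_lut now walks a catalog table in ascending fill-level order and returns the first entry that covers total_fl, with a trailing zero-fl entry acting as the default. A standalone sketch of that lookup, using values patterned after the removed hard-coded linear LUT (illustrative, not the shipping catalog):

#include <stdint.h>
#include <stdio.h>

struct qos_lut_entry { uint32_t fl; uint64_t lut; };

static uint64_t get_qos_lut(const struct qos_lut_entry *tbl, int nentry,
			    uint32_t total_fl)
{
	int i;

	for (i = 0; i < nentry; i++)
		if (total_fl <= tbl[i].fl)
			return tbl[i].lut;

	/* a last entry with fl == 0 serves as the default LUT */
	if (nentry && !tbl[nentry - 1].fl)
		return tbl[nentry - 1].lut;

	return 0;
}

int main(void)
{
	const struct qos_lut_entry linear[] = {
		{ 4, 0x1b }, { 8, 0x155b }, { 12, 0x15555b }, { 0, 0x55555b },
	};

	printf("fl=6  -> 0x%llx\n",
	       (unsigned long long)get_qos_lut(linear, 4, 6));	/* 0x155b */
	printf("fl=20 -> 0x%llx\n",
	       (unsigned long long)get_qos_lut(linear, 4, 20));	/* default */
	return 0;
}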
/**
@@ -321,8 +319,8 @@ static void _sde_plane_set_qos_lut(struct drm_plane *plane,
{
struct sde_plane *psde;
const struct sde_format *fmt = NULL;
- u32 qos_lut;
- u32 total_fl = 0;
+ u64 qos_lut;
+ u32 total_fl = 0, lut_usage;
if (!plane || !fb) {
SDE_ERROR("invalid arguments plane %d fb %d\n",
@@ -332,7 +330,7 @@ static void _sde_plane_set_qos_lut(struct drm_plane *plane,
psde = to_sde_plane(plane);
- if (!psde->pipe_hw || !psde->pipe_sblk) {
+ if (!psde->pipe_hw || !psde->pipe_sblk || !psde->catalog) {
SDE_ERROR("invalid arguments\n");
return;
} else if (!psde->pipe_hw->ops.setup_creq_lut) {
@@ -340,7 +338,7 @@ static void _sde_plane_set_qos_lut(struct drm_plane *plane,
}
if (!psde->is_rt_pipe) {
- qos_lut = psde->pipe_sblk->creq_lut_nrt;
+ lut_usage = SDE_QOS_LUT_USAGE_NRT;
} else {
fmt = sde_get_sde_format_ext(
fb->pixel_format,
@@ -350,19 +348,21 @@ static void _sde_plane_set_qos_lut(struct drm_plane *plane,
psde->pipe_cfg.src_rect.w);
if (SDE_FORMAT_IS_LINEAR(fmt))
- qos_lut = _sde_plane_get_qos_lut_linear(total_fl);
+ lut_usage = SDE_QOS_LUT_USAGE_LINEAR;
else
- qos_lut = _sde_plane_get_qos_lut_macrotile(total_fl);
+ lut_usage = SDE_QOS_LUT_USAGE_MACROTILE;
}
+ qos_lut = _sde_plane_get_qos_lut(
+ &psde->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
+
psde->pipe_qos_cfg.creq_lut = qos_lut;
trace_sde_perf_set_qos_luts(psde->pipe - SSPP_VIG0,
(fmt) ? fmt->base.pixel_format : 0,
- psde->is_rt_pipe, total_fl, qos_lut,
- (fmt) ? SDE_FORMAT_IS_LINEAR(fmt) : 0);
+ psde->is_rt_pipe, total_fl, qos_lut, lut_usage);
- SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%x\n",
+ SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
plane->base.id,
psde->pipe - SSPP_VIG0,
fmt ? (char *)&fmt->base.pixel_format : NULL,
@@ -390,7 +390,7 @@ static void _sde_plane_set_danger_lut(struct drm_plane *plane,
psde = to_sde_plane(plane);
- if (!psde->pipe_hw || !psde->pipe_sblk) {
+ if (!psde->pipe_hw || !psde->pipe_sblk || !psde->catalog) {
SDE_ERROR("invalid arguments\n");
return;
} else if (!psde->pipe_hw->ops.setup_danger_safe_lut) {
@@ -398,8 +398,10 @@ static void _sde_plane_set_danger_lut(struct drm_plane *plane,
}
if (!psde->is_rt_pipe) {
- danger_lut = psde->pipe_sblk->danger_lut_nrt;
- safe_lut = psde->pipe_sblk->safe_lut_nrt;
+ danger_lut = psde->catalog->perf.danger_lut_tbl
+ [SDE_QOS_LUT_USAGE_NRT];
+ safe_lut = psde->catalog->perf.safe_lut_tbl
+ [SDE_QOS_LUT_USAGE_NRT];
} else {
fmt = sde_get_sde_format_ext(
fb->pixel_format,
@@ -407,11 +409,15 @@ static void _sde_plane_set_danger_lut(struct drm_plane *plane,
drm_format_num_planes(fb->pixel_format));
if (SDE_FORMAT_IS_LINEAR(fmt)) {
- danger_lut = psde->pipe_sblk->danger_lut_linear;
- safe_lut = psde->pipe_sblk->safe_lut_linear;
+ danger_lut = psde->catalog->perf.danger_lut_tbl
+ [SDE_QOS_LUT_USAGE_LINEAR];
+ safe_lut = psde->catalog->perf.safe_lut_tbl
+ [SDE_QOS_LUT_USAGE_LINEAR];
} else {
- danger_lut = psde->pipe_sblk->danger_lut_tile;
- safe_lut = psde->pipe_sblk->safe_lut_tile;
+ danger_lut = psde->catalog->perf.danger_lut_tbl
+ [SDE_QOS_LUT_USAGE_MACROTILE];
+ safe_lut = psde->catalog->perf.safe_lut_tbl
+ [SDE_QOS_LUT_USAGE_MACROTILE];
}
}
@@ -1362,33 +1368,31 @@ static struct sde_crtc_res_ops fbo_res_ops = {
static u32 sde_plane_rot_calc_prefill(struct drm_plane *plane)
{
struct drm_plane_state *state;
- struct drm_crtc_state *cstate;
struct sde_plane_state *pstate;
struct sde_plane_rot_state *rstate;
struct sde_kms *sde_kms;
u32 blocksize = 128;
u32 prefill_line = 0;
- if (!plane || !plane->state || !plane->state->fb ||
- !plane->state->crtc || !plane->state->crtc->state) {
+ if (!plane || !plane->state || !plane->state->fb) {
SDE_ERROR("invalid parameters\n");
return 0;
}
sde_kms = _sde_plane_get_kms(plane);
state = plane->state;
- cstate = state->crtc->state;
pstate = to_sde_plane_state(state);
rstate = &pstate->rot;
- if (!rstate->rot_hw || !rstate->rot_hw->caps || !rstate->out_src_h ||
- !sde_kms || !sde_kms->catalog) {
- SDE_ERROR("invalid parameters\n");
+ if (!sde_kms || !sde_kms->catalog) {
+ SDE_ERROR("invalid kms\n");
return 0;
}
- sde_format_get_block_size(rstate->out_fb_format, &blocksize,
- &blocksize);
+ if (rstate->out_fb_format)
+ sde_format_get_block_size(rstate->out_fb_format,
+ &blocksize, &blocksize);
+
prefill_line = blocksize + sde_kms->catalog->sbuf_headroom;
SDE_DEBUG("plane%d prefill:%u\n", plane->base.id, prefill_line);
@@ -1410,7 +1414,7 @@ bool sde_plane_is_sbuf_mode(struct drm_plane *plane, u32 *prefill)
struct sde_plane_rot_state *rstate = pstate ? &pstate->rot : NULL;
bool sbuf_mode = rstate ? rstate->out_sbuf : false;
- if (prefill && sbuf_mode)
+ if (prefill)
*prefill = sde_plane_rot_calc_prefill(plane);
return sbuf_mode;
@@ -2441,16 +2445,16 @@ int sde_plane_validate_multirect_v2(struct sde_multirect_plane_states *plane)
* sde_plane_get_ctl_flush - get control flush for the given plane
* @plane: Pointer to drm plane structure
* @ctl: Pointer to hardware control driver
- * @flush: Pointer to flush control word
+ * @flush_sspp: Pointer to sspp flush control word
+ * @flush_rot: Pointer to rotator flush control word
*/
void sde_plane_get_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
- u32 *flush)
+ u32 *flush_sspp, u32 *flush_rot)
{
struct sde_plane_state *pstate;
struct sde_plane_rot_state *rstate;
- u32 bitmask;
- if (!plane || !flush) {
+ if (!plane || !flush_sspp) {
SDE_ERROR("invalid parameters\n");
return;
}
@@ -2458,13 +2462,15 @@ void sde_plane_get_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
pstate = to_sde_plane_state(plane->state);
rstate = &pstate->rot;
- bitmask = ctl->ops.get_bitmask_sspp(ctl, sde_plane_pipe(plane));
+ *flush_sspp = ctl->ops.get_bitmask_sspp(ctl, sde_plane_pipe(plane));
+ if (!flush_rot)
+ return;
+
+ *flush_rot = 0x0;
if (sde_plane_is_sbuf_mode(plane, NULL) && rstate->rot_hw &&
ctl->ops.get_bitmask_rot)
- ctl->ops.get_bitmask_rot(ctl, &bitmask, rstate->rot_hw->idx);
-
- *flush = bitmask;
+ ctl->ops.get_bitmask_rot(ctl, flush_rot, rstate->rot_hw->idx);
}
static int sde_plane_prepare_fb(struct drm_plane *plane,
@@ -3084,6 +3090,23 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt, src_flags,
pstate->multirect_index);
+ if (psde->pipe_hw->ops.setup_cdp) {
+ struct sde_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+
+ memset(cdp_cfg, 0, sizeof(struct sde_hw_pipe_cdp_cfg));
+
+ cdp_cfg->enable = psde->catalog->perf.cdp_cfg
+ [SDE_PERF_CDP_USAGE_RT].rd_enable;
+ cdp_cfg->ubwc_meta_enable =
+ SDE_FORMAT_IS_UBWC(fmt);
+ cdp_cfg->tile_amortize_enable =
+ SDE_FORMAT_IS_UBWC(fmt) ||
+ SDE_FORMAT_IS_TILE(fmt);
+ cdp_cfg->preload_ahead = SDE_WB_CDP_PRELOAD_AHEAD_64;
+
+ psde->pipe_hw->ops.setup_cdp(psde->pipe_hw, cdp_cfg);
+ }
+
if (psde->pipe_hw->ops.setup_sys_cache) {
if (rstate->out_sbuf) {
if (rstate->nplane < 2)
@@ -4124,7 +4147,7 @@ struct drm_plane *sde_plane_init(struct drm_device *dev,
uint32_t pipe, bool primary_plane,
unsigned long possible_crtcs, u32 master_plane_id)
{
- struct drm_plane *plane = NULL;
+ struct drm_plane *plane = NULL, *master_plane = NULL;
const struct sde_format_extended *format_list;
struct sde_format_extended *virt_format_list = NULL;
struct sde_plane *psde;
@@ -4168,6 +4191,13 @@ struct drm_plane *sde_plane_init(struct drm_device *dev,
psde->pipe = pipe;
psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
psde->is_virtual = (master_plane_id != 0);
+ INIT_LIST_HEAD(&psde->mplane_list);
+ master_plane = drm_plane_find(dev, master_plane_id);
+ if (master_plane) {
+ struct sde_plane *mpsde = to_sde_plane(master_plane);
+
+ list_add_tail(&psde->mplane_list, &mpsde->mplane_list);
+ }
/* initialize underlying h/w driver */
psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, kms->catalog);
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 47611d1..f83a891 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -110,6 +110,7 @@ struct sde_plane_rot_state {
* @multirect_index: index of the rectangle of SSPP
* @multirect_mode: parallel or time multiplex multirect mode
* @pending: whether the current update is still pending
+ * @cdp_cfg: CDP configuration
*/
struct sde_plane_state {
struct drm_plane_state base;
@@ -126,6 +127,8 @@ struct sde_plane_state {
/* @sc_cfg: system_cache configuration */
struct sde_hw_pipe_sc_cfg sc_cfg;
struct sde_plane_rot_state rot;
+
+ struct sde_hw_pipe_cdp_cfg cdp_cfg;
};
/**
@@ -169,10 +172,11 @@ bool is_sde_plane_virtual(struct drm_plane *plane);
* sde_plane_get_ctl_flush - get control flush mask
* @plane: Pointer to DRM plane object
* @ctl: Pointer to control hardware
- * @flush: Pointer to updated flush mask
+ * @flush_sspp: Pointer to sspp flush control word
+ * @flush_rot: Pointer to rotator flush control word
*/
void sde_plane_get_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
- u32 *flush);
+ u32 *flush_sspp, u32 *flush_rot);
/**
* sde_plane_is_sbuf_mode - return status of stream buffer mode
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
index 2a4e6b5..f731a30 100644
--- a/drivers/gpu/drm/msm/sde/sde_trace.h
+++ b/drivers/gpu/drm/msm/sde/sde_trace.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,15 +24,15 @@
TRACE_EVENT(sde_perf_set_qos_luts,
TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
- u32 lut, bool linear),
- TP_ARGS(pnum, fmt, rt, fl, lut, linear),
+ u32 lut, u32 lut_usage),
+ TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
TP_STRUCT__entry(
__field(u32, pnum)
__field(u32, fmt)
__field(bool, rt)
__field(u32, fl)
- __field(u32, lut)
- __field(bool, linear)
+ __field(u64, lut)
+ __field(u32, lut_usage)
),
TP_fast_assign(
__entry->pnum = pnum;
@@ -40,12 +40,12 @@ TRACE_EVENT(sde_perf_set_qos_luts,
__entry->rt = rt;
__entry->fl = fl;
__entry->lut = lut;
- __entry->linear = linear;
+ __entry->lut_usage = lut_usage;
),
- TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%x lin=%d",
+ TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
__entry->pnum, __entry->fmt,
__entry->rt, __entry->fl,
- __entry->lut, __entry->linear)
+ __entry->lut, __entry->lut_usage)
);
TRACE_EVENT(sde_perf_set_danger_luts,
@@ -180,6 +180,36 @@ TRACE_EVENT(sde_evtlog,
__entry->tag_id, __entry->value1, __entry->value2)
)
+TRACE_EVENT(sde_perf_crtc_update,
+ TP_PROTO(u32 crtc, u64 bw_ctl, u32 core_clk_rate,
+ bool stop_req, u32 update_bus, u32 update_clk),
+ TP_ARGS(crtc, bw_ctl, core_clk_rate,
+ stop_req, update_bus, update_clk),
+ TP_STRUCT__entry(
+ __field(u32, crtc)
+ __field(u64, bw_ctl)
+ __field(u32, core_clk_rate)
+ __field(bool, stop_req)
+ __field(u32, update_bus)
+ __field(u32, update_clk)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->bw_ctl = bw_ctl;
+ __entry->core_clk_rate = core_clk_rate;
+ __entry->stop_req = stop_req;
+ __entry->update_bus = update_bus;
+ __entry->update_clk = update_clk;
+ ),
+ TP_printk("crtc=%d bw=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+ __entry->crtc,
+ __entry->bw_ctl,
+ __entry->core_clk_rate,
+ __entry->stop_req,
+ __entry->update_bus,
+ __entry->update_clk)
+);
+
#define SDE_ATRACE_END(name) trace_sde_mark_write(current->tgid, name, 0)
#define SDE_ATRACE_BEGIN(name) trace_sde_mark_write(current->tgid, name, 1)
#define SDE_ATRACE_FUNC() SDE_ATRACE_BEGIN(__func__)
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
index c675216..e63fe8c 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -265,6 +265,26 @@ void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
}
+void sde_vbif_init_memtypes(struct sde_kms *sde_kms)
+{
+ struct sde_hw_vbif *vbif;
+ int i, j;
+
+ if (!sde_kms) {
+ SDE_ERROR("invalid argument\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
+ vbif = sde_kms->hw_vbif[i];
+ if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+ for (j = 0; j < vbif->cap->memtype_count; j++)
+ vbif->ops.set_mem_type(
+ vbif, j, vbif->cap->memtype[j]);
+ }
+ }
+}
+
#ifdef CONFIG_DEBUG_FS
void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
{
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.h b/drivers/gpu/drm/msm/sde/sde_vbif.h
index d05c2e0..f1da68b 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.h
@@ -27,6 +27,13 @@ struct sde_vbif_set_ot_params {
u32 clk_ctrl;
};
+struct sde_vbif_set_memtype_params {
+ u32 xin_id;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+ bool is_cacheable;
+};
+
/**
* struct sde_vbif_set_qos_params - QoS remapper parameter
* @vbif_idx: vbif identifier
@@ -59,6 +66,12 @@ void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
struct sde_vbif_set_qos_params *params);
+/**
+ * sde_vbif_init_memtypes - initialize xin memory types for vbif
+ * @sde_kms: SDE handler
+ */
+void sde_vbif_init_memtypes(struct sde_kms *sde_kms);
+
#ifdef CONFIG_DEBUG_FS
int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root);
void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms);
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
index b2665be..ceda16e 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -273,6 +273,7 @@ int sde_wb_get_info(struct msm_display_info *info, void *display)
return -EINVAL;
}
+ memset(info, 0, sizeof(struct msm_display_info));
info->intf_type = DRM_MODE_CONNECTOR_VIRTUAL;
info->num_of_h_tiles = 1;
info->h_tile_instance[0] = sde_wb_get_index(display);
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 7bf2211..e284a9f 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -30,17 +30,17 @@
#include "sde_rsc_priv.h"
#include "sde_dbg.h"
-/* worst case time to execute the one tcs vote(sleep/wake) - ~1ms */
-#define TCS_CASE_EXECUTION_TIME 1064000
+/* worst case time to execute one tcs vote (sleep/wake) - ~0.2ms */
+#define SINGLE_TCS_EXECUTION_TIME 200000
/* this time is ~1ms - only wake tcs in any mode */
-#define RSC_BACKOFF_TIME_NS (TCS_CASE_EXECUTION_TIME + 100)
+#define RSC_BACKOFF_TIME_NS (SINGLE_TCS_EXECUTION_TIME + 100)
/* this time is ~1ms - only wake TCS in mode-0 */
-#define RSC_MODE_THRESHOLD_TIME_IN_NS ((TCS_CASE_EXECUTION_TIME >> 1) + 100)
+#define RSC_MODE_THRESHOLD_TIME_IN_NS ((SINGLE_TCS_EXECUTION_TIME >> 1) + 100)
/* this time is ~2ms - sleep+ wake TCS in mode-1 */
-#define RSC_TIME_SLOT_0_NS ((TCS_CASE_EXECUTION_TIME * 2) + 100)
+#define RSC_TIME_SLOT_0_NS ((SINGLE_TCS_EXECUTION_TIME * 2) + 100)
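With the constant renamed and reduced to 200000 ns, the derived thresholds shrink accordingly (the unchanged "~1ms"/"~2ms" comments around these macros still describe the previous 1064000 ns value). A quick check of the arithmetic:

#include <stdio.h>

#define SINGLE_TCS_EXECUTION_TIME	200000	/* ns, per the change above */

int main(void)
{
	unsigned long backoff = SINGLE_TCS_EXECUTION_TIME + 100;
	unsigned long mode_threshold = (SINGLE_TCS_EXECUTION_TIME >> 1) + 100;
	unsigned long time_slot_0 = (SINGLE_TCS_EXECUTION_TIME * 2) + 100;

	printf("RSC_BACKOFF_TIME_NS           = %lu ns (~0.2 ms)\n", backoff);
	printf("RSC_MODE_THRESHOLD_TIME_IN_NS = %lu ns (~0.1 ms)\n",
	       mode_threshold);
	printf("RSC_TIME_SLOT_0_NS            = %lu ns (~0.4 ms)\n",
	       time_slot_0);
	return 0;
}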
#define DEFAULT_PANEL_FPS 60
#define DEFAULT_PANEL_JITTER 5
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 1f76233a..dbacb20 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -796,6 +796,10 @@
#define A6XX_GMU_CM3_FW_BUSY 0x1F81A
#define A6XX_GMU_CM3_FW_INIT_RESULT 0x1F81C
#define A6XX_GMU_CM3_CFG 0x1F82D
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE 0x1F840
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0 0x1F841
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L 0x1F844
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H 0x1F845
#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL 0x1F8C0
#define A6XX_GMU_PWR_COL_INTER_FRAME_HYST 0x1F8C1
#define A6XX_GMU_PWR_COL_SPTPRAC_HYST 0x1F8C2
@@ -848,6 +852,7 @@
#define A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL 0x23B0A
#define A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL 0x23B0B
#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS 0x23B0C
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK 0x23B0E
#define A6XX_GMU_AHB_FENCE_STATUS 0x23B13
#define A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x23B15
#define A6XX_GMU_AO_SPARE_CNTL 0x23B16
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 627b351..f581cff 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2963,11 +2963,11 @@ static void adreno_pwrlevel_change_settings(struct kgsl_device *device,
}
static void adreno_clk_set_options(struct kgsl_device *device, const char *name,
- struct clk *clk)
+ struct clk *clk, bool on)
{
if (ADRENO_GPU_DEVICE(ADRENO_DEVICE(device))->clk_set_options)
ADRENO_GPU_DEVICE(ADRENO_DEVICE(device))->clk_set_options(
- ADRENO_DEVICE(device), name, clk);
+ ADRENO_DEVICE(device), name, clk, on);
}
static void adreno_iommu_sync(struct kgsl_device *device, bool sync)
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 91f03d0..26c5505 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -851,7 +851,7 @@ struct adreno_gpudev {
void (*preemption_schedule)(struct adreno_device *);
void (*enable_64bit)(struct adreno_device *);
void (*clk_set_options)(struct adreno_device *,
- const char *, struct clk *);
+ const char *, struct clk *, bool on);
void (*llc_configure_gpu_scid)(struct adreno_device *adreno_dev);
void (*llc_configure_gpuhtw_scid)(struct adreno_device *adreno_dev);
void (*llc_enable_overrides)(struct adreno_device *adreno_dev);
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 6c8b677..314ac85a 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -55,7 +55,7 @@ static const struct adreno_vbif_platform a5xx_vbif_platforms[] = {
{ adreno_is_a530, a530_vbif },
{ adreno_is_a512, a540_vbif },
{ adreno_is_a510, a530_vbif },
- { adreno_is_a508, a530_vbif },
+ { adreno_is_a508, a540_vbif },
{ adreno_is_a505, a530_vbif },
{ adreno_is_a506, a530_vbif },
};
@@ -1608,11 +1608,15 @@ static void a5xx_pwrlevel_change_settings(struct adreno_device *adreno_dev,
}
static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
- const char *name, struct clk *clk)
+ const char *name, struct clk *clk, bool on)
{
+
+ if (!adreno_is_a540(adreno_dev) && !adreno_is_a512(adreno_dev) &&
+ !adreno_is_a508(adreno_dev))
+ return;
+
/* Handle clock settings for GFX PSCBCs */
- if (adreno_is_a540(adreno_dev) || adreno_is_a512(adreno_dev) ||
- adreno_is_a508(adreno_dev)) {
+ if (on) {
if (!strcmp(name, "mem_iface_clk")) {
clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
@@ -1620,6 +1624,11 @@ static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
clk_set_flags(clk, CLKFLAG_RETAIN_PERIPH);
clk_set_flags(clk, CLKFLAG_RETAIN_MEM);
}
+ } else {
+ if (!strcmp(name, "core_clk")) {
+ clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
+ clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
+ }
}
}
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 9a56bec..314b2d8 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -2419,6 +2419,13 @@ static struct adreno_perfcount_register a6xx_perfcounters_vbif_pwr[] = {
A6XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A6XX_VBIF_PERF_PWR_CNT_EN2 },
};
+static struct adreno_perfcount_register a6xx_perfcounters_pwr[] = {
+ { KGSL_PERFCOUNTER_BROKEN, 0, 0, 0, 0, -1, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1, 0 },
+};
+
static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
@@ -2451,6 +2458,8 @@ static struct adreno_perfcount_group a6xx_perfcounter_groups
A6XX_PERFCOUNTER_GROUP(VBIF, vbif),
A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
ADRENO_PERFCOUNTER_GROUP_FIXED),
+ A6XX_PERFCOUNTER_GROUP_FLAGS(PWR, pwr,
+ ADRENO_PERFCOUNTER_GROUP_FIXED),
A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
ADRENO_PERFCOUNTER_GROUP_FIXED),
};
@@ -2460,6 +2469,30 @@ static struct adreno_perfcounters a6xx_perfcounters = {
ARRAY_SIZE(a6xx_perfcounter_groups),
};
+/* Program the GMU power counter to count GPU busy cycles */
+static int a6xx_enable_pwr_counters(struct adreno_device *adreno_dev,
+ unsigned int counter)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ /*
+	 * We have a limited number of power counters. Counter 0 (total GPU
+	 * cycle count) is not used, so reject any request for it.
+ */
+ if (counter == 0)
+ return -EINVAL;
+
+ if (!device->gmu.pdev)
+ return -ENODEV;
+
+ kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0);
+ kgsl_regrmw(device,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xFF, 0x20);
+ kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0x1);
+
+ return 0;
+}
+
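a6xx_enable_pwr_counters routes the GPU busy signal into GMU power counter 0 (select field 0x20) and enables the counter block; the busy time is then sampled through the XOCLK_0 LO/HI pair registered in a6xx_perfcounters_pwr. A hedged sketch of assembling and differencing the 64-bit sample from two 32-bit reads (the reads themselves are stand-ins, and production code should guard against the high word rolling over between the two reads):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for reading A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L/_H */
static uint64_t read_busy_cycles(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t t0 = read_busy_cycles(0xfffffff0u, 0x0);
	uint64_t t1 = read_busy_cycles(0x00000020u, 0x1);

	printf("busy cycles in interval: %llu\n",
	       (unsigned long long)(t1 - t0));	/* prints 48 */
	return 0;
}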
/* Register offset defines for A6XX, in order of enum adreno_regs */
static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
@@ -2581,6 +2614,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
.regulator_enable = a6xx_sptprac_enable,
.regulator_disable = a6xx_sptprac_disable,
.perfcounters = &a6xx_perfcounters,
+ .enable_pwr_counters = a6xx_enable_pwr_counters,
.microcode_read = a6xx_microcode_read,
.enable_64bit = a6xx_enable_64bit,
.llc_configure_gpu_scid = a6xx_llc_configure_gpu_scid,
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 17ee6e6..bca3dd0 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -1264,59 +1264,14 @@ static void a6xx_snapshot_debugbus(struct kgsl_device *device,
}
}
-static size_t a6xx_snapshot_dump_gmu_registers(struct kgsl_device *device,
- u8 *buf, size_t remain, void *priv)
-{
- struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
- struct kgsl_snapshot_registers *regs = priv;
- unsigned int *data = (unsigned int *)(buf + sizeof(*header));
- int count = 0, j, k;
-
- /* Figure out how many registers we are going to dump */
- for (j = 0; j < regs->count; j++) {
- int start = regs->regs[j * 2];
- int end = regs->regs[j * 2 + 1];
-
- count += (end - start + 1);
- }
-
- if (remain < (count * 8) + sizeof(*header)) {
- SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
- return 0;
- }
-
- for (j = 0; j < regs->count; j++) {
- unsigned int start = regs->regs[j * 2];
- unsigned int end = regs->regs[j * 2 + 1];
-
- for (k = start; k <= end; k++) {
- unsigned int val;
-
- kgsl_gmu_regread(device, k, &val);
- *data++ = k;
- *data++ = val;
- }
- }
-
- header->count = count;
-
- /* Return the size of the section */
- return (count * 8) + sizeof(*header);
-}
-
static void a6xx_snapshot_gmu(struct kgsl_device *device,
struct kgsl_snapshot *snapshot)
{
- struct kgsl_snapshot_registers gmu_regs = {
- .regs = a6xx_gmu_registers,
- .count = ARRAY_SIZE(a6xx_gmu_registers) / 2,
- };
-
if (!kgsl_gmu_isenabled(device))
return;
- kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
- snapshot, a6xx_snapshot_dump_gmu_registers, &gmu_regs);
+ adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
+ ARRAY_SIZE(a6xx_gmu_registers) / 2);
}
/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index d01a5e9..e8b1c67 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2097,7 +2097,12 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
/* Turn off all the timers */
del_timer_sync(&dispatcher->timer);
del_timer_sync(&dispatcher->fault_timer);
- del_timer_sync(&adreno_dev->preempt.timer);
+ /*
+	 * Deleting an uninitialized timer blocks forever on kernel builds with
+	 * debug disabled, so only delete the timer if preemption (and hence
+	 * the timer) was initialized.
+ */
+ if (adreno_is_preemption_enabled(adreno_dev))
+ del_timer_sync(&adreno_dev->preempt.timer);
mutex_lock(&device->mutex);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 9d847ae..bff1fda 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -54,21 +54,10 @@ static void adreno_get_submit_time(struct adreno_device *adreno_dev,
/* Read always on registers */
if (!adreno_is_a3xx(adreno_dev)) {
- if (kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev))) {
- uint32_t val_lo, val_hi;
-
- adreno_read_gmureg(adreno_dev,
- ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO, &val_lo);
- adreno_read_gmureg(adreno_dev,
- ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI, &val_hi);
-
- time->ticks = (val_lo | ((uint64_t)val_hi << 32));
- } else {
- adreno_readreg64(adreno_dev,
- ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
- ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
- &time->ticks);
- }
+ adreno_readreg64(adreno_dev,
+ ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
+ ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
+ &time->ticks);
/* Mask hi bits as they may be incorrect on some targets */
if (ADRENO_GPUREV(adreno_dev) >= 400 &&
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index d836cbb..6a39792 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -257,6 +257,13 @@ static void _deferred_put(struct work_struct *work)
kgsl_mem_entry_put(entry);
}
+static inline void
+kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry)
+{
+ if (entry)
+ queue_work(kgsl_driver.mem_workqueue, &entry->work);
+}
+
static inline struct kgsl_mem_entry *
kgsl_mem_entry_create(void)
{
@@ -266,6 +273,7 @@ kgsl_mem_entry_create(void)
kref_init(&entry->refcount);
/* put this ref in userspace memory alloc and map ioctls */
kref_get(&entry->refcount);
+ INIT_WORK(&entry->work, _deferred_put);
}
return entry;
@@ -1244,7 +1252,8 @@ kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr)
spin_lock(&private->mem_lock);
idr_for_each_entry(&private->mem_idr, entry, id) {
if (GPUADDR_IN_MEMDESC(gpuaddr, &entry->memdesc)) {
- ret = kgsl_mem_entry_get(entry);
+ if (!entry->pending_free)
+ ret = kgsl_mem_entry_get(entry);
break;
}
}
@@ -1877,7 +1886,7 @@ long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
return -EINVAL;
ret = gpumem_free_entry(entry);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return ret;
}
@@ -1895,7 +1904,7 @@ long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
return -EINVAL;
ret = gpumem_free_entry(entry);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return ret;
}
@@ -1932,8 +1941,7 @@ static void gpuobj_free_fence_func(void *priv)
{
struct kgsl_mem_entry *entry = priv;
- INIT_WORK(&entry->work, _deferred_put);
- queue_work(kgsl_driver.mem_workqueue, &entry->work);
+ kgsl_mem_entry_put_deferred(entry);
}
static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
@@ -1997,7 +2005,7 @@ long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv,
else
ret = -EINVAL;
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return ret;
}
@@ -3377,7 +3385,13 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
if (entry == NULL)
return -EINVAL;
+ if (!kgsl_mem_entry_set_pend(entry)) {
+ kgsl_mem_entry_put(entry);
+ return -EBUSY;
+ }
+
if (entry->memdesc.cur_bindings != 0) {
+ kgsl_mem_entry_unset_pend(entry);
kgsl_mem_entry_put(entry);
return -EINVAL;
}
@@ -3386,7 +3400,7 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
/* One put for find_id(), one put for the kgsl_mem_entry_create() */
kgsl_mem_entry_put(entry);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return 0;
}
@@ -3446,7 +3460,13 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
if (entry == NULL)
return -EINVAL;
+ if (!kgsl_mem_entry_set_pend(entry)) {
+ kgsl_mem_entry_put(entry);
+ return -EBUSY;
+ }
+
if (entry->bind_tree.rb_node != NULL) {
+ kgsl_mem_entry_unset_pend(entry);
kgsl_mem_entry_put(entry);
return -EINVAL;
}
@@ -3455,7 +3475,7 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
/* One put for find_id(), one put for the kgsl_mem_entry_create() */
kgsl_mem_entry_put(entry);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return 0;
}
@@ -4853,7 +4873,7 @@ static int __init kgsl_core_init(void)
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM, 0);
kgsl_events_init();
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index ee4e7ef..ca1f181 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -179,7 +179,7 @@ struct kgsl_functable {
unsigned int prelevel, unsigned int postlevel, bool post);
void (*regulator_disable_poll)(struct kgsl_device *device);
void (*clk_set_options)(struct kgsl_device *device,
- const char *name, struct clk *clk);
+ const char *name, struct clk *clk, bool on);
void (*gpu_model)(struct kgsl_device *device, char *str,
size_t bufsz);
void (*stop_fault_timer)(struct kgsl_device *device);
@@ -532,18 +532,49 @@ static inline void kgsl_process_add_stats(struct kgsl_process_private *priv,
priv->stats[type].max = priv->stats[type].cur;
}
+static inline bool kgsl_is_register_offset(struct kgsl_device *device,
+ unsigned int offsetwords)
+{
+ return ((offsetwords * sizeof(uint32_t)) < device->reg_len);
+}
+
+static inline bool kgsl_is_gmu_offset(struct kgsl_device *device,
+ unsigned int offsetwords)
+{
+ struct gmu_device *gmu = &device->gmu;
+
+ return (gmu->pdev &&
+ (offsetwords >= gmu->gmu2gpu_offset) &&
+ ((offsetwords - gmu->gmu2gpu_offset) * sizeof(uint32_t) <
+ gmu->reg_len));
+}
+
static inline void kgsl_regread(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value)
{
- device->ftbl->regread(device, offsetwords, value);
+ if (kgsl_is_register_offset(device, offsetwords))
+ device->ftbl->regread(device, offsetwords, value);
+ else if (device->ftbl->gmu_regread &&
+ kgsl_is_gmu_offset(device, offsetwords))
+ device->ftbl->gmu_regread(device, offsetwords, value);
+ else {
+ WARN(1, "Out of bounds register read: 0x%x\n", offsetwords);
+ *value = 0;
+ }
}
static inline void kgsl_regwrite(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value)
{
- device->ftbl->regwrite(device, offsetwords, value);
+ if (kgsl_is_register_offset(device, offsetwords))
+ device->ftbl->regwrite(device, offsetwords, value);
+ else if (device->ftbl->gmu_regwrite &&
+ kgsl_is_gmu_offset(device, offsetwords))
+ device->ftbl->gmu_regwrite(device, offsetwords, value);
+ else
+ WARN(1, "Out of bounds register write: 0x%x\n", offsetwords);
}
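kgsl_regread/kgsl_regwrite now dispatch by offset: anything inside the GPU register block goes to the normal accessors, offsets inside the GMU window go to the GMU accessors when a GMU is present, and everything else WARNs instead of touching memory. A small userspace sketch of the two range predicates, with illustrative sizes (the real reg_len and gmu2gpu_offset come from the device tree):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GPU_REG_LEN	0x40000u	/* bytes, illustrative */
#define GMU2GPU_OFFSET	0x1f000u	/* dword offset of GMU block, illustrative */
#define GMU_REG_LEN	0x10000u	/* bytes, illustrative */

static bool is_gpu_offset(uint32_t dword)
{
	return dword * sizeof(uint32_t) < GPU_REG_LEN;
}

static bool is_gmu_offset(uint32_t dword)
{
	return dword >= GMU2GPU_OFFSET &&
	       (dword - GMU2GPU_OFFSET) * sizeof(uint32_t) < GMU_REG_LEN;
}

int main(void)
{
	uint32_t offsets[] = { 0x100, 0x1f840, 0x90000 };

	for (unsigned int i = 0; i < 3; i++)
		printf("0x%05x -> %s\n", offsets[i],
		       is_gpu_offset(offsets[i]) ? "GPU block" :
		       is_gmu_offset(offsets[i]) ? "GMU block" : "rejected");
	return 0;
}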
static inline void kgsl_gmu_regread(struct kgsl_device *device,
@@ -570,9 +601,9 @@ static inline void kgsl_regrmw(struct kgsl_device *device,
{
unsigned int val = 0;
- device->ftbl->regread(device, offsetwords, &val);
+ kgsl_regread(device, offsetwords, &val);
val &= ~mask;
- device->ftbl->regwrite(device, offsetwords, val | bits);
+ kgsl_regwrite(device, offsetwords, val | bits);
}
static inline void kgsl_gmu_regrmw(struct kgsl_device *device,
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index f72b3fa..f87e4da 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1477,8 +1477,7 @@ void gmu_stop(struct kgsl_device *device)
if (!idle || (gpudev->wait_for_gmu_idle &&
gpudev->wait_for_gmu_idle(adreno_dev))) {
- dev_err(&gmu->pdev->dev, "Failure to stop GMU");
- return;
+ dev_err(&gmu->pdev->dev, "Stopping GMU before it is idle\n");
}
/* Pending message in all queues are abandoned */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 7ffb42b..4dd7b8e 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -156,9 +156,6 @@ static void _ab_buslevel_update(struct kgsl_pwrctrl *pwr,
*ab = pwr->bus_ab_mbytes;
else
*ab = (pwr->bus_percent_ab * max_bw) / 100;
-
- if (*ab > ib)
- *ab = ib;
}
/**
@@ -2052,10 +2049,6 @@ static int _get_clocks(struct kgsl_device *device)
if (!strcmp(name, "isense_clk"))
pwr->isense_clk_indx = i;
-
- if (device->ftbl->clk_set_options)
- device->ftbl->clk_set_options(device, name,
- pwr->grp_clks[i]);
break;
}
}
@@ -2480,6 +2473,22 @@ static void kgsl_pwrctrl_disable(struct kgsl_device *device)
kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
+static void
+kgsl_pwrctrl_clk_set_options(struct kgsl_device *device, bool on)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ int i;
+
+ for (i = 0; i < KGSL_MAX_CLKS; i++) {
+ if (pwr->grp_clks[i] == NULL)
+ continue;
+
+ if (device->ftbl->clk_set_options)
+ device->ftbl->clk_set_options(device, clocks[i],
+ pwr->grp_clks[i], on);
+ }
+}
+
/**
* _init() - Get the GPU ready to start, but don't turn anything on
* @device - Pointer to the kgsl_device struct
@@ -2529,6 +2538,7 @@ static int _wake(struct kgsl_device *device)
device->ftbl->resume(device);
/* fall through */
case KGSL_STATE_SLUMBER:
+ kgsl_pwrctrl_clk_set_options(device, true);
status = device->ftbl->start(device,
device->pwrctrl.superfast);
device->pwrctrl.superfast = false;
@@ -2565,6 +2575,7 @@ static int _wake(struct kgsl_device *device)
device->pwrctrl.interval_timeout);
break;
case KGSL_STATE_AWARE:
+ kgsl_pwrctrl_clk_set_options(device, true);
/* Enable state before turning on irq */
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
@@ -2689,6 +2700,7 @@ _slumber(struct kgsl_device *device)
status = kgsl_pwrctrl_enable(device);
device->ftbl->suspend_context(device);
device->ftbl->stop(device);
+ kgsl_pwrctrl_clk_set_options(device, false);
kgsl_pwrctrl_disable(device);
kgsl_pwrscale_sleep(device);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index b91a6b5..cb9726e 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -166,6 +166,7 @@
#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
#define SMR_VALID (1 << 31)
#define SMR_MASK_SHIFT 16
+#define SMR_MASK_MASK 0x7FFF
#define SMR_ID_SHIFT 0
#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
@@ -335,10 +336,12 @@ struct arm_smmu_s2cr {
enum arm_smmu_s2cr_type type;
enum arm_smmu_s2cr_privcfg privcfg;
u8 cbndx;
+ bool cb_handoff;
};
#define s2cr_init_val (struct arm_smmu_s2cr){ \
.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
+ .cb_handoff = false, \
}
struct arm_smmu_smr {
@@ -409,7 +412,6 @@ struct arm_smmu_device {
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF (1 << 1)
-#define ARM_SMMU_OPT_SKIP_INIT (1 << 2)
#define ARM_SMMU_OPT_DYNAMIC (1 << 3)
#define ARM_SMMU_OPT_3LVL_TABLES (1 << 4)
u32 options;
@@ -528,7 +530,6 @@ static bool using_legacy_binding, using_generic_binding;
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
- { ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
{ 0, NULL},
@@ -548,8 +549,15 @@ static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);
static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);
+static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
+ dma_addr_t iova);
+
static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);
+static int arm_smmu_alloc_cb(struct iommu_domain *domain,
+ struct arm_smmu_device *smmu,
+ struct device *dev);
+
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
@@ -1612,14 +1620,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (is_iommu_pt_coherent(smmu_domain))
quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
- /* Dynamic domains must set cbndx through domain attribute */
- if (!dynamic) {
- ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
- smmu->num_context_banks);
- if (ret < 0)
- goto out_unlock;
- cfg->cbndx = ret;
- }
+ ret = arm_smmu_alloc_cb(domain, smmu, dev);
+ if (ret < 0)
+ goto out_unlock;
+ cfg->cbndx = ret;
+
if (smmu->version < ARM_SMMU_V2) {
cfg->irptndx = atomic_inc_return(&smmu->irptndx);
cfg->irptndx %= smmu->num_context_irqs;
@@ -2189,6 +2194,23 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
+static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ uint64_t ret;
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+ if (!ops)
+ return 0;
+
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ ret = ops->iova_to_pte(ops, iova);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ return ret;
+}
+
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size)
{
@@ -2223,14 +2245,18 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return ret;
}
+#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
int ret;
- size_t size;
+ size_t size, batch_size, size_to_unmap = 0;
unsigned long flags;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ unsigned int idx_start, idx_end;
+ struct scatterlist *sg_start, *sg_end;
+ unsigned long __saved_iova_start;
if (!ops)
return -ENODEV;
@@ -2239,17 +2265,45 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
if (ret)
return ret;
- spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
- ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
- spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ __saved_iova_start = iova;
+ idx_start = idx_end = 0;
+ sg_start = sg_end = sg;
+ while (idx_end < nents) {
+ batch_size = sg_end->length;
+ sg_end = sg_next(sg_end);
+ idx_end++;
+ while ((idx_end < nents) &&
+ (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
- if (!ret)
- arm_smmu_unmap(domain, iova, size);
+ batch_size += sg_end->length;
+ sg_end = sg_next(sg_end);
+ idx_end++;
+ }
- arm_smmu_domain_power_off(domain, smmu_domain->smmu);
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
+ prot, &size);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ /* Returns 0 on error */
+ if (!ret) {
+ size_to_unmap = iova + size - __saved_iova_start;
+ goto out;
+ }
+
+ iova += batch_size;
+ idx_start = idx_end;
+ sg_start = sg_end;
+ }
+
+out:
arm_smmu_assign_table(smmu_domain);
- return ret;
+ if (size_to_unmap) {
+ arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
+ iova = __saved_iova_start;
+ }
+ arm_smmu_domain_power_off(domain, smmu_domain->smmu);
+ return iova - __saved_iova_start;
}
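The reworked arm_smmu_map_sg splits the scatterlist into batches of at most MAX_MAP_SG_BATCH_SIZE so the pagetable spinlock is released between batches, and on a partial failure it unmaps everything mapped so far and reports only the bytes actually mapped. A userspace sketch of just the batching walk, with scatterlist entries reduced to their lengths:

#include <stddef.h>
#include <stdio.h>

#define MAX_MAP_SG_BATCH_SIZE	(4u << 20)	/* 4 MB, as in the hunk */

static void batch_sg(const size_t *lengths, unsigned int nents)
{
	unsigned int idx_start = 0, idx_end = 0;

	while (idx_end < nents) {
		size_t batch_size = lengths[idx_end++];

		/* grow the batch while the next entry still fits */
		while (idx_end < nents &&
		       batch_size + lengths[idx_end] < MAX_MAP_SG_BATCH_SIZE)
			batch_size += lengths[idx_end++];

		printf("map entries [%u..%u), %zu bytes under one lock hold\n",
		       idx_start, idx_end, batch_size);
		idx_start = idx_end;
	}
}

int main(void)
{
	const size_t lengths[] = { 1u << 20, 2u << 20, 3u << 20, 512u << 10 };

	batch_sg(lengths, 4);	/* -> entries [0..2) as 3 MB, then [2..4) as 3.5 MB */
	return 0;
}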
static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
@@ -2976,6 +3030,7 @@ static struct iommu_ops arm_smmu_ops = {
.enable_config_clocks = arm_smmu_enable_config_clocks,
.disable_config_clocks = arm_smmu_disable_config_clocks,
.is_iova_coherent = arm_smmu_is_iova_coherent,
+ .iova_to_pte = arm_smmu_iova_to_pte,
};
#define IMPL_DEF1_MICRO_MMU_CTRL 0
@@ -3166,12 +3221,10 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
* Reset stream mapping groups: Initial values mark all SMRn as
* invalid and all S2CRn as bypass unless overridden.
*/
- if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
- for (i = 0; i < smmu->num_mapping_groups; ++i)
- arm_smmu_write_sme(smmu, i);
+ for (i = 0; i < smmu->num_mapping_groups; ++i)
+ arm_smmu_write_sme(smmu, i);
- arm_smmu_context_bank_reset(smmu);
- }
+ arm_smmu_context_bank_reset(smmu);
/* Invalidate the TLB, just in case */
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
@@ -3228,6 +3281,92 @@ static int arm_smmu_id_size_to_bits(int size)
}
}
+
+/*
+ * Some context banks need to be transferred from bootloader to HLOS in a way
+ * that allows ongoing traffic. The current expectation is that these context
+ * banks operate in bypass mode.
+ * Additionally, there must be exactly one device in devicetree with stream-ids
+ * overlapping those used by the bootloader.
+ */
+static int arm_smmu_alloc_cb(struct iommu_domain *domain,
+ struct arm_smmu_device *smmu,
+ struct device *dev)
+{
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ u32 i, idx;
+ int cb = -EINVAL;
+ bool dynamic;
+
+ /* Dynamic domains must set cbndx through domain attribute */
+ dynamic = is_dynamic_domain(domain);
+ if (dynamic)
+ return INVALID_CBNDX;
+
+ mutex_lock(&smmu->stream_map_mutex);
+ for_each_cfg_sme(fwspec, i, idx) {
+ if (smmu->s2crs[idx].cb_handoff)
+ cb = smmu->s2crs[idx].cbndx;
+ }
+
+ if (cb < 0) {
+ mutex_unlock(&smmu->stream_map_mutex);
+ return __arm_smmu_alloc_bitmap(smmu->context_map,
+ smmu->num_s2_context_banks,
+ smmu->num_context_banks);
+ }
+
+ for (i = 0; i < smmu->num_mapping_groups; i++) {
+ if (smmu->s2crs[i].cbndx == cb) {
+ smmu->s2crs[i].cbndx = 0;
+ smmu->s2crs[i].cb_handoff = false;
+ smmu->s2crs[i].count -= 1;
+ }
+ }
+ mutex_unlock(&smmu->stream_map_mutex);
+
+ return cb;
+}
+
+static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
+{
+ u32 i, raw_smr, raw_s2cr;
+ struct arm_smmu_smr smr;
+ struct arm_smmu_s2cr s2cr;
+
+ for (i = 0; i < smmu->num_mapping_groups; i++) {
+ raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
+ ARM_SMMU_GR0_SMR(i));
+ if (!(raw_smr & SMR_VALID))
+ continue;
+
+ smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
+ smr.id = (u16)raw_smr;
+ smr.valid = true;
+
+ raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
+ ARM_SMMU_GR0_S2CR(i));
+ s2cr.group = NULL;
+ s2cr.count = 1;
+ s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
+ s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
+ S2CR_PRIVCFG_MASK;
+ s2cr.cbndx = (u8)raw_s2cr;
+ s2cr.cb_handoff = true;
+
+ if (s2cr.type != S2CR_TYPE_TRANS)
+ continue;
+
+ smmu->smrs[i] = smr;
+ smmu->s2crs[i] = s2cr;
+ bitmap_set(smmu->context_map, s2cr.cbndx, 1);
+ dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
+ raw_smr, raw_s2cr, s2cr.cbndx);
+ }
+
+ return 0;
+}
+
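arm_smmu_handoff_cbs scans the stream-match registers left programmed by the bootloader, keeps only valid SMRs whose S2CR is of translation type, and reserves their context banks. A sketch of the SMR decode using the masks defined above (the raw values are illustrative; the S2CR type/privcfg extraction is omitted because those shift/mask definitions are not part of this hunk):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SMR_VALID	(1u << 31)
#define SMR_MASK_SHIFT	16
#define SMR_MASK_MASK	0x7fffu

struct smr { uint16_t mask, id; bool valid; };

static bool decode_smr(uint32_t raw, struct smr *out)
{
	if (!(raw & SMR_VALID))
		return false;		/* bootloader left this entry unused */

	out->mask = (raw >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
	out->id = (uint16_t)raw;	/* stream ID lives in the low half */
	out->valid = true;
	return true;
}

int main(void)
{
	struct smr smr;
	uint32_t raw_smr = 0x80030842u;		/* illustrative raw value */
	uint32_t raw_s2cr = 0x00010005u;	/* cbndx is the low byte */

	if (decode_smr(raw_smr, &smr))
		printf("smr id=0x%x mask=0x%x -> handoff cb %u\n",
		       smr.id, smr.mask, (uint8_t)raw_s2cr);
	return 0;	/* prints: smr id=0x842 mask=0x3 -> handoff cb 5 */
}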
static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
{
struct device *dev = smmu->dev;
@@ -3487,6 +3626,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
smmu->streamid_mask = size - 1;
if (id & ID0_SMS) {
u32 smr;
+ int i;
smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
@@ -3501,14 +3641,25 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
* bits are set, so check each one separately. We can reject
* masters later if they try to claim IDs outside these masks.
*/
+ for (i = 0; i < size; i++) {
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
+ if (!(smr & SMR_VALID))
+ break;
+ }
+ if (i == size) {
+ dev_err(smmu->dev,
+ "Unable to compute streamid_masks\n");
+ return -ENODEV;
+ }
+
smr = smmu->streamid_mask << SMR_ID_SHIFT;
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
smmu->streamid_mask = smr >> SMR_ID_SHIFT;
smr = smmu->streamid_mask << SMR_MASK_SHIFT;
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
/* Zero-initialised to mark as invalid */
@@ -3800,6 +3951,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (err)
goto out_power_off;
+ err = arm_smmu_handoff_cbs(smmu);
+ if (err)
+ goto out_power_off;
+
err = arm_smmu_parse_impl_def_registers(smmu);
if (err)
goto out_power_off;
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index ac3059d..560bb43 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -765,6 +765,51 @@ static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized(
return ERR_PTR(-ENOMEM);
}
+/*
+ * Based on similar code from dma-iommu.c, but modified to use a different
+ * iova allocator
+ */
+static void fast_smmu_reserve_pci_windows(struct device *dev,
+ struct dma_fast_smmu_mapping *mapping)
+{
+ struct pci_host_bridge *bridge;
+ struct resource_entry *window;
+ phys_addr_t start, end;
+ struct pci_dev *pci_dev;
+ unsigned long flags;
+
+ if (!dev_is_pci(dev))
+ return;
+
+ pci_dev = to_pci_dev(dev);
+ bridge = pci_find_host_bridge(pci_dev->bus);
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ resource_list_for_each_entry(window, &bridge->windows) {
+ if (resource_type(window->res) != IORESOURCE_MEM &&
+ resource_type(window->res) != IORESOURCE_IO)
+ continue;
+
+ start = round_down(window->res->start - window->offset,
+ FAST_PAGE_SIZE);
+ end = round_up(window->res->end - window->offset,
+ FAST_PAGE_SIZE);
+ start = max_t(unsigned long, mapping->base, start);
+ end = min_t(unsigned long, mapping->base + mapping->size, end);
+ if (start >= end)
+ continue;
+
+ dev_dbg(dev, "iova allocator reserved 0x%pa-0x%pa\n",
+ &start, &end);
+
+ start = (start - mapping->base) >> FAST_PAGE_SHIFT;
+ end = (end - mapping->base) >> FAST_PAGE_SHIFT;
+ bitmap_set(mapping->bitmap, start, end - start);
+ }
+ spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
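fast_smmu_reserve_pci_windows rounds each bridge window out to the fast-page size, clamps it to the mapping range, and marks the covered pages in the allocator bitmap so those IOVAs are never handed out. A sketch of the index math, assuming a 4K fast page and an illustrative mapping base/size:

#include <stdint.h>
#include <stdio.h>

#define FAST_PAGE_SHIFT	12u			/* assumed 4K fast page */
#define FAST_PAGE_SIZE	(1u << FAST_PAGE_SHIFT)
#define MAPPING_BASE	0x10000000ull		/* illustrative */
#define MAPPING_SIZE	0x10000000ull		/* illustrative */

static void reserve(uint64_t res_start, uint64_t res_end, uint64_t offset)
{
	uint64_t start = (res_start - offset) & ~(uint64_t)(FAST_PAGE_SIZE - 1);
	uint64_t end = ((res_end - offset) + FAST_PAGE_SIZE - 1) &
		       ~(uint64_t)(FAST_PAGE_SIZE - 1);

	if (start < MAPPING_BASE)
		start = MAPPING_BASE;
	if (end > MAPPING_BASE + MAPPING_SIZE)
		end = MAPPING_BASE + MAPPING_SIZE;
	if (start >= end)
		return;		/* window does not overlap the mapping */

	printf("reserve bitmap bits [%llu, %llu)\n",
	       (unsigned long long)((start - MAPPING_BASE) >> FAST_PAGE_SHIFT),
	       (unsigned long long)((end - MAPPING_BASE) >> FAST_PAGE_SHIFT));
}

int main(void)
{
	/* a window overlapping the mapping range; bus offset 0 for simplicity */
	reserve(0x14000800, 0x140807ff, 0);	/* prints [16384, 16513) */
	return 0;
}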
+
/**
* fast_smmu_attach_device
* @dev: valid struct device pointer
@@ -798,6 +843,8 @@ int fast_smmu_attach_device(struct device *dev,
mapping->fast->domain = domain;
mapping->fast->dev = dev;
+ fast_smmu_reserve_pci_windows(dev, mapping->fast);
+
group = dev->iommu_group;
if (!group) {
dev_err(dev, "No iommu associated with device\n");
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f7739ae..fa5069e 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -854,6 +854,19 @@ static int arm_lpae_iova_to_pte(struct arm_lpae_io_pgtable *data,
return 0;
}
+static uint64_t arm_lpae_iova_get_pte(struct io_pgtable_ops *ops,
+ unsigned long iova)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ arm_lpae_iopte pte;
+ int lvl;
+
+ if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte))
+ return pte;
+
+ return 0;
+}
+
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
unsigned long iova)
{
@@ -983,6 +996,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
.unmap = arm_lpae_unmap,
.iova_to_phys = arm_lpae_iova_to_phys,
.is_iova_coherent = arm_lpae_is_iova_coherent,
+ .iova_to_pte = arm_lpae_iova_get_pte,
};
return data;
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 1599121..a686ad0 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -149,6 +149,8 @@ struct io_pgtable_ops {
unsigned long iova);
bool (*is_iova_coherent)(struct io_pgtable_ops *ops,
unsigned long iova);
+ uint64_t (*iova_to_pte)(struct io_pgtable_ops *ops,
+ unsigned long iova);
};
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 0c49a64..6bb435b 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -154,6 +154,7 @@ void iommu_debug_domain_remove(struct iommu_domain *domain)
static LIST_HEAD(iommu_debug_devices);
static struct dentry *debugfs_tests_dir;
static u32 iters_per_op = 1;
+static void *test_virt_addr;
struct iommu_debug_device {
struct device *dev;
@@ -1207,6 +1208,68 @@ static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
return -EIO;
}
+static ssize_t __iommu_debug_dma_attach_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ struct dma_iommu_mapping *dma_mapping;
+ ssize_t retval = -EINVAL;
+ int val;
+
+ if (kstrtoint_from_user(ubuf, count, 0, &val)) {
+ pr_err("Invalid format. Expected a hex or decimal integer");
+ retval = -EFAULT;
+ goto out;
+ }
+
+ if (val) {
+ if (dev->archdata.mapping &&
+ dev->archdata.mapping->domain) {
+ pr_err("Already attached.\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ if (WARN(dev->archdata.iommu,
+ "Attachment tracking out of sync with device\n")) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ dma_mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+ (SZ_1G * 4ULL));
+
+ if (!dma_mapping)
+ goto out;
+
+ if (arm_iommu_attach_device(dev, dma_mapping))
+ goto out_release_mapping;
+ pr_err("Attached\n");
+ } else {
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(dev->archdata.mapping);
+ pr_err("Detached\n");
+ }
+ retval = count;
+ return retval;
+
+out_release_mapping:
+ arm_iommu_release_mapping(dma_mapping);
+out:
+ return retval;
+}
+
static ssize_t __iommu_debug_attach_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset,
@@ -1260,6 +1323,81 @@ static ssize_t __iommu_debug_attach_write(struct file *file,
return retval;
}
+static ssize_t iommu_debug_dma_attach_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ return __iommu_debug_dma_attach_write(file, ubuf, count, offset);
+
+}
+
+static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ char c[2];
+
+ if (*offset)
+ return 0;
+
+ if (!dev->archdata.mapping)
+ c[0] = '0';
+ else
+ c[0] = dev->archdata.mapping->domain ? '1' : '0';
+
+ c[1] = '\n';
+ if (copy_to_user(ubuf, &c, 2)) {
+ pr_err("copy_to_user failed\n");
+ return -EFAULT;
+ }
+ *offset = 1; /* non-zero means we're done */
+
+ return 2;
+}
+
+static const struct file_operations iommu_debug_dma_attach_fops = {
+ .open = simple_open,
+ .write = iommu_debug_dma_attach_write,
+ .read = iommu_debug_dma_attach_read,
+};
+
+static ssize_t iommu_debug_test_virt_addr_read(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ char buf[100];
+ ssize_t retval;
+ size_t buflen;
+ int buf_len = sizeof(buf);
+
+ if (*offset)
+ return 0;
+
+ memset(buf, 0, buf_len);
+
+ if (!test_virt_addr)
+ strlcpy(buf, "FAIL\n", buf_len);
+ else
+ snprintf(buf, buf_len, "0x%pK\n", test_virt_addr);
+
+ buflen = strlen(buf);
+ if (copy_to_user(ubuf, buf, buflen)) {
+ pr_err("Couldn't copy_to_user\n");
+ retval = -EFAULT;
+ } else {
+ *offset = 1; /* non-zero means we're done */
+ retval = buflen;
+ }
+
+ return retval;
+}
+
+static const struct file_operations iommu_debug_test_virt_addr_fops = {
+ .open = simple_open,
+ .read = iommu_debug_test_virt_addr_read,
+};
+
static ssize_t iommu_debug_attach_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset)
@@ -1309,6 +1447,75 @@ static const struct file_operations iommu_debug_secure_attach_fops = {
.read = iommu_debug_attach_read,
};
+static ssize_t iommu_debug_pte_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ dma_addr_t iova;
+
+ if (kstrtox_from_user(ubuf, count, 0, &iova)) {
+ pr_err("Invalid format for iova\n");
+ ddev->iova = 0;
+ return -EINVAL;
+ }
+
+ ddev->iova = iova;
+ pr_err("Saved iova=%pa for future PTE commands\n", &iova);
+ return count;
+}
+
+
+static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ uint64_t pte;
+ char buf[100];
+ ssize_t retval;
+ size_t buflen;
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ return -EINVAL;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ return -EINVAL;
+ }
+
+ if (*offset)
+ return 0;
+
+ memset(buf, 0, sizeof(buf));
+
+ pte = iommu_iova_to_pte(dev->archdata.mapping->domain,
+ ddev->iova);
+
+ if (!pte)
+ strlcpy(buf, "FAIL\n", sizeof(buf));
+ else
+ snprintf(buf, sizeof(buf), "pte=%016llx\n", pte);
+
+ buflen = strlen(buf);
+ if (copy_to_user(ubuf, buf, buflen)) {
+ pr_err("Couldn't copy_to_user\n");
+ retval = -EFAULT;
+ } else {
+ *offset = 1; /* non-zero means we're done */
+ retval = buflen;
+ }
+
+ return retval;
+}
+
+static const struct file_operations iommu_debug_pte_fops = {
+ .open = simple_open,
+ .write = iommu_debug_pte_write,
+ .read = iommu_debug_pte_read,
+};
+
static ssize_t iommu_debug_atos_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset)
@@ -1370,6 +1577,55 @@ static const struct file_operations iommu_debug_atos_fops = {
.read = iommu_debug_atos_read,
};
+static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ phys_addr_t phys;
+ char buf[100];
+ ssize_t retval;
+ size_t buflen;
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ return -EINVAL;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ return -EINVAL;
+ }
+
+ if (*offset)
+ return 0;
+
+ memset(buf, 0, sizeof(buf));
+
+ phys = iommu_iova_to_phys_hard(dev->archdata.mapping->domain,
+ ddev->iova);
+ if (!phys)
+ strlcpy(buf, "FAIL\n", sizeof(buf));
+ else
+ snprintf(buf, sizeof(buf), "%pa\n", &phys);
+
+ buflen = strlen(buf);
+ if (copy_to_user(ubuf, buf, buflen)) {
+ pr_err("Couldn't copy_to_user\n");
+ retval = -EFAULT;
+ } else {
+ *offset = 1; /* non-zero means we're done */
+ retval = buflen;
+ }
+
+ return retval;
+}
+
+static const struct file_operations iommu_debug_dma_atos_fops = {
+ .open = simple_open,
+ .write = iommu_debug_atos_write,
+ .read = iommu_debug_dma_atos_read,
+};
+
static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *offset)
{
@@ -1450,6 +1706,159 @@ static const struct file_operations iommu_debug_map_fops = {
.write = iommu_debug_map_write,
};
+/*
+ * Performs DMA mapping of a given virtual address and size to an iova address.
+ * User input format: (addr,len,dma attr) where dma attr is:
+ * 0: normal mapping
+ * 1: force coherent mapping
+ * 2: force non-coherent mapping
+ * 3: use system cache
+ */
+static ssize_t iommu_debug_dma_map_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *offset)
+{
+ ssize_t retval = -EINVAL;
+ char *comma1, *comma2;
+ char buf[100];
+ unsigned long addr;
+ void *v_addr;
+ dma_addr_t iova;
+ size_t size;
+ unsigned int attr;
+ unsigned long dma_attrs;
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+
+ if (count >= sizeof(buf)) {
+ pr_err("Value too large\n");
+ return -EINVAL;
+ }
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ memset(buf, 0, sizeof(buf));
+
+ if (copy_from_user(buf, ubuf, count)) {
+ pr_err("Couldn't copy from user\n");
+ retval = -EFAULT;
+ goto out;
+ }
+
+ comma1 = strnchr(buf, count, ',');
+ if (!comma1)
+ goto invalid_format;
+
+ comma2 = strnchr(comma1 + 1, count, ',');
+ if (!comma2)
+ goto invalid_format;
+
+ *comma1 = *comma2 = '\0';
+
+ if (kstrtoul(buf, 0, &addr))
+ goto invalid_format;
+ v_addr = (void *)addr;
+
+ if (kstrtosize_t(comma1 + 1, 0, &size))
+ goto invalid_format;
+
+ if (kstrtouint(comma2 + 1, 0, &attr))
+ goto invalid_format;
+
+ if (v_addr < test_virt_addr || v_addr > (test_virt_addr + SZ_1M - 1))
+ goto invalid_addr;
+
+ if (attr == 0)
+ dma_attrs = 0;
+ else if (attr == 1)
+ dma_attrs = DMA_ATTR_FORCE_COHERENT;
+ else if (attr == 2)
+ dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
+ else if (attr == 3)
+ dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+ else
+ goto invalid_format;
+
+ iova = dma_map_single_attrs(dev, v_addr, size,
+ DMA_TO_DEVICE, dma_attrs);
+
+ if (dma_mapping_error(dev, iova)) {
+ pr_err("Failed to perform dma_map_single\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ retval = count;
+ pr_err("Mapped 0x%p to %pa (len=0x%zx)\n",
+ v_addr, &iova, size);
+ ddev->iova = iova;
+ pr_err("Saved iova=%pa for future PTE commands\n", &iova);
+out:
+ return retval;
+
+invalid_format:
+ pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n3: use system cache\n");
+ return retval;
+
+invalid_addr:
+ pr_err("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
+ return retval;
+}
+
+static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ char buf[100];
+ ssize_t retval;
+ size_t buflen;
+ dma_addr_t iova;
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ return -EINVAL;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ return -EINVAL;
+ }
+
+ if (*offset)
+ return 0;
+
+ memset(buf, 0, sizeof(buf));
+
+ iova = ddev->iova;
+ snprintf(buf, sizeof(buf), "%pa\n", &iova);
+
+ buflen = strlen(buf);
+ if (copy_to_user(ubuf, buf, buflen)) {
+ pr_err("Couldn't copy_to_user\n");
+ retval = -EFAULT;
+ } else {
+ *offset = 1; /* non-zero means we're done */
+ retval = buflen;
+ }
+
+ return retval;
+}
+
+static const struct file_operations iommu_debug_dma_map_fops = {
+ .open = simple_open,
+ .write = iommu_debug_dma_map_write,
+ .read = iommu_debug_dma_map_read,
+};
+
static ssize_t iommu_debug_unmap_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset)
@@ -1515,6 +1924,91 @@ static const struct file_operations iommu_debug_unmap_fops = {
.write = iommu_debug_unmap_write,
};
+static ssize_t iommu_debug_dma_unmap_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ ssize_t retval = 0;
+ char *comma1, *comma2;
+ char buf[100];
+ size_t size;
+ unsigned int attr;
+ dma_addr_t iova;
+ unsigned long dma_attrs;
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+
+ if (count >= sizeof(buf)) {
+ pr_err("Value too large\n");
+ return -EINVAL;
+ }
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ memset(buf, 0, sizeof(buf));
+
+ if (copy_from_user(buf, ubuf, count)) {
+ pr_err("Couldn't copy from user\n");
+ retval = -EFAULT;
+ goto out;
+ }
+
+ comma1 = strnchr(buf, count, ',');
+ if (!comma1)
+ goto invalid_format;
+
+ comma2 = strnchr(comma1 + 1, count, ',');
+ if (!comma2)
+ goto invalid_format;
+
+ *comma1 = *comma2 = '\0';
+
+ if (kstrtoux(buf, 0, &iova))
+ goto invalid_format;
+
+ if (kstrtosize_t(comma1 + 1, 0, &size))
+ goto invalid_format;
+
+ if (kstrtouint(comma2 + 1, 0, &attr))
+ goto invalid_format;
+
+ if (attr == 0)
+ dma_attrs = 0;
+ else if (attr == 1)
+ dma_attrs = DMA_ATTR_FORCE_COHERENT;
+ else if (attr == 2)
+ dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
+ else if (attr == 3)
+ dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+ else
+ goto invalid_format;
+
+ dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);
+
+ retval = count;
+ pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
+out:
+ return retval;
+
+invalid_format:
+ pr_err("Invalid format. Expected: iova,len, dma attr\n");
+ return retval;
+}
+
+static const struct file_operations iommu_debug_dma_unmap_fops = {
+ .open = simple_open,
+ .write = iommu_debug_dma_unmap_write,
+};
+
static ssize_t iommu_debug_config_clocks_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset)
@@ -1624,6 +2118,13 @@ static int snarf_iommu_devices(struct device *dev, void *ignored)
goto err_rmdir;
}
+ if (!debugfs_create_file("test_virt_addr", 0400, dir, ddev,
+ &iommu_debug_test_virt_addr_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/test_virt_addr debugfs file\n",
+ dev_name(dev));
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
&iommu_debug_profiling_fops)) {
pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
@@ -1666,6 +2167,13 @@ static int snarf_iommu_devices(struct device *dev, void *ignored)
goto err_rmdir;
}
+ if (!debugfs_create_file("dma_attach", 0600, dir, ddev,
+ &iommu_debug_dma_attach_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/dma_attach debugfs file\n",
+ dev_name(dev));
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
&iommu_debug_attach_fops)) {
pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
@@ -1687,6 +2195,13 @@ static int snarf_iommu_devices(struct device *dev, void *ignored)
goto err_rmdir;
}
+ if (!debugfs_create_file("dma_atos", 0600, dir, ddev,
+ &iommu_debug_dma_atos_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/dma_atos debugfs file\n",
+ dev_name(dev));
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
&iommu_debug_map_fops)) {
pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
@@ -1694,6 +2209,13 @@ static int snarf_iommu_devices(struct device *dev, void *ignored)
goto err_rmdir;
}
+ if (!debugfs_create_file("dma_map", 0600, dir, ddev,
+ &iommu_debug_dma_map_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/dma_map debugfs file\n",
+ dev_name(dev));
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
&iommu_debug_unmap_fops)) {
pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
@@ -1701,6 +2223,20 @@ static int snarf_iommu_devices(struct device *dev, void *ignored)
goto err_rmdir;
}
+ if (!debugfs_create_file("dma_unmap", 0200, dir, ddev,
+ &iommu_debug_dma_unmap_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/dma_unmap debugfs file\n",
+ dev_name(dev));
+ goto err_rmdir;
+ }
+
+ if (!debugfs_create_file("pte", 0600, dir, ddev,
+ &iommu_debug_pte_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/pte debugfs file\n",
+ dev_name(dev));
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
&iommu_debug_config_clocks_fops)) {
pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
@@ -1734,6 +2270,11 @@ static int iommu_debug_init_tests(void)
return -ENODEV;
}
+ test_virt_addr = kzalloc(SZ_1M, GFP_KERNEL);
+
+ if (!test_virt_addr)
+ return -ENOMEM;
+
return bus_for_each_dev(&platform_bus_type, NULL, NULL,
snarf_iommu_devices);
}
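For context on the debugfs interface that iommu-debug.c gains above (dma_attach, test_virt_addr, dma_map, pte, dma_atos, dma_unmap): a minimal user-space sketch of the intended flow, assuming debugfs is mounted at /sys/kernel/debug; the device directory name is a placeholder and the snippet is illustration only, not part of this patch.

/*
 * Hypothetical usage sketch: attach a DMA mapping, fetch the 1MB test
 * buffer address, map 4KB of it, then read back the iova saved for the
 * pte/dma_atos files.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define DBG_DIR "/sys/kernel/debug/iommu/devices/example-dev" /* hypothetical */

static void write_file(const char *name, const char *val)
{
	char path[256];
	int fd;

	snprintf(path, sizeof(path), DBG_DIR "/%s", name);
	fd = open(path, O_WRONLY);
	if (fd >= 0) {
		write(fd, val, strlen(val));
		close(fd);
	}
}

static void read_file(const char *name, char *buf, size_t len)
{
	char path[256];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), DBG_DIR "/%s", name);
	fd = open(path, O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, len - 1);
		buf[n > 0 ? n : 0] = '\0';
		close(fd);
	}
}

int main(void)
{
	char addr[64] = "", iova[64] = "", cmd[96];

	write_file("dma_attach", "1");		/* arm_iommu_attach_device() path */
	read_file("test_virt_addr", addr, sizeof(addr));
	addr[strcspn(addr, "\n")] = '\0';

	/* dma_map format is addr,len,attr; attr 1 = DMA_ATTR_FORCE_COHERENT */
	snprintf(cmd, sizeof(cmd), "%s,0x1000,1", addr);
	write_file("dma_map", cmd);

	read_file("dma_map", iova, sizeof(iova));	/* iova saved for pte/dma_atos */
	printf("mapped at iova %s", iova);

	write_file("dma_attach", "0");		/* detach and release the mapping */
	return 0;
}

A fuller test would also write "iova,len,attr" to dma_unmap before detaching.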
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index e81bb48..6c3f8a2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1282,6 +1282,15 @@ phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain,
return domain->ops->iova_to_phys_hard(domain, iova);
}
+uint64_t iommu_iova_to_pte(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ if (unlikely(domain->ops->iova_to_pte == NULL))
+ return 0;
+
+ return domain->ops->iova_to_pte(domain, iova);
+}
+
bool iommu_is_iova_coherent(struct iommu_domain *domain, dma_addr_t iova)
{
if (unlikely(domain->ops->is_iova_coherent == NULL))
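A short kernel-side sketch of a hypothetical caller of the new iommu_iova_to_pte() helper above (illustration only, assuming the prototype is exposed through linux/iommu.h like the neighbouring helpers); a zero return means either an unmapped iova or a pgtable driver without the iova_to_pte op.

#include <linux/iommu.h>
#include <linux/kernel.h>

/* Hypothetical consumer of the new helper (not part of this patch). */
static void example_dump_pte(struct iommu_domain *domain, dma_addr_t iova)
{
	uint64_t pte = iommu_iova_to_pte(domain, iova);

	if (!pte)
		pr_info("no PTE (or op not implemented) for iova %pad\n", &iova);
	else
		pr_info("iova %pad -> pte 0x%016llx\n", &iova,
			(unsigned long long)pte);
}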
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 6c7f6c4..d2cb1e8 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -60,6 +60,7 @@ static void msg_submit(struct mbox_chan *chan)
void *data;
int err = -EBUSY;
+again:
spin_lock_irqsave(&chan->lock, flags);
if (!chan->msg_count || chan->active_req)
@@ -85,6 +86,16 @@ static void msg_submit(struct mbox_chan *chan)
exit:
spin_unlock_irqrestore(&chan->lock, flags);
+ /*
+ * If the controller returns -EAGAIN, our spinlock here is preventing
+ * the controller from handling the interrupt that would free up the
+ * channels currently blocked waiting on a response.
+ * Unlock and retry.
+ */
+ if (err == -EAGAIN)
+ goto again;
+
if (!err && (chan->txdone_method & TXDONE_BY_POLL))
/* kick start the timer immediately to avoid delays */
hrtimer_start(&chan->mbox->poll_hrt, ktime_set(0, 0),
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index b328a2a..1f649d6 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -28,7 +28,6 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
-#include <linux/workqueue.h>
#include <asm-generic/io.h>
@@ -95,10 +94,10 @@ struct tcs_response {
struct mbox_chan *chan;
struct tcs_mbox_msg *msg;
u32 m; /* m-th TCS */
- struct tasklet_struct tasklet;
int err;
int idx;
bool in_use;
+ struct list_head list;
};
struct tcs_response_pool {
@@ -122,16 +121,18 @@ struct tcs_mbox {
/* One per MBOX controller */
struct tcs_drv {
+ struct mbox_controller mbox;
const char *name;
- void *base; /* start address of the RSC's registers */
- void *reg_base; /* start address for DRV specific register */
+ void __iomem *base; /* start address of the RSC's registers */
+ void __iomem *reg_base; /* start address for DRV specific register */
int drv_id;
struct platform_device *pdev;
- struct mbox_controller mbox;
struct tcs_mbox tcs[TCS_TYPE_NR];
int num_assigned;
int num_tcs;
- struct workqueue_struct *wq;
+ struct tasklet_struct tasklet;
+ struct list_head response_pending;
+ spinlock_t drv_lock;
struct tcs_response_pool *resp_pool;
atomic_t tcs_in_use[MAX_POOL_SIZE];
/* Debug info */
@@ -141,8 +142,6 @@ struct tcs_drv {
atomic_t tcs_irq_count[MAX_POOL_SIZE];
};
-static void tcs_notify_tx_done(unsigned long data);
-
static int tcs_response_pool_init(struct tcs_drv *drv)
{
struct tcs_response_pool *pool;
@@ -153,11 +152,10 @@ static int tcs_response_pool_init(struct tcs_drv *drv)
return -ENOMEM;
for (i = 0; i < MAX_POOL_SIZE; i++) {
- tasklet_init(&pool->resp[i].tasklet, tcs_notify_tx_done,
- (unsigned long) &pool->resp[i]);
pool->resp[i].drv = drv;
pool->resp[i].idx = i;
pool->resp[i].m = TCS_M_INIT;
+ INIT_LIST_HEAD(&pool->resp[i].list);
}
spin_lock_init(&pool->lock);
@@ -188,6 +186,9 @@ static struct tcs_response *setup_response(struct tcs_drv *drv,
}
spin_unlock_irqrestore(&pool->lock, flags);
+ if (pos == MAX_POOL_SIZE)
+ pr_err("response pool is full\n");
+
return resp;
}
@@ -240,11 +241,11 @@ static void print_response(struct tcs_drv *drv, int m)
return;
msg = resp->msg;
- pr_info("Response object idx=%d:\n\tfor-tcs=%d\tin-use=%d\n",
+ pr_debug("Response object idx=%d:\n\tfor-tcs=%d\tin-use=%d\n",
resp->idx, resp->m, resp->in_use);
- pr_info("Msg: state=%d\n", msg->state);
+ pr_debug("Msg: state=%d\n", msg->state);
for (i = 0; i < msg->num_payload; i++)
- pr_info("addr=0x%x data=0x%x complete=0x%x\n",
+ pr_debug("addr=0x%x data=0x%x complete=0x%x\n",
msg->payload[i].addr,
msg->payload[i].data,
msg->payload[i].complete);
@@ -364,7 +365,15 @@ static inline struct tcs_mbox *get_tcs_for_msg(struct tcs_drv *drv,
static inline void send_tcs_response(struct tcs_response *resp)
{
- tasklet_schedule(&resp->tasklet);
+ struct tcs_drv *drv = resp->drv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv->drv_lock, flags);
+ INIT_LIST_HEAD(&resp->list);
+ list_add_tail(&resp->list, &drv->response_pending);
+ spin_unlock_irqrestore(&drv->drv_lock, flags);
+
+ tasklet_schedule(&drv->tasklet);
}
static inline void enable_tcs_irq(struct tcs_drv *drv, int m, bool enable)
@@ -455,12 +464,12 @@ static irqreturn_t tcs_irq_handler(int irq, void *p)
/* Clear the TCS IRQ status */
write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));
+ /* Notify the client that this request is completed. */
+ atomic_set(&drv->tcs_in_use[m], 0);
+
/* Clean up response object and notify mbox in tasklet */
if (resp)
send_tcs_response(resp);
-
- /* Notify the client that this request is completed. */
- atomic_set(&drv->tcs_in_use[m], 0);
}
return IRQ_HANDLED;
@@ -475,19 +484,38 @@ static inline void mbox_notify_tx_done(struct mbox_chan *chan,
mbox_chan_txdone(chan, err);
}
-/**
- * tcs_notify_tx_done: TX Done for requests that do not trigger TCS
- */
-static void tcs_notify_tx_done(unsigned long data)
+static void respond_tx_done(struct tcs_response *resp)
{
- struct tcs_response *resp = (struct tcs_response *) data;
struct mbox_chan *chan = resp->chan;
struct tcs_mbox_msg *msg = resp->msg;
int err = resp->err;
int m = resp->m;
- mbox_notify_tx_done(chan, msg, m, err);
free_response(resp);
+ mbox_notify_tx_done(chan, msg, m, err);
+}
+
+/**
+ * tcs_notify_tx_done: TX Done for requests that do not trigger TCS
+ */
+static void tcs_notify_tx_done(unsigned long data)
+{
+ struct tcs_drv *drv = (struct tcs_drv *)data;
+ struct tcs_response *resp;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&drv->drv_lock, flags);
+ if (list_empty(&drv->response_pending)) {
+ spin_unlock_irqrestore(&drv->drv_lock, flags);
+ break;
+ }
+ resp = list_first_entry(&drv->response_pending,
+ struct tcs_response, list);
+ list_del(&resp->list);
+ spin_unlock_irqrestore(&drv->drv_lock, flags);
+ respond_tx_done(resp);
+ } while (1);
}
static void __tcs_buffer_write(struct tcs_drv *drv, int d, int m, int n,
@@ -673,8 +701,11 @@ static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
if (IS_ERR(tcs))
return PTR_ERR(tcs);
- if (trigger)
+ if (trigger) {
resp = setup_response(drv, msg, chan, TCS_M_INIT, 0);
+ if (IS_ERR_OR_NULL(resp))
+ return -EBUSY;
+ }
/* Identify the sequential slots that we can write to */
spin_lock_irqsave(&tcs->tcs_lock, flags);
@@ -686,28 +717,21 @@ static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
return slot;
}
- if (trigger) {
- ret = check_for_req_inflight(drv, tcs, msg);
- if (ret) {
- spin_unlock_irqrestore(&tcs->tcs_lock, flags);
- return ret;
- }
- }
-
- /* Mark the slots as in-use, before we unlock */
- if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
- bitmap_set(tcs->slots, slot, msg->num_payload);
-
- /* Copy the addresses of the resources over to the slots */
- for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
- tcs->cmd_addr[slot + i] = msg->payload[i].addr;
-
+ /* Figure out the TCS-m and CMD-n to write to */
offset = slot / tcs->ncpt;
m = offset + tcs->tcs_offset;
n = slot % tcs->ncpt;
- /* Block, if we have an address from the msg in flight */
if (trigger) {
+ /* Block, if we have an address from the msg in flight */
+ ret = check_for_req_inflight(drv, tcs, msg);
+ if (ret) {
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
+ if (resp)
+ free_response(resp);
+ return ret;
+ }
+
resp->m = m;
/* Mark the TCS as busy */
atomic_set(&drv->tcs_in_use[m], 1);
@@ -716,6 +740,14 @@ static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
if (tcs->type != ACTIVE_TCS)
enable_tcs_irq(drv, m, true);
drv->tcs_last_sent_ts[m] = arch_counter_get_cntvct();
+ } else {
+ /* Mark the slots as in-use, before we unlock */
+ if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
+ bitmap_set(tcs->slots, slot, msg->num_payload);
+
+ /* Copy the addresses of the resources over to the slots */
+ for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
+ tcs->cmd_addr[slot + i] = msg->payload[i].addr;
}
/* Write to the TCS or AMC */
@@ -758,6 +790,32 @@ static int tcs_mbox_invalidate(struct mbox_chan *chan)
return 0;
}
+static void print_tcs_regs(struct tcs_drv *drv, int m)
+{
+ int n;
+ struct tcs_mbox *tcs = get_tcs_from_index(drv, m);
+ void __iomem *base = drv->reg_base;
+ u32 enable, addr, data, msgid;
+
+ if (!tcs || tcs_is_free(drv, m))
+ return;
+
+ enable = read_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0);
+ if (!enable)
+ return;
+
+ pr_debug("TCS-%d contents:\n", m);
+ for (n = 0; n < tcs->ncpt; n++) {
+ if (!(enable & BIT(n)))
+ continue;
+ addr = read_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n);
+ data = read_tcs_reg(base, TCS_DRV_CMD_DATA, m, n);
+ msgid = read_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n);
+ pr_debug("\tn=%d addr=0x%x data=0x%x hdr=0x%x\n",
+ n, addr, data, msgid);
+ }
+}
+
static void dump_tcs_stats(struct tcs_drv *drv)
{
int i;
@@ -766,12 +824,13 @@ static void dump_tcs_stats(struct tcs_drv *drv)
for (i = 0; i < drv->num_tcs; i++) {
if (!atomic_read(&drv->tcs_in_use[i]))
continue;
- pr_info("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
+ pr_debug("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
curr, i,
atomic_read(&drv->tcs_send_count[i]),
drv->tcs_last_sent_ts[i],
atomic_read(&drv->tcs_irq_count[i]),
drv->tcs_last_recv_ts[i]);
+ print_tcs_regs(drv, i);
print_response(drv, i);
}
}
@@ -840,7 +899,7 @@ static int chan_tcs_write(struct mbox_chan *chan, void *data)
if (ret != -EBUSY)
break;
udelay(100);
- } while (++count < 10);
+ } while (++count < 100);
tx_fail:
/* If there was an error in the request, schedule a response */
@@ -849,7 +908,8 @@ static int chan_tcs_write(struct mbox_chan *chan, void *data)
drv, msg, chan, TCS_M_INIT, ret);
dev_err(dev, "Error sending RPMH message %d\n", ret);
- send_tcs_response(resp);
+ if (resp)
+ send_tcs_response(resp);
ret = 0;
}
@@ -857,6 +917,7 @@ static int chan_tcs_write(struct mbox_chan *chan, void *data)
if (ret == -EBUSY) {
dev_err(dev, "TCS Busy, retrying RPMH message send\n");
dump_tcs_stats(drv);
+ ret = -EAGAIN;
}
return ret;
@@ -967,6 +1028,7 @@ static struct mbox_chan *of_tcs_mbox_xlate(struct mbox_controller *mbox,
}
chan = &mbox->chans[drv->num_assigned++];
+ chan->con_priv = drv;
return chan;
}
@@ -1108,6 +1170,9 @@ static int tcs_drv_probe(struct platform_device *pdev)
drv->mbox.is_idle = tcs_drv_is_idle;
drv->num_tcs = st;
drv->pdev = pdev;
+ INIT_LIST_HEAD(&drv->response_pending);
+ spin_lock_init(&drv->drv_lock);
+ tasklet_init(&drv->tasklet, tcs_notify_tx_done, (unsigned long)drv);
drv->name = of_get_property(pdev->dev.of_node, "label", NULL);
if (!drv->name)
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
index fdebdc7..b774625 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -51,21 +51,23 @@ struct cam_cpas_intf {
static struct cam_cpas_intf *g_cpas_intf;
int cam_cpas_get_hw_info(uint32_t *camera_family,
- struct cam_hw_version *camera_version)
+ struct cam_hw_version *camera_version,
+ struct cam_hw_version *cpas_version)
{
if (!CAM_CPAS_INTF_INITIALIZED()) {
pr_err("cpas intf not initialized\n");
return -ENODEV;
}
- if (!camera_family || !camera_version) {
- pr_err("invalid input %pK %pK\n", camera_family,
- camera_version);
+ if (!camera_family || !camera_version || !cpas_version) {
+ pr_err("invalid input %pK %pK %pK\n", camera_family,
+ camera_version, cpas_version);
return -EINVAL;
}
*camera_family = g_cpas_intf->hw_caps.camera_family;
*camera_version = g_cpas_intf->hw_caps.camera_version;
+ *cpas_version = g_cpas_intf->hw_caps.cpas_version;
return 0;
}
@@ -344,7 +346,7 @@ int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf,
}
rc = cam_cpas_get_hw_info(&query.camera_family,
- &query.camera_version);
+ &query.camera_version, &query.cpas_version);
if (rc)
break;
@@ -428,6 +430,7 @@ static long cam_cpas_subdev_ioctl(struct v4l2_subdev *sd,
static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, unsigned long arg)
{
+ struct cam_control cmd_data;
int32_t rc;
struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
@@ -436,9 +439,16 @@ static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
return -ENODEV;
}
+ if (copy_from_user(&cmd_data, (void __user *)arg,
+ sizeof(cmd_data))) {
+ pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+ (void __user *)arg, sizeof(cmd_data));
+ return -EFAULT;
+ }
+
switch (cmd) {
case VIDIOC_CAM_CONTROL:
- rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
+ rc = cam_cpas_subdev_cmd(cpas_intf, &cmd_data);
break;
default:
pr_err("Invalid command %d for CPAS!\n", cmd);
@@ -446,6 +456,15 @@ static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
break;
}
+ if (!rc) {
+ if (copy_to_user((void __user *)arg, &cmd_data,
+ sizeof(cmd_data))) {
+ pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+ (void __user *)arg, sizeof(cmd_data));
+ rc = -EFAULT;
+ }
+ }
+
return rc;
}
#endif
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
index 12c8e66..20ed1b6 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
@@ -279,17 +279,16 @@ static struct cam_camnoc_specific
.value = 3,
},
.danger_lut = {
- .enable = false,
+ .enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
- .masked_value = 0,
.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
- .value = 0x0,
+ .value = 0xFFFFFF00,
},
.safe_lut = {
- .enable = false,
+ .enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
- .value = 0x0,
+ .value = 0x3,
},
.ubwc_ctl = {
.enable = true,
@@ -328,18 +327,16 @@ static struct cam_camnoc_specific
.value = 3,
},
.danger_lut = {
- .enable = false,
+ .enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
- .masked_value = 0,
.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
- .value = 0x0,
+ .value = 0xFFFFFF00,
},
.safe_lut = {
- .enable = false,
+ .enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
- .masked_value = 0,
.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
- .value = 0x0,
+ .value = 0x3,
},
.ubwc_ctl = {
.enable = true,
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
index f6b0729..27b8504 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -312,13 +312,15 @@ int cam_cpas_reg_read(
* @camera_family : Camera family type. One of
* CAM_FAMILY_CAMERA_SS
* CAM_FAMILY_CPAS_SS
- * @camera_version : Camera version
+ * @camera_version : Camera platform version
+ * @cpas_version : Camera cpas version
*
* @return 0 on success.
*
*/
int cam_cpas_get_hw_info(
uint32_t *camera_family,
- struct cam_hw_version *camera_version);
+ struct cam_hw_version *camera_version,
+ struct cam_hw_version *cpas_version);
#endif /* _CAM_CPAS_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index c837232..4888e5b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -531,7 +531,8 @@ int cam_sensor_match_id(struct cam_sensor_ctrl_t *s_ctrl)
rc = camera_io_dev_read(
&(s_ctrl->io_master_info),
slave_info->sensor_id_reg_addr,
- &chipid, CAMERA_SENSOR_I2C_TYPE_WORD);
+ &chipid, CAMERA_SENSOR_I2C_TYPE_WORD,
+ CAMERA_SENSOR_I2C_TYPE_WORD);
CDBG("%s:%d read id: 0x%x expected id 0x%x:\n",
__func__, __LINE__, chipid, slave_info->sensor_id);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index f889abc..13e115a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -38,6 +38,7 @@ int32_t camera_io_dev_poll(struct camera_io_master *io_master_info,
int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type addr_type,
enum camera_sensor_i2c_type data_type)
{
if (!io_master_info) {
@@ -47,7 +48,7 @@ int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
if (io_master_info->master_type == CCI_MASTER) {
return cam_cci_i2c_read(io_master_info->cci_client,
- addr, data, data_type, data_type);
+ addr, data, addr_type, data_type);
} else {
pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
__LINE__, io_master_info->master_type);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
index 757ac17..27bbe6e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
@@ -38,12 +38,14 @@ struct camera_io_master {
* @io_master_info: I2C/SPI master information
* @addr: I2C address
* @data: I2C data
+ * @addr_type: I2C addr type
* @data_type: I2C data type
*
* This API abstracts read functionality based on master type
*/
int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type addr_type,
enum camera_sensor_i2c_type data_type);
/**
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index 901632a..96f40e1 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -82,7 +82,7 @@ int cam_sync_register_callback(sync_callback cb_func,
sync_cb->sync_obj = sync_obj;
INIT_WORK(&sync_cb->cb_dispatch_work,
cam_sync_util_cb_dispatch);
-
+ list_add_tail(&sync_cb->list, &row->callback_list);
sync_cb->status = row->state;
queue_work(sync_dev->work_queue,
&sync_cb->cb_dispatch_work);
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
index 1e42f75..0ffea5b 100644
--- a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
@@ -44,6 +44,7 @@ struct hfi_mem_info {
struct hfi_mem msg_q;
struct hfi_mem dbg_q;
struct hfi_mem sec_heap;
+ struct hfi_mem shmem;
void __iomem *icp_base;
};
diff --git a/drivers/media/platform/msm/camera/icp/hfi.c b/drivers/media/platform/msm/camera/icp/hfi.c
index 4315865..15e0315 100644
--- a/drivers/media/platform/msm/camera/icp/hfi.c
+++ b/drivers/media/platform/msm/camera/icp/hfi.c
@@ -19,6 +19,8 @@
#include <asm/errno.h>
#include <linux/timer.h>
#include <media/cam_icp.h>
+#include <linux/iopoll.h>
+
#include "cam_io_util.h"
#include "hfi_reg.h"
#include "hfi_sys_defs.h"
@@ -336,7 +338,7 @@ int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
icp_base + HFI_REG_A5_CSR_A5_CONTROL);
} else {
cam_io_w((uint32_t)ICP_FLAG_CSR_A5_EN |
- ICP_FLAG_CSR_WAKE_UP_EN,
+ ICP_FLAG_CSR_WAKE_UP_EN | ICP_CSR_EN_CLKGATE_WFI,
icp_base + HFI_REG_A5_CSR_A5_CONTROL);
}
@@ -460,8 +462,10 @@ int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
}
cam_io_w((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
- cam_io_w((uint32_t)0x7400000, icp_base + HFI_REG_SHARED_MEM_PTR);
- cam_io_w((uint32_t)0x6400000, icp_base + HFI_REG_SHARED_MEM_SIZE);
+ cam_io_w((uint32_t)hfi_mem->shmem.iova,
+ icp_base + HFI_REG_SHARED_MEM_PTR);
+ cam_io_w((uint32_t)hfi_mem->shmem.len,
+ icp_base + HFI_REG_SHARED_MEM_SIZE);
cam_io_w((uint32_t)hfi_mem->sec_heap.iova,
icp_base + HFI_REG_UNCACHED_HEAP_PTR);
cam_io_w((uint32_t)hfi_mem->sec_heap.len,
@@ -472,25 +476,17 @@ int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
pr_debug("hw version : %u[%x]\n", hw_version, hw_version);
- do {
- msleep(500);
- status = cam_io_r(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
- } while (status != ICP_INIT_RESP_SUCCESS);
-
- if (status == ICP_INIT_RESP_SUCCESS) {
- g_hfi->hfi_state = FW_RESP_DONE;
- rc = 0;
- } else {
- rc = -ENODEV;
- pr_err("FW initialization failed");
+ rc = readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
+ status, status != ICP_INIT_RESP_SUCCESS, 15, 200);
+ if (rc) {
+ pr_err("timed out , status = %u\n", status);
goto regions_fail;
}
fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
g_hfi->hfi_state = FW_START_SENT;
- pr_debug("fw version : %u[%x]\n", fw_version, fw_version);
- pr_debug("hfi init is successful\n");
+ HFI_DBG("fw version : %u[%x]\n", fw_version, fw_version);
cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
return rc;
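For reference, readw_poll_timeout() from linux/iopoll.h, used in cam_hfi_init() above, re-reads the register every delay_us microseconds until the caller's condition becomes true or timeout_us elapses, returning 0 on success and -ETIMEDOUT otherwise, with the last value read left in the status variable. A rough open-coded sketch of those assumed semantics (not part of this patch):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/types.h>

/* Sketch of readw_poll_timeout(addr, val, cond, delay_us, timeout_us). */
static int example_readw_poll(void __iomem *addr, u32 *val, bool (*cond)(u32),
			      unsigned long delay_us, unsigned long timeout_us)
{
	ktime_t end = ktime_add_us(ktime_get(), timeout_us);

	for (;;) {
		*val = readw(addr);
		if (cond(*val))
			return 0;
		if (ktime_compare(ktime_get(), end) > 0) {
			*val = readw(addr);	/* one last read after timeout */
			return cond(*val) ? 0 : -ETIMEDOUT;
		}
		udelay(delay_us);
	}
}

In the call above, the register is polled every 15 microseconds for at most 200 microseconds.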
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 140542b..43491a9 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -26,6 +26,8 @@
#include <linux/debugfs.h>
#include <media/cam_defs.h>
#include <media/cam_icp.h>
+#include <linux/debugfs.h>
+
#include "cam_sync_api.h"
#include "cam_packet_util.h"
#include "cam_hw.h"
@@ -55,6 +57,23 @@
static struct cam_icp_hw_mgr icp_hw_mgr;
+static int cam_icp_hw_mgr_create_debugfs_entry(void)
+{
+ icp_hw_mgr.dentry = debugfs_create_dir("camera_icp", NULL);
+ if (!icp_hw_mgr.dentry)
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("a5_debug",
+ 0644,
+ icp_hw_mgr.dentry,
+ &icp_hw_mgr.a5_debug)) {
+ debugfs_remove_recursive(icp_hw_mgr.dentry);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int cam_icp_stop_cpas(struct cam_icp_hw_mgr *hw_mgr_priv)
{
struct cam_hw_intf *a5_dev_intf = NULL;
@@ -568,7 +587,12 @@ static int cam_icp_allocate_hfi_mem(void)
uint64_t kvaddr;
size_t len;
- pr_err("Allocating FW for iommu handle: %x\n", icp_hw_mgr.iommu_hdl);
+ rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+ CAM_MEM_MGR_REGION_SHARED,
+ &icp_hw_mgr.hfi_mem.shmem);
+ if (rc)
+ return -ENOMEM;
+
rc = cam_smmu_alloc_firmware(icp_hw_mgr.iommu_hdl,
&iova, &kvaddr, &len);
if (rc < 0) {
@@ -764,7 +788,7 @@ static int cam_icp_mgr_destroy_handle(
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- pr_err("timeout/err in iconfig command: %d\n", rc);
+ pr_err("FW response timeout: %d\n", rc);
}
return rc;
@@ -870,6 +894,7 @@ static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
cam_icp_free_hfi_mem();
hw_mgr->fw_download = false;
+ debugfs_remove_recursive(icp_hw_mgr.dentry);
mutex_unlock(&hw_mgr->hw_mgr_mutex);
return 0;
@@ -886,6 +911,8 @@ static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
struct cam_icp_a5_set_irq_cb irq_cb;
struct cam_icp_a5_set_fw_buf_info fw_buf_info;
struct hfi_mem_info hfi_mem;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
int rc = 0;
if (!hw_mgr) {
@@ -1014,9 +1041,12 @@ static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
hfi_mem.sec_heap.len = icp_hw_mgr.hfi_mem.sec_heap.len;
+ hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
+ hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
+
rc = cam_hfi_init(0, &hfi_mem,
a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
- false);
+ hw_mgr->a5_debug);
if (rc < 0) {
pr_err("hfi_init is failed\n");
goto set_irq_failed;
@@ -1033,7 +1063,13 @@ static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
NULL, 0);
ICP_DBG("Wait for INIT DONE Message\n");
- wait_for_completion(&hw_mgr->a5_complete);
+ rem_jiffies = wait_for_completion_timeout(&icp_hw_mgr.a5_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ pr_err("FW response timed out %d\n", rc);
+ goto set_irq_failed;
+ }
ICP_DBG("Done Waiting for INIT DONE Message\n");
@@ -1041,6 +1077,10 @@ static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
a5_dev_intf->hw_priv,
CAM_ICP_A5_CMD_POWER_COLLAPSE,
NULL, 0);
+ if (rc) {
+ pr_err("icp power collapse failed\n");
+ goto set_irq_failed;
+ }
hw_mgr->fw_download = true;
@@ -1428,6 +1468,8 @@ static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
int rc = 0;
struct hfi_cmd_work_data *task_data;
struct hfi_cmd_ipebps_async ioconfig_cmd;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
ioconfig_cmd.size = sizeof(struct hfi_cmd_ipebps_async);
ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
@@ -1451,7 +1493,13 @@ static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
task->process_cb = cam_icp_mgr_process_cmd;
cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
ICP_DBG("fw_hdl = %x ctx_data = %pK\n", ctx_data->fw_handle, ctx_data);
- wait_for_completion(&ctx_data->wait_complete);
+
+ rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ pr_err("FW response timed out %d\n", rc);
+ }
return rc;
}
@@ -1462,6 +1510,8 @@ static int cam_icp_mgr_create_handle(uint32_t dev_type,
{
struct hfi_cmd_create_handle create_handle;
struct hfi_cmd_work_data *task_data;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
int rc = 0;
create_handle.size = sizeof(struct hfi_cmd_create_handle);
@@ -1479,7 +1529,13 @@ static int cam_icp_mgr_create_handle(uint32_t dev_type,
task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_icp_mgr_process_cmd;
cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
- wait_for_completion(&ctx_data->wait_complete);
+
+ rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ pr_err("FW response timed out %d\n", rc);
+ }
return rc;
}
@@ -1489,6 +1545,8 @@ static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data,
{
struct hfi_cmd_ping_pkt ping_pkt;
struct hfi_cmd_work_data *task_data;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
int rc = 0;
ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
@@ -1505,7 +1563,14 @@ static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data,
task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_icp_mgr_process_cmd;
cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
- wait_for_completion(&ctx_data->wait_complete);
+
+ rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ pr_err("FW response timed out %d\n", rc);
+ }
+
return rc;
}
@@ -1929,6 +1994,9 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
if (!icp_hw_mgr.msg_work_data)
goto msg_work_data_failed;
+ rc = cam_icp_hw_mgr_create_debugfs_entry();
+ if (rc)
+ goto msg_work_data_failed;
for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
icp_hw_mgr.msg_work->task.pool[i].payload =
@@ -1940,7 +2008,6 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
init_completion(&icp_hw_mgr.a5_complete);
- pr_err("Exit\n");
return rc;
msg_work_data_failed:
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index e5ffa7a..32d796a 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -23,6 +23,8 @@
#include "hfi_session_defs.h"
#include "cam_req_mgr_workq.h"
#include "cam_mem_mgr.h"
+#include "cam_smmu_api.h"
+
#define CAM_ICP_ROLE_PARENT 1
#define CAM_ICP_ROLE_CHILD 2
@@ -56,6 +58,7 @@ struct icp_hfi_mem_info {
struct cam_mem_mgr_memory_desc dbg_q;
struct cam_mem_mgr_memory_desc sec_heap;
struct cam_mem_mgr_memory_desc fw_buf;
+ struct cam_smmu_region_info shmem;
};
/**
@@ -176,6 +179,8 @@ struct cam_icp_hw_mgr {
struct hfi_cmd_work_data *cmd_work_data;
struct hfi_msg_work_data *msg_work_data;
uint32_t ctxt_cnt;
+ struct dentry *dentry;
+ bool a5_debug;
};
#endif /* CAM_ICP_HW_MGR_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 6ebfc1a..17fa2cc 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -351,12 +351,12 @@ static u32 sde_hw_rotator_v4_outpixfmts[] = {
SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
SDE_PIX_FMT_RGBA_1010102,
SDE_PIX_FMT_RGBX_1010102,
- /* SDE_PIX_FMT_ARGB_2101010 */
- /* SDE_PIX_FMT_XRGB_2101010 */
+ SDE_PIX_FMT_ARGB_2101010,
+ SDE_PIX_FMT_XRGB_2101010,
SDE_PIX_FMT_BGRA_1010102,
SDE_PIX_FMT_BGRX_1010102,
- /* SDE_PIX_FMT_ABGR_2101010 */
- /* SDE_PIX_FMT_XBGR_2101010 */
+ SDE_PIX_FMT_ABGR_2101010,
+ SDE_PIX_FMT_XBGR_2101010,
SDE_PIX_FMT_RGBA_1010102_UBWC,
SDE_PIX_FMT_RGBX_1010102_UBWC,
SDE_PIX_FMT_Y_CBCR_H2V2_P010,
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 88250e1..8d54e20 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1369,13 +1369,13 @@ int create_pkt_cmd_session_set_property(
pkt->size += sizeof(u32) * 2;
break;
}
- case HAL_CONFIG_VPE_OPERATIONS:
+ case HAL_PARAM_VPE_ROTATION:
{
- struct hfi_operations_type *hfi;
- struct hal_operations *prop =
- (struct hal_operations *) pdata;
- pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VPE_OPERATIONS;
- hfi = (struct hfi_operations_type *) &pkt->rg_property_data[1];
+ struct hfi_vpe_rotation_type *hfi;
+ struct hal_vpe_rotation *prop =
+ (struct hal_vpe_rotation *) pdata;
+ pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_VPE_ROTATION;
+ hfi = (struct hfi_vpe_rotation_type *)&pkt->rg_property_data[1];
switch (prop->rotate) {
case HAL_ROTATE_NONE:
hfi->rotation = HFI_ROTATE_NONE;
@@ -1411,7 +1411,7 @@ int create_pkt_cmd_session_set_property(
rc = -EINVAL;
break;
}
- pkt->size += sizeof(u32) + sizeof(struct hfi_operations_type);
+ pkt->size += sizeof(u32) + sizeof(struct hfi_vpe_rotation_type);
break;
}
case HAL_PARAM_VENC_INTRA_REFRESH:
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 89e8356..f678f56 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -110,6 +110,8 @@ static int hfi_process_sess_evt_seq_changed(u32 device_id,
struct hfi_profile_level *profile_level;
struct hfi_bit_depth *pixel_depth;
struct hfi_pic_struct *pic_struct;
+ struct hfi_buffer_requirements *buf_req;
+ struct hfi_index_extradata_input_crop_payload *crop_info;
u32 entropy_mode = 0;
u8 *data_ptr;
int prop_id;
@@ -231,6 +233,41 @@ static int hfi_process_sess_evt_seq_changed(u32 device_id,
data_ptr +=
sizeof(u32);
break;
+ case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+ data_ptr = data_ptr + sizeof(u32);
+ buf_req =
+ (struct hfi_buffer_requirements *)
+ data_ptr;
+ event_notify.capture_buf_count =
+ buf_req->buffer_count_min;
+ dprintk(VIDC_DBG,
+ "Capture Count : 0x%x\n",
+ event_notify.capture_buf_count);
+ data_ptr +=
+ sizeof(struct hfi_buffer_requirements);
+ break;
+ case HFI_INDEX_EXTRADATA_INPUT_CROP:
+ data_ptr = data_ptr + sizeof(u32);
+ crop_info = (struct
+ hfi_index_extradata_input_crop_payload *)
+ data_ptr;
+ event_notify.crop_data.left = crop_info->left;
+ event_notify.crop_data.top = crop_info->top;
+ event_notify.crop_data.width = crop_info->width;
+ event_notify.crop_data.height =
+ crop_info->height;
+ dprintk(VIDC_DBG,
+ "CROP info : Left = %d Top = %d\n",
+ crop_info->left,
+ crop_info->top);
+ dprintk(VIDC_DBG,
+ "CROP info : Width = %d Height = %d\n",
+ crop_info->width,
+ crop_info->height);
+ data_ptr +=
+ sizeof(struct
+ hfi_index_extradata_input_crop_payload);
+ break;
default:
dprintk(VIDC_ERR,
"%s cmd: %#x not supported\n",
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 074ea4fa..b116622 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -74,6 +74,14 @@ static int get_device_address(struct smem_client *smem_client,
goto mem_map_failed;
}
+ /* Check if the dmabuf size matches expected size */
+ if (buf->size < *buffer_size) {
+ rc = -EINVAL;
+ dprintk(VIDC_ERR,
+ "Size mismatch! Dmabuf size: %zu Expected Size: %lu",
+ buf->size, *buffer_size);
+ goto mem_buf_size_mismatch;
+ }
/* Prepare a dma buf for dma on the given device */
attach = dma_buf_attach(buf, cb->dev);
if (IS_ERR_OR_NULL(attach)) {
@@ -151,6 +159,7 @@ static int get_device_address(struct smem_client *smem_client,
dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
mem_map_table_failed:
dma_buf_detach(buf, attach);
+mem_buf_size_mismatch:
mem_buf_attach_failed:
dma_buf_put(buf);
mem_map_failed:
@@ -201,12 +210,12 @@ static void put_device_address(struct smem_client *smem_client,
}
}
-static int ion_user_to_kernel(struct smem_client *client, int fd, u32 offset,
+static int ion_user_to_kernel(struct smem_client *client, int fd, u32 size,
struct msm_smem *mem, enum hal_buffer buffer_type)
{
struct ion_handle *hndl = NULL;
ion_phys_addr_t iova = 0;
- unsigned long buffer_size = 0;
+ unsigned long buffer_size = size;
int rc = 0;
unsigned long align = SZ_4K;
unsigned long ion_flags = 0;
@@ -217,10 +226,11 @@ static int ion_user_to_kernel(struct smem_client *client, int fd, u32 offset,
dprintk(VIDC_DBG, "%s ion handle: %pK\n", __func__, hndl);
if (IS_ERR_OR_NULL(hndl)) {
dprintk(VIDC_ERR, "Failed to get handle: %pK, %d, %d, %pK\n",
- client, fd, offset, hndl);
+ client, fd, size, hndl);
rc = -ENOMEM;
goto fail_import_fd;
}
+
mem->kvaddr = NULL;
rc = ion_handle_get_flags(client->clnt, hndl, &ion_flags);
if (rc) {
@@ -441,7 +451,7 @@ static void ion_delete_client(struct smem_client *client)
ion_client_destroy(client->clnt);
}
-struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset,
+struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 size,
enum hal_buffer buffer_type)
{
struct smem_client *client = clt;
@@ -459,7 +469,7 @@ struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset,
}
switch (client->mem_type) {
case SMEM_ION:
- rc = ion_user_to_kernel(clt, fd, offset, mem, buffer_type);
+ rc = ion_user_to_kernel(clt, fd, size, mem, buffer_type);
break;
default:
dprintk(VIDC_ERR, "Mem type not supported\n");
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 7802d31..5c34f28 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -225,6 +225,14 @@ static int msm_v4l2_g_parm(struct file *file, void *fh,
return 0;
}
+static int msm_v4l2_g_crop(struct file *file, void *fh,
+ struct v4l2_crop *a)
+{
+ struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+ return msm_vidc_g_crop(vidc_inst, a);
+}
+
static int msm_v4l2_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
@@ -265,6 +273,7 @@ static const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = {
.vidioc_encoder_cmd = msm_v4l2_encoder_cmd,
.vidioc_s_parm = msm_v4l2_s_parm,
.vidioc_g_parm = msm_v4l2_g_parm,
+ .vidioc_g_crop = msm_v4l2_g_crop,
.vidioc_enum_framesizes = msm_v4l2_enum_framesizes,
};
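The vidioc_g_crop hook added above is reachable through the standard VIDIOC_G_CROP ioctl; a minimal user-space sketch follows (the device node is an assumption, not part of this patch), querying the decoder crop on the capture queue.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_crop crop;
	int fd = open("/dev/video32", O_RDWR);	/* decoder node is an assumption */

	if (fd < 0)
		return 1;

	memset(&crop, 0, sizeof(crop));
	crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	if (!ioctl(fd, VIDIOC_G_CROP, &crop))
		printf("crop: left=%d top=%d %dx%d\n",
		       crop.c.left, crop.c.top, crop.c.width, crop.c.height);

	close(fd);
	return 0;
}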
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index aa5f18d..d44684e 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1117,7 +1117,7 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
struct hal_h264_entropy_control h264_entropy_control;
struct hal_intra_period intra_period;
struct hal_idr_period idr_period;
- struct hal_operations operations;
+ struct hal_vpe_rotation vpe_rotation;
struct hal_intra_refresh intra_refresh;
struct hal_multi_slice_control multi_slice_control;
struct hal_h264_db_control h264_db_control;
@@ -1345,19 +1345,12 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION:
{
- if (!(inst->capability.pixelprocess_capabilities &
- HAL_VIDEO_ENCODER_ROTATION_CAPABILITY)) {
- dprintk(VIDC_ERR, "Rotation not supported: %#x\n",
- ctrl->id);
- rc = -ENOTSUPP;
- break;
- }
- property_id = HAL_CONFIG_VPE_OPERATIONS;
- operations.rotate = msm_comm_v4l2_to_hal(
+ property_id = HAL_PARAM_VPE_ROTATION;
+ vpe_rotation.rotate = msm_comm_v4l2_to_hal(
V4L2_CID_MPEG_VIDC_VIDEO_ROTATION,
ctrl->val);
- operations.flip = HAL_FLIP_NONE;
- pdata = &operations;
+ vpe_rotation.flip = HAL_FLIP_NONE;
+ pdata = &vpe_rotation;
break;
}
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 6253632..2e952a3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -265,6 +265,29 @@ int msm_vidc_s_ctrl(void *instance, struct v4l2_control *control)
}
EXPORT_SYMBOL(msm_vidc_s_ctrl);
+int msm_vidc_g_crop(void *instance, struct v4l2_crop *crop)
+{
+ struct msm_vidc_inst *inst = instance;
+
+ if (!inst || !crop)
+ return -EINVAL;
+
+ if (inst->session_type == MSM_VIDC_ENCODER) {
+ dprintk(VIDC_ERR,
+ "Session = %pK : Encoder Crop is not implemented yet\n",
+ inst);
+ return -EPERM;
+ }
+
+ crop->c.left = inst->prop.crop_info.left;
+ crop->c.top = inst->prop.crop_info.top;
+ crop->c.width = inst->prop.crop_info.width;
+ crop->c.height = inst->prop.crop_info.height;
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_vidc_g_crop);
+
int msm_vidc_g_ctrl(void *instance, struct v4l2_control *control)
{
struct msm_vidc_inst *inst = instance;
@@ -534,7 +557,7 @@ static struct msm_smem *map_buffer(struct msm_vidc_inst *inst,
handle = msm_comm_smem_user_to_kernel(inst,
p->reserved[0],
- p->reserved[1],
+ p->length,
buffer_type);
if (!handle) {
dprintk(VIDC_ERR,
@@ -605,8 +628,10 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
goto exit;
}
- dprintk(VIDC_DBG, "[MAP] Create binfo = %pK fd = %d type = %d\n",
- binfo, b->m.planes[0].reserved[0], b->type);
+ dprintk(VIDC_DBG,
+ "[MAP] Create binfo = %pK fd = %d size = %d type = %d\n",
+ binfo, b->m.planes[0].reserved[0],
+ b->m.planes[0].length, b->type);
for (i = 0; i < b->length; ++i) {
rc = 0;
@@ -878,6 +903,7 @@ int msm_vidc_release_buffer(void *instance, int buffer_type,
struct buffer_info *bi, *dummy;
int i, rc = 0;
int found_buf = 0;
+ struct vb2_buf_entry *temp, *next;
if (!inst)
return -EINVAL;
@@ -936,6 +962,16 @@ int msm_vidc_release_buffer(void *instance, int buffer_type,
default:
break;
}
+
+ mutex_lock(&inst->pendingq.lock);
+ list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
+ if (temp->vb->type == buffer_type) {
+ list_del(&temp->list);
+ kfree(temp);
+ }
+ }
+ mutex_unlock(&inst->pendingq.lock);
+
return rc;
}
EXPORT_SYMBOL(msm_vidc_release_buffer);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index b1a8e8b..fe61e6f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1166,12 +1166,12 @@ static int wait_for_sess_signal_receipt(struct msm_vidc_inst *inst,
if (!rc) {
dprintk(VIDC_ERR, "Wait interrupted or timed out: %d\n",
SESSION_MSG_INDEX(cmd));
- msm_comm_kill_session(inst);
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
dprintk(VIDC_ERR,
"sess resp timeout can potentially crash the system\n");
msm_comm_print_debug_info(inst);
msm_vidc_handle_hw_error(inst->core);
+ msm_comm_kill_session(inst);
rc = -EIO;
} else {
rc = 0;
@@ -1554,6 +1554,14 @@ static void handle_event_change(enum hal_command_response cmd, void *data)
inst->entropy_mode = event_notify->entropy_mode;
inst->profile = event_notify->profile;
inst->level = event_notify->level;
+ inst->prop.crop_info.left =
+ event_notify->crop_data.left;
+ inst->prop.crop_info.top =
+ event_notify->crop_data.top;
+ inst->prop.crop_info.height =
+ event_notify->crop_data.height;
+ inst->prop.crop_info.width =
+ event_notify->crop_data.width;
ptr = (u32 *)seq_changed_event.u.data;
ptr[0] = event_notify->height;
@@ -1561,6 +1569,10 @@ static void handle_event_change(enum hal_command_response cmd, void *data)
ptr[2] = event_notify->bit_depth;
ptr[3] = event_notify->pic_struct;
ptr[4] = event_notify->colour_space;
+ ptr[5] = event_notify->crop_data.top;
+ ptr[6] = event_notify->crop_data.left;
+ ptr[7] = event_notify->crop_data.height;
+ ptr[8] = event_notify->crop_data.width;
dprintk(VIDC_DBG,
"Event payload: height = %d width = %d\n",
@@ -1571,6 +1583,13 @@ static void handle_event_change(enum hal_command_response cmd, void *data)
event_notify->bit_depth, event_notify->pic_struct,
event_notify->colour_space);
+ dprintk(VIDC_DBG,
+ "Event payload: CROP top = %d left = %d Height = %d Width = %d\n",
+ event_notify->crop_data.top,
+ event_notify->crop_data.left,
+ event_notify->crop_data.height,
+ event_notify->crop_data.width);
+
mutex_lock(&inst->lock);
inst->in_reconfig = true;
inst->reconfig_height = event_notify->height;
@@ -4245,14 +4264,13 @@ int msm_comm_try_get_prop(struct msm_vidc_inst *inst, enum hal_property ptype,
__func__, inst,
SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO));
inst->state = MSM_VIDC_CORE_INVALID;
- msm_comm_kill_session(inst);
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
dprintk(VIDC_ERR,
"SESS_PROP timeout can potentially crash the system\n");
- if (inst->core->resources.debug_timeout)
- msm_comm_print_debug_info(inst);
+ msm_comm_print_debug_info(inst);
msm_vidc_handle_hw_error(inst->core);
+ msm_comm_kill_session(inst);
rc = -ETIMEDOUT;
goto exit;
} else {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 4d43cbb..c197776 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -12,6 +12,7 @@
*/
#define CREATE_TRACE_POINTS
+#define MAX_SSR_STRING_LEN 10
#include "msm_vidc_debug.h"
#include "vidc_hfi_api.h"
@@ -31,7 +32,7 @@ int msm_vidc_firmware_unload_delay = 15000;
bool msm_vidc_thermal_mitigation_disabled = !true;
bool msm_vidc_clock_scaling = true;
bool msm_vidc_debug_timeout = !true;
-bool msm_vidc_syscache_disable = true;
+bool msm_vidc_syscache_disable = !true;
#define MAX_DBG_BUF_SIZE 4096
@@ -134,21 +135,36 @@ static int trigger_ssr_open(struct inode *inode, struct file *file)
static ssize_t trigger_ssr_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos) {
- u32 ssr_trigger_val;
- int rc;
+ unsigned long ssr_trigger_val = 0;
+ int rc = 0;
struct msm_vidc_core *core = filp->private_data;
+ size_t size = MAX_SSR_STRING_LEN;
+ char kbuf[MAX_SSR_STRING_LEN + 1] = {0};
if (!buf)
return -EINVAL;
- rc = kstrtou32(buf, 0, &ssr_trigger_val);
- if (rc < 0) {
+ if (!count)
+ goto exit;
+
+ if (count < size)
+ size = count;
+
+ if (copy_from_user(kbuf, buf, size)) {
+ dprintk(VIDC_WARN, "%s User memory fault\n", __func__);
+ rc = -EFAULT;
+ goto exit;
+ }
+
+ rc = kstrtoul(kbuf, 0, &ssr_trigger_val);
+ if (rc) {
dprintk(VIDC_WARN, "returning error err %d\n", rc);
rc = -EINVAL;
} else {
msm_vidc_trigger_ssr(core, ssr_trigger_val);
rc = count;
}
+exit:
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
index 8fd895d..f4c851a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
@@ -186,7 +186,7 @@ static inline void msm_vidc_handle_hw_error(struct msm_vidc_core *core)
{
bool enable_fatal;
- enable_fatal = core->resources.debug_timeout;
+ enable_fatal = msm_vidc_debug_timeout;
/* Video driver can decide FATAL handling of HW errors
* based on multiple factors. This condition check will
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 37bccbd..5edd3d5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -175,9 +175,17 @@ struct msm_video_device {
struct video_device vdev;
};
+struct session_crop {
+ u32 left;
+ u32 top;
+ u32 width;
+ u32 height;
+};
+
struct session_prop {
u32 width[MAX_PORT_NUM];
u32 height[MAX_PORT_NUM];
+ struct session_crop crop_info;
u32 fps;
u32 bitrate;
};
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index d259072..19ca561 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -969,7 +969,7 @@ int read_platform_resources_from_dt(
res->debug_timeout = of_property_read_bool(pdev->dev.of_node,
"qcom,debug-timeout");
- res->debug_timeout |= msm_vidc_debug_timeout;
+ msm_vidc_debug_timeout |= res->debug_timeout;
of_property_read_u32(pdev->dev.of_node,
"qcom,pm-qos-latency-us", &res->pm_qos_latency_us);
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 8968764..6139e46 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1049,8 +1049,12 @@ static int venus_hfi_suspend(void *dev)
}
dprintk(VIDC_DBG, "Suspending Venus\n");
- rc = flush_delayed_work(&venus_hfi_pm_work);
+ flush_delayed_work(&venus_hfi_pm_work);
+ mutex_lock(&device->lock);
+ if (device->power_enabled)
+ rc = -EBUSY;
+ mutex_unlock(&device->lock);
return rc;
}
@@ -4168,7 +4172,7 @@ static int venus_hfi_get_fw_info(void *dev, struct hal_fw_info *fw_info)
struct venus_hfi_device *device = dev;
u32 smem_block_size = 0;
u8 *smem_table_ptr;
- char version[VENUS_VERSION_LENGTH];
+ char version[VENUS_VERSION_LENGTH] = "";
const u32 smem_image_index_venus = 14 * 128;
if (!device || !fw_info) {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index a2f076b..86e4f42 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -154,7 +154,7 @@ enum hal_property {
HAL_PARAM_VENC_SESSION_QP_RANGE,
HAL_CONFIG_VENC_INTRA_PERIOD,
HAL_CONFIG_VENC_IDR_PERIOD,
- HAL_CONFIG_VPE_OPERATIONS,
+ HAL_PARAM_VPE_ROTATION,
HAL_PARAM_VENC_INTRA_REFRESH,
HAL_PARAM_VENC_MULTI_SLICE_CONTROL,
HAL_SYS_DEBUG_CONFIG,
@@ -634,7 +634,7 @@ enum hal_flip {
HAL_UNUSED_FLIP = 0x10000000,
};
-struct hal_operations {
+struct hal_vpe_rotation {
enum hal_rotate rotate;
enum hal_flip flip;
};
@@ -1019,7 +1019,7 @@ union hal_get_property {
struct hal_quantization_range quantization_range;
struct hal_intra_period intra_period;
struct hal_idr_period idr_period;
- struct hal_operations operations;
+ struct hal_vpe_rotation vpe_rotation;
struct hal_intra_refresh intra_refresh;
struct hal_multi_slice_control multi_slice_control;
struct hal_debug_config debug_config;
@@ -1212,6 +1212,16 @@ struct msm_vidc_cb_cmd_done {
} data;
};
+struct hal_index_extradata_input_crop_payload {
+ u32 size;
+ u32 version;
+ u32 port_index;
+ u32 left;
+ u32 top;
+ u32 width;
+ u32 height;
+};
+
struct msm_vidc_cb_event {
u32 device_id;
void *session_id;
@@ -1227,6 +1237,8 @@ struct msm_vidc_cb_event {
u32 profile;
u32 level;
u32 entropy_mode;
+ u32 capture_buf_count;
+ struct hal_index_extradata_input_crop_payload crop_data;
};
struct msm_vidc_cb_data_done {
@@ -1314,16 +1326,6 @@ struct vidc_clk_scale_data {
int num_sessions;
};
-struct hal_index_extradata_input_crop_payload {
- u32 size;
- u32 version;
- u32 port_index;
- u32 left;
- u32 top;
- u32 width;
- u32 height;
-};
-
struct hal_cmd_sys_get_property_packet {
u32 size;
u32 packet_type;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 2d4a573..616fc09 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -327,8 +327,6 @@ struct hfi_buffer_info {
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x004)
#define HFI_PROPERTY_CONFIG_VENC_SLICE_SIZE \
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x005)
-#define HFI_PROPERTY_PARAM_VPE_COMMON_START \
- (HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000)
#define HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER \
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x008)
#define HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME \
@@ -344,13 +342,15 @@ struct hfi_buffer_info {
#define HFI_PROPERTY_CONFIG_VENC_SESSION_QP \
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x012)
+#define HFI_PROPERTY_PARAM_VPE_COMMON_START \
+ (HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000)
+#define HFI_PROPERTY_PARAM_VPE_ROTATION \
+ (HFI_PROPERTY_PARAM_VPE_COMMON_START + 0x001)
#define HFI_PROPERTY_CONFIG_VPE_COMMON_START \
(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000)
#define HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE \
(HFI_PROPERTY_CONFIG_COMMON_START + 0x010)
-#define HFI_PROPERTY_CONFIG_VPE_OPERATIONS \
- (HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x002)
struct hfi_pic_struct {
u32 progressive_only;
@@ -472,7 +472,7 @@ struct hfi_idr_period {
u32 idr_period;
};
-struct hfi_operations_type {
+struct hfi_vpe_rotation_type {
u32 rotation;
u32 flip;
};
@@ -716,12 +716,7 @@ struct hfi_vpe_color_space_conversion {
#define HFI_FLIP_NONE (HFI_COMMON_BASE + 0x1)
#define HFI_FLIP_HORIZONTAL (HFI_COMMON_BASE + 0x2)
-#define HFI_FLIP_VERTICAL (HFI_COMMON_BASE + 0x3)
-
-struct hfi_operations {
- u32 rotate;
- u32 flip;
-};
+#define HFI_FLIP_VERTICAL (HFI_COMMON_BASE + 0x4)
#define HFI_RESOURCE_SYSCACHE 0x00000002
diff --git a/drivers/mfd/wcd9xxx-irq.c b/drivers/mfd/wcd9xxx-irq.c
index 30ad689..0502e39d 100644
--- a/drivers/mfd/wcd9xxx-irq.c
+++ b/drivers/mfd/wcd9xxx-irq.c
@@ -293,7 +293,7 @@ static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
struct wcd9xxx_core_resource *wcd9xxx_res = data;
int num_irq_regs = wcd9xxx_res->num_irq_regs;
- u8 status[num_irq_regs], status1[num_irq_regs];
+ u8 status[4], status1[4] = {0}, unmask_status[4] = {0};
if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
@@ -317,6 +317,23 @@ static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
"Failed to read interrupt status: %d\n", ret);
goto err_disable_irq;
}
+ /*
+ * If status is 0, return without clearing.
+ * status contains: HW status - masked interrupts
+ * status1 contains: unhandled interrupts - masked interrupts
+ * unmask_status contains: unhandled interrupts
+ */
+ if (unlikely(!memcmp(status, status1, sizeof(status)))) {
+ pr_debug("%s: status is 0\n", __func__);
+ wcd9xxx_unlock_sleep(wcd9xxx_res);
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * Copy status to unmask_status before masking; otherwise SW may fail
+ * to clear a masked interrupt in a corner case.
+ */
+ memcpy(unmask_status, status, sizeof(unmask_status));
/* Apply masking */
for (i = 0; i < num_irq_regs; i++)
@@ -340,6 +357,8 @@ static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
status1[BIT_BYTE(irqdata.intr_num)] &=
~BYTE_BIT_MASK(irqdata.intr_num);
+ unmask_status[BIT_BYTE(irqdata.intr_num)] &=
+ ~BYTE_BIT_MASK(irqdata.intr_num);
}
}
@@ -361,12 +380,13 @@ static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
linebuf, sizeof(linebuf), false);
pr_warn("%s: status1 : %s\n", __func__, linebuf);
}
-
- memset(status, 0xff, num_irq_regs);
-
+ /*
+ * unmask_status contains unhandled interrupts, hence clear all
+ * unhandled interrupts.
+ */
ret = regmap_bulk_write(wcd9xxx_res->wcd_core_regmap,
wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE],
- status, num_irq_regs);
+ unmask_status, num_irq_regs);
if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
regmap_write(wcd9xxx_res->wcd_core_regmap,
wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
index 06e0dc3..4c9fa8f 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
@@ -143,7 +143,8 @@ static int audio_aio_ion_lookup_vaddr(struct q6audio_aio *audio, void *addr,
list) {
if (addr >= region_elt->vaddr &&
addr < region_elt->vaddr + region_elt->len &&
- addr + len <= region_elt->vaddr + region_elt->len)
+ addr + len <= region_elt->vaddr + region_elt->len &&
+ addr + len > addr)
pr_err("\t%s[%pK]:%pK, %ld --> %pK\n",
__func__, audio,
region_elt->vaddr,
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 877c4d1..c1857c7 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1261,7 +1261,7 @@ static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
atomic_read(&data->ioctl_count) <= 1)) {
pr_err("Interrupted from abort\n");
ret = -ERESTARTSYS;
- break;
+ return ret;
}
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index d8e9599..9ac6568 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1718,6 +1718,8 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
/* We couldn't get a response from the card. Give up. */
if (err) {
+ if (card->err_in_sdr104)
+ return ERR_RETRY;
/* Check if the card is removed */
if (mmc_detect_card_removed(card->host))
return ERR_NOMEDIUM;
@@ -2208,7 +2210,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
brq->data.error == -ETIMEDOUT ||
brq->cmd.error == -EILSEQ ||
brq->cmd.error == -EIO ||
- brq->cmd.error == -ETIMEDOUT))
+ brq->cmd.error == -ETIMEDOUT ||
+ brq->sbc.error))
card->err_in_sdr104 = true;
/*
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 790f191..8b1b0a0 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -456,6 +456,22 @@ int mmc_clk_update_freq(struct mmc_host *host,
}
EXPORT_SYMBOL(mmc_clk_update_freq);
+void mmc_recovery_fallback_lower_speed(struct mmc_host *host)
+{
+ if (!host->card)
+ return;
+
+ if (host->sdr104_wa && mmc_card_sd(host->card) &&
+ (host->ios.timing == MMC_TIMING_UHS_SDR104) &&
+ !host->card->sdr104_blocked) {
+ pr_err("%s: %s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
+ mmc_hostname(host), __func__);
+ mmc_host_clear_sdr104(host);
+ mmc_hw_reset(host);
+ host->card->sdr104_blocked = true;
+ }
+}
+
static int mmc_devfreq_set_target(struct device *dev,
unsigned long *freq, u32 devfreq_flags)
{
@@ -507,6 +523,9 @@ static int mmc_devfreq_set_target(struct device *dev,
if (abort)
goto out;
+ if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+ goto rel_host;
+
/*
* In case we were able to claim host there is no need to
* defer the frequency change. It will be done now
@@ -515,15 +534,18 @@ static int mmc_devfreq_set_target(struct device *dev,
mmc_host_clk_hold(host);
err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
- if (err && err != -EAGAIN)
+ if (err && err != -EAGAIN) {
pr_err("%s: clock scale to %lu failed with error %d\n",
mmc_hostname(host), *freq, err);
- else
+ mmc_recovery_fallback_lower_speed(host);
+ } else {
pr_debug("%s: clock change to %lu finished successfully (%s)\n",
mmc_hostname(host), *freq, current->comm);
+ }
mmc_host_clk_release(host);
+rel_host:
mmc_release_host(host);
out:
return err;
@@ -544,6 +566,9 @@ void mmc_deferred_scaling(struct mmc_host *host)
if (!host->clk_scaling.enable)
return;
+ if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+ return;
+
spin_lock_bh(&host->clk_scaling.lock);
if (host->clk_scaling.clk_scaling_in_progress ||
@@ -564,13 +589,15 @@ void mmc_deferred_scaling(struct mmc_host *host)
err = mmc_clk_update_freq(host, target_freq,
host->clk_scaling.state);
- if (err && err != -EAGAIN)
+ if (err && err != -EAGAIN) {
pr_err("%s: failed on deferred scale clocks (%d)\n",
mmc_hostname(host), err);
- else
+ mmc_recovery_fallback_lower_speed(host);
+ } else {
pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
mmc_hostname(host),
target_freq, current->comm);
+ }
host->clk_scaling.clk_scaling_in_progress = false;
atomic_dec(&host->clk_scaling.devfreq_abort);
}
@@ -1540,8 +1567,13 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
}
}
if (!cmd->error || !cmd->retries ||
- mmc_card_removed(host->card))
+ mmc_card_removed(host->card)) {
+ if (cmd->error && !cmd->retries &&
+ cmd->opcode != MMC_SEND_STATUS &&
+ cmd->opcode != MMC_SEND_TUNING_BLOCK)
+ mmc_recovery_fallback_lower_speed(host);
break;
+ }
mmc_retune_recheck(host);
@@ -2368,6 +2400,13 @@ void mmc_ungate_clock(struct mmc_host *host)
WARN_ON(host->ios.clock);
/* This call will also set host->clk_gated to false */
__mmc_set_clock(host, host->clk_old);
+ /*
+ * We have seen that host controller's clock tuning circuit may
+ * go out of sync if controller clocks are gated.
+ * To work around this issue, we trigger retuning of the
+ * tuning circuit after ungating the controller clocks.
+ */
+ mmc_retune_needed(host);
}
}
@@ -4189,12 +4228,18 @@ int _mmc_detect_card_removed(struct mmc_host *host)
}
if (ret) {
- mmc_card_set_removed(host->card);
- if (host->card->sdr104_blocked) {
- mmc_host_set_sdr104(host);
- host->card->sdr104_blocked = false;
+ if (host->ops->get_cd && host->ops->get_cd(host)) {
+ mmc_recovery_fallback_lower_speed(host);
+ ret = 0;
+ } else {
+ mmc_card_set_removed(host->card);
+ if (host->card->sdr104_blocked) {
+ mmc_host_set_sdr104(host);
+ host->card->sdr104_blocked = false;
+ }
+ pr_debug("%s: card remove detected\n",
+ mmc_hostname(host));
}
- pr_debug("%s: card remove detected\n", mmc_hostname(host));
}
return ret;
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 0d0d56f..0c8ff86 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -338,10 +338,15 @@ static int mmc_force_err_set(void *data, u64 val)
{
struct mmc_host *host = data;
- if (host && host->ops && host->ops->force_err_irq) {
- mmc_host_clk_hold(host);
+ if (host && host->card && host->ops &&
+ host->ops->force_err_irq) {
+ /*
+ * To access the force error irq reg, we need to make
+ * sure the host is powered up and host clock is ticking.
+ */
+ mmc_get_card(host->card);
host->ops->force_err_irq(host, val);
- mmc_host_clk_release(host);
+ mmc_put_card(host->card);
}
return 0;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index a83f8f6..5610c4f 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1665,6 +1665,42 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
return wil_ps_update(wil, ps_profile);
}
+static int wil_cfg80211_suspend(struct wiphy *wiphy,
+ struct cfg80211_wowlan *wow)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ /* Setting the wakeup trigger based on wow is TBD */
+
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_dbg_pm(wil, "trying to suspend while suspended\n");
+ return 0;
+ }
+
+ rc = wil_can_suspend(wil, false);
+ if (rc)
+ goto out;
+
+ wil_dbg_pm(wil, "suspending\n");
+
+ wil_p2p_stop_discovery(wil);
+
+ wil_abort_scan(wil, true);
+
+out:
+ return rc;
+}
+
+static int wil_cfg80211_resume(struct wiphy *wiphy)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_pm(wil, "resuming\n");
+
+ return 0;
+}
+
static struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
@@ -1696,6 +1732,8 @@ static struct cfg80211_ops wil_cfg80211_ops = {
.start_p2p_device = wil_cfg80211_start_p2p_device,
.stop_p2p_device = wil_cfg80211_stop_p2p_device,
.set_power_mgmt = wil_cfg80211_set_power_mgmt,
+ .suspend = wil_cfg80211_suspend,
+ .resume = wil_cfg80211_resume,
};
static void wil_wiphy_init(struct wiphy *wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 5648ebb..0ac657d 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -509,6 +509,10 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
void *buf;
size_t ret;
+ if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
+ test_bit(wil_status_suspended, wil_blob->wil->status))
+ return 0;
+
if (pos < 0)
return -EINVAL;
@@ -1604,6 +1608,49 @@ static const struct file_operations fops_fw_version = {
.llseek = seq_lseek,
};
+/*---------suspend_stats---------*/
+static ssize_t wil_write_suspend_stats(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+
+ memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
+
+ return len;
+}
+
+static ssize_t wil_read_suspend_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ static char text[400];
+ int n;
+
+ n = snprintf(text, sizeof(text),
+ "Suspend statistics:\n"
+ "successful suspends:%ld failed suspends:%ld\n"
+ "successful resumes:%ld failed resumes:%ld\n"
+ "rejected by host:%ld rejected by device:%ld\n",
+ wil->suspend_stats.successful_suspends,
+ wil->suspend_stats.failed_suspends,
+ wil->suspend_stats.successful_resumes,
+ wil->suspend_stats.failed_resumes,
+ wil->suspend_stats.rejected_by_host,
+ wil->suspend_stats.rejected_by_device);
+
+ n = min_t(int, n, sizeof(text));
+
+ return simple_read_from_buffer(user_buf, count, ppos, text, n);
+}
+
+static const struct file_operations fops_suspend_stats = {
+ .read = wil_read_suspend_stats,
+ .write = wil_write_suspend_stats,
+ .open = simple_open,
+};
+
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@@ -1656,6 +1703,7 @@ static const struct {
{"led_blink_time", 0644, &fops_led_blink_time},
{"fw_capabilities", 0444, &fops_fw_capabilities},
{"fw_version", 0444, &fops_fw_version},
+ {"suspend_stats", 0644, &fops_suspend_stats},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1702,6 +1750,7 @@ static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(discovery_mode, 0644, doff_u8),
WIL_FIELD(chip_revision, 0444, doff_u8),
WIL_FIELD(abft_len, 0644, doff_u8),
+ WIL_FIELD(wakeup_trigger, 0644, doff_u8),
{},
};
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index cab1e5c..cad8a95c4 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -467,6 +467,12 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
wil6210_unmask_irq_pseudo(wil);
+ if (wil->suspend_resp_rcvd) {
+ wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
+ wil->suspend_resp_comp = true;
+ wake_up_interruptible(&wil->wq);
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 1fc4580..aff8b1b 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -579,6 +579,9 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT;
+ wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
+ WMI_WAKEUP_TRIGGER_BCAST;
+
return 0;
out_wmi_wq:
@@ -589,8 +592,10 @@ int wil_priv_init(struct wil6210_priv *wil)
void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
{
- if (wil->platform_ops.bus_request)
+ if (wil->platform_ops.bus_request) {
+ wil->bus_request_kbps = kbps;
wil->platform_ops.bus_request(wil->platform_handle, kbps);
+ }
}
/**
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 1afed52..03246a9 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -112,8 +112,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
wil_dbg_misc(wil, "if_pcie_enable, wmi_only %d\n", wmi_only);
- pdev->msi_enabled = 0;
-
pci_set_master(pdev);
wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
@@ -249,7 +247,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
rc = pci_enable_device(pdev);
- if (rc) {
+ if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
"pci_enable_device failed, retry with MSI only\n");
/* Work around for platforms that can't allocate IRQ:
@@ -264,6 +262,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_plat;
}
/* rollback to err_disable_pdev */
+ pci_set_power_state(pdev, PCI_D0);
rc = pci_request_region(pdev, 0, WIL_NAME);
if (rc) {
@@ -284,6 +283,15 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wil_set_capabilities(wil);
wil6210_clear_irq(wil);
+ wil->keep_radio_on_during_sleep =
+ wil->platform_ops.keep_radio_on_during_sleep &&
+ wil->platform_ops.keep_radio_on_during_sleep(
+ wil->platform_handle) &&
+ test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
+
+ wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
+ wil->keep_radio_on_during_sleep);
+
/* FW should raise IRQ when ready */
rc = wil_if_pcie_enable(wil);
if (rc) {
@@ -383,15 +391,16 @@ static int wil6210_suspend(struct device *dev, bool is_runtime)
goto out;
rc = wil_suspend(wil, is_runtime);
- if (rc)
- goto out;
+ if (!rc) {
+ wil->suspend_stats.successful_suspends++;
- /* TODO: how do I bring card in low power state? */
-
- /* disable bus mastering */
- pci_clear_master(pdev);
- /* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */
-
+ /* If the platform device supports keep_radio_on_during_sleep,
+ * it will control PCIe bus mastering
+ */
+ if (!wil->keep_radio_on_during_sleep)
+ /* disable bus mastering */
+ pci_clear_master(pdev);
+ }
out:
return rc;
}
@@ -404,12 +413,21 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
- /* allow master */
- pci_set_master(pdev);
-
+ /* If the platform device supports keep_radio_on_during_sleep, it will
+ * control PCIe bus mastering
+ */
+ if (!wil->keep_radio_on_during_sleep)
+ /* allow master */
+ pci_set_master(pdev);
rc = wil_resume(wil, is_runtime);
- if (rc)
- pci_clear_master(pdev);
+ if (rc) {
+ wil_err(wil, "device failed to resume (%d)\n", rc);
+ wil->suspend_stats.failed_resumes++;
+ if (!wil->keep_radio_on_during_sleep)
+ pci_clear_master(pdev);
+ } else {
+ wil->suspend_stats.successful_resumes++;
+ }
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 2ae4fe8..015dc3c 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -15,6 +15,7 @@
*/
#include "wil6210.h"
+#include <linux/jiffies.h>
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
{
@@ -61,20 +62,164 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n",
is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
+ if (rc)
+ wil->suspend_stats.rejected_by_host++;
+
return rc;
}
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+static int wil_resume_keep_radio_on(struct wil6210_priv *wil)
+{
+ int rc = 0;
+
+ /* wil_status_resuming will be cleared when getting
+ * WMI_TRAFFIC_RESUME_EVENTID
+ */
+ set_bit(wil_status_resuming, wil->status);
+ clear_bit(wil_status_suspended, wil->status);
+ wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+ wil_unmask_irq(wil);
+
+ /* Send WMI resume request to the device */
+ rc = wmi_resume(wil);
+ if (rc) {
+ wil_err(wil, "device failed to resume (%d), resetting\n", rc);
+ rc = wil_down(wil);
+ if (rc) {
+ wil_err(wil, "wil_down failed (%d)\n", rc);
+ goto out;
+ }
+ rc = wil_up(wil);
+ if (rc) {
+ wil_err(wil, "wil_up failed (%d)\n", rc);
+ goto out;
+ }
+ }
+
+ wil6210_bus_request(wil, wil->bus_request_kbps_pre_suspend);
+
+out:
+ if (rc)
+ set_bit(wil_status_suspended, wil->status);
+ return rc;
+}
+
+static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
+{
+ int rc = 0;
+ unsigned long start, data_comp_to;
+
+ wil_dbg_pm(wil, "suspend keep radio on\n");
+
+ /* Prevent handling of new tx and wmi commands */
+ set_bit(wil_status_suspending, wil->status);
+
+ if (!wil_is_tx_idle(wil)) {
+ wil_dbg_pm(wil, "Pending TX data, reject suspend\n");
+ wil->suspend_stats.rejected_by_host++;
+ goto reject_suspend;
+ }
+
+ if (!wil_is_rx_idle(wil)) {
+ wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
+ wil->suspend_stats.rejected_by_host++;
+ goto reject_suspend;
+ }
+
+ if (!wil_is_wmi_idle(wil)) {
+ wil_dbg_pm(wil, "Pending WMI events, reject suspend\n");
+ wil->suspend_stats.rejected_by_host++;
+ goto reject_suspend;
+ }
+
+ /* Send WMI suspend request to the device */
+ rc = wmi_suspend(wil);
+ if (rc) {
+ wil_dbg_pm(wil, "wmi_suspend failed, reject suspend (%d)\n",
+ rc);
+ goto reject_suspend;
+ }
+
+ /* Wait for completion of the pending RX packets */
+ start = jiffies;
+ data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
+ if (test_bit(wil_status_napi_en, wil->status)) {
+ while (!wil_is_rx_idle(wil)) {
+ if (time_after(jiffies, data_comp_to)) {
+ if (wil_is_rx_idle(wil))
+ break;
+ wil_err(wil,
+ "TO waiting for idle RX, suspend failed\n");
+ wil->suspend_stats.failed_suspends++;
+ goto resume_after_fail;
+ }
+ wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
+ napi_synchronize(&wil->napi_rx);
+ msleep(20);
+ }
+ }
+
+ /* In case of pending WMI events, reject the suspend
+ * and resume the device.
+ * This can happen if the device sent the WMI events before
+ * approving the suspend.
+ */
+ if (!wil_is_wmi_idle(wil)) {
+ wil_err(wil, "suspend failed due to pending WMI events\n");
+ wil->suspend_stats.failed_suspends++;
+ goto resume_after_fail;
+ }
+
+ wil_mask_irq(wil);
+
+ /* Disable device reset on PERST */
+ wil_s(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+
+ /* Save the current bus request so it can be restored on resume */
+ wil->bus_request_kbps_pre_suspend = wil->bus_request_kbps;
+ wil6210_bus_request(wil, 0);
+
+ if (wil->platform_ops.suspend) {
+ rc = wil->platform_ops.suspend(wil->platform_handle, true);
+ if (rc) {
+ wil_err(wil, "platform device failed to suspend (%d)\n",
+ rc);
+ wil->suspend_stats.failed_suspends++;
+ clear_bit(wil_status_suspending, wil->status);
+ rc = wil_resume_keep_radio_on(wil);
+ /* if resume succeeded, reject the suspend */
+ if (!rc)
+ rc = -EBUSY;
+ goto out;
+ }
+ }
+
+ set_bit(wil_status_suspended, wil->status);
+ clear_bit(wil_status_suspending, wil->status);
+
+ return rc;
+
+resume_after_fail:
+ clear_bit(wil_status_suspending, wil->status);
+ rc = wmi_resume(wil);
+ /* if resume succeeded, reject the suspend */
+ if (!rc)
+ rc = -EBUSY;
+
+out:
+ return rc;
+
+reject_suspend:
+ clear_bit(wil_status_suspending, wil->status);
+ return -EBUSY;
+}
+
+static int wil_suspend_radio_off(struct wil6210_priv *wil)
{
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
- wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
-
- if (test_bit(wil_status_suspended, wil->status)) {
- wil_dbg_pm(wil, "trying to suspend while suspended\n");
- return 0;
- }
+ wil_dbg_pm(wil, "suspend radio off\n");
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
@@ -90,7 +235,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
wil_disable_irq(wil);
if (wil->platform_ops.suspend) {
- rc = wil->platform_ops.suspend(wil->platform_handle);
+ rc = wil->platform_ops.suspend(wil->platform_handle, false);
if (rc) {
wil_enable_irq(wil);
goto out;
@@ -100,6 +245,50 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
set_bit(wil_status_suspended, wil->status);
out:
+ wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
+
+ return rc;
+}
+
+static int wil_resume_radio_off(struct wil6210_priv *wil)
+{
+ int rc = 0;
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
+ wil_enable_irq(wil);
+ /* if netif up, bring hardware up
+ * During open(), IFF_UP is set after the actual device method
+ * invocation. This prevents a recursive call to wil_up().
+ * wil_status_suspended will be cleared in wil_reset
+ */
+ if (ndev->flags & IFF_UP)
+ rc = wil_up(wil);
+ else
+ clear_bit(wil_status_suspended, wil->status);
+
+ return rc;
+}
+
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+ int rc = 0;
+ struct net_device *ndev = wil_to_ndev(wil);
+ bool keep_radio_on = ndev->flags & IFF_UP &&
+ wil->keep_radio_on_during_sleep;
+
+ wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
+
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_dbg_pm(wil, "trying to suspend while suspended\n");
+ return 0;
+ }
+
+ if (!keep_radio_on)
+ rc = wil_suspend_radio_off(wil);
+ else
+ rc = wil_suspend_keep_radio_on(wil);
+
wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
@@ -110,29 +299,24 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
{
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
+ bool keep_radio_on = ndev->flags & IFF_UP &&
+ wil->keep_radio_on_during_sleep;
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
if (wil->platform_ops.resume) {
- rc = wil->platform_ops.resume(wil->platform_handle);
+ rc = wil->platform_ops.resume(wil->platform_handle,
+ keep_radio_on);
if (rc) {
wil_err(wil, "platform_ops.resume : %d\n", rc);
goto out;
}
}
- wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
- wil_enable_irq(wil);
-
- /* if netif up, bring hardware up
- * During open(), IFF_UP set after actual device method
- * invocation. This prevent recursive call to wil_up().
- * wil_status_suspended will be cleared in wil_reset
- */
- if (ndev->flags & IFF_UP)
- rc = wil_up(wil);
+ if (keep_radio_on)
+ rc = wil_resume_keep_radio_on(wil);
else
- clear_bit(wil_status_suspended, wil->status);
+ rc = wil_resume_radio_off(wil);
out:
wil_dbg_pm(wil, "resume: %s => %d\n",
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 35bbf3a..439fe30 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -104,6 +104,51 @@ static inline int wil_vring_avail_high(struct vring *vring)
return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
}
+/* returns true when all tx vrings are empty */
+bool wil_is_tx_idle(struct wil6210_priv *wil)
+{
+ int i;
+ unsigned long data_comp_to;
+
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ struct vring *vring = &wil->vring_tx[i];
+ int vring_index = vring - wil->vring_tx;
+ struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+
+ spin_lock(&txdata->lock);
+
+ if (!vring->va || !txdata->enabled) {
+ spin_unlock(&txdata->lock);
+ continue;
+ }
+
+ data_comp_to = jiffies + msecs_to_jiffies(
+ WIL_DATA_COMPLETION_TO_MS);
+ if (test_bit(wil_status_napi_en, wil->status)) {
+ while (!wil_vring_is_empty(vring)) {
+ if (time_after(jiffies, data_comp_to)) {
+ wil_dbg_pm(wil,
+ "TO waiting for idle tx\n");
+ spin_unlock(&txdata->lock);
+ return false;
+ }
+ wil_dbg_ratelimited(wil,
+ "tx vring is not empty -> NAPI\n");
+ spin_unlock(&txdata->lock);
+ napi_synchronize(&wil->napi_tx);
+ msleep(20);
+ spin_lock(&txdata->lock);
+ if (!vring->va || !txdata->enabled)
+ break;
+ }
+ }
+
+ spin_unlock(&txdata->lock);
+ }
+
+ return true;
+}
+
/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
@@ -406,6 +451,18 @@ static inline int wil_is_back_req(u8 fc)
(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
}
+bool wil_is_rx_idle(struct wil6210_priv *wil)
+{
+ struct vring_rx_desc *_d;
+ struct vring *vring = &wil->vring_rx;
+
+ _d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx;
+ if (_d->dma.status & RX_DMA_STATUS_DU)
+ return false;
+
+ return true;
+}
+
/**
* reap 1 frame from @swhead
*
@@ -1812,6 +1869,15 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
spin_lock(&txdata->lock);
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status) ||
+ test_bit(wil_status_resuming, wil->status)) {
+ wil_dbg_txrx(wil,
+ "suspend/resume in progress. drop packet\n");
+ spin_unlock(&txdata->lock);
+ return -EINVAL;
+ }
+
rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
(wil, vring, skb);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index ba1c33b..38f61e3 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -83,6 +83,15 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
*/
#define WIL_MAX_MPDU_OVERHEAD (62)
+struct wil_suspend_stats {
+ unsigned long successful_suspends;
+ unsigned long failed_suspends;
+ unsigned long successful_resumes;
+ unsigned long failed_resumes;
+ unsigned long rejected_by_device;
+ unsigned long rejected_by_host;
+};
+
/* Calculate MAC buffer size for the firmware. It includes all overhead,
* as it will go over the air, and need to be 8 byte aligned
*/
@@ -293,6 +302,8 @@ enum {
#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT(1)
#define ISR_MISC_FW_ERROR BIT_DMA_EP_MISC_ICR_FW_INT(3)
+#define WIL_DATA_COMPLETION_TO_MS 200
+
/* Hardware definitions end */
struct fw_map {
u32 from; /* linker address - from, inclusive */
@@ -421,7 +432,9 @@ enum { /* for wil6210_priv.status */
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
wil_status_resetting, /* reset in progress */
+ wil_status_suspending, /* suspend in progress */
wil_status_suspended, /* suspend completed, device is suspended */
+ wil_status_resuming, /* resume in progress */
wil_status_last /* keep last */
};
@@ -686,9 +699,12 @@ struct wil6210_priv {
struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
u8 discovery_mode;
u8 abft_len;
+ u8 wakeup_trigger;
+ struct wil_suspend_stats suspend_stats;
void *platform_handle;
struct wil_platform_ops platform_ops;
+ bool keep_radio_on_during_sleep;
struct pmc_ctx pmc;
@@ -715,6 +731,11 @@ struct wil6210_priv {
struct notifier_block pm_notify;
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM */
+
+ bool suspend_resp_rcvd;
+ bool suspend_resp_comp;
+ u32 bus_request_kbps;
+ u32 bus_request_kbps_pre_suspend;
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -977,6 +998,11 @@ bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+bool wil_is_wmi_idle(struct wil6210_priv *wil);
+int wmi_resume(struct wil6210_priv *wil);
+int wmi_suspend(struct wil6210_priv *wil);
+bool wil_is_tx_idle(struct wil6210_priv *wil);
+bool wil_is_rx_idle(struct wil6210_priv *wil);
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
void wil_fw_core_dump(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index f8c4117..621005b 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -33,10 +33,11 @@ enum wil_platform_event {
*/
struct wil_platform_ops {
int (*bus_request)(void *handle, uint32_t kbps /* KBytes/Sec */);
- int (*suspend)(void *handle);
- int (*resume)(void *handle);
+ int (*suspend)(void *handle, bool keep_device_power);
+ int (*resume)(void *handle, bool device_powered_on);
void (*uninit)(void *handle);
int (*notify)(void *handle, enum wil_platform_event evt);
+ bool (*keep_radio_on_during_sleep)(void *handle);
};
/**
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 8e1825f..ba2b207 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -38,6 +38,8 @@ module_param(led_id, byte, 0444);
MODULE_PARM_DESC(led_id,
" 60G device led enablement. Set the led ID (0-2) to enable");
+#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
+
/**
* WMI event receiving - theory of operations
*
@@ -234,6 +236,16 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
return -EAGAIN;
}
+ /* Allow sending only suspend / resume commands during suspend flow */
+ if ((test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status) ||
+ test_bit(wil_status_resuming, wil->status)) &&
+ ((cmdid != WMI_TRAFFIC_SUSPEND_CMDID) &&
+ (cmdid != WMI_TRAFFIC_RESUME_CMDID))) {
+ wil_err(wil, "WMI: reject send_command during suspend\n");
+ return -EINVAL;
+ }
+
if (!head) {
wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
return -EINVAL;
@@ -893,6 +905,11 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
return;
}
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_err(wil, "suspended. cannot handle WMI event\n");
+ return;
+ }
+
for (n = 0;; n++) {
u16 len;
bool q;
@@ -945,6 +962,15 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
struct wmi_cmd_hdr *wmi = &evt->event.wmi;
u16 id = le16_to_cpu(wmi->command_id);
u32 tstamp = le32_to_cpu(wmi->fw_timestamp);
+ if (test_bit(wil_status_resuming, wil->status)) {
+ if (id == WMI_TRAFFIC_RESUME_EVENTID)
+ clear_bit(wil_status_resuming,
+ wil->status);
+ else
+ wil_err(wil,
+ "WMI evt %d while resuming\n",
+ id);
+ }
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
if (wil->reply_id && wil->reply_id == id) {
if (wil->reply_buf) {
@@ -952,6 +978,11 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
min(len, wil->reply_size));
immed_reply = true;
}
+ if (id == WMI_TRAFFIC_SUSPEND_EVENTID) {
+ wil_dbg_wmi(wil,
+ "set suspend_resp_rcvd\n");
+ wil->suspend_resp_rcvd = true;
+ }
}
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
@@ -1909,6 +1940,85 @@ int wmi_link_maintain_cfg_write(struct wil6210_priv *wil,
return rc;
}
+int wmi_suspend(struct wil6210_priv *wil)
+{
+ int rc;
+ struct wmi_traffic_suspend_cmd cmd = {
+ .wakeup_trigger = wil->wakeup_trigger,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_traffic_suspend_event evt;
+ } __packed reply;
+ u32 suspend_to = WIL_WAIT_FOR_SUSPEND_RESUME_COMP;
+
+ wil->suspend_resp_rcvd = false;
+ wil->suspend_resp_comp = false;
+
+ reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED;
+
+ rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd),
+ WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
+ suspend_to);
+ if (rc) {
+ wil_err(wil, "wmi_call for suspend req failed, rc=%d\n", rc);
+ if (rc == -ETIME)
+ /* wmi_call TO */
+ wil->suspend_stats.rejected_by_device++;
+ else
+ wil->suspend_stats.rejected_by_host++;
+ goto out;
+ }
+
+ wil_dbg_wmi(wil, "waiting for suspend_response_completed\n");
+
+ rc = wait_event_interruptible_timeout(wil->wq,
+ wil->suspend_resp_comp,
+ msecs_to_jiffies(suspend_to));
+ if (rc == 0) {
+ wil_err(wil, "TO waiting for suspend_response_completed\n");
+ if (wil->suspend_resp_rcvd)
+ /* Device responded but we TO due to another reason */
+ wil->suspend_stats.rejected_by_host++;
+ else
+ wil->suspend_stats.rejected_by_device++;
+ rc = -EBUSY;
+ goto out;
+ }
+
+ wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
+ if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) {
+ wil_dbg_pm(wil, "device rejected the suspend\n");
+ wil->suspend_stats.rejected_by_device++;
+ }
+ rc = reply.evt.status;
+
+out:
+ wil->suspend_resp_rcvd = false;
+ wil->suspend_resp_comp = false;
+
+ return rc;
+}
+
+int wmi_resume(struct wil6210_priv *wil)
+{
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_traffic_resume_event evt;
+ } __packed reply;
+
+ reply.evt.status = WMI_TRAFFIC_RESUME_FAILED;
+
+ rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0,
+ WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
+ WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
+ if (rc)
+ return rc;
+
+ return reply.evt.status;
+}
+
static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
void *d, int len)
{
@@ -1998,3 +2108,36 @@ void wmi_event_worker(struct work_struct *work)
}
wil_dbg_wmi(wil, "event_worker: Finished\n");
}
+
+bool wil_is_wmi_idle(struct wil6210_priv *wil)
+{
+ ulong flags;
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
+ bool rc = false;
+
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
+ /* Check if there are pending WMI events in the events queue */
+ if (!list_empty(&wil->pending_wmi_ev)) {
+ wil_dbg_pm(wil, "Pending WMI events in queue\n");
+ goto out;
+ }
+
+ /* Check if there is a pending WMI call */
+ if (wil->reply_id) {
+ wil_dbg_pm(wil, "Pending WMI call\n");
+ goto out;
+ }
+
+ /* Check if there are pending RX events in mbox */
+ r->head = wil_r(wil, RGF_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.head));
+ if (r->tail != r->head)
+ wil_dbg_pm(wil, "Pending WMI mbox events\n");
+ else
+ rc = true;
+
+out:
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+ return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index f7f5f4f..256f63c 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -59,6 +59,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_DISABLE_AP_SME = 4,
WMI_FW_CAPABILITY_WMI_ONLY = 5,
WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7,
+ WMI_FW_CAPABILITY_D3_SUSPEND = 8,
WMI_FW_CAPABILITY_MAX,
};
@@ -157,7 +158,7 @@ enum wmi_command_id {
WMI_FLASH_READ_CMDID = 0x902,
WMI_FLASH_WRITE_CMDID = 0x903,
/* Power management */
- WMI_TRAFFIC_DEFERRAL_CMDID = 0x904,
+ WMI_TRAFFIC_SUSPEND_CMDID = 0x904,
WMI_TRAFFIC_RESUME_CMDID = 0x905,
/* P2P */
WMI_P2P_CFG_CMDID = 0x910,
@@ -500,8 +501,14 @@ struct wmi_port_delete_cmd {
u8 reserved[3];
} __packed;
-/* WMI_TRAFFIC_DEFERRAL_CMDID */
-struct wmi_traffic_deferral_cmd {
+/* WMI_TRAFFIC_SUSPEND_CMD wakeup trigger bit mask values */
+enum wmi_wakeup_trigger {
+ WMI_WAKEUP_TRIGGER_UCAST = 0x01,
+ WMI_WAKEUP_TRIGGER_BCAST = 0x02,
+};
+
+/* WMI_TRAFFIC_SUSPEND_CMDID */
+struct wmi_traffic_suspend_cmd {
/* Bit vector: bit[0] - wake on Unicast, bit[1] - wake on Broadcast */
u8 wakeup_trigger;
} __packed;
@@ -1084,7 +1091,7 @@ enum wmi_event_id {
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
/* Power management */
- WMI_TRAFFIC_DEFERRAL_EVENTID = 0x1904,
+ WMI_TRAFFIC_SUSPEND_EVENTID = 0x1904,
WMI_TRAFFIC_RESUME_EVENTID = 0x1905,
/* P2P */
WMI_P2P_CFG_DONE_EVENTID = 0x1910,
@@ -1926,14 +1933,14 @@ struct wmi_link_maintain_cfg_read_done_event {
struct wmi_link_maintain_cfg lm_cfg;
} __packed;
-enum wmi_traffic_deferral_status {
- WMI_TRAFFIC_DEFERRAL_APPROVED = 0x0,
- WMI_TRAFFIC_DEFERRAL_REJECTED = 0x1,
+enum wmi_traffic_suspend_status {
+ WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
+ WMI_TRAFFIC_SUSPEND_REJECTED = 0x1,
};
-/* WMI_TRAFFIC_DEFERRAL_EVENTID */
-struct wmi_traffic_deferral_event {
- /* enum wmi_traffic_deferral_status_e */
+/* WMI_TRAFFIC_SUSPEND_EVENTID */
+struct wmi_traffic_suspend_event {
+ /* enum wmi_traffic_suspend_status_e */
u8 status;
} __packed;
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 293371b..c5aaac5 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -125,7 +125,6 @@ enum ipa3_usb_state {
IPA_USB_CONNECTED,
IPA_USB_STOPPED,
IPA_USB_SUSPEND_REQUESTED,
- IPA_USB_SUSPEND_IN_PROGRESS,
IPA_USB_SUSPENDED,
IPA_USB_SUSPENDED_NO_RWAKEUP,
IPA_USB_RESUME_IN_PROGRESS
@@ -146,13 +145,6 @@ enum ipa3_usb_transport_type {
#define IPA3_USB_IS_TTYPE_DPL(__ttype) \
((__ttype) == IPA_USB_TRANSPORT_DPL)
-struct finish_suspend_work_context {
- struct work_struct work;
- enum ipa3_usb_transport_type ttype;
- u32 dl_clnt_hdl;
- u32 ul_clnt_hdl;
-};
-
struct ipa3_usb_teth_prot_conn_params {
u32 usb_to_ipa_clnt_hdl;
u32 ipa_to_usb_clnt_hdl;
@@ -168,7 +160,6 @@ struct ipa3_usb_transport_type_ctx {
int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data);
void *user_data;
enum ipa3_usb_state state;
- struct finish_suspend_work_context finish_suspend_work;
struct ipa_usb_xdci_chan_params ch_params;
struct ipa3_usb_teth_prot_conn_params teth_conn_params;
};
@@ -221,16 +212,10 @@ struct ipa3_usb_status_dbg_info {
static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work);
static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work);
-static void ipa3_usb_wq_notify_suspend_completed(struct work_struct *work);
-static void ipa3_usb_wq_dpl_notify_suspend_completed(struct work_struct *work);
static DECLARE_WORK(ipa3_usb_notify_remote_wakeup_work,
ipa3_usb_wq_notify_remote_wakeup);
static DECLARE_WORK(ipa3_usb_dpl_notify_remote_wakeup_work,
ipa3_usb_wq_dpl_notify_remote_wakeup);
-static DECLARE_WORK(ipa3_usb_notify_suspend_completed_work,
- ipa3_usb_wq_notify_suspend_completed);
-static DECLARE_WORK(ipa3_usb_dpl_notify_suspend_completed_work,
- ipa3_usb_wq_dpl_notify_suspend_completed);
struct ipa3_usb_context *ipa3_usb_ctx;
@@ -273,8 +258,6 @@ static char *ipa3_usb_state_to_string(enum ipa3_usb_state state)
return "IPA_USB_STOPPED";
case IPA_USB_SUSPEND_REQUESTED:
return "IPA_USB_SUSPEND_REQUESTED";
- case IPA_USB_SUSPEND_IN_PROGRESS:
- return "IPA_USB_SUSPEND_IN_PROGRESS";
case IPA_USB_SUSPENDED:
return "IPA_USB_SUSPENDED";
case IPA_USB_SUSPENDED_NO_RWAKEUP:
@@ -330,17 +313,11 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
* In case of failure during suspend request
* handling, state is reverted to connected.
*/
- (err_permit && state == IPA_USB_SUSPEND_REQUESTED) ||
- /*
- * In case of failure during suspend completing
- * handling, state is reverted to connected.
- */
- (err_permit && state == IPA_USB_SUSPEND_IN_PROGRESS))
+ (err_permit && state == IPA_USB_SUSPEND_REQUESTED))
state_legal = true;
break;
case IPA_USB_STOPPED:
- if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
- state == IPA_USB_CONNECTED ||
+ if (state == IPA_USB_CONNECTED ||
state == IPA_USB_SUSPENDED ||
state == IPA_USB_SUSPENDED_NO_RWAKEUP)
state_legal = true;
@@ -349,19 +326,8 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
if (state == IPA_USB_CONNECTED)
state_legal = true;
break;
- case IPA_USB_SUSPEND_IN_PROGRESS:
- if (state == IPA_USB_SUSPEND_REQUESTED ||
- /*
- * In case of failure during resume, state is reverted
- * to original, which could be suspend_in_progress.
- * Allow it.
- */
- (err_permit && state == IPA_USB_RESUME_IN_PROGRESS))
- state_legal = true;
- break;
case IPA_USB_SUSPENDED:
if (state == IPA_USB_SUSPEND_REQUESTED ||
- state == IPA_USB_SUSPEND_IN_PROGRESS ||
/*
* In case of failure during resume, state is reverted
* to original, which could be suspended. Allow it
@@ -374,8 +340,7 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
state_legal = true;
break;
case IPA_USB_RESUME_IN_PROGRESS:
- if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
- state == IPA_USB_SUSPENDED)
+ if (state == IPA_USB_SUSPENDED)
state_legal = true;
break;
default:
@@ -452,7 +417,6 @@ static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op,
break;
case IPA_USB_OP_DISCONNECT:
if (state == IPA_USB_CONNECTED ||
- state == IPA_USB_SUSPEND_IN_PROGRESS ||
state == IPA_USB_SUSPENDED ||
state == IPA_USB_SUSPENDED_NO_RWAKEUP)
is_legal = true;
@@ -483,7 +447,6 @@ static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op,
break;
case IPA_USB_OP_RESUME:
if (state == IPA_USB_SUSPENDED ||
- state == IPA_USB_SUSPEND_IN_PROGRESS ||
state == IPA_USB_SUSPENDED_NO_RWAKEUP)
is_legal = true;
break;
@@ -582,71 +545,6 @@ static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work)
ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_REMOTE_WAKEUP);
}
-static void ipa3_usb_wq_notify_suspend_completed(struct work_struct *work)
-{
- ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH, IPA_USB_SUSPEND_COMPLETED);
-}
-
-static void ipa3_usb_wq_dpl_notify_suspend_completed(struct work_struct *work)
-{
- ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_SUSPEND_COMPLETED);
-}
-
-static void ipa3_usb_wq_finish_suspend_work(struct work_struct *work)
-{
- struct finish_suspend_work_context *finish_suspend_work_ctx;
- unsigned long flags;
- int result = -EFAULT;
- struct ipa3_usb_transport_type_ctx *tctx;
-
- mutex_lock(&ipa3_usb_ctx->general_mutex);
- IPA_USB_DBG_LOW("entry\n");
- finish_suspend_work_ctx = container_of(work,
- struct finish_suspend_work_context, work);
- tctx = &ipa3_usb_ctx->ttype_ctx[finish_suspend_work_ctx->ttype];
-
- spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- if (tctx->state != IPA_USB_SUSPEND_IN_PROGRESS) {
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- mutex_unlock(&ipa3_usb_ctx->general_mutex);
- return;
- }
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
- /* Stop DL/DPL channel */
- result = ipa3_stop_gsi_channel(finish_suspend_work_ctx->dl_clnt_hdl);
- if (result) {
- IPAERR("Error stopping DL/DPL channel: %d, resuming channel\n",
- result);
- ipa3_xdci_resume(finish_suspend_work_ctx->ul_clnt_hdl,
- finish_suspend_work_ctx->dl_clnt_hdl,
- IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype));
- /* Change state back to CONNECTED */
- if (!ipa3_usb_set_state(IPA_USB_CONNECTED, true,
- finish_suspend_work_ctx->ttype))
- IPA_USB_ERR("failed to change state to connected\n");
- queue_work(ipa3_usb_ctx->wq,
- IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype) ?
- &ipa3_usb_dpl_notify_remote_wakeup_work :
- &ipa3_usb_notify_remote_wakeup_work);
- mutex_unlock(&ipa3_usb_ctx->general_mutex);
- return;
- }
-
- /* Change ipa_usb state to SUSPENDED */
- if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false,
- finish_suspend_work_ctx->ttype))
- IPA_USB_ERR("failed to change state to suspended\n");
-
- queue_work(ipa3_usb_ctx->wq,
- IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype) ?
- &ipa3_usb_dpl_notify_suspend_completed_work :
- &ipa3_usb_notify_suspend_completed_work);
-
- IPA_USB_DBG_LOW("exit\n");
- mutex_unlock(&ipa3_usb_ctx->general_mutex);
-}
-
static int ipa3_usb_cons_request_resource_cb_do(
enum ipa3_usb_transport_type ttype,
struct work_struct *remote_wakeup_work)
@@ -674,17 +572,6 @@ static int ipa3_usb_cons_request_resource_cb_do(
else
result = -EINPROGRESS;
break;
- case IPA_USB_SUSPEND_IN_PROGRESS:
- /*
- * This case happens due to suspend interrupt.
- * CONS is granted
- */
- if (!rm_ctx->cons_requested) {
- rm_ctx->cons_requested = true;
- queue_work(ipa3_usb_ctx->wq, remote_wakeup_work);
- }
- result = 0;
- break;
case IPA_USB_SUSPENDED:
if (!rm_ctx->cons_requested) {
rm_ctx->cons_requested = true;
@@ -727,15 +614,10 @@ static int ipa3_usb_cons_release_resource_cb_do(
ipa3_usb_state_to_string(
ipa3_usb_ctx->ttype_ctx[ttype].state));
switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
- case IPA_USB_SUSPEND_IN_PROGRESS:
+ case IPA_USB_SUSPENDED:
/* Proceed with the suspend if no DL/DPL data */
if (rm_ctx->cons_requested)
rm_ctx->cons_requested_released = true;
- else {
- queue_work(ipa3_usb_ctx->wq,
- &ipa3_usb_ctx->ttype_ctx[ttype].
- finish_suspend_work.work);
- }
break;
case IPA_USB_SUSPEND_REQUESTED:
if (rm_ctx->cons_requested)
@@ -2311,8 +2193,7 @@ int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
orig_state = ipa3_usb_ctx->ttype_ctx[ttype].state;
if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
- if (orig_state != IPA_USB_SUSPEND_IN_PROGRESS &&
- orig_state != IPA_USB_SUSPENDED) {
+ if (orig_state != IPA_USB_SUSPENDED) {
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
flags);
/* Stop UL channel */
@@ -2340,8 +2221,7 @@ int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
if (result)
goto bad_params;
- if (orig_state != IPA_USB_SUSPEND_IN_PROGRESS &&
- orig_state != IPA_USB_SUSPENDED) {
+ if (orig_state != IPA_USB_SUSPENDED) {
result = ipa3_usb_release_prod(ttype);
if (result) {
IPA_USB_ERR("failed to release PROD.\n");
@@ -2547,7 +2427,6 @@ int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
{
int result = 0;
unsigned long flags;
- enum ipa3_usb_cons_state curr_cons_state;
enum ipa3_usb_transport_type ttype;
mutex_lock(&ipa3_usb_ctx->general_mutex);
@@ -2602,49 +2481,20 @@ int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
goto release_prod_fail;
}
+ /* Check if DL/DPL data pending */
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- curr_cons_state = ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state;
+ if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state ==
+ IPA_USB_CONS_GRANTED &&
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
+
+ IPA_USB_DBG("DL/DPL data pending, invoke remote wakeup\n");
+ queue_work(ipa3_usb_ctx->wq,
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ &ipa3_usb_dpl_notify_remote_wakeup_work :
+ &ipa3_usb_notify_remote_wakeup_work);
+ }
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- if (curr_cons_state == IPA_USB_CONS_GRANTED) {
- /* Change state to SUSPEND_IN_PROGRESS */
- if (!ipa3_usb_set_state(IPA_USB_SUSPEND_IN_PROGRESS,
- false, ttype))
- IPA_USB_ERR("fail set state to suspend_in_progress\n");
- /* Check if DL/DPL data pending */
- spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
- IPA_USB_DBG(
- "DL/DPL data pending, invoke remote wakeup\n");
- queue_work(ipa3_usb_ctx->wq,
- IPA3_USB_IS_TTYPE_DPL(ttype) ?
- &ipa3_usb_dpl_notify_remote_wakeup_work :
- &ipa3_usb_notify_remote_wakeup_work);
- }
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
- ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.ttype =
- ttype;
- ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.dl_clnt_hdl =
- dl_clnt_hdl;
- ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.ul_clnt_hdl =
- ul_clnt_hdl;
- INIT_WORK(&ipa3_usb_ctx->ttype_ctx[ttype].
- finish_suspend_work.work,
- ipa3_usb_wq_finish_suspend_work);
-
- result = -EINPROGRESS;
- IPA_USB_DBG("exit with suspend_in_progress\n");
- goto bad_params;
- }
-
- /* Stop DL channel */
- result = ipa3_stop_gsi_channel(dl_clnt_hdl);
- if (result) {
- IPAERR("Error stopping DL/DPL channel: %d\n", result);
- result = -EFAULT;
- goto release_prod_fail;
- }
/* Change state to SUSPENDED */
if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false, ttype))
IPA_USB_ERR("failed to change state to suspended\n");
@@ -2803,13 +2653,11 @@ int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
}
}
- if (prev_state != IPA_USB_SUSPEND_IN_PROGRESS) {
- /* Start DL/DPL channel */
- result = ipa3_start_gsi_channel(dl_clnt_hdl);
- if (result) {
- IPA_USB_ERR("failed to start DL/DPL channel.\n");
- goto start_dl_fail;
- }
+ /* Start DL/DPL channel */
+ result = ipa3_start_gsi_channel(dl_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to start DL/DPL channel.\n");
+ goto start_dl_fail;
}
/* Change state to CONNECTED */
@@ -2824,12 +2672,10 @@ int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
return 0;
state_change_connected_fail:
- if (prev_state != IPA_USB_SUSPEND_IN_PROGRESS) {
- result = ipa3_stop_gsi_channel(dl_clnt_hdl);
- if (result)
- IPA_USB_ERR("Error stopping DL/DPL channel: %d\n",
- result);
- }
+ result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+ if (result)
+ IPA_USB_ERR("Error stopping DL/DPL channel: %d\n",
+ result);
start_dl_fail:
if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
result = ipa3_stop_gsi_channel(ul_clnt_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 31e530e..837bf38 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -232,6 +232,9 @@ static DECLARE_DELAYED_WORK(ipa3_transport_release_resource_work,
ipa3_transport_release_resource);
static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);
+static void ipa3_post_init_wq(struct work_struct *work);
+static DECLARE_WORK(ipa3_post_init_work, ipa3_post_init_wq);
+
static struct ipa3_plat_drv_res ipa3_res = {0, };
struct msm_bus_scale_pdata *ipa3_bus_scale_table;
@@ -495,63 +498,6 @@ static int ipa3_open(struct inode *inode, struct file *filp)
return 0;
}
-/**
-* ipa3_flow_control() - Enable/Disable flow control on a particular client.
-* Return codes:
-* None
-*/
-void ipa3_flow_control(enum ipa_client_type ipa_client,
- bool enable, uint32_t qmap_id)
-{
- struct ipa_ep_cfg_ctrl ep_ctrl = {0};
- int ep_idx;
- struct ipa3_ep_context *ep;
-
- /* Check if tethered flow control is needed or not.*/
- if (!ipa3_ctx->tethered_flow_control) {
- IPADBG("Apps flow control is not needed\n");
- return;
- }
-
- /* Check if ep is valid. */
- ep_idx = ipa3_get_ep_mapping(ipa_client);
- if (ep_idx == -1) {
- IPADBG("Invalid IPA client\n");
- return;
- }
-
- ep = &ipa3_ctx->ep[ep_idx];
- if (!ep->valid || (ep->client != IPA_CLIENT_USB_PROD)) {
- IPADBG("EP not valid/Not applicable for client.\n");
- return;
- }
-
- spin_lock(&ipa3_ctx->disconnect_lock);
- /* Check if the QMAP_ID matches. */
- if (ep->cfg.meta.qmap_id != qmap_id) {
- IPADBG("Flow control ind not for same flow: %u %u\n",
- ep->cfg.meta.qmap_id, qmap_id);
- spin_unlock(&ipa3_ctx->disconnect_lock);
- return;
- }
- if (!ep->disconnect_in_progress) {
- if (enable) {
- IPADBG("Enabling Flow\n");
- ep_ctrl.ipa_ep_delay = false;
- IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_enable);
- } else {
- IPADBG("Disabling Flow\n");
- ep_ctrl.ipa_ep_delay = true;
- IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_disable);
- }
- ep_ctrl.ipa_ep_suspend = false;
- ipa3_cfg_ep_ctrl(ep_idx, &ep_ctrl);
- } else {
- IPADBG("EP disconnect is in progress\n");
- }
- spin_unlock(&ipa3_ctx->disconnect_lock);
-}
-
static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
{
if (!buff) {
@@ -1863,9 +1809,11 @@ static void ipa3_q6_avoid_holb(void)
IPA_ENDP_INIT_HOL_BLOCK_EN_n,
ep_idx, &ep_holb);
- ipahal_write_reg_n_fields(
- IPA_ENDP_INIT_CTRL_n,
- ep_idx, &ep_suspend);
+ /* from IPA 4.0 pipe suspend is not supported */
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+ ipahal_write_reg_n_fields(
+ IPA_ENDP_INIT_CTRL_n,
+ ep_idx, &ep_suspend);
}
}
}
@@ -3979,6 +3927,15 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
struct ipa3_flt_tbl *flt_tbl;
int i;
+ if (ipa3_ctx == NULL) {
+ IPADBG("IPA driver haven't initialized\n");
+ return -ENXIO;
+ }
+
+ /* Prevent subsequent calls from trying to load the FW again. */
+ if (ipa3_ctx->ipa_initialization_complete)
+ return 0;
+
/*
* indication whether working in MHI config or non MHI config is given
* in ipa3_write which is launched before ipa3_post_init. i.e. from
@@ -4113,41 +4070,15 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
fail_setup_apps_pipes:
gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
fail_register_device:
- ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
- ipa_rm_exit();
- cdev_del(&ipa3_ctx->cdev);
- device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
- unregister_chrdev_region(ipa3_ctx->dev_num, 1);
- ipa3_free_dma_task_for_gsi();
ipa3_destroy_flt_tbl_idrs();
- idr_destroy(&ipa3_ctx->ipa_idr);
- kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
- kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
- kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
- kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
- kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
- kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
- kmem_cache_destroy(ipa3_ctx->hdr_cache);
- kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
- kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
- destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
- destroy_workqueue(ipa3_ctx->power_mgmt_wq);
- iounmap(ipa3_ctx->mmio);
- ipa3_disable_clks();
- if (ipa3_clk)
- clk_put(ipa3_clk);
- ipa3_clk = NULL;
- msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
- if (ipa3_bus_scale_table) {
- msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
- ipa3_bus_scale_table = NULL;
- }
- kfree(ipa3_ctx->ctrl);
- kfree(ipa3_ctx);
- ipa3_ctx = NULL;
return result;
}
+static void ipa3_post_init_wq(struct work_struct *work)
+{
+ ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
+}
+
static int ipa3_trigger_fw_loading_mdms(void)
{
int result;
@@ -4249,9 +4180,10 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
if (result) {
IPAERR("FW loading process has failed\n");
return result;
- } else
- ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
-
+ } else {
+ queue_work(ipa3_ctx->transport_power_mgmt_wq,
+ &ipa3_post_init_work);
+ }
return count;
}
@@ -4722,20 +4654,6 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
goto fail_device_create;
}
- cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
- ipa3_ctx->cdev.owner = THIS_MODULE;
- ipa3_ctx->cdev.ops = &ipa3_drv_fops; /* from LDD3 */
-
- result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
- if (result) {
- IPAERR(":cdev_add err=%d\n", -result);
- result = -ENODEV;
- goto fail_cdev_add;
- }
- IPADBG("ipa cdev added successful. major:%d minor:%d\n",
- MAJOR(ipa3_ctx->dev_num),
- MINOR(ipa3_ctx->dev_num));
-
if (ipa3_create_nat_device()) {
IPAERR("unable to create nat device\n");
result = -ENODEV;
@@ -4793,16 +4711,28 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
}
}
+ cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
+ ipa3_ctx->cdev.owner = THIS_MODULE;
+ ipa3_ctx->cdev.ops = &ipa3_drv_fops; /* from LDD3 */
+
+ result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
+ if (result) {
+ IPAERR(":cdev_add err=%d\n", -result);
+ result = -ENODEV;
+ goto fail_cdev_add;
+ }
+ IPADBG("ipa cdev added successful. major:%d minor:%d\n",
+ MAJOR(ipa3_ctx->dev_num),
+ MINOR(ipa3_ctx->dev_num));
return 0;
+fail_cdev_add:
fail_ipa_init_interrupts:
ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
fail_create_apps_resource:
ipa_rm_exit();
fail_ipa_rm_init:
fail_nat_dev_add:
- cdev_del(&ipa3_ctx->cdev);
-fail_cdev_add:
device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
fail_device_create:
unregister_chrdev_region(ipa3_ctx->dev_num, 1);
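With this change, ipa3_post_init() runs from a work item queued after FW load rather than inline from the write() handler, and the new guards at its top make repeated or premature invocations harmless. The general shape of that deferral, reduced to a self-contained sketch (all names here are invented; the driver itself queues onto its own transport_power_mgmt_wq rather than the system workqueue):

#include <linux/workqueue.h>

static void example_post_init_fn(struct work_struct *work);
static DECLARE_WORK(example_post_init_work, example_post_init_fn);

static bool example_init_done;

static void example_post_init_fn(struct work_struct *work)
{
        /* Idempotent: a second queued invocation returns immediately. */
        if (example_init_done)
                return;
        example_init_done = true;
        /* ... heavy one-time initialization runs here, in process context ... */
}

/* Called from a fast path (e.g. the write() handler) once FW load succeeds. */
static void example_trigger_post_init(void)
{
        schedule_work(&example_post_init_work);
}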
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 0b8115f..564397a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -69,13 +69,15 @@ int ipa3_enable_data_path(u32 clnt_hdl)
}
/* Enable the pipe */
- if (IPA_CLIENT_IS_CONS(ep->client) &&
- (ep->keep_ipa_awake ||
- ipa3_ctx->resume_on_connect[ep->client] ||
- !ipa3_should_pipe_be_suspended(ep->client))) {
- memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
- ep_cfg_ctrl.ipa_ep_suspend = false;
- res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ if (IPA_CLIENT_IS_CONS(ep->client) &&
+ (ep->keep_ipa_awake ||
+ ipa3_ctx->resume_on_connect[ep->client] ||
+ !ipa3_should_pipe_be_suspended(ep->client))) {
+ memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = false;
+ res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ }
}
return res;
@@ -97,33 +99,41 @@ int ipa3_disable_data_path(u32 clnt_hdl)
res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
}
- /* Suspend the pipe */
- if (IPA_CLIENT_IS_CONS(ep->client)) {
- /*
- * for RG10 workaround uC needs to be loaded before pipe can
- * be suspended in this case.
- */
- if (ipa3_ctx->apply_rg10_wa && ipa3_uc_state_check()) {
- IPADBG("uC is not loaded yet, waiting...\n");
- res = wait_for_completion_timeout(
- &ipa3_ctx->uc_loaded_completion_obj, 60 * HZ);
- if (res == 0)
- IPADBG("timeout waiting for uC to load\n");
+ /*
+ * for IPA 4.0 and above aggregation frame is closed together with
+ * channel STOP
+ */
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ /* Suspend the pipe */
+ if (IPA_CLIENT_IS_CONS(ep->client)) {
+ /*
+ * for RG10 workaround uC needs to be loaded before
+ * pipe can be suspended in this case.
+ */
+ if (ipa3_ctx->apply_rg10_wa && ipa3_uc_state_check()) {
+ IPADBG("uC is not loaded yet, waiting...\n");
+ res = wait_for_completion_timeout(
+ &ipa3_ctx->uc_loaded_completion_obj,
+ 60 * HZ);
+ if (res == 0)
+ IPADBG("timeout waiting for uC load\n");
+ }
+
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = true;
+ res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
}
- memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
- ep_cfg_ctrl.ipa_ep_suspend = true;
- res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
- }
-
- udelay(IPA_PKT_FLUSH_TO_US);
- ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, &ep_aggr);
- if (ep_aggr.aggr_en) {
- res = ipa3_tag_aggr_force_close(clnt_hdl);
- if (res) {
- IPAERR("tag process timeout, client:%d err:%d\n",
- clnt_hdl, res);
- BUG();
+ udelay(IPA_PKT_FLUSH_TO_US);
+ ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl,
+ &ep_aggr);
+ if (ep_aggr.aggr_en) {
+ res = ipa3_tag_aggr_force_close(clnt_hdl);
+ if (res) {
+ IPAERR("tag process timeout client:%d err:%d\n",
+ clnt_hdl, res);
+ ipa_assert();
+ }
}
}
@@ -1257,10 +1267,12 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
goto disable_clk_and_exit;
}
- /* Suspend the DL/DPL EP */
- memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
- ep_cfg_ctrl.ipa_ep_suspend = true;
- ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ /* Suspend the DL/DPL EP */
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = true;
+ ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+ }
/*
* Check if DL/DPL channel is empty again, data could enter the channel
@@ -1275,6 +1287,14 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
goto unsuspend_dl_and_exit;
}
+ /* Stop DL channel */
+ result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+ if (result) {
+ IPAERR("Error stopping DL/DPL channel: %d\n", result);
+ result = -EFAULT;
+ goto unsuspend_dl_and_exit;
+ }
+
/* STOP UL channel */
if (!is_dpl) {
source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
@@ -1283,7 +1303,7 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
if (result) {
IPAERR("Error stopping UL channel: result = %d\n",
result);
- goto unsuspend_dl_and_exit;
+ goto start_dl_and_exit;
}
}
@@ -1292,11 +1312,15 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
IPADBG("exit\n");
return 0;
+start_dl_and_exit:
+ gsi_start_channel(dl_ep->gsi_chan_hdl);
unsuspend_dl_and_exit:
- /* Unsuspend the DL EP */
- memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
- ep_cfg_ctrl.ipa_ep_suspend = false;
- ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ /* Unsuspend the DL EP */
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = false;
+ ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+ }
disable_clk_and_exit:
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
return result;
@@ -1340,7 +1364,8 @@ int ipa3_start_gsi_channel(u32 clnt_hdl)
int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
{
- struct ipa3_ep_context *ul_ep, *dl_ep;
+ struct ipa3_ep_context *ul_ep = NULL;
+ struct ipa3_ep_context *dl_ep = NULL;
enum gsi_status gsi_res;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
@@ -1360,10 +1385,17 @@ int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
- /* Unsuspend the DL/DPL EP */
- memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
- ep_cfg_ctrl.ipa_ep_suspend = false;
- ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ /* Unsuspend the DL/DPL EP */
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = false;
+ ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+ }
+
+ /* Start DL channel */
+ gsi_res = gsi_start_channel(dl_ep->gsi_chan_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS)
+ IPAERR("Error starting DL channel: %d\n", gsi_res);
/* Start UL channel */
if (!is_dpl) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 04d807f..915f2b8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -267,7 +267,6 @@ int ipa3_send(struct ipa3_sys_context *sys,
int i = 0;
int j;
int result;
- int fail_dma_wrap = 0;
u32 mem_flag = GFP_ATOMIC;
const struct ipa_gsi_ep_config *gsi_ep_cfg;
@@ -298,7 +297,6 @@ int ipa3_send(struct ipa3_sys_context *sys,
spin_lock_bh(&sys->spinlock);
for (i = 0; i < num_desc; i++) {
- fail_dma_wrap = 0;
tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
mem_flag);
if (!tx_pkt) {
@@ -319,7 +317,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
if (ipa_populate_tag_field(&desc[i], tx_pkt,
&tag_pyld_ret)) {
IPAERR("Failed to populate tag field\n");
- goto failure;
+ goto failure_dma_map;
}
}
@@ -335,11 +333,6 @@ int ipa3_send(struct ipa3_sys_context *sys,
tx_pkt->mem.base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
- if (!tx_pkt->mem.phys_base) {
- IPAERR("failed to do dma map.\n");
- fail_dma_wrap = 1;
- goto failure;
- }
} else {
tx_pkt->mem.phys_base =
desc[i].dma_address;
@@ -355,17 +348,17 @@ int ipa3_send(struct ipa3_sys_context *sys,
desc[i].frag,
0, tx_pkt->mem.size,
DMA_TO_DEVICE);
- if (!tx_pkt->mem.phys_base) {
- IPAERR("dma map failed\n");
- fail_dma_wrap = 1;
- goto failure;
- }
} else {
tx_pkt->mem.phys_base =
desc[i].dma_address;
tx_pkt->no_unmap_dma = true;
}
}
+ if (dma_mapping_error(ipa3_ctx->pdev, tx_pkt->mem.phys_base)) {
+ IPAERR("failed to do dma map.\n");
+ goto failure_dma_map;
+ }
+
tx_pkt->sys = sys;
tx_pkt->callback = desc[i].callback;
tx_pkt->user1 = desc[i].user1;
@@ -426,28 +419,31 @@ int ipa3_send(struct ipa3_sys_context *sys,
return 0;
+failure_dma_map:
+ kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+
failure:
ipahal_destroy_imm_cmd(tag_pyld_ret);
tx_pkt = tx_pkt_first;
for (j = 0; j < i; j++) {
next_pkt = list_next_entry(tx_pkt, link);
list_del(&tx_pkt->link);
- if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
- dma_unmap_single(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
- tx_pkt->mem.size,
- DMA_TO_DEVICE);
- } else {
- dma_unmap_page(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
- tx_pkt->mem.size,
- DMA_TO_DEVICE);
+
+ if (!tx_pkt->no_unmap_dma) {
+ if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
+ dma_unmap_single(ipa3_ctx->pdev,
+ tx_pkt->mem.phys_base,
+ tx_pkt->mem.size, DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(ipa3_ctx->pdev,
+ tx_pkt->mem.phys_base,
+ tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ }
}
kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
tx_pkt = next_pkt;
}
- if (j < num_desc)
- /* last desc failed */
- if (fail_dma_wrap)
- kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
kfree(gsi_xfer_elem_array);
@@ -1444,8 +1440,7 @@ static void ipa3_wq_repl_rx(struct work_struct *work)
rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
sys->rx_buff_sz,
DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
__func__, (void *)rx_pkt->data.dma_addr,
ptr, sys);
@@ -1605,8 +1600,7 @@ static void ipa3_alloc_wlan_rx_common_cache(u32 size)
ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
IPAERR("dma_map_single failure %p for %p\n",
(void *)rx_pkt->data.dma_addr, ptr);
goto fail_dma_mapping;
@@ -1676,8 +1670,7 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
sys->rx_buff_sz,
DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
IPAERR("dma_map_single failure %p for %p\n",
(void *)rx_pkt->data.dma_addr, ptr);
goto fail_dma_mapping;
@@ -1764,8 +1757,8 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa3_ctx->pdev,
+ rx_pkt->data.dma_addr)) {
IPAERR("dma_map_single failure %p for %p\n",
(void *)rx_pkt->data.dma_addr, ptr);
goto fail_dma_mapping;
@@ -1780,8 +1773,8 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa3_ctx->pdev,
+ rx_pkt->data.dma_addr)) {
IPAERR("dma_map_single failure %p for %p\n",
(void *)rx_pkt->data.dma_addr, ptr);
goto fail_dma_mapping;
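The ipa_dp.c hunks above drop the ad-hoc "addr == 0 || addr == ~0" checks in favor of dma_mapping_error(), which is the only portable way to detect a failed streaming mapping. A minimal sketch of that pattern, assuming a generic struct device (the functions and their arguments are invented; only the DMA API calls are real):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a TX buffer and report failure without interpreting the address. */
static int example_map_tx_buf(struct device *dev, void *buf, size_t len,
                              dma_addr_t *addr)
{
        *addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *addr)) {
                dev_err(dev, "dma_map_single failed\n");
                return -ENOMEM;
        }
        return 0;
}

/* On teardown, unmap with the same size and direction used for mapping. */
static void example_unmap_tx_buf(struct device *dev, dma_addr_t addr, size_t len)
{
        dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
}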
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 410b96a..593d4fc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -490,6 +490,10 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
entry->hdr,
entry->hdr_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(ipa3_ctx->pdev, entry->phys_base)) {
+ IPAERR("dma_map_single failure for entry\n");
+ goto fail_dma_mapping;
+ }
} else {
entry->is_hdr_proc_ctx = false;
if (list_empty(&htbl->head_free_offset_list[bin])) {
@@ -565,6 +569,9 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
list_del(&entry->link);
dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
entry->hdr_len, DMA_TO_DEVICE);
+fail_dma_mapping:
+ entry->is_hdr_proc_ctx = false;
+
bad_hdr_len:
entry->cookie = 0;
kmem_cache_free(ipa3_ctx->hdr_cache, entry);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 86442b1..9a406d6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1970,8 +1970,6 @@ int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple);
int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple);
void ipa3_set_resorce_groups_min_max_limits(void);
void ipa3_suspend_apps_pipes(bool suspend);
-void ipa3_flow_control(enum ipa_client_type ipa_client, bool enable,
- uint32_t qmap_id);
int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
enum ipa_ip_type ip_type,
bool hashable,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 799246b..60dc04f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1600,13 +1600,15 @@ int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
if (IPA_CLIENT_IS_CONS(ep->client)) {
- ep_cfg_ctrl.ipa_ep_suspend = true;
- result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
- if (result)
- IPAERR("client (ep: %d) failed to suspend result=%d\n",
- clnt_hdl, result);
- else
- IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ ep_cfg_ctrl.ipa_ep_suspend = true;
+ result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ if (result)
+ IPAERR("(ep: %d) failed to suspend result=%d\n",
+ clnt_hdl, result);
+ else
+ IPADBG("(ep: %d) suspended\n", clnt_hdl);
+ }
} else {
ep_cfg_ctrl.ipa_ep_delay = true;
result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 23c8241..079481d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1110,7 +1110,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
true,
IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
QMB_MASTER_SELECT_DDR,
- { 0, 1, 8, 16, IPA_EE_AP } },
+ { 1, 0, 8, 16, IPA_EE_AP } },
[IPA_4_0][IPA_CLIENT_ETHERNET_PROD] = {
true, IPA_v4_0_GROUP_UL_DL,
true,
@@ -1631,6 +1631,16 @@ bool ipa3_should_pipe_be_suspended(enum ipa_client_type client)
ep = &ipa3_ctx->ep[ipa_ep_idx];
+ /*
+ * Starting with IPA 4.0, a pipe can no longer be suspended. Instead,
+ * the corresponding GSI channel should be stopped. Usually the client
+ * driver takes care of stopping the channel. For client drivers that
+ * do not stop the channel, IPA RM will do so based on
+ * ipa3_should_pipe_channel_be_stopped().
+ */
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+ return false;
+
if (ep->keep_ipa_awake)
return false;
@@ -1651,6 +1661,41 @@ bool ipa3_should_pipe_be_suspended(enum ipa_client_type client)
}
/**
+ * ipa3_should_pipe_channel_be_stopped() - returns true when the client's
+ * channel should be stopped during a power save scenario. False otherwise.
+ * Most clients already stop the GSI channel on suspend and are not included
+ * in the list below.
+ *
+ * @client: [IN] IPA client
+ */
+static bool ipa3_should_pipe_channel_be_stopped(enum ipa_client_type client)
+{
+ struct ipa3_ep_context *ep;
+ int ipa_ep_idx;
+
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+ return false;
+
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ WARN_ON(1);
+ return false;
+ }
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+ if (ep->keep_ipa_awake)
+ return false;
+
+ if (client == IPA_CLIENT_ODU_EMB_CONS ||
+ client == IPA_CLIENT_ODU_TETH_CONS)
+ return true;
+
+ return false;
+}
+
+/**
* ipa3_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
* resource and decrement active clients counter, which may result in clock
* gating of IPA clocks.
@@ -1695,6 +1740,19 @@ int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource)
pipe_suspended = true;
}
}
+
+ if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+ ipa3_should_pipe_channel_be_stopped(client)) {
+ if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+ /* Stop GSI channel */
+ res = ipa3_stop_gsi_channel(ipa_ep_idx);
+ if (res) {
+ IPAERR("failed stop gsi ch %lu\n",
+ ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+ return res;
+ }
+ }
+ }
}
/* Sleep ~1 msec */
if (pipe_suspended)
@@ -1761,6 +1819,12 @@ int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource)
ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
}
}
+
+ if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+ ipa3_should_pipe_channel_be_stopped(client)) {
+ res = -EPERM;
+ goto bail;
+ }
}
if (res == 0) {
@@ -1824,6 +1888,19 @@ int ipa3_resume_resource(enum ipa_rm_resource_name resource)
ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
}
}
+
+ if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+ ipa3_should_pipe_channel_be_stopped(client)) {
+ if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+ res = gsi_start_channel(
+ ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+ if (res) {
+ IPAERR("failed to start gsi ch %lu\n",
+ ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+ return res;
+ }
+ }
+ }
}
return res;
@@ -2714,6 +2791,12 @@ int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
return -EINVAL;
}
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 && ep_ctrl->ipa_ep_suspend) {
+ IPAERR("pipe suspend is not supported\n");
+ WARN_ON(1);
+ return -EPERM;
+ }
+
IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
clnt_hdl,
ep_ctrl->ipa_ep_suspend,
@@ -4674,6 +4757,7 @@ void ipa3_suspend_apps_pipes(bool suspend)
struct ipa_ep_cfg_ctrl cfg;
int ipa_ep_idx;
struct ipa3_ep_context *ep;
+ int res;
memset(&cfg, 0, sizeof(cfg));
cfg.ipa_ep_suspend = suspend;
@@ -4688,7 +4772,23 @@ void ipa3_suspend_apps_pipes(bool suspend)
if (ep->valid) {
IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
ipa_ep_idx);
- ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ if (suspend) {
+ res = ipa3_stop_gsi_channel(ipa_ep_idx);
+ if (res) {
+ IPAERR("failed to stop LAN channel\n");
+ ipa_assert();
+ }
+ } else {
+ res = gsi_start_channel(ep->gsi_chan_hdl);
+ if (res) {
+ IPAERR("failed to start LAN channel\n");
+ ipa_assert();
+ }
+ }
+ } else {
+ ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+ }
if (suspend)
ipa3_gsi_poll_after_suspend(ep);
else if (!atomic_read(&ep->sys->curr_polling_state))
@@ -4706,7 +4806,23 @@ void ipa3_suspend_apps_pipes(bool suspend)
if (ep->valid) {
IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
ipa_ep_idx);
- ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ if (suspend) {
+ res = ipa3_stop_gsi_channel(ipa_ep_idx);
+ if (res) {
+ IPAERR("failed to stop WAN channel\n");
+ ipa_assert();
+ }
+ } else {
+ res = gsi_start_channel(ep->gsi_chan_hdl);
+ if (res) {
+ IPAERR("failed to start WAN channel\n");
+ ipa_assert();
+ }
+ }
+ } else {
+ ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+ }
if (suspend)
ipa3_gsi_poll_after_suspend(ep);
else if (!atomic_read(&ep->sys->curr_polling_state))
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 1a119b9..3019e4d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -688,6 +688,19 @@ static void ipareg_parse_endp_init_ctrl_n(enum ipahal_reg_name reg,
IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT);
}
+static void ipareg_construct_endp_init_ctrl_n_v4_0(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_ctrl *ep_ctrl =
+ (struct ipa_ep_cfg_ctrl *)fields;
+
+ WARN_ON(ep_ctrl->ipa_ep_suspend);
+
+ IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_delay,
+ IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT,
+ IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK);
+}
+
static void ipareg_construct_endp_init_ctrl_scnd_n(enum ipahal_reg_name reg,
const void *fields, u32 *val)
{
@@ -1444,6 +1457,9 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
ipareg_parse_hps_queue_weights, 0x000005a4, 0},
/* IPAv4.0 */
+ [IPA_HW_v4_0][IPA_ENDP_INIT_CTRL_n] = {
+ ipareg_construct_endp_init_ctrl_n_v4_0, ipareg_parse_dummy,
+ 0x00000800, 0x70 },
[IPA_HW_v4_0][IPA_TX_CFG] = {
ipareg_construct_tx_cfg_v4_0, ipareg_parse_tx_cfg_v4_0,
0x000001FC, 0},
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index b198348..f408f23 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1507,25 +1507,13 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
/* Flow enable */
case RMNET_IOCTL_FLOW_ENABLE:
- IPAWANDBG("Received flow enable\n");
- if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
- sizeof(struct rmnet_ioctl_data_s))) {
- rc = -EFAULT;
- break;
- }
- ipa3_flow_control(IPA_CLIENT_USB_PROD, true,
- ioctl_data.u.tcm_handle);
+ IPAWANERR("RMNET_IOCTL_FLOW_ENABLE not supported\n");
+ rc = -EFAULT;
break;
/* Flow disable */
case RMNET_IOCTL_FLOW_DISABLE:
- IPAWANDBG("Received flow disable\n");
- if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
- sizeof(struct rmnet_ioctl_data_s))) {
- rc = -EFAULT;
- break;
- }
- ipa3_flow_control(IPA_CLIENT_USB_PROD, false,
- ioctl_data.u.tcm_handle);
+ IPAWANERR("RMNET_IOCTL_FLOW_DISABLE not supported\n");
+ rc = -EFAULT;
break;
/* Set flow handle */
case RMNET_IOCTL_FLOW_SET_HNDL:
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 4e9bd64..b0b51d6 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -40,9 +40,6 @@
#define SMMU_SIZE ((SZ_1G * 4ULL) - SMMU_BASE)
#define WIGIG_ENABLE_DELAY 50
-#define PM_OPT_SUSPEND (MSM_PCIE_CONFIG_NO_CFG_RESTORE | \
- MSM_PCIE_CONFIG_LINKDOWN)
-#define PM_OPT_RESUME MSM_PCIE_CONFIG_NO_CFG_RESTORE
#define WIGIG_SUBSYS_NAME "WIGIG"
#define WIGIG_RAMDUMP_SIZE 0x200000 /* maximum ramdump size */
@@ -127,6 +124,8 @@ struct msm11ad_ctx {
bool use_cpu_boost;
bool is_cpu_boosted;
struct cpumask boost_cpu;
+
+ bool keep_radio_on_during_sleep;
};
static LIST_HEAD(dev_list);
@@ -523,30 +522,8 @@ int msm_11ad_ctrl_aspm_l1(struct msm11ad_ctx *ctx, bool enable)
return rc;
}
-static int ops_suspend(void *handle)
+static int msm_11ad_turn_device_power_off(struct msm11ad_ctx *ctx)
{
- int rc;
- struct msm11ad_ctx *ctx = handle;
- struct pci_dev *pcidev;
-
- pr_info("%s(%p)\n", __func__, handle);
- if (!ctx) {
- pr_err("No context\n");
- return -ENODEV;
- }
- pcidev = ctx->pcidev;
- rc = pci_save_state(pcidev);
- if (rc) {
- dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
- return rc;
- }
- rc = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
- pcidev, NULL, PM_OPT_SUSPEND);
- if (rc) {
- dev_err(ctx->dev, "msm_pcie_pm_control(SUSPEND) failed :%d\n",
- rc);
- return rc;
- }
if (ctx->gpio_en >= 0)
gpio_direction_output(ctx->gpio_en, 0);
@@ -557,20 +534,12 @@ static int ops_suspend(void *handle)
msm_11ad_disable_vregs(ctx);
- return rc;
+ return 0;
}
-static int ops_resume(void *handle)
+static int msm_11ad_turn_device_power_on(struct msm11ad_ctx *ctx)
{
int rc;
- struct msm11ad_ctx *ctx = handle;
- struct pci_dev *pcidev;
-
- pr_info("%s(%p)\n", __func__, handle);
- if (!ctx) {
- pr_err("No context\n");
- return -ENODEV;
- }
rc = msm_11ad_enable_vregs(ctx);
if (rc) {
@@ -588,25 +557,124 @@ static int ops_resume(void *handle)
if (ctx->sleep_clk_en >= 0)
gpio_direction_output(ctx->sleep_clk_en, 1);
- pcidev = ctx->pcidev;
if (ctx->gpio_en >= 0) {
gpio_direction_output(ctx->gpio_en, 1);
msleep(WIGIG_ENABLE_DELAY);
}
+ return 0;
+
+err_disable_vregs:
+ msm_11ad_disable_vregs(ctx);
+ return rc;
+}
+
+static int msm_11ad_suspend_power_off(void *handle)
+{
+ int rc;
+ struct msm11ad_ctx *ctx = handle;
+ struct pci_dev *pcidev;
+
+ pr_debug("%s\n", __func__);
+
+ if (!ctx) {
+ pr_err("%s: No context\n", __func__);
+ return -ENODEV;
+ }
+
+ pcidev = ctx->pcidev;
+
+ msm_pcie_shadow_control(ctx->pcidev, 0);
+
+ rc = pci_save_state(pcidev);
+ if (rc) {
+ dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
+ goto out;
+ }
+ ctx->pristine_state = pci_store_saved_state(pcidev);
+
+ rc = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
+ pcidev, NULL, 0);
+ if (rc) {
+ dev_err(ctx->dev, "msm_pcie_pm_control(SUSPEND) failed :%d\n",
+ rc);
+ goto out;
+ }
+
+ rc = msm_11ad_turn_device_power_off(ctx);
+
+out:
+ return rc;
+}
+
+static int ops_suspend(void *handle, bool keep_device_power)
+{
+ struct msm11ad_ctx *ctx = handle;
+ struct pci_dev *pcidev;
+ int rc;
+
+ pr_debug("11ad suspend: %s\n", __func__);
+ if (!ctx) {
+ pr_err("11ad suspend: No context\n");
+ return -ENODEV;
+ }
+
+ if (!keep_device_power)
+ return msm_11ad_suspend_power_off(handle);
+
+ pcidev = ctx->pcidev;
+
+ msm_pcie_shadow_control(pcidev, 0);
+
+ dev_dbg(ctx->dev, "disable device and save config\n");
+ pci_disable_device(pcidev);
+ pci_save_state(pcidev);
+ ctx->pristine_state = pci_store_saved_state(pcidev);
+ dev_dbg(ctx->dev, "moving to D3\n");
+ pci_set_power_state(pcidev, PCI_D3hot);
+
+ rc = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
+ pcidev, NULL, 0);
+ if (rc)
+ dev_err(ctx->dev, "msm_pcie_pm_control(SUSPEND) failed :%d\n",
+ rc);
+
+ return rc;
+}
+
+static int msm_11ad_resume_power_on(void *handle)
+{
+ int rc;
+ struct msm11ad_ctx *ctx = handle;
+ struct pci_dev *pcidev;
+
+ pr_debug("%s\n", __func__);
+
+ if (!ctx) {
+ pr_err("%s: No context\n", __func__);
+ return -ENODEV;
+ }
+ pcidev = ctx->pcidev;
+
+ rc = msm_11ad_turn_device_power_on(ctx);
+ if (rc)
+ return rc;
+
rc = msm_pcie_pm_control(MSM_PCIE_RESUME, pcidev->bus->number,
- pcidev, NULL, PM_OPT_RESUME);
+ pcidev, NULL, 0);
if (rc) {
dev_err(ctx->dev, "msm_pcie_pm_control(RESUME) failed :%d\n",
rc);
goto err_disable_power;
}
- rc = msm_pcie_recover_config(pcidev);
- if (rc) {
- dev_err(ctx->dev, "msm_pcie_recover_config failed :%d\n",
- rc);
- goto err_suspend_rc;
- }
+
+ pci_set_power_state(pcidev, PCI_D0);
+
+ if (ctx->pristine_state)
+ pci_load_saved_state(ctx->pcidev, ctx->pristine_state);
+ pci_restore_state(ctx->pcidev);
+
+ msm_pcie_shadow_control(ctx->pcidev, 1);
/* Disable L1, in case it is enabled */
if (ctx->l1_enabled_in_enum) {
@@ -622,18 +690,54 @@ static int ops_resume(void *handle)
err_suspend_rc:
msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
- pcidev, NULL, PM_OPT_SUSPEND);
+ pcidev, NULL, 0);
err_disable_power:
- if (ctx->gpio_en >= 0)
- gpio_direction_output(ctx->gpio_en, 0);
+ msm_11ad_turn_device_power_off(ctx);
+ return rc;
+}
- if (ctx->sleep_clk_en >= 0)
- gpio_direction_output(ctx->sleep_clk_en, 0);
+static int ops_resume(void *handle, bool device_powered_on)
+{
+ struct msm11ad_ctx *ctx = handle;
+ struct pci_dev *pcidev;
+ int rc;
- msm_11ad_disable_clocks(ctx);
-err_disable_vregs:
- msm_11ad_disable_vregs(ctx);
+ pr_debug("11ad resume: %s\n", __func__);
+ if (!ctx) {
+ pr_err("11ad resume: No context\n");
+ return -ENODEV;
+ }
+ pcidev = ctx->pcidev;
+
+ if (!device_powered_on)
+ return msm_11ad_resume_power_on(handle);
+
+ rc = msm_pcie_pm_control(MSM_PCIE_RESUME, pcidev->bus->number,
+ pcidev, NULL, 0);
+ if (rc) {
+ dev_err(ctx->dev, "msm_pcie_pm_control(RESUME) failed :%d\n",
+ rc);
+ return rc;
+ }
+ pci_set_power_state(pcidev, PCI_D0);
+
+ dev_dbg(ctx->dev, "restore state and enable device\n");
+ pci_load_saved_state(pcidev, ctx->pristine_state);
+ pci_restore_state(pcidev);
+
+ rc = pci_enable_device(pcidev);
+ if (rc) {
+ dev_err(ctx->dev, "pci_enable_device failed (%d)\n", rc);
+ goto out;
+ }
+
+ msm_pcie_shadow_control(pcidev, 1);
+
+ dev_dbg(ctx->dev, "pci set master\n");
+ pci_set_master(pcidev);
+
+out:
return rc;
}
@@ -992,6 +1096,8 @@ static int msm_11ad_probe(struct platform_device *pdev)
return -EINVAL;
}
ctx->use_smmu = of_property_read_bool(of_node, "qcom,smmu-support");
+ ctx->keep_radio_on_during_sleep = of_property_read_bool(of_node,
+ "qcom,keep_radio_on_during_sleep");
ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
@@ -1104,13 +1210,6 @@ static int msm_11ad_probe(struct platform_device *pdev)
}
}
- rc = pci_save_state(pcidev);
- if (rc) {
- dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
- goto out_rc;
- }
- ctx->pristine_state = pci_store_saved_state(pcidev);
-
if (ctx->sleep_clk_en >= 0) {
rc = gpio_request(ctx->sleep_clk_en, "msm_11ad");
if (rc < 0) {
@@ -1146,7 +1245,7 @@ static int msm_11ad_probe(struct platform_device *pdev)
device_disable_async_suspend(&pcidev->dev);
list_add_tail(&ctx->list, &dev_list);
- ops_suspend(ctx);
+ msm_11ad_suspend_power_off(ctx);
return 0;
out_rc:
@@ -1236,6 +1335,17 @@ static void msm_11ad_set_boost_affinity(struct msm11ad_ctx *ctx)
dev_warn(ctx->dev, "failed to set CPU boost affinity\n");
}
+static void msm_11ad_clear_boost_affinity(struct msm11ad_ctx *ctx)
+{
+ int rc;
+
+ irq_modify_status(ctx->pcidev->irq, IRQ_NO_BALANCING, 0);
+ rc = irq_set_affinity_hint(ctx->pcidev->irq, NULL);
+ if (rc)
+ dev_warn(ctx->dev,
+ "Failed clear affinity, rc=%d\n", rc);
+}
+
/* hooks for the wil6210 driver */
static int ops_bus_request(void *handle, u32 kbps /* KBytes/Sec */)
{
@@ -1287,8 +1397,7 @@ static int ops_bus_request(void *handle, u32 kbps /* KBytes/Sec */)
dev_err(ctx->dev,
"Failed disable boost rc=%d\n",
rc);
- irq_modify_status(ctx->pcidev->irq,
- IRQ_NO_BALANCING, 0);
+ msm_11ad_clear_boost_affinity(ctx);
dev_dbg(ctx->dev, "CPU boost disabled\n");
}
ctx->is_cpu_boosted = needs_boost;
@@ -1316,7 +1425,7 @@ static void ops_uninit(void *handle)
memset(&ctx->rops, 0, sizeof(ctx->rops));
ctx->wil_handle = NULL;
- ops_suspend(ctx);
+ msm_11ad_suspend_power_off(ctx);
}
static int msm_11ad_notify_crash(struct msm11ad_ctx *ctx)
@@ -1374,6 +1483,16 @@ static int ops_notify(void *handle, enum wil_platform_event evt)
return rc;
}
+static bool ops_keep_radio_on_during_sleep(void *handle)
+{
+ struct msm11ad_ctx *ctx = (struct msm11ad_ctx *)handle;
+
+ pr_debug("%s: keep radio on during sleep is %s\n", __func__,
+ ctx->keep_radio_on_during_sleep ? "allowed" : "not allowed");
+
+ return ctx->keep_radio_on_during_sleep;
+}
+
void *msm_11ad_dev_init(struct device *dev, struct wil_platform_ops *ops,
const struct wil_platform_rops *rops, void *wil_handle)
{
@@ -1413,6 +1532,7 @@ void *msm_11ad_dev_init(struct device *dev, struct wil_platform_ops *ops,
ops->resume = ops_resume;
ops->uninit = ops_uninit;
ops->notify = ops_notify;
+ ops->keep_radio_on_during_sleep = ops_keep_radio_on_during_sleep;
return ctx;
}
@@ -1429,19 +1549,9 @@ int msm_11ad_modinit(void)
return -EINVAL;
}
- if (ctx->pristine_state) {
- /* in old kernels, pci_load_saved_state() is not exported;
- * so use pci_load_and_free_saved_state()
- * and re-allocate ctx->saved_state again
- */
- pci_load_and_free_saved_state(ctx->pcidev,
- &ctx->pristine_state);
- ctx->pristine_state = pci_store_saved_state(ctx->pcidev);
- }
-
ctx->subsys_handle = subsystem_get(ctx->subsysdesc.name);
- return ops_resume(ctx);
+ return msm_11ad_resume_power_on(ctx);
}
EXPORT_SYMBOL(msm_11ad_modinit);
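ops_suspend()/ops_resume() now take a flag, and the new keep_radio_on_during_sleep hook lets the wil6210 driver decide whether to keep the device powered (D3hot path) or power it off across sleep. A hedged sketch of the consuming side; the ops struct and handle are the ones returned by msm_11ad_dev_init(), while the wrapper function and its name are invented:

/* Illustrative: pick the suspend flavor based on the platform hook. */
static int wil_platform_suspend_example(struct wil_platform_ops *ops,
                                        void *handle)
{
        bool keep_power = false;

        if (ops->keep_radio_on_during_sleep)
                keep_power = ops->keep_radio_on_during_sleep(handle);

        /*
         * keep_power == true  -> ops_suspend() saves PCI state and enters D3hot;
         * keep_power == false -> msm_11ad_suspend_power_off() cuts power.
         */
        return ops->suspend ? ops->suspend(handle, keep_power) : 0;
}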
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 5983b5c..7e6a4e8 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -219,6 +219,12 @@ enum slope_limit_status {
SLOPE_LIMIT_NUM_COEFFS,
};
+enum esr_timer_config {
+ TIMER_RETRY = 0,
+ TIMER_MAX,
+ NUM_ESR_TIMERS,
+};
+
/* DT parameters for FG device */
struct fg_dt_props {
bool force_load_profile;
@@ -234,9 +240,9 @@ struct fg_dt_props {
int recharge_soc_thr;
int recharge_volt_thr_mv;
int rsense_sel;
- int esr_timer_charging;
- int esr_timer_awake;
- int esr_timer_asleep;
+ int esr_timer_charging[NUM_ESR_TIMERS];
+ int esr_timer_awake[NUM_ESR_TIMERS];
+ int esr_timer_asleep[NUM_ESR_TIMERS];
int rconn_mohms;
int esr_clamp_mohms;
int cl_start_soc;
@@ -385,6 +391,7 @@ struct fg_chip {
int maint_soc;
int delta_soc;
int last_msoc;
+ int esr_timer_charging_default[NUM_ESR_TIMERS];
enum slope_limit_status slope_limit_sts;
bool profile_available;
bool profile_loaded;
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 27047b4..e3ecf49 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -1025,12 +1025,15 @@ static inline void get_esr_meas_current(int curr_ma, u8 *val)
*val <<= ESR_PULL_DOWN_IVAL_SHIFT;
}
-static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
- int flags)
+static int fg_set_esr_timer(struct fg_chip *chip, int cycles_init,
+ int cycles_max, bool charging, int flags)
{
u8 buf[2];
int rc, timer_max, timer_init;
+ if (cycles_init < 0 || cycles_max < 0)
+ return 0;
+
if (charging) {
timer_max = FG_SRAM_ESR_TIMER_CHG_MAX;
timer_init = FG_SRAM_ESR_TIMER_CHG_INIT;
@@ -1039,7 +1042,7 @@ static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
timer_init = FG_SRAM_ESR_TIMER_DISCHG_INIT;
}
- fg_encode(chip->sp, timer_max, cycles, buf);
+ fg_encode(chip->sp, timer_max, cycles_max, buf);
rc = fg_sram_write(chip,
chip->sp[timer_max].addr_word,
chip->sp[timer_max].addr_byte, buf,
@@ -1050,7 +1053,7 @@ static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
return rc;
}
- fg_encode(chip->sp, timer_init, cycles, buf);
+ fg_encode(chip->sp, timer_init, cycles_init, buf);
rc = fg_sram_write(chip,
chip->sp[timer_init].addr_word,
chip->sp[timer_init].addr_byte, buf,
@@ -1061,6 +1064,8 @@ static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
return rc;
}
+ fg_dbg(chip, FG_STATUS, "esr_%s_timer set to %d/%d\n",
+ charging ? "charging" : "discharging", cycles_init, cycles_max);
return 0;
}
@@ -2039,6 +2044,50 @@ static int fg_esr_fcc_config(struct fg_chip *chip)
return 0;
}
+static int fg_esr_timer_config(struct fg_chip *chip, bool sleep)
+{
+ int rc, cycles_init, cycles_max;
+ bool end_of_charge = false;
+
+ end_of_charge = is_input_present(chip) && chip->charge_done;
+ fg_dbg(chip, FG_STATUS, "sleep: %d eoc: %d\n", sleep, end_of_charge);
+
+ /* ESR discharging timer configuration */
+ cycles_init = sleep ? chip->dt.esr_timer_asleep[TIMER_RETRY] :
+ chip->dt.esr_timer_awake[TIMER_RETRY];
+ if (end_of_charge)
+ cycles_init = 0;
+
+ cycles_max = sleep ? chip->dt.esr_timer_asleep[TIMER_MAX] :
+ chip->dt.esr_timer_awake[TIMER_MAX];
+
+ rc = fg_set_esr_timer(chip, cycles_init, cycles_max, false,
+ sleep ? FG_IMA_NO_WLOCK : FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in setting ESR timer, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* ESR charging timer configuration */
+ cycles_init = cycles_max = -EINVAL;
+ if (end_of_charge || sleep) {
+ cycles_init = chip->dt.esr_timer_charging[TIMER_RETRY];
+ cycles_max = chip->dt.esr_timer_charging[TIMER_MAX];
+ } else if (is_input_present(chip)) {
+ cycles_init = chip->esr_timer_charging_default[TIMER_RETRY];
+ cycles_max = chip->esr_timer_charging_default[TIMER_MAX];
+ }
+
+ rc = fg_set_esr_timer(chip, cycles_init, cycles_max, true,
+ sleep ? FG_IMA_NO_WLOCK : FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in setting ESR timer, rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
static void fg_batt_avg_update(struct fg_chip *chip)
{
if (chip->charge_status == chip->prev_charge_status)
@@ -2112,6 +2161,10 @@ static void status_change_work(struct work_struct *work)
if (rc < 0)
pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc);
+ rc = fg_esr_timer_config(chip, false);
+ if (rc < 0)
+ pr_err("Error in configuring ESR timer, rc=%d\n", rc);
+
rc = fg_get_battery_temp(chip, &batt_temp);
if (!rc) {
rc = fg_slope_limit_config(chip, batt_temp);
@@ -3115,6 +3168,8 @@ static const struct power_supply_desc fg_psy_desc = {
/* INIT FUNCTIONS STAY HERE */
+#define DEFAULT_ESR_CHG_TIMER_RETRY 8
+#define DEFAULT_ESR_CHG_TIMER_MAX 16
static int fg_hw_init(struct fg_chip *chip)
{
int rc;
@@ -3283,22 +3338,29 @@ static int fg_hw_init(struct fg_chip *chip)
return rc;
}
- if (chip->dt.esr_timer_charging > 0) {
- rc = fg_set_esr_timer(chip, chip->dt.esr_timer_charging, true,
- FG_IMA_DEFAULT);
- if (rc < 0) {
- pr_err("Error in setting ESR timer, rc=%d\n", rc);
- return rc;
- }
+ if (chip->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) {
+ chip->esr_timer_charging_default[TIMER_RETRY] =
+ DEFAULT_ESR_CHG_TIMER_RETRY;
+ chip->esr_timer_charging_default[TIMER_MAX] =
+ DEFAULT_ESR_CHG_TIMER_MAX;
+ } else {
+ /* We don't need this for pm660 at present */
+ chip->esr_timer_charging_default[TIMER_RETRY] = -EINVAL;
+ chip->esr_timer_charging_default[TIMER_MAX] = -EINVAL;
}
- if (chip->dt.esr_timer_awake > 0) {
- rc = fg_set_esr_timer(chip, chip->dt.esr_timer_awake, false,
- FG_IMA_DEFAULT);
- if (rc < 0) {
- pr_err("Error in setting ESR timer, rc=%d\n", rc);
- return rc;
- }
+ rc = fg_set_esr_timer(chip, chip->dt.esr_timer_charging[TIMER_RETRY],
+ chip->dt.esr_timer_charging[TIMER_MAX], true, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in setting ESR timer, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_set_esr_timer(chip, chip->dt.esr_timer_awake[TIMER_RETRY],
+ chip->dt.esr_timer_awake[TIMER_MAX], false, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in setting ESR timer, rc=%d\n", rc);
+ return rc;
}
if (chip->cyc_ctr.en)
@@ -3778,6 +3840,32 @@ static int fg_register_interrupts(struct fg_chip *chip)
return 0;
}
+static int fg_parse_dt_property_u32_array(struct device_node *node,
+ const char *prop_name, int *buf, int len)
+{
+ int rc;
+
+ rc = of_property_count_elems_of_size(node, prop_name, sizeof(u32));
+ if (rc < 0) {
+ if (rc == -EINVAL)
+ return 0;
+ else
+ return rc;
+ } else if (rc != len) {
+ pr_err("Incorrect length %d for %s, rc=%d\n", len, prop_name,
+ rc);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_array(node, prop_name, buf, len);
+ if (rc < 0) {
+ pr_err("Error in reading %s, rc=%d\n", prop_name, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
static int fg_parse_slope_limit_coefficients(struct fg_chip *chip)
{
struct device_node *node = chip->dev->of_node;
@@ -3788,17 +3876,10 @@ static int fg_parse_slope_limit_coefficients(struct fg_chip *chip)
if (rc < 0)
return 0;
- rc = of_property_count_elems_of_size(node, "qcom,slope-limit-coeffs",
- sizeof(u32));
- if (rc != SLOPE_LIMIT_NUM_COEFFS)
- return -EINVAL;
-
- rc = of_property_read_u32_array(node, "qcom,slope-limit-coeffs",
- chip->dt.slope_limit_coeffs, SLOPE_LIMIT_NUM_COEFFS);
- if (rc < 0) {
- pr_err("Error in reading qcom,slope-limit-coeffs, rc=%d\n", rc);
+ rc = fg_parse_dt_property_u32_array(node, "qcom,slope-limit-coeffs",
+ chip->dt.slope_limit_coeffs, SLOPE_LIMIT_NUM_COEFFS);
+ if (rc < 0)
return rc;
- }
for (i = 0; i < SLOPE_LIMIT_NUM_COEFFS; i++) {
if (chip->dt.slope_limit_coeffs[i] > SLOPE_LIMIT_COEFF_MAX ||
@@ -3817,44 +3898,20 @@ static int fg_parse_ki_coefficients(struct fg_chip *chip)
struct device_node *node = chip->dev->of_node;
int rc, i;
- rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-soc-dischg",
- sizeof(u32));
- if (rc != KI_COEFF_SOC_LEVELS)
- return 0;
-
- rc = of_property_read_u32_array(node, "qcom,ki-coeff-soc-dischg",
- chip->dt.ki_coeff_soc, KI_COEFF_SOC_LEVELS);
- if (rc < 0) {
- pr_err("Error in reading ki-coeff-soc-dischg, rc=%d\n",
- rc);
+ rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-soc-dischg",
+ chip->dt.ki_coeff_soc, KI_COEFF_SOC_LEVELS);
+ if (rc < 0)
return rc;
- }
- rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-med-dischg",
- sizeof(u32));
- if (rc != KI_COEFF_SOC_LEVELS)
- return 0;
-
- rc = of_property_read_u32_array(node, "qcom,ki-coeff-med-dischg",
- chip->dt.ki_coeff_med_dischg, KI_COEFF_SOC_LEVELS);
- if (rc < 0) {
- pr_err("Error in reading ki-coeff-med-dischg, rc=%d\n",
- rc);
+ rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-med-dischg",
+ chip->dt.ki_coeff_med_dischg, KI_COEFF_SOC_LEVELS);
+ if (rc < 0)
return rc;
- }
- rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-hi-dischg",
- sizeof(u32));
- if (rc != KI_COEFF_SOC_LEVELS)
- return 0;
-
- rc = of_property_read_u32_array(node, "qcom,ki-coeff-hi-dischg",
- chip->dt.ki_coeff_hi_dischg, KI_COEFF_SOC_LEVELS);
- if (rc < 0) {
- pr_err("Error in reading ki-coeff-hi-dischg, rc=%d\n",
- rc);
+ rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-hi-dischg",
+ chip->dt.ki_coeff_hi_dischg, KI_COEFF_SOC_LEVELS);
+ if (rc < 0)
return rc;
- }
for (i = 0; i < KI_COEFF_SOC_LEVELS; i++) {
if (chip->dt.ki_coeff_soc[i] < 0 ||
@@ -4099,23 +4156,26 @@ static int fg_parse_dt(struct fg_chip *chip)
rc);
}
- rc = of_property_read_u32(node, "qcom,fg-esr-timer-charging", &temp);
- if (rc < 0)
- chip->dt.esr_timer_charging = -EINVAL;
- else
- chip->dt.esr_timer_charging = temp;
+ rc = fg_parse_dt_property_u32_array(node, "qcom,fg-esr-timer-charging",
+ chip->dt.esr_timer_charging, NUM_ESR_TIMERS);
+ if (rc < 0) {
+ chip->dt.esr_timer_charging[TIMER_RETRY] = -EINVAL;
+ chip->dt.esr_timer_charging[TIMER_MAX] = -EINVAL;
+ }
- rc = of_property_read_u32(node, "qcom,fg-esr-timer-awake", &temp);
- if (rc < 0)
- chip->dt.esr_timer_awake = -EINVAL;
- else
- chip->dt.esr_timer_awake = temp;
+ rc = fg_parse_dt_property_u32_array(node, "qcom,fg-esr-timer-awake",
+ chip->dt.esr_timer_awake, NUM_ESR_TIMERS);
+ if (rc < 0) {
+ chip->dt.esr_timer_awake[TIMER_RETRY] = -EINVAL;
+ chip->dt.esr_timer_awake[TIMER_MAX] = -EINVAL;
+ }
- rc = of_property_read_u32(node, "qcom,fg-esr-timer-asleep", &temp);
- if (rc < 0)
- chip->dt.esr_timer_asleep = -EINVAL;
- else
- chip->dt.esr_timer_asleep = temp;
+ rc = fg_parse_dt_property_u32_array(node, "qcom,fg-esr-timer-asleep",
+ chip->dt.esr_timer_asleep, NUM_ESR_TIMERS);
+ if (rc < 0) {
+ chip->dt.esr_timer_asleep[TIMER_RETRY] = -EINVAL;
+ chip->dt.esr_timer_asleep[TIMER_MAX] = -EINVAL;
+ }
chip->cyc_ctr.en = of_property_read_bool(node, "qcom,cycle-counter-en");
if (chip->cyc_ctr.en)
@@ -4453,15 +4513,9 @@ static int fg_gen3_suspend(struct device *dev)
struct fg_chip *chip = dev_get_drvdata(dev);
int rc;
- if (chip->dt.esr_timer_awake > 0 && chip->dt.esr_timer_asleep > 0) {
- rc = fg_set_esr_timer(chip, chip->dt.esr_timer_asleep, false,
- FG_IMA_NO_WLOCK);
- if (rc < 0) {
- pr_err("Error in setting ESR timer during suspend, rc=%d\n",
- rc);
- return rc;
- }
- }
+ rc = fg_esr_timer_config(chip, true);
+ if (rc < 0)
+ pr_err("Error in configuring ESR timer, rc=%d\n", rc);
cancel_delayed_work_sync(&chip->batt_avg_work);
if (fg_sram_dump)
@@ -4474,15 +4528,9 @@ static int fg_gen3_resume(struct device *dev)
struct fg_chip *chip = dev_get_drvdata(dev);
int rc;
- if (chip->dt.esr_timer_awake > 0 && chip->dt.esr_timer_asleep > 0) {
- rc = fg_set_esr_timer(chip, chip->dt.esr_timer_awake, false,
- FG_IMA_DEFAULT);
- if (rc < 0) {
- pr_err("Error in setting ESR timer during resume, rc=%d\n",
- rc);
- return rc;
- }
- }
+ rc = fg_esr_timer_config(chip, false);
+ if (rc < 0)
+ pr_err("Error in configuring ESR timer, rc=%d\n", rc);
fg_circ_buf_clr(&chip->ibatt_circ_buf);
fg_circ_buf_clr(&chip->vbatt_circ_buf);
diff --git a/drivers/regulator/rpmh-regulator.c b/drivers/regulator/rpmh-regulator.c
index 2987ed2..4f5f86c 100644
--- a/drivers/regulator/rpmh-regulator.c
+++ b/drivers/regulator/rpmh-regulator.c
@@ -393,10 +393,15 @@ static void rpmh_regulator_handle_arc_enable(struct rpmh_aggr_vreg *aggr_vreg,
* Mask the voltage level if "off" level is supported and the regulator
* has not been enabled.
*/
- if (aggr_vreg->level[0] == RPMH_REGULATOR_LEVEL_OFF &&
- (!(req->valid & BIT(RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE)) ||
- !req->reg[RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE]))
- req->reg[RPMH_REGULATOR_REG_ARC_LEVEL] = 0;
+ if (aggr_vreg->level[0] == RPMH_REGULATOR_LEVEL_OFF) {
+ if (req->valid & BIT(RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE)) {
+ if (!req->reg[RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE])
+ req->reg[RPMH_REGULATOR_REG_ARC_LEVEL] = 0;
+ } else {
+ /* Invalidate voltage level if enable is invalid. */
+ req->valid &= ~BIT(RPMH_REGULATOR_REG_ARC_LEVEL);
+ }
+ }
/*
* Mark the pseudo enable bit as invalid so that it is not accidentally
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c2ac982..967bb0d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2792,10 +2792,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (sdkp->opt_xfer_blocks &&
sdkp->opt_xfer_blocks <= dev_max &&
sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
- logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
- q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
- rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
- } else
+ sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
+ rw_max = q->limits.io_opt =
+ sdkp->opt_xfer_blocks * sdp->sector_size;
+ else
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index c8d9863..4446ed2 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -151,11 +151,6 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo
return blocks << (ilog2(sdev->sector_size) - 9);
}
-static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
-{
- return blocks * sdev->sector_size;
-}
-
/*
* Look up the DIX operation based on whether the command is read or
* write and whether dix and dif are enabled.
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 6418c11..77ba414 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -381,6 +381,8 @@ static int ufshcd_disable_clocks(struct ufs_hba *hba,
bool is_gating_context);
static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
bool is_gating_context);
+static void ufshcd_hold_all(struct ufs_hba *hba);
+static void ufshcd_release_all(struct ufs_hba *hba);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
@@ -2055,6 +2057,22 @@ static void ufshcd_hibern8_enter_work(struct work_struct *work)
return;
}
+static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba,
+ unsigned long delay_ms)
+{
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold_all(hba);
+ ufshcd_scsi_block_requests(hba);
+ down_write(&hba->lock);
+ /* wait for all the outstanding requests to finish */
+ ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+ ufshcd_set_auto_hibern8_timer(hba, delay_ms);
+ up_write(&hba->lock);
+ ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release_all(hba);
+ pm_runtime_put_sync(hba->dev);
+}
+
static void ufshcd_hibern8_exit_work(struct work_struct *work)
{
int ret;
@@ -2106,19 +2124,32 @@ static ssize_t ufshcd_hibern8_on_idle_delay_store(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned long flags, value;
+ bool change = true;
if (kstrtoul(buf, 0, &value))
return -EINVAL;
spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->hibern8_on_idle.delay_ms == value)
+ change = false;
+
+ if (value >= hba->clk_gating.delay_ms_pwr_save ||
+ value >= hba->clk_gating.delay_ms_perf) {
+ dev_err(hba->dev, "hibern8_on_idle_delay (%lu) must be less than both clkgate_delay_ms_pwr_save (%lu) and clkgate_delay_ms_perf (%lu)\n",
+ value, hba->clk_gating.delay_ms_pwr_save,
+ hba->clk_gating.delay_ms_perf);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return -EINVAL;
+ }
+
hba->hibern8_on_idle.delay_ms = value;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Update auto hibern8 timer value if supported */
- if (ufshcd_is_auto_hibern8_supported(hba) &&
+ if (change && ufshcd_is_auto_hibern8_supported(hba) &&
hba->hibern8_on_idle.is_enabled)
- ufshcd_set_auto_hibern8_timer(hba,
- hba->hibern8_on_idle.delay_ms);
+ __ufshcd_set_auto_hibern8_timer(hba,
+ hba->hibern8_on_idle.delay_ms);
return count;
}
@@ -2148,7 +2179,7 @@ static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev,
/* Update auto hibern8 timer value if supported */
if (ufshcd_is_auto_hibern8_supported(hba)) {
- ufshcd_set_auto_hibern8_timer(hba,
+ __ufshcd_set_auto_hibern8_timer(hba,
value ? hba->hibern8_on_idle.delay_ms : value);
goto update;
}
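The ufshcd hunks above do two things: they introduce a __ufshcd_set_auto_hibern8_timer() wrapper that quiesces the host (runtime-PM get, clock/hibern8 hold, SCSI request blocking, doorbell drain) before reprogramming the timer, and they reject any hibern8-on-idle delay that is not strictly below both clock-gating delays, presumably so auto-hibern8 always triggers before the clocks are gated. A standalone sketch of just the acceptance rule, with made-up millisecond values:

#include <stdio.h>

/* accepted iff value is below both clock-gating delays, mirroring the new sysfs check */
static int hibern8_delay_ok(unsigned long value_ms, unsigned long pwr_save_ms,
			    unsigned long perf_ms)
{
	return value_ms < pwr_save_ms && value_ms < perf_ms;
}

int main(void)
{
	printf("%d\n", hibern8_delay_ok(10, 20, 50));	/* 1: stored and timer updated */
	printf("%d\n", hibern8_delay_ok(20, 20, 50));	/* 0: rejected with -EINVAL */
	return 0;
}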
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 77ccc39..c61a753 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -1289,8 +1289,8 @@ static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
- if (hba->var && hba->var->vops && hba->var->vops->apply_dev_quirks)
- return hba->var->vops->apply_dev_quirks(hba);
+ if (hba->var && hba->var->vops && hba->var->vops->suspend)
+ return hba->var->vops->suspend(hba, op);
return 0;
}
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index 1455069..92dbd48 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -26,6 +26,7 @@
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/workqueue.h>
+#include <linux/power_supply.h>
#define EUD_ENABLE_CMD 1
#define EUD_DISABLE_CMD 0
@@ -87,15 +88,52 @@ static struct platform_device *eud_private;
static void enable_eud(struct platform_device *pdev)
{
struct eud_chip *priv = platform_get_drvdata(pdev);
+ struct power_supply *usb_psy = NULL;
+ union power_supply_propval pval = {0};
+ union power_supply_propval tval = {0};
+ int ret;
- /* write into CSR to enable EUD */
- writel_relaxed(BIT(0), priv->eud_reg_base + EUD_REG_CSR_EUD_EN);
- /* Enable vbus, chgr & safe mode warning interrupts */
- writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR | EUD_INT_SAFE_MODE,
- priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
+ usb_psy = power_supply_get_by_name("usb");
+ if (!usb_psy) {
+ dev_warn(&pdev->dev, "Could not get usb power_supply\n");
+ return;
+ }
- /* Ensure Register Writes Complete */
- wmb();
+ ret = power_supply_get_property(usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to read USB PRESENT: %x\n", ret);
+ return;
+ }
+
+ ret = power_supply_get_property(usb_psy,
+ POWER_SUPPLY_PROP_REAL_TYPE, &tval);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to read USB TYPE: %x\n", ret);
+ return;
+ }
+
+ if (pval.intval && (tval.intval == POWER_SUPPLY_TYPE_USB ||
+ tval.intval == POWER_SUPPLY_TYPE_USB_CDP)) {
+ /* write into CSR to enable EUD */
+ writel_relaxed(BIT(0), priv->eud_reg_base + EUD_REG_CSR_EUD_EN);
+ /* Enable vbus, chgr & safe mode warning interrupts */
+ writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR | EUD_INT_SAFE_MODE,
+ priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
+
+ /* Ensure Register Writes Complete */
+ wmb();
+
+ /*
+ * Set the default cable state to usb connect and charger
+ * enable
+ */
+ extcon_set_state_sync(priv->extcon, EXTCON_USB, true);
+ extcon_set_state_sync(priv->extcon, EXTCON_CHG_USB_SDP, true);
+ } else {
+ dev_warn(&pdev->dev, "Connect USB cable before enabling EUD\n");
+ return;
+ }
dev_dbg(&pdev->dev, "%s: EUD Enabled!\n", __func__);
}
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index df0c609c..26e2899 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -276,6 +276,10 @@ static int pil_mss_loadable_init(struct modem_data *drv,
if (!res) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"restart_reg_sec");
+ if (!res) {
+ dev_err(&pdev->dev, "No restart register defined\n");
+ return -ENOMEM;
+ }
q6->restart_reg_sec = true;
}
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 5ca0fe5..306510f 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -34,17 +34,21 @@
#define RPMH_MAX_REQ_IN_BATCH 10
#define DEFINE_RPMH_MSG_ONSTACK(rc, s, q, c, name) \
- struct rpmh_msg name = { \
- .msg = { 0 }, \
- .msg.state = s, \
- .msg.is_complete = true, \
- .msg.payload = name.cmd, \
- .msg.num_payload = 0, \
- .cmd = { { 0 } }, \
- .waitq = q, \
- .wait_count = c, \
- .rc = rc, \
- .bit = -1, \
+ struct rpmh_msg name = { \
+ .msg = { \
+ .state = s, \
+ .payload = name.cmd, \
+ .num_payload = 0, \
+ .is_read = false, \
+ .is_control = false, \
+ .is_complete = true, \
+ .invalidate = false, \
+ }, \
+ .cmd = { { 0 } }, \
+ .completion = q, \
+ .wait_count = c, \
+ .rc = rc, \
+ .bit = -1, \
}
struct rpmh_req {
@@ -57,7 +61,7 @@ struct rpmh_req {
struct rpmh_msg {
struct tcs_mbox_msg msg;
struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
- wait_queue_head_t *waitq;
+ struct completion *completion;
atomic_t *wait_count;
struct rpmh_client *rc;
int bit;
@@ -106,21 +110,31 @@ static struct rpmh_msg *get_msg_from_pool(struct rpmh_client *rc)
return msg;
}
+static void free_msg_to_pool(struct rpmh_msg *rpm_msg)
+{
+ struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
+ unsigned long flags;
+
+ /* If we allocated the pool, set it as available */
+ if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
+ spin_lock_irqsave(&rpm->lock, flags);
+ bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
+ spin_unlock_irqrestore(&rpm->lock, flags);
+ }
+}
+
static void rpmh_rx_cb(struct mbox_client *cl, void *msg)
{
struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
atomic_dec(rpm_msg->wait_count);
- wake_up(rpm_msg->waitq);
}
static void rpmh_tx_done(struct mbox_client *cl, void *msg, int r)
{
struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
- struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
atomic_t *wc = rpm_msg->wait_count;
- wait_queue_head_t *waitq = rpm_msg->waitq;
- unsigned long flags;
+ struct completion *compl = rpm_msg->completion;
rpm_msg->err = r;
@@ -144,18 +158,12 @@ static void rpmh_tx_done(struct mbox_client *cl, void *msg, int r)
* into an issue that the stack allocated parent object may be
* invalid before we can check the ->bit value.
*/
-
- /* If we allocated the pool, set it as available */
- if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
- spin_lock_irqsave(&rpm->lock, flags);
- bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
- spin_unlock_irqrestore(&rpm->lock, flags);
- }
+ free_msg_to_pool(rpm_msg);
/* Signal the blocking thread we are done */
if (wc && atomic_dec_and_test(wc))
- if (waitq)
- wake_up(waitq);
+ if (compl)
+ complete(compl);
}
static struct rpmh_req *__find_req(struct rpmh_client *rc, u32 addr)
@@ -312,9 +320,9 @@ EXPORT_SYMBOL(rpmh_write_single_async);
int rpmh_write_single(struct rpmh_client *rc, enum rpmh_state state,
u32 addr, u32 data)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+ DECLARE_COMPLETION_ONSTACK(compl);
atomic_t wait_count = ATOMIC_INIT(1);
- DEFINE_RPMH_MSG_ONSTACK(rc, state, &waitq, &wait_count, rpm_msg);
+ DEFINE_RPMH_MSG_ONSTACK(rc, state, &compl, &wait_count, rpm_msg);
int ret;
if (IS_ERR_OR_NULL(rc))
@@ -333,7 +341,7 @@ int rpmh_write_single(struct rpmh_client *rc, enum rpmh_state state,
if (ret < 0)
return ret;
- wait_event(waitq, atomic_read(&wait_count) == 0);
+ wait_for_completion(&compl);
return rpm_msg.err;
}
@@ -408,9 +416,9 @@ EXPORT_SYMBOL(rpmh_write_async);
int rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
struct tcs_cmd *cmd, int n)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+ DECLARE_COMPLETION_ONSTACK(compl);
atomic_t wait_count = ATOMIC_INIT(1);
- DEFINE_RPMH_MSG_ONSTACK(rc, state, &waitq, &wait_count, rpm_msg);
+ DEFINE_RPMH_MSG_ONSTACK(rc, state, &compl, &wait_count, rpm_msg);
int ret;
if (IS_ERR_OR_NULL(rc) || !cmd || n <= 0 || n > MAX_RPMH_PAYLOAD)
@@ -428,7 +436,7 @@ int rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
if (ret)
return ret;
- wait_event(waitq, atomic_read(&wait_count) == 0);
+ wait_for_completion(&compl);
return rpm_msg.err;
}
@@ -454,7 +462,7 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
struct tcs_cmd *cmd, int *n)
{
struct rpmh_msg *rpm_msg[RPMH_MAX_REQ_IN_BATCH];
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+ DECLARE_COMPLETION_ONSTACK(compl);
atomic_t wait_count = ATOMIC_INIT(0); /* overwritten */
int count = 0;
int ret, i, j, k;
@@ -507,9 +515,8 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
for (i = 0; i < count; i++) {
rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i]);
if (IS_ERR_OR_NULL(rpm_msg[i])) {
- /* Clean up our call by spoofing tx_done */
for (j = 0 ; j < i; j++)
- rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, 0);
+ free_msg_to_pool(rpm_msg[j]);
return PTR_ERR(rpm_msg[i]);
}
cmd += n[i];
@@ -520,7 +527,7 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
might_sleep();
atomic_set(&wait_count, count);
for (i = 0; i < count; i++) {
- rpm_msg[i]->waitq = &waitq;
+ rpm_msg[i]->completion = &compl;
rpm_msg[i]->wait_count = &wait_count;
/* Bypass caching and write to mailbox directly */
ret = mbox_send_message(rc->chan, &rpm_msg[i]->msg);
@@ -530,15 +537,17 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
break;
}
}
- wait_event(waitq, atomic_read(&wait_count) == (count - i));
+ /* For those unsent requests, spoof tx_done */
+ for (j = i; j < count; j++)
+ rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, ret);
+ wait_for_completion(&compl);
} else {
/* Send Sleep requests to the controller, expect no response */
for (i = 0; i < count; i++) {
- rpm_msg[i]->waitq = NULL;
+ rpm_msg[i]->completion = NULL;
ret = mbox_send_controller_data(rc->chan,
&rpm_msg[i]->msg);
- /* Clean up our call by spoofing tx_done */
- rpmh_tx_done(&rc->client, &rpm_msg[i]->msg, ret);
+ free_msg_to_pool(rpm_msg[i]);
}
return 0;
}
@@ -660,10 +669,10 @@ EXPORT_SYMBOL(rpmh_invalidate);
int rpmh_read(struct rpmh_client *rc, u32 addr, u32 *resp)
{
int ret;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+ DECLARE_COMPLETION_ONSTACK(compl);
atomic_t wait_count = ATOMIC_INIT(2); /* wait for rx_cb and tx_done */
DEFINE_RPMH_MSG_ONSTACK(rc, RPMH_ACTIVE_ONLY_STATE,
- &waitq, &wait_count, rpm_msg);
+ &compl, &wait_count, rpm_msg);
if (IS_ERR_OR_NULL(rc) || !resp)
return -EINVAL;
@@ -684,7 +693,7 @@ int rpmh_read(struct rpmh_client *rc, u32 addr, u32 *resp)
return ret;
/* Wait until the response is received from RPMH */
- wait_event(waitq, atomic_read(&wait_count) == 0);
+ wait_for_completion(&compl);
/* Read the data back from the tcs_mbox_msg structure */
*resp = rpm_msg.cmd[0].data;
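The rpmh.c rework above swaps the on-stack wait queue and open-coded wake condition for a struct completion that only the last responder signals; tx_done (and rx_cb for reads) share an atomic wait_count, and free_msg_to_pool() now returns pool entries without spoofing a tx_done. A rough user-space analogue of that hand-off using POSIX threads rather than kernel primitives; the names and the count of two callbacks are illustrative, not the driver's.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion req_done = { PTHREAD_MUTEX_INITIALIZER,
				      PTHREAD_COND_INITIALIZER, 0 };
static atomic_int wait_count = 2;	/* e.g. one tx_done plus one rx_cb */

static void *callback(void *arg)
{
	(void)arg;
	/* only the last responder signals the blocked requester */
	if (atomic_fetch_sub(&wait_count, 1) == 1)
		complete(&req_done);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, callback, NULL);
	pthread_create(&b, NULL, callback, NULL);
	wait_for_completion(&req_done);
	printf("both callbacks ran, request finished\n");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}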
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index d951abb..a50e327 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -1588,6 +1588,7 @@ static void usbpd_sm(struct work_struct *w)
pd->vconn_enabled = false;
}
+ reset_vdm_state(pd);
if (pd->current_dr == DR_UFP)
stop_usb_peripheral(pd);
else if (pd->current_dr == DR_DFP)
@@ -1596,8 +1597,6 @@ static void usbpd_sm(struct work_struct *w)
pd->current_pr = PR_NONE;
pd->current_dr = DR_NONE;
- reset_vdm_state(pd);
-
if (pd->current_state == PE_ERROR_RECOVERY)
/* forced disconnect, wait before resetting to DRP */
usleep_range(ERROR_RECOVERY_TIME * USEC_PER_MSEC,
diff --git a/include/dt-bindings/clock/mdss-10nm-pll-clk.h b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
index c1350ce..75ddcfa 100644
--- a/include/dt-bindings/clock/mdss-10nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
@@ -34,4 +34,12 @@
#define PCLK_SRC_MUX_1_CLK 15
#define PCLK_SRC_1_CLK 16
#define PCLK_MUX_1_CLK 17
+
+/* DP PLL clocks */
+#define DP_VCO_CLK 0
+#define DP_LINK_CLK_DIVSEL_TEN 1
+#define DP_VCO_DIVIDED_TWO_CLK_SRC 2
+#define DP_VCO_DIVIDED_FOUR_CLK_SRC 3
+#define DP_VCO_DIVIDED_SIX_CLK_SRC 4
+#define DP_VCO_DIVIDED_CLK_SRC_MUX 5
#endif
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index be2210c..9d52d2e 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -43,6 +43,7 @@
#define MSM_BUS_FAB_MC_VIRT 6151
#define MSM_BUS_FAB_MEM_NOC 6152
#define MSM_BUS_FAB_IPA_VIRT 6153
+#define MSM_BUS_FAB_CAMNOC_VIRT 6154
#define MSM_BUS_FAB_MC_VIRT_DISPLAY 26000
#define MSM_BUS_FAB_MEM_NOC_DISPLAY 26001
@@ -236,7 +237,7 @@
#define MSM_BUS_MASTER_MNOC_SF_MEM_NOC 133
#define MSM_BUS_MASTER_SNOC_GC_MEM_NOC 134
#define MSM_BUS_MASTER_SNOC_SF_MEM_NOC 135
-#define MSM_BUS_MASTER_CAMNOC_HF 136
+#define MSM_BUS_MASTER_CAMNOC_HF0 136
#define MSM_BUS_MASTER_CAMNOC_SF 137
#define MSM_BUS_MASTER_VIDEO_PROC 138
#define MSM_BUS_MASTER_GNOC_SNOC 139
@@ -245,7 +246,11 @@
#define MSM_BUS_MASTER_MEM_NOC_SNOC 142
#define MSM_BUS_MASTER_IPA_CORE 143
#define MSM_BUS_MASTER_ALC 144
-#define MSM_BUS_MASTER_MASTER_LAST 145
+#define MSM_BUS_MASTER_CAMNOC_HF1 145
+#define MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP 146
+#define MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP 147
+#define MSM_BUS_MASTER_CAMNOC_SF_UNCOMP 148
+#define MSM_BUS_MASTER_MASTER_LAST 149
#define MSM_BUS_MASTER_LLCC_DISPLAY 20000
#define MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001
@@ -584,7 +589,8 @@
#define MSM_BUS_SLAVE_SNOC_MEM_NOC_SF 775
#define MSM_BUS_SLAVE_MEM_NOC_SNOC 776
#define MSM_BUS_SLAVE_IPA 777
-#define MSM_BUS_SLAVE_LAST 778
+#define MSM_BUS_SLAVE_CAMNOC_UNCOMP 778
+#define MSM_BUS_SLAVE_LAST 779
#define MSM_BUS_SLAVE_EBI_CH0_DISPLAY 20512
#define MSM_BUS_SLAVE_LLCC_DISPLAY 20513
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index b1f2d00..8ee110a 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -277,6 +277,8 @@ struct regulator;
regulator
* @level_votes: array of votes for each level
* @num_levels: specifies the size of level_votes array
+ * @skip_handoff: do not vote for the max possible voltage during init
+ * @use_max_uV: use INT_MAX for max_uV when calling regulator_set_voltage
* @cur_level: the currently set voltage level
* @lock: lock to protect this struct
*/
@@ -288,6 +290,8 @@ struct clk_vdd_class {
int *vdd_uv;
int *level_votes;
int num_levels;
+ bool skip_handoff;
+ bool use_max_uV;
unsigned long cur_level;
struct mutex lock;
};
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e3d181e..7bdddb3 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -244,6 +244,8 @@ struct iommu_ops {
void (*tlbi_domain)(struct iommu_domain *domain);
int (*enable_config_clocks)(struct iommu_domain *domain);
void (*disable_config_clocks)(struct iommu_domain *domain);
+ uint64_t (*iova_to_pte)(struct iommu_domain *domain,
+ dma_addr_t iova);
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
@@ -331,6 +333,9 @@ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t offset, u64 size,
int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
+
+extern uint64_t iommu_iova_to_pte(struct iommu_domain *domain,
+ dma_addr_t iova);
/**
* report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
* @domain: the iommu domain where the fault has happened
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 959414b..227b1e2 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -232,6 +232,7 @@ extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
bool lock_needed);
extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
bool lock_needed, bool is_cmdq_dcmd);
+extern void mmc_recovery_fallback_lower_speed(struct mmc_host *host);
/**
* mmc_claim_host - exclusively claim a host
diff --git a/include/linux/sched.h b/include/linux/sched.h
index decb943..6c6ae4d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -178,7 +178,9 @@ extern u64 nr_running_integral(unsigned int cpu);
#endif
extern void sched_update_nr_prod(int cpu, long delta, bool inc);
-extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg);
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
+ unsigned int *max_nr,
+ unsigned int *big_max_nr);
extern unsigned int sched_get_cpu_util(int cpu);
extern void calc_global_load(unsigned long ticks);
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
new file mode 100644
index 0000000..f2322f3
--- /dev/null
+++ b/include/linux/usb/audio-v3.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file holds USB constants and structures defined
+ * by the USB Device Class Definition for Audio Devices in version 3.0.
+ * Comments below reference relevant sections of the documents contained
+ * in http://www.usb.org/developers/docs/devclass_docs/USB_Audio_v3.0.zip
+ */
+
+#ifndef __LINUX_USB_AUDIO_V3_H
+#define __LINUX_USB_AUDIO_V3_H
+
+#include <linux/types.h>
+
+#define UAC3_MIXER_UNIT_V3 0x05
+#define UAC3_FEATURE_UNIT_V3 0x07
+#define UAC3_CLOCK_SOURCE 0x0b
+
+#define BADD_MAXPSIZE_SYNC_MONO_16 0x0060
+#define BADD_MAXPSIZE_SYNC_MONO_24 0x0090
+#define BADD_MAXPSIZE_SYNC_STEREO_16 0x00c0
+#define BADD_MAXPSIZE_SYNC_STEREO_24 0x0120
+
+#define BADD_MAXPSIZE_ASYNC_MONO_16 0x0062
+#define BADD_MAXPSIZE_ASYNC_MONO_24 0x0093
+#define BADD_MAXPSIZE_ASYNC_STEREO_16 0x00c4
+#define BADD_MAXPSIZE_ASYNC_STEREO_24 0x0126
+
+#define BIT_RES_16_BIT 0x10
+#define BIT_RES_24_BIT 0x18
+
+#define SUBSLOTSIZE_16_BIT 0x02
+#define SUBSLOTSIZE_24_BIT 0x03
+
+#define BADD_SAMPLING_RATE 48000
+
+#define NUM_CHANNELS_MONO 1
+#define NUM_CHANNELS_STEREO 2
+#define BADD_CH_CONFIG_MONO 0
+#define BADD_CH_CONFIG_STEREO 3
+#define CLUSTER_ID_MONO 0x0001
+#define CLUSTER_ID_STEREO 0x0002
+
+#define FULL_ADC_PROFILE 0x01
+
+/* BADD Profile IDs */
+#define PROF_GENERIC_IO 0x20
+#define PROF_HEADPHONE 0x21
+#define PROF_SPEAKER 0x22
+#define PROF_MICROPHONE 0x23
+#define PROF_HEADSET 0x24
+#define PROF_HEADSET_ADAPTER 0x25
+#define PROF_SPEAKERPHONE 0x26
+
+/* BADD Entity IDs */
+#define BADD_OUT_TERM_ID_BAOF 0x03
+#define BADD_OUT_TERM_ID_BAIF 0x06
+#define BADD_IN_TERM_ID_BAOF 0x01
+#define BADD_IN_TERM_ID_BAIF 0x04
+#define BADD_FU_ID_BAOF 0x02
+#define BADD_FU_ID_BAIF 0x05
+#define BADD_CLOCK_SOURCE 0x09
+#define BADD_FU_ID_BAIOF 0x07
+#define BADD_MU_ID_BAIOF 0x08
+
+#define UAC_BIDIR_TERMINAL_HEADSET 0x0402
+#define UAC_BIDIR_TERMINAL_SPEAKERPHONE 0x0403
+
+#define NUM_BADD_DESCS 7
+
+struct uac3_input_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalID;
+ __u16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bCSourceID;
+ __u32 bmControls;
+ __u16 wClusterDescrID;
+ __u16 wExTerminalDescrID;
+ __u16 wConnectorsDescrID;
+ __u16 wTerminalDescrStr;
+} __packed;
+
+#define UAC3_DT_INPUT_TERMINAL_SIZE 0x14
+
+extern struct uac3_input_terminal_descriptor badd_baif_in_term_desc;
+extern struct uac3_input_terminal_descriptor badd_baof_in_term_desc;
+
+struct uac3_output_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalID;
+ __u16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bSourceID;
+ __u8 bCSourceID;
+ __u32 bmControls;
+ __u16 wExTerminalDescrID;
+ __u16 wConnectorsDescrID;
+ __u16 wTerminalDescrStr;
+} __packed;
+
+#define UAC3_DT_OUTPUT_TERMINAL_SIZE 0x13
+
+extern struct uac3_output_terminal_descriptor badd_baif_out_term_desc;
+extern struct uac3_output_terminal_descriptor badd_baof_out_term_desc;
+
+extern __u8 monoControls[];
+extern __u8 stereoControls[];
+extern __u8 badd_mu_src_ids[];
+
+struct uac3_mixer_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUnitID;
+ __u8 bNrInPins;
+ __u8 *baSourceID;
+ __u16 wClusterDescrID;
+ __u8 bmMixerControls;
+ __u32 bmControls;
+ __u16 wMixerDescrStr;
+} __packed;
+
+#define UAC3_DT_MIXER_UNIT_SIZE 0x10
+
+extern struct uac3_mixer_unit_descriptor badd_baiof_mu_desc;
+
+struct uac3_feature_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUnitID;
+ __u8 bSourceID;
+ __u8 *bmaControls;
+ __u16 wFeatureDescrStr;
+} __packed;
+
+extern struct uac3_feature_unit_descriptor badd_baif_fu_desc;
+extern struct uac3_feature_unit_descriptor badd_baof_fu_desc;
+extern struct uac3_feature_unit_descriptor badd_baiof_fu_desc;
+
+struct uac3_clock_source_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bClockID;
+ __u8 bmAttributes;
+ __u32 bmControls;
+ __u8 bReferenceTerminal;
+ __u16 wClockSourceStr;
+} __packed;
+
+#define UAC3_DT_CLOCK_SRC_SIZE 0x0c
+
+extern struct uac3_clock_source_descriptor badd_clock_desc;
+
+extern void *badd_desc_list[];
+
+#endif /* __LINUX_USB_AUDIO_V3_H */
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index 0583431..8053c8a 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -121,5 +121,6 @@ int msm_vidc_subscribe_event(void *instance,
int msm_vidc_unsubscribe_event(void *instance,
const struct v4l2_event_subscription *sub);
int msm_vidc_dqevent(void *instance, struct v4l2_event *event);
+int msm_vidc_g_crop(void *instance, struct v4l2_crop *a);
int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize);
#endif
diff --git a/include/soc/qcom/memory_dump.h b/include/soc/qcom/memory_dump.h
index dbae8e8..50e4b8c 100644
--- a/include/soc/qcom/memory_dump.h
+++ b/include/soc/qcom/memory_dump.h
@@ -62,7 +62,7 @@ static inline uint32_t msm_dump_table_version(void)
#define MSM_DUMP_MINOR(val) (val & 0xFFFFF)
-#define MAX_NUM_ENTRIES 0x140
+#define MAX_NUM_ENTRIES 0x150
enum msm_dump_data_ids {
MSM_DUMP_DATA_CPU_CTX = 0x00,
@@ -88,6 +88,7 @@ enum msm_dump_data_ids {
MSM_DUMP_DATA_LOG_BUF = 0x110,
MSM_DUMP_DATA_LOG_BUF_FIRST_IDX = 0x111,
MSM_DUMP_DATA_SCANDUMP_PER_CPU = 0x130,
+ MSM_DUMP_DATA_LLCC_PER_INSTANCE = 0x140,
MSM_DUMP_DATA_MAX = MAX_NUM_ENTRIES,
};
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 4a9c625..8c1746a 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1834,24 +1834,30 @@ TRACE_EVENT(sched_overutilized,
TRACE_EVENT(sched_get_nr_running_avg,
- TP_PROTO(int avg, int big_avg, int iowait_avg),
+ TP_PROTO(int avg, int big_avg, int iowait_avg,
+ unsigned int max_nr, unsigned int big_max_nr),
- TP_ARGS(avg, big_avg, iowait_avg),
+ TP_ARGS(avg, big_avg, iowait_avg, max_nr, big_max_nr),
TP_STRUCT__entry(
__field( int, avg )
__field( int, big_avg )
__field( int, iowait_avg )
+ __field( unsigned int, max_nr )
+ __field( unsigned int, big_max_nr )
),
TP_fast_assign(
__entry->avg = avg;
__entry->big_avg = big_avg;
__entry->iowait_avg = iowait_avg;
+ __entry->max_nr = max_nr;
+ __entry->big_max_nr = big_max_nr;
),
- TP_printk("avg=%d big_avg=%d iowait_avg=%d",
- __entry->avg, __entry->big_avg, __entry->iowait_avg)
+ TP_printk("avg=%d big_avg=%d iowait_avg=%d max_nr=%u big_max_nr=%u",
+ __entry->avg, __entry->big_avg, __entry->iowait_avg,
+ __entry->max_nr, __entry->big_max_nr)
);
TRACE_EVENT(core_ctl_eval_need,
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index d2314be..c6f5b09 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -26,6 +26,7 @@
/* bInterfaceProtocol values to denote the version of the standard used */
#define UAC_VERSION_1 0x00
#define UAC_VERSION_2 0x20
+#define UAC_VERSION_3 0x30
/* A.2 Audio Interface Subclass Codes */
#define USB_SUBCLASS_AUDIOCONTROL 0x01
diff --git a/include/uapi/media/cam_cpas.h b/include/uapi/media/cam_cpas.h
index 300bd87..c5cbac8 100644
--- a/include/uapi/media/cam_cpas.h
+++ b/include/uapi/media/cam_cpas.h
@@ -11,13 +11,15 @@
*
* @camera_family : Camera family type
* @reserved : Reserved field for alignment
- * @camera_version : Camera version
+ * @camera_version : Camera platform version
+ * @cpas_version : Camera CPAS version within camera platform
*
*/
struct cam_cpas_query_cap {
uint32_t camera_family;
uint32_t reserved;
struct cam_hw_version camera_version;
+ struct cam_hw_version cpas_version;
};
#endif /* __UAPI_CAM_CPAS_H__ */
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 005d15e..b140e55 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -39,11 +39,13 @@ struct cluster_data {
cpumask_t cpu_mask;
unsigned int need_cpus;
unsigned int task_thres;
+ unsigned int max_nr;
s64 need_ts;
struct list_head lru;
bool pending;
spinlock_t pending_lock;
bool is_big_cluster;
+ bool enable;
int nrrun;
struct task_struct *core_ctl_thread;
unsigned int first_cpu;
@@ -247,6 +249,29 @@ static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
}
+static ssize_t store_enable(struct cluster_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+ bool bval;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ bval = !!val;
+ if (bval != state->enable) {
+ state->enable = bval;
+ apply_need(state);
+ }
+
+ return count;
+}
+
+static ssize_t show_enable(const struct cluster_data *state, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%u\n", state->enable);
+}
+
static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
@@ -377,6 +402,7 @@ core_ctl_attr_ro(need_cpus);
core_ctl_attr_ro(active_cpus);
core_ctl_attr_ro(global_state);
core_ctl_attr_rw(not_preferred);
+core_ctl_attr_rw(enable);
static struct attribute *default_attrs[] = {
&min_cpus.attr,
@@ -386,6 +412,7 @@ static struct attribute *default_attrs[] = {
&busy_down_thres.attr,
&task_thres.attr,
&is_big_cluster.attr,
+ &enable.attr,
&need_cpus.attr,
&active_cpus.attr,
&global_state.attr,
@@ -432,47 +459,25 @@ static struct kobj_type ktype_core_ctl = {
/* ==================== runqueue based core count =================== */
-#define NR_RUNNING_TOLERANCE 5
-
static void update_running_avg(void)
{
int avg, iowait_avg, big_avg;
+ int max_nr, big_max_nr;
struct cluster_data *cluster;
unsigned int index = 0;
- sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);
-
- /*
- * Round up to the next integer if the average nr running tasks
- * is within NR_RUNNING_TOLERANCE/100 of the next integer.
- * If normal rounding up is used, it will allow a transient task
- * to trigger online event. By the time core is onlined, the task
- * has finished.
- * Rounding to closest suffers same problem because scheduler
- * might only provide running stats per jiffy, and a transient
- * task could skew the number for one jiffy. If core control
- * samples every 2 jiffies, it will observe 0.5 additional running
- * average which rounds up to 1 task.
- */
- avg = (avg + NR_RUNNING_TOLERANCE) / 100;
- big_avg = (big_avg + NR_RUNNING_TOLERANCE) / 100;
+ sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg,
+ &max_nr, &big_max_nr);
for_each_cluster(cluster, index) {
if (!cluster->inited)
continue;
- /*
- * Big cluster only need to take care of big tasks, but if
- * there are not enough big cores, big tasks need to be run
- * on little as well. Thus for little's runqueue stat, it
- * has to use overall runqueue average, or derive what big
- * tasks would have to be run on little. The latter approach
- * is not easy to get given core control reacts much slower
- * than scheduler, and can't predict scheduler's behavior.
- */
cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
+ cluster->max_nr = cluster->is_big_cluster ? big_max_nr : max_nr;
}
}
+#define MAX_NR_THRESHOLD 4
/* adjust needed CPUs based on current runqueue information */
static unsigned int apply_task_need(const struct cluster_data *cluster,
unsigned int new_need)
@@ -483,7 +488,15 @@ static unsigned int apply_task_need(const struct cluster_data *cluster,
/* only unisolate more cores if there are tasks to run */
if (cluster->nrrun > new_need)
- return new_need + 1;
+ new_need = new_need + 1;
+
+ /*
+ * We don't want tasks to be overcrowded in a cluster.
+ * If any CPU has more than MAX_NR_THRESHOLD in the last
+ * window, bring another CPU to help out.
+ */
+ if (cluster->max_nr > MAX_NR_THRESHOLD)
+ new_need = new_need + 1;
return new_need;
}
@@ -529,7 +542,7 @@ static bool eval_need(struct cluster_data *cluster)
spin_lock_irqsave(&state_lock, flags);
- if (cluster->boost) {
+ if (cluster->boost || !cluster->enable) {
need_cpus = cluster->max_cpus;
} else {
cluster->active_cpus = get_active_cpu_count(cluster);
@@ -1020,6 +1033,7 @@ static int cluster_init(const struct cpumask *mask)
cluster->offline_delay_ms = 100;
cluster->task_thres = UINT_MAX;
cluster->nrrun = cluster->num_cpus;
+ cluster->enable = true;
INIT_LIST_HEAD(&cluster->lru);
spin_lock_init(&cluster->pending_lock);
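The core_ctl.c changes above add a per-cluster enable attribute (disabling it pins need_cpus at max_cpus) and feed the new per-window peak run-queue depth into apply_task_need(): besides the existing +1 when the average number of runnable tasks exceeds the current need, another CPU is requested when any CPU saw more than MAX_NR_THRESHOLD runnable tasks. A simplified standalone sketch of that calculation; the real function also honours task_thres, and the numbers here are invented.

#include <stdio.h>

#define MAX_NR_THRESHOLD 4

static unsigned int task_need(unsigned int nrrun, unsigned int max_nr,
			      unsigned int new_need)
{
	if (nrrun > new_need)		/* more runnable tasks than CPUs asked for */
		new_need += 1;
	if (max_nr > MAX_NR_THRESHOLD)	/* one CPU was badly overcrowded last window */
		new_need += 1;
	return new_need;
}

int main(void)
{
	/* e.g. average of 3 runnable tasks, current need of 2, one CPU peaked at 6 tasks */
	printf("need_cpus = %u\n", task_need(3, 6, 2));	/* prints 4 */
	return 0;
}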
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6ccd3a7..cd406da 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5553,6 +5553,9 @@ static int group_idle_state(struct sched_group *sg)
for_each_cpu(i, sched_group_cpus(sg))
state = min(state, idle_get_state_idx(cpu_rq(i)));
+ if (unlikely(state == INT_MAX))
+ return -EINVAL;
+
/* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
state++;
@@ -5638,6 +5641,9 @@ static int sched_group_energy(struct energy_env *eenv)
}
idle_idx = group_idle_state(sg);
+ if (unlikely(idle_idx < 0))
+ return idle_idx;
+
group_util = group_norm_util(eenv, sg);
sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5220511..3194ae6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2451,6 +2451,11 @@ static inline bool hmp_capable(void)
return max_possible_capacity != min_max_possible_capacity;
}
+static inline bool is_max_capacity_cpu(int cpu)
+{
+ return cpu_max_possible_capacity(cpu) == max_possible_capacity;
+}
+
/*
* 'load' is in reference to "best cpu" at its best frequency.
* Scale that in reference to a given cpu, accounting for how bad it is
@@ -2719,6 +2724,8 @@ static inline int is_task_migration_throttled(struct task_struct *p)
return 0;
}
+static inline bool is_max_capacity_cpu(int cpu) { return true; }
+
static inline void
inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index f820094..7f86c0b 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -27,11 +27,13 @@ static DEFINE_PER_CPU(u64, nr_prod_sum);
static DEFINE_PER_CPU(u64, last_time);
static DEFINE_PER_CPU(u64, nr_big_prod_sum);
static DEFINE_PER_CPU(u64, nr);
+static DEFINE_PER_CPU(u64, nr_max);
static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
static s64 last_get_time;
+#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
/**
* sched_get_nr_running_avg
* @return: Average nr_running, iowait and nr_big_tasks value since last poll.
@@ -41,7 +43,8 @@ static s64 last_get_time;
* Obtains the average nr_running value since the last poll.
* This function may not be called concurrently with itself
*/
-void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
+void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
+ unsigned int *max_nr, unsigned int *big_max_nr)
{
int cpu;
u64 curr_time = sched_clock();
@@ -51,6 +54,8 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
*avg = 0;
*iowait_avg = 0;
*big_avg = 0;
+ *max_nr = 0;
+ *big_max_nr = 0;
if (!diff)
return;
@@ -79,17 +84,35 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
per_cpu(nr_big_prod_sum, cpu) = 0;
per_cpu(iowait_prod_sum, cpu) = 0;
+ if (*max_nr < per_cpu(nr_max, cpu))
+ *max_nr = per_cpu(nr_max, cpu);
+
+ if (is_max_capacity_cpu(cpu)) {
+ if (*big_max_nr < per_cpu(nr_max, cpu))
+ *big_max_nr = per_cpu(nr_max, cpu);
+ }
+
+ per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
}
diff = curr_time - last_get_time;
last_get_time = curr_time;
- *avg = (int)div64_u64(tmp_avg * 100, diff);
- *big_avg = (int)div64_u64(tmp_big_avg * 100, diff);
- *iowait_avg = (int)div64_u64(tmp_iowait * 100, diff);
+ /*
+ * Any task running on BIG cluster and BIG tasks running on little
+ * cluster contributes to big_avg. Small or medium tasks can also
+ * run on BIG cluster when co-location and scheduler boost features
+ * are activated. We don't want these tasks to downmigrate to little
+ * cluster when BIG CPUs are available but isolated. Round up the
+ * average values so that core_ctl aggressively unisolates BIG CPUs.
+ */
+ *avg = (int)DIV64_U64_ROUNDUP(tmp_avg, diff);
+ *big_avg = (int)DIV64_U64_ROUNDUP(tmp_big_avg, diff);
+ *iowait_avg = (int)DIV64_U64_ROUNDUP(tmp_iowait, diff);
- trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg);
+ trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg,
+ *max_nr, *big_max_nr);
BUG_ON(*avg < 0 || *big_avg < 0 || *iowait_avg < 0);
pr_debug("%s - avg:%d big_avg:%d iowait_avg:%d\n",
@@ -122,6 +145,9 @@ void sched_update_nr_prod(int cpu, long delta, bool inc)
BUG_ON((s64)per_cpu(nr, cpu) < 0);
+ if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
+ per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
+
per_cpu(nr_prod_sum, cpu) += nr_running * diff;
per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
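The sched_avg.c hunks above replace the old NR_RUNNING_TOLERANCE percentage rounding with a plain round-up division and start tracking a per-cpu nr_max peak that core_ctl consumes. A worked example of the new rounding with invented numbers, written as a standalone program rather than with the kernel's div64_u64 wrapper:

#include <inttypes.h>
#include <stdio.h>

#define DIV64_U64_ROUNDUP(X, Y) (((X) + (Y) - 1) / (Y))

int main(void)
{
	uint64_t diff = 20000000ULL;	/* 20 ms polling window, in ns */
	uint64_t tmp_avg = 25000000ULL;	/* time-weighted sum: 1.25 tasks on average */

	/* old scheme: (25000000 * 100 / 20000000 + 5) / 100 = 1, truncating 1.25 */
	/* new scheme: rounds 1.25 up to 2 so core_ctl unisolates another CPU    */
	printf("avg = %" PRIu64 "\n", DIV64_U64_ROUNDUP(tmp_avg, diff));
	return 0;
}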
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index b89abbd..b38ec53 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -152,6 +152,8 @@ __read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
* IMPORTANT: Initialize both copies to same value!!
*/
+static __read_mostly bool sched_predl;
+
__read_mostly unsigned int sched_ravg_hist_size = 5;
__read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
@@ -231,6 +233,16 @@ static int __init set_sched_ravg_window(char *str)
early_param("sched_ravg_window", set_sched_ravg_window);
+static int __init set_sched_predl(char *str)
+{
+ unsigned int predl;
+
+ get_option(&str, &predl);
+ sched_predl = !!predl;
+ return 0;
+}
+early_param("sched_predl", set_sched_predl);
+
void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
{
inc_nr_big_task(&rq->hmp_stats, p);
@@ -402,7 +414,7 @@ unsigned int nr_eligible_big_tasks(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- if (cpu_max_possible_capacity(cpu) != max_possible_capacity)
+ if (!is_max_capacity_cpu(cpu))
return rq->hmp_stats.nr_big_tasks;
return rq->nr_running;
@@ -1096,6 +1108,9 @@ void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
{
u32 new, old;
+ if (!sched_predl)
+ return;
+
if (is_idle_task(p) || exiting_task(p))
return;
@@ -1618,6 +1633,9 @@ static inline u32 predict_and_update_buckets(struct rq *rq,
int bidx;
u32 pred_demand;
+ if (!sched_predl)
+ return 0;
+
bidx = busy_to_bucket(runtime);
pred_demand = get_pred_busy(rq, p, bidx, runtime);
bucket_increase(p->ravg.busy_buckets, bidx);
diff --git a/sound/soc/codecs/wcd-mbhc-adc.c b/sound/soc/codecs/wcd-mbhc-adc.c
index 7278431..e44eec9 100644
--- a/sound/soc/codecs/wcd-mbhc-adc.c
+++ b/sound/soc/codecs/wcd-mbhc-adc.c
@@ -729,7 +729,8 @@ static void wcd_correct_swch_plug(struct work_struct *work)
* otherwise report unsupported plug
*/
if (mbhc->mbhc_cfg->swap_gnd_mic &&
- mbhc->mbhc_cfg->swap_gnd_mic(codec)) {
+ mbhc->mbhc_cfg->swap_gnd_mic(codec,
+ true)) {
pr_debug("%s: US_EU gpio present,flip switch\n"
, __func__);
continue;
diff --git a/sound/soc/codecs/wcd-mbhc-legacy.c b/sound/soc/codecs/wcd-mbhc-legacy.c
index 83023bc..745e2e8 100644
--- a/sound/soc/codecs/wcd-mbhc-legacy.c
+++ b/sound/soc/codecs/wcd-mbhc-legacy.c
@@ -633,7 +633,8 @@ static void wcd_correct_swch_plug(struct work_struct *work)
* otherwise report unsupported plug
*/
if (mbhc->mbhc_cfg->swap_gnd_mic &&
- mbhc->mbhc_cfg->swap_gnd_mic(codec)) {
+ mbhc->mbhc_cfg->swap_gnd_mic(codec,
+ true)) {
pr_debug("%s: US_EU gpio present,flip switch\n"
, __func__);
continue;
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index 510a8dc..ebcb413 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -1460,18 +1460,12 @@ static int wcd_mbhc_usb_c_analog_setup_gpios(struct wcd_mbhc *mbhc,
if (config->usbc_en1_gpio_p)
rc = msm_cdc_pinctrl_select_active_state(
config->usbc_en1_gpio_p);
- if (rc == 0 && config->usbc_en2n_gpio_p)
- rc = msm_cdc_pinctrl_select_active_state(
- config->usbc_en2n_gpio_p);
if (rc == 0 && config->usbc_force_gpio_p)
rc = msm_cdc_pinctrl_select_active_state(
config->usbc_force_gpio_p);
mbhc->usbc_mode = POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER;
} else {
/* no delay is required when disabling GPIOs */
- if (config->usbc_en2n_gpio_p)
- msm_cdc_pinctrl_select_sleep_state(
- config->usbc_en2n_gpio_p);
if (config->usbc_en1_gpio_p)
msm_cdc_pinctrl_select_sleep_state(
config->usbc_en1_gpio_p);
@@ -1490,6 +1484,8 @@ static int wcd_mbhc_usb_c_analog_setup_gpios(struct wcd_mbhc *mbhc,
}
mbhc->usbc_mode = POWER_SUPPLY_TYPEC_NONE;
+ if (mbhc->mbhc_cfg->swap_gnd_mic)
+ mbhc->mbhc_cfg->swap_gnd_mic(mbhc->codec, false);
}
return rc;
@@ -1675,19 +1671,12 @@ int wcd_mbhc_start(struct wcd_mbhc *mbhc, struct wcd_mbhc_config *mbhc_cfg)
dev_dbg(mbhc->codec->dev, "%s: usbc analog enabled\n",
__func__);
rc = wcd_mbhc_init_gpio(mbhc, mbhc_cfg,
- "qcom,usbc-analog-en1_gpio",
+ "qcom,usbc-analog-en1-gpio",
&config->usbc_en1_gpio,
&config->usbc_en1_gpio_p);
if (rc)
goto err;
- rc = wcd_mbhc_init_gpio(mbhc, mbhc_cfg,
- "qcom,usbc-analog-en2_n_gpio",
- &config->usbc_en2n_gpio,
- &config->usbc_en2n_gpio_p);
- if (rc)
- goto err;
-
if (of_find_property(card->dev->of_node,
"qcom,usbc-analog-force_detect_gpio",
NULL)) {
@@ -1734,12 +1723,6 @@ int wcd_mbhc_start(struct wcd_mbhc *mbhc, struct wcd_mbhc_config *mbhc_cfg)
gpio_free(config->usbc_en1_gpio);
config->usbc_en1_gpio = 0;
}
- if (config->usbc_en2n_gpio > 0) {
- dev_dbg(card->dev, "%s free usb_en2 gpio %d\n",
- __func__, config->usbc_en2n_gpio);
- gpio_free(config->usbc_en2n_gpio);
- config->usbc_en2n_gpio = 0;
- }
if (config->usbc_force_gpio > 0) {
dev_dbg(card->dev, "%s free usb_force gpio %d\n",
__func__, config->usbc_force_gpio);
@@ -1748,8 +1731,6 @@ int wcd_mbhc_start(struct wcd_mbhc *mbhc, struct wcd_mbhc_config *mbhc_cfg)
}
if (config->usbc_en1_gpio_p)
of_node_put(config->usbc_en1_gpio_p);
- if (config->usbc_en2n_gpio_p)
- of_node_put(config->usbc_en2n_gpio_p);
if (config->usbc_force_gpio_p)
of_node_put(config->usbc_force_gpio_p);
dev_dbg(mbhc->codec->dev, "%s: leave %d\n", __func__, rc);
@@ -1790,15 +1771,11 @@ void wcd_mbhc_stop(struct wcd_mbhc *mbhc)
/* free GPIOs */
if (config->usbc_en1_gpio > 0)
gpio_free(config->usbc_en1_gpio);
- if (config->usbc_en2n_gpio > 0)
- gpio_free(config->usbc_en2n_gpio);
if (config->usbc_force_gpio)
gpio_free(config->usbc_force_gpio);
if (config->usbc_en1_gpio_p)
of_node_put(config->usbc_en1_gpio_p);
- if (config->usbc_en2n_gpio_p)
- of_node_put(config->usbc_en2n_gpio_p);
if (config->usbc_force_gpio_p)
of_node_put(config->usbc_force_gpio_p);
}
diff --git a/sound/soc/codecs/wcd-mbhc-v2.h b/sound/soc/codecs/wcd-mbhc-v2.h
index 4ea4401..7ed06c3 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.h
+++ b/sound/soc/codecs/wcd-mbhc-v2.h
@@ -404,10 +404,10 @@ enum mbhc_moisture_rref {
struct usbc_ana_audio_config {
int usbc_en1_gpio;
- int usbc_en2n_gpio;
+ int usbc_en2_gpio;
int usbc_force_gpio;
struct device_node *usbc_en1_gpio_p; /* used by pinctrl API */
- struct device_node *usbc_en2n_gpio_p; /* used by pinctrl API */
+ struct device_node *usbc_en2_gpio_p; /* used by pinctrl API */
struct device_node *usbc_force_gpio_p; /* used by pinctrl API */
};
@@ -416,7 +416,7 @@ struct wcd_mbhc_config {
void *calibration;
bool detect_extn_cable;
bool mono_stero_detection;
- bool (*swap_gnd_mic)(struct snd_soc_codec *codec);
+ bool (*swap_gnd_mic)(struct snd_soc_codec *codec, bool active);
bool hs_ext_micbias;
bool gnd_det_en;
int key_code[WCD_MBHC_KEYCODE_NUM];
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index dedf4dc..eb556f8 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -5902,8 +5902,6 @@ static int tasha_codec_enable_dec(struct snd_soc_dapm_widget *w,
CF_MIN_3DB_150HZ << 5);
/* Enable TX PGA Mute */
snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
- /* Enable APC */
- snd_soc_update_bits(codec, dec_cfg_reg, 0x08, 0x08);
break;
case SND_SOC_DAPM_POST_PMU:
snd_soc_update_bits(codec, hpf_gate_reg, 0x01, 0x00);
@@ -5930,7 +5928,6 @@ static int tasha_codec_enable_dec(struct snd_soc_dapm_widget *w,
hpf_cut_off_freq =
tasha->tx_hpf_work[decimator].hpf_cut_off_freq;
snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
- snd_soc_update_bits(codec, dec_cfg_reg, 0x08, 0x00);
if (cancel_delayed_work_sync(
&tasha->tx_hpf_work[decimator].dwork)) {
if (hpf_cut_off_freq != CF_MIN_3DB_150HZ) {
diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c
index 304bf47..130cc56 100644
--- a/sound/soc/msm/sdm845.c
+++ b/sound/soc/msm/sdm845.c
@@ -173,7 +173,9 @@ struct msm_pinctrl_info {
struct msm_asoc_mach_data {
u32 mclk_freq;
int us_euro_gpio; /* used by gpio driver API */
+ int usbc_en2_gpio; /* used by gpio driver API */
struct device_node *us_euro_gpio_p; /* used by pinctrl API */
+ struct pinctrl *usbc_en2_gpio_p; /* used by pinctrl API */
struct device_node *hph_en1_gpio_p; /* used by pinctrl API */
struct device_node *hph_en0_gpio_p; /* used by pinctrl API */
struct snd_info_entry *codec_root;
@@ -3106,27 +3108,126 @@ static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
return rc;
}
-static bool msm_swap_gnd_mic(struct snd_soc_codec *codec)
+static bool msm_usbc_swap_gnd_mic(struct snd_soc_codec *codec, bool active)
{
- struct snd_soc_card *card = codec->component.card;
- struct msm_asoc_mach_data *pdata =
- snd_soc_card_get_drvdata(card);
int value = 0;
+ bool ret = 0;
+ struct snd_soc_card *card = codec->component.card;
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct pinctrl_state *en2_pinctrl_active;
+ struct pinctrl_state *en2_pinctrl_sleep;
- if (pdata->us_euro_gpio_p) {
- value = msm_cdc_pinctrl_get_state(pdata->us_euro_gpio_p);
- if (value)
- msm_cdc_pinctrl_select_sleep_state(
- pdata->us_euro_gpio_p);
- else
- msm_cdc_pinctrl_select_active_state(
- pdata->us_euro_gpio_p);
- } else if (pdata->us_euro_gpio >= 0) {
- value = gpio_get_value_cansleep(pdata->us_euro_gpio);
- gpio_set_value_cansleep(pdata->us_euro_gpio, !value);
+ if (!pdata->usbc_en2_gpio_p) {
+ if (active) {
+ /* if active and usbc_en2_gpio undefined, get pin */
+ pdata->usbc_en2_gpio_p = devm_pinctrl_get(card->dev);
+ if (IS_ERR_OR_NULL(pdata->usbc_en2_gpio_p)) {
+ dev_err(card->dev,
+ "%s: Can't get EN2 gpio pinctrl:%ld\n",
+ __func__,
+ PTR_ERR(pdata->usbc_en2_gpio_p));
+ pdata->usbc_en2_gpio_p = NULL;
+ return false;
+ }
+ } else
+ /* if not active and usbc_en2_gpio undefined, return */
+ return false;
}
- pr_debug("%s: swap select switch %d to %d\n", __func__, value, !value);
- return true;
+
+ pdata->usbc_en2_gpio = of_get_named_gpio(card->dev->of_node,
+ "qcom,usbc-analog-en2-gpio", 0);
+ if (!gpio_is_valid(pdata->usbc_en2_gpio)) {
+ dev_err(card->dev, "%s, property %s not in node %s",
+ __func__, "qcom,usbc-analog-en2-gpio",
+ card->dev->of_node->full_name);
+ return false;
+ }
+
+ en2_pinctrl_active = pinctrl_lookup_state(
+ pdata->usbc_en2_gpio_p, "aud_active");
+ if (IS_ERR_OR_NULL(en2_pinctrl_active)) {
+ dev_err(card->dev,
+ "%s: Cannot get aud_active pinctrl state:%ld\n",
+ __func__, PTR_ERR(en2_pinctrl_active));
+ ret = false;
+ goto err_lookup_state;
+ }
+
+ en2_pinctrl_sleep = pinctrl_lookup_state(
+ pdata->usbc_en2_gpio_p, "aud_sleep");
+ if (IS_ERR_OR_NULL(en2_pinctrl_sleep)) {
+ dev_err(card->dev,
+ "%s: Cannot get aud_sleep pinctrl state:%ld\n",
+ __func__, PTR_ERR(en2_pinctrl_sleep));
+ ret = false;
+ goto err_lookup_state;
+ }
+
+ /* if active and usbc_en2_gpio_p defined, swap using usbc_en2_gpio_p */
+ if (active) {
+ dev_dbg(codec->dev, "%s: enter\n", __func__);
+ if (pdata->usbc_en2_gpio_p) {
+ value = gpio_get_value_cansleep(pdata->usbc_en2_gpio);
+ if (value)
+ pinctrl_select_state(pdata->usbc_en2_gpio_p,
+ en2_pinctrl_sleep);
+ else
+ pinctrl_select_state(pdata->usbc_en2_gpio_p,
+ en2_pinctrl_active);
+ } else if (pdata->usbc_en2_gpio >= 0) {
+ value = gpio_get_value_cansleep(pdata->usbc_en2_gpio);
+ gpio_set_value_cansleep(pdata->usbc_en2_gpio, !value);
+ }
+ pr_debug("%s: swap select switch %d to %d\n", __func__,
+ value, !value);
+ ret = true;
+ } else {
+ /* if not active, release usbc_en2_gpio_p pin */
+ pinctrl_select_state(pdata->usbc_en2_gpio_p,
+ en2_pinctrl_sleep);
+ }
+
+err_lookup_state:
+ devm_pinctrl_put(pdata->usbc_en2_gpio_p);
+ pdata->usbc_en2_gpio_p = NULL;
+ return ret;
+}
+
+static bool msm_swap_gnd_mic(struct snd_soc_codec *codec, bool active)
+{
+ int value = 0;
+ int ret = 0;
+ struct snd_soc_card *card = codec->component.card;
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+
+ if (!pdata)
+ return false;
+
+ if (!wcd_mbhc_cfg.enable_usbc_analog) {
+ /* if usbc is not defined, swap using us_euro_gpio_p */
+ if (pdata->us_euro_gpio_p) {
+ value = msm_cdc_pinctrl_get_state(
+ pdata->us_euro_gpio_p);
+ if (value)
+ msm_cdc_pinctrl_select_sleep_state(
+ pdata->us_euro_gpio_p);
+ else
+ msm_cdc_pinctrl_select_active_state(
+ pdata->us_euro_gpio_p);
+ } else if (pdata->us_euro_gpio >= 0) {
+ value = gpio_get_value_cansleep(
+ pdata->us_euro_gpio);
+ gpio_set_value_cansleep(
+ pdata->us_euro_gpio, !value);
+ }
+ pr_debug("%s: swap select switch %d to %d\n", __func__,
+ value, !value);
+ ret = true;
+ } else {
+ /* if usbc is defined, swap using usbc_en2 */
+ ret = msm_usbc_swap_gnd_mic(codec, active);
+ }
+ return ret;
}
static int msm_afe_set_config(struct snd_soc_codec *codec)
@@ -6454,6 +6555,7 @@ static int msm_asoc_machine_probe(struct platform_device *pdev)
char *mclk_freq_prop_name;
const struct of_device_id *match;
int ret;
+ const char *usb_c_dt = "qcom,msm-mbhc-usbc-audio-supported";
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No platform supplied from device tree\n");
@@ -6601,6 +6703,9 @@ static int msm_asoc_machine_probe(struct platform_device *pdev)
wcd_mbhc_cfg.swap_gnd_mic = msm_swap_gnd_mic;
}
+ if (of_find_property(pdev->dev.of_node, usb_c_dt, NULL))
+ wcd_mbhc_cfg.swap_gnd_mic = msm_swap_gnd_mic;
+
ret = msm_prepare_us_euro(card);
if (ret)
dev_dbg(&pdev->dev, "msm_prepare_us_euro failed (%d)\n",
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index d2ac038..083887b 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -13,7 +13,8 @@
pcm.o \
proc.o \
quirks.o \
- stream.o
+ stream.o \
+ badd.o
snd-usbmidi-lib-objs := midi.o
diff --git a/sound/usb/badd.c b/sound/usb/badd.c
new file mode 100644
index 0000000..cc6c26c
--- /dev/null
+++ b/sound/usb/badd.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/usb.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
+
+struct uac3_input_terminal_descriptor badd_baif_in_term_desc = {
+ .bLength = UAC3_DT_INPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_INPUT_TERMINAL,
+ .bTerminalID = BADD_IN_TERM_ID_BAIF,
+ .bCSourceID = BADD_CLOCK_SOURCE,
+ .wExTerminalDescrID = 0x0000,
+ .wTerminalDescrStr = 0x0000
+};
+
+struct uac3_input_terminal_descriptor badd_baof_in_term_desc = {
+ .bLength = UAC3_DT_INPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_INPUT_TERMINAL,
+ .bTerminalID = BADD_IN_TERM_ID_BAOF,
+ .wTerminalType = UAC_TERMINAL_STREAMING,
+ .bAssocTerminal = 0x00,
+ .bCSourceID = BADD_CLOCK_SOURCE,
+ .bmControls = 0x00000000,
+ .wExTerminalDescrID = 0x0000,
+ .wConnectorsDescrID = 0x0000,
+ .wTerminalDescrStr = 0x0000
+};
+
+struct uac3_output_terminal_descriptor badd_baif_out_term_desc = {
+ .bLength = UAC3_DT_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+ .bTerminalID = BADD_OUT_TERM_ID_BAIF,
+ .wTerminalType = UAC_TERMINAL_STREAMING,
+ .bAssocTerminal = 0x00, /* No associated terminal */
+ .bSourceID = BADD_FU_ID_BAIF,
+ .bCSourceID = BADD_CLOCK_SOURCE,
+ .bmControls = 0x00000000, /* No controls */
+ .wExTerminalDescrID = 0x0000,
+ .wConnectorsDescrID = 0x0000,
+ .wTerminalDescrStr = 0x0000
+};
+
+struct uac3_output_terminal_descriptor badd_baof_out_term_desc = {
+ .bLength = UAC3_DT_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+ .bTerminalID = BADD_OUT_TERM_ID_BAOF,
+ .bSourceID = BADD_FU_ID_BAOF,
+ .bCSourceID = BADD_CLOCK_SOURCE,
+ .wExTerminalDescrID = 0x0000,
+ .wTerminalDescrStr = 0x0000
+};
+
+__u8 monoControls[] = {
+ 0x03, 0x00, 0x00, 0x00,
+ 0x0c, 0x00, 0x00, 0x00};
+
+__u8 stereoControls[] = {
+ 0x03, 0x00, 0x00, 0x00,
+ 0x0c, 0x00, 0x00, 0x00,
+ 0x0c, 0x00, 0x00, 0x00
+};
+
+__u8 badd_mu_src_ids[] = {BADD_IN_TERM_ID_BAOF, BADD_FU_ID_BAIOF};
+
+struct uac3_mixer_unit_descriptor badd_baiof_mu_desc = {
+ .bLength = UAC3_DT_MIXER_UNIT_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC3_MIXER_UNIT_V3,
+ .bUnitID = BADD_MU_ID_BAIOF,
+ .bNrInPins = 0x02,
+ .baSourceID = badd_mu_src_ids,
+ .bmMixerControls = 0x00,
+ .bmControls = 0x00000000,
+ .wMixerDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baif_fu_desc = {
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+ .bUnitID = BADD_FU_ID_BAIF,
+ .bSourceID = BADD_IN_TERM_ID_BAIF,
+ .wFeatureDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baof_fu_desc = {
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+ .bUnitID = BADD_FU_ID_BAOF,
+ .wFeatureDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baiof_fu_desc = {
+ .bLength = 0x0f,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+ .bUnitID = BADD_FU_ID_BAIOF,
+ .bSourceID = BADD_IN_TERM_ID_BAIF,
+ .bmaControls = monoControls,
+ .wFeatureDescrStr = 0x0000
+};
+
+struct uac3_clock_source_descriptor badd_clock_desc = {
+ .bLength = UAC3_DT_CLOCK_SRC_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC3_CLOCK_SOURCE,
+ .bClockID = BADD_CLOCK_SOURCE,
+ .bmControls = 0x00000001,
+ .bReferenceTerminal = 0x00,
+ .wClockSourceStr = 0x0000
+};
+
+void *badd_desc_list[] = {
+ &badd_baif_in_term_desc,
+ &badd_baof_in_term_desc,
+ &badd_baiof_mu_desc,
+ &badd_baif_fu_desc,
+ &badd_baof_fu_desc,
+ &badd_baiof_fu_desc,
+ &badd_clock_desc
+};
+
diff --git a/sound/usb/card.c b/sound/usb/card.c
index ccf06de..eaf18aa 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -45,6 +45,7 @@
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
#include <linux/module.h>
+#include <linux/usb/audio-v3.h>
#include <sound/control.h>
#include <sound/core.h>
@@ -285,7 +286,6 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
struct usb_host_interface *host_iface;
struct usb_interface_descriptor *altsd;
struct usb_interface *usb_iface;
- void *control_header;
int i, protocol;
usb_iface = usb_ifnum_to_if(dev, ctrlif);
@@ -302,16 +302,13 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
return -EINVAL;
}
- control_header = snd_usb_find_csint_desc(host_iface->extra,
- host_iface->extralen,
- NULL, UAC_HEADER);
altsd = get_iface_desc(host_iface);
protocol = altsd->bInterfaceProtocol;
- if (!control_header) {
- dev_err(&dev->dev, "cannot find UAC_HEADER\n");
- return -EINVAL;
- }
+ /*
+ * UAC 1.0 devices use AC HEADER Desc for linking AS interfaces;
+ * UAC 2.0 and 3.0 devices use IAD for linking AS interfaces
+ */
switch (protocol) {
default:
@@ -321,8 +318,17 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
/* fall through */
case UAC_VERSION_1: {
- struct uac1_ac_header_descriptor *h1 = control_header;
+ void *control_header;
+ struct uac1_ac_header_descriptor *h1;
+ control_header = snd_usb_find_csint_desc(host_iface->extra,
+ host_iface->extralen, NULL, UAC_HEADER);
+ if (!control_header) {
+ dev_err(&dev->dev, "cannot find UAC_HEADER\n");
+ return -EINVAL;
+ }
+
+ h1 = control_header;
if (!h1->bInCollection) {
dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
return -EINVAL;
@@ -339,7 +345,8 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
break;
}
- case UAC_VERSION_2: {
+ case UAC_VERSION_2:
+ case UAC_VERSION_3: {
struct usb_interface_assoc_descriptor *assoc =
usb_iface->intf_assoc;
if (!assoc) {
@@ -358,7 +365,8 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
}
if (!assoc) {
- dev_err(&dev->dev, "Audio class v2 interfaces need an interface association\n");
+ dev_err(&dev->dev, "Audio class V%d interfaces need an interface association\n",
+ protocol);
return -EINVAL;
}
@@ -606,6 +614,15 @@ static int usb_audio_probe(struct usb_interface *intf,
struct usb_host_interface *alts;
int ifnum;
u32 id;
+ struct usb_interface_assoc_descriptor *assoc;
+
+ assoc = intf->intf_assoc;
+ if (assoc && assoc->bFunctionClass == USB_CLASS_AUDIO &&
+ assoc->bFunctionProtocol == UAC_VERSION_3 &&
+ assoc->bFunctionSubClass == FULL_ADC_PROFILE) {
+ dev_info(&dev->dev, "No support for full-fledged ADC 3.0 yet!!\n");
+ return -EINVAL;
+ }
alts = &intf->altsetting[0];
ifnum = get_iface_desc(alts)->bInterfaceNumber;
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 26dd5f2..8238180 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -428,6 +428,10 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
case UAC_VERSION_2:
return set_sample_rate_v2(chip, iface, alts, fmt, rate);
+
+ /* Clock rate is fixed at 48 kHz for BADD devices */
+ case UAC_VERSION_3:
+ return 0;
}
}
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 2c44386..eaf2615 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -20,6 +20,7 @@
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -69,6 +70,34 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
format <<= 1;
break;
}
+
+ case UAC_VERSION_3: {
+ switch (fp->maxpacksize) {
+ case BADD_MAXPSIZE_SYNC_MONO_16:
+ case BADD_MAXPSIZE_SYNC_STEREO_16:
+ case BADD_MAXPSIZE_ASYNC_MONO_16:
+ case BADD_MAXPSIZE_ASYNC_STEREO_16: {
+ sample_width = BIT_RES_16_BIT;
+ sample_bytes = SUBSLOTSIZE_16_BIT;
+ break;
+ }
+
+ case BADD_MAXPSIZE_SYNC_MONO_24:
+ case BADD_MAXPSIZE_SYNC_STEREO_24:
+ case BADD_MAXPSIZE_ASYNC_MONO_24:
+ case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+ sample_width = BIT_RES_24_BIT;
+ sample_bytes = SUBSLOTSIZE_24_BIT;
+ break;
+ }
+ default:
+ usb_audio_err(chip, "%u:%d : Invalid wMaxPacketSize\n",
+ fp->iface, fp->altsetting);
+ return pcm_formats;
+ }
+ format = 1 << format;
+ break;
+ }
}
if ((pcm_formats == 0) &&
@@ -364,17 +393,34 @@ static int parse_audio_format_rates_v2(struct snd_usb_audio *chip,
return ret;
}
+static int badd_set_audio_rate_v3(struct snd_usb_audio *chip,
+ struct audioformat *fp)
+{
+ unsigned int rate;
+
+ fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL);
+ if (fp->rate_table == NULL)
+ return -ENOMEM;
+
+ fp->nr_rates = 1;
+ rate = BADD_SAMPLING_RATE;
+ fp->rate_min = fp->rate_max = fp->rate_table[0] = rate;
+ fp->rates |= snd_pcm_rate_to_rate_bit(rate);
+ return 0;
+}
+
/*
* parse the format type I and III descriptors
*/
static int parse_audio_format_i(struct snd_usb_audio *chip,
struct audioformat *fp, unsigned int format,
+ u8 format_type,
struct uac_format_type_i_continuous_descriptor *fmt)
{
snd_pcm_format_t pcm_format;
int ret;
- if (fmt->bFormatType == UAC_FORMAT_TYPE_III) {
+ if (format_type == UAC_FORMAT_TYPE_III) {
/* FIXME: the format type is really IECxxx
* but we give normal PCM format to get the existing
* apps working...
@@ -413,6 +459,9 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
/* fp->channels is already set in this case */
ret = parse_audio_format_rates_v2(chip, fp);
break;
+ case UAC_VERSION_3:
+ ret = badd_set_audio_rate_v3(chip, fp);
+ break;
}
if (fp->channels < 1) {
@@ -484,11 +533,18 @@ int snd_usb_parse_audio_format(struct snd_usb_audio *chip,
int stream)
{
int err;
+ int format_type = -EINVAL;
- switch (fmt->bFormatType) {
+ if ((fp->protocol == UAC_VERSION_1) ||
+ (fp->protocol == UAC_VERSION_2))
+ format_type = fmt->bFormatType;
+ else
+ format_type = UAC_FORMAT_TYPE_I; /* only BADD is supported */
+
+ switch (format_type) {
case UAC_FORMAT_TYPE_I:
case UAC_FORMAT_TYPE_III:
- err = parse_audio_format_i(chip, fp, format, fmt);
+ err = parse_audio_format_i(chip, fp, format, format_type, fmt);
break;
case UAC_FORMAT_TYPE_II:
err = parse_audio_format_ii(chip, fp, format, fmt);
@@ -497,10 +553,10 @@ int snd_usb_parse_audio_format(struct snd_usb_audio *chip,
usb_audio_info(chip,
"%u:%d : format type %d is not supported yet\n",
fp->iface, fp->altsetting,
- fmt->bFormatType);
+ format_type);
return -ENOTSUPP;
}
- fp->fmt_type = fmt->bFormatType;
+ fp->fmt_type = format_type;
if (err < 0)
return err;
#if 1
@@ -511,7 +567,7 @@ int snd_usb_parse_audio_format(struct snd_usb_audio *chip,
if (chip->usb_id == USB_ID(0x041e, 0x3000) ||
chip->usb_id == USB_ID(0x041e, 0x3020) ||
chip->usb_id == USB_ID(0x041e, 0x3061)) {
- if (fmt->bFormatType == UAC_FORMAT_TYPE_I &&
+ if (format_type == UAC_FORMAT_TYPE_I &&
fp->rates != SNDRV_PCM_RATE_48000 &&
fp->rates != SNDRV_PCM_RATE_96000)
return -ENOTSUPP;
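Since a BADD stream can only present the 16-bit and 24-bit-in-3-bytes subslot layouts decoded earlier, the PCM formats a UAC_VERSION_3 audioformat ends up with are correspondingly narrow. A small sketch of that pairing (an assumption on my part, mirroring the width/subslot pairs used above, not code from the patch):

#include <sound/pcm.h>

/* Sketch: the ALSA PCM format implied by a BADD sample width. */
static snd_pcm_format_t badd_pcm_format(unsigned int sample_width)
{
	return sample_width == BIT_RES_24_BIT ? SNDRV_PCM_FORMAT_S24_3LE :
						SNDRV_PCM_FORMAT_S16_LE;
}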
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 932ce3e..c3bf5ff 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -51,6 +51,7 @@
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -185,6 +186,17 @@ static void *find_audio_control_unit(struct mixer_build *state,
/* we just parse the header */
struct uac_feature_unit_descriptor *hdr = NULL;
+ if (state->mixer->protocol == UAC_VERSION_3) {
+ int i;
+
+ for (i = 0; i < NUM_BADD_DESCS; i++) {
+ hdr = (void *)badd_desc_list[i];
+ if (hdr->bUnitID == unit)
+ return hdr;
+ }
+
+ return NULL;
+ }
while ((hdr = snd_usb_find_desc(state->buffer, state->buflen, hdr,
USB_DT_CS_INTERFACE)) != NULL) {
if (hdr->bLength >= 4 &&
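A BADD function exposes no class-specific AC descriptors on the wire, so the UAC_VERSION_3 branch above resolves unit IDs against the canned badd_desc_list[] table instead of the interface's extra data. The same idea in isolation, as an illustrative helper (badd_desc_list and NUM_BADD_DESCS are the tables this series introduces):

/* Sketch: look a BADD unit ID up in the prebuilt descriptor table rather than
 * parsing anything from the device. */
static void *badd_find_unit(int unit)
{
	int i;

	for (i = 0; i < NUM_BADD_DESCS; i++) {
		struct uac_feature_unit_descriptor *hdr =
			(void *)badd_desc_list[i];

		if (hdr->bUnitID == unit)
			return hdr;
	}
	return NULL;
}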
@@ -718,7 +730,7 @@ static int check_input_term(struct mixer_build *state, int id,
term->channels = d->bNrChannels;
term->chconfig = le16_to_cpu(d->wChannelConfig);
term->name = d->iTerminal;
- } else { /* UAC_VERSION_2 */
+ } else if (state->mixer->protocol == UAC_VERSION_2) {
struct uac2_input_terminal_descriptor *d = p1;
/* call recursively to verify that the
@@ -735,6 +747,24 @@ static int check_input_term(struct mixer_build *state, int id,
term->channels = d->bNrChannels;
term->chconfig = le32_to_cpu(d->bmChannelConfig);
term->name = d->iTerminal;
+ } else { /* UAC_VERSION_3 */
+ struct uac3_input_terminal_descriptor *d = p1;
+
+ err = check_input_term(state,
+ d->bCSourceID, term);
+ if (err < 0)
+ return err;
+
+ term->id = id;
+ term->type = d->wTerminalType;
+ if (d->wClusterDescrID == CLUSTER_ID_MONO) {
+ term->channels = NUM_CHANNELS_MONO;
+ term->chconfig = BADD_CH_CONFIG_MONO;
+ } else {
+ term->channels = NUM_CHANNELS_STEREO;
+ term->chconfig = BADD_CH_CONFIG_STEREO;
+ }
+ term->name = d->wTerminalDescrStr;
}
return 0;
case UAC_FEATURE_UNIT: {
@@ -752,41 +782,81 @@ static int check_input_term(struct mixer_build *state, int id,
return 0;
}
case UAC_SELECTOR_UNIT:
- case UAC2_CLOCK_SELECTOR: {
- struct uac_selector_unit_descriptor *d = p1;
- /* call recursively to retrieve the channel info */
- err = check_input_term(state, d->baSourceID[0], term);
- if (err < 0)
- return err;
- term->type = d->bDescriptorSubtype << 16; /* virtual type */
- term->id = id;
- term->name = uac_selector_unit_iSelector(d);
+ /* UAC3_MIXER_UNIT_V3 */
+ case UAC2_CLOCK_SELECTOR:
+ /* UAC3_CLOCK_SOURCE */ {
+ if (state->mixer->protocol == UAC_VERSION_3
+ && hdr[2] == UAC3_CLOCK_SOURCE) {
+ struct uac3_clock_source_descriptor *d = p1;
+
+ term->type = d->bDescriptorSubtype << 16;
+ term->id = id;
+ term->name = d->wClockSourceStr;
+ } else if (state->mixer->protocol == UAC_VERSION_3
+ && hdr[2] == UAC3_MIXER_UNIT_V3) {
+ struct uac3_mixer_unit_descriptor *d = p1;
+
+ term->type = d->bDescriptorSubtype << 16;
+ if (d->wClusterDescrID == CLUSTER_ID_MONO) {
+ term->channels = NUM_CHANNELS_MONO;
+ term->chconfig = BADD_CH_CONFIG_MONO;
+ } else {
+ term->channels = NUM_CHANNELS_STEREO;
+ term->chconfig = BADD_CH_CONFIG_STEREO;
+ }
+ term->name = d->wMixerDescrStr;
+ } else {
+ struct uac_selector_unit_descriptor *d = p1;
+ /* call recursively to retrieve channel info */
+ err = check_input_term(state,
+ d->baSourceID[0], term);
+ if (err < 0)
+ return err;
+ /* virtual type */
+ term->type = d->bDescriptorSubtype << 16;
+ term->id = id;
+ term->name = uac_selector_unit_iSelector(d);
+ }
return 0;
}
case UAC1_PROCESSING_UNIT:
case UAC1_EXTENSION_UNIT:
/* UAC2_PROCESSING_UNIT_V2 */
/* UAC2_EFFECT_UNIT */
+ /* UAC3_FEATURE_UNIT_V3 */
case UAC2_EXTENSION_UNIT_V2: {
- struct uac_processing_unit_descriptor *d = p1;
+ if (state->mixer->protocol == UAC_VERSION_3) {
+ struct uac_feature_unit_descriptor *d = p1;
- if (state->mixer->protocol == UAC_VERSION_2 &&
- hdr[2] == UAC2_EFFECT_UNIT) {
- /* UAC2/UAC1 unit IDs overlap here in an
- * uncompatible way. Ignore this unit for now.
- */
+ id = d->bSourceID;
+ } else {
+ struct uac_processing_unit_descriptor *d = p1;
+
+ if (state->mixer->protocol == UAC_VERSION_2 &&
+ hdr[2] == UAC2_EFFECT_UNIT) {
+ /* UAC2/UAC1 unit IDs overlap here in an
+ * incompatible way. Ignore this unit
+ * for now.
+ */
+ return 0;
+ }
+
+ if (d->bNrInPins) {
+ id = d->baSourceID[0];
+ break; /* continue to parse */
+ }
+ /* virtual type */
+ term->type = d->bDescriptorSubtype << 16;
+ term->channels =
+ uac_processing_unit_bNrChannels(d);
+ term->chconfig =
+ uac_processing_unit_wChannelConfig(
+ d, state->mixer->protocol);
+ term->name = uac_processing_unit_iProcessing(
+ d, state->mixer->protocol);
return 0;
}
-
- if (d->bNrInPins) {
- id = d->baSourceID[0];
- break; /* continue to parse */
- }
- term->type = d->bDescriptorSubtype << 16; /* virtual type */
- term->channels = uac_processing_unit_bNrChannels(d);
- term->chconfig = uac_processing_unit_wChannelConfig(d, state->mixer->protocol);
- term->name = uac_processing_unit_iProcessing(d, state->mixer->protocol);
- return 0;
+ break;
}
case UAC2_CLOCK_SOURCE: {
struct uac_clock_source_descriptor *d = p1;
@@ -1233,12 +1303,18 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
struct usb_feature_control_info *ctl_info;
unsigned int len = 0;
int mapped_name = 0;
- int nameid = uac_feature_unit_iFeature(desc);
+ int nameid;
struct snd_kcontrol *kctl;
struct usb_mixer_elem_info *cval;
const struct usbmix_name_map *map;
unsigned int range;
+ if (state->mixer->protocol == UAC_VERSION_3)
+ nameid = ((struct uac3_feature_unit_descriptor *)
+ raw_desc)->wFeatureDescrStr;
+ else
+ nameid = uac_feature_unit_iFeature(desc);
+
control++; /* change from zero-based to 1-based value */
if (control == UAC_FU_GRAPHIC_EQUALIZER) {
@@ -1259,7 +1335,7 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
ctl_info = &audio_feature_info[control-1];
if (state->mixer->protocol == UAC_VERSION_1)
cval->val_type = ctl_info->type;
- else /* UAC_VERSION_2 */
+ else /* UAC_VERSION_2 or UAC_VERSION_3 */
cval->val_type = ctl_info->type_uac2 >= 0 ?
ctl_info->type_uac2 : ctl_info->type;
@@ -1447,6 +1523,62 @@ static int parse_clock_source_unit(struct mixer_build *state, int unitid,
return snd_usb_mixer_add_control(&cval->head, kctl);
}
+static int find_num_channels(struct mixer_build *state, int dir)
+{
+ int num_ch = -EINVAL, num, i, j, wMaxPacketSize;
+ int ctrlif = get_iface_desc(state->mixer->hostif)->bInterfaceNumber;
+ struct usb_interface *usb_iface =
+ usb_ifnum_to_if(state->mixer->chip->dev, ctrlif);
+ struct usb_interface_assoc_descriptor *assoc = usb_iface->intf_assoc;
+ struct usb_host_interface *alts;
+
+ for (i = 0; i < assoc->bInterfaceCount; i++) {
+ int intf = assoc->bFirstInterface + i;
+
+ if (intf != ctrlif) {
+ struct usb_interface *iface =
+ usb_ifnum_to_if(state->mixer->chip->dev, intf);
+
+ alts = &iface->altsetting[1];
+ if (dir == USB_DIR_OUT &&
+ get_endpoint(alts, 0)->bEndpointAddress &
+ USB_DIR_IN)
+ continue;
+ if (dir == USB_DIR_IN &&
+ !(get_endpoint(alts, 0)->bEndpointAddress &
+ USB_DIR_IN))
+ continue;
+ num = iface->num_altsetting;
+ for (j = 1; j < num; j++) {
+ num_ch = NUM_CHANNELS_MONO;
+ alts = &iface->altsetting[j];
+ wMaxPacketSize = le16_to_cpu(
+ get_endpoint(alts, 0)->
+ wMaxPacketSize);
+ switch (wMaxPacketSize) {
+ case BADD_MAXPSIZE_SYNC_MONO_16:
+ case BADD_MAXPSIZE_SYNC_MONO_24:
+ case BADD_MAXPSIZE_ASYNC_MONO_16:
+ case BADD_MAXPSIZE_ASYNC_MONO_24:
+ break;
+ case BADD_MAXPSIZE_SYNC_STEREO_16:
+ case BADD_MAXPSIZE_SYNC_STEREO_24:
+ case BADD_MAXPSIZE_ASYNC_STEREO_16:
+ case BADD_MAXPSIZE_ASYNC_STEREO_24:
+ num_ch = NUM_CHANNELS_STEREO;
+ break;
+ }
+ if (num_ch == NUM_CHANNELS_MONO)
+ continue;
+ else
+ break;
+ }
+ }
+ }
+
+ return num_ch;
+}
+
/*
* parse a feature unit
*
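find_num_channels() walks the sibling streaming interfaces of the audio function and infers mono versus stereo from the first altsetting whose wMaxPacketSize matches a stereo BADD size. A hedged usage sketch for the caller below that has no profile-implied channel count (assumes monoControls/stereoControls are the plain control tables this series adds):

/* Sketch: pick the control table for the playback feature unit when the BADD
 * profile does not already fix the channel count. */
static int badd_pick_playback_controls(struct mixer_build *state,
				       __u8 **controls, int *channels)
{
	int ch = find_num_channels(state, USB_DIR_OUT);

	if (ch < 0)
		return ch;	/* no usable streaming altsetting found */

	*channels = ch;
	*controls = (ch == NUM_CHANNELS_MONO) ? monoControls : stereoControls;
	return 0;
}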
@@ -1478,7 +1610,7 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
unitid);
return -EINVAL;
}
- } else {
+ } else if (state->mixer->protocol == UAC_VERSION_2) {
struct uac2_feature_unit_descriptor *ftr = _ftr;
csize = 4;
channels = (hdr->bLength - 6) / 4 - 1;
@@ -1489,11 +1621,118 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
unitid);
return -EINVAL;
}
+ } else {
+ struct usb_interface *usb_iface =
+ usb_ifnum_to_if(state->mixer->chip->dev,
+ get_iface_desc(state->mixer->hostif)->bInterfaceNumber);
+ struct usb_interface_assoc_descriptor *assoc =
+ usb_iface->intf_assoc;
+
+ csize = 4;
+ switch (unitid) {
+ case BADD_FU_ID_BAIOF:
+ channels = NUM_CHANNELS_MONO;
+ bmaControls = monoControls;
+ badd_baif_in_term_desc.wClusterDescrID =
+ CLUSTER_ID_MONO;
+ break;
+
+ case BADD_FU_ID_BAOF:
+ switch (assoc->bFunctionSubClass) {
+ case PROF_HEADPHONE:
+ case PROF_HEADSET_ADAPTER:
+ channels = NUM_CHANNELS_STEREO;
+ bmaControls = stereoControls;
+ badd_baiof_mu_desc.wClusterDescrID =
+ CLUSTER_ID_MONO;
+ break;
+ case PROF_SPEAKERPHONE:
+ channels = NUM_CHANNELS_MONO;
+ bmaControls = monoControls;
+ badd_baof_in_term_desc.wClusterDescrID =
+ CLUSTER_ID_MONO;
+ break;
+ default:
+ channels = find_num_channels(state,
+ USB_DIR_OUT);
+ if (channels < 0) {
+ usb_audio_err(state->chip,
+ "unit %u: Cant find num of channels\n",
+ unitid);
+ return channels;
+ }
+
+ bmaControls = (channels == NUM_CHANNELS_MONO) ?
+ monoControls : stereoControls;
+ badd_baof_in_term_desc.wClusterDescrID =
+ (channels == NUM_CHANNELS_MONO) ?
+ CLUSTER_ID_MONO : CLUSTER_ID_STEREO;
+ break;
+ }
+ break;
+
+ case BADD_FU_ID_BAIF:
+ switch (assoc->bFunctionSubClass) {
+ case PROF_HEADSET:
+ case PROF_HEADSET_ADAPTER:
+ case PROF_SPEAKERPHONE:
+ channels = NUM_CHANNELS_MONO;
+ bmaControls = monoControls;
+ badd_baif_in_term_desc.wClusterDescrID =
+ CLUSTER_ID_MONO;
+ break;
+ default:
+ channels = find_num_channels(state, USB_DIR_IN);
+ if (channels < 0) {
+ usb_audio_err(state->chip,
+ "unit %u: Cant find num of channels\n",
+ unitid);
+ return channels;
+ }
+
+ bmaControls = (channels == NUM_CHANNELS_MONO) ?
+ monoControls : stereoControls;
+ badd_baif_in_term_desc.wClusterDescrID =
+ (channels == NUM_CHANNELS_MONO) ?
+ CLUSTER_ID_MONO : CLUSTER_ID_STEREO;
+ break;
+ }
+ break;
+
+ default:
+ usb_audio_err(state->chip, "Invalid unit %u\n", unitid);
+ return -EINVAL;
+ }
}
/* parse the source unit */
- if ((err = parse_audio_unit(state, hdr->bSourceID)) < 0)
- return err;
+ if (state->mixer->protocol != UAC_VERSION_3) {
+ err = parse_audio_unit(state, hdr->bSourceID);
+ if (err < 0)
+ return err;
+ } else {
+ struct usb_interface *usb_iface =
+ usb_ifnum_to_if(state->mixer->chip->dev,
+ get_iface_desc(state->mixer->hostif)->bInterfaceNumber);
+ struct usb_interface_assoc_descriptor *assoc =
+ usb_iface->intf_assoc;
+
+ switch (unitid) {
+ case BADD_FU_ID_BAOF:
+ switch (assoc->bFunctionSubClass) {
+ case PROF_HEADSET:
+ case PROF_HEADSET_ADAPTER:
+ hdr->bSourceID = BADD_MU_ID_BAIOF;
+ break;
+ default:
+ hdr->bSourceID = BADD_IN_TERM_ID_BAOF;
+ break;
+ }
+ }
+ err = parse_audio_unit(state, hdr->bSourceID);
+ if (err < 0)
+ return err;
+ }
/* determine the input source type and name */
err = check_input_term(state, hdr->bSourceID, &iterm);
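For UAC_VERSION_3 the source of the playback feature unit (BAOF) is not read from a descriptor at all; it is derived from the BADD profile, with headset-style profiles routing through the BAIOF mixer unit and everything else fed straight from the input terminal. The same rule restated as an illustrative helper (the BADD_* IDs and PROF_* subclasses are the audio-v3.h constants used above):

/* Sketch of the fixed BADD playback topology assumed by the code above. */
static u8 badd_baof_source_id(u8 function_subclass)
{
	switch (function_subclass) {
	case PROF_HEADSET:
	case PROF_HEADSET_ADAPTER:
		return BADD_MU_ID_BAIOF;	/* BAIOF mixer feeds the BAOF */
	default:
		return BADD_IN_TERM_ID_BAOF;	/* input terminal feeds it */
	}
}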
@@ -1547,7 +1786,7 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
build_feature_ctl(state, _ftr, 0, i, &iterm,
unitid, 0);
}
- } else { /* UAC_VERSION_2 */
+ } else { /* UAC_VERSION_2 or UAC_VERSION_3 */
for (i = 0; i < ARRAY_SIZE(audio_feature_info); i++) {
unsigned int ch_bits = 0;
unsigned int ch_read_only = 0;
@@ -1665,12 +1904,20 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
int input_pins, num_ins, num_outs;
int pin, ich, err;
- if (desc->bLength < 11 || !(input_pins = desc->bNrInPins) ||
- !(num_outs = uac_mixer_unit_bNrChannels(desc))) {
- usb_audio_err(state->chip,
- "invalid MIXER UNIT descriptor %d\n",
- unitid);
- return -EINVAL;
+ if (state->mixer->protocol == UAC_VERSION_3) {
+ input_pins = badd_baiof_mu_desc.bNrInPins;
+ num_outs =
+ (badd_baiof_mu_desc.wClusterDescrID == CLUSTER_ID_MONO) ?
+ NUM_CHANNELS_MONO : NUM_CHANNELS_STEREO;
+ } else {
+ input_pins = desc->bNrInPins;
+ num_outs = uac_mixer_unit_bNrChannels(desc);
+ if (desc->bLength < 11 || !input_pins || !num_outs) {
+ usb_audio_err(state->chip,
+ "invalid MIXER UNIT descriptor %d\n",
+ unitid);
+ return -EINVAL;
+ }
}
num_ins = 0;
@@ -1690,9 +1937,14 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
int och, ich_has_controls = 0;
for (och = 0; och < num_outs; och++) {
- __u8 *c = uac_mixer_unit_bmControls(desc,
- state->mixer->protocol);
+ __u8 *c = NULL;
+ if (state->mixer->protocol == UAC_VERSION_3)
+ c =
+ &(badd_baiof_mu_desc.bmMixerControls);
+ else
+ c = uac_mixer_unit_bmControls(desc,
+ state->mixer->protocol);
if (check_matrix_bitmap(c, ich, och, num_outs)) {
ich_has_controls = 1;
break;
@@ -2201,16 +2453,28 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
case UAC2_CLOCK_SOURCE:
return parse_clock_source_unit(state, unitid, p1);
case UAC_SELECTOR_UNIT:
+ /* UAC3_MIXER_UNIT_V3 has the same value */
case UAC2_CLOCK_SELECTOR:
- return parse_audio_selector_unit(state, unitid, p1);
+ /* UAC3_CLOCK_SOURCE has the same value */
+ if (state->mixer->protocol == UAC_VERSION_3 &&
+ p1[2] == UAC3_CLOCK_SOURCE)
+ return 0; /* NOP */
+ else if (state->mixer->protocol == UAC_VERSION_3
+ && p1[2] == UAC3_MIXER_UNIT_V3)
+ return parse_audio_mixer_unit(state, unitid, p1);
+ else
+ return parse_audio_selector_unit(state, unitid, p1);
case UAC_FEATURE_UNIT:
return parse_audio_feature_unit(state, unitid, p1);
case UAC1_PROCESSING_UNIT:
/* UAC2_EFFECT_UNIT has the same value */
+ /* UAC3_FEATURE_UNIT_V3 has the same value */
if (state->mixer->protocol == UAC_VERSION_1)
return parse_audio_processing_unit(state, unitid, p1);
- else
+ else if (state->mixer->protocol == UAC_VERSION_2)
return 0; /* FIXME - effect units not implemented yet */
+ else
+ return parse_audio_feature_unit(state, unitid, p1);
case UAC1_EXTENSION_UNIT:
/* UAC2_PROCESSING_UNIT_V2 has the same value */
if (state->mixer->protocol == UAC_VERSION_1)
@@ -2245,6 +2509,23 @@ static int snd_usb_mixer_dev_free(struct snd_device *device)
return 0;
}
+static int make_out_term(struct mixer_build state, int wTerminalType)
+{
+ struct uac3_output_terminal_descriptor *desc = NULL;
+
+ if (wTerminalType == UAC_TERMINAL_STREAMING)
+ desc = &badd_baif_out_term_desc;
+ else {
+ desc = &badd_baof_out_term_desc;
+ desc->wTerminalType = wTerminalType;
+ }
+ set_bit(desc->bTerminalID, state.unitbitmap);
+ state.oterm.id = desc->bTerminalID;
+ state.oterm.type = desc->wTerminalType;
+ state.oterm.name = desc->wTerminalDescrStr;
+ return parse_audio_unit(&state, desc->bSourceID);
+}
+
/*
* create mixer controls
*
@@ -2253,9 +2534,8 @@ static int snd_usb_mixer_dev_free(struct snd_device *device)
static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
{
struct mixer_build state;
- int err;
+ int err = -EINVAL;
const struct usbmix_ctl_map *map;
- void *p;
memset(&state, 0, sizeof(state));
state.chip = mixer->chip;
@@ -2273,44 +2553,108 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
}
}
- p = NULL;
- while ((p = snd_usb_find_csint_desc(mixer->hostif->extra,
- mixer->hostif->extralen,
- p, UAC_OUTPUT_TERMINAL)) != NULL) {
- if (mixer->protocol == UAC_VERSION_1) {
- struct uac1_output_terminal_descriptor *desc = p;
+ if (mixer->protocol == UAC_VERSION_3) {
+ struct usb_interface *usb_iface =
+ usb_ifnum_to_if(mixer->chip->dev,
+ get_iface_desc(mixer->hostif)->bInterfaceNumber);
+ struct usb_interface_assoc_descriptor *assoc =
+ usb_iface->intf_assoc;
- if (desc->bLength < sizeof(*desc))
- continue; /* invalid descriptor? */
- /* mark terminal ID as visited */
- set_bit(desc->bTerminalID, state.unitbitmap);
- state.oterm.id = desc->bTerminalID;
- state.oterm.type = le16_to_cpu(desc->wTerminalType);
- state.oterm.name = desc->iTerminal;
- err = parse_audio_unit(&state, desc->bSourceID);
+ switch (assoc->bFunctionSubClass) {
+ case PROF_GENERIC_IO: {
+ if (assoc->bInterfaceCount == 0x02) {
+ if (get_endpoint(mixer->hostif,
+ 0)->bEndpointAddress & USB_DIR_IN)
+ err = make_out_term(state,
+ UAC_TERMINAL_STREAMING);
+ else
+ err = make_out_term(state,
+ UAC_OUTPUT_TERMINAL_UNDEFINED);
+ } else {
+ err = make_out_term(state,
+ UAC_OUTPUT_TERMINAL_UNDEFINED);
+ if (err < 0 && err != -EINVAL)
+ return err;
+ err = make_out_term(state,
+ UAC_TERMINAL_STREAMING);
+ }
+ break;
+ }
+
+ case PROF_HEADPHONE:
+ err = make_out_term(state,
+ UAC_OUTPUT_TERMINAL_HEADPHONES);
+ break;
+ case PROF_SPEAKER:
+ err = make_out_term(state, UAC_OUTPUT_TERMINAL_SPEAKER);
+ break;
+ case PROF_MICROPHONE:
+ err = make_out_term(state, UAC_TERMINAL_STREAMING);
+ break;
+ case PROF_HEADSET:
+ case PROF_HEADSET_ADAPTER:
+ err = make_out_term(state, UAC_BIDIR_TERMINAL_HEADSET);
if (err < 0 && err != -EINVAL)
return err;
- } else { /* UAC_VERSION_2 */
- struct uac2_output_terminal_descriptor *desc = p;
-
- if (desc->bLength < sizeof(*desc))
- continue; /* invalid descriptor? */
- /* mark terminal ID as visited */
- set_bit(desc->bTerminalID, state.unitbitmap);
- state.oterm.id = desc->bTerminalID;
- state.oterm.type = le16_to_cpu(desc->wTerminalType);
- state.oterm.name = desc->iTerminal;
- err = parse_audio_unit(&state, desc->bSourceID);
+ err = make_out_term(state, UAC_TERMINAL_STREAMING);
+ break;
+ case PROF_SPEAKERPHONE:
+ err = make_out_term(state,
+ UAC_BIDIR_TERMINAL_SPEAKERPHONE);
if (err < 0 && err != -EINVAL)
return err;
+ err = make_out_term(state, UAC_TERMINAL_STREAMING);
+ break;
+ }
+ if (err < 0 && err != -EINVAL)
+ return err;
+ } else {
+ void *p;
- /*
- * For UAC2, use the same approach to also add the
- * clock selectors
- */
- err = parse_audio_unit(&state, desc->bCSourceID);
- if (err < 0 && err != -EINVAL)
- return err;
+ p = NULL;
+ while ((p = snd_usb_find_csint_desc(mixer->hostif->extra,
+ mixer->hostif->extralen, p,
+ UAC_OUTPUT_TERMINAL)) != NULL) {
+ if (mixer->protocol == UAC_VERSION_1) {
+ struct uac1_output_terminal_descriptor *desc =
+ p;
+
+ if (desc->bLength < sizeof(*desc))
+ continue; /* invalid descriptor? */
+ /* mark terminal ID as visited */
+ set_bit(desc->bTerminalID, state.unitbitmap);
+ state.oterm.id = desc->bTerminalID;
+ state.oterm.type =
+ le16_to_cpu(desc->wTerminalType);
+ state.oterm.name = desc->iTerminal;
+ err = parse_audio_unit(&state, desc->bSourceID);
+ if (err < 0 && err != -EINVAL)
+ return err;
+ } else { /* UAC_VERSION_2 */
+ struct uac2_output_terminal_descriptor *desc =
+ p;
+
+ if (desc->bLength < sizeof(*desc))
+ continue; /* invalid descriptor? */
+ /* mark terminal ID as visited */
+ set_bit(desc->bTerminalID, state.unitbitmap);
+ state.oterm.id = desc->bTerminalID;
+ state.oterm.type =
+ le16_to_cpu(desc->wTerminalType);
+ state.oterm.name = desc->iTerminal;
+ err = parse_audio_unit(&state, desc->bSourceID);
+ if (err < 0 && err != -EINVAL)
+ return err;
+
+ /*
+ * For UAC2, use the same approach to also add
+ * the clock selectors
+ */
+ err = parse_audio_unit(&state,
+ desc->bCSourceID);
+ if (err < 0 && err != -EINVAL)
+ return err;
+ }
}
}
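With no output terminal descriptors to enumerate, the UAC_VERSION_3 branch above fabricates them from the BADD function subclass. Collected in one place as an illustrative table (the layout is mine, the constants are the ones used above; bidirectional profiles additionally get a UAC_TERMINAL_STREAMING capture terminal, as does PROF_GENERIC_IO depending on endpoint direction):

/* Sketch: output terminal type synthesized for each BADD profile. */
static const struct {
	u8  subclass;
	u16 terminal_type;
} badd_out_terms[] = {
	{ PROF_HEADPHONE,	UAC_OUTPUT_TERMINAL_HEADPHONES },
	{ PROF_SPEAKER,		UAC_OUTPUT_TERMINAL_SPEAKER },
	{ PROF_MICROPHONE,	UAC_TERMINAL_STREAMING },
	{ PROF_HEADSET,		UAC_BIDIR_TERMINAL_HEADSET },
	{ PROF_HEADSET_ADAPTER,	UAC_BIDIR_TERMINAL_HEADSET },
	{ PROF_SPEAKERPHONE,	UAC_BIDIR_TERMINAL_SPEAKERPHONE },
};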
@@ -2552,6 +2896,9 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
case UAC_VERSION_2:
mixer->protocol = UAC_VERSION_2;
break;
+ case UAC_VERSION_3:
+ mixer->protocol = UAC_VERSION_3;
+ break;
}
if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 7437cd5..5bc84b4 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -20,6 +20,7 @@
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -282,8 +283,6 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
0 /* terminator */
};
struct snd_pcm_chmap_elem *chmap;
- const unsigned int *maps;
- int c;
if (channels > ARRAY_SIZE(chmap->map))
return NULL;
@@ -292,26 +291,41 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
if (!chmap)
return NULL;
- maps = protocol == UAC_VERSION_2 ? uac2_maps : uac1_maps;
chmap->channels = channels;
- c = 0;
- if (bits) {
- for (; bits && *maps; maps++, bits >>= 1)
- if (bits & 1)
- chmap->map[c++] = *maps;
+ if (protocol == UAC_VERSION_3) {
+ switch (channels) {
+ case 1:
+ chmap->map[0] = SNDRV_CHMAP_MONO;
+ break;
+ case 2:
+ chmap->map[0] = SNDRV_CHMAP_FL;
+ chmap->map[1] = SNDRV_CHMAP_FR;
+ break;
+ }
} else {
- /* If we're missing wChannelConfig, then guess something
- to make sure the channel map is not skipped entirely */
- if (channels == 1)
- chmap->map[c++] = SNDRV_CHMAP_MONO;
- else
- for (; c < channels && *maps; maps++)
- chmap->map[c++] = *maps;
- }
+ int c = 0;
+ const unsigned int *maps =
+ protocol == UAC_VERSION_2 ? uac2_maps : uac1_maps;
- for (; c < channels; c++)
- chmap->map[c] = SNDRV_CHMAP_UNKNOWN;
+ if (bits) {
+ for (; bits && *maps; maps++, bits >>= 1)
+ if (bits & 1)
+ chmap->map[c++] = *maps;
+ } else {
+ /*
+ * If we're missing wChannelConfig, then guess something
+ * to make sure the channel map is not skipped entirely
+ */
+ if (channels == 1)
+ chmap->map[c++] = SNDRV_CHMAP_MONO;
+ else
+ for (; c < channels && *maps; maps++)
+ chmap->map[c++] = *maps;
+ }
+ for (; c < channels; c++)
+ chmap->map[c] = SNDRV_CHMAP_UNKNOWN;
+ }
return chmap;
}
@@ -409,6 +423,9 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
struct usb_interface_descriptor *altsd = get_iface_desc(alts);
int attributes = 0;
+ if (protocol == UAC_VERSION_3)
+ return 0;
+
csep = snd_usb_find_desc(alts->endpoint[0].extra, alts->endpoint[0].extralen, NULL, USB_DT_CS_ENDPOINT);
/* Creamware Noah has this descriptor after the 2nd endpoint */
@@ -492,7 +509,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
unsigned int format = 0, num_channels = 0;
struct audioformat *fp = NULL;
int num, protocol, clock = 0;
- struct uac_format_type_i_continuous_descriptor *fmt;
+ struct uac_format_type_i_continuous_descriptor *fmt = NULL;
unsigned int chconfig;
dev = chip->dev;
@@ -629,38 +646,78 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
iface_no, altno, as->bTerminalLink);
continue;
}
+
+ case UAC_VERSION_3: {
+ int wMaxPacketSize;
+
+ format = UAC_FORMAT_TYPE_I_PCM;
+ clock = BADD_CLOCK_SOURCE;
+ wMaxPacketSize = le16_to_cpu(get_endpoint(alts, 0)
+ ->wMaxPacketSize);
+ switch (wMaxPacketSize) {
+ case BADD_MAXPSIZE_SYNC_MONO_16:
+ case BADD_MAXPSIZE_SYNC_MONO_24:
+ case BADD_MAXPSIZE_ASYNC_MONO_16:
+ case BADD_MAXPSIZE_ASYNC_MONO_24: {
+ num_channels = NUM_CHANNELS_MONO;
+ chconfig = BADD_CH_CONFIG_MONO;
+ break;
+ }
+
+ case BADD_MAXPSIZE_SYNC_STEREO_16:
+ case BADD_MAXPSIZE_SYNC_STEREO_24:
+ case BADD_MAXPSIZE_ASYNC_STEREO_16:
+ case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+ num_channels = NUM_CHANNELS_STEREO;
+ chconfig = BADD_CH_CONFIG_STEREO;
+ break;
+ }
+ default:
+ dev_err(&dev->dev,
+ "%u:%d: invalid wMaxPacketSize\n",
+ iface_no, altno);
+ continue;
+ }
+ }
}
- /* get format type */
- fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_FORMAT_TYPE);
- if (!fmt) {
- dev_err(&dev->dev,
- "%u:%d : no UAC_FORMAT_TYPE desc\n",
- iface_no, altno);
- continue;
- }
- if (((protocol == UAC_VERSION_1) && (fmt->bLength < 8)) ||
- ((protocol == UAC_VERSION_2) && (fmt->bLength < 6))) {
- dev_err(&dev->dev,
- "%u:%d : invalid UAC_FORMAT_TYPE desc\n",
- iface_no, altno);
- continue;
- }
+ if ((protocol == UAC_VERSION_1) ||
+ (protocol == UAC_VERSION_2)) {
+ /* get format type */
+ fmt = snd_usb_find_csint_desc(alts->extra,
+ alts->extralen, NULL, UAC_FORMAT_TYPE);
+ if (!fmt) {
+ dev_err(&dev->dev,
+ "%u:%d : no UAC_FORMAT_TYPE desc\n",
+ iface_no, altno);
+ continue;
+ }
+ if (((protocol == UAC_VERSION_1) && (fmt->bLength < 8))
+ || ((protocol == UAC_VERSION_2) &&
+ (fmt->bLength < 6))) {
+ dev_err(&dev->dev,
+ "%u:%d :invalid UAC_FORMAT_TYPE desc\n",
+ iface_no, altno);
+ continue;
+ }
- /*
- * Blue Microphones workaround: The last altsetting is identical
- * with the previous one, except for a larger packet size, but
- * is actually a mislabeled two-channel setting; ignore it.
- */
- if (fmt->bNrChannels == 1 &&
- fmt->bSubframeSize == 2 &&
- altno == 2 && num == 3 &&
- fp && fp->altsetting == 1 && fp->channels == 1 &&
- fp->formats == SNDRV_PCM_FMTBIT_S16_LE &&
- protocol == UAC_VERSION_1 &&
- le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize) ==
+ /*
+ * Blue Microphones workaround: The last altsetting is
+ * identical with the previous one, except for a larger
+ * packet size, but is actually a mislabeled two-channel
+ * setting; ignore it.
+ */
+ if (fmt->bNrChannels == 1 &&
+ fmt->bSubframeSize == 2 &&
+ altno == 2 && num == 3 &&
+ fp && fp->altsetting == 1 && fp->channels == 1 &&
+ fp->formats == SNDRV_PCM_FMTBIT_S16_LE &&
+ protocol == UAC_VERSION_1 &&
+ le16_to_cpu(
+ get_endpoint(alts, 0)->wMaxPacketSize) ==
fp->maxpacksize * 2)
- continue;
+ continue;
+ }
fp = kzalloc(sizeof(*fp), GFP_KERNEL);
if (! fp) {
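Taken together, the UAC_VERSION_3 branch of snd_usb_parse_audio_interface() fixes every stream parameter from the endpoint's wMaxPacketSize: the format is PCM, the clock is the single BADD clock source, and channel count and config follow the packet size. A hedged sketch of what a BADD audioformat ends up carrying for one 16-bit stereo altsetting (field names from struct audioformat; the exact values are my illustration, not from the patch):

/* Sketch: shape of a parsed BADD audioformat for a 16-bit stereo altsetting. */
static void badd_example_format(struct audioformat *fp)
{
	fp->fmt_type = UAC_FORMAT_TYPE_I;
	fp->protocol = UAC_VERSION_3;
	fp->clock    = BADD_CLOCK_SOURCE;
	fp->channels = NUM_CHANNELS_STEREO;
	fp->formats  = SNDRV_PCM_FMTBIT_S16_LE;
	fp->rates    = SNDRV_PCM_RATE_48000;
	fp->rate_min = BADD_SAMPLING_RATE;
	fp->rate_max = BADD_SAMPLING_RATE;
	fp->nr_rates = 1;
}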
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 5a1974e..801508c 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -27,6 +27,7 @@
#include <soc/qcom/msm_qmi_interface.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
+#include <linux/usb/audio-v3.h>
#include "usbaudio.h"
#include "card.h"
@@ -427,12 +428,14 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
protocol = altsd->bInterfaceProtocol;
/* get format type */
- fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
- UAC_FORMAT_TYPE);
- if (!fmt) {
- pr_err("%s: %u:%d : no UAC_FORMAT_TYPE desc\n", __func__,
- subs->interface, subs->altset_idx);
- goto err;
+ if (protocol != UAC_VERSION_3) {
+ fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
+ UAC_FORMAT_TYPE);
+ if (!fmt) {
+ pr_err("%s: %u:%d : no UAC_FORMAT_TYPE desc\n",
+ __func__, subs->interface, subs->altset_idx);
+ goto err;
+ }
}
if (!uadev[card_num].ctrl_intf) {
@@ -440,12 +443,15 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
goto err;
}
- hdr_ptr = snd_usb_find_csint_desc(uadev[card_num].ctrl_intf->extra,
- uadev[card_num].ctrl_intf->extralen,
- NULL, UAC_HEADER);
- if (!hdr_ptr) {
- pr_err("%s: no UAC_HEADER desc\n", __func__);
- goto err;
+ if (protocol != UAC_VERSION_3) {
+ hdr_ptr = snd_usb_find_csint_desc(
+ uadev[card_num].ctrl_intf->extra,
+ uadev[card_num].ctrl_intf->extralen,
+ NULL, UAC_HEADER);
+ if (!hdr_ptr) {
+ pr_err("%s: no UAC_HEADER desc\n", __func__);
+ goto err;
+ }
}
if (protocol == UAC_VERSION_1) {
@@ -473,6 +479,31 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
resp->usb_audio_spec_revision =
((struct uac2_ac_header_descriptor *)hdr_ptr)->bcdADC;
resp->usb_audio_spec_revision_valid = 1;
+ } else if (protocol == UAC_VERSION_3) {
+ switch (le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize)) {
+ case BADD_MAXPSIZE_SYNC_MONO_16:
+ case BADD_MAXPSIZE_SYNC_STEREO_16:
+ case BADD_MAXPSIZE_ASYNC_MONO_16:
+ case BADD_MAXPSIZE_ASYNC_STEREO_16: {
+ resp->usb_audio_subslot_size = SUBSLOTSIZE_16_BIT;
+ break;
+ }
+
+ case BADD_MAXPSIZE_SYNC_MONO_24:
+ case BADD_MAXPSIZE_SYNC_STEREO_24:
+ case BADD_MAXPSIZE_ASYNC_MONO_24:
+ case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+ resp->usb_audio_subslot_size = SUBSLOTSIZE_24_BIT;
+ break;
+ }
+
+ default:
+ pr_err("%d: %u: Invalid wMaxPacketSize\n",
+ subs->interface, subs->altset_idx);
+ ret = -EINVAL;
+ goto err;
+ }
+ resp->usb_audio_subslot_size_valid = 1;
} else {
pr_err("%s: unknown protocol version %x\n", __func__, protocol);
goto err;
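For a BADD stream the QMI response reports only the subslot size, again derived from wMaxPacketSize; there is no class-specific AC header to read a bcdADC revision from, so that field is left unset for UAC_VERSION_3. The decode restated as a standalone, illustrative helper (constants from audio-v3.h):

/* Sketch: subslot size implied by a BADD wMaxPacketSize value. */
static int badd_subslot_size(unsigned int max_packet_size)
{
	switch (max_packet_size) {
	case BADD_MAXPSIZE_SYNC_MONO_16:
	case BADD_MAXPSIZE_SYNC_STEREO_16:
	case BADD_MAXPSIZE_ASYNC_MONO_16:
	case BADD_MAXPSIZE_ASYNC_STEREO_16:
		return SUBSLOTSIZE_16_BIT;
	case BADD_MAXPSIZE_SYNC_MONO_24:
	case BADD_MAXPSIZE_SYNC_STEREO_24:
	case BADD_MAXPSIZE_ASYNC_MONO_24:
	case BADD_MAXPSIZE_ASYNC_STEREO_24:
		return SUBSLOTSIZE_24_BIT;
	default:
		return -EINVAL;
	}
}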