Merge "sched: avoid migrating when softint on tgt cpu should be short"
diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
index c467327..0c75cf6 100644
--- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt
+++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
@@ -32,6 +32,8 @@
- clocks : List of phandle and clock specifier pairs
- clock-names : List of clock input name strings sorted in the same
order as the clocks property.
+- qcom,keep-radio-on-during-sleep: Boolean flag to indicate whether to suspend to D3hot
+  instead of turning off the device
Example:
wil6210: qcom,wil6210 {
@@ -56,5 +58,6 @@
clocks = <&clock_gcc clk_rf_clk3>,
<&clock_gcc clk_rf_clk3_pin>;
clock-names = "rf_clk3_clk", "rf_clk3_pin_clk";
+ qcom,keep-radio-on-during-sleep;
};
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index aefd42c..5cf2cb8 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -132,8 +132,6 @@
power collapse feature available or not.
- qcom,sde-has-mixer-gc: Boolean property to indicate if mixer has gamma correction
feature available or not.
-- qcom,sde-has-cdp: Boolean property to indicate if cdp feature is
- available or not.
- qcom,sde-sspp-clk-ctrl: Array of offsets describing clk control
offsets for dynamic clock gating. 1st value
in the array represents offset of the control
@@ -236,6 +234,8 @@
of (pps, OT limit), where pps is pixel per second and
OT limit is the write limit to apply if the given
pps is not exceeded.
+- qcom,sde-vbif-memtype-0: Array of u32 vbif memory type settings, group 0
+- qcom,sde-vbif-memtype-1: Array of u32 vbif memory type settings, group 1
- qcom,sde-wb-id: Array of writeback ids corresponding to the
offsets defined in property: qcom,sde-wb-off.
- qcom,sde-wb-clk-ctrl: Array of 2 cell property describing clk control
@@ -319,6 +319,9 @@
<fill level, lut hi, lut lo> in ascending fill level
indicating the qos luts for cwb on sspp.
Zero fill level on the last entry identifies the default lut.
+- qcom,sde-cdp-setting: Array of 2 cell property, with a format of
+ <read enable, write enable> for cdp use cases in
+ order of <real_time> and <non_real_time>.
Bus Scaling Subnodes:
- qcom,sde-reg-bus: Property to provide Bus scaling for register access for
@@ -447,7 +450,6 @@
qcom,sde-ubwc-static = <0x100>;
qcom,sde-ubwc-swizzle = <0>;
qcom,sde-panic-per-pipe;
- qcom,sde-has-cdp;
qcom,sde-has-src-split;
qcom,sde-has-dim-layer;
qcom,sde-sspp-src-size = <0x100>;
@@ -517,6 +519,8 @@
qcom,sde-qos-lut-cwb =
<0 0x75300000 0x00000000>;
+ qcom,sde-cdp-setting = <1 1>, <1 0>;
+
qcom,sde-vbif-off = <0 0>;
qcom,sde-vbif-id = <0 1>;
qcom,sde-vbif-default-ot-rd-limit = <32>;
@@ -525,6 +529,8 @@
<124416000 4>, <248832000 16>;
qcom,sde-vbif-dynamic-ot-wr-limit = <62208000 2>,
<124416000 4>, <248832000 16>;
+ qcom,sde-vbif-memtype-0 = <3 3 3 3 3 3 3 3>;
+ qcom,sde-vbif-memtype-1 = <3 3 3 3 3 3>;
qcom,sde-dram-channels = <2>;
qcom,sde-num-nrt-paths = <1>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-pll.txt b/Documentation/devicetree/bindings/fb/mdss-pll.txt
index d0d7fff..59fa6a0 100644
--- a/Documentation/devicetree/bindings/fb/mdss-pll.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-pll.txt
@@ -16,7 +16,7 @@
"qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_hdmi_pll_8996_v3_1p8",
"qcom,mdss_edp_pll_8996_v3", "qcom,mdss_edp_pll_8996_v3_1p8",
"qcom,mdss_dsi_pll_10nm", "qcom,mdss_dp_pll_8998",
- "qcom,mdss_hdmi_pll_8998"
+ "qcom,mdss_hdmi_pll_8998", "qcom,mdss_dp_pll_10nm".
- cell-index: Specifies the controller used
- reg: offset and length of the register set for the device.
- reg-names : names to refer to register sets related to this device
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 375eaf2..1394fd3 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -74,14 +74,6 @@
address size faults are due to a fundamental programming
error from which we don't care about recovering anyways.
-- qcom,skip-init : Disable resetting configuration for all context banks
- during device reset. This is useful for targets where
- some context banks are dedicated to other execution
- environments outside of Linux and those other EEs are
- programming their own stream match tables, SCTLR, etc.
- Without setting this option we will trample on their
- configuration.
-
- qcom,dynamic : Allow dynamic domains to be attached. This is only
useful if the upstream hardware is capable of switching
between multiple domains within a single context bank.
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
index a61bab3..62a51cf 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
@@ -104,6 +104,17 @@
Please refer Documentation/devicetree/bindings/arm/msm/msm_bus.txt
for the properties above.
+- vdd-corners
+ Usage: required
+ Value type: <u32>
+ Definition: List of vdd corners to map to ahb levels.
+
+- vdd-corner-ahb-mapping
+ Usage: required
+ Value type: <string>
+ Definition: List of ahb level strings corresponding to vdd-corners.
+ Supported strings: suspend, svs, nominal, turbo
+
- client-id-based
Usage: required
Value type: <empty>
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index e1f194f3..441d771 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -35,11 +35,6 @@
addition battery properties will be faked such that the device
assumes normal operation.
-- qcom,external-vconn
- Usage: optional
- Value type: <empty>
- Definition: Boolean flag which indicates VCONN is sourced externally.
-
- qcom,fcc-max-ua
Usage: optional
Value type: <u32>
@@ -181,6 +176,12 @@
Definition: Specifies the maximum charger buck/boost switching frequency in
KHz. It overrides the max frequency defined for the charger.
+- qcom,otg-deglitch-time-ms
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the deglitch interval for OTG detection.
+ If the value is not present, 50 msec is used as the default.
+
=============================================
Second Level Nodes - SMB2 Charger Peripherals
=============================================
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index e0ab31f..3a09b28 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -2256,8 +2256,8 @@
- qcom,wcn-btfm : Property to specify if WCN BT/FM chip is used for the target
- qcom,msm-mbhc-usbc-audio-supported : Property to specify if analog audio feature is
enabled or not.
-- qcom,usbc-analog-en1_gpio : EN1 GPIO to enable USB type-C analog audio
-- qcom,usbc-analog-en2_n_gpio : EN2 GPIO to enable USB type-C analog audio
+- qcom,usbc-analog-en1-gpio : EN1 GPIO to enable USB type-C analog audio
+- qcom,usbc-analog-en2-gpio : EN2 GPIO to enable USB type-C analog audio
- qcom,usbc-analog-force_detect_gpio : Force detect GPIO to enable USB type-C analog audio
Example:
@@ -2333,8 +2333,8 @@
qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrLeft",
"SpkrRight", "SpkrLeft";
qcom,msm-mbhc-usbc-audio-supported = <1>;
- qcom,usbc-analog-en1_gpio = <&wcd_usbc_analog_en1_gpio>;
- qcom,usbc-analog-en2_n_gpio = <&wcd_usbc_analog_en2n_gpio>;
+ qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
+ qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
qcom,usbc-analog-force_detect_gpio = <&wcd_usbc_analog_f_gpio>;
};
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 958194b..4901fa0 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -70,6 +70,7 @@
2: 38.4 MHz
3: 52 MHz
Defaults to 26 MHz if not specified.
+- extcon: phandle to the external connector (refer to Documentation/devicetree/bindings/extcon/extcon-gpio.txt for more details).
Note: If above properties are not defined it can be assumed that the supply
regulators or clocks are always on.
diff --git a/Makefile b/Makefile
index df4d437..29030c6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 30
+SUBLEVEL = 31
EXTRAVERSION =
NAME = Roaring Lionus
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 9e46d6e..fa47df6a 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -97,4 +97,6 @@
#define SO_CNX_ADVICE 53
+#define SO_COOKIE 57
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index fc0d3b0..40289a8 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -270,6 +270,8 @@
CONFIG_MSM_SMP2P=y
CONFIG_MSM_SMP2P_TEST=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_PWM=y
CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_ANDROID=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 9d12771..d91f5f6 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -263,6 +263,8 @@
CONFIG_MSM_BOOT_STATS=y
CONFIG_TRACER_PKT=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_PWM=y
CONFIG_QCOM_SHOW_RESUME_IRQ=y
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index d060641..9edea10 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -24,6 +24,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+unsigned long arch_get_cpu_efficiency(int cpu);
#ifdef CONFIG_CPU_FREQ
#define arch_scale_freq_capacity cpufreq_scale_freq_capacity
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index bd884da..2b6c530 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -196,6 +196,14 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
return 0;
}
+static DEFINE_PER_CPU(unsigned long, cpu_efficiency) = SCHED_CAPACITY_SCALE;
+
+unsigned long arch_get_cpu_efficiency(int cpu)
+{
+ return per_cpu(cpu_efficiency, cpu);
+}
+EXPORT_SYMBOL(arch_get_cpu_efficiency);
+
#ifdef CONFIG_OF
struct cpu_efficiency {
const char *compatible;
@@ -272,6 +280,7 @@ static int __init parse_dt_topology(void)
for_each_possible_cpu(cpu) {
const u32 *rate;
int len;
+ u32 efficiency;
/* too early to use cpu->of_node */
cn = of_get_cpu_node(cpu, NULL);
@@ -280,12 +289,26 @@ static int __init parse_dt_topology(void)
continue;
}
- for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
- if (of_device_is_compatible(cn, cpu_eff->compatible))
- break;
+ /*
+ * The CPU efficiency value passed from the device tree
+ * overrides the value defined in the table_efficiency[]
+ */
+ if (of_property_read_u32(cn, "efficiency", &efficiency) < 0) {
- if (cpu_eff->compatible == NULL)
- continue;
+ for (cpu_eff = table_efficiency;
+ cpu_eff->compatible; cpu_eff++)
+
+ if (of_device_is_compatible(cn,
+ cpu_eff->compatible))
+ break;
+
+ if (cpu_eff->compatible == NULL)
+ continue;
+
+ efficiency = cpu_eff->efficiency;
+ }
+
+ per_cpu(cpu_efficiency, cpu) = efficiency;
rate = of_get_property(cn, "clock-frequency", &len);
if (!rate || len != 4) {
@@ -294,7 +317,7 @@ static int __init parse_dt_topology(void)
continue;
}
- capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
+ capacity = ((be32_to_cpup(rate)) >> 20) * efficiency;
/* Save min capacity of the system */
if (capacity < min_capacity)
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index ea89751..0c2ae5f 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -63,7 +63,6 @@
<0x150c2000 0x20>;
reg-names = "base", "tcu-base";
#iommu-cells = <2>;
- qcom,skip-init;
qcom,use-3-lvl-tables;
#global-interrupts = <1>;
#size-cells = <1>;
@@ -330,9 +329,19 @@
apps_iommu_test_device {
compatible = "iommu-debug-test";
/*
- * This SID belongs to QUP1-GSI. We can't use a fake SID for
+ * This SID belongs to TSIF. We can't use a fake SID for
* the apps_smmu device.
*/
- iommus = <&apps_smmu 0x16 0>;
+ iommus = <&apps_smmu 0x20 0>;
+ };
+
+ apps_iommu_coherent_test_device {
+ compatible = "iommu-debug-test";
+ /*
+ * This SID belongs to QUP1-DMA. We can't use a fake SID for
+ * the apps_smmu device.
+ */
+ iommus = <&apps_smmu 0x3 0>;
+ dma-coherent;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
new file mode 100644
index 0000000..61ef7ff
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,ion {
+ compatible = "qcom,msm-ion";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ system_heap: qcom,ion-heap@25 {
+ reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
+ };
+
+ qcom,ion-heap@22 { /* ADSP HEAP */
+ reg = <22>;
+ memory-region = <&adsp_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+
+ qcom,ion-heap@27 { /* QSEECOM HEAP */
+ reg = <27>;
+ memory-region = <&qseecom_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+
+ qcom,ion-heap@13 { /* SPSS HEAP */
+ reg = <13>;
+ memory-region = <&sp_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+
+ qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
+ reg = <10>;
+ memory-region = <&secure_display_memory>;
+ qcom,ion-heap-type = "HYP_CMA";
+ };
+
+ qcom,ion-heap@9 {
+ reg = <9>;
+ qcom,ion-heap-type = "SYSTEM_SECURE";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 2cbb990..5969c1d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -279,9 +279,113 @@
#address-cells = <2>;
#size-cells = <2>;
ranges;
+
+ removed_regions: removed_regions@85700000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x85700000 0 0x3800000>;
+ };
+
+ pil_camera_mem: camera_region@8ab00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x8ab00000 0 0x500000>;
+ };
+
+ pil_modem_mem: modem_region@8b000000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x8b000000 0 0x7e00000>;
+ };
+
+ pil_video_mem: pil_video_region@92e00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x92e00000 0 0x500000>;
+ };
+
+ pil_cdsp_mem: cdsp_regions@93300000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x93300000 0 0x600000>;
+ };
+
+ pil_mba_mem: pil_mba_region@0x93900000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x93900000 0 0x200000>;
+ };
+
+ pil_adsp_mem: pil_adsp_region@93b00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x93b00000 0 0x1e00000>;
+ };
+
+ pil_ipa_fw_mem: pil_ipa_fw_region@95900000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x95900000 0 0x10000>;
+ };
+
+ pil_ipa_gsi_mem: pil_ipa_gsi_region@95910000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x95910000 0 0x5000>;
+ };
+
+ pil_gpu_mem: pil_gpu_region@95915000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x95915000 0 0x1000>;
+ };
+
+ adsp_mem: adsp_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xffffffff>;
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0xc00000>;
+ };
+
+ qseecom_mem: qseecom_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xffffffff>;
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0x1400000>;
+ };
+
+ sp_mem: sp_region { /* SPSS-HLOS ION shared mem */
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0x800000>;
+ };
+
+ secure_display_memory: secure_display_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xffffffff>;
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0x5c00000>;
+ };
+
+ /* global autoconfigured region for contiguous allocations */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xffffffff>;
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0x2000000>;
+ linux,cma-default;
+ };
};
};
+#include "sdm670-ion.dtsi"
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -308,6 +412,11 @@
clock-frequency = <19200000>;
};
+ qcom,sps {
+ compatible = "qcom,msm_sps_4k";
+ qcom,pipe-attr-ee;
+ };
+
timer@0x17c90000{
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
index e9b71b9..4b7a680 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -22,7 +22,7 @@
qcom,board-id = <1 1>;
};
-&dsi_nt35597_truly_dsc_cmd_display {
+&dsi_dual_nt35597_truly_cmd_display {
/delete-property/ qcom,dsi-display-active;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
index 73df071..fcf6ad1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -22,7 +22,7 @@
qcom,board-id = <8 1>;
};
-&dsi_nt35597_truly_dsc_cmd_display {
+&dsi_dual_nt35597_truly_cmd_display {
/delete-property/ qcom,dsi-display-active;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
index 628b6cc..709c89d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
@@ -78,6 +78,9 @@
qcom,hph-en0-gpio = <&tavil_hph_en0>;
qcom,hph-en1-gpio = <&tavil_hph_en1>;
qcom,tavil-mclk-clk-freq = <9600000>;
+
+ qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
+
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&compr>,
@@ -136,6 +139,18 @@
<&wsa881x_0213>, <&wsa881x_0214>;
qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
"SpkrLeft", "SpkrRight";
+
+ qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_usbc_analog_en2_active>;
+ pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
+ };
+
+ wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl@49 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_usbc_analog_en1_active>;
+ pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
};
wcd9xxx_intc: wcd9xxx-irq {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index b1c91bf..e26f888 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -25,12 +25,14 @@
<0x1380000 0x40000>,
<0x1740000 0x40000>,
<0x1620000 0x40000>,
+ <0x1620000 0x40000>,
<0x1620000 0x40000>;
reg-names = "aggre1_noc-base", "aggre2_noc-base",
"config_noc-base", "dc_noc-base",
"gladiator_noc-base", "mc_virt-base", "mem_noc-base",
- "mmss_noc-base", "system_noc-base", "ipa_virt-base";
+ "mmss_noc-base", "system_noc-base", "ipa_virt-base",
+ "camnoc_virt-base";
mbox-names = "apps_rsc", "disp_rsc";
mboxes = <&apps_rsc 0 &disp_rsc 0>;
@@ -368,6 +370,15 @@
clocks = <>;
};
+ fab_camnoc_virt: fab-camnoc_virt {
+ cell-id = <MSM_BUS_FAB_CAMNOC_VIRT>;
+ label = "fab-camnoc_virt";
+ qcom,fab-dev;
+ qcom,base-name = "camnoc_virt-base";
+ qcom,bypass-qos-prg;
+ clocks = <>;
+ };
+
fab_config_noc: fab-config_noc {
cell-id = <MSM_BUS_FAB_CONFIG_NOC>;
label = "fab-config_noc";
@@ -654,6 +665,33 @@
qcom,bus-dev = <&fab_aggre2_noc>;
};
+ mas_qxm_camnoc_hf0_uncomp: mas-qxm-camnoc-hf0-uncomp {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP>;
+ label = "mas-qxm-camnoc-hf0-uncomp";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qns_camnoc_uncomp>;
+ qcom,bus-dev = <&fab_camnoc_virt>;
+ };
+
+ mas_qxm_camnoc_hf1_uncomp: mas-qxm-camnoc-hf1-uncomp {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP>;
+ label = "mas-qxm-camnoc-hf1-uncomp";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qns_camnoc_uncomp>;
+ qcom,bus-dev = <&fab_camnoc_virt>;
+ };
+
+ mas_qxm_camnoc_sf_uncomp: mas-qxm-camnoc-sf-uncomp {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_SF_UNCOMP>;
+ label = "mas-qxm-camnoc-sf-uncomp";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qns_camnoc_uncomp>;
+ qcom,bus-dev = <&fab_camnoc_virt>;
+ };
+
mas_qhm_spdm: mas-qhm-spdm {
cell-id = <MSM_BUS_MASTER_SPDM>;
label = "mas-qhm-spdm";
@@ -900,12 +938,23 @@
qcom,bus-dev = <&fab_mmss_noc>;
};
- mas_qxm_camnoc_hf: mas-qxm-camnoc-hf {
- cell-id = <MSM_BUS_MASTER_CAMNOC_HF>;
- label = "mas-qxm-camnoc-hf";
+ mas_qxm_camnoc_hf0: mas-qxm-camnoc-hf0 {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_HF0>;
+ label = "mas-qxm-camnoc-hf0";
qcom,buswidth = <32>;
- qcom,agg-ports = <2>;
- qcom,qport = <1 2>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <1>;
+ qcom,connections = <&slv_qns_mem_noc_hf>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ qcom,bcms = <&bcm_mm1>;
+ };
+
+ mas_qxm_camnoc_hf1: mas-qxm-camnoc-hf1 {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_HF1>;
+ label = "mas-qxm-camnoc-hf1";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <2>;
qcom,connections = <&slv_qns_mem_noc_hf>;
qcom,bus-dev = <&fab_mmss_noc>;
qcom,bcms = <&bcm_mm1>;
@@ -1193,6 +1242,15 @@
qcom,bcms = <&bcm_sn11>;
};
+ slv_qns_camnoc_uncomp:slv-qns-camnoc-uncomp {
+ cell-id = <MSM_BUS_SLAVE_CAMNOC_UNCOMP>;
+ label = "slv-qns-camnoc-uncomp";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_camnoc_virt>;
+ qcom,bcms = <&bcm_mm1>;
+ };
+
slv_qhs_a1_noc_cfg:slv-qhs-a1-noc-cfg {
cell-id = <MSM_BUS_SLAVE_A1NOC_CFG>;
label = "slv-qhs-a1-noc-cfg";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index 922e990..a715025 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -73,7 +73,7 @@
};
};
-&cci {
+&cam_cci {
actuator_rear: qcom,actuator@0 {
cell-index = <0>;
reg = <0x0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index 922e990..a715025 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -73,7 +73,7 @@
};
};
-&cci {
+&cam_cci {
actuator_rear: qcom,actuator@0 {
cell-index = <0>;
reg = <0x0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index cb20e0f..91b8738 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -17,7 +17,7 @@
status = "ok";
};
- qcom,csiphy@ac65000 {
+ cam_csiphy0: qcom,csiphy@ac65000 {
cell-index = <0>;
compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
reg = <0x0ac65000 0x1000>;
@@ -53,7 +53,7 @@
status = "ok";
};
- qcom,csiphy@ac66000{
+ cam_csiphy1: qcom,csiphy@ac66000{
cell-index = <1>;
compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
reg = <0xac66000 0x1000>;
@@ -90,7 +90,7 @@
status = "ok";
};
- qcom,csiphy@ac67000 {
+ cam_csiphy2: qcom,csiphy@ac67000 {
cell-index = <2>;
compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
reg = <0xac67000 0x1000>;
@@ -126,7 +126,7 @@
status = "ok";
};
- cci: qcom,cci@ac4a000 {
+ cam_cci: qcom,cci@ac4a000 {
cell-index = <0>;
compatible = "qcom,cci";
reg = <0xac4a000 0x4000>;
@@ -343,17 +343,17 @@
clock-names = "gcc_ahb_clk",
"gcc_axi_clk",
"soc_ahb_clk",
- "cpas_ahb_clk",
"slow_ahb_clk_src",
+ "cpas_ahb_clk",
"camnoc_axi_clk";
clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
<&clock_gcc GCC_CAMERA_AXI_CLK>,
<&clock_camcc CAM_CC_SOC_AHB_CLK>,
- <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
src-clock-name = "slow_ahb_clk_src";
- clock-rates = <0 0 0 0 80000000 0>;
+ clock-rates = <0 0 0 80000000 0 0>;
qcom,msm-bus,name = "cam_ahb";
qcom,msm-bus,num-cases = <4>;
qcom,msm-bus,num-paths = <1>;
@@ -366,6 +366,21 @@
MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
<MSM_BUS_MASTER_AMPSS_M0
MSM_BUS_SLAVE_CAMERA_CFG 0 640000>;
+ vdd-corners = <RPMH_REGULATOR_LEVEL_OFF
+ RPMH_REGULATOR_LEVEL_RETENTION
+ RPMH_REGULATOR_LEVEL_MIN_SVS
+ RPMH_REGULATOR_LEVEL_LOW_SVS
+ RPMH_REGULATOR_LEVEL_SVS
+ RPMH_REGULATOR_LEVEL_SVS_L1
+ RPMH_REGULATOR_LEVEL_NOM
+ RPMH_REGULATOR_LEVEL_NOM_L1
+ RPMH_REGULATOR_LEVEL_NOM_L2
+ RPMH_REGULATOR_LEVEL_TURBO
+ RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ vdd-corner-ahb-mapping = "suspend", "suspend",
+ "svs", "svs", "svs", "svs",
+ "nominal", "nominal", "nominal",
+ "turbo", "turbo";
client-id-based;
client-names =
"csiphy0", "csiphy1", "csiphy2", "cci0",
@@ -389,10 +404,10 @@
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>,
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ <MSM_BUS_MASTER_CAMNOC_HF0
+ MSM_BUS_SLAVE_EBI_CH0 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_HF0
+ MSM_BUS_SLAVE_EBI_CH0 0 0>;
};
qcom,axi-port-camnoc {
qcom,msm-bus,name = "cam_hf_1_camnoc";
@@ -400,10 +415,10 @@
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>,
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ <MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
};
};
qcom,axi-port2 {
@@ -414,21 +429,21 @@
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>,
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ <MSM_BUS_MASTER_CAMNOC_HF1
+ MSM_BUS_SLAVE_EBI_CH0 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_HF1
+ MSM_BUS_SLAVE_EBI_CH0 0 0>;
};
qcom,axi-port-camnoc {
- qcom,msm-bus,name = "cam_hf_1_camnoc";
+ qcom,msm-bus,name = "cam_hf_2_camnoc";
qcom,msm-bus-vector-dyn-vote;
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>,
- <MSM_BUS_MASTER_CAMNOC_HF
- MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ <MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
};
};
qcom,axi-port3 {
@@ -439,10 +454,10 @@
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
- <MSM_BUS_MASTER_CAMNOC_SF
- MSM_BUS_SLAVE_EBI_CH0 0 0>,
- <MSM_BUS_MASTER_CAMNOC_SF
- MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ <MSM_BUS_MASTER_CAMNOC_SF
+ MSM_BUS_SLAVE_EBI_CH0 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_SF
+ MSM_BUS_SLAVE_EBI_CH0 0 0>;
};
qcom,axi-port-camnoc {
qcom,msm-bus,name = "cam_sf_1_camnoc";
@@ -450,10 +465,10 @@
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
- <MSM_BUS_MASTER_CAMNOC_SF
- MSM_BUS_SLAVE_EBI_CH0 0 0>,
- <MSM_BUS_MASTER_CAMNOC_SF
- MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ <MSM_BUS_MASTER_CAMNOC_SF_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_SF_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
};
};
};
@@ -503,7 +518,7 @@
status = "ok";
};
- qcom,csid0@acb3000 {
+ cam_csid0: qcom,csid0@acb3000 {
cell-index = <0>;
compatible = "qcom,csid170";
reg-names = "csid";
@@ -545,7 +560,7 @@
status = "ok";
};
- qcom,vfe0@acaf000 {
+ cam_vfe0: qcom,vfe0@acaf000 {
cell-index = <0>;
compatible = "qcom,vfe170";
reg-names = "ife";
@@ -582,7 +597,7 @@
status = "ok";
};
- qcom,csid1@acba000 {
+ cam_csid1: qcom,csid1@acba000 {
cell-index = <1>;
compatible = "qcom,csid170";
reg-names = "csid";
@@ -624,7 +639,7 @@
status = "ok";
};
- qcom,vfe1@acb6000 {
+ cam_vfe1: qcom,vfe1@acb6000 {
cell-index = <1>;
compatible = "qcom,vfe170";
reg-names = "ife";
@@ -661,7 +676,7 @@
status = "ok";
};
- qcom,csid-lite@acc8000 {
+ cam_csid_lite: qcom,csid-lite@acc8000 {
cell-index = <2>;
compatible = "qcom,csid-lite170";
reg-names = "csid-lite";
@@ -700,7 +715,7 @@
status = "ok";
};
- qcom,vfe-lite@acc4000 {
+ cam_vfe_lite: qcom,vfe-lite@acc4000 {
cell-index = <2>;
compatible = "qcom,vfe-lite170";
reg-names = "ife-lite";
@@ -743,7 +758,7 @@
status = "ok";
};
- qcom,a5@ac00000 {
+ cam_a5: qcom,a5@ac00000 {
cell-index = <0>;
compatible = "qcom,cam_a5";
reg = <0xac00000 0x6000>,
@@ -757,6 +772,7 @@
camss-vdd-supply = <&titan_top_gdsc>;
clock-names = "gcc_cam_ahb_clk",
"gcc_cam_axi_clk",
+ "soc_fast_ahb",
"soc_ahb_clk",
"cpas_ahb_clk",
"camnoc_axi_clk",
@@ -765,6 +781,7 @@
"icp_clk_src";
clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
<&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_FAST_AHB_CLK_SRC>,
<&clock_camcc CAM_CC_SOC_AHB_CLK>,
<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
@@ -772,12 +789,12 @@
<&clock_camcc CAM_CC_ICP_CLK>,
<&clock_camcc CAM_CC_ICP_CLK_SRC>;
- clock-rates = <0 0 0 80000000 0 0 0 600000000>;
+ clock-rates = <0 0 400000000 0 0 0 0 0 600000000>;
fw_name = "CAMERA_ICP.elf";
status = "ok";
};
- qcom,ipe0 {
+ cam_ipe0: qcom,ipe0 {
cell-index = <0>;
compatible = "qcom,cam_ipe";
regulator-names = "ipe0-vdd";
@@ -793,11 +810,11 @@
<&clock_camcc CAM_CC_IPE_0_CLK>,
<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
- clock-rates = <80000000 400000000 0 0 600000000>;
+ clock-rates = <0 0 0 0 600000000>;
status = "ok";
};
- qcom,ipe1 {
+ cam_ipe1: qcom,ipe1 {
cell-index = <1>;
compatible = "qcom,cam_ipe";
regulator-names = "ipe1-vdd";
@@ -813,11 +830,11 @@
<&clock_camcc CAM_CC_IPE_1_CLK>,
<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
- clock-rates = <80000000 400000000 0 0 600000000>;
+ clock-rates = <0 0 0 0 600000000>;
status = "ok";
};
- qcom,bps {
+ cam_bps: qcom,bps {
cell-index = <0>;
compatible = "qcom,cam_bps";
regulator-names = "bps-vdd";
@@ -833,7 +850,7 @@
<&clock_camcc CAM_CC_BPS_CLK>,
<&clock_camcc CAM_CC_BPS_CLK_SRC>;
- clock-rates = <80000000 400000000 0 0 600000000>;
+ clock-rates = <0 0 0 0 600000000>;
status = "ok";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
index fff9160..ef964ae 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
@@ -13,8 +13,13 @@
/dts-v1/;
/plugin/;
-#include "sdm845.dtsi"
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
#include "sdm845-cdp.dtsi"
+#include "sdm845-qupv3.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM845 v1 CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index a1e0e4f..8ace432 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -155,6 +155,8 @@
qcom,vddp-ref-clk-supply = <&pm8998_l2>;
qcom,vddp-ref-clk-max-microamp = <100>;
+ extcon = <&extcon_storage_cd>;
+
status = "ok";
};
@@ -298,7 +300,7 @@
qcom,platform-reset-gpio = <&tlmm 6 0>;
};
-&dsi_nt35597_truly_dsc_cmd_display {
+&dsi_dual_nt35597_truly_cmd_display {
qcom,dsi-display-active;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index 92f8586..1ce68e1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -219,6 +219,7 @@
qcom,secure_align_mask = <0xfff>;
qcom,global_pt;
+ qcom,hyp_secure_alloc;
gfx3d_user: gfx3d_user {
compatible = "qcom,smmu-kgsl-cb";
@@ -277,7 +278,7 @@
qcom,gmu-pwrlevel@1 {
reg = <1>;
- qcom,gmu-freq = <19200000>;
+ qcom,gmu-freq = <200000000>;
};
qcom,gmu-pwrlevel@2 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
index 79fa580..548bd49 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
@@ -13,8 +13,13 @@
/dts-v1/;
/plugin/;
-#include "sdm845.dtsi"
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
#include "sdm845-mtp.dtsi"
+#include "sdm845-qupv3.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM845 v1 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 85bec57..ab266ef 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -150,7 +150,7 @@
qcom,platform-reset-gpio = <&tlmm 6 0>;
};
-&dsi_nt35597_truly_dsc_cmd_display {
+&dsi_dual_nt35597_truly_cmd_display {
qcom,dsi-display-active;
};
@@ -188,6 +188,17 @@
status = "ok";
};
+&extcon_storage_cd {
+ gpio = <&tlmm 126 GPIO_ACTIVE_LOW>;
+ debounce-ms = <200>;
+ irq-flags = <IRQ_TYPE_EDGE_BOTH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&storage_cd>;
+
+ status = "ok";
+};
+
&ufsphy_card {
compatible = "qcom,ufs-phy-qmp-v3";
@@ -210,6 +221,8 @@
qcom,vddp-ref-clk-supply = <&pm8998_l2>;
qcom,vddp-ref-clk-max-microamp = <100>;
+ extcon = <&extcon_storage_cd>;
+
status = "ok";
};
@@ -230,6 +243,8 @@
50000000 100000000 200000000>;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+ extcon = <&extcon_storage_cd>;
+
status = "ok";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 1fbd8a2..9946a25 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -299,6 +299,63 @@
};
};
+ /* USB C analog configuration */
+ wcd_usbc_analog_en1 {
+ wcd_usbc_analog_en1_idle: wcd_usbc_ana_en1_idle {
+ mux {
+ pins = "gpio49";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio49";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+
+ wcd_usbc_analog_en1_active: wcd_usbc_ana_en1_active {
+ mux {
+ pins = "gpio49";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio49";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ wcd_usbc_analog_en2 {
+ wcd_usbc_analog_en2_idle: wcd_usbc_ana_en2_idle {
+ mux {
+ pins = "gpio51";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio51";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+
+ wcd_usbc_analog_en2_active: wcd_usbc_ana_en2_active {
+ mux {
+ pins = "gpio51";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio51";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
pri_aux_pcm_clk {
pri_aux_pcm_clk_sleep: pri_aux_pcm_clk_sleep {
mux {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 4dba65b..c2fbed5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -105,6 +105,41 @@
qcom,wsa-max-devs = <1>;
qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
+
+ qcom,msm-mbhc-usbc-audio-supported = <1>;
+
+ qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_usbc_analog_en2_active>;
+ pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
+ };
+};
+
+&wcd934x_cdc {
+ wcd: wcd_pinctrl@5 {
+ us_euro_sw_wcd_active: us_euro_sw_wcd_active {
+ mux {
+ pins = "gpio1";
+ };
+
+ config {
+ pins = "gpio1";
+ /delete-property/ output-high;
+ bias-high-impedance;
+ };
+ };
+
+ us_euro_sw_wcd_sleep: us_euro_sw_wcd_sleep {
+ mux {
+ pins = "gpio1";
+ };
+
+ config {
+ pins = "gpio1";
+ /delete-property/ output-low;
+ bias-high-impedance;
+ };
+ };
};
};
@@ -133,6 +168,17 @@
status = "ok";
};
+&extcon_storage_cd {
+ gpio = <&tlmm 126 GPIO_ACTIVE_LOW>;
+ debounce-ms = <200>;
+ irq-flags = <IRQ_TYPE_EDGE_BOTH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&storage_cd>;
+
+ status = "ok";
+};
+
&ufsphy_card {
compatible = "qcom,ufs-phy-qmp-v3";
@@ -155,6 +201,30 @@
qcom,vddp-ref-clk-supply = <&pm8998_l2>;
qcom,vddp-ref-clk-max-microamp = <100>;
+ extcon = <&extcon_storage_cd>;
+
+ status = "ok";
+};
+
+&sdhc_2 {
+ vdd-supply = <&pm8998_l21>;
+ qcom,vdd-voltage-level = <2950000 2960000>;
+ qcom,vdd-current-level = <200 800000>;
+
+ vdd-io-supply = <&pm8998_l13>;
+ qcom,vdd-io-voltage-level = <1808000 2960000>;
+ qcom,vdd-io-current-level = <200 22000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+
+ qcom,clk-rates = <400000 20000000 25000000
+ 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+ extcon = <&extcon_storage_cd>;
+
status = "ok";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index bf58da6..1500bb5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -368,10 +368,106 @@
cell-index = <0>;
label = "wb_display";
};
+
+ sde_dp: qcom,dp_display@0{
+ cell-index = <0>;
+ compatible = "qcom,dp-display";
+
+ gdsc-supply = <&mdss_core_gdsc>;
+ vdda-1p2-supply = <&pm8998_l26>;
+ vdda-0p9-supply = <&pm8998_l1>;
+
+ reg = <0xae90000 0xa84>,
+ <0x88eaa00 0x200>,
+ <0x88ea200 0x200>,
+ <0x88ea600 0x200>,
+ <0xaf02000 0x1a0>,
+ <0x780000 0x621c>,
+ <0x88ea030 0x10>,
+ <0x0aee1000 0x034>;
+ reg-names = "dp_ctrl", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
+ "dp_mmss_cc", "qfprom_physical", "dp_pll",
+ "hdcp_physical";
+
+ interrupt-parent = <&mdss_mdp>;
+ interrupts = <12 0>;
+
+ clocks = <&clock_dispcc DISP_CC_MDSS_DP_AUX_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>,
+ <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+ <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
+ <&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>;
+ clock-names = "core_aux_clk", "core_usb_ref_clk_src",
+ "core_usb_ref_clk", "core_usb_cfg_ahb_clk",
+ "core_usb_pipe_clk", "ctrl_link_clk",
+ "ctrl_link_iface_clk", "ctrl_crypto_clk",
+ "ctrl_pixel_clk", "pixel_clk_rcg", "pixel_parent";
+
+ qcom,dp-usbpd-detection = <&pmi8998_pdphy>;
+
+ qcom,aux-cfg-settings = [00 13 04 00 0a 26 0a 03 bb 03];
+
+ qcom,core-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,core-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+
+ qcom,ctrl-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ctrl-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdda-1p2";
+ qcom,supply-min-voltage = <1200000>;
+ qcom,supply-max-voltage = <1200000>;
+ qcom,supply-enable-load = <21800>;
+ qcom,supply-disable-load = <4>;
+ };
+ };
+
+ qcom,phy-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,phy-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdda-0p9";
+ qcom,supply-min-voltage = <880000>;
+ qcom,supply-max-voltage = <880000>;
+ qcom,supply-enable-load = <36000>;
+ qcom,supply-disable-load = <32>;
+ };
+ };
+ };
+};
+
+&sde_dp {
+ pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
+ pinctrl-0 = <&sde_dp_aux_active &sde_dp_usbplug_cc_active>;
+ pinctrl-1 = <&sde_dp_aux_suspend &sde_dp_usbplug_cc_suspend>;
+ qcom,aux-en-gpio = <&tlmm 43 0>;
+ qcom,aux-sel-gpio = <&tlmm 51 0>;
+ qcom,usbplug-cc-gpio = <&tlmm 38 0>;
};
&mdss_mdp {
- connectors = <&sde_rscc &sde_wb>;
+ connectors = <&sde_rscc &sde_wb &sde_dp>;
};
&dsi_dual_nt35597_truly_video {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
index 168f2a9..b9eac3c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
@@ -64,4 +64,46 @@
};
};
};
+
+ mdss_dp_pll: qcom,mdss_dp_pll@c011000 {
+ compatible = "qcom,mdss_dp_pll_10nm";
+ label = "MDSS DP PLL";
+ cell-index = <0>;
+ #clock-cells = <1>;
+
+ reg = <0x088ea000 0x200>,
+ <0x088eaa00 0x200>,
+ <0x088ea200 0x200>,
+ <0x088ea600 0x200>,
+ <0xaf03000 0x8>;
+ reg-names = "pll_base", "phy_base", "ln_tx0_base",
+ "ln_tx1_base", "gdsc_base";
+
+ gdsc-supply = <&mdss_core_gdsc>;
+
+ clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>,
+ <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+ <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+ clock-names = "iface_clk", "ref_clk_src", "ref_clk",
+ "cfg_ahb_clk", "pipe_clk";
+ clock-rate = <0>;
+
+ qcom,platform-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,platform-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+
+ };
+ };
+
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index c350800..08c7cf0 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -86,8 +86,6 @@
qcom,sde-dither-version = <0x00010000>;
qcom,sde-dither-size = <0x20>;
- qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
-
qcom,sde-sspp-type = "vig", "vig", "vig", "vig",
"dma", "dma", "dma", "dma";
@@ -136,10 +134,40 @@
qcom,sde-vbif-off = <0>;
qcom,sde-vbif-size = <0x1040>;
qcom,sde-vbif-id = <0>;
+ qcom,sde-vbif-memtype-0 = <3 3 3 3 3 3 3 3>;
+ qcom,sde-vbif-memtype-1 = <3 3 3 3 3 3>;
qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>;
qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>;
+ qcom,sde-danger-lut = <0x0000000f 0x0000ffff 0x00000000
+ 0x00000000>;
+ qcom,sde-safe-lut = <0xfffc 0xff00 0xffff 0xffff>;
+ qcom,sde-qos-lut-linear =
+ <4 0x00000000 0x00000357>,
+ <5 0x00000000 0x00003357>,
+ <6 0x00000000 0x00023357>,
+ <7 0x00000000 0x00223357>,
+ <8 0x00000000 0x02223357>,
+ <9 0x00000000 0x22223357>,
+ <10 0x00000002 0x22223357>,
+ <11 0x00000022 0x22223357>,
+ <12 0x00000222 0x22223357>,
+ <13 0x00002222 0x22223357>,
+ <14 0x00012222 0x22223357>,
+ <0 0x00112222 0x22223357>;
+ qcom,sde-qos-lut-macrotile =
+ <10 0x00000003 0x44556677>,
+ <11 0x00000033 0x44556677>,
+ <12 0x00000233 0x44556677>,
+ <13 0x00002233 0x44556677>,
+ <14 0x00012233 0x44556677>,
+ <0 0x00112233 0x44556677>;
+ qcom,sde-qos-lut-nrt =
+ <0 0x00000000 0x00000000>;
+ qcom,sde-qos-lut-cwb =
+ <0 0x75300000 0x00000000>;
+
qcom,sde-inline-rotator = <&mdss_rotator 0>;
qcom,sde-reg-dma-off = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index 4fe9282..af12224 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -27,6 +27,10 @@
qcom,max-secure-instances = <5>;
qcom,max-hw-load = <2563200>; /* Full 4k @ 60 + 1080p @ 60 */
+ /* LLCC Info */
+ cache-slice-names = "vidsc0", "vidsc1";
+ cache-slices = <&llcc 2>, <&llcc 3>;
+
/* Supply */
venus-supply = <&venus_gdsc>;
venus-core0-supply = <&vcodec0_gdsc>;
@@ -91,6 +95,14 @@
qcom,bus-governor = "performance";
qcom,bus-range-kbps = <1000 1000>;
};
+ venus_bus_llcc {
+ compatible = "qcom,msm-vidc,bus";
+ label = "venus-llcc";
+ qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+ qcom,bus-slave = <MSM_BUS_SLAVE_LLCC>;
+ qcom,bus-governor = "performance";
+ qcom,bus-range-kbps = <17000 125700>;
+ };
/* MMUs */
non_secure_cb {
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index a38ac59..6284361 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -459,14 +459,35 @@
compatible = "simple-bus";
};
+ firmware: firmware {
+ android {
+ compatible = "android,firmware";
+ fstab {
+ compatible = "android,fstab";
+ vendor {
+ compatible = "android,vendor";
+ dev = "/dev/block/platform/soc/1d84000.ufshc/by-name/vendor";
+ type = "ext4";
+ mnt_flags = "ro,barrier=1,discard";
+ fsmgr_flags = "wait,slotselect";
+ };
+ };
+ };
+ };
+
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
ranges;
- removed_regions: removed_regions@85800000 {
+ removed_region1: removed_region1@85700000 {
no-map;
- reg = <0 0x85800000 0 0x3700000>;
+ reg = <0 0x85700000 0 0x800000>;
+ };
+
+ removed_region2: removed_region2@85fc0000 {
+ no-map;
+ reg = <0 0x85fc0000 0 0x2f40000>;
};
pil_camera_mem: camera_region@8ab00000 {
@@ -755,63 +776,6 @@
};
};
- msm_cpufreq: qcom,msm-cpufreq {
- compatible = "qcom,msm-cpufreq";
- clock-names = "cpu0_clk", "cpu4_clk";
- clocks = <&clock_cpucc CPU0_PWRCL_CLK>,
- <&clock_cpucc CPU4_PERFCL_CLK>;
-
- qcom,governor-per-policy;
-
- qcom,cpufreq-table-0 =
- < 300000 >,
- < 422400 >,
- < 499200 >,
- < 576000 >,
- < 652800 >,
- < 748800 >,
- < 825600 >,
- < 902400 >,
- < 979200 >,
- < 1056000 >,
- < 1132800 >,
- < 1209600 >,
- < 1286400 >,
- < 1363200 >,
- < 1440000 >,
- < 1516800 >,
- < 1593600 >,
- < 1651200 >,
- < 1708800 >;
-
- qcom,cpufreq-table-4 =
- < 300000 >,
- < 422400 >,
- < 499200 >,
- < 576000 >,
- < 652800 >,
- < 729600 >,
- < 806400 >,
- < 883200 >,
- < 960000 >,
- < 1036800 >,
- < 1113600 >,
- < 1190400 >,
- < 1267200 >,
- < 1344000 >,
- < 1420800 >,
- < 1497600 >,
- < 1574400 >,
- < 1651200 >,
- < 1728000 >,
- < 1804800 >,
- < 1881600 >,
- < 1958400 >,
- < 2035200 >,
- < 2092800 >,
- < 2208000 >;
- };
-
cpubw: qcom,cpubw {
compatible = "qcom,devbw";
governor = "performance";
@@ -1084,6 +1048,20 @@
reg-names = "cc_base";
vdd_cx-supply = <&pm8998_s9_level>;
vdd_mx-supply = <&pm8998_s6_level>;
+ qcom,cam_cc_csi0phytimer_clk_src-opp-handle = <&cam_csiphy0>;
+ qcom,cam_cc_csi1phytimer_clk_src-opp-handle = <&cam_csiphy1>;
+ qcom,cam_cc_csi2phytimer_clk_src-opp-handle = <&cam_csiphy2>;
+ qcom,cam_cc_cci_clk_src-opp-handle = <&cam_cci>;
+ qcom,cam_cc_ife_0_csid_clk_src-opp-handle = <&cam_csid0>;
+ qcom,cam_cc_ife_0_clk_src-opp-handle = <&cam_vfe0>;
+ qcom,cam_cc_ife_1_csid_clk_src-opp-handle = <&cam_csid1>;
+ qcom,cam_cc_ife_1_clk_src-opp-handle = <&cam_vfe1>;
+ qcom,cam_cc_ife_lite_csid_clk_src-opp-handle = <&cam_csid_lite>;
+ qcom,cam_cc_ife_lite_clk_src-opp-handle = <&cam_vfe_lite>;
+ qcom,cam_cc_icp_clk_src-opp-handle = <&cam_a5>;
+ qcom,cam_cc_ipe_0_clk_src-opp-handle = <&cam_ipe0>;
+ qcom,cam_cc_ipe_1_clk_src-opp-handle = <&cam_ipe1>;
+ qcom,cam_cc_bps_clk_src-opp-handle = <&cam_bps>;
#clock-cells = <1>;
#reset-cells = <1>;
};
@@ -3264,44 +3242,48 @@
};
};
- gpu0-step {
+ gpu-virt-max-step {
polling-delay-passive = <10>;
- polling-delay = <0>;
- thermal-sensors = <&tsens0 11>;
+ polling-delay = <100>;
thermal-governor = "step_wise";
trips {
- gpu0_trip: gpu0-trip {
+ gpu_trip0: gpu-trip0 {
temperature = <95000>;
hysteresis = <0>;
type = "passive";
};
};
cooling-maps {
- gpu0_cdev {
- trip = <&gpu0_trip>;
+ gpu_cdev0 {
+ trip = <&gpu_trip0>;
cooling-device =
- <&msm_gpu 1 THERMAL_NO_LIMIT>;
+ <&msm_gpu 0 THERMAL_NO_LIMIT>;
};
};
};
- gpu1-step {
- polling-delay-passive = <10>;
- polling-delay = <0>;
- thermal-sensors = <&tsens0 12>;
- thermal-governor = "step_wise";
+ silver-virt-max-usr {
+ polling-delay-passive = <100>;
+ polling-delay = <100>;
+ thermal-governor = "user_space";
trips {
- gpu1_trip: gpu1-trip {
- temperature = <95000>;
+ silver-trip {
+ temperature = <120000>;
hysteresis = <0>;
type = "passive";
};
};
- cooling-maps {
- gpu1_cdev {
- trip = <&gpu1_trip>;
- cooling-device =
- <&msm_gpu 1 THERMAL_NO_LIMIT>;
+ };
+
+ gold-virt-max-usr {
+ polling-delay-passive = <100>;
+ polling-delay = <100>;
+ thermal-governor = "user_space";
+ trips {
+ gold-trip {
+ temperature = <120000>;
+ hysteresis = <0>;
+ type = "passive";
};
};
};
@@ -4203,10 +4185,12 @@
};
&vcodec0_gdsc {
+ qcom,support-hw-trigger;
status = "ok";
};
&vcodec1_gdsc {
+ qcom,support-hw-trigger;
status = "ok";
};
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 8c20b3f..f10047f 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -9,6 +9,8 @@
CONFIG_SCHED_WALT=y
CONFIG_RCU_EXPERT=y
CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
@@ -322,6 +324,7 @@
CONFIG_THERMAL_TSENS=y
CONFIG_MSM_BCL_PERIPHERAL_CTL=y
CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_WCD934X_CODEC=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 0bebc63b..4cd202c 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -12,6 +12,8 @@
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_RCU_EXPERT=y
CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
@@ -331,6 +333,7 @@
CONFIG_THERMAL_TSENS=y
CONFIG_MSM_BCL_PERIPHERAL_CTL=y
CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_WCD934X_CODEC=y
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 9c4b57a..d8199e1 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -252,8 +252,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
*/
off = offsetof(struct bpf_array, ptrs);
emit_a64_mov_i64(tmp, off, ctx);
- emit(A64_LDR64(tmp, r2, tmp), ctx);
- emit(A64_LDR64(prg, tmp, r3), ctx);
+ emit(A64_ADD(1, tmp, r2, tmp), ctx);
+ emit(A64_LSL(1, prg, r3, 3), ctx);
+ emit(A64_LDR64(prg, tmp, prg), ctx);
emit(A64_CBZ(1, prg, jmp_offset), ctx);
/* goto *(prog->bpf_func + prologue_size); */
diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index 1fd147f..5f10f9b 100644
--- a/arch/avr32/include/uapi/asm/socket.h
+++ b/arch/avr32/include/uapi/asm/socket.h
@@ -90,4 +90,6 @@
#define SO_CNX_ADVICE 53
+#define SO_COOKIE 57
+
#endif /* _UAPI__ASM_AVR32_SOCKET_H */
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index afbc98f0..ed960d3 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -90,5 +90,7 @@
#define SO_CNX_ADVICE 53
+#define SO_COOKIE 57
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 0018fad..9790d13 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -99,4 +99,6 @@
#define SO_CNX_ADVICE 53
+#define SO_COOKIE 57
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 5fe42fc..ad25676 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -90,4 +90,6 @@
#define SO_CNX_ADVICE 53
+#define SO_COOKIE 57
+
#endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 2027240a..2f106d0 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -108,4 +108,6 @@
#define SO_CNX_ADVICE 53
+#define SO_COOKIE 57
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index 5129f23..69f9618 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -90,4 +90,6 @@
#define SO_CNX_ADVICE 53
+#define SO_COOKIE 57
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 9c935d7..b96a193 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -89,4 +89,6 @@
#define SO_CNX_ADVICE 0x402E
+#define SO_COOKIE 0x4032
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index 1672e33..e78550f 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -97,4 +97,6 @@
#define SO_CNX_ADVICE 53
+#define SO_COOKIE 57
+
#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index e84d8fb..378c37a 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -197,7 +197,9 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
(REGION_ID(ea) != USER_REGION_ID)) {
spin_unlock(&spu->register_lock);
- ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ, 0x300, dsisr);
+ ret = hash_page(ea,
+ _PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED,
+ 0x300, dsisr);
spin_lock(&spu->register_lock);
if (!ret) {
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 41b51c2..04fe908 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -96,4 +96,6 @@
#define SO_CNX_ADVICE 53
+#define SO_COOKIE 57
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index ce6f569..cf19072 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -91,9 +91,9 @@ extern unsigned long pfn_base;
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/*
* In general all page table modifications should use the V8 atomic
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 29d64b1..be0cc1b 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -16,7 +16,7 @@ extern char reboot_command[];
*/
extern unsigned char boot_cpu_id;
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
extern int serial_console;
static inline int con_is_present(void)
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 31aede3..de15f0a 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -86,6 +86,8 @@
#define SO_CNX_ADVICE 0x0037
+#define SO_COOKIE 0x003b
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 6bcff69..cec54dc 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -130,18 +130,17 @@ unsigned long prepare_ftrace_return(unsigned long parent,
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return parent + 8UL;
+ trace.func = self_addr;
+ trace.depth = current->curr_ret_stack + 1;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace))
+ return parent + 8UL;
+
if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
frame_pointer, NULL) == -EBUSY)
return parent + 8UL;
- trace.func = self_addr;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
- return parent + 8UL;
- }
-
return return_hooker;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index eb82871..3b7092d 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -301,7 +301,7 @@ void __init mem_init(void)
/* Saves us work later. */
- memset((void *)&empty_zero_page, 0, PAGE_SIZE);
+ memset((void *)empty_zero_page, 0, PAGE_SIZE);
i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
i += 1;
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 34d9e15..4669b3a 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -94,7 +94,7 @@
quiet_cmd_check_data_rel = DATAREL $@
define cmd_check_data_rel
for obj in $(filter %.o,$^); do \
- readelf -S $$obj | grep -qF .rel.local && { \
+ ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
echo "error: $$obj has data relocations!" >&2; \
exit 1; \
} || true; \
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 9bd7ff5..70c9cc3 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -257,6 +257,7 @@ static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
#endif
int mce_available(struct cpuinfo_x86 *c);
+bool mce_is_memory_error(struct mce *m);
DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 22cda29..8ca5f8a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -598,16 +598,14 @@ static void mce_read_aux(struct mce *m, int i)
}
}
-static bool memory_error(struct mce *m)
+bool mce_is_memory_error(struct mce *m)
{
- struct cpuinfo_x86 *c = &boot_cpu_data;
-
- if (c->x86_vendor == X86_VENDOR_AMD) {
+ if (m->cpuvendor == X86_VENDOR_AMD) {
/* ErrCodeExt[20:16] */
u8 xec = (m->status >> 16) & 0x1f;
return (xec == 0x0 || xec == 0x8);
- } else if (c->x86_vendor == X86_VENDOR_INTEL) {
+ } else if (m->cpuvendor == X86_VENDOR_INTEL) {
/*
* Intel SDM Volume 3B - 15.9.2 Compound Error Codes
*
@@ -628,6 +626,7 @@ static bool memory_error(struct mce *m)
return false;
}
+EXPORT_SYMBOL_GPL(mce_is_memory_error);
DEFINE_PER_CPU(unsigned, mce_poll_count);
@@ -691,7 +690,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
- if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
+ if (severity == MCE_DEFERRED_SEVERITY && mce_is_memory_error(&m))
if (m.status & MCI_STATUS_ADDRV)
m.severity = severity;
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index 81435d9..fc7ca28 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -101,4 +101,6 @@
#define SO_CNX_ADVICE 53
+#define SO_COOKIE 57
+
#endif /* _XTENSA_SOCKET_H */
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index f7d0018..93110d7 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -221,6 +221,44 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
return 0;
}
+static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+ struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
+ u8 *buffer, *alignbuffer;
+ unsigned long absize;
+ int ret;
+
+ absize = keylen + alignmask;
+ buffer = kmalloc(absize, GFP_ATOMIC);
+ if (!buffer)
+ return -ENOMEM;
+
+ alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ memcpy(alignbuffer, key, keylen);
+ ret = cipher->setkey(tfm, alignbuffer, keylen);
+ kzfree(buffer);
+ return ret;
+}
+
+static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
+ unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+
+ if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if ((unsigned long)key & alignmask)
+ return skcipher_setkey_unaligned(tfm, key, keylen);
+
+ return cipher->setkey(tfm, key, keylen);
+}
+
static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
@@ -241,7 +279,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
tfm->__crt_alg->cra_type == &crypto_givcipher_type)
return crypto_init_skcipher_ops_ablkcipher(tfm);
- skcipher->setkey = alg->setkey;
+ skcipher->setkey = skcipher_setkey;
skcipher->encrypt = alg->encrypt;
skcipher->decrypt = alg->decrypt;
skcipher->ivsize = alg->ivsize;
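The setkey hook installed above copies an unaligned key into a scratch buffer and rounds the pointer up to the algorithm's alignment before calling the real setkey. A minimal, stand-alone sketch of the same pointer rounding (plain user-space C; the ALIGN_UP macro and the sample alignmask/keylen are illustrative stand-ins, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Same rounding as the kernel's ALIGN(): round x up to a multiple of a */
#define ALIGN_UP(x, a)  (((x) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
	unsigned long alignmask = 15;   /* e.g. a cipher requiring 16-byte key alignment */
	size_t keylen = 32;
	uint8_t key[32] = { 0 };

	/* keylen + alignmask leaves room for the rounded-up pointer */
	uint8_t *buffer = malloc(keylen + alignmask);
	if (!buffer)
		return 1;

	uint8_t *alignbuffer = (uint8_t *)ALIGN_UP((uintptr_t)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);   /* the cipher would consume alignbuffer here */

	printf("raw=%p aligned=%p\n", (void *)buffer, (void *)alignbuffer);
	free(buffer);
	return 0;
}

Over-allocating by alignmask bytes is what makes the rounding safe: the aligned pointer can move forward by at most alignmask bytes and still leave keylen usable bytes, which is why the patch computes absize = keylen + alignmask.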
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 6d5a8c1..e19f530 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -113,7 +113,7 @@ struct acpi_button {
static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
static struct acpi_device *lid_device;
-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
+static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
static unsigned long lid_report_interval __read_mostly = 500;
module_param(lid_report_interval, ulong, 0644);
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index e5ce81c..e25787a 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -26,7 +26,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
struct nfit_spa *nfit_spa;
/* We only care about memory errors */
- if (!(mce->status & MCACOD))
+ if (!mce_is_memory_error(mce))
return NOTIFY_DONE;
/*
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 3f2ce31..9102df7 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1381,24 +1381,14 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
goto bail;
}
- PERF(fl->profile, fl->perf.invargs,
- if (!fl->sctx->smmu.coherent) {
+ if (!fl->sctx->smmu.coherent)
inv_args_pre(ctx);
- if (mode == FASTRPC_MODE_SERIAL)
- inv_args(ctx);
- }
- PERF_END);
-
PERF(fl->profile, fl->perf.link,
VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
PERF_END);
if (err)
goto bail;
- PERF(fl->profile, fl->perf.invargs,
- if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
- inv_args(ctx);
- PERF_END);
wait:
if (kernel)
wait_for_completion(&ctx->work);
@@ -1408,6 +1398,12 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
if (err)
goto bail;
}
+
+ PERF(fl->profile, fl->perf.invargs,
+ if (!fl->sctx->smmu.coherent)
+ inv_args(ctx);
+ PERF_END);
+
VERIFY(err, 0 == (err = ctx->retval));
if (err)
goto bail;
@@ -1790,11 +1786,9 @@ void fastrpc_glink_notify_state(void *handle, const void *priv,
link->port_state = FASTRPC_LINK_DISCONNECTED;
break;
case GLINK_REMOTE_DISCONNECTED:
- if (me->channel[cid].chan &&
- link->link_state == FASTRPC_LINK_STATE_UP) {
+ if (me->channel[cid].chan) {
fastrpc_glink_close(me->channel[cid].chan, cid);
me->channel[cid].chan = 0;
- link->port_state = FASTRPC_LINK_DISCONNECTED;
}
break;
default:
@@ -1960,10 +1954,9 @@ static int fastrpc_glink_open(int cid)
if (err)
goto bail;
- if (link->port_state == FASTRPC_LINK_CONNECTED ||
- link->port_state == FASTRPC_LINK_CONNECTING) {
+ VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
+ if (err)
goto bail;
- }
link->port_state = FASTRPC_LINK_CONNECTING;
cfg->priv = (void *)(uintptr_t)cid;
@@ -2113,7 +2106,9 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
fl->ssrcount = me->channel[cid].ssrcount;
if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
(me->channel[cid].chan == 0)) {
- fastrpc_glink_register(cid, me);
+ VERIFY(err, 0 == fastrpc_glink_register(cid, me));
+ if (err)
+ goto bail;
VERIFY(err, 0 == fastrpc_glink_open(cid));
if (err)
goto bail;
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index fc061f7..a7de8ae 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
if (rc <= 0) {
- DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+ DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
DEBUGP(2, dev, "<- cm4040_write (failed)\n");
if (rc == -ERESTARTSYS)
return rc;
@@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
for (i = 0; i < bytes_to_write; i++) {
rc = wait_for_bulk_out_ready(dev);
if (rc <= 0) {
- DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
+ DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
rc);
DEBUGP(2, dev, "<- cm4040_write (failed)\n");
if (rc == -ERESTARTSYS)
@@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
if (rc <= 0) {
- DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+ DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
DEBUGP(2, dev, "<- cm4040_write (failed)\n");
if (rc == -ERESTARTSYS)
return rc;
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index adbabea..03d3ab9 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -87,7 +87,7 @@ static const char * const cam_cc_parent_names_1[] = {
};
static struct pll_vco fabia_vco[] = {
- { 250000000, 2000000000, 0 },
+ { 249600000, 2000000000, 0 },
{ 125000000, 1000000000, 1 },
};
@@ -278,6 +278,7 @@ static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
};
static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
@@ -316,7 +317,6 @@ static const struct freq_tbl ftbl_cam_cc_cci_clk_src[] = {
{ }
};
-
static struct clk_rcg2 cam_cc_cci_clk_src = {
.cmd_rcgr = 0xb0d8,
.mnd_width = 8,
@@ -341,7 +341,7 @@ static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
F(320000000, P_CAM_CC_PLL2_OUT_ODD, 3, 0, 0),
- F(384000000, P_CAM_CC_PLL2_OUT_ODD, 2.5, 0, 0),
+ F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
{ }
};
@@ -430,6 +430,7 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
};
static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
@@ -490,13 +491,22 @@ static struct clk_rcg2 cam_cc_fd_core_clk_src = {
},
};
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 cam_cc_icp_clk_src = {
.cmd_rcgr = 0xb088,
.mnd_width = 0,
.hid_width = 5,
.enable_safe_config = true,
.parent_map = cam_cc_parent_map_0,
- .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "cam_cc_icp_clk_src",
.parent_names = cam_cc_parent_names_0,
@@ -513,6 +523,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
};
static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
@@ -544,6 +555,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
};
static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
@@ -655,6 +667,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
};
static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
@@ -733,6 +746,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
};
static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
F(384000000, P_CAM_CC_PLL2_OUT_ODD, 2.5, 0, 0),
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 1e71204..2902f87 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -33,6 +33,8 @@
#include <linux/regmap.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
#include <soc/qcom/scm.h>
#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
@@ -42,6 +44,7 @@
#include "clk-voter.h"
#include "clk-debug.h"
+#define OSM_INIT_RATE 300000000UL
#define OSM_TABLE_SIZE 40
#define SINGLE_CORE 1
#define MAX_CLUSTER_CNT 3
@@ -923,6 +926,193 @@ static struct clk_osm *logical_cpu_to_clk(int cpu)
return cpu_clk_map[cpu];
}
+static struct clk_osm *osm_configure_policy(struct cpufreq_policy *policy)
+{
+ int cpu;
+ struct clk_hw *parent, *c_parent;
+ struct clk_osm *first;
+ struct clk_osm *c, *n;
+
+ c = logical_cpu_to_clk(policy->cpu);
+ if (!c)
+ return NULL;
+
+ c_parent = clk_hw_get_parent(&c->hw);
+ if (!c_parent)
+ return NULL;
+
+ /*
+ * Don't put any other CPUs into the policy if we're doing
+ * per_core_dcvs
+ */
+ if (to_clk_osm(c_parent)->per_core_dcvs)
+ return c;
+
+ first = c;
+ /* Find CPUs that share the same clock domain */
+ for_each_possible_cpu(cpu) {
+ n = logical_cpu_to_clk(cpu);
+ if (!n)
+ continue;
+
+ parent = clk_hw_get_parent(&n->hw);
+ if (!parent)
+ return NULL;
+ if (parent != c_parent)
+ continue;
+
+ cpumask_set_cpu(cpu, policy->cpus);
+ if (n->core_num == 0)
+ first = n;
+ }
+
+ return first;
+}
+
+static void
+osm_set_index(struct clk_osm *c, unsigned int index, unsigned int num)
+{
+ clk_osm_write_reg(c, index, DCVS_PERF_STATE_DESIRED_REG(num), OSM_BASE);
+
+ /* Make sure the write goes through before proceeding */
+ clk_osm_mb(c, OSM_BASE);
+}
+
+static int
+osm_cpufreq_target_index(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct clk_osm *c = policy->driver_data;
+
+ osm_set_index(c, index, c->core_num);
+ return 0;
+}
+
+static unsigned int osm_cpufreq_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+ struct clk_osm *c;
+ u32 index;
+
+ if (!policy)
+ return 0;
+
+ c = policy->driver_data;
+ index = clk_osm_read_reg(c, DCVS_PERF_STATE_DESIRED_REG(c->core_num));
+
+ return policy->freq_table[index].frequency;
+}
+
+static int osm_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *table;
+ struct clk_osm *c, *parent;
+ struct clk_hw *p_hw;
+ int ret;
+ unsigned int i;
+ unsigned int xo_kHz;
+
+ c = osm_configure_policy(policy);
+ if (!c) {
+ pr_err("no clock for CPU%d\n", policy->cpu);
+ return -ENODEV;
+ }
+
+ p_hw = clk_hw_get_parent(&c->hw);
+ if (!p_hw) {
+ pr_err("no parent clock for CPU%d\n", policy->cpu);
+ return -ENODEV;
+ }
+
+ parent = to_clk_osm(p_hw);
+ c->vbases[OSM_BASE] = parent->vbases[OSM_BASE];
+
+ p_hw = clk_hw_get_parent(p_hw);
+ if (!p_hw) {
+ pr_err("no xo clock for CPU%d\n", policy->cpu);
+ return -ENODEV;
+ }
+ xo_kHz = clk_hw_get_rate(p_hw) / 1000;
+
+ table = kcalloc(OSM_TABLE_SIZE + 1, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ for (i = 0; i < OSM_TABLE_SIZE; i++) {
+ u32 data, src, div, lval, core_count;
+
+ data = clk_osm_read_reg(c, FREQ_REG + i * OSM_REG_SIZE);
+ src = (data & GENMASK(31, 30)) >> 30;
+ div = (data & GENMASK(29, 28)) >> 28;
+ lval = data & GENMASK(7, 0);
+ core_count = CORE_COUNT_VAL(data);
+
+ if (!src)
+ table[i].frequency = OSM_INIT_RATE / 1000;
+ else
+ table[i].frequency = xo_kHz * lval;
+ table[i].driver_data = table[i].frequency;
+
+ if (core_count != MAX_CORE_COUNT)
+ table[i].frequency = CPUFREQ_ENTRY_INVALID;
+
+ /* Two of the same frequencies mean the end of the table */
+ if (i > 0 && table[i - 1].driver_data == table[i].driver_data) {
+ struct cpufreq_frequency_table *prev = &table[i - 1];
+
+ if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
+ prev->flags = CPUFREQ_BOOST_FREQ;
+ prev->frequency = prev->driver_data;
+ }
+
+ break;
+ }
+ }
+ table[i].frequency = CPUFREQ_TABLE_END;
+
+ ret = cpufreq_table_validate_and_show(policy, table);
+ if (ret) {
+ pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+ goto err;
+ }
+
+ policy->driver_data = c;
+
+ clk_osm_enable(&parent->hw);
+ udelay(300);
+
+ return 0;
+
+err:
+ kfree(table);
+ return ret;
+}
+
+static int osm_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ kfree(policy->freq_table);
+ policy->freq_table = NULL;
+ return 0;
+}
+
+static struct freq_attr *osm_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ &cpufreq_freq_attr_scaling_boost_freqs,
+ NULL
+};
+
+static struct cpufreq_driver qcom_osm_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = osm_cpufreq_target_index,
+ .get = osm_cpufreq_get,
+ .init = osm_cpufreq_cpu_init,
+ .exit = osm_cpufreq_cpu_exit,
+ .name = "osm-cpufreq",
+ .attr = osm_cpufreq_attr,
+ .boost_enabled = true,
+};
+
static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec)
{
u64 temp;
@@ -2890,16 +3080,13 @@ static int clk_osm_acd_init(struct clk_osm *c)
return 0;
}
-static unsigned long init_rate = 300000000;
-
static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
{
- int rc = 0, cpu, i;
+ int rc = 0, i;
int pvs_ver = 0;
u32 pte_efuse, val;
int num_clks = ARRAY_SIZE(osm_qcom_clk_hws);
struct clk *ext_xo_clk, *clk;
- struct clk_osm *c, *parent;
struct device *dev = &pdev->dev;
struct clk_onecell_data *clk_data;
char l3speedbinstr[] = "qcom,l3-speedbin0-v0";
@@ -3207,7 +3394,7 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
get_online_cpus();
/* Set the L3 clock to run off GPLL0 and enable OSM for the domain */
- rc = clk_set_rate(l3_clk.hw.clk, init_rate);
+ rc = clk_set_rate(l3_clk.hw.clk, OSM_INIT_RATE);
if (rc) {
dev_err(&pdev->dev, "Unable to set init rate on L3 cluster, rc=%d\n",
rc);
@@ -3217,43 +3404,12 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
"Failed to enable clock for L3\n");
udelay(300);
- /* Set CPU clocks to run off GPLL0 and enable OSM for both domains */
- for_each_online_cpu(cpu) {
- c = logical_cpu_to_clk(cpu);
- if (!c) {
- pr_err("no clock device for CPU=%d\n", cpu);
- return -EINVAL;
- }
-
- parent = to_clk_osm(clk_hw_get_parent(&c->hw));
- if (!parent->per_core_dcvs) {
- if (cpu >= 0 && cpu <= 3)
- c = logical_cpu_to_clk(0);
- else if (cpu >= 4 && cpu <= 7)
- c = logical_cpu_to_clk(4);
- if (!c)
- return -EINVAL;
- }
-
- rc = clk_set_rate(c->hw.clk, init_rate);
- if (rc) {
- dev_err(&pdev->dev, "Unable to set init rate on %s, rc=%d\n",
- clk_hw_get_name(&parent->hw), rc);
- goto provider_err;
- }
- WARN(clk_prepare_enable(c->hw.clk),
- "Failed to enable OSM for %s\n",
- clk_hw_get_name(&parent->hw));
- udelay(300);
+ /* Configure default rate to lowest frequency */
+ for (i = 0; i < MAX_CORE_COUNT; i++) {
+ osm_set_index(&pwrcl_clk, 0, i);
+ osm_set_index(&perfcl_clk, 0, i);
}
- /*
- * Add always-on votes for the CPU cluster clocks since we do not want
- * to re-enable OSM at any point.
- */
- clk_prepare_enable(pwrcl_clk.hw.clk);
- clk_prepare_enable(perfcl_clk.hw.clk);
-
populate_opp_table(pdev);
populate_debugfs_dir(&l3_clk);
populate_debugfs_dir(&pwrcl_clk);
@@ -3261,18 +3417,24 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
register_cpu_cycle_counter_cb(&cb);
- pr_info("OSM driver inited\n");
put_online_cpus();
+ rc = cpufreq_register_driver(&qcom_osm_cpufreq_driver);
+ if (rc)
+ goto provider_err;
+
+ pr_info("OSM CPUFreq driver inited\n");
return 0;
+
provider_err:
if (clk_data)
devm_kfree(&pdev->dev, clk_data->clks);
clk_err:
devm_kfree(&pdev->dev, clk_data);
exit:
- dev_err(&pdev->dev, "OSM driver failed to initialize, rc=%d\n", rc);
- panic("Unable to Setup OSM");
+ dev_err(&pdev->dev, "OSM CPUFreq driver failed to initialize, rc=%d\n",
+ rc);
+ panic("Unable to Setup OSM CPUFreq");
}
static const struct of_device_id match_table[] = {
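The cpufreq init path added above (osm_cpufreq_cpu_init) builds the frequency table by decoding one OSM lookup-table register per level: bits [31:30] give the clock source, [29:28] the divider, [7:0] the L-value, and the reported frequency is the XO rate in kHz times the L-value (or the 300 MHz init rate when the source field is 0). A small stand-alone sketch of that decode, using a made-up register value purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* 32-bit GENMASK, mirroring the bitfields read in osm_cpufreq_cpu_init() */
#define GENMASK32(h, l) (((~0U) << (l)) & (~0U >> (31 - (h))))

int main(void)
{
	uint32_t data = 0x40000050;      /* hypothetical FREQ_REG LUT entry */
	unsigned int xo_khz = 19200;     /* 19.2 MHz board XO */

	unsigned int src  = (data & GENMASK32(31, 30)) >> 30;
	unsigned int lval =  data & GENMASK32(7, 0);

	/* src == 0 means the domain still runs at the 300 MHz init rate */
	unsigned int freq_khz = src ? xo_khz * lval : 300000;

	printf("src=%u lval=%u -> %u kHz\n", src, lval, freq_khz);
	return 0;
}

With this example entry, lval = 0x50 = 80 and the table frequency would be 19200 * 80 = 1536000 kHz; entries whose core-count field is not MAX_CORE_COUNT are marked CPUFREQ_ENTRY_INVALID by the driver, and a repeated frequency terminates the table.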
diff --git a/drivers/clk/qcom/debugcc-sdm845.c b/drivers/clk/qcom/debugcc-sdm845.c
index 44c5b81..9ffa555 100644
--- a/drivers/clk/qcom/debugcc-sdm845.c
+++ b/drivers/clk/qcom/debugcc-sdm845.c
@@ -104,14 +104,6 @@ static const char *const debug_mux_parent_names[] = {
"disp_cc_mdss_rot_clk",
"disp_cc_mdss_rscc_ahb_clk",
"disp_cc_mdss_rscc_vsync_clk",
- "disp_cc_mdss_spdm_debug_clk",
- "disp_cc_mdss_spdm_dp_crypto_clk",
- "disp_cc_mdss_spdm_dp_pixel1_clk",
- "disp_cc_mdss_spdm_dp_pixel_clk",
- "disp_cc_mdss_spdm_mdp_clk",
- "disp_cc_mdss_spdm_pclk0_clk",
- "disp_cc_mdss_spdm_pclk1_clk",
- "disp_cc_mdss_spdm_rot_clk",
"disp_cc_mdss_vsync_clk",
"measure_only_snoc_clk",
"measure_only_cnoc_clk",
@@ -254,13 +246,13 @@ static const char *const debug_mux_parent_names[] = {
"gpu_cc_cxo_aon_clk",
"gpu_cc_cxo_clk",
"gpu_cc_gx_cxo_clk",
+ "gpu_cc_gx_gfx3d_clk",
"gpu_cc_gx_gmu_clk",
"gpu_cc_gx_qdss_tsctr_clk",
"gpu_cc_gx_vsense_clk",
"gpu_cc_rbcpr_ahb_clk",
"gpu_cc_rbcpr_clk",
"gpu_cc_sleep_clk",
- "gpu_cc_spdm_gx_gfx3d_div_clk",
"video_cc_apb_clk",
"video_cc_at_clk",
"video_cc_qdss_trig_clk",
@@ -433,22 +425,6 @@ static struct clk_debug_mux gcc_debug_mux = {
0x17, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
{ "disp_cc_mdss_rscc_vsync_clk", 0x47, 4, DISP_CC,
0x18, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
- { "disp_cc_mdss_spdm_debug_clk", 0x47, 4, DISP_CC,
- 0x20, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
- { "disp_cc_mdss_spdm_dp_crypto_clk", 0x47, 4, DISP_CC,
- 0x1D, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
- { "disp_cc_mdss_spdm_dp_pixel1_clk", 0x47, 4, DISP_CC,
- 0x1F, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
- { "disp_cc_mdss_spdm_dp_pixel_clk", 0x47, 4, DISP_CC,
- 0x1E, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
- { "disp_cc_mdss_spdm_mdp_clk", 0x47, 4, DISP_CC,
- 0x1B, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
- { "disp_cc_mdss_spdm_pclk0_clk", 0x47, 4, DISP_CC,
- 0x19, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
- { "disp_cc_mdss_spdm_pclk1_clk", 0x47, 4, DISP_CC,
- 0x1A, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
- { "disp_cc_mdss_spdm_rot_clk", 0x47, 4, DISP_CC,
- 0x1C, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
{ "disp_cc_mdss_vsync_clk", 0x47, 4, DISP_CC,
0x6, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
{ "measure_only_snoc_clk", 0x7, 4, GCC,
@@ -733,6 +709,8 @@ static struct clk_debug_mux gcc_debug_mux = {
0xA, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
{ "gpu_cc_gx_cxo_clk", 0x144, 4, GPU_CC,
0xF, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_gx_gfx3d_clk", 0x144, 4, GPU_CC,
+ 0xC, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
{ "gpu_cc_gx_gmu_clk", 0x144, 4, GPU_CC,
0x10, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
{ "gpu_cc_gx_qdss_tsctr_clk", 0x144, 4, GPU_CC,
@@ -745,8 +723,6 @@ static struct clk_debug_mux gcc_debug_mux = {
0x1C, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
{ "gpu_cc_sleep_clk", 0x144, 4, GPU_CC,
0x17, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
- { "gpu_cc_spdm_gx_gfx3d_div_clk", 0x144, 4, GPU_CC,
- 0x1E, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
{ "video_cc_apb_clk", 0x48, 4, VIDEO_CC,
0x8, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
{ "video_cc_at_clk", 0x48, 4, VIDEO_CC,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 2742ab3..4e0711d 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -197,7 +197,7 @@ static struct clk_dummy measure_only_ipa_2x_clk = {
};
static struct pll_vco fabia_vco[] = {
- { 250000000, 2000000000, 0 },
+ { 249600000, 2000000000, 0 },
{ 125000000, 1000000000, 1 },
};
@@ -790,8 +790,8 @@ static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
F(400000, P_BI_TCXO, 12, 1, 4),
F(9600000, P_BI_TCXO, 2, 0, 0),
F(19200000, P_BI_TCXO, 1, 0, 0),
- F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
- F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
{ }
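For reference, the reworked sdcc2 entries keep the same output rates while dropping the MND (m/n) stage: assuming the usual SDM845 fixed rates of GPLL0 = 600 MHz and GPLL0_OUT_EVEN = GPLL0/2 = 300 MHz, the old rows were 25 MHz = 600 MHz / 12 * (1/2) and 50 MHz = 600 MHz / 12, while the new rows are 25 MHz = 300 MHz / 12 and 50 MHz = 300 MHz / 6; the 100 MHz and 200 MHz rows stay on GPLL0_OUT_MAIN at 600 / 6 and 600 / 3.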
diff --git a/drivers/clk/qcom/mdss/Makefile b/drivers/clk/qcom/mdss/Makefile
index d183393..87feee6 100644
--- a/drivers/clk/qcom/mdss/Makefile
+++ b/drivers/clk/qcom/mdss/Makefile
@@ -1,3 +1,6 @@
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll-util.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-10nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-10nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-10nm-util.o
+
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c
new file mode 100644
index 0000000..eb2092a
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c
@@ -0,0 +1,766 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/usb/usbpd.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-10nm.h"
+
+#define DP_PHY_REVISION_ID0 0x0000
+#define DP_PHY_REVISION_ID1 0x0004
+#define DP_PHY_REVISION_ID2 0x0008
+#define DP_PHY_REVISION_ID3 0x000C
+
+#define DP_PHY_CFG 0x0010
+#define DP_PHY_PD_CTL 0x0018
+#define DP_PHY_MODE 0x001C
+
+#define DP_PHY_AUX_CFG0 0x0020
+#define DP_PHY_AUX_CFG1 0x0024
+#define DP_PHY_AUX_CFG2 0x0028
+#define DP_PHY_AUX_CFG3 0x002C
+#define DP_PHY_AUX_CFG4 0x0030
+#define DP_PHY_AUX_CFG5 0x0034
+#define DP_PHY_AUX_CFG6 0x0038
+#define DP_PHY_AUX_CFG7 0x003C
+#define DP_PHY_AUX_CFG8 0x0040
+#define DP_PHY_AUX_CFG9 0x0044
+#define DP_PHY_AUX_INTERRUPT_MASK 0x0048
+#define DP_PHY_AUX_INTERRUPT_CLEAR 0x004C
+#define DP_PHY_AUX_BIST_CFG 0x0050
+
+#define DP_PHY_VCO_DIV 0x0064
+#define DP_PHY_TX0_TX1_LANE_CTL 0x006C
+#define DP_PHY_TX2_TX3_LANE_CTL 0x0088
+
+#define DP_PHY_SPARE0 0x00AC
+#define DP_PHY_STATUS 0x00C0
+
+/* Tx registers */
+#define TXn_BIST_MODE_LANENO 0x0000
+#define TXn_CLKBUF_ENABLE 0x0008
+#define TXn_TX_EMP_POST1_LVL 0x000C
+
+#define TXn_TX_DRV_LVL 0x001C
+
+#define TXn_RESET_TSYNC_EN 0x0024
+#define TXn_PRE_STALL_LDO_BOOST_EN 0x0028
+#define TXn_TX_BAND 0x002C
+#define TXn_SLEW_CNTL 0x0030
+#define TXn_INTERFACE_SELECT 0x0034
+
+#define TXn_RES_CODE_LANE_TX 0x003C
+#define TXn_RES_CODE_LANE_RX 0x0040
+#define TXn_RES_CODE_LANE_OFFSET_TX 0x0044
+#define TXn_RES_CODE_LANE_OFFSET_RX 0x0048
+
+#define TXn_DEBUG_BUS_SEL 0x0058
+#define TXn_TRANSCEIVER_BIAS_EN 0x005C
+#define TXn_HIGHZ_DRVR_EN 0x0060
+#define TXn_TX_POL_INV 0x0064
+#define TXn_PARRATE_REC_DETECT_IDLE_EN 0x0068
+
+#define TXn_LANE_MODE_1 0x008C
+
+#define TXn_TRAN_DRVR_EMP_EN 0x00C0
+#define TXn_TX_INTERFACE_MODE 0x00C4
+
+#define TXn_VMODE_CTRL1 0x00F0
+
+/* PLL register offset */
+#define QSERDES_COM_ATB_SEL1 0x0000
+#define QSERDES_COM_ATB_SEL2 0x0004
+#define QSERDES_COM_FREQ_UPDATE 0x0008
+#define QSERDES_COM_BG_TIMER 0x000C
+#define QSERDES_COM_SSC_EN_CENTER 0x0010
+#define QSERDES_COM_SSC_ADJ_PER1 0x0014
+#define QSERDES_COM_SSC_ADJ_PER2 0x0018
+#define QSERDES_COM_SSC_PER1 0x001C
+#define QSERDES_COM_SSC_PER2 0x0020
+#define QSERDES_COM_SSC_STEP_SIZE1 0x0024
+#define QSERDES_COM_SSC_STEP_SIZE2 0x0028
+#define QSERDES_COM_POST_DIV 0x002C
+#define QSERDES_COM_POST_DIV_MUX 0x0030
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x0034
+#define QSERDES_COM_CLK_ENABLE1 0x0038
+#define QSERDES_COM_SYS_CLK_CTRL 0x003C
+#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x0040
+#define QSERDES_COM_PLL_EN 0x0044
+#define QSERDES_COM_PLL_IVCO 0x0048
+#define QSERDES_COM_CMN_IETRIM 0x004C
+#define QSERDES_COM_CMN_IPTRIM 0x0050
+
+#define QSERDES_COM_CP_CTRL_MODE0 0x0060
+#define QSERDES_COM_CP_CTRL_MODE1 0x0064
+#define QSERDES_COM_PLL_RCTRL_MODE0 0x0068
+#define QSERDES_COM_PLL_RCTRL_MODE1 0x006C
+#define QSERDES_COM_PLL_CCTRL_MODE0 0x0070
+#define QSERDES_COM_PLL_CCTRL_MODE1 0x0074
+#define QSERDES_COM_PLL_CNTRL 0x0078
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x007C
+#define QSERDES_COM_SYSCLK_EN_SEL 0x0080
+#define QSERDES_COM_CML_SYSCLK_SEL 0x0084
+#define QSERDES_COM_RESETSM_CNTRL 0x0088
+#define QSERDES_COM_RESETSM_CNTRL2 0x008C
+#define QSERDES_COM_LOCK_CMP_EN 0x0090
+#define QSERDES_COM_LOCK_CMP_CFG 0x0094
+#define QSERDES_COM_LOCK_CMP1_MODE0 0x0098
+#define QSERDES_COM_LOCK_CMP2_MODE0 0x009C
+#define QSERDES_COM_LOCK_CMP3_MODE0 0x00A0
+
+#define QSERDES_COM_DEC_START_MODE0 0x00B0
+#define QSERDES_COM_DEC_START_MODE1 0x00B4
+#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x00B8
+#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x00BC
+#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x00C0
+#define QSERDES_COM_DIV_FRAC_START1_MODE1 0x00C4
+#define QSERDES_COM_DIV_FRAC_START2_MODE1 0x00C8
+#define QSERDES_COM_DIV_FRAC_START3_MODE1 0x00CC
+#define QSERDES_COM_INTEGLOOP_INITVAL 0x00D0
+#define QSERDES_COM_INTEGLOOP_EN 0x00D4
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x00D8
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x00DC
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x00E0
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x00E4
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x00E8
+#define QSERDES_COM_VCO_TUNE_CTRL 0x00EC
+#define QSERDES_COM_VCO_TUNE_MAP 0x00F0
+
+#define QSERDES_COM_CMN_STATUS 0x0124
+#define QSERDES_COM_RESET_SM_STATUS 0x0128
+
+#define QSERDES_COM_CLK_SEL 0x0138
+#define QSERDES_COM_HSCLK_SEL 0x013C
+
+#define QSERDES_COM_CORECLK_DIV_MODE0 0x0148
+
+#define QSERDES_COM_SW_RESET 0x0150
+#define QSERDES_COM_CORE_CLK_EN 0x0154
+#define QSERDES_COM_C_READY_STATUS 0x0158
+#define QSERDES_COM_CMN_CONFIG 0x015C
+
+#define QSERDES_COM_SVS_MODE_CLK_SEL 0x0164
+
+#define DP_PHY_PLL_POLL_SLEEP_US 500
+#define DP_PHY_PLL_POLL_TIMEOUT_US 10000
+
+#define DP_VCO_RATE_8100MHZDIV1000 8100000UL
+#define DP_VCO_RATE_9720MHZDIV1000 9720000UL
+#define DP_VCO_RATE_10800MHZDIV1000 10800000UL
+
+int dp_mux_set_parent_10nm(void *context, unsigned int reg, unsigned int val)
+{
+ struct mdss_pll_resources *dp_res = context;
+ int rc;
+ u32 auxclk_div;
+
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("Failed to enable mdss DP PLL resources\n");
+ return rc;
+ }
+
+ auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+ auxclk_div &= ~0x03; /* bits 0 to 1 */
+
+ if (val == 0) /* mux parent index = 0 */
+ auxclk_div |= 1;
+ else if (val == 1) /* mux parent index = 1 */
+ auxclk_div |= 2;
+ else if (val == 2) /* mux parent index = 2 */
+ auxclk_div |= 0;
+
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_VCO_DIV, auxclk_div);
+ /* Make sure the PHY register writes are done */
+ wmb();
+ pr_debug("%s: mux=%d auxclk_div=%x\n", __func__, val, auxclk_div);
+
+ mdss_pll_resource_enable(dp_res, false);
+
+ return 0;
+}
+
+int dp_mux_get_parent_10nm(void *context, unsigned int reg, unsigned int *val)
+{
+ int rc;
+ u32 auxclk_div = 0;
+ struct mdss_pll_resources *dp_res = context;
+
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("Failed to enable dp_res resources\n");
+ return rc;
+ }
+
+ auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+ auxclk_div &= 0x03;
+
+ if (auxclk_div == 1) /* Default divider */
+ *val = 0;
+ else if (auxclk_div == 2)
+ *val = 1;
+ else if (auxclk_div == 0)
+ *val = 2;
+
+ mdss_pll_resource_enable(dp_res, false);
+
+ pr_debug("%s: auxclk_div=%d, val=%d\n", __func__, auxclk_div, *val);
+
+ return 0;
+}
+
+static int dp_vco_pll_init_db_10nm(struct dp_pll_db *pdb,
+ unsigned long rate)
+{
+ struct mdss_pll_resources *dp_res = pdb->pll;
+ u32 spare_value = 0;
+
+ spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+ pdb->lane_cnt = spare_value & 0x0F;
+ pdb->orientation = (spare_value & 0xF0) >> 4;
+
+ pr_debug("%s: spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
+ __func__, spare_value, pdb->lane_cnt, pdb->orientation);
+
+ switch (rate) {
+ case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
+ pr_debug("%s: VCO rate: %ld\n", __func__,
+ DP_VCO_RATE_9720MHZDIV1000);
+ pdb->hsclk_sel = 0x0c;
+ pdb->dec_start_mode0 = 0x69;
+ pdb->div_frac_start1_mode0 = 0x00;
+ pdb->div_frac_start2_mode0 = 0x80;
+ pdb->div_frac_start3_mode0 = 0x07;
+ pdb->integloop_gain0_mode0 = 0x3f;
+ pdb->integloop_gain1_mode0 = 0x00;
+ pdb->vco_tune_map = 0x00;
+ pdb->lock_cmp1_mode0 = 0x6f;
+ pdb->lock_cmp2_mode0 = 0x08;
+ pdb->lock_cmp3_mode0 = 0x00;
+ pdb->phy_vco_div = 0x1;
+ pdb->lock_cmp_en = 0x00;
+ break;
+ case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
+ pr_debug("%s: VCO rate: %ld\n", __func__,
+ DP_VCO_RATE_10800MHZDIV1000);
+ pdb->hsclk_sel = 0x04;
+ pdb->dec_start_mode0 = 0x69;
+ pdb->div_frac_start1_mode0 = 0x00;
+ pdb->div_frac_start2_mode0 = 0x80;
+ pdb->div_frac_start3_mode0 = 0x07;
+ pdb->integloop_gain0_mode0 = 0x3f;
+ pdb->integloop_gain1_mode0 = 0x00;
+ pdb->vco_tune_map = 0x00;
+ pdb->lock_cmp1_mode0 = 0x0f;
+ pdb->lock_cmp2_mode0 = 0x0e;
+ pdb->lock_cmp3_mode0 = 0x00;
+ pdb->phy_vco_div = 0x1;
+ pdb->lock_cmp_en = 0x00;
+ break;
+ case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
+ pr_debug("%s: VCO rate: %ld\n", __func__,
+ DP_VCO_RATE_10800MHZDIV1000);
+ pdb->hsclk_sel = 0x00;
+ pdb->dec_start_mode0 = 0x8c;
+ pdb->div_frac_start1_mode0 = 0x00;
+ pdb->div_frac_start2_mode0 = 0x00;
+ pdb->div_frac_start3_mode0 = 0x0a;
+ pdb->integloop_gain0_mode0 = 0x3f;
+ pdb->integloop_gain1_mode0 = 0x00;
+ pdb->vco_tune_map = 0x00;
+ pdb->lock_cmp1_mode0 = 0x1f;
+ pdb->lock_cmp2_mode0 = 0x1c;
+ pdb->lock_cmp3_mode0 = 0x00;
+ pdb->phy_vco_div = 0x2;
+ pdb->lock_cmp_en = 0x00;
+ break;
+ case DP_VCO_HSCLK_RATE_8100MHZDIV1000:
+ pr_debug("%s: VCO rate: %ld\n", __func__,
+ DP_VCO_RATE_8100MHZDIV1000);
+ pdb->hsclk_sel = 0x03;
+ pdb->dec_start_mode0 = 0x69;
+ pdb->div_frac_start1_mode0 = 0x00;
+ pdb->div_frac_start2_mode0 = 0x80;
+ pdb->div_frac_start3_mode0 = 0x07;
+ pdb->integloop_gain0_mode0 = 0x3f;
+ pdb->integloop_gain1_mode0 = 0x00;
+ pdb->vco_tune_map = 0x00;
+ pdb->lock_cmp1_mode0 = 0x2f;
+ pdb->lock_cmp2_mode0 = 0x2a;
+ pdb->lock_cmp3_mode0 = 0x00;
+ pdb->phy_vco_div = 0x0;
+ pdb->lock_cmp_en = 0x08;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int dp_config_vco_rate_10nm(struct dp_pll_vco_clk *vco,
+ unsigned long rate)
+{
+ u32 res = 0;
+ struct mdss_pll_resources *dp_res = vco->priv;
+ struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
+
+ res = dp_vco_pll_init_db_10nm(pdb, rate);
+ if (res) {
+ pr_err("VCO Init DB failed\n");
+ return res;
+ }
+
+ if (pdb->lane_cnt != 4) {
+ if (pdb->orientation == ORIENTATION_CC2)
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x6d);
+ else
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x75);
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x7d);
+ }
+
+ /* Make sure the PHY register writes are done */
+ wmb();
+
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x02);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_ENABLE1, 0x0e);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_SEL, 0x30);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CMN_CONFIG, 0x02);
+
+ /* Different for each clock rate */
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_HSCLK_SEL, pdb->hsclk_sel);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DEC_START_MODE0, pdb->dec_start_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START1_MODE0, pdb->div_frac_start1_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START2_MODE0, pdb->div_frac_start2_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START3_MODE0, pdb->div_frac_start3_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_INTEGLOOP_GAIN0_MODE0, pdb->integloop_gain0_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_INTEGLOOP_GAIN1_MODE0, pdb->integloop_gain1_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_VCO_TUNE_MAP, pdb->vco_tune_map);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP1_MODE0, pdb->lock_cmp1_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP2_MODE0, pdb->lock_cmp2_mode0);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP3_MODE0, pdb->lock_cmp3_mode0);
+ /* Make sure the PLL register writes are done */
+ wmb();
+
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BG_TIMER, 0x0a);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORECLK_DIV_MODE0, 0x0a);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_VCO_TUNE_CTRL, 0x00);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORE_CLK_EN, 0x1f);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_IVCO, 0x07);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP_EN, pdb->lock_cmp_en);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CP_CTRL_MODE0, 0x06);
+ /* Make sure the PHY register writes are done */
+ wmb();
+
+ if (pdb->orientation == ORIENTATION_CC2)
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x4c);
+ else
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x5c);
+ /* Make sure the PLL register writes are done */
+ wmb();
+
+ /* TX Lane configuration */
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_TX0_TX1_LANE_CTL, 0x05);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_TX2_TX3_LANE_CTL, 0x05);
+
+ /* TX-0 register configuration */
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_VMODE_CTRL1, 0x40);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_INTERFACE_SELECT, 0x3d);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_CLKBUF_ENABLE, 0x0f);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RESET_TSYNC_EN, 0x03);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRAN_DRVR_EMP_EN, 0x03);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+ TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_INTERFACE_MODE, 0x00);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_BAND, 0x4);
+
+ /* TX-1 register configuration */
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_VMODE_CTRL1, 0x40);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_INTERFACE_SELECT, 0x3d);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_CLKBUF_ENABLE, 0x0f);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RESET_TSYNC_EN, 0x03);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRAN_DRVR_EMP_EN, 0x03);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+ TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_INTERFACE_MODE, 0x00);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_BAND, 0x4);
+ /* Make sure the PHY register writes are done */
+ wmb();
+
+ /* dependent on the vco frequency */
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_VCO_DIV, pdb->phy_vco_div);
+
+ return res;
+}
+
+static bool dp_10nm_pll_lock_status(struct mdss_pll_resources *dp_res)
+{
+ u32 status;
+ bool pll_locked;
+
+ /* poll for PLL lock status */
+ if (readl_poll_timeout_atomic((dp_res->pll_base +
+ QSERDES_COM_C_READY_STATUS),
+ status,
+ ((status & BIT(0)) > 0),
+ DP_PHY_PLL_POLL_SLEEP_US,
+ DP_PHY_PLL_POLL_TIMEOUT_US)) {
+ pr_err("%s: C_READY status is not high. Status=%x\n",
+ __func__, status);
+ pll_locked = false;
+ } else {
+ pll_locked = true;
+ }
+
+ return pll_locked;
+}
+
+static bool dp_10nm_phy_rdy_status(struct mdss_pll_resources *dp_res)
+{
+ u32 status;
+ bool phy_ready = true;
+
+ /* poll for PHY ready status */
+ if (readl_poll_timeout_atomic((dp_res->phy_base +
+ DP_PHY_STATUS),
+ status,
+ ((status & (BIT(1))) > 0),
+ DP_PHY_PLL_POLL_SLEEP_US,
+ DP_PHY_PLL_POLL_TIMEOUT_US)) {
+ pr_err("%s: Phy_ready is not high. Status=%x\n",
+ __func__, status);
+ phy_ready = false;
+ }
+
+ return phy_ready;
+}
+
+static int dp_pll_enable_10nm(struct clk_hw *hw)
+{
+ int rc = 0;
+ struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+ struct mdss_pll_resources *dp_res = vco->priv;
+ struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
+ u32 bias_en, drvr_en;
+
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG2, 0x04);
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x05);
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x09);
+ wmb(); /* Make sure the PHY register writes are done */
+
+ MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_RESETSM_CNTRL, 0x20);
+ wmb(); /* Make sure the PLL register writes are done */
+
+ if (!dp_10nm_pll_lock_status(dp_res)) {
+ rc = -EINVAL;
+ goto lock_err;
+ }
+
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
+ /* Make sure the PHY register writes are done */
+ wmb();
+ /* poll for PHY ready status */
+ if (!dp_10nm_phy_rdy_status(dp_res)) {
+ rc = -EINVAL;
+ goto lock_err;
+ }
+
+ pr_debug("%s: PLL is locked\n", __func__);
+
+ if (pdb->lane_cnt == 1) {
+ bias_en = 0x3e;
+ drvr_en = 0x13;
+ } else {
+ bias_en = 0x3f;
+ drvr_en = 0x10;
+ }
+
+ if (pdb->lane_cnt != 4) {
+ if (pdb->orientation == ORIENTATION_CC1) {
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+ TXn_HIGHZ_DRVR_EN, drvr_en);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+ TXn_TRANSCEIVER_BIAS_EN, bias_en);
+ } else {
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+ TXn_HIGHZ_DRVR_EN, drvr_en);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+ TXn_TRANSCEIVER_BIAS_EN, bias_en);
+ }
+ } else {
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_HIGHZ_DRVR_EN, drvr_en);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+ TXn_TRANSCEIVER_BIAS_EN, bias_en);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_HIGHZ_DRVR_EN, drvr_en);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+ TXn_TRANSCEIVER_BIAS_EN, bias_en);
+ }
+
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_POL_INV, 0x0a);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_POL_INV, 0x0a);
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x18);
+ udelay(2000);
+
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
+
+ /*
+ * Make sure all the register writes are completed before
+ * doing any other operation
+ */
+ wmb();
+
+ /* poll for PHY ready status */
+ if (!dp_10nm_phy_rdy_status(dp_res)) {
+ rc = -EINVAL;
+ goto lock_err;
+ }
+
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_DRV_LVL, 0x38);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_DRV_LVL, 0x38);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_EMP_POST1_LVL, 0x20);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_EMP_POST1_LVL, 0x20);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x06);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x06);
+ MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x07);
+ MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x07);
+ /* Make sure the PHY register writes are done */
+ wmb();
+
+lock_err:
+ return rc;
+}
+
+static int dp_pll_disable_10nm(struct clk_hw *hw)
+{
+ int rc = 0;
+ struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+ struct mdss_pll_resources *dp_res = vco->priv;
+
+ /* Assert DP PHY power down */
+ MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x2);
+ /*
+ * Make sure all the register writes to disable PLL are
+ * completed before doing any other operation
+ */
+ wmb();
+
+ return rc;
+}
+
+
+int dp_vco_prepare_10nm(struct clk_hw *hw)
+{
+ int rc = 0;
+ struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+ struct mdss_pll_resources *dp_res = vco->priv;
+
+ pr_debug("rate=%ld\n", vco->rate);
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("Failed to enable mdss DP pll resources\n");
+ goto error;
+ }
+
+ if ((dp_res->vco_cached_rate != 0)
+ && (dp_res->vco_cached_rate == vco->rate)) {
+ rc = vco->hw.init->ops->set_rate(hw,
+ dp_res->vco_cached_rate, dp_res->vco_cached_rate);
+ if (rc) {
+ pr_err("index=%d vco_set_rate failed. rc=%d\n",
+ rc, dp_res->index);
+ mdss_pll_resource_enable(dp_res, false);
+ goto error;
+ }
+ }
+
+ rc = dp_pll_enable_10nm(hw);
+ if (rc) {
+ mdss_pll_resource_enable(dp_res, false);
+ pr_err("ndx=%d failed to enable dp pll\n",
+ dp_res->index);
+ goto error;
+ }
+
+ mdss_pll_resource_enable(dp_res, false);
+error:
+ return rc;
+}
+
+void dp_vco_unprepare_10nm(struct clk_hw *hw)
+{
+ struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+ struct mdss_pll_resources *dp_res = vco->priv;
+
+ if (!dp_res) {
+ pr_err("Invalid input parameter\n");
+ return;
+ }
+
+ if (!dp_res->pll_on &&
+ mdss_pll_resource_enable(dp_res, true)) {
+ pr_err("pll resource can't be enabled\n");
+ return;
+ }
+ dp_res->vco_cached_rate = vco->rate;
+ dp_pll_disable_10nm(hw);
+
+ dp_res->handoff_resources = false;
+ mdss_pll_resource_enable(dp_res, false);
+ dp_res->pll_on = false;
+}
+
+int dp_vco_set_rate_10nm(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+ struct mdss_pll_resources *dp_res = vco->priv;
+ int rc;
+
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("pll resource can't be enabled\n");
+ return rc;
+ }
+
+ pr_debug("DP lane CLK rate=%ld\n", rate);
+
+ rc = dp_config_vco_rate_10nm(vco, rate);
+ if (rc)
+ pr_err("%s: Failed to set clk rate\n", __func__);
+
+ mdss_pll_resource_enable(dp_res, false);
+
+ vco->rate = rate;
+
+ return 0;
+}
+
+unsigned long dp_vco_recalc_rate_10nm(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+ int rc;
+ u32 div, hsclk_div, link_clk_div = 0;
+ u64 vco_rate;
+ struct mdss_pll_resources *dp_res = vco->priv;
+
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("Failed to enable mdss DP pll=%d\n", dp_res->index);
+ return rc;
+ }
+
+ div = MDSS_PLL_REG_R(dp_res->pll_base, QSERDES_COM_HSCLK_SEL);
+ div &= 0x0f;
+
+ if (div == 12)
+ hsclk_div = 6; /* Default */
+ else if (div == 4)
+ hsclk_div = 4;
+ else if (div == 0)
+ hsclk_div = 2;
+ else if (div == 3)
+ hsclk_div = 1;
+ else {
+ pr_debug("unknown divider. forcing to default\n");
+ hsclk_div = 5;
+ }
+
+ div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_AUX_CFG2);
+ div >>= 2;
+
+ if ((div & 0x3) == 0)
+ link_clk_div = 5;
+ else if ((div & 0x3) == 1)
+ link_clk_div = 10;
+ else if ((div & 0x3) == 2)
+ link_clk_div = 20;
+ else
+ pr_err("%s: unsupported div. Phy_mode: %d\n", __func__, div);
+
+ if (link_clk_div == 20) {
+ vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+ } else {
+ if (hsclk_div == 6)
+ vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
+ else if (hsclk_div == 4)
+ vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+ else if (hsclk_div == 2)
+ vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
+ else
+ vco_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000;
+ }
+
+ pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+ mdss_pll_resource_enable(dp_res, false);
+
+ dp_res->vco_cached_rate = vco->rate = vco_rate;
+ return (unsigned long)vco_rate;
+}
+
+long dp_vco_round_rate_10nm(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long rrate = rate;
+ struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+
+ if (rate <= vco->min_rate)
+ rrate = vco->min_rate;
+ else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
+ rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+ else if (rate <= DP_VCO_HSCLK_RATE_5400MHZDIV1000)
+ rrate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
+ else
+ rrate = vco->max_rate;
+
+ pr_debug("%s: rrate=%ld\n", __func__, rrate);
+
+ *parent_rate = rrate;
+ return rrate;
+}
+
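As a consistency check on the rates used above: the DP_VCO_HSCLK_RATE_*MHZDIV1000 values are clock-framework rates in kHz, so for HBR2 (5.4 Gbps) the VCO clock reports 5400000, the link clock (fixed divide-by-ten in mdss-dp-pll-10nm.c below) is 540000, and the divided pixel-path clock is 5400000 / 4 = 1350000. HBR3 (8.1 Gbps) uses the divide-by-six branch to reach the same 1350000, while RBR and HBR (1.62 and 2.7 Gbps) take the divide-by-two branch, giving 810000 and 1350000. This is the same /2, /4, /6 selection that dp_vco_pll_init_db_10nm() programs through phy_vco_div and that mux_recalc_rate() reports back.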
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c
new file mode 100644
index 0000000..e30ef82
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c
@@ -0,0 +1,310 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Display Port PLL driver block diagram for branch clocks
+ *
+ * +------------------------------+
+ * | DP_VCO_CLK |
+ * | |
+ * | +-------------------+ |
+ * | | (DP PLL/VCO) | |
+ * | +---------+---------+ |
+ * | v |
+ * | +----------+-----------+ |
+ * | | hsclk_divsel_clk_src | |
+ * | +----------+-----------+ |
+ * +------------------------------+
+ * |
+ * +------------<---------v------------>----------+
+ * | |
+ * +-----v------------+ |
+ * | dp_link_clk_src | |
+ * | divsel_ten | |
+ * +---------+--------+ |
+ * | |
+ * | |
+ * v v
+ * Input to DISPCC block |
+ * for link clk, crypto clk |
+ * and interface clock |
+ * |
+ * |
+ * +--------<------------+-----------------+---<---+
+ * | | |
+ * +-------v------+ +--------v-----+ +--------v------+
+ * | vco_divided | | vco_divided | | vco_divided |
+ * | _clk_src | | _clk_src | | _clk_src |
+ * | | | | | |
+ * |divsel_six | | divsel_two | | divsel_four |
+ * +-------+------+ +-----+--------+ +--------+------+
+ * | | |
+ * v------->----------v-------------<------v
+ * |
+ * +----------+---------+
+ * | vco_divided_clk |
+ * | _src_mux |
+ * +---------+----------+
+ * |
+ * v
+ * Input to DISPCC block
+ * for DP pixel clock
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-10nm.h"
+
+static struct dp_pll_db dp_pdb;
+static struct clk_ops mux_clk_ops;
+
+static struct regmap_config dp_pll_10nm_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x910,
+};
+
+static struct regmap_bus dp_pixel_mux_regmap_ops = {
+ .reg_write = dp_mux_set_parent_10nm,
+ .reg_read = dp_mux_get_parent_10nm,
+};
+
+/* Op structures */
+static const struct clk_ops dp_10nm_vco_clk_ops = {
+ .recalc_rate = dp_vco_recalc_rate_10nm,
+ .set_rate = dp_vco_set_rate_10nm,
+ .round_rate = dp_vco_round_rate_10nm,
+ .prepare = dp_vco_prepare_10nm,
+ .unprepare = dp_vco_unprepare_10nm,
+};
+
+static struct dp_pll_vco_clk dp_vco_clk = {
+ .min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
+ .max_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "dp_vco_clk",
+ .parent_names = (const char *[]){ "xo_board" },
+ .num_parents = 1,
+ .ops = &dp_10nm_vco_clk_ops,
+ },
+};
+
+static struct clk_fixed_factor dp_link_clk_divsel_ten = {
+ .div = 10,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dp_link_clk_divsel_ten",
+ .parent_names =
+ (const char *[]){ "dp_vco_clk" },
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dp_vco_divsel_two_clk_src = {
+ .div = 2,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dp_vco_divsel_two_clk_src",
+ .parent_names =
+ (const char *[]){ "dp_vco_clk" },
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dp_vco_divsel_four_clk_src = {
+ .div = 4,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dp_vco_divsel_four_clk_src",
+ .parent_names =
+ (const char *[]){ "dp_vco_clk" },
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dp_vco_divsel_six_clk_src = {
+ .div = 6,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dp_vco_divsel_six_clk_src",
+ .parent_names =
+ (const char *[]){ "dp_vco_clk" },
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+
+static int clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ int ret = 0;
+
+ ret = __clk_mux_determine_rate_closest(hw, req);
+ if (ret)
+ return ret;
+
+ /* Set the new parent of mux if there is a new valid parent */
+ if (hw->clk && req->best_parent_hw->clk)
+ clk_set_parent(hw->clk, req->best_parent_hw->clk);
+
+ return 0;
+}
+
+static unsigned long mux_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk *div_clk = NULL, *vco_clk = NULL;
+ struct dp_pll_vco_clk *vco = NULL;
+
+ div_clk = clk_get_parent(hw->clk);
+ if (!div_clk)
+ return 0;
+
+ vco_clk = clk_get_parent(div_clk);
+ if (!vco_clk)
+ return 0;
+
+ vco = to_dp_vco_hw(__clk_get_hw(vco_clk));
+ if (!vco)
+ return 0;
+
+ if (vco->rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000)
+ return (vco->rate / 6);
+ else if (vco->rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
+ return (vco->rate / 4);
+ else
+ return (vco->rate / 2);
+}
+
+static struct clk_regmap_mux dp_vco_divided_clk_src_mux = {
+ .reg = 0x64,
+ .shift = 0,
+ .width = 2,
+
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dp_vco_divided_clk_src_mux",
+ .parent_names =
+ (const char *[]){"dp_vco_divsel_two_clk_src",
+ "dp_vco_divsel_four_clk_src",
+ "dp_vco_divsel_six_clk_src"},
+ .num_parents = 3,
+ .ops = &mux_clk_ops,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ },
+ },
+};
+
+static struct clk_hw *mdss_dp_pllcc_10nm[] = {
+ [DP_VCO_CLK] = &dp_vco_clk.hw,
+ [DP_LINK_CLK_DIVSEL_TEN] = &dp_link_clk_divsel_ten.hw,
+ [DP_VCO_DIVIDED_TWO_CLK_SRC] = &dp_vco_divsel_two_clk_src.hw,
+ [DP_VCO_DIVIDED_FOUR_CLK_SRC] = &dp_vco_divsel_four_clk_src.hw,
+ [DP_VCO_DIVIDED_SIX_CLK_SRC] = &dp_vco_divsel_six_clk_src.hw,
+ [DP_VCO_DIVIDED_CLK_SRC_MUX] = &dp_vco_divided_clk_src_mux.clkr.hw,
+};
+
+int dp_pll_clock_register_10nm(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ int rc = -ENOTSUPP, i = 0;
+ struct clk_onecell_data *clk_data;
+ struct clk *clk;
+ struct regmap *regmap;
+ int num_clks = ARRAY_SIZE(mdss_dp_pllcc_10nm);
+
+ if (!pdev || !pdev->dev.of_node) {
+ pr_err("Invalid input parameters\n");
+ return -EINVAL;
+ }
+
+ if (!pll_res || !pll_res->pll_base || !pll_res->phy_base ||
+ !pll_res->ln_tx0_base || !pll_res->ln_tx1_base) {
+ pr_err("%s: Invalid input parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
+ sizeof(struct clk *)), GFP_KERNEL);
+ if (!clk_data->clks) {
+ devm_kfree(&pdev->dev, clk_data);
+ return -ENOMEM;
+ }
+ clk_data->clk_num = num_clks;
+
+ pll_res->priv = &dp_pdb;
+ dp_pdb.pll = pll_res;
+
+ /* Set client data for vco, mux and div clocks */
+ regmap = devm_regmap_init(&pdev->dev, &dp_pixel_mux_regmap_ops,
+ pll_res, &dp_pll_10nm_cfg);
+ dp_vco_divided_clk_src_mux.clkr.regmap = regmap;
+ mux_clk_ops = clk_regmap_mux_closest_ops;
+ mux_clk_ops.determine_rate = clk_mux_determine_rate;
+ mux_clk_ops.recalc_rate = mux_recalc_rate;
+
+ dp_vco_clk.priv = pll_res;
+
+ for (i = DP_VCO_CLK; i <= DP_VCO_DIVIDED_CLK_SRC_MUX; i++) {
+ pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
+ clk = devm_clk_register(&pdev->dev,
+ mdss_dp_pllcc_10nm[i]);
+ if (IS_ERR(clk)) {
+ pr_err("clk registration failed for DP: %d\n",
+ pll_res->index);
+ rc = -EINVAL;
+ goto clk_reg_fail;
+ }
+ clk_data->clks[i] = clk;
+ }
+
+ rc = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, clk_data);
+ if (rc) {
+ pr_err("%s: Clock register failed rc=%d\n", __func__, rc);
+ rc = -EPROBE_DEFER;
+ } else {
+ pr_debug("%s SUCCESS\n", __func__);
+ }
+ return rc;
+clk_reg_fail:
+ devm_kfree(&pdev->dev, clk_data->clks);
+ devm_kfree(&pdev->dev, clk_data);
+ return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h
new file mode 100644
index 0000000..c3b5635
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_DP_PLL_10NM_H
+#define __MDSS_DP_PLL_10NM_H
+
+#define DP_VCO_HSCLK_RATE_1620MHZDIV1000 1620000UL
+#define DP_VCO_HSCLK_RATE_2700MHZDIV1000 2700000UL
+#define DP_VCO_HSCLK_RATE_5400MHZDIV1000 5400000UL
+#define DP_VCO_HSCLK_RATE_8100MHZDIV1000 8100000UL
+
+struct dp_pll_db {
+ struct mdss_pll_resources *pll;
+
+ /* lane and orientation settings */
+ u8 lane_cnt;
+ u8 orientation;
+
+ /* COM PHY settings */
+ u32 hsclk_sel;
+ u32 dec_start_mode0;
+ u32 div_frac_start1_mode0;
+ u32 div_frac_start2_mode0;
+ u32 div_frac_start3_mode0;
+ u32 integloop_gain0_mode0;
+ u32 integloop_gain1_mode0;
+ u32 vco_tune_map;
+ u32 lock_cmp1_mode0;
+ u32 lock_cmp2_mode0;
+ u32 lock_cmp3_mode0;
+ u32 lock_cmp_en;
+
+ /* PHY vco divider */
+ u32 phy_vco_div;
+};
+
+int dp_vco_set_rate_10nm(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+unsigned long dp_vco_recalc_rate_10nm(struct clk_hw *hw,
+ unsigned long parent_rate);
+long dp_vco_round_rate_10nm(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate);
+int dp_vco_prepare_10nm(struct clk_hw *hw);
+void dp_vco_unprepare_10nm(struct clk_hw *hw);
+int dp_mux_set_parent_10nm(void *context,
+ unsigned int reg, unsigned int val);
+int dp_mux_get_parent_10nm(void *context,
+ unsigned int reg, unsigned int *val);
+#endif /* __MDSS_DP_PLL_10NM_H */
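The callbacks declared in this header follow the standard struct clk_ops signatures, so binding them looks roughly like the sketch below. This is illustrative only; the authoritative ops table lives in mdss-dp-pll-10nm.c and may bind additional hooks.

/* Sketch only; the real table is defined in mdss-dp-pll-10nm.c. */
static const struct clk_ops dp_10nm_vco_clk_ops_example = {
	.recalc_rate	= dp_vco_recalc_rate_10nm,
	.set_rate	= dp_vco_set_rate_10nm,
	.round_rate	= dp_vco_round_rate_10nm,
	.prepare	= dp_vco_prepare_10nm,
	.unprepare	= dp_vco_unprepare_10nm,
};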
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-8998-util.c b/drivers/clk/qcom/mdss/mdss-dp-pll-8998-util.c
deleted file mode 100644
index a3ed8a8..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-8998-util.c
+++ /dev/null
@@ -1,774 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clock-generic.h>
-
-#include "mdss-pll.h"
-#include "mdss-dp-pll.h"
-#include "mdss-dp-pll-8998.h"
-
-int link2xclk_divsel_set_div(struct div_clk *clk, int div)
-{
- int rc;
- u32 link2xclk_div_tx0, link2xclk_div_tx1;
- u32 phy_mode;
- struct mdss_pll_resources *dp_res = clk->priv;
-
- rc = mdss_pll_resource_enable(dp_res, true);
- if (rc) {
- pr_err("Failed to enable mdss DP PLL resources\n");
- return rc;
- }
-
- link2xclk_div_tx0 = MDSS_PLL_REG_R(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_BAND);
- link2xclk_div_tx1 = MDSS_PLL_REG_R(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_BAND);
-
- link2xclk_div_tx0 &= ~0x07; /* bits 0 to 2 */
- link2xclk_div_tx1 &= ~0x07; /* bits 0 to 2 */
-
- /* Configure TX band Mux */
- link2xclk_div_tx0 |= 0x4;
- link2xclk_div_tx1 |= 0x4;
-
- /*configure DP PHY MODE */
- phy_mode = 0x58;
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_BAND,
- link2xclk_div_tx0);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_BAND,
- link2xclk_div_tx1);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_MODE, phy_mode);
- /* Make sure the PHY register writes are done */
- wmb();
-
- pr_debug("%s: div=%d link2xclk_div_tx0=%x, link2xclk_div_tx1=%x\n",
- __func__, div, link2xclk_div_tx0, link2xclk_div_tx1);
-
- mdss_pll_resource_enable(dp_res, false);
-
- return rc;
-}
-
-int link2xclk_divsel_get_div(struct div_clk *clk)
-{
- int rc;
- u32 div = 0, phy_mode;
- struct mdss_pll_resources *dp_res = clk->priv;
-
- rc = mdss_pll_resource_enable(dp_res, true);
- if (rc) {
- pr_err("Failed to enable dp_res resources\n");
- return rc;
- }
-
- phy_mode = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_MODE);
-
- if (phy_mode & 0x48)
- pr_err("%s: DP PAR Rate not correct\n", __func__);
-
- if ((phy_mode & 0x3) == 1)
- div = 10;
- else if ((phy_mode & 0x3) == 0)
- div = 5;
- else
- pr_err("%s: unsupported div: %d\n", __func__, phy_mode);
-
- mdss_pll_resource_enable(dp_res, false);
- pr_debug("%s: phy_mode=%d, div=%d\n", __func__,
- phy_mode, div);
-
- return div;
-}
-
-int vco_divided_clk_set_div(struct div_clk *clk, int div)
-{
- int rc;
- u32 auxclk_div;
- struct mdss_pll_resources *dp_res = clk->priv;
-
- rc = mdss_pll_resource_enable(dp_res, true);
- if (rc) {
- pr_err("Failed to enable mdss DP PLL resources\n");
- return rc;
- }
-
- auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
- auxclk_div &= ~0x03; /* bits 0 to 1 */
-
- auxclk_div |= 1; /* Default divider */
-
- if (div == 4)
- auxclk_div |= 2;
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_VCO_DIV, auxclk_div);
- /* Make sure the PHY registers writes are done */
- wmb();
- pr_debug("%s: div=%d auxclk_div=%x\n", __func__, div, auxclk_div);
-
- mdss_pll_resource_enable(dp_res, false);
-
- return rc;
-}
-
-
-enum handoff vco_divided_clk_handoff(struct clk *c)
-{
- /*
- * Since cont-splash is not enabled, disable handoff
- * for vco_divider_clk.
- */
- return HANDOFF_DISABLED_CLK;
-}
-
-int vco_divided_clk_get_div(struct div_clk *clk)
-{
- int rc;
- u32 div, auxclk_div;
- struct mdss_pll_resources *dp_res = clk->priv;
-
- rc = mdss_pll_resource_enable(dp_res, true);
- if (rc) {
- pr_err("Failed to enable dp_res resources\n");
- return rc;
- }
-
- auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
- auxclk_div &= 0x03;
-
- div = 2; /* Default divider */
- if (auxclk_div == 2)
- div = 4;
-
- mdss_pll_resource_enable(dp_res, false);
-
- pr_debug("%s: auxclk_div=%d, div=%d\n", __func__, auxclk_div, div);
-
- return div;
-}
-
-int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
-{
- u32 res = 0;
- struct mdss_pll_resources *dp_res = vco->priv;
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_PD_CTL, 0x3d);
- /* Make sure the PHY register writes are done */
- wmb();
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_SYSCLK_EN_SEL, 0x37);
-
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CLK_ENABLE1, 0x0e);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CLK_SEL, 0x30);
-
- /* Different for each clock rates */
- if (rate == DP_VCO_HSCLK_RATE_1620MHZDIV1000) {
- pr_debug("%s: VCO rate: %ld\n", __func__,
- DP_VCO_RATE_8100MHZDIV1000);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_SYS_CLK_CTRL, 0x02);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_HSCLK_SEL, 0x2c);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP_EN, 0x04);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DEC_START_MODE0, 0x69);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START2_MODE0, 0x80);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START3_MODE0, 0x07);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CMN_CONFIG, 0x42);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP1_MODE0, 0xbf);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP2_MODE0, 0x21);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
- } else if (rate == DP_VCO_HSCLK_RATE_2700MHZDIV1000) {
- pr_debug("%s: VCO rate: %ld\n", __func__,
- DP_VCO_RATE_8100MHZDIV1000);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_SYS_CLK_CTRL, 0x06);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_HSCLK_SEL, 0x84);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP_EN, 0x08);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DEC_START_MODE0, 0x69);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START2_MODE0, 0x80);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START3_MODE0, 0x07);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CMN_CONFIG, 0x02);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP1_MODE0, 0x3f);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP2_MODE0, 0x38);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
- } else if (rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000) {
- pr_debug("%s: VCO rate: %ld\n", __func__,
- DP_VCO_RATE_10800MHZDIV1000);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_SYS_CLK_CTRL, 0x06);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_HSCLK_SEL, 0x80);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP_EN, 0x08);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DEC_START_MODE0, 0x8c);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START3_MODE0, 0xa0);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CMN_CONFIG, 0x12);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP1_MODE0, 0x7f);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP2_MODE0, 0x70);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
- } else {
- pr_err("%s: unsupported rate: %ld\n", __func__, rate);
- return -EINVAL;
- }
- /* Make sure the PLL register writes are done */
- wmb();
-
- if ((rate == DP_VCO_HSCLK_RATE_1620MHZDIV1000)
- || (rate == DP_VCO_HSCLK_RATE_2700MHZDIV1000)) {
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_VCO_DIV, 0x1);
- } else {
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_VCO_DIV, 0x2);
- }
- /* Make sure the PHY register writes are done */
- wmb();
-
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_VCO_TUNE_MAP, 0x00);
-
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_BG_TIMER, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_BG_TIMER, 0x0a);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CORECLK_DIV_MODE0, 0x05);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_VCO_TUNE_CTRL, 0x00);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CP_CTRL_MODE0, 0x06);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_PLL_IVCO, 0x07);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x37);
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_CORE_CLK_EN, 0x0f);
-
- /* Make sure the PLL register writes are done */
- wmb();
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_MODE, 0x58);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_TX0_TX1_LANE_CTL, 0x05);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_TX2_TX3_LANE_CTL, 0x05);
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x1a);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x1a);
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_VMODE_CTRL1,
- 0x40);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_VMODE_CTRL1,
- 0x40);
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN,
- 0x30);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN,
- 0x30);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_INTERFACE_SELECT,
- 0x3d);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_INTERFACE_SELECT,
- 0x3d);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
- 0x0f);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
- 0x0f);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_RESET_TSYNC_EN,
- 0x03);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_RESET_TSYNC_EN,
- 0x03);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TRAN_DRVR_EMP_EN,
- 0x03);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TRAN_DRVR_EMP_EN,
- 0x03);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN,
- 0x00);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN,
- 0x00);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_INTERFACE_MODE,
- 0x00);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_INTERFACE_MODE,
- 0x00);
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_BAND,
- 0x4);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_BAND,
- 0x4);
- /* Make sure the PHY register writes are done */
- wmb();
- return res;
-}
-
-static bool dp_pll_lock_status(struct mdss_pll_resources *dp_res)
-{
- u32 status;
- bool pll_locked;
-
- /* poll for PLL ready status */
- if (readl_poll_timeout_atomic((dp_res->pll_base +
- QSERDES_COM_C_READY_STATUS),
- status,
- ((status & BIT(0)) > 0),
- DP_PLL_POLL_SLEEP_US,
- DP_PLL_POLL_TIMEOUT_US)) {
- pr_err("%s: C_READY status is not high. Status=%x\n",
- __func__, status);
- pll_locked = false;
- } else if (readl_poll_timeout_atomic((dp_res->pll_base +
- DP_PHY_STATUS),
- status,
- ((status & BIT(1)) > 0),
- DP_PLL_POLL_SLEEP_US,
- DP_PLL_POLL_TIMEOUT_US)) {
- pr_err("%s: Phy_ready is not high. Status=%x\n",
- __func__, status);
- pll_locked = false;
- } else {
- pll_locked = true;
- }
-
- return pll_locked;
-}
-
-
-static int dp_pll_enable(struct clk *c)
-{
- int rc = 0;
- u32 status;
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
- struct mdss_pll_resources *dp_res = vco->priv;
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x01);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x05);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x01);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x09);
- /* Make sure the PHY register writes are done */
- wmb();
- MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_RESETSM_CNTRL, 0x20);
- /* Make sure the PLL register writes are done */
- wmb();
- /* poll for PLL ready status */
- if (readl_poll_timeout_atomic((dp_res->pll_base +
- QSERDES_COM_C_READY_STATUS),
- status,
- ((status & BIT(0)) > 0),
- DP_PLL_POLL_SLEEP_US,
- DP_PLL_POLL_TIMEOUT_US)) {
- pr_err("%s: C_READY status is not high. Status=%x\n",
- __func__, status);
- rc = -EINVAL;
- goto lock_err;
- }
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x19);
- /* Make sure the PHY register writes are done */
- wmb();
- /* poll for PHY ready status */
- if (readl_poll_timeout_atomic((dp_res->phy_base +
- DP_PHY_STATUS),
- status,
- ((status & BIT(1)) > 0),
- DP_PLL_POLL_SLEEP_US,
- DP_PLL_POLL_TIMEOUT_US)) {
- pr_err("%s: Phy_ready is not high. Status=%x\n",
- __func__, status);
- rc = -EINVAL;
- goto lock_err;
- }
-
- pr_debug("%s: PLL is locked\n", __func__);
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x3f);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
- 0x10);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x3f);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
- 0x10);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_POL_INV,
- 0x0a);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_POL_INV,
- 0x0a);
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x18);
- udelay(2000);
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x19);
-
- /*
- * Make sure all the register writes are completed before
- * doing any other operation
- */
- wmb();
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_LANE_MODE_1,
- 0xf6);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_LANE_MODE_1,
- 0xf6);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
- 0x1f);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
- 0x1f);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
- 0x0f);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
- 0x0f);
- /*
- * Make sure all the register writes are completed before
- * doing any other operation
- */
- wmb();
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x09);
- udelay(2000);
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_CFG, 0x19);
- udelay(2000);
- /* poll for PHY ready status */
- if (readl_poll_timeout_atomic((dp_res->phy_base +
- DP_PHY_STATUS),
- status,
- ((status & BIT(1)) > 0),
- DP_PLL_POLL_SLEEP_US,
- DP_PLL_POLL_TIMEOUT_US)) {
- pr_err("%s: Lane_mode: Phy_ready is not high. Status=%x\n",
- __func__, status);
- rc = -EINVAL;
- goto lock_err;
- }
-
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_DRV_LVL,
- 0x2a);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_DRV_LVL,
- 0x2a);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_TX_EMP_POST1_LVL,
- 0x20);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_TX_EMP_POST1_LVL,
- 0x20);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX,
- 0x11);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX,
- 0x11);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX,
- 0x11);
- MDSS_PLL_REG_W(dp_res->phy_base,
- QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX,
- 0x11);
- /* Make sure the PHY register writes are done */
- wmb();
-
-lock_err:
- return rc;
-}
-
-static int dp_pll_disable(struct clk *c)
-{
- int rc = 0;
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
- struct mdss_pll_resources *dp_res = vco->priv;
-
- /* Assert DP PHY power down */
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_PD_CTL, 0x2);
- /*
- * Make sure all the register writes to disable PLL are
- * completed before doing any other operation
- */
- wmb();
-
- return rc;
-}
-
-
-int dp_vco_prepare(struct clk *c)
-{
- int rc = 0;
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
- struct mdss_pll_resources *dp_pll_res = vco->priv;
-
- DEV_DBG("rate=%ld\n", vco->rate);
- rc = mdss_pll_resource_enable(dp_pll_res, true);
- if (rc) {
- pr_err("Failed to enable mdss DP pll resources\n");
- goto error;
- }
-
- rc = dp_pll_enable(c);
- if (rc) {
- mdss_pll_resource_enable(dp_pll_res, false);
- pr_err("ndx=%d failed to enable dsi pll\n",
- dp_pll_res->index);
- goto error;
- }
-
- mdss_pll_resource_enable(dp_pll_res, false);
-error:
- return rc;
-}
-
-void dp_vco_unprepare(struct clk *c)
-{
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
- struct mdss_pll_resources *io = vco->priv;
-
- if (!io) {
- DEV_ERR("Invalid input parameter\n");
- return;
- }
-
- if (!io->pll_on &&
- mdss_pll_resource_enable(io, true)) {
- DEV_ERR("pll resource can't be enabled\n");
- return;
- }
- dp_pll_disable(c);
-
- io->handoff_resources = false;
- mdss_pll_resource_enable(io, false);
- io->pll_on = false;
-}
-
-int dp_vco_set_rate(struct clk *c, unsigned long rate)
-{
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
- struct mdss_pll_resources *io = vco->priv;
- int rc;
-
- rc = mdss_pll_resource_enable(io, true);
- if (rc) {
- DEV_ERR("pll resource can't be enabled\n");
- return rc;
- }
-
- DEV_DBG("DP lane CLK rate=%ld\n", rate);
-
- rc = dp_config_vco_rate(vco, rate);
- if (rc)
- DEV_ERR("%s: Failed to set clk rate\n", __func__);
-
- mdss_pll_resource_enable(io, false);
-
- vco->rate = rate;
-
- return 0;
-}
-
-unsigned long dp_vco_get_rate(struct clk *c)
-{
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
- int rc;
- u32 div, hsclk_div, link2xclk_div;
- u64 vco_rate;
- struct mdss_pll_resources *pll = vco->priv;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable mdss DP pll=%d\n", pll->index);
- return rc;
- }
-
- div = MDSS_PLL_REG_R(pll->pll_base, QSERDES_COM_HSCLK_SEL);
- div &= 0x0f;
-
- if (div == 12)
- hsclk_div = 5; /* Default */
- else if (div == 4)
- hsclk_div = 3;
- else if (div == 0)
- hsclk_div = 2;
- else {
- pr_debug("unknown divider. forcing to default\n");
- hsclk_div = 5;
- }
-
- div = MDSS_PLL_REG_R(pll->phy_base, DP_PHY_MODE);
-
- if (div & 0x58)
- pr_err("%s: DP PAR Rate not correct\n", __func__);
-
- if ((div & 0x3) == 1)
- link2xclk_div = 10;
- else if ((div & 0x3) == 0)
- link2xclk_div = 5;
- else
- pr_err("%s: unsupported div. Phy_mode: %d\n", __func__, div);
-
- if (link2xclk_div == 10) {
- vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
- } else {
- if (hsclk_div == 5)
- vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
- else if (hsclk_div == 3)
- vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
- else
- vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
- }
-
- pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
-
- mdss_pll_resource_enable(pll, false);
-
- return (unsigned long)vco_rate;
-}
-
-long dp_vco_round_rate(struct clk *c, unsigned long rate)
-{
- unsigned long rrate = rate;
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-
- if (rate <= vco->min_rate)
- rrate = vco->min_rate;
- else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
- rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
- else
- rrate = vco->max_rate;
-
- pr_debug("%s: rrate=%ld\n", __func__, rrate);
-
- return rrate;
-}
-
-enum handoff dp_vco_handoff(struct clk *c)
-{
- enum handoff ret = HANDOFF_DISABLED_CLK;
- struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
- struct mdss_pll_resources *io = vco->priv;
-
- if (mdss_pll_resource_enable(io, true)) {
- DEV_ERR("pll resource can't be enabled\n");
- return ret;
- }
-
- if (dp_pll_lock_status(io)) {
- io->pll_on = true;
- c->rate = dp_vco_get_rate(c);
- io->handoff_resources = true;
- ret = HANDOFF_ENABLED_CLK;
- } else {
- io->handoff_resources = false;
- mdss_pll_resource_enable(io, false);
- DEV_DBG("%s: PLL not locked\n", __func__);
- }
-
- DEV_DBG("done, ret=%d\n", ret);
- return ret;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.c b/drivers/clk/qcom/mdss/mdss-dp-pll-8998.c
deleted file mode 100644
index 6a49d15..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-/*
- ***************************************************************************
- ******** Display Port PLL driver block diagram for branch clocks **********
- ***************************************************************************
-
- +--------------------------+
- | DP_VCO_CLK |
- | |
- | +-------------------+ |
- | | (DP PLL/VCO) | |
- | +---------+---------+ |
- | v |
- | +----------+-----------+ |
- | | hsclk_divsel_clk_src | |
- | +----------+-----------+ |
- +--------------------------+
- |
- v
- +------------<------------|------------>-------------+
- | | |
-+----------v----------+ +----------v----------+ +----------v----------+
-| dp_link_2x_clk | | vco_divided_clk_src | | vco_divided_clk_src |
-| divsel_five | | | | |
-v----------+----------v | divsel_two | | divsel_four |
- | +----------+----------+ +----------+----------+
- | | |
- v v v
- | +---------------------+ |
- Input to MMSSCC block | | (aux_clk_ops) | |
- for link clk, crypto clk +--> vco_divided_clk <-+
- and interface clock | _src_mux |
- +----------+----------+
- |
- v
- Input to MMSSCC block
- for DP pixel clock
-
- ******************************************************************************
- */
-
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8998.h>
-
-#include "mdss-pll.h"
-#include "mdss-dp-pll.h"
-#include "mdss-dp-pll-8998.h"
-
-static const struct clk_ops clk_ops_vco_divided_clk_src_c;
-static const struct clk_ops clk_ops_link_2x_clk_div_c;
-static const struct clk_ops clk_ops_gen_mux_dp;
-
-static struct clk_div_ops link2xclk_divsel_ops = {
- .set_div = link2xclk_divsel_set_div,
- .get_div = link2xclk_divsel_get_div,
-};
-
-static struct clk_div_ops vco_divided_clk_ops = {
- .set_div = vco_divided_clk_set_div,
- .get_div = vco_divided_clk_get_div,
-};
-
-static const struct clk_ops dp_8998_vco_clk_ops = {
- .set_rate = dp_vco_set_rate,
- .round_rate = dp_vco_round_rate,
- .prepare = dp_vco_prepare,
- .unprepare = dp_vco_unprepare,
- .handoff = dp_vco_handoff,
-};
-
-static struct clk_mux_ops mdss_mux_ops = {
- .set_mux_sel = mdss_set_mux_sel,
- .get_mux_sel = mdss_get_mux_sel,
-};
-
-static struct dp_pll_vco_clk dp_vco_clk = {
- .min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
- .max_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000,
- .c = {
- .dbg_name = "dp_vco_clk",
- .ops = &dp_8998_vco_clk_ops,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dp_vco_clk.c),
- },
-};
-
-static struct div_clk dp_link_2x_clk_divsel_five = {
- .data = {
- .div = 5,
- .min_div = 5,
- .max_div = 5,
- },
- .ops = &link2xclk_divsel_ops,
- .c = {
- .parent = &dp_vco_clk.c,
- .dbg_name = "dp_link_2x_clk_divsel_five",
- .ops = &clk_ops_link_2x_clk_div_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dp_link_2x_clk_divsel_five.c),
- },
-};
-
-static struct div_clk vco_divsel_four_clk_src = {
- .data = {
- .div = 4,
- .min_div = 4,
- .max_div = 4,
- },
- .ops = &vco_divided_clk_ops,
- .c = {
- .parent = &dp_vco_clk.c,
- .dbg_name = "vco_divsel_four_clk_src",
- .ops = &clk_ops_vco_divided_clk_src_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(vco_divsel_four_clk_src.c),
- },
-};
-
-static struct div_clk vco_divsel_two_clk_src = {
- .data = {
- .div = 2,
- .min_div = 2,
- .max_div = 2,
- },
- .ops = &vco_divided_clk_ops,
- .c = {
- .parent = &dp_vco_clk.c,
- .dbg_name = "vco_divsel_two_clk_src",
- .ops = &clk_ops_vco_divided_clk_src_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(vco_divsel_two_clk_src.c),
- },
-};
-
-static struct mux_clk vco_divided_clk_src_mux = {
- .num_parents = 2,
- .parents = (struct clk_src[]) {
- {&vco_divsel_two_clk_src.c, 0},
- {&vco_divsel_four_clk_src.c, 1},
- },
- .ops = &mdss_mux_ops,
- .c = {
- .parent = &vco_divsel_two_clk_src.c,
- .dbg_name = "vco_divided_clk_src_mux",
- .ops = &clk_ops_gen_mux_dp,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(vco_divided_clk_src_mux.c),
- }
-};
-
-static struct clk_lookup dp_pllcc_8998[] = {
- CLK_LIST(dp_vco_clk),
- CLK_LIST(dp_link_2x_clk_divsel_five),
- CLK_LIST(vco_divsel_four_clk_src),
- CLK_LIST(vco_divsel_two_clk_src),
- CLK_LIST(vco_divided_clk_src_mux),
-};
-
-int dp_pll_clock_register_8998(struct platform_device *pdev,
- struct mdss_pll_resources *pll_res)
-{
- int rc = -ENOTSUPP;
-
- if (!pll_res || !pll_res->pll_base || !pll_res->phy_base) {
- DEV_ERR("%s: Invalid input parameters\n", __func__);
- return -EINVAL;
- }
-
- /* Set client data for vco, mux and div clocks */
- dp_vco_clk.priv = pll_res;
- vco_divided_clk_src_mux.priv = pll_res;
- vco_divsel_two_clk_src.priv = pll_res;
- vco_divsel_four_clk_src.priv = pll_res;
- dp_link_2x_clk_divsel_five.priv = pll_res;
-
- clk_ops_link_2x_clk_div_c = clk_ops_div;
- clk_ops_link_2x_clk_div_c.prepare = mdss_pll_div_prepare;
-
- /*
- * Set the ops for the divider in the pixel clock tree to the
- * slave_div to ensure that a set rate on this divider clock will not
- * be propagated to it's parent. This is needed ensure that when we set
- * the rate for pixel clock, the vco is not reconfigured
- */
- clk_ops_vco_divided_clk_src_c = clk_ops_slave_div;
- clk_ops_vco_divided_clk_src_c.prepare = mdss_pll_div_prepare;
- clk_ops_vco_divided_clk_src_c.handoff = vco_divided_clk_handoff;
-
- clk_ops_gen_mux_dp = clk_ops_gen_mux;
- clk_ops_gen_mux_dp.get_rate = parent_get_rate;
-
- /* We can select different clock ops for future versions */
- dp_vco_clk.c.ops = &dp_8998_vco_clk_ops;
-
- rc = of_msm_clock_register(pdev->dev.of_node, dp_pllcc_8998,
- ARRAY_SIZE(dp_pllcc_8998));
- if (rc) {
- DEV_ERR("%s: Clock register failed rc=%d\n", __func__, rc);
- rc = -EPROBE_DEFER;
- } else {
- DEV_DBG("%s SUCCESS\n", __func__);
- }
-
- return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.h b/drivers/clk/qcom/mdss/mdss-dp-pll-8998.h
deleted file mode 100644
index 11d5ddc..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __MDSS_DP_PLL_8998_H
-#define __MDSS_DP_PLL_8998_H
-
-#define DP_PHY_REVISION_ID0 0x0000
-#define DP_PHY_REVISION_ID1 0x0004
-#define DP_PHY_REVISION_ID2 0x0008
-#define DP_PHY_REVISION_ID3 0x000C
-
-#define DP_PHY_CFG 0x0010
-#define DP_PHY_PD_CTL 0x0014
-#define DP_PHY_MODE 0x0018
-
-#define DP_PHY_AUX_CFG0 0x001C
-#define DP_PHY_AUX_CFG1 0x0020
-#define DP_PHY_AUX_CFG2 0x0024
-#define DP_PHY_AUX_CFG3 0x0028
-#define DP_PHY_AUX_CFG4 0x002C
-#define DP_PHY_AUX_CFG5 0x0030
-#define DP_PHY_AUX_CFG6 0x0034
-#define DP_PHY_AUX_CFG7 0x0038
-#define DP_PHY_AUX_CFG8 0x003C
-#define DP_PHY_AUX_CFG9 0x0040
-#define DP_PHY_AUX_INTERRUPT_MASK 0x0044
-#define DP_PHY_AUX_INTERRUPT_CLEAR 0x0048
-#define DP_PHY_AUX_BIST_CFG 0x004C
-
-#define DP_PHY_VCO_DIV 0x0064
-#define DP_PHY_TX0_TX1_LANE_CTL 0x0068
-
-#define DP_PHY_TX2_TX3_LANE_CTL 0x0084
-#define DP_PHY_STATUS 0x00BC
-
-/* Tx registers */
-#define QSERDES_TX0_OFFSET 0x0400
-#define QSERDES_TX1_OFFSET 0x0800
-
-#define TXn_BIST_MODE_LANENO 0x0000
-#define TXn_CLKBUF_ENABLE 0x0008
-#define TXn_TX_EMP_POST1_LVL 0x000C
-
-#define TXn_TX_DRV_LVL 0x001C
-
-#define TXn_RESET_TSYNC_EN 0x0024
-#define TXn_PRE_STALL_LDO_BOOST_EN 0x0028
-#define TXn_TX_BAND 0x002C
-#define TXn_SLEW_CNTL 0x0030
-#define TXn_INTERFACE_SELECT 0x0034
-
-#define TXn_RES_CODE_LANE_TX 0x003C
-#define TXn_RES_CODE_LANE_RX 0x0040
-#define TXn_RES_CODE_LANE_OFFSET_TX 0x0044
-#define TXn_RES_CODE_LANE_OFFSET_RX 0x0048
-
-#define TXn_DEBUG_BUS_SEL 0x0058
-#define TXn_TRANSCEIVER_BIAS_EN 0x005C
-#define TXn_HIGHZ_DRVR_EN 0x0060
-#define TXn_TX_POL_INV 0x0064
-#define TXn_PARRATE_REC_DETECT_IDLE_EN 0x0068
-
-#define TXn_LANE_MODE_1 0x008C
-
-#define TXn_TRAN_DRVR_EMP_EN 0x00C0
-#define TXn_TX_INTERFACE_MODE 0x00C4
-
-#define TXn_VMODE_CTRL1 0x00F0
-
-
-/* PLL register offset */
-#define QSERDES_COM_ATB_SEL1 0x0000
-#define QSERDES_COM_ATB_SEL2 0x0004
-#define QSERDES_COM_FREQ_UPDATE 0x0008
-#define QSERDES_COM_BG_TIMER 0x000C
-#define QSERDES_COM_SSC_EN_CENTER 0x0010
-#define QSERDES_COM_SSC_ADJ_PER1 0x0014
-#define QSERDES_COM_SSC_ADJ_PER2 0x0018
-#define QSERDES_COM_SSC_PER1 0x001C
-#define QSERDES_COM_SSC_PER2 0x0020
-#define QSERDES_COM_SSC_STEP_SIZE1 0x0024
-#define QSERDES_COM_SSC_STEP_SIZE2 0x0028
-#define QSERDES_COM_POST_DIV 0x002C
-#define QSERDES_COM_POST_DIV_MUX 0x0030
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x0034
-#define QSERDES_COM_CLK_ENABLE1 0x0038
-#define QSERDES_COM_SYS_CLK_CTRL 0x003C
-#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x0040
-#define QSERDES_COM_PLL_EN 0x0044
-#define QSERDES_COM_PLL_IVCO 0x0048
-#define QSERDES_COM_CMN_IETRIM 0x004C
-#define QSERDES_COM_CMN_IPTRIM 0x0050
-
-#define QSERDES_COM_CP_CTRL_MODE0 0x0060
-#define QSERDES_COM_CP_CTRL_MODE1 0x0064
-#define QSERDES_COM_PLL_RCTRL_MODE0 0x0068
-#define QSERDES_COM_PLL_RCTRL_MODE1 0x006C
-#define QSERDES_COM_PLL_CCTRL_MODE0 0x0070
-#define QSERDES_COM_PLL_CCTRL_MODE1 0x0074
-#define QSERDES_COM_PLL_CNTRL 0x0078
-#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x007C
-#define QSERDES_COM_SYSCLK_EN_SEL 0x0080
-#define QSERDES_COM_CML_SYSCLK_SEL 0x0084
-#define QSERDES_COM_RESETSM_CNTRL 0x0088
-#define QSERDES_COM_RESETSM_CNTRL2 0x008C
-#define QSERDES_COM_LOCK_CMP_EN 0x0090
-#define QSERDES_COM_LOCK_CMP_CFG 0x0094
-#define QSERDES_COM_LOCK_CMP1_MODE0 0x0098
-#define QSERDES_COM_LOCK_CMP2_MODE0 0x009C
-#define QSERDES_COM_LOCK_CMP3_MODE0 0x00A0
-
-#define QSERDES_COM_DEC_START_MODE0 0x00B0
-#define QSERDES_COM_DEC_START_MODE1 0x00B4
-#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x00B8
-#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x00BC
-#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x00C0
-#define QSERDES_COM_DIV_FRAC_START1_MODE1 0x00C4
-#define QSERDES_COM_DIV_FRAC_START2_MODE1 0x00C8
-#define QSERDES_COM_DIV_FRAC_START3_MODE1 0x00CC
-#define QSERDES_COM_INTEGLOOP_INITVAL 0x00D0
-#define QSERDES_COM_INTEGLOOP_EN 0x00D4
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x00D8
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x00DC
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x00E0
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x00E4
-#define QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x00E8
-#define QSERDES_COM_VCO_TUNE_CTRL 0x00EC
-#define QSERDES_COM_VCO_TUNE_MAP 0x00F0
-
-#define QSERDES_COM_CMN_STATUS 0x0124
-#define QSERDES_COM_RESET_SM_STATUS 0x0128
-
-#define QSERDES_COM_CLK_SEL 0x0138
-#define QSERDES_COM_HSCLK_SEL 0x013C
-
-#define QSERDES_COM_CORECLK_DIV_MODE0 0x0148
-
-#define QSERDES_COM_SW_RESET 0x0150
-#define QSERDES_COM_CORE_CLK_EN 0x0154
-#define QSERDES_COM_C_READY_STATUS 0x0158
-#define QSERDES_COM_CMN_CONFIG 0x015C
-
-#define QSERDES_COM_SVS_MODE_CLK_SEL 0x0164
-
-#define DP_PLL_POLL_SLEEP_US 500
-#define DP_PLL_POLL_TIMEOUT_US 10000
-
-#define DP_VCO_RATE_8100MHZDIV1000 8100000UL
-#define DP_VCO_RATE_10800MHZDIV1000 10800000UL
-
-#define DP_VCO_HSCLK_RATE_1620MHZDIV1000 1620000UL
-#define DP_VCO_HSCLK_RATE_2700MHZDIV1000 2700000UL
-#define DP_VCO_HSCLK_RATE_5400MHZDIV1000 5400000UL
-
-int dp_vco_set_rate(struct clk *c, unsigned long rate);
-unsigned long dp_vco_get_rate(struct clk *c);
-long dp_vco_round_rate(struct clk *c, unsigned long rate);
-enum handoff dp_vco_handoff(struct clk *c);
-enum handoff vco_divided_clk_handoff(struct clk *c);
-int dp_vco_prepare(struct clk *c);
-void dp_vco_unprepare(struct clk *c);
-int hsclk_divsel_set_div(struct div_clk *clk, int div);
-int hsclk_divsel_get_div(struct div_clk *clk);
-int link2xclk_divsel_set_div(struct div_clk *clk, int div);
-int link2xclk_divsel_get_div(struct div_clk *clk);
-int vco_divided_clk_set_div(struct div_clk *clk, int div);
-int vco_divided_clk_get_div(struct div_clk *clk);
-
-#endif /* __MDSS_DP_PLL_8998_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll.h b/drivers/clk/qcom/mdss/mdss-dp-pll.h
index 2805ff9..2b1d70e 100644
--- a/drivers/clk/qcom/mdss/mdss-dp-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,21 +15,19 @@
#define __MDSS_DP_PLL_H
struct dp_pll_vco_clk {
+ struct clk_hw hw;
unsigned long rate; /* current vco rate */
u64 min_rate; /* min vco rate */
u64 max_rate; /* max vco rate */
void *priv;
-
- struct clk c;
};
-static inline struct dp_pll_vco_clk *mdss_dp_to_vco_clk(struct clk *clk)
+static inline struct dp_pll_vco_clk *to_dp_vco_hw(struct clk_hw *hw)
{
- return container_of(clk, struct dp_pll_vco_clk, c);
+ return container_of(hw, struct dp_pll_vco_clk, hw);
}
-int dp_pll_clock_register_8998(struct platform_device *pdev,
+int dp_pll_clock_register_10nm(struct platform_device *pdev,
struct mdss_pll_resources *pll_res);
-
#endif /* __MDSS_DP_PLL_H */
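With the VCO now wrapped in a struct clk_hw, each clk_ops callback recovers its dp_pll_vco_clk through the container_of() helper above. An illustrative callback body is shown below; the function name and the returned value are examples, not taken from the driver.

static unsigned long example_vco_recalc_rate(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);

	/* report the VCO rate cached by the last set_rate() call */
	return vco->rate;
}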
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
index 7f82fda..e292ef8 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.c
+++ b/drivers/clk/qcom/mdss/mdss-pll.c
@@ -21,6 +21,7 @@
#include <linux/iopoll.h>
#include "mdss-pll.h"
#include "mdss-dsi-pll.h"
+#include "mdss-dp-pll.h"
int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
{
@@ -126,6 +127,8 @@ static int mdss_pll_resource_parse(struct platform_device *pdev,
if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_10nm"))
pll_res->pll_interface_type = MDSS_DSI_PLL_10NM;
+ else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_10nm"))
+ pll_res->pll_interface_type = MDSS_DP_PLL_10NM;
else
goto err;
@@ -151,6 +154,9 @@ static int mdss_pll_clock_register(struct platform_device *pdev,
case MDSS_DSI_PLL_10NM:
rc = dsi_pll_clock_register_10nm(pdev, pll_res);
break;
+ case MDSS_DP_PLL_10NM:
+ rc = dp_pll_clock_register_10nm(pdev, pll_res);
+ break;
case MDSS_UNKNOWN_PLL:
default:
rc = -EINVAL;
@@ -171,6 +177,7 @@ static int mdss_pll_probe(struct platform_device *pdev)
const char *label;
struct resource *pll_base_reg;
struct resource *phy_base_reg;
+ struct resource *tx0_base_reg, *tx1_base_reg;
struct resource *dynamic_pll_base_reg;
struct resource *gdsc_base_reg;
struct mdss_pll_resources *pll_res;
@@ -272,6 +279,30 @@ static int mdss_pll_probe(struct platform_device *pdev)
}
}
+ tx0_base_reg = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "ln_tx0_base");
+ if (tx0_base_reg) {
+ pll_res->ln_tx0_base = ioremap(tx0_base_reg->start,
+ resource_size(tx0_base_reg));
+ if (!pll_res->ln_tx0_base) {
+ pr_err("Unable to remap Lane TX0 base resources\n");
+ rc = -ENOMEM;
+ goto tx0_io_error;
+ }
+ }
+
+ tx1_base_reg = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "ln_tx1_base");
+ if (tx1_base_reg) {
+ pll_res->ln_tx1_base = ioremap(tx1_base_reg->start,
+ resource_size(tx1_base_reg));
+ if (!pll_res->ln_tx1_base) {
+ pr_err("Unable to remap Lane TX1 base resources\n");
+ rc = -ENOMEM;
+ goto tx1_io_error;
+ }
+ }
+
gdsc_base_reg = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "gdsc_base");
if (!gdsc_base_reg) {
@@ -309,6 +340,12 @@ static int mdss_pll_probe(struct platform_device *pdev)
if (pll_res->gdsc_base)
iounmap(pll_res->gdsc_base);
gdsc_io_error:
+ if (pll_res->ln_tx1_base)
+ iounmap(pll_res->ln_tx1_base);
+tx1_io_error:
+ if (pll_res->ln_tx0_base)
+ iounmap(pll_res->ln_tx0_base);
+tx0_io_error:
if (pll_res->dyn_pll_base)
iounmap(pll_res->dyn_pll_base);
dyn_pll_io_error:
@@ -347,6 +384,7 @@ static int mdss_pll_remove(struct platform_device *pdev)
static const struct of_device_id mdss_pll_dt_match[] = {
{.compatible = "qcom,mdss_dsi_pll_10nm"},
+ {.compatible = "qcom,mdss_dp_pll_10nm"},
{}
};
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index ee91e11..033462d 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -37,6 +37,7 @@
enum {
MDSS_DSI_PLL_10NM,
+ MDSS_DP_PLL_10NM,
MDSS_UNKNOWN_PLL,
};
@@ -81,6 +82,8 @@ struct mdss_pll_resources {
*/
void __iomem *pll_base;
void __iomem *phy_base;
+ void __iomem *ln_tx0_base;
+ void __iomem *ln_tx1_base;
void __iomem *gdsc_base;
void __iomem *dyn_pll_base;
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 8af73ac..d9ebe11 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -4877,15 +4877,12 @@ static int _qce_suspend(void *handle)
if (handle == NULL)
return -ENODEV;
- qce_enable_clk(pce_dev);
-
sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
sps_disconnect(sps_pipe_info);
sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
sps_disconnect(sps_pipe_info);
- qce_disable_clk(pce_dev);
return 0;
}
@@ -4899,8 +4896,6 @@ static int _qce_resume(void *handle)
if (handle == NULL)
return -ENODEV;
- qce_enable_clk(pce_dev);
-
sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
sps_connect_info = &pce_dev->ce_bam_info.consumer.connect;
memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
@@ -4923,7 +4918,6 @@ static int _qce_resume(void *handle)
if (rc)
pr_err("Producer callback registration failed rc = %d\n", rc);
- qce_disable_clk(pce_dev);
return rc;
}
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index ff64631..56fbb94 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -5371,8 +5371,11 @@ static int _qcrypto_suspend(struct platform_device *pdev, pm_message_t state)
spin_unlock_irqrestore(&cp->lock, flags);
if (ret)
return ret;
- if (qce_pm_table.suspend)
+ if (qce_pm_table.suspend) {
+ qcrypto_ce_set_bus(pengine, true);
qce_pm_table.suspend(pengine->qce);
+ qcrypto_ce_set_bus(pengine, false);
+ }
return 0;
}
@@ -5393,9 +5396,11 @@ static int _qcrypto_resume(struct platform_device *pdev)
spin_lock_irqsave(&cp->lock, flags);
if (pengine->bw_state == BUS_SUSPENDED) {
spin_unlock_irqrestore(&cp->lock, flags);
- if (qce_pm_table.resume)
+ if (qce_pm_table.resume) {
+ qcrypto_ce_set_bus(pengine, true);
qce_pm_table.resume(pengine->qce);
-
+ qcrypto_ce_set_bus(pengine, false);
+ }
spin_lock_irqsave(&cp->lock, flags);
pengine->bw_state = BUS_NO_BANDWIDTH;
pengine->active_seq++;
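Since qce50 no longer toggles its clocks inside _qce_suspend()/_qce_resume(), the caller now brackets those callbacks with a bus/clock vote. The same pattern can be factored into a small helper; this is a sketch assuming the driver's local struct crypto_engine and the existing qcrypto_ce_set_bus() helper.

/* Sketch: vote CE bus/clocks on only while a qce PM callback runs. */
static int qcrypto_call_with_bus(struct crypto_engine *pengine,
				 int (*fn)(void *handle))
{
	int ret;

	qcrypto_ce_set_bus(pengine, true);
	ret = fn(pengine->qce);
	qcrypto_ce_set_bus(pengine, false);

	return ret;
}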
diff --git a/drivers/edac/qcom_llcc_edac.c b/drivers/edac/qcom_llcc_edac.c
index 4403f86..a8ec359 100644
--- a/drivers/edac/qcom_llcc_edac.c
+++ b/drivers/edac/qcom_llcc_edac.c
@@ -103,7 +103,7 @@ struct errors_edac {
struct erp_drvdata {
struct regmap *llcc_map;
- phys_addr_t *llcc_banks;
+ u32 *llcc_banks;
u32 ecc_irq;
u32 num_banks;
u32 b_off;
@@ -353,12 +353,13 @@ static int qcom_llcc_erp_probe(struct platform_device *pdev)
struct erp_drvdata *drv;
struct edac_device_ctl_info *edev_ctl;
struct device *dev = &pdev->dev;
- u32 *banks;
- u32 i;
/* Allocate edac control info */
edev_ctl = edac_device_alloc_ctl_info(sizeof(*drv), "qcom-llcc", 1,
- NULL, 1, 1, NULL, 0, edac_device_alloc_index());
+ NULL, 0, 1, NULL, 0, edac_device_alloc_index());
+
+ if (!edev_ctl)
+ return -ENOMEM;
edev_ctl->dev = dev;
edev_ctl->mod_name = dev_name(dev);
@@ -404,20 +405,15 @@ static int qcom_llcc_erp_probe(struct platform_device *pdev)
drv->num_banks >>= LLCC_LB_CNT_SHIFT;
drv->llcc_banks = devm_kzalloc(&pdev->dev,
- sizeof(phys_addr_t) * drv->num_banks, GFP_KERNEL);
+ sizeof(u32) * drv->num_banks, GFP_KERNEL);
- if (!drv->num_banks) {
+ if (!drv->llcc_banks) {
dev_err(dev, "Cannot allocate memory for llcc_banks\n");
return -ENOMEM;
}
- banks = devm_kzalloc(&pdev->dev,
- sizeof(u32) * drv->num_banks, GFP_KERNEL);
- if (!banks)
- return -ENOMEM;
-
rc = of_property_read_u32_array(dev->parent->of_node,
- "qcom,llcc-banks-off", banks, drv->num_banks);
+ "qcom,llcc-banks-off", drv->llcc_banks, drv->num_banks);
if (rc) {
dev_err(dev, "Cannot read llcc-banks-off property\n");
return -EINVAL;
@@ -430,9 +426,6 @@ static int qcom_llcc_erp_probe(struct platform_device *pdev)
return -EINVAL;
}
- for (i = 0; i < drv->num_banks; i++)
- drv->llcc_banks[i] = banks[i];
-
platform_set_drvdata(pdev, edev_ctl);
rc = edac_device_add_device(edev_ctl);
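The probe simplification above reads the bank offsets straight into the devm-allocated u32 array instead of staging them in a temporary buffer. The core pattern, as a standalone sketch (the device-node and count parameters are illustrative):

/* Sketch: read a DT u32 array directly into devm-managed storage. */
static int example_read_banks(struct device *dev, struct device_node *np,
			      u32 num_banks, u32 **out)
{
	u32 *banks;

	banks = devm_kcalloc(dev, num_banks, sizeof(*banks), GFP_KERNEL);
	if (!banks)
		return -ENOMEM;

	if (of_property_read_u32_array(np, "qcom,llcc-banks-off",
				       banks, num_banks))
		return -EINVAL;

	*out = banks;
	return 0;
}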
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index fd7c912..79e9d36 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -774,20 +774,23 @@ void psb_intel_lvds_init(struct drm_device *dev,
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
mode_dev->panel_fixed_mode =
drm_mode_duplicate(dev, scan);
+ DRM_DEBUG_KMS("Using mode from DDC\n");
goto out; /* FIXME: check for quirks */
}
}
/* Failed to get EDID, what about VBT? do we need this? */
- if (mode_dev->vbt_mode)
+ if (dev_priv->lfp_lvds_vbt_mode) {
mode_dev->panel_fixed_mode =
- drm_mode_duplicate(dev, mode_dev->vbt_mode);
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (!mode_dev->panel_fixed_mode)
- if (dev_priv->lfp_lvds_vbt_mode)
- mode_dev->panel_fixed_mode =
- drm_mode_duplicate(dev,
- dev_priv->lfp_lvds_vbt_mode);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG_KMS("Using mode from VBT\n");
+ goto out;
+ }
+ }
/*
* If we didn't get EDID, try checking if the panel is already turned
@@ -804,6 +807,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
if (mode_dev->panel_fixed_mode) {
mode_dev->panel_fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG_KMS("Using pre-programmed mode\n");
goto out; /* FIXME: check for quirks */
}
}
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index a79a9c9..70581e2 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -20,14 +20,8 @@
#define DP_AUX_ENUM_STR(x) #x
-struct aux_buf {
- u8 *start; /* buffer start addr */
- u8 *end; /* buffer end addr */
- u8 *data; /* data pou32er */
- u32 size; /* size of buffer */
- u32 len; /* dara length */
- u8 trans_num; /* transaction number */
- enum aux_tx_mode tx_mode;
+enum {
+ DP_AUX_DATA_INDEX_WRITE = BIT(31),
};
struct dp_aux_private {
@@ -38,15 +32,12 @@ struct dp_aux_private {
struct mutex mutex;
struct completion comp;
- struct aux_cmd *cmds;
- struct aux_buf txp;
- struct aux_buf rxp;
-
u32 aux_error_num;
bool cmd_busy;
+ bool native;
+ bool read;
- u8 txbuf[256];
- u8 rxbuf[256];
+ struct drm_dp_aux drm_aux;
};
static char *dp_aux_get_error(u32 aux_error)
@@ -69,159 +60,104 @@ static char *dp_aux_get_error(u32 aux_error)
}
}
-static void dp_aux_buf_init(struct aux_buf *buf, u8 *data, u32 size)
+static u32 dp_aux_write(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *msg)
{
- buf->start = data;
- buf->size = size;
- buf->data = buf->start;
- buf->end = buf->start + buf->size;
- buf->len = 0;
- buf->trans_num = 0;
- buf->tx_mode = AUX_NATIVE;
-}
+ u32 data[4], reg, len;
+ u8 *msgdata = msg->buffer;
+ int const aux_cmd_fifo_len = 128;
+ int i = 0;
-static void dp_aux_buf_set(struct dp_aux_private *aux)
-{
- init_completion(&aux->comp);
- aux->cmd_busy = false;
- mutex_init(&aux->mutex);
-
- dp_aux_buf_init(&aux->txp, aux->txbuf, sizeof(aux->txbuf));
- dp_aux_buf_init(&aux->rxp, aux->rxbuf, sizeof(aux->rxbuf));
-}
-
-static void dp_aux_buf_reset(struct aux_buf *buf)
-{
- buf->data = buf->start;
- buf->len = 0;
- buf->trans_num = 0;
- buf->tx_mode = AUX_NATIVE;
-
- memset(buf->start, 0x0, 256);
-}
-
-static void dp_aux_buf_push(struct aux_buf *buf, u32 len)
-{
- buf->data += len;
- buf->len += len;
-}
-
-static u32 dp_aux_buf_trailing(struct aux_buf *buf)
-{
- return (u32)(buf->end - buf->data);
-}
-
-static u32 dp_aux_add_cmd(struct aux_buf *buf, struct aux_cmd *cmd)
-{
- u8 data;
- u8 *bp, *cp;
- u32 i, len;
-
- if (cmd->ex_mode == AUX_READ)
+ if (aux->read)
len = 4;
else
- len = cmd->len + 4;
-
- if (dp_aux_buf_trailing(buf) < len) {
- pr_err("buf trailing error\n");
- return 0;
- }
+ len = msg->size + 4;
/*
* cmd fifo only has depth of 144 bytes
* limit buf length to 128 bytes here
*/
- if ((buf->len + len) > 128) {
+ if (len > aux_cmd_fifo_len) {
pr_err("buf len error\n");
return 0;
}
- bp = buf->data;
- data = cmd->addr >> 16;
- data &= 0x0f; /* 4 addr bits */
+ /* Pack cmd and write to HW */
+ data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
+ if (aux->read)
+ data[0] |= BIT(4); /* R/W */
- if (cmd->ex_mode == AUX_READ)
- data |= BIT(4);
+ data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */
+ data[2] = msg->address & 0xff; /* addr[7:0] */
+ data[3] = (msg->size - 1) & 0xff; /* len[7:0] */
- *bp++ = data;
- *bp++ = cmd->addr >> 8;
- *bp++ = cmd->addr;
- *bp++ = cmd->len - 1;
-
- if (cmd->ex_mode == AUX_WRITE) {
- cp = cmd->buf;
-
- for (i = 0; i < cmd->len; i++)
- *bp++ = *cp++;
- }
-
- dp_aux_buf_push(buf, len);
-
- buf->tx_mode = cmd->tx_mode;
-
- buf->trans_num++;
-
- return cmd->len - 1;
-}
-
-static u32 dp_aux_cmd_fifo_tx(struct dp_aux_private *aux)
-{
- u8 *dp;
- u32 data, len, cnt;
- struct aux_buf *tp = &aux->txp;
-
- len = tp->len;
- if (len == 0) {
- pr_err("invalid len\n");
- return 0;
- }
-
- cnt = 0;
- dp = tp->start;
-
- while (cnt < len) {
- data = *dp;
- data <<= 8;
- data &= 0x00ff00;
- if (cnt == 0)
- data |= BIT(31);
-
- aux->catalog->data = data;
+ for (i = 0; i < len; i++) {
+ reg = (i < 4) ? data[i] : msgdata[i - 4];
+ reg = ((reg) << 8) & 0x0000ff00; /* index = 0, write */
+ if (i == 0)
+ reg |= DP_AUX_DATA_INDEX_WRITE;
+ aux->catalog->data = reg;
aux->catalog->write_data(aux->catalog);
-
- cnt++;
- dp++;
}
- data = (tp->trans_num - 1);
- if (tp->tx_mode == AUX_I2C) {
- data |= BIT(8); /* I2C */
- data |= BIT(10); /* NO SEND ADDR */
- data |= BIT(11); /* NO SEND STOP */
- }
+ reg = 0; /* Transaction number == 1 */
+ if (!aux->native) /* i2c */
+ reg |= (BIT(8) | BIT(10) | BIT(11));
- data |= BIT(9); /* GO */
- aux->catalog->data = data;
+ reg |= BIT(9);
+ aux->catalog->data = reg;
aux->catalog->write_trans(aux->catalog);
- return tp->len;
+ return len;
}
-static u32 dp_cmd_fifo_rx(struct dp_aux_private *aux, u32 len)
+static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ u32 ret = 0, len = 0, timeout;
+ int const aux_timeout_ms = HZ/4;
+
+ reinit_completion(&aux->comp);
+
+ len = dp_aux_write(aux, msg);
+ if (len == 0) {
+ pr_err("DP AUX write failed\n");
+ return -EINVAL;
+ }
+
+ timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms);
+ if (!timeout) {
+ pr_err("aux write timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ pr_debug("aux status %s\n",
+ dp_aux_get_error(aux->aux_error_num));
+
+ if (aux->aux_error_num == DP_AUX_ERR_NONE)
+ ret = len;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *msg)
{
u32 data;
u8 *dp;
u32 i;
- struct aux_buf *rp = &aux->rxp;
+ u32 len = msg->size;
data = 0;
- data |= BIT(31); /* INDEX_WRITE */
+ data |= DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
data |= BIT(0); /* read */
aux->catalog->data = data;
aux->catalog->write_data(aux->catalog);
- dp = rp->data;
+ dp = msg->buffer;
/* discard first byte */
data = aux->catalog->read_data(aux->catalog);
@@ -230,9 +166,6 @@ static u32 dp_cmd_fifo_rx(struct dp_aux_private *aux, u32 len)
data = aux->catalog->read_data(aux->catalog);
*dp++ = (u8)((data >> 8) & 0xff);
}
-
- rp->len = len;
- return len;
}
static void dp_aux_native_handler(struct dp_aux_private *aux)
@@ -292,219 +225,76 @@ static void dp_aux_isr(struct dp_aux *dp_aux)
if (!aux->cmd_busy)
return;
- if (aux->cmds->tx_mode == AUX_NATIVE)
+ if (aux->native)
dp_aux_native_handler(aux);
else
dp_aux_i2c_handler(aux);
}
-
-
-static int dp_aux_write(struct dp_aux_private *aux)
+/*
+ * This function does the real work of processing an AUX transaction.
+ * It resets the AUX channel through the catalog reset() callback if the
+ * transaction fails or times out.
+ */
+static ssize_t dp_aux_transfer(struct drm_dp_aux *drm_aux,
+ struct drm_dp_aux_msg *msg)
{
- struct aux_cmd *cm;
- struct aux_buf *tp;
- u32 len, ret, timeout;
+ ssize_t ret;
+ int const aux_cmd_native_max = 16;
+ int const aux_cmd_i2c_max = 128;
+ struct dp_aux_private *aux = container_of(drm_aux,
+ struct dp_aux_private, drm_aux);
mutex_lock(&aux->mutex);
- tp = &aux->txp;
- dp_aux_buf_reset(tp);
-
- cm = aux->cmds;
- while (cm) {
- ret = dp_aux_add_cmd(tp, cm);
- if (ret <= 0)
- break;
-
- if (!cm->next)
- break;
- cm++;
- }
-
- reinit_completion(&aux->comp);
+ aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
+ aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
aux->cmd_busy = true;
- len = dp_aux_cmd_fifo_tx(aux);
-
- timeout = wait_for_completion_timeout(&aux->comp, HZ/4);
- if (!timeout)
- pr_err("aux write timeout\n");
-
- pr_debug("aux status %s\n",
- dp_aux_get_error(aux->aux_error_num));
-
- if (aux->aux_error_num == DP_AUX_ERR_NONE)
- ret = len;
- else
- ret = aux->aux_error_num;
-
- aux->cmd_busy = false;
- mutex_unlock(&aux->mutex);
- return ret;
-}
-
-static int dp_aux_read(struct dp_aux_private *aux)
-{
- struct aux_cmd *cm;
- struct aux_buf *tp, *rp;
- u32 len, ret, timeout;
-
- mutex_lock(&aux->mutex);
-
- tp = &aux->txp;
- rp = &aux->rxp;
-
- dp_aux_buf_reset(tp);
- dp_aux_buf_reset(rp);
-
- cm = aux->cmds;
- len = 0;
-
- while (cm) {
- ret = dp_aux_add_cmd(tp, cm);
- len += cm->len;
-
- if (ret <= 0)
- break;
-
- if (!cm->next)
- break;
- cm++;
+ /* Ignore address only message */
+ if ((msg->size == 0) || (msg->buffer == NULL)) {
+ msg->reply = aux->native ?
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+ ret = msg->size;
+ goto unlock_exit;
}
- reinit_completion(&aux->comp);
- aux->cmd_busy = true;
+ /* msg sanity check */
+ if ((aux->native && (msg->size > aux_cmd_native_max)) ||
+ (msg->size > aux_cmd_i2c_max)) {
+ pr_err("%s: invalid msg: size(%zu), request(%x)\n",
+ __func__, msg->size, msg->request);
+ ret = -EINVAL;
+ goto unlock_exit;
+ }
- dp_aux_cmd_fifo_tx(aux);
+ ret = dp_aux_cmd_fifo_tx(aux, msg);
+ if (ret < 0) {
+ aux->catalog->reset(aux->catalog); /* reset aux */
+ goto unlock_exit;
+ }
- timeout = wait_for_completion_timeout(&aux->comp, HZ/4);
- if (!timeout)
- pr_err("aux read timeout\n");
+ if (aux->aux_error_num == DP_AUX_ERR_NONE) {
+ if (aux->read)
+ dp_aux_cmd_fifo_rx(aux, msg);
- pr_debug("aux status %s\n",
- dp_aux_get_error(aux->aux_error_num));
+ msg->reply = aux->native ?
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+ } else {
+ /* Reply defer to retry */
+ msg->reply = aux->native ?
+ DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
+ }
- if (aux->aux_error_num == DP_AUX_ERR_NONE)
- ret = dp_cmd_fifo_rx(aux, len);
- else
- ret = aux->aux_error_num;
+ /* Return requested size for success or retry */
+ ret = msg->size;
- aux->cmds->buf = rp->data;
+unlock_exit:
aux->cmd_busy = false;
-
mutex_unlock(&aux->mutex);
-
return ret;
}
-static int dp_aux_write_ex(struct dp_aux *dp_aux, u32 addr, u32 len,
- enum aux_tx_mode mode, u8 *buf)
-{
- struct aux_cmd cmd = {0};
- struct dp_aux_private *aux;
-
- if (!dp_aux || !len) {
- pr_err("invalid input\n");
- return -EINVAL;
- }
-
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
- cmd.ex_mode = AUX_WRITE;
- cmd.tx_mode = mode;
- cmd.addr = addr;
- cmd.len = len;
- cmd.buf = buf;
-
- aux->cmds = &cmd;
-
- return dp_aux_write(aux);
-}
-
-static int dp_aux_read_ex(struct dp_aux *dp_aux, u32 addr, u32 len,
- enum aux_tx_mode mode, u8 **buf)
-{
- int rc = 0;
- struct aux_cmd cmd = {0};
- struct dp_aux_private *aux;
-
- if (!dp_aux || !len) {
- pr_err("invalid input\n");
- rc = -EINVAL;
- goto end;
- }
-
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
- cmd.ex_mode = AUX_READ;
- cmd.tx_mode = mode;
- cmd.addr = addr;
- cmd.len = len;
-
- aux->cmds = &cmd;
-
- rc = dp_aux_read(aux);
- if (rc <= 0) {
- rc = -EINVAL;
- goto end;
- }
-
- *buf = cmd.buf;
-end:
- return rc;
-}
-
-static int dp_aux_process(struct dp_aux *dp_aux, struct aux_cmd *cmds)
-{
- struct dp_aux_private *aux;
-
- if (!dp_aux || !cmds) {
- pr_err("invalid input\n");
- return -EINVAL;
- }
-
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
- aux->cmds = cmds;
-
- if (cmds->ex_mode == AUX_READ)
- return dp_aux_read(aux);
- else
- return dp_aux_write(aux);
-}
-
-static bool dp_aux_ready(struct dp_aux *dp_aux)
-{
- u8 data = 0;
- int count, ret;
- struct dp_aux_private *aux;
-
- if (!dp_aux) {
- pr_err("invalid input\n");
- goto error;
- }
-
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
- for (count = 5; count; count--) {
- ret = dp_aux_write_ex(dp_aux, 0x50, 1, AUX_I2C, &data);
- if (ret >= 0)
- break;
-
- msleep(100);
- }
-
- if (count <= 0) {
- pr_err("aux chan NOT ready\n");
- goto error;
- }
-
- return true;
-error:
- return false;
-}
-
static void dp_aux_init(struct dp_aux *dp_aux, u32 *aux_cfg)
{
struct dp_aux_private *aux;
@@ -535,6 +325,45 @@ static void dp_aux_deinit(struct dp_aux *dp_aux)
aux->catalog->enable(aux->catalog, false);
}
+static int dp_aux_register(struct dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+ int ret = 0;
+
+ if (!dp_aux) {
+ pr_err("invalid input\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ aux->drm_aux.name = "sde_dp_aux";
+ aux->drm_aux.dev = aux->dev;
+ aux->drm_aux.transfer = dp_aux_transfer;
+ ret = drm_dp_aux_register(&aux->drm_aux);
+ if (ret) {
+ pr_err("%s: failed to register drm aux: %d\n", __func__, ret);
+ goto exit;
+ }
+ dp_aux->drm_aux = &aux->drm_aux;
+exit:
+ return ret;
+}
+
+static void dp_aux_deregister(struct dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ if (!dp_aux) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ drm_dp_aux_unregister(&aux->drm_aux);
+}
+
struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog)
{
int rc = 0;
@@ -553,21 +382,19 @@ struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog)
goto error;
}
+ init_completion(&aux->comp);
+ aux->cmd_busy = false;
+ mutex_init(&aux->mutex);
+
aux->dev = dev;
-
- dp_aux_buf_set(aux);
-
aux->catalog = catalog;
-
dp_aux = &aux->dp_aux;
- dp_aux->process = dp_aux_process;
- dp_aux->read = dp_aux_read_ex;
- dp_aux->write = dp_aux_write_ex;
- dp_aux->ready = dp_aux_ready;
dp_aux->isr = dp_aux_isr;
dp_aux->init = dp_aux_init;
dp_aux->deinit = dp_aux_deinit;
+ dp_aux->drm_aux_register = dp_aux_register;
+ dp_aux->drm_aux_deregister = dp_aux_deregister;
return dp_aux;
error:
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 0603c15..f08c12b 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -16,6 +16,7 @@
#define _DP_AUX_H_
#include "dp_catalog.h"
+#include "drm_dp_helper.h"
enum dp_aux_error {
DP_AUX_ERR_NONE = 0,
@@ -26,32 +27,10 @@ enum dp_aux_error {
DP_AUX_ERR_NACK_DEFER = -5,
};
-enum aux_tx_mode {
- AUX_NATIVE,
- AUX_I2C,
-};
-
-enum aux_exe_mode {
- AUX_WRITE,
- AUX_READ,
-};
-
-struct aux_cmd {
- enum aux_exe_mode ex_mode;
- enum aux_tx_mode tx_mode;
- u32 addr;
- u32 len;
- u8 *buf;
- bool next;
-};
-
struct dp_aux {
- int (*process)(struct dp_aux *aux, struct aux_cmd *cmd);
- int (*write)(struct dp_aux *aux, u32 addr, u32 len,
- enum aux_tx_mode mode, u8 *buf);
- int (*read)(struct dp_aux *aux, u32 addr, u32 len,
- enum aux_tx_mode mode, u8 **buf);
- bool (*ready)(struct dp_aux *aux);
+ struct drm_dp_aux *drm_aux;
+ int (*drm_aux_register)(struct dp_aux *aux);
+ void (*drm_aux_deregister)(struct dp_aux *aux);
void (*isr)(struct dp_aux *aux);
void (*init)(struct dp_aux *aux, u32 *aux_cfg);
void (*deinit)(struct dp_aux *aux);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index ca55d16..9361b52 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -177,8 +177,6 @@
#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN (0x004)
-#define EDID_START_ADDRESS 0x50
-
/* DP MMSS_CC registers */
#define MMSS_DP_LINK_CMD_RCGR (0x0138)
#define MMSS_DP_LINK_CFG_RCGR (0x013C)
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 56f6052..888c511 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -20,14 +20,9 @@
#include "dp_ctrl.h"
-#define DP_LINK_RATE_MULTIPLIER 27000000
#define DP_KHZ_TO_HZ 1000
#define DP_CRYPTO_CLK_RATE_KHZ 180000
-/* sink power state */
-#define SINK_POWER_ON 1
-#define SINK_POWER_OFF 2
-
#define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0)
#define DP_CTRL_INTR_IDLE_PATTERN_SENT BIT(3)
@@ -103,14 +98,6 @@ static void dp_ctrl_video_ready(struct dp_ctrl_private *ctrl)
complete(&ctrl->video_comp);
}
-static void dp_ctrl_set_sink_power_state(struct dp_ctrl_private *ctrl,
- u8 power_state)
-{
- const int len = 1;
-
- ctrl->aux->write(ctrl->aux, 0x600, len, AUX_NATIVE, &power_state);
-}
-
static void dp_ctrl_state_ctrl(struct dp_ctrl_private *ctrl, u32 state)
{
ctrl->catalog->state_ctrl(ctrl->catalog, state);
@@ -128,7 +115,7 @@ static void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
- dp_ctrl_set_sink_power_state(ctrl, SINK_POWER_OFF);
+ drm_dp_link_power_down(ctrl->aux->drm_aux, &ctrl->panel->dp_link);
reinit_completion(&ctrl->idle_comp);
dp_ctrl_state_ctrl(ctrl, ST_PUSH_IDLE);
@@ -143,12 +130,13 @@ static void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
{
u32 config = 0, tbd;
+ u8 *dpcd = ctrl->panel->dpcd;
config |= (2 << 13); /* Default-> LSCLK DIV: 1/4 LCLK */
config |= (0 << 11); /* RGB */
/* Scrambler reset enable */
- if (ctrl->panel->dpcd.scrambler_reset)
+ if (dpcd[DP_EDP_CONFIGURATION_CAP] & DP_ALTERNATE_SCRAMBLER_RESET_CAP)
config |= (1 << 10);
tbd = ctrl->link->get_test_bits_depth(ctrl->link,
@@ -158,7 +146,7 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
/* Num of Lanes */
config |= ((ctrl->link->lane_count - 1) << 4);
- if (ctrl->panel->dpcd.enhanced_frame)
+ if (drm_dp_enhanced_frame_cap(dpcd))
config |= 0x40;
config |= 0x04; /* progressive video */
@@ -327,7 +315,7 @@ static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
even_distribution = 0;
min_hblank = 0;
- lclk = link_rate * DP_LINK_RATE_MULTIPLIER;
+ lclk = drm_dp_bw_code_to_link_rate(link_rate) * DP_KHZ_TO_HZ;
pr_debug("pclk=%lld, active_width=%d, h_blank=%d\n",
pclk, lwidth, h_blank);
@@ -763,7 +751,7 @@ static int dp_ctrl_update_sink_vx_px(struct dp_ctrl_private *ctrl,
buf[i] = voltage_level | pre_emphasis_level | max_level_reached;
pr_debug("p|v=0x%x\n", voltage_level | pre_emphasis_level);
- return ctrl->aux->write(ctrl->aux, 0x103, 4, AUX_NATIVE, buf);
+ return drm_dp_dpcd_write(ctrl->aux->drm_aux, 0x103, buf, 4);
}
static void dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
@@ -778,25 +766,6 @@ static void dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
dp_ctrl_update_sink_vx_px(ctrl, link->v_level, link->p_level);
}
-static void dp_ctrl_cap_lane_rate_set(struct dp_ctrl_private *ctrl)
-{
- u8 buf[4];
- struct dp_panel_dpcd *cap;
-
- cap = &ctrl->panel->dpcd;
-
- pr_debug("bw=%x lane=%d\n", ctrl->link->link_rate,
- ctrl->link->lane_count);
-
- buf[0] = ctrl->link->link_rate;
- buf[1] = ctrl->link->lane_count;
-
- if (cap->enhanced_frame)
- buf[1] |= 0x80;
-
- ctrl->aux->write(ctrl->aux, 0x100, 2, AUX_NATIVE, buf);
-}
-
static void dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
u8 pattern)
{
@@ -805,33 +774,39 @@ static void dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
pr_debug("pattern=%x\n", pattern);
buf[0] = pattern;
- ctrl->aux->write(ctrl->aux, 0x102, 1, AUX_NATIVE, buf);
+ drm_dp_dpcd_write(ctrl->aux->drm_aux, DP_TRAINING_PATTERN_SET, buf, 1);
}
static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl)
{
- int tries, old_v_level;
- int ret = 0;
- int usleep_time;
+ int tries, old_v_level, ret = 0, len = 0;
+ u8 link_status[DP_LINK_STATUS_SIZE];
int const maximum_retries = 5;
dp_ctrl_state_ctrl(ctrl, 0);
-
/* Make sure to clear the current pattern before starting a new one */
wmb();
ctrl->catalog->set_pattern(ctrl->catalog, 0x01);
- dp_ctrl_cap_lane_rate_set(ctrl);
- dp_ctrl_train_pattern_set(ctrl, 0x21); /* train_1 */
+ dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
+ DP_RECOVERED_CLOCK_OUT_EN); /* train_1 */
dp_ctrl_update_vx_px(ctrl);
tries = 0;
old_v_level = ctrl->link->v_level;
while (1) {
- usleep_time = ctrl->panel->dpcd.training_read_interval;
- usleep_range(usleep_time, usleep_time * 2);
+ drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd);
- if (ctrl->link->clock_recovery(ctrl->link)) {
+ len = drm_dp_dpcd_read_link_status(ctrl->aux->drm_aux,
+ link_status);
+ if (len < DP_LINK_STATUS_SIZE) {
+ pr_err("[%s]: DP link status read failed\n", __func__);
+ ret = -1;
+ break;
+ }
+
+ if (drm_dp_clock_recovery_ok(link_status,
+ ctrl->link->lane_count)) {
ret = 0;
break;
}
@@ -852,8 +827,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl)
old_v_level = ctrl->link->v_level;
}
- ctrl->link->adjust_levels(ctrl->link);
-
+ ctrl->link->adjust_levels(ctrl->link, link_status);
dp_ctrl_update_vx_px(ctrl);
}
@@ -869,15 +843,15 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
switch (ctrl->link->link_rate) {
case DP_LINK_RATE_810:
- ctrl->link->link_rate = DP_LINK_RATE_540;
+ ctrl->link->link_rate = DP_LINK_BW_5_4;
break;
- case DP_LINK_RATE_540:
- ctrl->link->link_rate = DP_LINK_RATE_270;
+ case DP_LINK_BW_5_4:
+ ctrl->link->link_rate = DP_LINK_BW_2_7;
break;
- case DP_LINK_RATE_270:
- ctrl->link->link_rate = DP_LINK_RATE_162;
+ case DP_LINK_BW_2_7:
+ ctrl->link->link_rate = DP_LINK_BW_1_62;
break;
- case DP_LINK_RATE_162:
+ case DP_LINK_BW_1_62:
default:
ret = -EINVAL;
break;
@@ -890,36 +864,38 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
{
- int usleep_time;
-
dp_ctrl_train_pattern_set(ctrl, 0);
-
- usleep_time = ctrl->panel->dpcd.training_read_interval;
- usleep_range(usleep_time, usleep_time * 2);
+ drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
}
static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
{
- int tries = 0;
- int ret = 0;
- int usleep_time;
+ int tries = 0, ret = 0, len = 0;
char pattern;
int const maximum_retries = 5;
+ u8 link_status[DP_LINK_STATUS_SIZE];
- if (ctrl->panel->dpcd.flags & DPCD_TPS3)
- pattern = 0x03;
+ if (drm_dp_tps3_supported(ctrl->panel->dpcd))
+ pattern = DP_TRAINING_PATTERN_3;
else
- pattern = 0x02;
+ pattern = DP_TRAINING_PATTERN_2;
dp_ctrl_update_vx_px(ctrl);
ctrl->catalog->set_pattern(ctrl->catalog, pattern);
- dp_ctrl_train_pattern_set(ctrl, pattern | 0x20);
+ dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
do {
- usleep_time = ctrl->panel->dpcd.training_read_interval;
- usleep_range(usleep_time, usleep_time * 2);
+ drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
- if (ctrl->link->channel_equalization(ctrl->link)) {
+ len = drm_dp_dpcd_read_link_status(ctrl->aux->drm_aux,
+ link_status);
+ if (len < DP_LINK_STATUS_SIZE) {
+ pr_err("[%s]: DP link status read failed\n", __func__);
+ ret = -1;
+ break;
+ }
+
+ if (drm_dp_channel_eq_ok(link_status, ctrl->link->lane_count)) {
ret = 0;
break;
}
@@ -930,8 +906,7 @@ static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
}
tries++;
- ctrl->link->adjust_levels(ctrl->link);
-
+ ctrl->link->adjust_levels(ctrl->link, link_status);
dp_ctrl_update_vx_px(ctrl);
} while (1);
@@ -941,12 +916,7 @@ static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl)
{
int ret = 0;
-
- ret = ctrl->aux->ready(ctrl->aux);
- if (!ret) {
- pr_err("aux chan NOT ready\n");
- return ret;
- }
+ struct drm_dp_link dp_link;
ctrl->link->p_level = 0;
ctrl->link->v_level = 0;
@@ -954,6 +924,11 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl)
dp_ctrl_config_ctrl(ctrl);
dp_ctrl_state_ctrl(ctrl, 0);
+ dp_link.num_lanes = ctrl->link->lane_count;
+ dp_link.rate = ctrl->link->link_rate;
+ dp_link.capabilities = ctrl->panel->dp_link.capabilities;
+ drm_dp_link_configure(ctrl->aux->drm_aux, &dp_link);
+
ret = dp_ctrl_link_train_1(ctrl);
if (ret < 0) {
if (!dp_ctrl_link_rate_down_shift(ctrl)) {
@@ -1007,7 +982,7 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, bool train)
ctrl->catalog->mainlink_ctrl(ctrl->catalog, true);
- dp_ctrl_set_sink_power_state(ctrl, SINK_POWER_ON);
+ drm_dp_link_power_up(ctrl->aux->drm_aux, &ctrl->panel->dp_link);
if (ctrl->link->phy_pattern_requested(ctrl->link))
goto end;
@@ -1065,8 +1040,7 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
ctrl->power->set_pixel_clk_parent(ctrl->power);
dp_ctrl_set_clock_rate(ctrl, "ctrl_link_clk",
- (ctrl->link->link_rate * DP_LINK_RATE_MULTIPLIER) /
- DP_KHZ_TO_HZ);
+ drm_dp_bw_code_to_link_rate(ctrl->link->link_rate));
dp_ctrl_set_clock_rate(ctrl, "ctrl_crypto_clk", DP_CRYPTO_CLK_RATE_KHZ);
@@ -1208,7 +1182,7 @@ static int dp_ctrl_on_hpd(struct dp_ctrl_private *ctrl)
ctrl->catalog->hpd_config(ctrl->catalog, true);
ctrl->link->link_rate = ctrl->panel->get_link_rate(ctrl->panel);
- ctrl->link->lane_count = ctrl->panel->dpcd.max_lane_count;
+ ctrl->link->lane_count = ctrl->panel->dp_link.num_lanes;
ctrl->pixel_rate = ctrl->panel->pinfo.pixel_clk_khz;
pr_debug("link_rate=%d, lane_count=%d, pixel_rate=%d\n",
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 850acbf..d3f6bca 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -195,6 +195,18 @@ static int dp_display_bind(struct device *dev, struct device *master,
goto end;
}
+ rc = dp->aux->drm_aux_register(dp->aux);
+ if (rc) {
+ pr_err("DRM DP AUX register failed\n");
+ goto end;
+ }
+
+ rc = dp->panel->sde_edid_register(dp->panel);
+ if (rc) {
+ pr_err("DRM DP EDID register failed\n");
+ goto end;
+ }
+
rc = dp->power->power_client_init(dp->power, &priv->phandle);
if (rc) {
pr_err("Power client create failed\n");
@@ -227,6 +239,10 @@ static void dp_display_unbind(struct device *dev, struct device *master,
(void)dp->power->power_client_deinit(dp->power);
+ (void) dp->panel->sde_edid_deregister(dp->panel);
+
+ (void) dp->aux->drm_aux_deregister(dp->aux);
+
(void)dp_display_debugfs_deinit(dp);
mutex_unlock(&dp->lock);
@@ -245,9 +261,8 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
if (rc)
goto end;
- rc = dp->panel->read_edid(dp->panel);
- if (rc)
- goto end;
+ sde_get_edid(dp->dp_display.connector, &dp->aux->drm_aux->ddc,
+ (void **)&dp->panel->edid_ctrl);
return 0;
end:
@@ -256,6 +271,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
static int dp_display_process_hpd_low(struct dp_display_private *dp)
{
+ dp->dp_display.is_connected = false;
return 0;
}
@@ -290,6 +306,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
if (dp->usbpd->hpd_high)
dp_display_process_hpd_high(dp);
+ dp->dp_display.is_connected = true;
mutex_unlock(&dp->lock);
end:
@@ -315,6 +332,7 @@ static int dp_display_usbpd_disconnect_cb(struct device *dev)
}
mutex_lock(&dp->lock);
+ dp->dp_display.is_connected = false;
disable_irq(dp->irq);
mutex_unlock(&dp->lock);
@@ -573,33 +591,17 @@ static int dp_display_validate_mode(struct dp_display *dp,
return 0;
}
-static int dp_display_get_modes(struct dp_display *dp,
- struct dp_display_mode *modes, u32 *count)
+static int dp_display_get_modes(struct dp_display *dp)
{
- *count = 1;
+ int ret = 0;
+ struct dp_display_private *dp_display;
- if (modes) {
- modes->timing.h_active = 1920;
- modes->timing.v_active = 1080;
- modes->timing.h_back_porch = 148;
- modes->timing.h_front_porch = 88;
- modes->timing.h_sync_width = 44;
- modes->timing.h_active_low = 0;
- modes->timing.v_back_porch = 36;
- modes->timing.v_front_porch = 4;
- modes->timing.v_sync_width = 5;
- modes->timing.v_active_low = 0;
- modes->timing.h_skew = 0;
- modes->timing.refresh_rate = 60;
- modes->timing.pixel_clk_khz = 148500;
- }
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
- return 0;
-}
+ ret = _sde_edid_update_modes(dp->connector,
+ dp_display->panel->edid_ctrl);
-static int dp_display_detect(struct dp_display *dp)
-{
- return 0;
+ return ret;
}
static int dp_display_probe(struct platform_device *pdev)
@@ -637,7 +639,6 @@ static int dp_display_probe(struct platform_device *pdev)
g_dp_display->set_mode = dp_display_set_mode;
g_dp_display->validate_mode = dp_display_validate_mode;
g_dp_display->get_modes = dp_display_get_modes;
- g_dp_display->detect = dp_display_detect;
g_dp_display->prepare = dp_display_prepare;
g_dp_display->unprepare = dp_display_unprepare;
g_dp_display->request_irq = dp_request_irq;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index e684854..877287a 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -27,6 +27,8 @@ struct dp_display_mode {
struct dp_display {
struct drm_device *drm_dev;
struct dp_bridge *bridge;
+ struct drm_connector *connector;
+ bool is_connected;
int (*enable)(struct dp_display *dp_display);
int (*post_enable)(struct dp_display *dp_display);
@@ -38,11 +40,7 @@ struct dp_display {
struct dp_display_mode *mode);
int (*validate_mode)(struct dp_display *dp_display,
struct dp_display_mode *mode);
- int (*get_modes)(struct dp_display *dp_display,
- struct dp_display_mode *modes, u32 *count);
-
- int (*detect)(struct dp_display *dp_display);
-
+ int (*get_modes)(struct dp_display *dp_display);
int (*prepare)(struct dp_display *dp_display);
int (*unprepare)(struct dp_display *dp_display);
int (*request_irq)(struct dp_display *dp_display);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 0f6e36f..78c04c4 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -26,8 +26,10 @@
#define to_dp_bridge(x) container_of((x), struct dp_bridge, base)
static void convert_to_dp_mode(const struct drm_display_mode *drm_mode,
- struct dp_display_mode *dp_mode)
+ struct dp_display_mode *dp_mode, struct dp_display *dp)
{
+ const u32 num_components = 3;
+
memset(dp_mode, 0, sizeof(*dp_mode));
dp_mode->timing.h_active = drm_mode->hdisplay;
@@ -45,6 +47,7 @@ static void convert_to_dp_mode(const struct drm_display_mode *drm_mode,
dp_mode->timing.v_front_porch = drm_mode->vsync_start -
drm_mode->vdisplay;
+ dp_mode->timing.bpp = dp->connector->display_info.bpc * num_components;
dp_mode->timing.refresh_rate = drm_mode->vrefresh;
@@ -235,7 +238,7 @@ static void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
dp = bridge->display;
memset(&bridge->dp_mode, 0x0, sizeof(struct dp_display_mode));
- convert_to_dp_mode(adjusted_mode, &bridge->dp_mode);
+ convert_to_dp_mode(adjusted_mode, &bridge->dp_mode, dp);
}
static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge,
@@ -257,7 +260,7 @@ static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge,
bridge = to_dp_bridge(drm_bridge);
dp = bridge->display;
- convert_to_dp_mode(mode, &dp_mode);
+ convert_to_dp_mode(mode, &dp_mode, dp);
rc = dp->validate_mode(dp, &dp_mode);
if (rc) {
@@ -289,6 +292,7 @@ int dp_connector_post_init(struct drm_connector *connector,
if (!info || !dp_display)
return -EINVAL;
+ dp_display->connector = connector;
return 0;
}
@@ -315,7 +319,7 @@ int dp_connector_get_topology(const struct drm_display_mode *drm_mode,
int dp_connector_get_info(struct msm_display_info *info, void *data)
{
- struct dsi_display *display = data;
+ struct dp_display *display = data;
if (!info || !display) {
pr_err("invalid params\n");
@@ -326,17 +330,10 @@ int dp_connector_get_info(struct msm_display_info *info, void *data)
info->num_of_h_tiles = 1;
info->h_tile_instance[0] = 0;
-
- info->is_connected = true;
- info->frame_rate = 60;
- info->width_mm = 160;
- info->height_mm = 90;
- info->max_width = 1920;
- info->max_height = 1080;
- info->vtotal = 1125;
- info->is_primary = true;
+ info->is_connected = display->is_connected;
info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
- info->capabilities |= MSM_DISPLAY_CAP_VID_MODE;
+ info->capabilities = MSM_DISPLAY_CAP_VID_MODE | MSM_DISPLAY_CAP_EDID |
+ MSM_DISPLAY_CAP_HOT_PLUG;
return 0;
}
@@ -375,60 +372,23 @@ enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
int dp_connector_get_modes(struct drm_connector *connector,
void *display)
{
- u32 count = 0;
- u32 size = 0;
- struct dp_display_mode *modes;
- struct drm_display_mode drm_mode;
+ int rc = 0;
struct dp_display *dp;
- int rc, i;
- if (!connector || !display || sde_connector_get_panel(connector))
- goto end;
+ if (!connector || !display)
+ return -EINVAL;
dp = display;
-
- rc = dp->get_modes(dp, NULL, &count);
- if (rc) {
- pr_err("failed to get num of modes, rc=%d\n", rc);
- goto end;
+ /* pluggable case assumes EDID is read when HPD is asserted */
+ if (dp->is_connected) {
+ rc = dp->get_modes(dp);
+ if (!rc)
+ pr_err("failed to get DP sink modes, rc=%d\n", rc);
+ } else {
+ pr_err("No sink connected\n");
}
- size = count * sizeof(*modes);
- modes = kzalloc(size, GFP_KERNEL);
- if (!modes) {
- count = 0;
- goto end;
- }
-
- rc = dp->get_modes(dp, modes, &count);
- if (rc) {
- pr_err("failed to get modes, rc=%d\n", rc);
- count = 0;
- goto error;
- }
-
- for (i = 0; i < count; i++) {
- struct drm_display_mode *m;
-
- memset(&drm_mode, 0x0, sizeof(drm_mode));
- convert_to_drm_mode(&modes[i], &drm_mode);
- m = drm_mode_duplicate(connector->dev, &drm_mode);
- if (!m) {
- pr_err("failed to add mode %ux%u\n",
- drm_mode.hdisplay,
- drm_mode.vdisplay);
- count = -ENOMEM;
- goto error;
- }
- m->width_mm = connector->display_info.width_mm;
- m->height_mm = connector->display_info.height_mm;
- drm_mode_probed_add(connector, m);
- }
-error:
- kfree(modes);
-end:
- pr_debug("MODE COUNT =%d\n\n", count);
- return count;
+ return 0;
}
int dp_drm_bridge_init(void *data, struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index e9955a9..741acfca 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -114,18 +114,6 @@ struct dp_link_sink_count {
bool cp_ready;
};
-struct dp_link_status {
- u8 lane_01_status;
- u8 lane_23_status;
- u8 interlane_align_done;
- u8 downstream_port_status_changed;
- u8 link_status_updated;
- u8 port_0_in_sync;
- u8 port_1_in_sync;
- u8 req_voltage_swing[4];
- u8 req_pre_emphasis[4];
-};
-
struct dp_link_private {
struct device *dev;
struct dp_aux *aux;
@@ -133,7 +121,7 @@ struct dp_link_private {
struct dp_link_request request;
struct dp_link_sink_count sink_count;
- struct dp_link_status link_status;
+ u8 link_status[DP_LINK_STATUS_SIZE];
};
/**
@@ -232,13 +220,12 @@ static int dp_link_get_period(struct dp_link_private *link, int const addr)
int ret = 0;
u8 *bp;
u8 data;
- int rlen;
u32 const param_len = 0x1;
u32 const max_audio_period = 0xA;
/* TEST_AUDIO_PERIOD_CH_XX */
- rlen = link->aux->read(link->aux, addr, param_len, AUX_NATIVE, &bp);
- if (rlen < param_len) {
+ if (drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp,
+ param_len) < param_len) {
pr_err("failed to read test_audio_period (0x%x)\n", addr);
ret = -EINVAL;
goto exit;
@@ -350,8 +337,8 @@ static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
int const max_audio_pattern_type = 0x1;
/* Read the requested audio pattern type (Byte 0x272). */
- rlen = link->aux->read(link->aux, test_audio_pattern_type_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux,
+ test_audio_pattern_type_addr, &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read link audio mode data\n");
ret = -EINVAL;
@@ -387,8 +374,8 @@ static int dp_link_parse_audio_mode(struct dp_link_private *link)
int channel_count = 0x0;
/* Read the requested audio mode (Byte 0x271). */
- rlen = link->aux->read(link->aux, test_audio_mode_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, test_audio_mode_addr,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read link audio mode data\n");
ret = -EINVAL;
@@ -555,7 +542,7 @@ static int dp_link_parse_timing_params1(struct dp_link_private *link,
return -EINVAL;
/* Read the requested video link pattern (Byte 0x221). */
- rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
if (rlen < len) {
pr_err("failed to read 0x%x\n", addr);
return -EINVAL;
@@ -576,7 +563,7 @@ static int dp_link_parse_timing_params2(struct dp_link_private *link,
return -EINVAL;
/* Read the requested video link pattern (Byte 0x221). */
- rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
if (rlen < len) {
pr_err("failed to read 0x%x\n", addr);
return -EINVAL;
@@ -596,7 +583,7 @@ static int dp_link_parse_timing_params3(struct dp_link_private *link,
int rlen;
/* Read the requested video link pattern (Byte 0x221). */
- rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
if (rlen < 1) {
pr_err("failed to read 0x%x\n", addr);
return -EINVAL;
@@ -625,8 +612,8 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
int const test_misc_addr = 0x232;
/* Read the requested video link pattern (Byte 0x221). */
- rlen = link->aux->read(link->aux, test_video_pattern_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, test_video_pattern_addr,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read link video pattern\n");
ret = -EINVAL;
@@ -647,8 +634,8 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
link->request.test_video_pattern));
/* Read the requested color bit depth and dynamic range (Byte 0x232) */
- rlen = link->aux->read(link->aux, test_misc_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, test_misc_addr,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read link bit depth\n");
ret = -EINVAL;
@@ -780,9 +767,9 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
*/
static bool dp_link_is_link_rate_valid(u32 link_rate)
{
- return ((link_rate == DP_LINK_RATE_162) ||
- (link_rate == DP_LINK_RATE_270) ||
- (link_rate == DP_LINK_RATE_540) ||
+ return ((link_rate == DP_LINK_BW_1_62) ||
+ (link_rate == DP_LINK_BW_2_7) ||
+ (link_rate == DP_LINK_BW_5_4) ||
(link_rate == DP_LINK_RATE_810));
}
@@ -814,12 +801,10 @@ static int dp_link_parse_link_training_params(struct dp_link_private *link)
int ret = 0;
int rlen;
int const param_len = 0x1;
- int const test_link_rate_addr = 0x219;
- int const test_lane_count_addr = 0x220;
/* Read the requested link rate (Byte 0x219). */
- rlen = link->aux->read(link->aux, test_link_rate_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_LINK_RATE,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read link rate\n");
ret = -EINVAL;
@@ -837,8 +822,8 @@ static int dp_link_parse_link_training_params(struct dp_link_private *link)
pr_debug("link rate = 0x%x\n", link->request.test_link_rate);
/* Read the requested lane count (Byte 0x220). */
- rlen = link->aux->read(link->aux, test_lane_count_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_LANE_COUNT,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read lane count\n");
ret = -EINVAL;
@@ -890,8 +875,8 @@ static int dp_link_parse_phy_test_params(struct dp_link_private *link)
int const phy_test_pattern_addr = 0x248;
int ret = 0;
- rlen = link->aux->read(link->aux, phy_test_pattern_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, phy_test_pattern_addr,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read phy link pattern\n");
ret = -EINVAL;
@@ -965,16 +950,14 @@ static int dp_link_parse_request(struct dp_link_private *link)
u8 data;
int rlen;
u32 const param_len = 0x1;
- u32 const device_service_irq_addr = 0x201;
- u32 const test_request_addr = 0x218;
u8 buf[4];
/**
* Read the device service IRQ vector (Byte 0x201) to determine
* whether an automated test has been requested by the sink.
*/
- rlen = link->aux->read(link->aux, device_service_irq_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR, &bp, param_len);
if (rlen < param_len) {
pr_err("aux read failed\n");
ret = -EINVAL;
@@ -994,8 +977,8 @@ static int dp_link_parse_request(struct dp_link_private *link)
* Read the link request byte (Byte 0x218) to determine what type
* of automated test has been requested by the sink.
*/
- rlen = link->aux->read(link->aux, test_request_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_REQUEST,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("aux read failed\n");
ret = -EINVAL;
@@ -1033,7 +1016,7 @@ static int dp_link_parse_request(struct dp_link_private *link)
end:
/* clear the link request IRQ */
buf[0] = 1;
- link->aux->write(link->aux, test_request_addr, 1, AUX_NATIVE, buf);
+ drm_dp_dpcd_write(link->aux->drm_aux, DP_TEST_REQUEST, buf, 1);
/**
* Send a TEST_ACK if all link parameters are valid, otherwise send
@@ -1060,10 +1043,9 @@ static void dp_link_parse_sink_count(struct dp_link_private *link)
u8 data;
int rlen;
int const param_len = 0x1;
- int const sink_count_addr = 0x200;
- rlen = link->aux->read(link->aux, sink_count_addr,
- param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_SINK_COUNT,
+ &bp, param_len);
if (rlen < param_len) {
pr_err("failed to read sink count\n");
return;
@@ -1080,67 +1062,16 @@ static void dp_link_parse_sink_count(struct dp_link_private *link)
link->sink_count.count, link->sink_count.cp_ready);
}
-static int dp_link_link_status_read(struct dp_link_private *link)
-{
- u8 *bp;
- u8 data;
- int rlen, ret = 0;
- int const addr = 0x202;
- int const len = 6;
- struct dp_link_status *sp;
-
- rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
- if (rlen < len) {
- pr_err("edp aux read failed\n");
- ret = -EINVAL;
- goto error;
- }
-
- sp = &link->link_status;
-
- data = *bp++; /* byte 0x202 */
- sp->lane_01_status = data; /* lane 0, 1 */
-
- data = *bp++; /* byte 0x203 */
- sp->lane_23_status = data; /* lane 2, 3 */
-
- data = *bp++; /* byte 0x204 */
- sp->interlane_align_done = (data & BIT(0));
- sp->downstream_port_status_changed = (data & BIT(6));
- sp->link_status_updated = (data & BIT(7));
-
- data = *bp++; /* byte 0x205 */
- sp->port_0_in_sync = (data & BIT(0));
- sp->port_1_in_sync = (data & BIT(1));
-
- data = *bp++; /* byte 0x206 */
- sp->req_voltage_swing[0] = data & 0x03;
- data >>= 2;
- sp->req_pre_emphasis[0] = data & 0x03;
- data >>= 2;
- sp->req_voltage_swing[1] = data & 0x03;
- data >>= 2;
- sp->req_pre_emphasis[1] = data & 0x03;
-
- data = *bp++; /* byte 0x207 */
- sp->req_voltage_swing[2] = data & 0x03;
- data >>= 2;
- sp->req_pre_emphasis[2] = data & 0x03;
- data >>= 2;
- sp->req_voltage_swing[3] = data & 0x03;
- data >>= 2;
- sp->req_pre_emphasis[3] = data & 0x03;
-
- return 0;
-error:
- return ret;
-}
-
static void dp_link_parse_sink_status_field(struct dp_link_private *link)
{
+ int len = 0;
+
dp_link_parse_sink_count(link);
dp_link_parse_request(link);
- dp_link_link_status_read(link);
+ len = drm_dp_dpcd_read_link_status(link->aux->drm_aux,
+ link->link_status);
+ if (len < DP_LINK_STATUS_SIZE)
+ pr_err("DP link status read failed\n");
}
static bool dp_link_is_link_training_requested(struct dp_link_private *link)
@@ -1196,7 +1127,7 @@ static int dp_link_parse_vx_px(struct dp_link_private *link)
pr_debug("\n");
- rlen = link->aux->read(link->aux, addr1, param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr1, &bp, param_len);
if (rlen < param_len) {
pr_err("failed reading lanes 0/1\n");
ret = -EINVAL;
@@ -1217,7 +1148,7 @@ static int dp_link_parse_vx_px(struct dp_link_private *link)
p1 = data & 0x3;
data = data >> 2;
- rlen = link->aux->read(link->aux, addr2, param_len, AUX_NATIVE, &bp);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr2, &bp, param_len);
if (rlen < param_len) {
pr_err("failed reading lanes 2/3\n");
ret = -EINVAL;
@@ -1294,76 +1225,6 @@ static int dp_link_process_phy_test_pattern_request(
return 0;
}
-static bool dp_link_is_link_status_updated(struct dp_link_private *link)
-{
- return link->link_status.link_status_updated;
-}
-
-static bool dp_link_channel_eq_done(struct dp_link_private *link)
-{
- u32 mask, data;
- struct dp_link *dp_link = &link->dp_link;
-
- pr_debug("\n");
-
- dp_link_link_status_read(link);
-
- if (!link->link_status.interlane_align_done) { /* not align */
- pr_err("interlane align failed\n");
- return 0;
- }
-
- if (dp_link->lane_count == 1) {
- mask = 0x7;
- data = link->link_status.lane_01_status;
- } else if (dp_link->lane_count == 2) {
- mask = 0x77;
- data = link->link_status.lane_01_status;
- } else {
- mask = 0x7777;
- data = link->link_status.lane_23_status;
- data <<= 8;
- data |= link->link_status.lane_01_status;
- }
-
- data &= mask;
- pr_debug("data=%x mask=%x\n", data, mask);
-
- if (data == mask)/* all done */
- return true;
-
- return false;
-}
-
-static bool dp_link_clock_recovery_done(struct dp_link_private *link)
-{
- u32 mask, data;
- struct dp_link *dp_link = &link->dp_link;
-
- dp_link_link_status_read(link);
-
- if (dp_link->lane_count == 1) {
- mask = 0x01; /* lane 0 */
- data = link->link_status.lane_01_status;
- } else if (dp_link->lane_count == 2) {
- mask = 0x011; /*B lane 0, 1 */
- data = link->link_status.lane_01_status;
- } else {
- mask = 0x01111; /*B lane 0, 1 */
- data = link->link_status.lane_23_status;
- data <<= 8;
- data |= link->link_status.lane_01_status;
- }
-
- data &= mask;
- pr_debug("data=%x mask=%x\n", data, mask);
-
- if (data == mask) /* all done */
- return true;
-
- return false;
-}
-
/**
* dp_link_process_link_status_update() - processes link status updates
* @link: Display Port link module data
@@ -1377,21 +1238,25 @@ static bool dp_link_clock_recovery_done(struct dp_link_private *link)
*/
static int dp_link_process_link_status_update(struct dp_link_private *link)
{
- if (!dp_link_is_link_status_updated(link) ||
- (dp_link_channel_eq_done(link) &&
- dp_link_clock_recovery_done(link)))
+ if (!(link->link_status[2] & BIT(7)) || /* link status updated */
+ (drm_dp_clock_recovery_ok(link->link_status,
+ link->dp_link.lane_count) &&
+ drm_dp_channel_eq_ok(link->link_status,
+ link->dp_link.lane_count)))
return -EINVAL;
pr_debug("channel_eq_done = %d, clock_recovery_done = %d\n",
- dp_link_channel_eq_done(link),
- dp_link_clock_recovery_done(link));
+ drm_dp_channel_eq_ok(link->link_status,
+ link->dp_link.lane_count),
+ drm_dp_clock_recovery_ok(link->link_status,
+ link->dp_link.lane_count));
return 0;
}
static bool dp_link_is_ds_port_status_changed(struct dp_link_private *link)
{
- return link->link_status.downstream_port_status_changed;
+ return (link->link_status[2] & BIT(6)); /* port status changed */
}
/**
@@ -1562,37 +1427,6 @@ static int dp_link_process_request(struct dp_link *dp_link)
return ret;
}
-static u8 *dp_link_get_voltage_swing(struct dp_link *dp_link)
-
-{
- struct dp_link_private *link;
-
- if (!dp_link) {
- pr_err("invalid input\n");
- return ERR_PTR(-EINVAL);
- }
-
- link = container_of(dp_link, struct dp_link_private, dp_link);
-
- return link->link_status.req_voltage_swing;
-}
-
-static u8 *dp_link_get_pre_emphasis(struct dp_link *dp_link)
-
-{
- struct dp_link_private *link;
-
-
- if (!dp_link) {
- pr_err("invalid input\n");
- return ERR_PTR(-EINVAL);
- }
-
- link = container_of(dp_link, struct dp_link_private, dp_link);
-
- return link->link_status.req_pre_emphasis;
-}
-
static int dp_link_get_colorimetry_config(struct dp_link *dp_link)
{
u32 cc;
@@ -1625,38 +1459,11 @@ static int dp_link_get_colorimetry_config(struct dp_link *dp_link)
return cc;
}
-static bool dp_link_clock_recovery(struct dp_link *dp_link)
-{
- struct dp_link_private *link;
-
- if (!dp_link) {
- pr_err("invalid input\n");
- return -EINVAL;
- }
-
- link = container_of(dp_link, struct dp_link_private, dp_link);
-
- return dp_link_clock_recovery_done(link);
-}
-
-static bool dp_link_channel_equalization(struct dp_link *dp_link)
-{
- struct dp_link_private *link;
-
- if (!dp_link) {
- pr_err("invalid input\n");
- return -EINVAL;
- }
-
- link = container_of(dp_link, struct dp_link_private, dp_link);
-
- return dp_link_channel_eq_done(link);
-}
-
-static int dp_link_adjust_levels(struct dp_link *dp_link)
+static int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
{
int i;
int max = 0;
+ u8 data;
struct dp_link_private *link;
if (!dp_link) {
@@ -1668,24 +1475,24 @@ static int dp_link_adjust_levels(struct dp_link *dp_link)
/* use the max level across lanes */
for (i = 0; i < dp_link->lane_count; i++) {
- pr_debug("lane=%d req_voltage_swing=%d\n",
- i, link->link_status.req_voltage_swing[i]);
- if (max < link->link_status.req_voltage_swing[i])
- max = link->link_status.req_voltage_swing[i];
+ data = drm_dp_get_adjust_request_voltage(link_status, i);
+ pr_debug("lane=%d req_voltage_swing=%d\n", i, data);
+ if (max < data)
+ max = data;
}
- dp_link->v_level = max;
+ dp_link->v_level = max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
/* use the max level across lanes */
max = 0;
for (i = 0; i < dp_link->lane_count; i++) {
- pr_debug("lane=%d req_pre_emphasis=%d\n",
- i, link->link_status.req_pre_emphasis[i]);
- if (max < link->link_status.req_pre_emphasis[i])
- max = link->link_status.req_pre_emphasis[i];
+ data = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+ pr_debug("lane=%d req_pre_emphasis=%d\n", i, data);
+ if (max < data)
+ max = data;
}
- dp_link->p_level = max;
+ dp_link->p_level = max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
/**
* Adjust the voltage swing and pre-emphasis level combination to within
@@ -1781,12 +1588,8 @@ struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux)
dp_link = &link->dp_link;
dp_link->process_request = dp_link_process_request;
- dp_link->get_voltage_swing = dp_link_get_voltage_swing;
dp_link->get_test_bits_depth = dp_link_get_test_bits_depth;
- dp_link->get_pre_emphasis = dp_link_get_pre_emphasis;
dp_link->get_colorimetry_config = dp_link_get_colorimetry_config;
- dp_link->clock_recovery = dp_link_clock_recovery;
- dp_link->channel_equalization = dp_link_channel_equalization;
dp_link->adjust_levels = dp_link_adjust_levels;
dp_link->send_psm_request = dp_link_send_psm_request;
dp_link->phy_pattern_requested = dp_link_phy_pattern_requested;
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index de10e9a..26249d6 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -54,15 +54,11 @@ struct dp_link {
u32 v_level;
u32 p_level;
- u8 *(*get_voltage_swing)(struct dp_link *dp_link);
- u8 *(*get_pre_emphasis)(struct dp_link *dp_link);
u32 (*get_test_bits_depth)(struct dp_link *dp_link, u32 bpp);
int (*process_request)(struct dp_link *dp_link);
int (*get_colorimetry_config)(struct dp_link *dp_link);
- int (*adjust_levels)(struct dp_link *dp_link);
+ int (*adjust_levels)(struct dp_link *dp_link, u8 *link_status);
int (*send_psm_request)(struct dp_link *dp_link, bool req);
- bool (*clock_recovery)(struct dp_link *dp_link);
- bool (*channel_equalization)(struct dp_link *dp_link);
bool (*phy_pattern_requested)(struct dp_link *dp_link);
};
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index f9616c4..fed1dbb 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -16,7 +16,9 @@
#include "dp_panel.h"
-#define DP_LINK_RATE_MULTIPLIER 27000000
+enum {
+ DP_LINK_RATE_MULTIPLIER = 27000000,
+};
struct dp_panel_private {
struct device *dev;
@@ -27,13 +29,10 @@ struct dp_panel_private {
static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
{
- u8 *bp;
- u8 data;
- u32 const addr = 0x0;
- u32 const len = 16;
int rlen, rc = 0;
struct dp_panel_private *panel;
- struct dp_panel_dpcd *cap;
+ struct drm_dp_link *dp_link;
+ u8 major = 0, minor = 0;
if (!dp_panel) {
pr_err("invalid input\n");
@@ -41,236 +40,38 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
goto end;
}
- cap = &dp_panel->dpcd;
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ dp_link = &dp_panel->dp_link;
- rlen = panel->aux->read(panel->aux, addr, len, AUX_NATIVE, &bp);
- if (rlen != len) {
+ rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_DPCD_REV,
+ dp_panel->dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+ if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
pr_err("dpcd read failed, rlen=%d\n", rlen);
rc = -EINVAL;
goto end;
}
- memset(cap, 0, sizeof(*cap));
+ dp_link->revision = dp_panel->dpcd[DP_DPCD_REV];
- data = *bp++; /* byte 0 */
- cap->major = (data >> 4) & 0x0f;
- cap->minor = data & 0x0f;
- pr_debug("version: %d.%d\n", cap->major, cap->minor);
+ major = (dp_link->revision >> 4) & 0x0f;
+ minor = dp_link->revision & 0x0f;
+ pr_debug("version: %d.%d\n", major, minor);
- data = *bp++; /* byte 1 */
- /* 162, 270, 540, 810 MB, symbol rate, NOT bit rate */
- cap->max_link_rate = data;
- pr_debug("link_rate=%d\n", cap->max_link_rate);
+ dp_link->rate =
+ drm_dp_bw_code_to_link_rate(dp_panel->dpcd[DP_MAX_LINK_RATE]);
+ pr_debug("link_rate=%d\n", dp_link->rate);
- data = *bp++; /* byte 2 */
- if (data & BIT(7))
- cap->enhanced_frame++;
+ dp_link->num_lanes = dp_panel->dpcd[DP_MAX_LANE_COUNT] &
+ DP_MAX_LANE_COUNT_MASK;
+ pr_debug("lane_count=%d\n", dp_link->num_lanes);
- if (data & 0x40) {
- cap->flags |= DPCD_TPS3;
- pr_debug("pattern 3 supported\n");
- } else {
- pr_debug("pattern 3 not supported\n");
- }
+ if (dp_panel->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+ dp_link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
- data &= 0x0f;
- cap->max_lane_count = data;
- pr_debug("lane_count=%d\n", cap->max_lane_count);
-
- data = *bp++; /* byte 3 */
- if (data & BIT(0)) {
- cap->flags |= DPCD_MAX_DOWNSPREAD_0_5;
- pr_debug("max_downspread\n");
- }
-
- if (data & BIT(6)) {
- cap->flags |= DPCD_NO_AUX_HANDSHAKE;
- pr_debug("NO Link Training\n");
- }
-
- data = *bp++; /* byte 4 */
- cap->num_rx_port = (data & BIT(0)) + 1;
- pr_debug("rx_ports=%d", cap->num_rx_port);
-
- data = *bp++; /* Byte 5: DOWN_STREAM_PORT_PRESENT */
- cap->downstream_port.dfp_present = data & BIT(0);
- cap->downstream_port.dfp_type = data & 0x6;
- cap->downstream_port.format_conversion = data & BIT(3);
- cap->downstream_port.detailed_cap_info_available = data & BIT(4);
- pr_debug("dfp_present = %d, dfp_type = %d\n",
- cap->downstream_port.dfp_present,
- cap->downstream_port.dfp_type);
- pr_debug("format_conversion = %d, detailed_cap_info_available = %d\n",
- cap->downstream_port.format_conversion,
- cap->downstream_port.detailed_cap_info_available);
-
- bp += 1; /* Skip Byte 6 */
- rlen -= 1;
-
- data = *bp++; /* Byte 7: DOWN_STREAM_PORT_COUNT */
- cap->downstream_port.dfp_count = data & 0x7;
- cap->downstream_port.msa_timing_par_ignored = data & BIT(6);
- cap->downstream_port.oui_support = data & BIT(7);
- pr_debug("dfp_count = %d, msa_timing_par_ignored = %d\n",
- cap->downstream_port.dfp_count,
- cap->downstream_port.msa_timing_par_ignored);
- pr_debug("oui_support = %d\n", cap->downstream_port.oui_support);
-
- data = *bp++; /* byte 8 */
- if (data & BIT(1)) {
- cap->flags |= DPCD_PORT_0_EDID_PRESENTED;
- pr_debug("edid presented\n");
- }
-
- data = *bp++; /* byte 9 */
- cap->rx_port0_buf_size = (data + 1) * 32;
- pr_debug("lane_buf_size=%d\n", cap->rx_port0_buf_size);
-
- bp += 2; /* skip 10, 11 port1 capability */
- rlen -= 2;
-
- data = *bp++; /* byte 12 */
- cap->i2c_speed_ctrl = data;
- if (cap->i2c_speed_ctrl > 0)
- pr_debug("i2c_rate=%d", cap->i2c_speed_ctrl);
-
- data = *bp++; /* byte 13 */
- cap->scrambler_reset = data & BIT(0);
- pr_debug("scrambler_reset=%d\n", cap->scrambler_reset);
-
- if (data & BIT(1))
- cap->enhanced_frame++;
-
- pr_debug("enhanced_framing=%d\n", cap->enhanced_frame);
-
- data = *bp++; /* byte 14 */
- if (data == 0)
- cap->training_read_interval = 4000; /* us */
- else
- cap->training_read_interval = 4000 * data; /* us */
- pr_debug("training_interval=%d\n", cap->training_read_interval);
end:
return rc;
}
-/*
- * edid standard header bytes
- */
-static u8 edid_hdr[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
-
-static bool dp_panel_is_edid_header_valid(u8 *buf)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(edid_hdr); i++) {
- if (buf[i] != edid_hdr[i])
- return false;
- }
-
- return true;
-}
-
-static int dp_panel_validate_edid(u8 *bp, int len)
-{
- int i;
- u8 csum = 0;
- u32 const size = 128;
-
- if (len < size) {
- pr_err("Error: len=%x\n", len);
- return -EINVAL;
- }
-
- for (i = 0; i < size; i++)
- csum += *bp++;
-
- if (csum != 0) {
- pr_err("error: csum=0x%x\n", csum);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int dp_panel_read_edid(struct dp_panel *dp_panel)
-{
- u8 *edid_buf;
- u32 checksum = 0;
- int rlen, ret = 0;
- int edid_blk = 0, blk_num = 0, retries = 10;
- u32 const segment_addr = 0x30;
- bool edid_parsing_done = false;
- struct dp_panel_private *panel;
-
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
- ret = panel->aux->ready(panel->aux);
- if (!ret) {
- pr_err("aux chan NOT ready\n");
- goto end;
- }
-
- do {
- u8 segment;
-
-
- /*
- * Write the segment first.
- * Segment = 0, for blocks 0 and 1
- * Segment = 1, for blocks 2 and 3
- * Segment = 2, for blocks 3 and 4
- * and so on ...
- */
- segment = blk_num >> 1;
-
- panel->aux->write(panel->aux, segment_addr, 1, AUX_I2C,
- &segment);
-
- rlen = panel->aux->read(panel->aux, EDID_START_ADDRESS +
- (blk_num * EDID_BLOCK_SIZE),
- EDID_BLOCK_SIZE, AUX_I2C, &edid_buf);
- if (rlen != EDID_BLOCK_SIZE) {
- pr_err("invalid edid len: %d\n", rlen);
- continue;
- }
-
- pr_debug("=== EDID data ===\n");
- print_hex_dump(KERN_DEBUG, "EDID: ", DUMP_PREFIX_NONE, 16, 1,
- edid_buf, EDID_BLOCK_SIZE, false);
-
- pr_debug("blk_num=%d, rlen=%d\n", blk_num, rlen);
-
- if (dp_panel_is_edid_header_valid(edid_buf)) {
- ret = dp_panel_validate_edid(edid_buf, rlen);
- if (ret) {
- pr_err("corrupt edid block detected\n");
- goto end;
- }
-
- if (edid_parsing_done) {
- blk_num++;
- continue;
- }
-
- dp_panel->edid.ext_block_cnt = edid_buf[0x7E];
- edid_parsing_done = true;
- checksum = edid_buf[rlen - 1];
- } else {
- edid_blk++;
- blk_num++;
- }
-
- memcpy(dp_panel->edid.buf + (edid_blk * EDID_BLOCK_SIZE),
- edid_buf, EDID_BLOCK_SIZE);
-
- if (edid_blk == dp_panel->edid.ext_block_cnt)
- goto end;
- } while (retries--);
-end:
- return ret;
-}
-
static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
{
int rc = 0;
@@ -334,6 +135,36 @@ static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
return rc;
}
+static int dp_panel_edid_register(struct dp_panel *dp_panel)
+{
+ int rc = 0;
+
+ if (!dp_panel) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ dp_panel->edid_ctrl = sde_edid_init();
+ if (!dp_panel->edid_ctrl) {
+ pr_err("sde edid init for DP failed\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+end:
+ return rc;
+}
+
+static void dp_panel_edid_deregister(struct dp_panel *dp_panel)
+{
+ if (!dp_panel) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ sde_edid_deinit((void **)&dp_panel->edid_ctrl);
+}
+
static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
{
int rc = 0;
@@ -350,12 +181,12 @@ static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
return rc;
}
-static u8 dp_panel_get_link_rate(struct dp_panel *dp_panel)
+static u32 dp_panel_get_link_rate(struct dp_panel *dp_panel)
{
const u32 encoding_factx10 = 8;
const u32 ln_to_link_ratio = 10;
u32 min_link_rate, reminder = 0;
- u8 calc_link_rate = 0, lane_cnt;
+ u32 calc_link_rate = 0, lane_cnt, max_rate = 0;
struct dp_panel_private *panel;
struct dp_panel_info *pinfo;
@@ -366,11 +197,10 @@ static u8 dp_panel_get_link_rate(struct dp_panel *dp_panel)
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
- lane_cnt = dp_panel->dpcd.max_lane_count;
+ lane_cnt = dp_panel->dp_link.num_lanes;
+ max_rate = drm_dp_link_rate_to_bw_code(dp_panel->dp_link.rate);
pinfo = &dp_panel->pinfo;
- pinfo->bpp = 24;
-
/*
* The max pixel clock supported is 675Mhz. The
* current calculations below will make sure
@@ -393,12 +223,12 @@ static u8 dp_panel_get_link_rate(struct dp_panel *dp_panel)
min_link_rate += 1;
pr_debug("min_link_rate = %d\n", min_link_rate);
- if (min_link_rate <= DP_LINK_RATE_162)
- calc_link_rate = DP_LINK_RATE_162;
- else if (min_link_rate <= DP_LINK_RATE_270)
- calc_link_rate = DP_LINK_RATE_270;
- else if (min_link_rate <= DP_LINK_RATE_540)
- calc_link_rate = DP_LINK_RATE_540;
+ if (min_link_rate <= DP_LINK_BW_1_62)
+ calc_link_rate = DP_LINK_BW_1_62;
+ else if (min_link_rate <= DP_LINK_BW_2_7)
+ calc_link_rate = DP_LINK_BW_2_7;
+ else if (min_link_rate <= DP_LINK_BW_5_4)
+ calc_link_rate = DP_LINK_BW_5_4;
else if (min_link_rate <= DP_LINK_RATE_810)
calc_link_rate = DP_LINK_RATE_810;
else {
@@ -407,8 +237,8 @@ static u8 dp_panel_get_link_rate(struct dp_panel *dp_panel)
calc_link_rate = DP_LINK_RATE_810;
}
- if (calc_link_rate > dp_panel->dpcd.max_link_rate)
- calc_link_rate = dp_panel->dpcd.max_link_rate;
+ if (calc_link_rate > max_rate)
+ calc_link_rate = max_rate;
pr_debug("calc_link_rate = 0x%x\n", calc_link_rate);
end:
@@ -440,12 +270,10 @@ struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
dp_panel = &panel->dp_panel;
- dp_panel->edid.buf = devm_kzalloc(dev,
- sizeof(EDID_BLOCK_SIZE) * 4, GFP_KERNEL);
-
+ dp_panel->sde_edid_register = dp_panel_edid_register;
+ dp_panel->sde_edid_deregister = dp_panel_edid_deregister;
dp_panel->init_info = dp_panel_init_panel_info;
dp_panel->timing_cfg = dp_panel_timing_cfg;
- dp_panel->read_edid = dp_panel_read_edid;
dp_panel->read_dpcd = dp_panel_read_dpcd;
dp_panel->get_link_rate = dp_panel_get_link_rate;
@@ -463,6 +291,5 @@ void dp_panel_put(struct dp_panel *dp_panel)
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
- devm_kfree(panel->dev, dp_panel->edid.buf);
devm_kfree(panel->dev, panel);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 5c145eb..5852c70 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -16,66 +16,9 @@
#define _DP_PANEL_H_
#include "dp_aux.h"
+#include "sde_edid_parser.h"
-#define DPCD_ENHANCED_FRAME BIT(0)
-#define DPCD_TPS3 BIT(1)
-#define DPCD_MAX_DOWNSPREAD_0_5 BIT(2)
-#define DPCD_NO_AUX_HANDSHAKE BIT(3)
-#define DPCD_PORT_0_EDID_PRESENTED BIT(4)
-
-#define EDID_START_ADDRESS 0x50
-#define EDID_BLOCK_SIZE 0x80
-
-
-#define DP_LINK_RATE_162 6 /* 1.62G = 270M * 6 */
-#define DP_LINK_RATE_270 10 /* 2.70G = 270M * 10 */
-#define DP_LINK_RATE_540 20 /* 5.40G = 270M * 20 */
#define DP_LINK_RATE_810 30 /* 8.10G = 270M * 30 */
-#define DP_LINK_RATE_MAX DP_LINK_RATE_810
-
-struct downstream_port_config {
- /* Byte 02205h */
- bool dfp_present;
- u32 dfp_type;
- bool format_conversion;
- bool detailed_cap_info_available;
- /* Byte 02207h */
- u32 dfp_count;
- bool msa_timing_par_ignored;
- bool oui_support;
-};
-
-struct dp_panel_dpcd {
- u8 major;
- u8 minor;
- u8 max_lane_count;
- u8 num_rx_port;
- u8 i2c_speed_ctrl;
- u8 scrambler_reset;
- u8 enhanced_frame;
- u32 max_link_rate; /* 162, 270 and 540 Mb, divided by 10 */
- u32 flags;
- u32 rx_port0_buf_size;
- u32 training_read_interval;/* us */
- struct downstream_port_config downstream_port;
-};
-
-struct dp_panel_edid {
- u8 *buf;
- u8 id_name[4];
- u8 id_product;
- u8 version;
- u8 revision;
- u8 video_intf; /* dp == 0x5 */
- u8 color_depth; /* 6, 8, 10, 12 and 14 bits */
- u8 color_format; /* RGB 4:4:4, YCrCb 4:4:4, Ycrcb 4:2:2 */
- u8 dpm; /* display power management */
- u8 sync_digital; /* 1 = digital */
- u8 sync_separate; /* 1 = separate */
- u8 vsync_pol; /* 0 = negative, 1 = positive */
- u8 hsync_pol; /* 0 = negative, 1 = positive */
- u8 ext_block_cnt;
-};
struct dp_panel_info {
u32 h_active;
@@ -95,17 +38,21 @@ struct dp_panel_info {
};
struct dp_panel {
- struct dp_panel_dpcd dpcd;
- struct dp_panel_edid edid;
+ /* dpcd raw data */
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ struct drm_dp_link dp_link;
+
+ struct sde_edid_ctrl *edid_ctrl;
struct dp_panel_info pinfo;
u32 vic;
+ int (*sde_edid_register)(struct dp_panel *dp_panel);
+ void (*sde_edid_deregister)(struct dp_panel *dp_panel);
int (*init_info)(struct dp_panel *dp_panel);
int (*timing_cfg)(struct dp_panel *dp_panel);
- int (*read_edid)(struct dp_panel *dp_panel);
int (*read_dpcd)(struct dp_panel *dp_panel);
- u8 (*get_link_rate)(struct dp_panel *dp_panel);
+ u32 (*get_link_rate)(struct dp_panel *dp_panel);
};
struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 133dc93..3dd4950 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -1371,21 +1371,21 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
goto error_disable_cmd_engine;
}
}
- return rc;
-put_iova:
- msm_gem_put_iova(display->tx_cmd_buf, 0);
-free_gem:
- msm_gem_free_object(display->tx_cmd_buf);
error_disable_cmd_engine:
(void)dsi_display_cmd_engine_disable(display);
error_disable_clks:
rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
DSI_ALL_CLKS, DSI_CLK_OFF);
if (rc) {
- pr_err("[%s] failed to enable all DSI clocks, rc=%d\n",
+ pr_err("[%s] failed to disable all DSI clocks, rc=%d\n",
display->name, rc);
}
+ return rc;
+put_iova:
+ msm_gem_put_iova(display->tx_cmd_buf, 0);
+free_gem:
+ msm_gem_free_object(display->tx_cmd_buf);
error:
return rc;
}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 37ed411..4e09cfb 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -20,6 +20,7 @@
#include "msm_kms.h"
#include "sde_connector.h"
#include "dsi_drm.h"
+#include "sde_trace.h"
#define to_dsi_bridge(x) container_of((x), struct dsi_bridge, base)
#define to_dsi_state(x) container_of((x), struct dsi_connector_state, base)
@@ -134,19 +135,24 @@ static void dsi_bridge_pre_enable(struct drm_bridge *bridge)
return;
}
+ SDE_ATRACE_BEGIN("dsi_bridge_pre_enable");
rc = dsi_display_prepare(c_bridge->display);
if (rc) {
pr_err("[%d] DSI display prepare failed, rc=%d\n",
c_bridge->id, rc);
+ SDE_ATRACE_END("dsi_bridge_pre_enable");
return;
}
+ SDE_ATRACE_BEGIN("dsi_display_enable");
rc = dsi_display_enable(c_bridge->display);
if (rc) {
pr_err("[%d] DSI display enable failed, rc=%d\n",
c_bridge->id, rc);
(void)dsi_display_unprepare(c_bridge->display);
}
+ SDE_ATRACE_END("dsi_display_enable");
+ SDE_ATRACE_END("dsi_bridge_pre_enable");
}
static void dsi_bridge_enable(struct drm_bridge *bridge)
@@ -197,19 +203,25 @@ static void dsi_bridge_post_disable(struct drm_bridge *bridge)
return;
}
+ SDE_ATRACE_BEGIN("dsi_bridge_post_disable");
+ SDE_ATRACE_BEGIN("dsi_display_disable");
rc = dsi_display_disable(c_bridge->display);
if (rc) {
pr_err("[%d] DSI display disable failed, rc=%d\n",
c_bridge->id, rc);
+ SDE_ATRACE_END("dsi_display_disable");
return;
}
+ SDE_ATRACE_END("dsi_display_disable");
rc = dsi_display_unprepare(c_bridge->display);
if (rc) {
pr_err("[%d] DSI display unprepare failed, rc=%d\n",
c_bridge->id, rc);
+ SDE_ATRACE_END("dsi_bridge_post_disable");
return;
}
+ SDE_ATRACE_END("dsi_bridge_post_disable");
}
static void dsi_bridge_mode_set(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index ff6802e..efeea31 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -20,6 +20,7 @@
#include "msm_kms.h"
#include "msm_gem.h"
#include "msm_fence.h"
+#include "sde_trace.h"
struct msm_commit {
struct drm_device *dev;
@@ -96,6 +97,7 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
struct drm_crtc_state *old_crtc_state;
int i;
+ SDE_ATRACE_BEGIN("msm_disable");
for_each_connector_in_state(old_state, connector, old_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
@@ -177,6 +179,7 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
else
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
+ SDE_ATRACE_END("msm_disable");
}
static void
@@ -286,6 +289,7 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
int bridge_enable_count = 0;
int i;
+ SDE_ATRACE_BEGIN("msm_enable");
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
@@ -352,8 +356,10 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
}
/* If no bridges were pre_enabled, skip iterating over them again */
- if (bridge_enable_count == 0)
+ if (bridge_enable_count == 0) {
+ SDE_ATRACE_END("msm_enable");
return;
+ }
for_each_connector_in_state(old_state, connector, old_conn_state, i) {
struct drm_encoder *encoder;
@@ -373,6 +379,7 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_bridge_enable(encoder->bridge);
}
+ SDE_ATRACE_END("msm_enable");
}
/* The (potentially) asynchronous part of the commit. At this point
@@ -430,7 +437,9 @@ static void _msm_drm_commit_work_cb(struct kthread_work *work)
commit = container_of(work, struct msm_commit, commit_work);
+ SDE_ATRACE_BEGIN("complete_commit");
complete_commit(commit);
+ SDE_ATRACE_END("complete_commit");
}
static struct msm_commit *commit_init(struct drm_atomic_state *state)
@@ -512,9 +521,12 @@ int msm_atomic_commit(struct drm_device *dev,
struct drm_plane_state *plane_state;
int i, ret;
+ SDE_ATRACE_BEGIN("atomic_commit");
ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
+ if (ret) {
+ SDE_ATRACE_END("atomic_commit");
return ret;
+ }
c = commit_init(state);
if (!c) {
@@ -592,14 +604,17 @@ int msm_atomic_commit(struct drm_device *dev,
commit_destroy(c);
goto error;
}
+ SDE_ATRACE_END("atomic_commit");
return 0;
}
complete_commit(c);
+ SDE_ATRACE_END("atomic_commit");
return 0;
error:
drm_atomic_helper_cleanup_planes(dev, state);
+ SDE_ATRACE_END("atomic_commit");
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index f75be8a..962087c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -59,12 +59,68 @@
#define TEARDOWN_DEADLOCK_RETRY_MAX 5
+static void msm_drm_helper_hotplug_event(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ char *event_string;
+ char const *connector_name;
+ char *envp[2];
+
+ if (!dev) {
+ DRM_ERROR("hotplug_event failed, invalid input\n");
+ return;
+ }
+
+ if (!dev->mode_config.poll_enabled)
+ return;
+
+ event_string = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!event_string) {
+ DRM_ERROR("failed to allocate event string\n");
+ return;
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(connector, dev) {
+ /* Only handle HPD capable connectors. */
+ if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ connector->status = connector->funcs->detect(connector, false);
+
+ if (connector->name)
+ connector_name = connector->name;
+ else
+ connector_name = "unknown";
+
+ snprintf(event_string, SZ_4K, "name=%s status=%s\n",
+ connector_name,
+ drm_get_connector_status_name(connector->status));
+ DRM_DEBUG("generating hotplug event [%s]\n", event_string);
+ envp[0] = event_string;
+ envp[1] = NULL;
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
+ envp);
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ kfree(event_string);
+}
+
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_private *priv = NULL;
+
+ if (!dev) {
+ DRM_ERROR("output_poll_changed failed, invalid input\n");
+ return;
+ }
+
+ priv = dev->dev_private;
if (priv->fbdev)
drm_fb_helper_hotplug_event(priv->fbdev);
+ else
+ msm_drm_helper_hotplug_event(dev);
}
int msm_atomic_check(struct drm_device *dev,
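Reviewer note (illustrative): msm_drm_helper_hotplug_event() re-runs detect() on every HPD-capable connector and sends one KOBJ_CHANGE uevent per connector. For a hypothetical DisplayPort connector the payload would look like:

    envp[0] = "name=DP-1 status=connected\n"; /* name/status are whatever
                                               * detect() reported */
    envp[1] = NULL;
    kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);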
diff --git a/drivers/gpu/drm/msm/msm_evtlog.c b/drivers/gpu/drm/msm/msm_evtlog.c
deleted file mode 100644
index dbe9b88..0000000
--- a/drivers/gpu/drm/msm/msm_evtlog.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "msm_evtlog:[%s] " fmt, __func__
-
-#include "msm_evtlog.h"
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <asm-generic/current.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-
-#include "sde_trace.h"
-
-#define SIZE_MASK(x) (x - 1)
-
-static int msm_evtlog_debugfs_dump(struct seq_file *s, void *data)
-{
- struct msm_evtlog *log = s->private;
- unsigned long cnt; /* # of samples since clear */
- unsigned long n; /* # of samples to print, also head index */
- unsigned long i;
- struct timespec timespec;
-
- /**
- * Prints in chronological order, oldest -> newest
- * Note due to lock-less design, the first few printed entries
- * may be corrupted by new writer not oldest.
- * This is a tradeoff for speed of sampling
- */
- cnt = atomic_read(&log->cnt);
- if (!cnt)
- return 0;
-
- n = cnt & SIZE_MASK(log->size);
-
- /**
- * If not full, print from first log
- * (which is index 1 since atomic_inc_return is prefix operator)
- */
- i = (cnt < log->size) ? 0 : n;
-
- seq_puts(s, "time_ns, pid, func, line, val1, val2, msg\n");
- do {
- i = (i + 1) & SIZE_MASK(log->size);
- timespec = ktime_to_timespec(log->events[i].ktime);
- seq_printf(s, "[%5lu.%06lu], %d, %s, %d, %llu, %llu, %s\n",
- timespec.tv_sec,
- timespec.tv_nsec / 1000,
- log->events[i].pid,
- log->events[i].func,
- log->events[i].line,
- log->events[i].val1,
- log->events[i].val2,
- log->events[i].msg);
- } while (i != n);
-
- return 0;
-}
-
-static int msm_evtlog_debugfs_open_dump(struct inode *inode, struct file *file)
-{
- return single_open(file, msm_evtlog_debugfs_dump, inode->i_private);
-}
-
-static ssize_t msm_evtlog_debugfs_write(
- struct file *file,
- const char __user *user_buf,
- size_t size,
- loff_t *ppos)
-{
- struct seq_file *s = file->private_data;
- struct msm_evtlog *log = s->private;
- char buf[64];
- int buf_size;
-
- buf_size = min(size, (sizeof(buf) - 1));
- if (strncpy_from_user(buf, user_buf, buf_size) < 0)
- return -EFAULT;
- buf[buf_size] = 0;
-
- if (strcmp(buf, "0") == 0)
- atomic_set(&log->cnt, 0);
-
- return size;
-
-}
-
-static const struct file_operations msm_evtlog_fops = {
- .open = msm_evtlog_debugfs_open_dump,
- .read = seq_read,
- .write = msm_evtlog_debugfs_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-int msm_evtlog_init(
- struct msm_evtlog *log,
- int size,
- struct dentry *parent)
-{
- if (!log || size < 1) {
- pr_err("Invalid params\n");
- return -EINVAL;
- }
-
- memset(log, 0, sizeof(*log));
- log->size = roundup_pow_of_two(size);
- log->events = kcalloc(log->size, sizeof(struct msm_evtlog_evt),
- GFP_KERNEL);
-
- if (!log->events) {
- pr_err("Insufficient memory\n");
- return -ENOMEM;
- }
-
- atomic_set(&log->cnt, 0);
-
- log->dentry = debugfs_create_file("evtlog", 0644, parent,
- log, &msm_evtlog_fops);
-
- if (IS_ERR_OR_NULL(log->dentry)) {
- int rc = PTR_ERR(log->dentry);
-
- pr_err("debugfs create file failed, rc=%d\n", rc);
- kfree(log->events);
- return rc;
- }
-
- return 0;
-}
-
-void msm_evtlog_destroy(struct msm_evtlog *log)
-{
- debugfs_remove(log->dentry);
-
- /* Caller needs to make sure that log sampling has stopped */
- kfree(log->events);
-
-}
-
-void msm_evtlog_sample(
- struct msm_evtlog *log,
- const char *func,
- const char *msg,
- uint64_t val1,
- uint64_t val2,
- uint32_t line)
-{
- unsigned long i;
-
- /**
- * Since array sized with pow of 2, roll to 0 when cnt overflows
- * mod the value with the size to get current idx into array
- */
- i = (unsigned long)(atomic_inc_return(&log->cnt)) &
- SIZE_MASK(log->size);
- log->events[i].ktime = ktime_get();
- log->events[i].func = func;
- log->events[i].msg = msg;
- log->events[i].val1 = val1;
- log->events[i].val2 = val2;
- log->events[i].line = line;
- log->events[i].pid = current->pid;
-
- trace_sde_evtlog(func, line, val1, val2);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index f42e510..fd79016 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -104,13 +104,14 @@ static bool _sde_core_video_mode_intf_connected(struct drm_crtc *crtc)
return intf_connected;
}
-static void _sde_core_perf_calc_crtc(struct drm_crtc *crtc,
+static void _sde_core_perf_calc_crtc(struct sde_kms *kms,
+ struct drm_crtc *crtc,
struct drm_crtc_state *state,
struct sde_core_perf_params *perf)
{
struct sde_crtc_state *sde_cstate;
- if (!crtc || !state || !perf) {
+ if (!kms || !kms->catalog || !crtc || !state || !perf) {
SDE_ERROR("invalid parameters\n");
return;
}
@@ -124,6 +125,20 @@ static void _sde_core_perf_calc_crtc(struct drm_crtc *crtc,
perf->core_clk_rate =
sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
+ if (!sde_cstate->bw_control) {
+ perf->bw_ctl = kms->catalog->perf.max_bw_high * 1000ULL;
+ perf->max_per_pipe_ib = perf->bw_ctl;
+ perf->core_clk_rate = kms->perf.max_core_clk_rate;
+ } else if (kms->perf.perf_tune.mode == SDE_PERF_MODE_MINIMUM) {
+ perf->bw_ctl = 0;
+ perf->max_per_pipe_ib = 0;
+ perf->core_clk_rate = 0;
+ } else if (kms->perf.perf_tune.mode == SDE_PERF_MODE_FIXED) {
+ perf->bw_ctl = kms->perf.fix_core_ab_vote;
+ perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
+ perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+ }
+
SDE_DEBUG("crtc=%d clk_rate=%llu ib=%llu ab=%llu\n",
crtc->base.id, perf->core_clk_rate,
perf->max_per_pipe_ib, perf->bw_ctl);
@@ -157,9 +172,8 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
sde_cstate = to_sde_crtc_state(state);
- /* swap state and obtain new values */
- sde_cstate->cur_perf = sde_cstate->new_perf;
- _sde_core_perf_calc_crtc(crtc, state, &sde_cstate->new_perf);
+ /* obtain new values */
+ _sde_core_perf_calc_crtc(kms, crtc, state, &sde_cstate->new_perf);
bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl;
curr_client_type = sde_crtc_get_client_type(crtc);
@@ -189,11 +203,9 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
if (!sde_cstate->bw_control) {
SDE_DEBUG("bypass bandwidth check\n");
} else if (!threshold) {
- sde_cstate->new_perf = sde_cstate->cur_perf;
SDE_ERROR("no bandwidth limits specified\n");
return -E2BIG;
} else if (bw > threshold) {
- sde_cstate->new_perf = sde_cstate->cur_perf;
SDE_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
return -E2BIG;
}
@@ -332,6 +344,7 @@ static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
{
struct drm_crtc *tmp_crtc;
+ struct sde_crtc *sde_crtc;
struct sde_crtc_state *sde_cstate;
struct sde_kms *kms;
@@ -346,6 +359,7 @@ void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
return;
}
+ sde_crtc = to_sde_crtc(crtc);
sde_cstate = to_sde_crtc_state(crtc->state);
/* only do this for command mode rt client (non-rsc client) */
@@ -368,8 +382,7 @@ void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
/* Release the bandwidth */
if (kms->perf.enable_bw_release) {
trace_sde_cmd_release_bw(crtc->base.id);
- sde_cstate->cur_perf.bw_ctl = 0;
- sde_cstate->new_perf.bw_ctl = 0;
+ sde_crtc->cur_perf.bw_ctl = 0;
SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
_sde_core_perf_crtc_update_bus(kms, crtc);
}
@@ -432,19 +445,21 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
SDE_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
crtc->base.id, stop_req, kms->perf.core_clk_rate);
- old = &sde_cstate->cur_perf;
+ old = &sde_crtc->cur_perf;
new = &sde_cstate->new_perf;
if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
/*
* cases for bus bandwidth update.
- * 1. new bandwidth vote or writeback output vote
- * are higher than current vote for update request.
- * 2. new bandwidth vote or writeback output vote are
- * lower than current vote at end of commit or stop.
+ * 1. new bandwidth vote ("ab" or "ib" vote) is higher
+ * than the current vote for an update request.
+ * 2. new bandwidth vote ("ab" or "ib" vote) is lower
+ * than the current vote at end of commit or stop.
*/
- if ((params_changed && ((new->bw_ctl > old->bw_ctl))) ||
- (!params_changed && ((new->bw_ctl < old->bw_ctl)))) {
+ if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
+ (new->max_per_pipe_ib > old->max_per_pipe_ib))) ||
+ (!params_changed && ((new->bw_ctl < old->bw_ctl) ||
+ (new->max_per_pipe_ib < old->max_per_pipe_ib)))) {
SDE_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
crtc->base.id, params_changed, new->bw_ctl,
old->bw_ctl);
@@ -458,7 +473,8 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
get_sde_rsc_current_state(SDE_RSC_INDEX) ==
SDE_RSC_CMD_STATE) {
/* update new bandwidth in all cases */
- if (params_changed && new->bw_ctl != old->bw_ctl) {
+ if (params_changed && ((new->bw_ctl != old->bw_ctl) ||
+ (new->max_per_pipe_ib != old->max_per_pipe_ib))) {
old->bw_ctl = new->bw_ctl;
old->max_per_pipe_ib = new->max_per_pipe_ib;
update_bus = 1;
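Reviewer note (illustrative, not part of the patch): the reworked vote logic now compares both the ab vote (bw_ctl) and the per-pipe ib vote. Restated as a hypothetical helper, the bus-update condition is:

    static bool need_bus_update(bool params_changed,
                    const struct sde_core_perf_params *new,
                    const struct sde_core_perf_params *old)
    {
            bool higher = new->bw_ctl > old->bw_ctl ||
                          new->max_per_pipe_ib > old->max_per_pipe_ib;
            bool lower  = new->bw_ctl < old->bw_ctl ||
                          new->max_per_pipe_ib < old->max_per_pipe_ib;

            /* raise votes on an update request, lower them only at
             * end of commit or stop
             */
            return (params_changed && higher) || (!params_changed && lower);
    }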
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index c4a135a..075864b 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -36,6 +36,7 @@
#include "sde_connector.h"
#include "sde_power_handle.h"
#include "sde_core_perf.h"
+#include "sde_trace.h"
struct sde_crtc_irq_info {
struct sde_irq_callback irq;
@@ -1111,12 +1112,11 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct sde_hw_stage_cfg *stage_cfg;
struct sde_rect plane_crtc_roi;
- u32 flush_mask = 0;
+ u32 flush_mask, flush_sbuf, flush_tmp;
uint32_t lm_idx = LEFT_MIXER, stage_idx;
bool bg_alpha_enable[CRTC_DUAL_MIXERS] = {false};
int zpos_cnt[CRTC_DUAL_MIXERS][SDE_STAGE_MAX + 1] = { {0} };
int i;
- bool sbuf_mode = false;
u32 prefill = 0;
if (!sde_crtc || !mixer) {
@@ -1128,6 +1128,10 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
lm = mixer->hw_lm;
stage_cfg = &sde_crtc->stage_cfg;
cstate = to_sde_crtc_state(crtc->state);
+ flush_sbuf = 0x0;
+
+ cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE;
+ cstate->sbuf_prefill_line = 0;
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
@@ -1143,10 +1147,16 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
fb = state->fb;
if (sde_plane_is_sbuf_mode(plane, &prefill))
- sbuf_mode = true;
+ cstate->sbuf_cfg.rot_op_mode =
+ SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
+ if (prefill > cstate->sbuf_prefill_line)
+ cstate->sbuf_prefill_line = prefill;
- sde_plane_get_ctl_flush(plane, ctl, &flush_mask);
+ sde_plane_get_ctl_flush(plane, ctl, &flush_mask, &flush_tmp);
+ /* persist rotator flush bit(s) for one more commit */
+ flush_mask |= cstate->sbuf_flush_mask | flush_tmp;
+ flush_sbuf |= flush_tmp;
SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
crtc->base.id,
@@ -1162,7 +1172,8 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
state->src_x >> 16, state->src_y >> 16,
state->src_w >> 16, state->src_h >> 16,
state->crtc_x, state->crtc_y,
- state->crtc_w, state->crtc_h);
+ state->crtc_w, state->crtc_h,
+ cstate->sbuf_cfg.rot_op_mode);
for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
struct sde_rect intersect;
@@ -1207,6 +1218,8 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
}
}
+ cstate->sbuf_flush_mask = flush_sbuf;
+
if (lm && lm->ops.setup_dim_layer) {
cstate = to_sde_crtc_state(crtc->state);
for (i = 0; i < cstate->num_dim_layers; i++)
@@ -1214,20 +1227,8 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
mixer, &cstate->dim_layer[i]);
}
- if (ctl->ops.setup_sbuf_cfg) {
- cstate = to_sde_crtc_state(crtc->state);
- if (!sbuf_mode) {
- cstate->sbuf_cfg.rot_op_mode =
- SDE_CTL_ROT_OP_MODE_OFFLINE;
- cstate->sbuf_prefill_line = 0;
- } else {
- cstate->sbuf_cfg.rot_op_mode =
- SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
- cstate->sbuf_prefill_line = prefill;
- }
-
+ if (ctl->ops.setup_sbuf_cfg)
ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
- }
_sde_crtc_program_lm_output_roi(crtc);
}
@@ -1746,6 +1747,7 @@ static void _sde_crtc_wait_for_fences(struct drm_crtc *crtc)
* if its fence has timed out. Call input fence wait multiple times if
* fence wait is interrupted due to interrupt call.
*/
+ SDE_ATRACE_BEGIN("plane_wait_input_fence");
drm_atomic_crtc_for_each_plane(plane, crtc) {
do {
kt_wait = ktime_sub(kt_end, ktime_get());
@@ -1757,6 +1759,7 @@ static void _sde_crtc_wait_for_fences(struct drm_crtc *crtc)
rc = sde_plane_wait_input_fence(plane, wait_ms);
} while (wait_ms && rc == -ERESTARTSYS);
}
+ SDE_ATRACE_END("plane_wait_input_fence");
}
static void _sde_crtc_setup_mixer_for_encoder(
@@ -2083,6 +2086,7 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
if (unlikely(!sde_crtc->num_mixers))
return;
+ SDE_ATRACE_BEGIN("crtc_commit");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct sde_encoder_kickoff_params params = { 0 };
@@ -2104,7 +2108,7 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
SDE_ERROR("crtc%d invalid frame pending\n",
crtc->base.id);
SDE_EVT32(DRMID(crtc), 0);
- return;
+ goto end;
} else if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
/* acquire bandwidth and other resources */
SDE_DEBUG("crtc%d first commit\n", crtc->base.id);
@@ -2120,6 +2124,9 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
sde_encoder_kickoff(encoder);
}
+end:
+ SDE_ATRACE_END("crtc_commit");
+ return;
}
/**
@@ -2446,6 +2453,9 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
sde_crtc->num_mixers = 0;
+ /* disable clk & bw control until clk & bw properties are set */
+ cstate->bw_control = false;
+
spin_lock_irqsave(&sde_crtc->spin_lock, flags);
list_for_each_entry(node, &sde_crtc->user_event_list, list) {
ret = 0;
@@ -3056,6 +3066,7 @@ static int sde_crtc_atomic_set_property(struct drm_crtc *crtc,
case CRTC_PROP_ROI_V1:
ret = _sde_crtc_set_roi_v1(state, (void *)val);
break;
+ case CRTC_PROP_CORE_CLK:
case CRTC_PROP_CORE_AB:
case CRTC_PROP_CORE_IB:
case CRTC_PROP_MEM_AB:
@@ -3411,16 +3422,18 @@ static const struct file_operations __prefix ## _fops = { \
static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
struct sde_crtc_res *res;
seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc));
seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
- seq_printf(s, "bw_ctl: %llu\n", cstate->cur_perf.bw_ctl);
- seq_printf(s, "core_clk_rate: %llu\n", cstate->cur_perf.core_clk_rate);
+ seq_printf(s, "bw_ctl: %llu\n", sde_crtc->cur_perf.bw_ctl);
+ seq_printf(s, "core_clk_rate: %llu\n",
+ sde_crtc->cur_perf.core_clk_rate);
seq_printf(s, "max_per_pipe_ib: %llu\n",
- cstate->cur_perf.max_per_pipe_ib);
+ sde_crtc->cur_perf.max_per_pipe_ib);
seq_printf(s, "rp.%d: ", cstate->rp.sequence_id);
list_for_each_entry(res, &cstate->rp.res_list, list)
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 4b3c814..a622d9c 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -141,6 +141,7 @@ struct sde_crtc_event {
* @event_lock : Spinlock around event handling code
* @misr_enable : boolean entry indicates misr enable/disable status.
* @power_event : registered power event handle
+ * @cur_perf : current performance committed to clock/bandwidth driver
*/
struct sde_crtc {
struct drm_crtc base;
@@ -193,6 +194,8 @@ struct sde_crtc {
bool misr_enable;
struct sde_power_event *power_event;
+
+ struct sde_core_perf_params cur_perf;
};
#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -255,7 +258,7 @@ struct sde_crtc_respool {
* @intf_mode : Interface mode of the primary connector
* @rsc_client : sde rsc client when mode is valid
* @is_ppsplit : Whether current topology requires PPSplit special handling
- * @bw_control : true if bw controlled by bw properties
+ * @bw_control : true if bw/clk controlled by bw/clk properties
* @crtc_roi : Current CRTC ROI. Possibly sub-rectangle of mode.
* Origin top left of CRTC.
* @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
@@ -268,10 +271,10 @@ struct sde_crtc_respool {
* @property_blobs: Reference pointers for blob properties
* @num_dim_layers: Number of dim layers
* @dim_layer: Dim layer configs
- * @cur_perf: current performance state
- * @new_perf: new performance state
+ * @new_perf: new performance state being requested
* @sbuf_cfg: stream buffer configuration
* @sbuf_prefill_line: number of line for inline rotator prefetch
+ * @sbuf_flush_mask: flush mask for inline rotator
*/
struct sde_crtc_state {
struct drm_crtc_state base;
@@ -295,10 +298,10 @@ struct sde_crtc_state {
uint32_t num_dim_layers;
struct sde_hw_dim_layer dim_layer[SDE_MAX_DIM_LAYERS];
- struct sde_core_perf_params cur_perf;
struct sde_core_perf_params new_perf;
struct sde_ctl_sbuf_cfg sbuf_cfg;
- u64 sbuf_prefill_line;
+ u32 sbuf_prefill_line;
+ u32 sbuf_flush_mask;
struct sde_crtc_respool rp;
};
@@ -433,10 +436,14 @@ static inline bool sde_crtc_is_enabled(struct drm_crtc *crtc)
*/
static inline u32 sde_crtc_get_inline_prefill(struct drm_crtc *crtc)
{
+ struct sde_crtc_state *cstate;
+
if (!crtc || !crtc->state)
return 0;
- return to_sde_crtc_state(crtc->state)->sbuf_prefill_line;
+ cstate = to_sde_crtc_state(crtc->state);
+ return cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE ?
+ cstate->sbuf_prefill_line : 0;
}
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 4fb508c..5ccd385 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -35,6 +35,7 @@
#include "sde_power_handle.h"
#include "sde_hw_dsc.h"
#include "sde_crtc.h"
+#include "sde_trace.h"
#define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
@@ -1582,6 +1583,7 @@ static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc,
if (!drm_enc || !phy_enc)
return;
+ SDE_ATRACE_BEGIN("encoder_vblank_callback");
sde_enc = to_sde_encoder_virt(drm_enc);
spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
@@ -1590,6 +1592,7 @@ static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc,
spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
atomic_inc(&phy_enc->vsync_cnt);
+ SDE_ATRACE_END("encoder_vblank_callback");
}
static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc,
@@ -1598,8 +1601,10 @@ static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc,
if (!phy_enc)
return;
+ SDE_ATRACE_BEGIN("encoder_underrun_callback");
atomic_inc(&phy_enc->underrun_cnt);
SDE_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt));
+ SDE_ATRACE_END("encoder_underrun_callback");
}
void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
@@ -2135,6 +2140,7 @@ void sde_encoder_kickoff(struct drm_encoder *drm_enc)
SDE_ERROR("invalid encoder\n");
return;
}
+ SDE_ATRACE_BEGIN("encoder_kickoff");
sde_enc = to_sde_encoder_virt(drm_enc);
SDE_DEBUG_ENC(sde_enc, "\n");
@@ -2154,6 +2160,7 @@ void sde_encoder_kickoff(struct drm_encoder *drm_enc)
if (phys && phys->ops.handle_post_kickoff)
phys->ops.handle_post_kickoff(phys);
}
+ SDE_ATRACE_END("encoder_kickoff");
}
int sde_encoder_helper_hw_release(struct sde_encoder_phys *phys_enc,
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 3d6dc32..c2ef28d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -294,6 +294,7 @@ struct sde_encoder_phys_cmd {
* @bypass_irqreg: Bypass irq register/unregister if non-zero
* @wbdone_complete: for wbdone irq synchronization
* @wb_cfg: Writeback hardware configuration
+ * @cdp_cfg: Writeback CDP configuration
* @intf_cfg: Interface hardware configuration
* @wb_roi: Writeback region-of-interest
* @wb_fmt: Writeback pixel format
@@ -315,6 +316,7 @@ struct sde_encoder_phys_wb {
u32 bypass_irqreg;
struct completion wbdone_complete;
struct sde_hw_wb_cfg wb_cfg;
+ struct sde_hw_wb_cdp_cfg cdp_cfg;
struct sde_hw_intf_cfg intf_cfg;
struct sde_rect wb_roi;
const struct sde_format *wb_fmt;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 5cb84b4..488f5c0 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -222,7 +222,7 @@ static void programmable_fetch_config(struct sde_encoder_phys *phys_enc,
* @rot_fetch_lines: number of line to prefill, or 0 to disable
*/
static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
- u64 rot_fetch_lines)
+ u32 rot_fetch_lines)
{
struct sde_encoder_phys_vid *vid_enc =
to_sde_encoder_phys_vid(phys_enc);
@@ -232,9 +232,12 @@ static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
u32 horiz_total = 0;
u32 vert_total = 0;
u32 rot_fetch_start_vsync_counter = 0;
+ u32 flush_mask = 0;
unsigned long lock_flags;
- if (!phys_enc || !vid_enc->hw_intf ||
+ if (!phys_enc || !vid_enc->hw_intf || !phys_enc->hw_ctl ||
+ !phys_enc->hw_ctl->ops.get_bitmask_intf ||
+ !phys_enc->hw_ctl->ops.update_pending_flush ||
!vid_enc->hw_intf->ops.setup_rot_start)
return;
@@ -253,9 +256,14 @@ static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
}
SDE_DEBUG_VIDENC(vid_enc,
- "rot_fetch_lines %llu rot_fetch_start_vsync_counter %u\n",
+ "rot_fetch_lines %u rot_fetch_start_vsync_counter %u\n",
rot_fetch_lines, rot_fetch_start_vsync_counter);
+ phys_enc->hw_ctl->ops.get_bitmask_intf(
+ phys_enc->hw_ctl, &flush_mask, vid_enc->hw_intf->idx);
+ phys_enc->hw_ctl->ops.update_pending_flush(
+ phys_enc->hw_ctl, flush_mask);
+
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
vid_enc->hw_intf->ops.setup_rot_start(vid_enc->hw_intf, &f);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 385c610..1657b9b 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -248,16 +248,18 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
struct sde_hw_wb *hw_wb;
struct sde_hw_wb_cfg *wb_cfg;
+ struct sde_hw_wb_cdp_cfg *cdp_cfg;
const struct msm_format *format;
int ret, mmu_id;
- if (!phys_enc) {
+ if (!phys_enc || !phys_enc->sde_kms || !phys_enc->sde_kms->catalog) {
SDE_ERROR("invalid encoder\n");
return;
}
hw_wb = wb_enc->hw_wb;
wb_cfg = &wb_enc->wb_cfg;
+ cdp_cfg = &wb_enc->cdp_cfg;
memset(wb_cfg, 0, sizeof(struct sde_hw_wb_cfg));
wb_cfg->intf_mode = phys_enc->intf_mode;
@@ -325,6 +327,21 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
if (hw_wb->ops.setup_outformat)
hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
+ if (hw_wb->ops.setup_cdp) {
+ memset(cdp_cfg, 0, sizeof(struct sde_hw_wb_cdp_cfg));
+
+ cdp_cfg->enable = phys_enc->sde_kms->catalog->perf.cdp_cfg
+ [SDE_PERF_CDP_USAGE_NRT].wr_enable;
+ cdp_cfg->ubwc_meta_enable =
+ SDE_FORMAT_IS_UBWC(wb_cfg->dest.format);
+ cdp_cfg->tile_amortize_enable =
+ SDE_FORMAT_IS_UBWC(wb_cfg->dest.format) ||
+ SDE_FORMAT_IS_TILE(wb_cfg->dest.format);
+ cdp_cfg->preload_ahead = SDE_WB_CDP_PRELOAD_AHEAD_64;
+
+ hw_wb->ops.setup_cdp(hw_wb, cdp_cfg);
+ }
+
if (hw_wb->ops.setup_outaddress)
hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 9f1b6cb..eb62716 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -136,7 +136,6 @@ enum sde_prop {
QSEED_TYPE,
CSC_TYPE,
PANIC_PER_PIPE,
- CDP,
SRC_SPLIT,
DIM_LAYER,
SMART_DMA_REV,
@@ -165,6 +164,7 @@ enum {
PERF_QOS_LUT_MACROTILE,
PERF_QOS_LUT_NRT,
PERF_QOS_LUT_CWB,
+ PERF_CDP_SETTING,
PERF_PROP_MAX,
};
@@ -290,6 +290,8 @@ enum {
VBIF_DYNAMIC_OT_WR_LIMIT,
VBIF_QOS_RT_REMAP,
VBIF_QOS_NRT_REMAP,
+ VBIF_MEMTYPE_0,
+ VBIF_MEMTYPE_1,
VBIF_PROP_MAX,
};
@@ -349,7 +351,6 @@ static struct sde_prop_type sde_prop[] = {
{QSEED_TYPE, "qcom,sde-qseed-type", false, PROP_TYPE_STRING},
{CSC_TYPE, "qcom,sde-csc-type", false, PROP_TYPE_STRING},
{PANIC_PER_PIPE, "qcom,sde-panic-per-pipe", false, PROP_TYPE_BOOL},
- {CDP, "qcom,sde-has-cdp", false, PROP_TYPE_BOOL},
{SRC_SPLIT, "qcom,sde-has-src-split", false, PROP_TYPE_BOOL},
{DIM_LAYER, "qcom,sde-has-dim-layer", false, PROP_TYPE_BOOL},
{SMART_DMA_REV, "qcom,sde-smart-dma-rev", false, PROP_TYPE_STRING},
@@ -391,6 +392,8 @@ static struct sde_prop_type sde_perf_prop[] = {
PROP_TYPE_U32_ARRAY},
{PERF_QOS_LUT_CWB, "qcom,sde-qos-lut-cwb", false,
PROP_TYPE_U32_ARRAY},
+ {PERF_CDP_SETTING, "qcom,sde-cdp-setting", false,
+ PROP_TYPE_U32_ARRAY},
};
static struct sde_prop_type sspp_prop[] = {
@@ -529,6 +532,8 @@ static struct sde_prop_type vbif_prop[] = {
PROP_TYPE_U32_ARRAY},
{VBIF_QOS_NRT_REMAP, "qcom,sde-vbif-qos-nrt-remap", false,
PROP_TYPE_U32_ARRAY},
+ {VBIF_MEMTYPE_0, "qcom,sde-vbif-memtype-0", false, PROP_TYPE_U32_ARRAY},
+ {VBIF_MEMTYPE_1, "qcom,sde-vbif-memtype-1", false, PROP_TYPE_U32_ARRAY},
};
static struct sde_prop_type reg_dma_prop[REG_DMA_PROP_MAX] = {
@@ -1061,6 +1066,9 @@ static int sde_sspp_parse_dt(struct device_node *np,
set_bit(SDE_SSPP_SRC, &sspp->features);
+ if (sde_cfg->has_cdp)
+ set_bit(SDE_SSPP_CDP, &sspp->features);
+
if (sde_cfg->ts_prefill_rev == 1) {
set_bit(SDE_SSPP_TS_PREFILL, &sspp->features);
} else if (sde_cfg->ts_prefill_rev == 2) {
@@ -1399,6 +1407,8 @@ static int sde_intf_parse_dt(struct device_node *np,
intf->len = DEFAULT_SDE_HW_BLOCK_LEN;
intf->prog_fetch_lines_worst_case =
+ !prop_exists[INTF_PREFETCH] ?
+ sde_cfg->perf.min_prefill_lines :
PROP_VALUE_ACCESS(prop_value, INTF_PREFETCH, i);
of_property_read_string_index(np,
@@ -1500,6 +1510,9 @@ static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
set_bit(SDE_WB_TRAFFIC_SHAPER, &wb->features);
set_bit(SDE_WB_YUV_CONFIG, &wb->features);
+ if (sde_cfg->has_cdp)
+ set_bit(SDE_WB_CDP, &wb->features);
+
set_bit(SDE_WB_QOS, &wb->features);
if (sde_cfg->vbif_qos_nlvl == 8)
set_bit(SDE_WB_QOS_8LVL, &wb->features);
@@ -1970,6 +1983,16 @@ static int sde_vbif_parse_dt(struct device_node *np,
if (rc)
goto end;
+ rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_0], 1,
+ &prop_count[VBIF_MEMTYPE_0], NULL);
+ if (rc)
+ goto end;
+
+ rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_1], 1,
+ &prop_count[VBIF_MEMTYPE_1], NULL);
+ if (rc)
+ goto end;
+
sde_cfg->vbif_count = off_count;
rc = _read_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop), prop_count,
@@ -2118,6 +2141,19 @@ static int sde_vbif_parse_dt(struct device_node *np,
if (vbif->qos_rt_tbl.npriority_lvl ||
vbif->qos_nrt_tbl.npriority_lvl)
set_bit(SDE_VBIF_QOS_REMAP, &vbif->features);
+
+ vbif->memtype_count = prop_count[VBIF_MEMTYPE_0] +
+ prop_count[VBIF_MEMTYPE_1];
+ if (vbif->memtype_count > MAX_XIN_COUNT) {
+ vbif->memtype_count = 0;
+ SDE_ERROR("too many memtype defs, ignoring entries\n");
+ }
+ for (j = 0, k = 0; j < prop_count[VBIF_MEMTYPE_0]; j++)
+ vbif->memtype[k++] = PROP_VALUE_ACCESS(
+ prop_value, VBIF_MEMTYPE_0, j);
+ for (j = 0; j < prop_count[VBIF_MEMTYPE_1]; j++)
+ vbif->memtype[k++] = PROP_VALUE_ACCESS(
+ prop_value, VBIF_MEMTYPE_1, j);
}
end:
@@ -2420,6 +2456,11 @@ static int sde_perf_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
if (rc)
goto freeprop;
+ rc = _validate_dt_entry(np, &sde_perf_prop[PERF_CDP_SETTING], 1,
+ &prop_count[PERF_CDP_SETTING], NULL);
+ if (rc)
+ goto freeprop;
+
rc = _read_dt_entry(np, sde_perf_prop, ARRAY_SIZE(sde_perf_prop),
prop_count, prop_exists, prop_value);
if (rc)
@@ -2559,6 +2600,27 @@ static int sde_perf_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
cfg->perf.qos_lut_tbl[j].nentry = count;
}
+ if (prop_exists[PERF_CDP_SETTING]) {
+ const u32 prop_size = 2;
+ u32 count = prop_count[PERF_CDP_SETTING] / prop_size;
+
+ count = min_t(u32, count, SDE_PERF_CDP_USAGE_MAX);
+
+ for (j = 0; j < count; j++) {
+ cfg->perf.cdp_cfg[j].rd_enable =
+ PROP_VALUE_ACCESS(prop_value,
+ PERF_CDP_SETTING, j * prop_size);
+ cfg->perf.cdp_cfg[j].wr_enable =
+ PROP_VALUE_ACCESS(prop_value,
+ PERF_CDP_SETTING, j * prop_size + 1);
+ SDE_DEBUG("cdp usage:%d rd:%d wr:%d\n",
+ j, cfg->perf.cdp_cfg[j].rd_enable,
+ cfg->perf.cdp_cfg[j].wr_enable);
+ }
+
+ cfg->has_cdp = true;
+ }
+
freeprop:
kfree(prop_value);
end:
@@ -2760,6 +2822,10 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
if (rc)
goto end;
+ rc = sde_perf_parse_dt(np, sde_cfg);
+ if (rc)
+ goto end;
+
rc = sde_rot_parse_dt(np, sde_cfg);
if (rc)
goto end;
@@ -2810,10 +2876,6 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
if (rc)
goto end;
- rc = sde_perf_parse_dt(np, sde_cfg);
- if (rc)
- goto end;
-
return sde_cfg;
end:
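Reviewer note (illustrative): with prop_size = 2, the parser reads rd_enable from index 2*j and wr_enable from index 2*j + 1, filling RT first, then NRT. A hypothetical "qcom,sde-cdp-setting = <1 1>, <0 1>;" would therefore yield:

    cfg->perf.cdp_cfg[SDE_PERF_CDP_USAGE_RT].rd_enable  = true;
    cfg->perf.cdp_cfg[SDE_PERF_CDP_USAGE_RT].wr_enable  = true;
    cfg->perf.cdp_cfg[SDE_PERF_CDP_USAGE_NRT].rd_enable = false;
    cfg->perf.cdp_cfg[SDE_PERF_CDP_USAGE_NRT].wr_enable = true;
    /* and cfg->has_cdp is set, enabling the SSPP/WB CDP feature bits */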
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 7d1c180..beff43c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -60,6 +60,8 @@
#define SDE_COLOR_PROCESS_MAJOR(version) (((version) & 0xFFFF0000) >> 16)
#define SDE_COLOR_PROCESS_MINOR(version) ((version) & 0xFFFF)
+#define MAX_XIN_COUNT 16
+
/**
* Supported UBWC feature versions
*/
@@ -79,7 +81,6 @@ enum {
* @SDE_MDP_UBWC_1_0, This chipsets supports Universal Bandwidth
* compression initial revision
* @SDE_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5
- * @SDE_MDP_CDP, Client driven prefetch
* @SDE_MDP_MAX Maximum value
*/
@@ -89,7 +90,6 @@ enum {
SDE_MDP_BWC,
SDE_MDP_UBWC_1_0,
SDE_MDP_UBWC_1_5,
- SDE_MDP_CDP,
SDE_MDP_MAX
};
@@ -114,6 +114,7 @@ enum {
* @SDE_SSPP_SBUF, SSPP support inline stream buffer
* @SDE_SSPP_TS_PREFILL Supports prefill with traffic shaper
* @SDE_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @SDE_SSPP_CDP Supports client driven prefetch
* @SDE_SSPP_MAX maximum value
*/
enum {
@@ -136,6 +137,7 @@ enum {
SDE_SSPP_SBUF,
SDE_SSPP_TS_PREFILL,
SDE_SSPP_TS_PREFILL_REC1,
+ SDE_SSPP_CDP,
SDE_SSPP_MAX
};
@@ -245,6 +247,7 @@ enum {
* the destination image
* @SDE_WB_QOS, Writeback supports QoS control, danger/safe/creq
* @SDE_WB_QOS_8LVL, Writeback supports 8-level QoS control
+ * @SDE_WB_CDP Writeback supports client driven prefetch
* @SDE_WB_MAX maximum value
*/
enum {
@@ -262,6 +265,7 @@ enum {
SDE_WB_XY_ROI_OFFSET,
SDE_WB_QOS,
SDE_WB_QOS_8LVL,
+ SDE_WB_CDP,
SDE_WB_MAX
};
@@ -703,6 +707,8 @@ struct sde_vbif_qos_tbl {
* @dynamic_ot_wr_tbl dynamic OT write configuration table
* @qos_rt_tbl real-time QoS priority table
* @qos_nrt_tbl non-real-time QoS priority table
+ * @memtype_count number of defined memtypes
+ * @memtype array of xin memtype definitions
*/
struct sde_vbif_cfg {
SDE_HW_BLK_INFO;
@@ -713,6 +719,8 @@ struct sde_vbif_cfg {
struct sde_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
struct sde_vbif_qos_tbl qos_rt_tbl;
struct sde_vbif_qos_tbl qos_nrt_tbl;
+ u32 memtype_count;
+ u32 memtype[MAX_XIN_COUNT];
};
/**
* struct sde_reg_dma_cfg - information of lut dma blocks
@@ -729,6 +737,27 @@ struct sde_reg_dma_cfg {
};
/**
+ * Define CDP use cases
+ * @SDE_PERF_CDP_USAGE_RT: real-time use cases
+ * @SDE_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
+ */
+enum {
+ SDE_PERF_CDP_USAGE_RT,
+ SDE_PERF_CDP_USAGE_NRT,
+ SDE_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct sde_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct sde_perf_cdp_cfg {
+ bool rd_enable;
+ bool wr_enable;
+};
+
+/**
* struct sde_perf_cfg - performance control settings
* @max_bw_low low threshold of maximum bandwidth (kbps)
* @max_bw_high high threshold of maximum bandwidth (kbps)
@@ -748,6 +777,7 @@ struct sde_reg_dma_cfg {
* @safe_lut_tbl: LUT tables for safe signals
* @danger_lut_tbl: LUT tables for danger signals
* @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg cdp use case configurations
*/
struct sde_perf_cfg {
u32 max_bw_low;
@@ -768,6 +798,7 @@ struct sde_perf_cfg {
u32 safe_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
u32 danger_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
struct sde_qos_lut_tbl qos_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
+ struct sde_perf_cdp_cfg cdp_cfg[SDE_PERF_CDP_USAGE_MAX];
};
/**
@@ -785,7 +816,7 @@ struct sde_perf_cfg {
* @csc_type csc or csc_10bit support.
* @smart_dma_rev Supported version of SmartDMA feature.
* @has_src_split source split feature status
- * @has_cdp Client driver prefetch feature status
+ * @has_cdp Client driven prefetch feature status
* @has_wb_ubwc UBWC feature supported on WB
* @ubwc_version UBWC feature version (0x0 for not supported)
* @has_sbuf indicate if stream buffer is available
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index a9a493f..bb9f9c0 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -82,6 +82,7 @@
#define SSPP_SW_PIX_EXT_C3_TB 0x124
#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128
#define SSPP_TRAFFIC_SHAPER 0x130
+#define SSPP_CDP_CNTL 0x134
#define SSPP_UBWC_ERROR_STATUS 0x138
#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150
#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
@@ -1102,6 +1103,30 @@ static void sde_hw_sspp_setup_ts_prefill(struct sde_hw_pipe *ctx,
SDE_REG_WRITE(&ctx->hw, ts_prefill_offset, ts_count);
}
+static void sde_hw_sspp_setup_cdp(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cdp_cfg *cfg)
+{
+ u32 idx;
+ u32 cdp_cntl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+ return;
+
+ if (cfg->enable)
+ cdp_cntl |= BIT(0);
+ if (cfg->ubwc_meta_enable)
+ cdp_cntl |= BIT(1);
+ if (cfg->tile_amortize_enable)
+ cdp_cntl |= BIT(2);
+ if (cfg->preload_ahead == SDE_SSPP_CDP_PRELOAD_AHEAD_64)
+ cdp_cntl |= BIT(3);
+
+ SDE_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+}
+
static void _setup_layer_ops(struct sde_hw_pipe *c,
unsigned long features)
{
@@ -1163,6 +1188,9 @@ static void _setup_layer_ops(struct sde_hw_pipe *c,
c->ops.setup_sys_cache = sde_hw_sspp_setup_sys_cache;
c->ops.get_sbuf_status = sde_hw_sspp_get_sbuf_status;
}
+
+ if (test_bit(SDE_SSPP_CDP, &features))
+ c->ops.setup_cdp = sde_hw_sspp_setup_cdp;
}
static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp,
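Reviewer note (illustrative, not part of the patch): the bit layout programmed by the new setup_cdp op, worked through for a hypothetical UBWC source layer with 64-request preload:

    struct sde_hw_pipe_cdp_cfg cfg = {
            .enable = true,                  /* BIT(0) */
            .ubwc_meta_enable = true,        /* BIT(1) */
            .tile_amortize_enable = true,    /* BIT(2) */
            .preload_ahead = SDE_SSPP_CDP_PRELOAD_AHEAD_64, /* BIT(3) */
    };
    /* sde_hw_sspp_setup_cdp() would then write 0xF to SSPP_CDP_CNTL */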
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index 44b7ea9..d52c0e5 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -308,6 +308,30 @@ struct sde_hw_pipe_qos_cfg {
};
/**
+ * enum CDP preload ahead address size
+ */
+enum {
+ SDE_SSPP_CDP_PRELOAD_AHEAD_32,
+ SDE_SSPP_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct sde_hw_pipe_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ * SDE_SSPP_CDP_PRELOAD_AHEAD_32,
+ * SDE_SSPP_CDP_PRELOAD_AHEAD_64
+ */
+struct sde_hw_pipe_cdp_cfg {
+ bool enable;
+ bool ubwc_meta_enable;
+ bool tile_amortize_enable;
+ u32 preload_ahead;
+};
+
+/**
* enum system cache rotation operation mode
*/
enum {
@@ -574,6 +598,14 @@ struct sde_hw_sspp_ops {
void (*setup_ts_prefill)(struct sde_hw_pipe *ctx,
struct sde_hw_pipe_ts_cfg *cfg,
enum sde_sspp_multirect_index index);
+
+ /**
+ * setup_cdp - setup client driven prefetch
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to cdp configuration
+ */
+ void (*setup_cdp)(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cdp_cfg *cfg);
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
index 9b9763a..b5c273a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
@@ -31,11 +31,43 @@
#define VBIF_IN_WR_LIM_CONF2 0x00C8
#define VBIF_OUT_RD_LIM_CONF0 0x00D0
#define VBIF_OUT_WR_LIM_CONF0 0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164
#define VBIF_XIN_HALT_CTRL0 0x0200
#define VBIF_XIN_HALT_CTRL1 0x0204
#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590
+static void sde_hw_set_mem_type(struct sde_hw_vbif *vbif,
+ u32 xin_id, u32 value)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 reg_off;
+ u32 bit_off;
+ u32 reg_val;
+
+ /*
+ * Assume 4 bits per bit field, 8 fields per 32-bit register so
+ * 16 bit fields maximum across two registers
+ */
+ if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+ return;
+
+ c = &vbif->hw;
+
+ if (xin_id >= 8) {
+ xin_id -= 8;
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+ } else {
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+ }
+ bit_off = (xin_id & 0x7) * 4;
+ reg_val = SDE_REG_READ(c, reg_off);
+ reg_val &= ~(0x7 << bit_off);
+ reg_val |= (value & 0x7) << bit_off;
+ SDE_REG_WRITE(c, reg_off, reg_val);
+}
+
static void sde_hw_set_limit_conf(struct sde_hw_vbif *vbif,
u32 xin_id, bool rd, u32 limit)
{
@@ -144,6 +176,7 @@ static void _setup_vbif_ops(struct sde_hw_vbif_ops *ops,
ops->get_halt_ctrl = sde_hw_get_halt_ctrl;
if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
ops->set_qos_remap = sde_hw_set_qos_remap;
+ ops->set_mem_type = sde_hw_set_mem_type;
}
static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif,
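Reviewer note (illustrative): each xin client gets a 4-bit AMEMTYPE field, eight fields per 32-bit register, so xin 0-7 land in CONF0 and xin 8-15 in CONF1. Worked through for a hypothetical xin_id 10 with memtype 3, mirroring the helper's math:

    /* xin_id 10 -> second register, field index 2, bits [11:8] */
    reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
    bit_off = (10 - 8) * 4;                               /* == 8 */
    reg_val = (reg_val & ~(0x7 << bit_off)) | ((3 & 0x7) << bit_off);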
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
index c67738b..80a9e5a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
@@ -71,6 +71,15 @@ struct sde_hw_vbif_ops {
*/
void (*set_qos_remap)(struct sde_hw_vbif *vbif,
u32 xin_id, u32 level, u32 remap_level);
+
+ /**
+ * set_mem_type - set memory type
+ * @vbif: vbif driver context
+ * @xin_id: client interface identifier
+ * @value: memory type value
+ */
+ void (*set_mem_type)(struct sde_hw_vbif *vbif,
+ u32 xin_id, u32 value);
};
struct sde_hw_vbif {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
index 5dbd794..378b904 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
@@ -49,7 +49,7 @@
#define WB_UBWC_STATIC_CTRL 0x144
#define WB_CSC_BASE 0x260
#define WB_DST_ADDR_SW_STATUS 0x2B0
-#define WB_CDP_CTRL 0x2B4
+#define WB_CDP_CNTL 0x2B4
#define WB_OUT_IMAGE_SIZE 0x2C0
#define WB_OUT_XY 0x2C4
@@ -96,7 +96,6 @@ static void sde_hw_wb_setup_format(struct sde_hw_wb *ctx,
u32 write_config = 0;
u32 opmode = 0;
u32 dst_addr_sw = 0;
- u32 cdp_settings = 0x0;
chroma_samp = fmt->chroma_sample;
@@ -165,18 +164,6 @@ static void sde_hw_wb_setup_format(struct sde_hw_wb *ctx,
SDE_REG_WRITE(c, WB_OUT_SIZE, outsize);
SDE_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
SDE_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
-
- /* Enable CDP */
- cdp_settings = BIT(0);
-
- if (!SDE_FORMAT_IS_LINEAR(fmt))
- cdp_settings |= BIT(1);
-
- /* Enable 64 transactions if line mode*/
- if (data->intf_mode == INTF_MODE_WB_LINE)
- cdp_settings |= BIT(3);
-
- SDE_REG_WRITE(c, WB_CDP_CTRL, cdp_settings);
}
static void sde_hw_wb_roi(struct sde_hw_wb *ctx, struct sde_hw_wb_cfg *wb)
@@ -234,6 +221,27 @@ static void sde_hw_wb_setup_qos_ctrl(struct sde_hw_wb *ctx,
SDE_REG_WRITE(c, WB_QOS_CTRL, qos_ctrl);
}
+static void sde_hw_wb_setup_cdp(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cdp_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 cdp_cntl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ c = &ctx->hw;
+
+ if (cfg->enable)
+ cdp_cntl |= BIT(0);
+ if (cfg->ubwc_meta_enable)
+ cdp_cntl |= BIT(1);
+ if (cfg->preload_ahead == SDE_WB_CDP_PRELOAD_AHEAD_64)
+ cdp_cntl |= BIT(3);
+
+ SDE_REG_WRITE(c, WB_CDP_CNTL, cdp_cntl);
+}
+
static void _setup_wb_ops(struct sde_hw_wb_ops *ops,
unsigned long features)
{
@@ -249,6 +257,9 @@ static void _setup_wb_ops(struct sde_hw_wb_ops *ops,
ops->setup_creq_lut = sde_hw_wb_setup_creq_lut;
ops->setup_qos_ctrl = sde_hw_wb_setup_qos_ctrl;
}
+
+ if (test_bit(SDE_WB_CDP, &features))
+ ops->setup_cdp = sde_hw_wb_setup_cdp;
}
struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.h b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
index caf574e..ca3c386 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
@@ -29,6 +29,30 @@ struct sde_hw_wb_cfg {
};
/**
+ * enum CDP preload ahead address size
+ */
+enum {
+ SDE_WB_CDP_PRELOAD_AHEAD_32,
+ SDE_WB_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct sde_hw_wb_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ * SDE_WB_CDP_PRELOAD_AHEAD_32,
+ * SDE_WB_CDP_PRELOAD_AHEAD_64
+ */
+struct sde_hw_wb_cdp_cfg {
+ bool enable;
+ bool ubwc_meta_enable;
+ bool tile_amortize_enable;
+ u32 preload_ahead;
+};
+
+/**
* struct sde_hw_wb_qos_cfg : Writeback pipe QoS configuration
* @danger_lut: LUT for generate danger level based on fill level
* @safe_lut: LUT for generate safe level based on fill level
@@ -95,6 +119,14 @@ struct sde_hw_wb_ops {
*/
void (*setup_qos_ctrl)(struct sde_hw_wb *ctx,
struct sde_hw_wb_qos_cfg *cfg);
+
+ /**
+ * setup_cdp - setup CDP
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe CDP configuration
+ */
+ void (*setup_cdp)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cdp_cfg *cfg);
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index a7d6ecf..c783ab0 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -1238,6 +1238,10 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
sde_hw_intr_destroy(sde_kms->hw_intr);
sde_kms->hw_intr = NULL;
+ if (sde_kms->power_event)
+ sde_power_handle_unregister_event(
+ &priv->phandle, sde_kms->power_event);
+
_sde_kms_release_displays(sde_kms);
/* safe to call these more than once during shutdown */
@@ -1443,6 +1447,16 @@ static void __iomem *_sde_kms_ioremap(struct platform_device *pdev,
return ptr;
}
+static void sde_kms_handle_power_event(u32 event_type, void *usr)
+{
+ struct sde_kms *sde_kms = usr;
+
+ if (!sde_kms)
+ return;
+
+ if (event_type == SDE_POWER_EVENT_POST_ENABLE)
+ sde_vbif_init_memtypes(sde_kms);
+}
static int sde_kms_hw_init(struct msm_kms *kms)
{
@@ -1660,6 +1674,14 @@ static int sde_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
+ /*
+ * Handle (re)initializations during power enable
+ */
+ sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
+ sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
+ SDE_POWER_EVENT_POST_ENABLE,
+ sde_kms_handle_power_event, sde_kms, "kms");
+
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
return 0;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index d20af9f..f73cb21 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -159,6 +159,7 @@ struct sde_kms {
struct sde_power_client *core_client;
struct ion_client *iclient;
+ struct sde_power_event *power_event;
/* directory entry for debugfs */
struct dentry *debugfs_danger;
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 88b0543..d63fec1 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1368,33 +1368,31 @@ static struct sde_crtc_res_ops fbo_res_ops = {
static u32 sde_plane_rot_calc_prefill(struct drm_plane *plane)
{
struct drm_plane_state *state;
- struct drm_crtc_state *cstate;
struct sde_plane_state *pstate;
struct sde_plane_rot_state *rstate;
struct sde_kms *sde_kms;
u32 blocksize = 128;
u32 prefill_line = 0;
- if (!plane || !plane->state || !plane->state->fb ||
- !plane->state->crtc || !plane->state->crtc->state) {
+ if (!plane || !plane->state || !plane->state->fb) {
SDE_ERROR("invalid parameters\n");
return 0;
}
sde_kms = _sde_plane_get_kms(plane);
state = plane->state;
- cstate = state->crtc->state;
pstate = to_sde_plane_state(state);
rstate = &pstate->rot;
- if (!rstate->rot_hw || !rstate->rot_hw->caps || !rstate->out_src_h ||
- !sde_kms || !sde_kms->catalog) {
- SDE_ERROR("invalid parameters\n");
+ if (!sde_kms || !sde_kms->catalog) {
+ SDE_ERROR("invalid kms\n");
return 0;
}
- sde_format_get_block_size(rstate->out_fb_format, &blocksize,
- &blocksize);
+ if (rstate->out_fb_format)
+ sde_format_get_block_size(rstate->out_fb_format,
+ &blocksize, &blocksize);
+
prefill_line = blocksize + sde_kms->catalog->sbuf_headroom;
SDE_DEBUG("plane%d prefill:%u\n", plane->base.id, prefill_line);
@@ -1416,7 +1414,7 @@ bool sde_plane_is_sbuf_mode(struct drm_plane *plane, u32 *prefill)
struct sde_plane_rot_state *rstate = pstate ? &pstate->rot : NULL;
bool sbuf_mode = rstate ? rstate->out_sbuf : false;
- if (prefill && sbuf_mode)
+ if (prefill)
*prefill = sde_plane_rot_calc_prefill(plane);
return sbuf_mode;
@@ -2447,16 +2445,16 @@ int sde_plane_validate_multirect_v2(struct sde_multirect_plane_states *plane)
* sde_plane_get_ctl_flush - get control flush for the given plane
* @plane: Pointer to drm plane structure
* @ctl: Pointer to hardware control driver
- * @flush: Pointer to flush control word
+ * @flush_sspp: Pointer to sspp flush control word
+ * @flush_rot: Pointer to rotator flush control word
*/
void sde_plane_get_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
- u32 *flush)
+ u32 *flush_sspp, u32 *flush_rot)
{
struct sde_plane_state *pstate;
struct sde_plane_rot_state *rstate;
- u32 bitmask;
- if (!plane || !flush) {
+ if (!plane || !flush_sspp) {
SDE_ERROR("invalid parameters\n");
return;
}
@@ -2464,13 +2462,15 @@ void sde_plane_get_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
pstate = to_sde_plane_state(plane->state);
rstate = &pstate->rot;
- bitmask = ctl->ops.get_bitmask_sspp(ctl, sde_plane_pipe(plane));
+ *flush_sspp = ctl->ops.get_bitmask_sspp(ctl, sde_plane_pipe(plane));
+ if (!flush_rot)
+ return;
+
+ *flush_rot = 0x0;
if (sde_plane_is_sbuf_mode(plane, NULL) && rstate->rot_hw &&
ctl->ops.get_bitmask_rot)
- ctl->ops.get_bitmask_rot(ctl, &bitmask, rstate->rot_hw->idx);
-
- *flush = bitmask;
+ ctl->ops.get_bitmask_rot(ctl, flush_rot, rstate->rot_hw->idx);
}
static int sde_plane_prepare_fb(struct drm_plane *plane,
@@ -3090,6 +3090,23 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt, src_flags,
pstate->multirect_index);
+ if (psde->pipe_hw->ops.setup_cdp) {
+ struct sde_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+
+ memset(cdp_cfg, 0, sizeof(struct sde_hw_pipe_cdp_cfg));
+
+ cdp_cfg->enable = psde->catalog->perf.cdp_cfg
+ [SDE_PERF_CDP_USAGE_RT].rd_enable;
+ cdp_cfg->ubwc_meta_enable =
+ SDE_FORMAT_IS_UBWC(fmt);
+ cdp_cfg->tile_amortize_enable =
+ SDE_FORMAT_IS_UBWC(fmt) ||
+ SDE_FORMAT_IS_TILE(fmt);
+ cdp_cfg->preload_ahead = SDE_WB_CDP_PRELOAD_AHEAD_64;
+
+ psde->pipe_hw->ops.setup_cdp(psde->pipe_hw, cdp_cfg);
+ }
+
if (psde->pipe_hw->ops.setup_sys_cache) {
if (rstate->out_sbuf) {
if (rstate->nplane < 2)
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 47611d1..f83a891 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -110,6 +110,7 @@ struct sde_plane_rot_state {
* @multirect_index: index of the rectangle of SSPP
* @multirect_mode: parallel or time multiplex multirect mode
* @pending: whether the current update is still pending
+ * @cdp_cfg: CDP configuration
*/
struct sde_plane_state {
struct drm_plane_state base;
@@ -126,6 +127,8 @@ struct sde_plane_state {
/* @sc_cfg: system_cache configuration */
struct sde_hw_pipe_sc_cfg sc_cfg;
struct sde_plane_rot_state rot;
+
+ struct sde_hw_pipe_cdp_cfg cdp_cfg;
};
/**
@@ -169,10 +172,11 @@ bool is_sde_plane_virtual(struct drm_plane *plane);
* sde_plane_get_ctl_flush - get control flush mask
* @plane: Pointer to DRM plane object
* @ctl: Pointer to control hardware
- * @flush: Pointer to updated flush mask
+ * @flush_sspp: Pointer to sspp flush control word
+ * @flush_rot: Pointer to rotator flush control word
*/
void sde_plane_get_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
- u32 *flush);
+ u32 *flush_sspp, u32 *flush_rot);
/**
* sde_plane_is_sbuf_mode - return status of stream buffer mode
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 427a93b..6ad2c43 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -175,7 +175,7 @@ void sde_rm_init_hw_iter(
iter->type = type;
}
-bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+static bool _sde_rm_get_hw_locked(struct sde_rm *rm, struct sde_rm_hw_iter *i)
{
struct list_head *blk_list;
@@ -217,6 +217,17 @@ bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
return false;
}
+bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+{
+ bool ret;
+
+ mutex_lock(&rm->rm_lock);
+ ret = _sde_rm_get_hw_locked(rm, i);
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
+
static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
{
switch (type) {
@@ -288,6 +299,8 @@ int sde_rm_destroy(struct sde_rm *rm)
sde_hw_mdp_destroy(rm->hw_mdp);
rm->hw_mdp = NULL;
+ mutex_destroy(&rm->rm_lock);
+
return 0;
}
@@ -390,6 +403,9 @@ int sde_rm_init(struct sde_rm *rm,
/* Clear, setup lists */
memset(rm, 0, sizeof(*rm));
+
+ mutex_init(&rm->rm_lock);
+
INIT_LIST_HEAD(&rm->rsvps);
for (type = 0; type < SDE_HW_BLK_MAX; type++)
INIT_LIST_HEAD(&rm->hw_blks[type]);
@@ -584,7 +600,7 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
if (lm_cfg->dspp != DSPP_MAX) {
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSPP);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id == lm_cfg->dspp) {
*dspp = iter.blk;
break;
@@ -605,7 +621,7 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
}
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_PINGPONG);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id == lm_cfg->pingpong) {
*pp = iter.blk;
break;
@@ -656,7 +672,7 @@ static int _sde_rm_reserve_lms(
/* Find a primary mixer */
sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
while (lm_count != reqs->topology->num_lm &&
- sde_rm_get_hw(rm, &iter_i)) {
+ _sde_rm_get_hw_locked(rm, &iter_i)) {
memset(&lm, 0, sizeof(lm));
memset(&dspp, 0, sizeof(dspp));
memset(&pp, 0, sizeof(pp));
@@ -675,7 +691,7 @@ static int _sde_rm_reserve_lms(
sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);
while (lm_count != reqs->topology->num_lm &&
- sde_rm_get_hw(rm, &iter_j)) {
+ _sde_rm_get_hw_locked(rm, &iter_j)) {
if (iter_i.blk == iter_j.blk)
continue;
@@ -711,7 +727,7 @@ static int _sde_rm_reserve_lms(
/* reserve a free PINGPONG_SLAVE block */
rc = -ENAVAIL;
sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
- while (sde_rm_get_hw(rm, &iter_i)) {
+ while (_sde_rm_get_hw_locked(rm, &iter_i)) {
struct sde_pingpong_cfg *pp_cfg =
(struct sde_pingpong_cfg *)
(iter_i.blk->catalog);
@@ -742,7 +758,7 @@ static int _sde_rm_reserve_ctls(
memset(&ctls, 0, sizeof(ctls));
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
unsigned long caps;
bool has_split_display, has_ppsplit;
@@ -793,7 +809,7 @@ static int _sde_rm_reserve_dsc(
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSC);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (RESERVED_BY_OTHER(iter.blk, rsvp))
continue;
@@ -820,7 +836,7 @@ static int _sde_rm_reserve_cdm(
struct sde_cdm_cfg *cdm;
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CDM);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
bool match = false;
if (RESERVED_BY_OTHER(iter.blk, rsvp))
@@ -865,7 +881,7 @@ static int _sde_rm_reserve_intf_or_wb(
/* Find the block entry in the rm, and note the reservation */
sde_rm_init_hw_iter(&iter, 0, type);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id != id)
continue;
@@ -1071,7 +1087,7 @@ static struct drm_connector *_sde_rm_get_connector(
* @rm: KMS handle
* @rsvp: RSVP pointer to release and release resources for
*/
-void _sde_rm_release_rsvp(
+static void _sde_rm_release_rsvp(
struct sde_rm *rm,
struct sde_rm_rsvp *rsvp,
struct drm_connector *conn)
@@ -1123,16 +1139,18 @@ void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
return;
}
+ mutex_lock(&rm->rm_lock);
+
rsvp = _sde_rm_get_rsvp(rm, enc);
if (!rsvp) {
SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
- return;
+ goto end;
}
conn = _sde_rm_get_connector(enc);
if (!conn) {
SDE_ERROR("failed to get connector for enc %d\n", enc->base.id);
- return;
+ goto end;
}
top_ctrl = sde_connector_get_property(conn->state,
@@ -1152,6 +1170,9 @@ void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
CONNECTOR_PROP_TOPOLOGY_NAME,
SDE_RM_TOPOLOGY_NONE);
}
+
+end:
+ mutex_unlock(&rm->rm_lock);
}
static int _sde_rm_commit_rsvp(
@@ -1219,13 +1240,15 @@ int sde_rm_reserve(
crtc_state->crtc->base.id, test_only);
SDE_EVT32(enc->base.id, conn_state->connector->base.id);
+ mutex_lock(&rm->rm_lock);
+
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_BEGIN);
ret = _sde_rm_populate_requirements(rm, enc, crtc_state,
conn_state, &reqs);
if (ret) {
SDE_ERROR("failed to populate hw requirements\n");
- return ret;
+ goto end;
}
/*
@@ -1240,8 +1263,10 @@ int sde_rm_reserve(
* replace the current with the next.
*/
rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
- if (!rsvp_nxt)
- return -ENOMEM;
+ if (!rsvp_nxt) {
+ ret = -ENOMEM;
+ goto end;
+ }
rsvp_cur = _sde_rm_get_rsvp(rm, enc);
@@ -1293,5 +1318,8 @@ int sde_rm_reserve(
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_FINAL);
+end:
+ mutex_unlock(&rm->rm_lock);
+
return ret;
}
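Reviewer note (illustrative, not part of the patch): rm_lock is a regular (non-recursive) mutex, so the reservation and release paths that now hold it must iterate with _sde_rm_get_hw_locked(); calling the public wrapper from inside those paths would self-deadlock:

    /* inside sde_rm_reserve(), rm_lock is already held */
    mutex_lock(&rm->rm_lock);
    /* ... */
    sde_rm_get_hw(rm, &iter);  /* would block forever re-acquiring rm_lock */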
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 059952a..b4a801a 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -72,6 +72,7 @@ enum sde_rm_topology_control {
* @hw_mdp: hardware object for mdp_top
* @lm_max_width: cached layer mixer maximum width
* @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
*/
struct sde_rm {
struct drm_device *dev;
@@ -80,6 +81,7 @@ struct sde_rm {
struct sde_hw_mdp *hw_mdp;
uint32_t lm_max_width;
uint32_t rsvp_next_seq;
+ struct mutex rm_lock;
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
index f731a30..6962bef 100644
--- a/drivers/gpu/drm/msm/sde/sde_trace.h
+++ b/drivers/gpu/drm/msm/sde/sde_trace.h
@@ -159,25 +159,37 @@ TRACE_EVENT(sde_trace_counter,
__get_str(counter_name), __entry->value)
)
+#define SDE_TRACE_EVTLOG_SIZE 15
TRACE_EVENT(sde_evtlog,
- TP_PROTO(const char *tag, u32 tag_id, u64 value1, u64 value2),
- TP_ARGS(tag, tag_id, value1, value2),
+ TP_PROTO(const char *tag, u32 tag_id, u32 cnt, u32 data[]),
+ TP_ARGS(tag, tag_id, cnt, data),
TP_STRUCT__entry(
__field(int, pid)
__string(evtlog_tag, tag)
__field(u32, tag_id)
- __field(u64, value1)
- __field(u64, value2)
+ __array(u32, data, SDE_TRACE_EVTLOG_SIZE)
),
TP_fast_assign(
__entry->pid = current->tgid;
__assign_str(evtlog_tag, tag);
__entry->tag_id = tag_id;
- __entry->value1 = value1;
- __entry->value2 = value2;
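+ /* copy at most SDE_TRACE_EVTLOG_SIZE words into the trace entry and zero-fill the rest */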
+ if (cnt > SDE_TRACE_EVTLOG_SIZE)
+ cnt = SDE_TRACE_EVTLOG_SIZE;
+ memcpy(__entry->data, data, cnt * sizeof(u32));
+ memset(&__entry->data[cnt], 0,
+ (SDE_TRACE_EVTLOG_SIZE - cnt) * sizeof(u32));
),
- TP_printk("%d|%s:%d|%llu|%llu", __entry->pid, __get_str(evtlog_tag),
- __entry->tag_id, __entry->value1, __entry->value2)
+ TP_printk("%d|%s:%d|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u",
+ __entry->pid, __get_str(evtlog_tag),
+ __entry->tag_id,
+ __entry->data[0], __entry->data[1],
+ __entry->data[2], __entry->data[3],
+ __entry->data[4], __entry->data[5],
+ __entry->data[6], __entry->data[7],
+ __entry->data[8], __entry->data[9],
+ __entry->data[10], __entry->data[11],
+ __entry->data[12], __entry->data[13],
+ __entry->data[14])
)
TRACE_EVENT(sde_perf_crtc_update,
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
index c675216..e63fe8c 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -265,6 +265,26 @@ void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
}
+void sde_vbif_init_memtypes(struct sde_kms *sde_kms)
+{
+ struct sde_hw_vbif *vbif;
+ int i, j;
+
+ if (!sde_kms) {
+ SDE_ERROR("invalid argument\n");
+ return;
+ }
+
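+ /* program every xin's memory type from the catalog settings for each vbif instance */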
+ for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
+ vbif = sde_kms->hw_vbif[i];
+ if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+ for (j = 0; j < vbif->cap->memtype_count; j++)
+ vbif->ops.set_mem_type(
+ vbif, j, vbif->cap->memtype[j]);
+ }
+ }
+}
+
#ifdef CONFIG_DEBUG_FS
void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
{
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.h b/drivers/gpu/drm/msm/sde/sde_vbif.h
index d05c2e0..f1da68b 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.h
@@ -27,6 +27,13 @@ struct sde_vbif_set_ot_params {
u32 clk_ctrl;
};
+struct sde_vbif_set_memtype_params {
+ u32 xin_id;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+ bool is_cacheable;
+};
+
/**
* struct sde_vbif_set_qos_params - QoS remapper parameter
* @vbif_idx: vbif identifier
@@ -59,6 +66,12 @@ void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
struct sde_vbif_set_qos_params *params);
+/**
+ * sde_vbif_init_memtypes - initialize xin memory types for vbif
+ * @sde_kms: SDE handler
+ */
+void sde_vbif_init_memtypes(struct sde_kms *sde_kms);
+
#ifdef CONFIG_DEBUG_FS
int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root);
void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms);
diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
index 699396f..67c664f 100644
--- a/drivers/gpu/drm/msm/sde_dbg_evtlog.c
+++ b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
@@ -99,8 +99,7 @@ void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line,
evtlog->curr = (evtlog->curr + 1) % SDE_EVTLOG_ENTRY;
evtlog->last++;
- trace_sde_evtlog(name, line, i > 0 ? log->data[0] : 0,
- i > 1 ? log->data[1] : 0);
+ trace_sde_evtlog(name, line, log->data_cnt, log->data);
exit:
spin_unlock_irqrestore(&evtlog->spin_lock, flags);
}
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index fb7f85c..452a3be 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -368,7 +368,6 @@ static int _sde_power_data_bus_set_quota(
u32 nrt_axi_port_cnt = pdbus->nrt_axi_port_cnt;
u32 total_axi_port_cnt = pdbus->axi_port_cnt;
u32 rt_axi_port_cnt = total_axi_port_cnt - nrt_axi_port_cnt;
- int match_cnt = 0;
if (!bw_table || !total_axi_port_cnt ||
total_axi_port_cnt > MAX_AXI_PORT_COUNT) {
@@ -408,20 +407,6 @@ static int _sde_power_data_bus_set_quota(
}
}
- for (i = 0; i < total_axi_port_cnt; i++) {
- vect = &bw_table->usecase
- [pdbus->curr_bw_uc_idx].vectors[i];
- /* avoid performing updates for small changes */
- if ((ab_quota[i] == vect->ab) &&
- (ib_quota[i] == vect->ib))
- match_cnt++;
- }
-
- if (match_cnt == total_axi_port_cnt) {
- pr_debug("skip BW vote\n");
- return 0;
- }
-
new_uc_idx = (pdbus->curr_bw_uc_idx %
(bw_table->num_usecases - 1)) + 1;
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 7bf2211..ac79968 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -31,16 +31,16 @@
#include "sde_dbg.h"
/* worst case time to execute one TCS vote (sleep/wake) - ~1ms */
-#define TCS_CASE_EXECUTION_TIME 1064000
+#define SINGLE_TCS_EXECUTION_TIME 1064000
/* this time is ~1ms - only wake tcs in any mode */
-#define RSC_BACKOFF_TIME_NS (TCS_CASE_EXECUTION_TIME + 100)
+#define RSC_BACKOFF_TIME_NS (SINGLE_TCS_EXECUTION_TIME + 100)
/* this time is ~1ms - only wake TCS in mode-0 */
-#define RSC_MODE_THRESHOLD_TIME_IN_NS ((TCS_CASE_EXECUTION_TIME >> 1) + 100)
+#define RSC_MODE_THRESHOLD_TIME_IN_NS (SINGLE_TCS_EXECUTION_TIME + 100)
/* this time is ~2ms - sleep+ wake TCS in mode-1 */
-#define RSC_TIME_SLOT_0_NS ((TCS_CASE_EXECUTION_TIME * 2) + 100)
+#define RSC_TIME_SLOT_0_NS ((SINGLE_TCS_EXECUTION_TIME * 2) + 100)
#define DEFAULT_PANEL_FPS 60
#define DEFAULT_PANEL_JITTER 5
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index 3332a05..e5ae0ad 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -206,7 +206,7 @@ static int rsc_hw_seq_memory_init(struct sde_rsc_priv *rsc)
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
0xa7e9a920, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x34,
- 0x002079e7, rsc->debug_mode);
+ 0x002089e7, rsc->debug_mode);
/* branch address */
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 7ba4508..ea36dc4 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+ /* disable mclk switching if the refresh is >120Hz, even if the
+ * blanking period would allow it
+ */
+ if (r600_dpm_get_vrefresh(rdev) > 120)
+ return true;
+
if (vblank_time < switch_limit)
return true;
else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index f6ff41a..edee6a5 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7416,7 +7416,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
@@ -7446,7 +7446,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0b6b576..6068b8a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4933,7 +4933,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
@@ -4964,7 +4964,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a951881..f2eac6b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3995,7 +3995,7 @@ static void r600_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e0c143b..30bd4a6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -97,9 +97,10 @@
* 2.46.0 - Add PFP_SYNC_ME support on evergreen
* 2.47.0 - Add UVD_NO_OP register support
* 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
+ * 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 48
+#define KMS_DRIVER_MINOR 49
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index deb9511..3168567 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
- args->vram_size = rdev->mc.real_vram_size;
- args->vram_visible = (u64)man->size << PAGE_SHIFT;
+ args->vram_size = (u64)man->size << PAGE_SHIFT;
+ args->vram_visible = rdev->mc.visible_vram_size;
args->vram_visible -= rdev->vram_pin_size;
args->gart_size = rdev->mc.gtt_size;
args->gart_size -= rdev->gart_pin_size;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 877af4a..3333e8a 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6330,7 +6330,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
@@ -6361,7 +6361,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 35cc16f..c18fc31 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1602,7 +1602,14 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait)
{
- long timeout = no_wait ? 0 : 15 * HZ;
+ long timeout = 15 * HZ;
+
+ if (no_wait) {
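+ /* no_wait: poll the fences and report -EBUSY instead of sleeping */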
+ if (reservation_object_test_signaled_rcu(bo->resv, true))
+ return 0;
+ else
+ return -EBUSY;
+ }
timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
interruptible, timeout);
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index dbacb20..58ef5ee 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -798,8 +798,19 @@
#define A6XX_GMU_CM3_CFG 0x1F82D
#define A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE 0x1F840
#define A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0 0x1F841
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1 0x1F842
#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L 0x1F844
#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H 0x1F845
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L 0x1F846
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H 0x1F847
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L 0x1F848
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H 0x1F849
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L 0x1F84A
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H 0x1F84B
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L 0x1F84C
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H 0x1F84D
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L 0x1F84E
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H 0x1F84F
#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL 0x1F8C0
#define A6XX_GMU_PWR_COL_INTER_FRAME_HYST 0x1F8C1
#define A6XX_GMU_PWR_COL_SPTPRAC_HYST 0x1F8C2
@@ -852,6 +863,7 @@
#define A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL 0x23B0A
#define A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL 0x23B0B
#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS 0x23B0C
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2 0x23B0D
#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK 0x23B0E
#define A6XX_GMU_AHB_FENCE_STATUS 0x23B13
#define A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x23B15
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 26c5505..7a6581c 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -641,6 +641,7 @@ enum adreno_regs {
ADRENO_REG_GMU_HOST2GMU_INTR_SET,
ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
+ ADRENO_REG_GPMU_POWER_COUNTER_ENABLE,
ADRENO_REG_REGISTER_MAX,
};
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 314ac85a..13c36e6 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -3017,6 +3017,8 @@ static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
A5XX_VBIF_XIN_HALT_CTRL1),
ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION,
A5XX_VBIF_VERSION),
+ ADRENO_REG_DEFINE(ADRENO_REG_GPMU_POWER_COUNTER_ENABLE,
+ A5XX_GPMU_POWER_COUNTER_ENABLE),
};
static const struct adreno_reg_offsets a5xx_reg_offsets = {
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 314b2d8..33854ea 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -1202,7 +1202,7 @@ static int a6xx_gfx_rail_on(struct kgsl_device *device)
OOB_BOOT_SLUMBER_CLEAR_MASK);
if (ret)
- dev_err(&gmu->pdev->dev, "OOB set after GMU booted timed out\n");
+ dev_err(&gmu->pdev->dev, "Boot OOB timed out\n");
return ret;
}
@@ -1222,6 +1222,9 @@ static int a6xx_notify_slumber(struct kgsl_device *device)
int perf_idx = gmu->num_gpupwrlevels - pwr->default_pwrlevel - 1;
int ret, state;
+ /* Disable the power counter so that the GMU is not busy */
+ kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+
if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
ret = hfi_notify_slumber(gmu, perf_idx, bus_level);
return ret;
@@ -1238,7 +1241,7 @@ static int a6xx_notify_slumber(struct kgsl_device *device)
a6xx_oob_clear(adreno_dev, OOB_BOOT_SLUMBER_CLEAR_MASK);
if (ret)
- dev_err(&gmu->pdev->dev, "OOB set for slumber timed out\n");
+ dev_err(&gmu->pdev->dev, "Notify slumber OOB timed out\n");
else {
kgsl_gmu_regread(device,
A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &state);
@@ -1286,6 +1289,9 @@ static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device)
/* Turn on the HM and SPTP head switches */
ret = a6xx_hm_sptprac_enable(device);
+ /* Enable the power counter because it was disabled before slumber */
+ kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+
return ret;
error_rsc:
dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
@@ -1466,7 +1472,7 @@ static int a6xx_gmu_dcvs_nohfi(struct kgsl_device *device,
OOB_DCVS_CLEAR_MASK);
if (ret) {
- dev_err(&gmu->pdev->dev, "OOB set after GMU booted timed out\n");
+ dev_err(&gmu->pdev->dev, "DCVS OOB timed out\n");
goto done;
}
@@ -1495,10 +1501,17 @@ static int a6xx_wait_for_gmu_idle(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct gmu_device *gmu = &device->gmu;
+ unsigned int status, status2;
if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
- dev_err(&gmu->pdev->dev, "GMU is not idling\n");
+ kgsl_gmu_regread(device,
+ A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &status);
+ kgsl_gmu_regread(device,
+ A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2, &status2);
+ dev_err(&gmu->pdev->dev,
+ "GMU not idling: status=0x%x, status2=0x%x\n",
+ status, status2);
return -ETIMEDOUT;
}
@@ -1566,6 +1579,8 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
unsigned int reg;
+ unsigned long time;
+ bool vbif_acked = false;
/*
* For the soft reset case with GMU enabled this part is done
@@ -1584,12 +1599,19 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev)
adreno_readreg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, &reg);
adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 0);
- /* Check VBIF status after reset */
- if (timed_poll_check(device,
- A6XX_RBBM_VBIF_GX_RESET_STATUS,
- VBIF_RESET_ACK_MASK,
- VBIF_RESET_ACK_TIMEOUT,
- VBIF_RESET_ACK_MASK))
+ /* Wait for the VBIF reset ack to complete */
+ time = jiffies + msecs_to_jiffies(VBIF_RESET_ACK_TIMEOUT);
+
+ do {
+ kgsl_regread(device, A6XX_RBBM_VBIF_GX_RESET_STATUS, &reg);
+ if ((reg & VBIF_RESET_ACK_MASK) == VBIF_RESET_ACK_MASK) {
+ vbif_acked = true;
+ break;
+ }
+ cpu_relax();
+ } while (!time_after(jiffies, time));
+
+ if (!vbif_acked)
return -ETIMEDOUT;
a6xx_sptprac_enable(adreno_dev);
@@ -2431,12 +2453,47 @@ static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
};
+static struct adreno_perfcount_register a6xx_pwrcounters_gpmu[] = {
+ /*
+ * A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0 is used for the GPU
+ * busy count (see the PWR group above). Mark it as broken
+ * so it's not re-used.
+ */
+ { KGSL_PERFCOUNTER_BROKEN, 0, 0,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H, -1,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H, -1,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H, -1,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H, -1,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H, -1,
+ A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
+};
+
#define A6XX_PERFCOUNTER_GROUP(offset, name) \
ADRENO_PERFCOUNTER_GROUP(a6xx, offset, name)
#define A6XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, flags)
+#define A6XX_POWER_COUNTER_GROUP(offset, name) \
+ ADRENO_POWER_COUNTER_GROUP(a6xx, offset, name)
+
static struct adreno_perfcount_group a6xx_perfcounter_groups
[KGSL_PERFCOUNTER_GROUP_MAX] = {
A6XX_PERFCOUNTER_GROUP(CP, cp),
@@ -2462,6 +2519,7 @@ static struct adreno_perfcount_group a6xx_perfcounter_groups
ADRENO_PERFCOUNTER_GROUP_FIXED),
A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
ADRENO_PERFCOUNTER_GROUP_FIXED),
+ A6XX_POWER_COUNTER_GROUP(GPMU, gpmu),
};
static struct adreno_perfcounters a6xx_perfcounters = {
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index bca3dd0..54acd73 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -206,8 +206,38 @@ static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
};
static const unsigned int a6xx_gmu_registers[] = {
- /* GMU */
+ /* GMU GX */
+ 0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
+ 0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
+ 0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
+ 0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
+ 0x1A900, 0x1A92B, 0x1A940, 0x1A940,
+ /* GMU TCM */
0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
+ /* GMU CX */
+ 0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
+ 0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
+ 0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
+ 0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
+ 0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
+ 0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
+ 0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
+ 0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
+ 0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA03,
+ /* GPU RSCC */
+ 0x23740, 0x23742, 0x23744, 0x23747, 0x2374C, 0x23787, 0x237EC, 0x237EF,
+ 0x237F4, 0x2382F, 0x23894, 0x23897, 0x2389C, 0x238D7, 0x2393C, 0x2393F,
+ 0x23944, 0x2397F,
+ /* GMU AO */
+ 0x23B00, 0x23B16, 0x23C00, 0x23C00,
+ /* GPU CC */
+ 0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
+ 0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
+ 0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
+ 0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
+ 0x26000, 0x26002,
+ /* GPU CC ACD */
+ 0x26400, 0x26416, 0x26420, 0x26427,
};
static const struct adreno_vbif_snapshot_registers
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index cd95003..0da4da9 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -654,7 +654,7 @@ static void _power_counter_enable_gpmu(struct adreno_device *adreno_dev,
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_perfcount_register *reg;
- unsigned int shift = counter << 3;
+ unsigned int shift = (counter << 3) % (sizeof(unsigned int) * 8);
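+ /* each select register packs four 8-bit countable fields, so the shift above wraps every four counters */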
if (adreno_is_a530(adreno_dev)) {
if (countable > 43)
@@ -662,13 +662,16 @@ static void _power_counter_enable_gpmu(struct adreno_device *adreno_dev,
} else if (adreno_is_a540(adreno_dev)) {
if (countable > 47)
return;
+ } else if (adreno_is_a6xx(adreno_dev)) {
+ if (countable > 34)
+ return;
} else
/* return on platforms that have no GPMU */
return;
reg = &counters->groups[group].regs[counter];
kgsl_regrmw(device, reg->select, 0xff << shift, countable << shift);
- kgsl_regwrite(device, A5XX_GPMU_POWER_COUNTER_ENABLE, 1);
+ adreno_writereg(adreno_dev, ADRENO_REG_GPMU_POWER_COUNTER_ENABLE, 1);
reg->value = 0;
}
@@ -684,7 +687,7 @@ static void _power_counter_enable_default(struct adreno_device *adreno_dev,
reg = &counters->groups[group].regs[counter];
kgsl_regwrite(device, reg->select, countable);
- kgsl_regwrite(device, A5XX_GPMU_POWER_COUNTER_ENABLE, 1);
+ adreno_writereg(adreno_dev, ADRENO_REG_GPMU_POWER_COUNTER_ENABLE, 1);
reg->value = 0;
}
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index f72b3fa..f87e4da 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1477,8 +1477,7 @@ void gmu_stop(struct kgsl_device *device)
if (!idle || (gpudev->wait_for_gmu_idle &&
gpudev->wait_for_gmu_idle(adreno_dev))) {
- dev_err(&gmu->pdev->dev, "Failure to stop GMU");
- return;
+ dev_err(&gmu->pdev->dev, "Stopping GMU before it is idle\n");
}
/* Pending message in all queues are abandoned */
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 7f8ff39..e46f656 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -28,6 +28,8 @@
#define UHID_NAME "uhid"
#define UHID_BUFSIZE 32
+static DEFINE_MUTEX(uhid_open_mutex);
+
struct uhid_device {
struct mutex devlock;
bool running;
@@ -142,15 +144,26 @@ static void uhid_hid_stop(struct hid_device *hid)
static int uhid_hid_open(struct hid_device *hid)
{
struct uhid_device *uhid = hid->driver_data;
+ int retval = 0;
- return uhid_queue_event(uhid, UHID_OPEN);
+ mutex_lock(&uhid_open_mutex);
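+ /* only the first opener forwards UHID_OPEN to userspace */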
+ if (!hid->open++) {
+ retval = uhid_queue_event(uhid, UHID_OPEN);
+ if (retval)
+ hid->open--;
+ }
+ mutex_unlock(&uhid_open_mutex);
+ return retval;
}
static void uhid_hid_close(struct hid_device *hid)
{
struct uhid_device *uhid = hid->driver_data;
- uhid_queue_event(uhid, UHID_CLOSE);
+ mutex_lock(&uhid_open_mutex);
+ if (!--hid->open)
+ uhid_queue_event(uhid, UHID_CLOSE);
+ mutex_unlock(&uhid_open_mutex);
}
static int uhid_hid_parse(struct hid_device *hid)
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 0e07a76..c6a922e 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1400,37 +1400,38 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
{
unsigned char *data = wacom->data;
- if (wacom->pen_input)
+ if (wacom->pen_input) {
dev_dbg(wacom->pen_input->dev.parent,
"%s: received report #%d\n", __func__, data[0]);
- else if (wacom->touch_input)
+
+ if (len == WACOM_PKGLEN_PENABLED ||
+ data[0] == WACOM_REPORT_PENABLED)
+ return wacom_tpc_pen(wacom);
+ }
+ else if (wacom->touch_input) {
dev_dbg(wacom->touch_input->dev.parent,
"%s: received report #%d\n", __func__, data[0]);
- switch (len) {
- case WACOM_PKGLEN_TPC1FG:
- return wacom_tpc_single_touch(wacom, len);
-
- case WACOM_PKGLEN_TPC2FG:
- return wacom_tpc_mt_touch(wacom);
-
- case WACOM_PKGLEN_PENABLED:
- return wacom_tpc_pen(wacom);
-
- default:
- switch (data[0]) {
- case WACOM_REPORT_TPC1FG:
- case WACOM_REPORT_TPCHID:
- case WACOM_REPORT_TPCST:
- case WACOM_REPORT_TPC1FGE:
+ switch (len) {
+ case WACOM_PKGLEN_TPC1FG:
return wacom_tpc_single_touch(wacom, len);
- case WACOM_REPORT_TPCMT:
- case WACOM_REPORT_TPCMT2:
- return wacom_mt_touch(wacom);
+ case WACOM_PKGLEN_TPC2FG:
+ return wacom_tpc_mt_touch(wacom);
- case WACOM_REPORT_PENABLED:
- return wacom_tpc_pen(wacom);
+ default:
+ switch (data[0]) {
+ case WACOM_REPORT_TPC1FG:
+ case WACOM_REPORT_TPCHID:
+ case WACOM_REPORT_TPCST:
+ case WACOM_REPORT_TPC1FGE:
+ return wacom_tpc_single_touch(wacom, len);
+
+ case WACOM_REPORT_TPCMT:
+ case WACOM_REPORT_TPCMT2:
+ return wacom_mt_touch(wacom);
+
+ }
}
}
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
index 8a57ed2..d26e0d0 100644
--- a/drivers/hwtracing/coresight/coresight-cti.c
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -64,7 +64,7 @@ do { \
#define ITCHIN (0xEF4)
#define ITTRIGIN (0xEF8)
-#define CTI_MAX_TRIGGERS (8)
+#define CTI_MAX_TRIGGERS (32)
#define CTI_MAX_CHANNELS (4)
#define AFFINITY_LEVEL_L2 1
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 0ed77ee..a2e3dd7 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -178,22 +178,39 @@ static int usb_read(struct i2c_adapter *adapter, int cmd,
int value, int index, void *data, int len)
{
struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
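+ /* usb_control_msg() needs a DMA-capable buffer, so bounce the data through kmalloc'd memory */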
+ void *dmadata = kmalloc(len, GFP_KERNEL);
+ int ret;
+
+ if (!dmadata)
+ return -ENOMEM;
/* do control transfer */
- return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
+ ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
- USB_DIR_IN, value, index, data, len, 2000);
+ USB_DIR_IN, value, index, dmadata, len, 2000);
+
+ memcpy(data, dmadata, len);
+ kfree(dmadata);
+ return ret;
}
static int usb_write(struct i2c_adapter *adapter, int cmd,
int value, int index, void *data, int len)
{
struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+ void *dmadata = kmemdup(data, len, GFP_KERNEL);
+ int ret;
+
+ if (!dmadata)
+ return -ENOMEM;
/* do control transfer */
- return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
+ ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- value, index, data, len, 2000);
+ value, index, dmadata, len, 2000);
+
+ kfree(dmadata);
+ return ret;
}
static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev)
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 83198a8..4bd5b5c 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2366,8 +2366,11 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
ret = hfi1_rvt_get_rwqe(qp, 1);
if (ret < 0)
goto nack_op_err;
- if (!ret)
+ if (!ret) {
+ /* peer will send again */
+ rvt_put_ss(&qp->r_sge);
goto rnr_nak;
+ }
wc.ex.imm_data = ohdr->u.rc.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
goto send_last;
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 2097512..f3fe787 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -2067,8 +2067,10 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
ret = qib_get_rwqe(qp, 1);
if (ret < 0)
goto nack_op_err;
- if (!ret)
+ if (!ret) {
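+ /* peer will send again */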
+ rvt_put_ss(&qp->r_sge);
goto rnr_nak;
+ }
wc.ex.imm_data = ohdr->u.rc.imm_data;
hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index b91a6b5..cb9726e 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -166,6 +166,7 @@
#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
#define SMR_VALID (1 << 31)
#define SMR_MASK_SHIFT 16
+#define SMR_MASK_MASK 0x7FFF
#define SMR_ID_SHIFT 0
#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
@@ -335,10 +336,12 @@ struct arm_smmu_s2cr {
enum arm_smmu_s2cr_type type;
enum arm_smmu_s2cr_privcfg privcfg;
u8 cbndx;
+ bool cb_handoff;
};
#define s2cr_init_val (struct arm_smmu_s2cr){ \
.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
+ .cb_handoff = false, \
}
struct arm_smmu_smr {
@@ -409,7 +412,6 @@ struct arm_smmu_device {
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF (1 << 1)
-#define ARM_SMMU_OPT_SKIP_INIT (1 << 2)
#define ARM_SMMU_OPT_DYNAMIC (1 << 3)
#define ARM_SMMU_OPT_3LVL_TABLES (1 << 4)
u32 options;
@@ -528,7 +530,6 @@ static bool using_legacy_binding, using_generic_binding;
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
- { ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
{ 0, NULL},
@@ -548,8 +549,15 @@ static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);
static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);
+static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
+ dma_addr_t iova);
+
static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);
+static int arm_smmu_alloc_cb(struct iommu_domain *domain,
+ struct arm_smmu_device *smmu,
+ struct device *dev);
+
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
@@ -1612,14 +1620,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (is_iommu_pt_coherent(smmu_domain))
quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
- /* Dynamic domains must set cbndx through domain attribute */
- if (!dynamic) {
- ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
- smmu->num_context_banks);
- if (ret < 0)
- goto out_unlock;
- cfg->cbndx = ret;
- }
+ ret = arm_smmu_alloc_cb(domain, smmu, dev);
+ if (ret < 0)
+ goto out_unlock;
+ cfg->cbndx = ret;
+
if (smmu->version < ARM_SMMU_V2) {
cfg->irptndx = atomic_inc_return(&smmu->irptndx);
cfg->irptndx %= smmu->num_context_irqs;
@@ -2189,6 +2194,23 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
+static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ uint64_t ret;
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+ if (!ops)
+ return 0;
+
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ ret = ops->iova_to_pte(ops, iova);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ return ret;
+}
+
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size)
{
@@ -2223,14 +2245,18 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return ret;
}
+#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
int ret;
- size_t size;
+ size_t size, batch_size, size_to_unmap = 0;
unsigned long flags;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ unsigned int idx_start, idx_end;
+ struct scatterlist *sg_start, *sg_end;
+ unsigned long __saved_iova_start;
if (!ops)
return -ENODEV;
@@ -2239,17 +2265,45 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
if (ret)
return ret;
- spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
- ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
- spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ __saved_iova_start = iova;
+ idx_start = idx_end = 0;
+ sg_start = sg_end = sg;
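+ /* map the scatterlist in batches of roughly MAX_MAP_SG_BATCH_SIZE so pgtbl_lock is not held across the entire list */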
+ while (idx_end < nents) {
+ batch_size = sg_end->length;
+ sg_end = sg_next(sg_end);
+ idx_end++;
+ while ((idx_end < nents) &&
+ (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
- if (!ret)
- arm_smmu_unmap(domain, iova, size);
+ batch_size += sg_end->length;
+ sg_end = sg_next(sg_end);
+ idx_end++;
+ }
- arm_smmu_domain_power_off(domain, smmu_domain->smmu);
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
+ prot, &size);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ /* Returns 0 on error */
+ if (!ret) {
+ size_to_unmap = iova + size - __saved_iova_start;
+ goto out;
+ }
+
+ iova += batch_size;
+ idx_start = idx_end;
+ sg_start = sg_end;
+ }
+
+out:
arm_smmu_assign_table(smmu_domain);
- return ret;
+ if (size_to_unmap) {
+ arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
+ iova = __saved_iova_start;
+ }
+ arm_smmu_domain_power_off(domain, smmu_domain->smmu);
+ return iova - __saved_iova_start;
}
static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
@@ -2976,6 +3030,7 @@ static struct iommu_ops arm_smmu_ops = {
.enable_config_clocks = arm_smmu_enable_config_clocks,
.disable_config_clocks = arm_smmu_disable_config_clocks,
.is_iova_coherent = arm_smmu_is_iova_coherent,
+ .iova_to_pte = arm_smmu_iova_to_pte,
};
#define IMPL_DEF1_MICRO_MMU_CTRL 0
@@ -3166,12 +3221,10 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
* Reset stream mapping groups: Initial values mark all SMRn as
* invalid and all S2CRn as bypass unless overridden.
*/
- if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
- for (i = 0; i < smmu->num_mapping_groups; ++i)
- arm_smmu_write_sme(smmu, i);
+ for (i = 0; i < smmu->num_mapping_groups; ++i)
+ arm_smmu_write_sme(smmu, i);
- arm_smmu_context_bank_reset(smmu);
- }
+ arm_smmu_context_bank_reset(smmu);
/* Invalidate the TLB, just in case */
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
@@ -3228,6 +3281,92 @@ static int arm_smmu_id_size_to_bits(int size)
}
}
+
+/*
+ * Some context banks need to be transferred from the bootloader to HLOS in a way
+ * that allows ongoing traffic. The current expectation is that these context
+ * banks operate in bypass mode.
+ * Additionally, there must be exactly one device in devicetree with stream-ids
+ * overlapping those used by the bootloader.
+ */
+static int arm_smmu_alloc_cb(struct iommu_domain *domain,
+ struct arm_smmu_device *smmu,
+ struct device *dev)
+{
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ u32 i, idx;
+ int cb = -EINVAL;
+ bool dynamic;
+
+ /* Dynamic domains must set cbndx through domain attribute */
+ dynamic = is_dynamic_domain(domain);
+ if (dynamic)
+ return INVALID_CBNDX;
+
+ mutex_lock(&smmu->stream_map_mutex);
+ for_each_cfg_sme(fwspec, i, idx) {
+ if (smmu->s2crs[idx].cb_handoff)
+ cb = smmu->s2crs[idx].cbndx;
+ }
+
+ if (cb < 0) {
+ mutex_unlock(&smmu->stream_map_mutex);
+ return __arm_smmu_alloc_bitmap(smmu->context_map,
+ smmu->num_s2_context_banks,
+ smmu->num_context_banks);
+ }
+
+ for (i = 0; i < smmu->num_mapping_groups; i++) {
+ if (smmu->s2crs[i].cbndx == cb) {
+ smmu->s2crs[i].cbndx = 0;
+ smmu->s2crs[i].cb_handoff = false;
+ smmu->s2crs[i].count -= 1;
+ }
+ }
+ mutex_unlock(&smmu->stream_map_mutex);
+
+ return cb;
+}
+
+static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
+{
+ u32 i, raw_smr, raw_s2cr;
+ struct arm_smmu_smr smr;
+ struct arm_smmu_s2cr s2cr;
+
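+ /* record bootloader-programmed translation entries and reserve their context banks for handoff */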
+ for (i = 0; i < smmu->num_mapping_groups; i++) {
+ raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
+ ARM_SMMU_GR0_SMR(i));
+ if (!(raw_smr & SMR_VALID))
+ continue;
+
+ smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
+ smr.id = (u16)raw_smr;
+ smr.valid = true;
+
+ raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
+ ARM_SMMU_GR0_S2CR(i));
+ s2cr.group = NULL;
+ s2cr.count = 1;
+ s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
+ s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
+ S2CR_PRIVCFG_MASK;
+ s2cr.cbndx = (u8)raw_s2cr;
+ s2cr.cb_handoff = true;
+
+ if (s2cr.type != S2CR_TYPE_TRANS)
+ continue;
+
+ smmu->smrs[i] = smr;
+ smmu->s2crs[i] = s2cr;
+ bitmap_set(smmu->context_map, s2cr.cbndx, 1);
+ dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
+ raw_smr, raw_s2cr, s2cr.cbndx);
+ }
+
+ return 0;
+}
+
static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
{
struct device *dev = smmu->dev;
@@ -3487,6 +3626,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
smmu->streamid_mask = size - 1;
if (id & ID0_SMS) {
u32 smr;
+ int i;
smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
@@ -3501,14 +3641,25 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
* bits are set, so check each one separately. We can reject
* masters later if they try to claim IDs outside these masks.
*/
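+ /* probe the ID/mask widths on the first SMR the bootloader left invalid, so handed-off entries are not clobbered */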
+ for (i = 0; i < size; i++) {
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
+ if (!(smr & SMR_VALID))
+ break;
+ }
+ if (i == size) {
+ dev_err(smmu->dev,
+ "Unable to compute streamid_masks\n");
+ return -ENODEV;
+ }
+
smr = smmu->streamid_mask << SMR_ID_SHIFT;
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
smmu->streamid_mask = smr >> SMR_ID_SHIFT;
smr = smmu->streamid_mask << SMR_MASK_SHIFT;
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
/* Zero-initialised to mark as invalid */
@@ -3800,6 +3951,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (err)
goto out_power_off;
+ err = arm_smmu_handoff_cbs(smmu);
+ if (err)
+ goto out_power_off;
+
err = arm_smmu_parse_impl_def_registers(smmu);
if (err)
goto out_power_off;
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index ac3059d..560bb43 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -765,6 +765,51 @@ static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized(
return ERR_PTR(-ENOMEM);
}
+/*
+ * Based off of similar code from dma-iommu.c, but modified to use a different
+ * iova allocator
+ */
+static void fast_smmu_reserve_pci_windows(struct device *dev,
+ struct dma_fast_smmu_mapping *mapping)
+{
+ struct pci_host_bridge *bridge;
+ struct resource_entry *window;
+ phys_addr_t start, end;
+ struct pci_dev *pci_dev;
+ unsigned long flags;
+
+ if (!dev_is_pci(dev))
+ return;
+
+ pci_dev = to_pci_dev(dev);
+ bridge = pci_find_host_bridge(pci_dev->bus);
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ resource_list_for_each_entry(window, &bridge->windows) {
+ if (resource_type(window->res) != IORESOURCE_MEM &&
+ resource_type(window->res) != IORESOURCE_IO)
+ continue;
+
+ start = round_down(window->res->start - window->offset,
+ FAST_PAGE_SIZE);
+ end = round_up(window->res->end - window->offset,
+ FAST_PAGE_SIZE);
+ start = max_t(unsigned long, mapping->base, start);
+ end = min_t(unsigned long, mapping->base + mapping->size, end);
+ if (start >= end)
+ continue;
+
+ dev_dbg(dev, "iova allocator reserved 0x%pa-0x%pa\n",
+ &start, &end);
+
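+ /* mark these pages used in the allocator bitmap so iovas never overlap the PCI window */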
+ start = (start - mapping->base) >> FAST_PAGE_SHIFT;
+ end = (end - mapping->base) >> FAST_PAGE_SHIFT;
+ bitmap_set(mapping->bitmap, start, end - start);
+ }
+ spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
/**
* fast_smmu_attach_device
* @dev: valid struct device pointer
@@ -798,6 +843,8 @@ int fast_smmu_attach_device(struct device *dev,
mapping->fast->domain = domain;
mapping->fast->dev = dev;
+ fast_smmu_reserve_pci_windows(dev, mapping->fast);
+
group = dev->iommu_group;
if (!group) {
dev_err(dev, "No iommu associated with device\n");
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f7739ae..ea72b9c 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -735,7 +735,8 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
arm_lpae_iopte *table_base = table;
int tl_offset = ARM_LPAE_LVL_IDX(iova, lvl + 1, data);
int entry_size = ARM_LPAE_GRANULE(data);
- int max_entries = ARM_LPAE_BLOCK_SIZE(lvl, data) / entry_size;
+ int max_entries = ARM_LPAE_BLOCK_SIZE(lvl, data) >>
+ data->pg_shift;
int entries = min_t(int, size / entry_size,
max_entries - tl_offset);
int table_len = entries * sizeof(*table);
@@ -854,6 +855,19 @@ static int arm_lpae_iova_to_pte(struct arm_lpae_io_pgtable *data,
return 0;
}
+static uint64_t arm_lpae_iova_get_pte(struct io_pgtable_ops *ops,
+ unsigned long iova)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ arm_lpae_iopte pte;
+ int lvl;
+
+ if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte))
+ return pte;
+
+ return 0;
+}
+
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
unsigned long iova)
{
@@ -983,6 +997,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
.unmap = arm_lpae_unmap,
.iova_to_phys = arm_lpae_iova_to_phys,
.is_iova_coherent = arm_lpae_is_iova_coherent,
+ .iova_to_pte = arm_lpae_iova_get_pte,
};
return data;
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 1599121..a686ad0 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -149,6 +149,8 @@ struct io_pgtable_ops {
unsigned long iova);
bool (*is_iova_coherent)(struct io_pgtable_ops *ops,
unsigned long iova);
+ uint64_t (*iova_to_pte)(struct io_pgtable_ops *ops,
+ unsigned long iova);
};
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 0c49a64..6bb435b 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -154,6 +154,7 @@ void iommu_debug_domain_remove(struct iommu_domain *domain)
static LIST_HEAD(iommu_debug_devices);
static struct dentry *debugfs_tests_dir;
static u32 iters_per_op = 1;
+static void *test_virt_addr;
struct iommu_debug_device {
struct device *dev;
@@ -1207,6 +1208,68 @@ static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
return -EIO;
}
+static ssize_t __iommu_debug_dma_attach_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ struct dma_iommu_mapping *dma_mapping;
+ ssize_t retval = -EINVAL;
+ int val;
+
+ if (kstrtoint_from_user(ubuf, count, 0, &val)) {
+ pr_err("Invalid format. Expected a hex or decimal integer");
+ retval = -EFAULT;
+ goto out;
+ }
+
+ if (val) {
+ if (dev->archdata.mapping)
+ if (dev->archdata.mapping->domain) {
+ pr_err("Already attached.\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ if (WARN(dev->archdata.iommu,
+ "Attachment tracking out of sync with device\n")) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ dma_mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+ (SZ_1G * 4ULL));
+
+ if (!dma_mapping)
+ goto out;
+
+ if (arm_iommu_attach_device(dev, dma_mapping))
+ goto out_release_mapping;
+ pr_err("Attached\n");
+ } else {
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(dev->archdata.mapping);
+ pr_err("Detached\n");
+ }
+ retval = count;
+ return retval;
+
+out_release_mapping:
+ arm_iommu_release_mapping(dma_mapping);
+out:
+ return retval;
+}
+
static ssize_t __iommu_debug_attach_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset,
@@ -1260,6 +1323,81 @@ static ssize_t __iommu_debug_attach_write(struct file *file,
return retval;
}
+static ssize_t iommu_debug_dma_attach_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ return __iommu_debug_dma_attach_write(file, ubuf, count, offset);
+
+}
+
+static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ char c[2];
+
+ if (*offset)
+ return 0;
+
+ if (!dev->archdata.mapping)
+ c[0] = '0';
+ else
+ c[0] = dev->archdata.mapping->domain ? '1' : '0';
+
+ c[1] = '\n';
+ if (copy_to_user(ubuf, &c, 2)) {
+ pr_err("copy_to_user failed\n");
+ return -EFAULT;
+ }
+ *offset = 1; /* non-zero means we're done */
+
+ return 2;
+}
+
+static const struct file_operations iommu_debug_dma_attach_fops = {
+ .open = simple_open,
+ .write = iommu_debug_dma_attach_write,
+ .read = iommu_debug_dma_attach_read,
+};
+
+static ssize_t iommu_debug_test_virt_addr_read(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ char buf[100];
+ ssize_t retval;
+ size_t buflen;
+ int buf_len = sizeof(buf);
+
+ if (*offset)
+ return 0;
+
+ memset(buf, 0, buf_len);
+
+ if (!test_virt_addr)
+ strlcpy(buf, "FAIL\n", buf_len);
+ else
+ snprintf(buf, buf_len, "0x%pK\n", test_virt_addr);
+
+ buflen = strlen(buf);
+ if (copy_to_user(ubuf, buf, buflen)) {
+ pr_err("Couldn't copy_to_user\n");
+ retval = -EFAULT;
+ } else {
+ *offset = 1; /* non-zero means we're done */
+ retval = buflen;
+ }
+
+ return retval;
+}
+
+static const struct file_operations iommu_debug_test_virt_addr_fops = {
+ .open = simple_open,
+ .read = iommu_debug_test_virt_addr_read,
+};
+
static ssize_t iommu_debug_attach_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset)
@@ -1309,6 +1447,75 @@ static const struct file_operations iommu_debug_secure_attach_fops = {
.read = iommu_debug_attach_read,
};
+static ssize_t iommu_debug_pte_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ dma_addr_t iova;
+
+ if (kstrtox_from_user(ubuf, count, 0, &iova)) {
+ pr_err("Invalid format for iova\n");
+ ddev->iova = 0;
+ return -EINVAL;
+ }
+
+ ddev->iova = iova;
+ pr_err("Saved iova=%pa for future PTE commands\n", &iova);
+ return count;
+}
+
+static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ uint64_t pte;
+ char buf[100];
+ ssize_t retval;
+ size_t buflen;
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ return -EINVAL;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ return -EINVAL;
+ }
+
+ if (*offset)
+ return 0;
+
+ memset(buf, 0, sizeof(buf));
+
+ pte = iommu_iova_to_pte(dev->archdata.mapping->domain,
+ ddev->iova);
+
+ if (!pte)
+ strlcpy(buf, "FAIL\n", sizeof(buf));
+ else
+ snprintf(buf, sizeof(buf), "pte=%016llx\n", pte);
+
+ buflen = strlen(buf);
+ if (copy_to_user(ubuf, buf, buflen)) {
+ pr_err("Couldn't copy_to_user\n");
+ retval = -EFAULT;
+ } else {
+ *offset = 1; /* non-zero means we're done */
+ retval = buflen;
+ }
+
+ return retval;
+}
+
+static const struct file_operations iommu_debug_pte_fops = {
+ .open = simple_open,
+ .write = iommu_debug_pte_write,
+ .read = iommu_debug_pte_read,
+};
+
static ssize_t iommu_debug_atos_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset)
@@ -1370,6 +1577,55 @@ static const struct file_operations iommu_debug_atos_fops = {
.read = iommu_debug_atos_read,
};
+static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ phys_addr_t phys;
+ char buf[100];
+ ssize_t retval;
+ size_t buflen;
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ return -EINVAL;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ return -EINVAL;
+ }
+
+ if (*offset)
+ return 0;
+
+ memset(buf, 0, sizeof(buf));
+
+ phys = iommu_iova_to_phys_hard(dev->archdata.mapping->domain,
+ ddev->iova);
+ if (!phys)
+ strlcpy(buf, "FAIL\n", sizeof(buf));
+ else
+ snprintf(buf, sizeof(buf), "%pa\n", &phys);
+
+ buflen = strlen(buf);
+ if (copy_to_user(ubuf, buf, buflen)) {
+ pr_err("Couldn't copy_to_user\n");
+ retval = -EFAULT;
+ } else {
+ *offset = 1; /* non-zero means we're done */
+ retval = buflen;
+ }
+
+ return retval;
+}
+
+static const struct file_operations iommu_debug_dma_atos_fops = {
+ .open = simple_open,
+ .write = iommu_debug_atos_write,
+ .read = iommu_debug_dma_atos_read,
+};
+
static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *offset)
{
@@ -1450,6 +1706,159 @@ static const struct file_operations iommu_debug_map_fops = {
.write = iommu_debug_map_write,
};
+/*
+ * Performs DMA mapping of a given virtual address and size to an iova address.
+ * User input format: (addr,len,dma attr) where dma attr is:
+ * 0: normal mapping
+ * 1: force coherent mapping
+ * 2: force non-coherent mapping
+ * 3: use system cache
+ */
+static ssize_t iommu_debug_dma_map_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *offset)
+{
+ ssize_t retval = -EINVAL;
+ char *comma1, *comma2;
+ char buf[100];
+ unsigned long addr;
+ void *v_addr;
+ dma_addr_t iova;
+ size_t size;
+ unsigned int attr;
+ unsigned long dma_attrs;
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+
+ if (count >= sizeof(buf)) {
+ pr_err("Value too large\n");
+ return -EINVAL;
+ }
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ memset(buf, 0, sizeof(buf));
+
+ if (copy_from_user(buf, ubuf, count)) {
+ pr_err("Couldn't copy from user\n");
+ retval = -EFAULT;
+ goto out;
+ }
+
+ comma1 = strnchr(buf, count, ',');
+ if (!comma1)
+ goto invalid_format;
+
+ comma2 = strnchr(comma1 + 1, count, ',');
+ if (!comma2)
+ goto invalid_format;
+
+ *comma1 = *comma2 = '\0';
+
+ if (kstrtoul(buf, 0, &addr))
+ goto invalid_format;
+ v_addr = (void *)addr;
+
+ if (kstrtosize_t(comma1 + 1, 0, &size))
+ goto invalid_format;
+
+ if (kstrtouint(comma2 + 1, 0, &attr))
+ goto invalid_format;
+
+ if (v_addr < test_virt_addr || v_addr > (test_virt_addr + SZ_1M - 1))
+ goto invalid_addr;
+
+ if (attr == 0)
+ dma_attrs = 0;
+ else if (attr == 1)
+ dma_attrs = DMA_ATTR_FORCE_COHERENT;
+ else if (attr == 2)
+ dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
+ else if (attr == 3)
+ dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+ else
+ goto invalid_format;
+
+ iova = dma_map_single_attrs(dev, v_addr, size,
+ DMA_TO_DEVICE, dma_attrs);
+
+ if (dma_mapping_error(dev, iova)) {
+ pr_err("Failed to perform dma_map_single\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ retval = count;
+ pr_err("Mapped 0x%p to %pa (len=0x%zx)\n",
+ v_addr, &iova, size);
+ ddev->iova = iova;
+ pr_err("Saved iova=%pa for future PTE commands\n", &iova);
+out:
+ return retval;
+
+invalid_format:
+ pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n3: use system cache\n");
+ return retval;
+
+invalid_addr:
+ pr_err("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
+ return retval;
+}
+
+static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ char buf[100];
+ ssize_t retval;
+ size_t buflen;
+ dma_addr_t iova;
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ return -EINVAL;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ return -EINVAL;
+ }
+
+ if (*offset)
+ return 0;
+
+ memset(buf, 0, sizeof(buf));
+
+ iova = ddev->iova;
+ snprintf(buf, sizeof(buf), "%pa\n", &iova);
+
+ buflen = strlen(buf);
+ if (copy_to_user(ubuf, buf, buflen)) {
+ pr_err("Couldn't copy_to_user\n");
+ retval = -EFAULT;
+ } else {
+ *offset = 1; /* non-zero means we're done */
+ retval = buflen;
+ }
+
+ return retval;
+}
+
+static const struct file_operations iommu_debug_dma_map_fops = {
+ .open = simple_open,
+ .write = iommu_debug_dma_map_write,
+ .read = iommu_debug_dma_map_read,
+};
+
static ssize_t iommu_debug_unmap_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset)
@@ -1515,6 +1924,91 @@ static const struct file_operations iommu_debug_unmap_fops = {
.write = iommu_debug_unmap_write,
};
+static ssize_t iommu_debug_dma_unmap_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ ssize_t retval = -EINVAL;
+ char *comma1, *comma2;
+ char buf[100];
+ size_t size;
+ unsigned int attr;
+ dma_addr_t iova;
+ unsigned long dma_attrs;
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+
+ if (count >= sizeof(buf)) {
+ pr_err("Value too large\n");
+ return -EINVAL;
+ }
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ memset(buf, 0, sizeof(buf));
+
+ if (copy_from_user(buf, ubuf, count)) {
+ pr_err("Couldn't copy from user\n");
+ retval = -EFAULT;
+ goto out;
+ }
+
+ comma1 = strnchr(buf, count, ',');
+ if (!comma1)
+ goto invalid_format;
+
+ comma2 = strnchr(comma1 + 1, count, ',');
+ if (!comma2)
+ goto invalid_format;
+
+ *comma1 = *comma2 = '\0';
+
+ if (kstrtoux(buf, 0, &iova))
+ goto invalid_format;
+
+ if (kstrtosize_t(comma1 + 1, 0, &size))
+ goto invalid_format;
+
+ if (kstrtouint(comma2 + 1, 0, &attr))
+ goto invalid_format;
+
+ if (attr == 0)
+ dma_attrs = 0;
+ else if (attr == 1)
+ dma_attrs = DMA_ATTR_FORCE_COHERENT;
+ else if (attr == 2)
+ dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
+ else if (attr == 3)
+ dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+ else
+ goto invalid_format;
+
+ dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);
+
+ retval = count;
+ pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
+out:
+ return retval;
+
+invalid_format:
+ pr_err("Invalid format. Expected: iova,len, dma attr\n");
+ return retval;
+}
+
+static const struct file_operations iommu_debug_dma_unmap_fops = {
+ .open = simple_open,
+ .write = iommu_debug_dma_unmap_write,
+};
+
static ssize_t iommu_debug_config_clocks_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset)
@@ -1624,6 +2118,13 @@ static int snarf_iommu_devices(struct device *dev, void *ignored)
goto err_rmdir;
}
+ if (!debugfs_create_file("test_virt_addr", 0400, dir, ddev,
+ &iommu_debug_test_virt_addr_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/test_virt_addr debugfs file\n",
+ dev_name(dev));
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
&iommu_debug_profiling_fops)) {
pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
@@ -1666,6 +2167,13 @@ static int snarf_iommu_devices(struct device *dev, void *ignored)
goto err_rmdir;
}
+ if (!debugfs_create_file("dma_attach", 0600, dir, ddev,
+ &iommu_debug_dma_attach_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/dma_attach debugfs file\n",
+ dev_name(dev));
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
&iommu_debug_attach_fops)) {
pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
@@ -1687,6 +2195,13 @@ static int snarf_iommu_devices(struct device *dev, void *ignored)
goto err_rmdir;
}
+ if (!debugfs_create_file("dma_atos", 0600, dir, ddev,
+ &iommu_debug_dma_atos_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/dma_atos debugfs file\n",
+ dev_name(dev));
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
&iommu_debug_map_fops)) {
pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
@@ -1694,6 +2209,13 @@ static int snarf_iommu_devices(struct device *dev, void *ignored)
goto err_rmdir;
}
+ if (!debugfs_create_file("dma_map", 0600, dir, ddev,
+ &iommu_debug_dma_map_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/dma_map debugfs file\n",
+ dev_name(dev));
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
&iommu_debug_unmap_fops)) {
pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
@@ -1701,6 +2223,20 @@ static int snarf_iommu_devices(struct device *dev, void *ignored)
goto err_rmdir;
}
+ if (!debugfs_create_file("dma_unmap", 0200, dir, ddev,
+ &iommu_debug_dma_unmap_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/dma_unmap debugfs file\n",
+ dev_name(dev));
+ goto err_rmdir;
+ }
+
+ if (!debugfs_create_file("pte", 0600, dir, ddev,
+ &iommu_debug_pte_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/pte debugfs file\n",
+ dev_name(dev));
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
&iommu_debug_config_clocks_fops)) {
pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
@@ -1734,6 +2270,11 @@ static int iommu_debug_init_tests(void)
return -ENODEV;
}
+ test_virt_addr = kzalloc(SZ_1M, GFP_KERNEL);
+
+ if (!test_virt_addr)
+ return -ENOMEM;
+
return bus_for_each_dev(&platform_bus_type, NULL, NULL,
snarf_iommu_devices);
}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index e81bb48..6c3f8a2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1282,6 +1282,15 @@ phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain,
return domain->ops->iova_to_phys_hard(domain, iova);
}
+uint64_t iommu_iova_to_pte(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ if (unlikely(domain->ops->iova_to_pte == NULL))
+ return 0;
+
+ return domain->ops->iova_to_pte(domain, iova);
+}
+
bool iommu_is_iova_coherent(struct iommu_domain *domain, dma_addr_t iova)
{
if (unlikely(domain->ops->is_iova_coherent == NULL))
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 6c7f6c4..d2cb1e8 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -60,6 +60,7 @@ static void msg_submit(struct mbox_chan *chan)
void *data;
int err = -EBUSY;
+again:
spin_lock_irqsave(&chan->lock, flags);
if (!chan->msg_count || chan->active_req)
@@ -85,6 +86,16 @@ static void msg_submit(struct mbox_chan *chan)
exit:
spin_unlock_irqrestore(&chan->lock, flags);
+ /*
+ * If the controller returns -EAGAIN, it means our spinlock here is
+ * preventing the controller from servicing the interrupt that would
+ * clear the channels currently blocked waiting on a response.
+ * Unlock and retry.
+ */
+ if (err == -EAGAIN)
+ goto again;
+
if (!err && (chan->txdone_method & TXDONE_BY_POLL))
/* kick start the timer immediately to avoid delays */
hrtimer_start(&chan->mbox->poll_hrt, ktime_set(0, 0),
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index b328a2a..1f649d6 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -28,7 +28,6 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
-#include <linux/workqueue.h>
#include <asm-generic/io.h>
@@ -95,10 +94,10 @@ struct tcs_response {
struct mbox_chan *chan;
struct tcs_mbox_msg *msg;
u32 m; /* m-th TCS */
- struct tasklet_struct tasklet;
int err;
int idx;
bool in_use;
+ struct list_head list;
};
struct tcs_response_pool {
@@ -122,16 +121,18 @@ struct tcs_mbox {
/* One per MBOX controller */
struct tcs_drv {
+ struct mbox_controller mbox;
const char *name;
- void *base; /* start address of the RSC's registers */
- void *reg_base; /* start address for DRV specific register */
+ void __iomem *base; /* start address of the RSC's registers */
+ void __iomem *reg_base; /* start address for DRV specific register */
int drv_id;
struct platform_device *pdev;
- struct mbox_controller mbox;
struct tcs_mbox tcs[TCS_TYPE_NR];
int num_assigned;
int num_tcs;
- struct workqueue_struct *wq;
+ struct tasklet_struct tasklet;
+ struct list_head response_pending;
+ spinlock_t drv_lock;
struct tcs_response_pool *resp_pool;
atomic_t tcs_in_use[MAX_POOL_SIZE];
/* Debug info */
@@ -141,8 +142,6 @@ struct tcs_drv {
atomic_t tcs_irq_count[MAX_POOL_SIZE];
};
-static void tcs_notify_tx_done(unsigned long data);
-
static int tcs_response_pool_init(struct tcs_drv *drv)
{
struct tcs_response_pool *pool;
@@ -153,11 +152,10 @@ static int tcs_response_pool_init(struct tcs_drv *drv)
return -ENOMEM;
for (i = 0; i < MAX_POOL_SIZE; i++) {
- tasklet_init(&pool->resp[i].tasklet, tcs_notify_tx_done,
- (unsigned long) &pool->resp[i]);
pool->resp[i].drv = drv;
pool->resp[i].idx = i;
pool->resp[i].m = TCS_M_INIT;
+ INIT_LIST_HEAD(&pool->resp[i].list);
}
spin_lock_init(&pool->lock);
@@ -188,6 +186,9 @@ static struct tcs_response *setup_response(struct tcs_drv *drv,
}
spin_unlock_irqrestore(&pool->lock, flags);
+ if (pos == MAX_POOL_SIZE)
+ pr_err("response pool is full\n");
+
return resp;
}
@@ -240,11 +241,11 @@ static void print_response(struct tcs_drv *drv, int m)
return;
msg = resp->msg;
- pr_info("Response object idx=%d:\n\tfor-tcs=%d\tin-use=%d\n",
+ pr_debug("Response object idx=%d:\n\tfor-tcs=%d\tin-use=%d\n",
resp->idx, resp->m, resp->in_use);
- pr_info("Msg: state=%d\n", msg->state);
+ pr_debug("Msg: state=%d\n", msg->state);
for (i = 0; i < msg->num_payload; i++)
- pr_info("addr=0x%x data=0x%x complete=0x%x\n",
+ pr_debug("addr=0x%x data=0x%x complete=0x%x\n",
msg->payload[i].addr,
msg->payload[i].data,
msg->payload[i].complete);
@@ -364,7 +365,15 @@ static inline struct tcs_mbox *get_tcs_for_msg(struct tcs_drv *drv,
static inline void send_tcs_response(struct tcs_response *resp)
{
- tasklet_schedule(&resp->tasklet);
+ struct tcs_drv *drv = resp->drv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv->drv_lock, flags);
+ INIT_LIST_HEAD(&resp->list);
+ list_add_tail(&resp->list, &drv->response_pending);
+ spin_unlock_irqrestore(&drv->drv_lock, flags);
+
+ tasklet_schedule(&drv->tasklet);
}
static inline void enable_tcs_irq(struct tcs_drv *drv, int m, bool enable)
@@ -455,12 +464,12 @@ static irqreturn_t tcs_irq_handler(int irq, void *p)
/* Clear the TCS IRQ status */
write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));
+ /* Notify the client that this request is completed. */
+ atomic_set(&drv->tcs_in_use[m], 0);
+
/* Clean up response object and notify mbox in tasklet */
if (resp)
send_tcs_response(resp);
-
- /* Notify the client that this request is completed. */
- atomic_set(&drv->tcs_in_use[m], 0);
}
return IRQ_HANDLED;
@@ -475,19 +484,38 @@ static inline void mbox_notify_tx_done(struct mbox_chan *chan,
mbox_chan_txdone(chan, err);
}
-/**
- * tcs_notify_tx_done: TX Done for requests that do not trigger TCS
- */
-static void tcs_notify_tx_done(unsigned long data)
+static void respond_tx_done(struct tcs_response *resp)
{
- struct tcs_response *resp = (struct tcs_response *) data;
struct mbox_chan *chan = resp->chan;
struct tcs_mbox_msg *msg = resp->msg;
int err = resp->err;
int m = resp->m;
- mbox_notify_tx_done(chan, msg, m, err);
free_response(resp);
+ mbox_notify_tx_done(chan, msg, m, err);
+}
+
+/**
+ * tcs_notify_tx_done: TX Done for requests that do not trigger TCS
+ */
+static void tcs_notify_tx_done(unsigned long data)
+{
+ struct tcs_drv *drv = (struct tcs_drv *)data;
+ struct tcs_response *resp;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&drv->drv_lock, flags);
+ if (list_empty(&drv->response_pending)) {
+ spin_unlock_irqrestore(&drv->drv_lock, flags);
+ break;
+ }
+ resp = list_first_entry(&drv->response_pending,
+ struct tcs_response, list);
+ list_del(&resp->list);
+ spin_unlock_irqrestore(&drv->drv_lock, flags);
+ respond_tx_done(resp);
+ } while (1);
}
static void __tcs_buffer_write(struct tcs_drv *drv, int d, int m, int n,
@@ -673,8 +701,11 @@ static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
if (IS_ERR(tcs))
return PTR_ERR(tcs);
- if (trigger)
+ if (trigger) {
resp = setup_response(drv, msg, chan, TCS_M_INIT, 0);
+ if (IS_ERR_OR_NULL(resp))
+ return -EBUSY;
+ }
/* Identify the sequential slots that we can write to */
spin_lock_irqsave(&tcs->tcs_lock, flags);
@@ -686,28 +717,21 @@ static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
return slot;
}
- if (trigger) {
- ret = check_for_req_inflight(drv, tcs, msg);
- if (ret) {
- spin_unlock_irqrestore(&tcs->tcs_lock, flags);
- return ret;
- }
- }
-
- /* Mark the slots as in-use, before we unlock */
- if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
- bitmap_set(tcs->slots, slot, msg->num_payload);
-
- /* Copy the addresses of the resources over to the slots */
- for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
- tcs->cmd_addr[slot + i] = msg->payload[i].addr;
-
+ /* Figure out the TCS-m and CMD-n to write to */
offset = slot / tcs->ncpt;
m = offset + tcs->tcs_offset;
n = slot % tcs->ncpt;
- /* Block, if we have an address from the msg in flight */
if (trigger) {
+ /* Block, if we have an address from the msg in flight */
+ ret = check_for_req_inflight(drv, tcs, msg);
+ if (ret) {
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
+ if (resp)
+ free_response(resp);
+ return ret;
+ }
+
resp->m = m;
/* Mark the TCS as busy */
atomic_set(&drv->tcs_in_use[m], 1);
@@ -716,6 +740,14 @@ static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
if (tcs->type != ACTIVE_TCS)
enable_tcs_irq(drv, m, true);
drv->tcs_last_sent_ts[m] = arch_counter_get_cntvct();
+ } else {
+ /* Mark the slots as in-use, before we unlock */
+ if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
+ bitmap_set(tcs->slots, slot, msg->num_payload);
+
+ /* Copy the addresses of the resources over to the slots */
+ for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
+ tcs->cmd_addr[slot + i] = msg->payload[i].addr;
}
/* Write to the TCS or AMC */
@@ -758,6 +790,32 @@ static int tcs_mbox_invalidate(struct mbox_chan *chan)
return 0;
}
+static void print_tcs_regs(struct tcs_drv *drv, int m)
+{
+ int n;
+ struct tcs_mbox *tcs = get_tcs_from_index(drv, m);
+ void __iomem *base = drv->reg_base;
+ u32 enable, addr, data, msgid;
+
+ if (!tcs || tcs_is_free(drv, m))
+ return;
+
+ enable = read_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0);
+ if (!enable)
+ return;
+
+ pr_debug("TCS-%d contents:\n", m);
+ for (n = 0; n < tcs->ncpt; n++) {
+ if (!(enable & BIT(n)))
+ continue;
+ addr = read_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n);
+ data = read_tcs_reg(base, TCS_DRV_CMD_DATA, m, n);
+ msgid = read_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n);
+ pr_debug("\tn=%d addr=0x%x data=0x%x hdr=0x%x\n",
+ n, addr, data, msgid);
+ }
+}
+
static void dump_tcs_stats(struct tcs_drv *drv)
{
int i;
@@ -766,12 +824,13 @@ static void dump_tcs_stats(struct tcs_drv *drv)
for (i = 0; i < drv->num_tcs; i++) {
if (!atomic_read(&drv->tcs_in_use[i]))
continue;
- pr_info("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
+ pr_debug("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
curr, i,
atomic_read(&drv->tcs_send_count[i]),
drv->tcs_last_sent_ts[i],
atomic_read(&drv->tcs_irq_count[i]),
drv->tcs_last_recv_ts[i]);
+ print_tcs_regs(drv, i);
print_response(drv, i);
}
}
@@ -840,7 +899,7 @@ static int chan_tcs_write(struct mbox_chan *chan, void *data)
if (ret != -EBUSY)
break;
udelay(100);
- } while (++count < 10);
+ } while (++count < 100);
tx_fail:
/* If there was an error in the request, schedule a response */
@@ -849,7 +908,8 @@ static int chan_tcs_write(struct mbox_chan *chan, void *data)
drv, msg, chan, TCS_M_INIT, ret);
dev_err(dev, "Error sending RPMH message %d\n", ret);
- send_tcs_response(resp);
+ if (resp)
+ send_tcs_response(resp);
ret = 0;
}
@@ -857,6 +917,7 @@ static int chan_tcs_write(struct mbox_chan *chan, void *data)
if (ret == -EBUSY) {
dev_err(dev, "TCS Busy, retrying RPMH message send\n");
dump_tcs_stats(drv);
+ ret = -EAGAIN;
}
return ret;
@@ -967,6 +1028,7 @@ static struct mbox_chan *of_tcs_mbox_xlate(struct mbox_controller *mbox,
}
chan = &mbox->chans[drv->num_assigned++];
+ chan->con_priv = drv;
return chan;
}
@@ -1108,6 +1170,9 @@ static int tcs_drv_probe(struct platform_device *pdev)
drv->mbox.is_idle = tcs_drv_is_idle;
drv->num_tcs = st;
drv->pdev = pdev;
+ INIT_LIST_HEAD(&drv->response_pending);
+ spin_lock_init(&drv->drv_lock);
+ tasklet_init(&drv->tasklet, tcs_notify_tx_done, (unsigned long)drv);
drv->name = of_get_property(pdev->dev.of_node, "label", NULL);
if (!drv->name)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index f0aad08..ed25f30 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -80,8 +80,6 @@ void dm_set_md_type(struct mapped_device *md, unsigned type);
unsigned dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
-int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
-
/*
* To check the return value from dm_table_find_target().
*/
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 4f246e1..9a30d64 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -628,9 +628,46 @@ static int cam_cpas_hw_update_axi_vote(struct cam_hw_info *cpas_hw,
return rc;
}
-static int cam_cpas_util_apply_client_ahb_vote(struct cam_cpas *cpas_core,
+static int cam_cpas_util_get_ahb_level(struct cam_hw_info *cpas_hw,
+ struct device *dev, unsigned long freq, enum cam_vote_level *req_level)
+{
+ struct cam_cpas_private_soc *soc_private =
+ (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+ struct dev_pm_opp *opp;
+ unsigned int corner;
+ enum cam_vote_level level = CAM_SVS_VOTE;
+ unsigned long corner_freq = freq;
+ int i;
+
+ if (!dev || !req_level) {
+ pr_err("Invalid params %pK, %pK\n", dev, req_level);
+ return -EINVAL;
+ }
+
+ opp = dev_pm_opp_find_freq_ceil(dev, &corner_freq);
+ if (IS_ERR(opp)) {
+ pr_err("Error on OPP freq :%ld, %pK\n", corner_freq, opp);
+ return -EINVAL;
+ }
+
+ corner = dev_pm_opp_get_voltage(opp);
+
+ for (i = 0; i < soc_private->num_vdd_ahb_mapping; i++)
+ if (corner == soc_private->vdd_ahb[i].vdd_corner)
+ level = soc_private->vdd_ahb[i].ahb_level;
+
+ CPAS_CDBG("From OPP table : freq=[%ld][%ld], corner=%d, level=%d\n",
+ freq, corner_freq, corner, level);
+
+ *req_level = level;
+
+ return 0;
+}
+
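+ /*
+  * For illustration, the table this lookup walks could be populated from DT
+  * as below. The corner values are made up for the example; struct
+  * cam_cpas_vdd_ahb_mapping and the vote-level string parsing are added in
+  * cam_cpas_soc.h/.c further down. A corner of 3 returned by
+  * dev_pm_opp_get_voltage() would then select CAM_NOMINAL_VOTE, and any
+  * corner not listed falls back to the CAM_SVS_VOTE default.
+  *
+  *   vdd-corners            = <1 2 3 4>;
+  *   vdd-corner-ahb-mapping = "suspend", "svs", "nominal", "turbo";
+  *
+  * static const struct cam_cpas_vdd_ahb_mapping example_vdd_ahb[] = {
+  *     { .vdd_corner = 1, .ahb_level = CAM_SUSPEND_VOTE },
+  *     { .vdd_corner = 2, .ahb_level = CAM_SVS_VOTE },
+  *     { .vdd_corner = 3, .ahb_level = CAM_NOMINAL_VOTE },
+  *     { .vdd_corner = 4, .ahb_level = CAM_TURBO_VOTE },
+  * };
+  */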
+static int cam_cpas_util_apply_client_ahb_vote(struct cam_hw_info *cpas_hw,
struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote)
{
+ struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
struct cam_cpas_bus_client *ahb_bus_client = &cpas_core->ahb_bus_client;
enum cam_vote_level required_level;
enum cam_vote_level highest_level;
@@ -642,12 +679,14 @@ static int cam_cpas_util_apply_client_ahb_vote(struct cam_cpas *cpas_core,
}
if (ahb_vote->type == CAM_VOTE_DYNAMIC) {
- pr_err("Dynamic AHB vote not supported\n");
- return -EINVAL;
+ rc = cam_cpas_util_get_ahb_level(cpas_hw, cpas_client->data.dev,
+ ahb_vote->vote.freq, &required_level);
+ if (rc)
+ return rc;
+ } else {
+ required_level = ahb_vote->vote.level;
}
- required_level = ahb_vote->vote.level;
-
if (cpas_client->ahb_level == required_level)
return 0;
@@ -708,7 +747,7 @@ static int cam_cpas_hw_update_ahb_vote(struct cam_hw_info *cpas_hw,
ahb_vote->vote.freq,
cpas_core->cpas_client[client_indx]->ahb_level);
- rc = cam_cpas_util_apply_client_ahb_vote(cpas_core,
+ rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw,
cpas_core->cpas_client[client_indx], ahb_vote);
unlock_client:
@@ -780,7 +819,7 @@ static int cam_cpas_hw_start(void *hw_priv, void *start_args,
CPAS_CDBG("AHB :client[%d] type[%d], level[%d], applied[%d]\n",
client_indx, ahb_vote->type, ahb_vote->vote.level,
cpas_client->ahb_level);
- rc = cam_cpas_util_apply_client_ahb_vote(cpas_core, cpas_client,
+ rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
ahb_vote);
if (rc)
goto done;
@@ -800,8 +839,8 @@ static int cam_cpas_hw_start(void *hw_priv, void *start_args,
goto done;
}
- if (cpas_core->internal_ops.power_on_settings) {
- rc = cpas_core->internal_ops.power_on_settings(cpas_hw);
+ if (cpas_core->internal_ops.power_on) {
+ rc = cpas_core->internal_ops.power_on(cpas_hw);
if (rc) {
cam_cpas_soc_disable_resources(
&cpas_hw->soc_info);
@@ -873,6 +912,15 @@ static int cam_cpas_hw_stop(void *hw_priv, void *stop_args,
cpas_core->streamon_clients--;
if (cpas_core->streamon_clients == 0) {
+ if (cpas_core->internal_ops.power_off) {
+ rc = cpas_core->internal_ops.power_off(cpas_hw);
+ if (rc) {
+ pr_err("failed in power_off settings rc=%d\n",
+ rc);
+ /* Do not return error, passthrough */
+ }
+ }
+
rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
if (rc) {
pr_err("disable_resorce failed, rc=%d\n", rc);
@@ -883,7 +931,7 @@ static int cam_cpas_hw_stop(void *hw_priv, void *stop_args,
ahb_vote.type = CAM_VOTE_ABSOLUTE;
ahb_vote.vote.level = CAM_SUSPEND_VOTE;
- rc = cam_cpas_util_apply_client_ahb_vote(cpas_core, cpas_client,
+ rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
&ahb_vote);
if (rc)
goto done;
@@ -1282,15 +1330,22 @@ int cam_cpas_hw_probe(struct platform_device *pdev,
cpas_hw_intf->hw_ops.write = NULL;
cpas_hw_intf->hw_ops.process_cmd = cam_cpas_hw_process_cmd;
+ cpas_core->work_queue = alloc_workqueue("cam-cpas",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, CAM_CPAS_INFLIGHT_WORKS);
+ if (!cpas_core->work_queue) {
+ rc = -ENOMEM;
+ goto release_mem;
+ }
+
internal_ops = &cpas_core->internal_ops;
rc = cam_cpas_util_get_internal_ops(pdev, cpas_hw_intf, internal_ops);
- if (rc != 0)
- goto release_mem;
+ if (rc)
+ goto release_workq;
rc = cam_cpas_soc_init_resources(&cpas_hw->soc_info,
internal_ops->handle_irq, cpas_hw);
if (rc)
- goto release_mem;
+ goto release_workq;
soc_private = (struct cam_cpas_private_soc *)
cpas_hw->soc_info.soc_private;
@@ -1375,6 +1430,9 @@ int cam_cpas_hw_probe(struct platform_device *pdev,
cam_cpas_util_client_cleanup(cpas_hw);
deinit_platform_res:
cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+release_workq:
+ flush_workqueue(cpas_core->work_queue);
+ destroy_workqueue(cpas_core->work_queue);
release_mem:
mutex_destroy(&cpas_hw->hw_mutex);
kfree(cpas_core);
@@ -1406,6 +1464,8 @@ int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf)
cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
cam_cpas_util_client_cleanup(cpas_hw);
cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+ flush_workqueue(cpas_core->work_queue);
+ destroy_workqueue(cpas_core->work_queue);
mutex_destroy(&cpas_hw->hw_mutex);
kfree(cpas_core);
kfree(cpas_hw);
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index c181302..6d4fafe 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -17,6 +17,7 @@
#include "cam_cpas_hw_intf.h"
#define CPAS_MAX_CLIENTS 20
+#define CAM_CPAS_INFLIGHT_WORKS 5
#define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
#define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
@@ -45,7 +46,8 @@ enum cam_cpas_access_type {
* @init_hw_version: Function pointer for hw init based on version
* @handle_irq: Function pointer for irq handling
* @setup_regbase: Function pointer for setting up regbase indices
- * @power_on_settings: Function pointer for hw core specific power on settings
+ * @power_on: Function pointer for hw core specific power on settings
+ * @power_off: Function pointer for hw core specific power off settings
*
*/
struct cam_cpas_internal_ops {
@@ -56,7 +58,8 @@ struct cam_cpas_internal_ops {
irqreturn_t (*handle_irq)(int irq_num, void *data);
int (*setup_regbase)(struct cam_hw_soc_info *soc_info,
int32_t regbase_index[], int32_t num_reg_map);
- int (*power_on_settings)(struct cam_hw_info *cpas_hw);
+ int (*power_on)(struct cam_hw_info *cpas_hw);
+ int (*power_off)(struct cam_hw_info *cpas_hw);
};
/**
@@ -167,6 +170,7 @@ struct cam_cpas_axi_port {
* @ahb_bus_client: AHB Bus client info
* @axi_ports_list_head: Head pointing to list of AXI ports
* @internal_ops: CPAS HW internal ops
+ * @work_queue: Work queue handle
*
*/
struct cam_cpas {
@@ -180,6 +184,7 @@ struct cam_cpas {
struct cam_cpas_bus_client ahb_bus_client;
struct list_head axi_ports_list_head;
struct cam_cpas_internal_ops internal_ops;
+ struct workqueue_struct *work_queue;
};
int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
index d2c3e06..9ee5a43 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
@@ -29,6 +29,13 @@
#define BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift)
+/* Number of times to retry while polling */
+#define CAM_CPAS_POLL_RETRY_CNT 5
+/* Minimum usecs to sleep while polling */
+#define CAM_CPAS_POLL_MIN_USECS 200
+/* Maximum usecs to sleep while polling */
+#define CAM_CPAS_POLL_MAX_USECS 250
+
/**
* enum cam_cpas_hw_type - Enum for CPAS HW type
*/
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
index fdebdc7..b774625 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -51,21 +51,23 @@ struct cam_cpas_intf {
static struct cam_cpas_intf *g_cpas_intf;
int cam_cpas_get_hw_info(uint32_t *camera_family,
- struct cam_hw_version *camera_version)
+ struct cam_hw_version *camera_version,
+ struct cam_hw_version *cpas_version)
{
if (!CAM_CPAS_INTF_INITIALIZED()) {
pr_err("cpas intf not initialized\n");
return -ENODEV;
}
- if (!camera_family || !camera_version) {
- pr_err("invalid input %pK %pK\n", camera_family,
- camera_version);
+ if (!camera_family || !camera_version || !cpas_version) {
+ pr_err("invalid input %pK %pK %pK\n", camera_family,
+ camera_version, cpas_version);
return -EINVAL;
}
*camera_family = g_cpas_intf->hw_caps.camera_family;
*camera_version = g_cpas_intf->hw_caps.camera_version;
+ *cpas_version = g_cpas_intf->hw_caps.cpas_version;
return 0;
}
@@ -344,7 +346,7 @@ int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf,
}
rc = cam_cpas_get_hw_info(&query.camera_family,
- &query.camera_version);
+ &query.camera_version, &query.cpas_version);
if (rc)
break;
@@ -428,6 +430,7 @@ static long cam_cpas_subdev_ioctl(struct v4l2_subdev *sd,
static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, unsigned long arg)
{
+ struct cam_control cmd_data;
int32_t rc;
struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
@@ -436,9 +439,16 @@ static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
return -ENODEV;
}
+ if (copy_from_user(&cmd_data, (void __user *)arg,
+ sizeof(cmd_data))) {
+ pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+ (void __user *)arg, sizeof(cmd_data));
+ return -EFAULT;
+ }
+
switch (cmd) {
case VIDIOC_CAM_CONTROL:
- rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
+ rc = cam_cpas_subdev_cmd(cpas_intf, &cmd_data);
break;
default:
pr_err("Invalid command %d for CPAS!\n", cmd);
@@ -446,6 +456,15 @@ static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
break;
}
+ if (!rc) {
+ if (copy_to_user((void __user *)arg, &cmd_data,
+ sizeof(cmd_data))) {
+ pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+ (void __user *)arg, sizeof(cmd_data));
+ rc = -EFAULT;
+ }
+ }
+
return rc;
}
#endif
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
index 0a8e6bb..0c71ece 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -22,6 +22,26 @@
#include "cam_cpas_hw.h"
#include "cam_cpas_soc.h"
+static int cam_cpas_get_vote_level_from_string(const char *string,
+ enum cam_vote_level *vote_level)
+{
+ if (!vote_level || !string)
+ return -EINVAL;
+
+ if (strnstr("suspend", string, strlen(string)))
+ *vote_level = CAM_SUSPEND_VOTE;
+ else if (strnstr("svs", string, strlen(string)))
+ *vote_level = CAM_SVS_VOTE;
+ else if (strnstr("nominal", string, strlen(string)))
+ *vote_level = CAM_NOMINAL_VOTE;
+ else if (strnstr("turbo", string, strlen(string)))
+ *vote_level = CAM_TURBO_VOTE;
+ else
+ *vote_level = CAM_SVS_VOTE;
+
+ return 0;
+}
+
int cam_cpas_get_custom_dt_info(struct platform_device *pdev,
struct cam_cpas_private_soc *soc_private)
{
@@ -89,6 +109,42 @@ int cam_cpas_get_custom_dt_info(struct platform_device *pdev,
soc_private->axi_camnoc_based = of_property_read_bool(of_node,
"client-bus-camnoc-based");
+ count = of_property_count_u32_elems(of_node, "vdd-corners");
+ if ((count > 0) && (count <= CAM_REGULATOR_LEVEL_MAX) &&
+ (of_property_count_strings(of_node, "vdd-corner-ahb-mapping") ==
+ count)) {
+ const char *ahb_string;
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_u32_index(of_node, "vdd-corners",
+ i, &soc_private->vdd_ahb[i].vdd_corner);
+ if (rc) {
+ pr_err("vdd-corners failed at index=%d\n", i);
+ return -ENODEV;
+ }
+
+ rc = of_property_read_string_index(of_node,
+ "vdd-corner-ahb-mapping", i, &ahb_string);
+ if (rc) {
+ pr_err("no ahb-mapping at index=%d\n", i);
+ return -ENODEV;
+ }
+
+ rc = cam_cpas_get_vote_level_from_string(ahb_string,
+ &soc_private->vdd_ahb[i].ahb_level);
+ if (rc) {
+ pr_err("invalid ahb-string at index=%d\n", i);
+ return -EINVAL;
+ }
+
+ CPAS_CDBG("Vdd-AHB mapping [%d] : [%d] [%s] [%d]\n", i,
+ soc_private->vdd_ahb[i].vdd_corner,
+ ahb_string, soc_private->vdd_ahb[i].ahb_level);
+ }
+
+ soc_private->num_vdd_ahb_mapping = count;
+ }
+
return 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
index fdd9386..d3dfbbd 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -16,6 +16,19 @@
#include "cam_soc_util.h"
#define CAM_CPAS_MAX_CLIENTS 20
+#define CAM_REGULATOR_LEVEL_MAX 16
+
+/**
+ * struct cam_cpas_vdd_ahb_mapping : Voltage to ahb level mapping
+ *
+ * @vdd_corner : Voltage corner value
+ * @ahb_level : AHB vote level corresponding to this vdd_corner
+ *
+ */
+struct cam_cpas_vdd_ahb_mapping {
+ unsigned int vdd_corner;
+ enum cam_vote_level ahb_level;
+};
/**
* struct cam_cpas_private_soc : CPAS private DT info
@@ -27,6 +40,8 @@
* @axi_camnoc_based: Whether AXI access is camnoc based
* @client_axi_port_name: AXI Port name for each client
* @axi_port_list_node : Node representing AXI Ports list
+ * @num_vdd_ahb_mapping : Number of vdd to ahb level mappings supported
+ * @vdd_ahb : AHB level mapping info for the supported vdd levels
*
*/
struct cam_cpas_private_soc {
@@ -37,6 +52,8 @@ struct cam_cpas_private_soc {
bool axi_camnoc_based;
const char *client_axi_port_name[CAM_CPAS_MAX_CLIENTS];
struct device_node *axi_port_list_node;
+ uint32_t num_vdd_ahb_mapping;
+ struct cam_cpas_vdd_ahb_mapping vdd_ahb[CAM_REGULATOR_LEVEL_MAX];
};
int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
diff --git a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
index fa8ab89..95e26c5 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
@@ -81,7 +81,8 @@ int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
internal_ops->init_hw_version = NULL;
internal_ops->handle_irq = NULL;
internal_ops->setup_regbase = cam_camsstop_setup_regbase_indices;
- internal_ops->power_on_settings = NULL;
+ internal_ops->power_on = NULL;
+ internal_ops->power_off = NULL;
return 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index 415de47..b901410 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/timer.h>
+#include <linux/slab.h>
#include "cam_cpas_hw_intf.h"
#include "cam_cpas_hw.h"
@@ -105,15 +106,64 @@ static int cam_cpastop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
static int cam_cpastop_handle_errlogger(struct cam_cpas *cpas_core,
struct cam_hw_soc_info *soc_info)
{
- uint32_t reg_value;
+ uint32_t reg_value[4];
int i;
+ int size = camnoc_info->error_logger_size;
int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
- for (i = 0; i < camnoc_info->error_logger_size; i++) {
- reg_value = cam_io_r_mb(
+ for (i = 0; (i + 3) < size; i = i + 4) {
+ reg_value[0] = cam_io_r_mb(
soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->error_logger[i]);
- pr_err("ErrorLogger[%d] : 0x%x\n", i, reg_value);
+ reg_value[1] = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->error_logger[i + 1]);
+ reg_value[2] = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->error_logger[i + 2]);
+ reg_value[3] = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->error_logger[i + 3]);
+ pr_err("offset[0x%x] values [0x%x] [0x%x] [0x%x] [0x%x]\n",
+ camnoc_info->error_logger[i], reg_value[0],
+ reg_value[1], reg_value[2], reg_value[3]);
+ }
+
+ if ((i + 2) < size) {
+ reg_value[0] = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->error_logger[i]);
+ reg_value[1] = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->error_logger[i + 1]);
+ reg_value[2] = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->error_logger[i + 2]);
+ pr_err("offset[0x%x] values [0x%x] [0x%x] [0x%x]\n",
+ camnoc_info->error_logger[i], reg_value[0],
+ reg_value[1], reg_value[2]);
+ i = i + 3;
+ }
+
+ if ((i + 1) < size) {
+ reg_value[0] = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->error_logger[i]);
+ reg_value[1] = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->error_logger[i + 1]);
+ pr_err("offset[0x%x] values [0x%x] [0x%x]\n",
+ camnoc_info->error_logger[i], reg_value[0],
+ reg_value[1]);
+ i = i + 2;
+ }
+
+ if (i < size) {
+ reg_value[0] = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->error_logger[i]);
+ pr_err("offset[0x%x] values [0x%x]\n",
+ camnoc_info->error_logger[i], reg_value[0]);
}
return 0;
@@ -128,9 +178,10 @@ static int cam_cpastop_handle_ubwc_err(struct cam_cpas *cpas_core,
reg_value = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->irq_err[i].err_status.offset);
- pr_err("Dumping ubwc error status : 0x%x\n", reg_value);
+ pr_err("Dumping ubwc error status [%d]: offset[0x%x] value[0x%x]\n",
+ i, camnoc_info->irq_err[i].err_status.offset, reg_value);
- return 0;
+ return reg_value;
}
static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw)
@@ -172,65 +223,130 @@ static int cam_cpastop_reset_irq(struct cam_hw_info *cpas_hw)
return 0;
}
-irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data)
+static void cam_cpastop_notify_clients(struct cam_cpas *cpas_core,
+ enum cam_camnoc_hw_irq_type irq_type, uint32_t irq_data)
{
- uint32_t irq_status;
- struct cam_hw_info *cpas_hw = (struct cam_hw_info *)data;
- struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
- struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
- int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+ int i;
+ struct cam_cpas_client *cpas_client;
+
+ CPAS_CDBG("Notify CB : num_clients=%d, registered=%d, started=%d\n",
+ cpas_core->num_clients, cpas_core->registered_clients,
+ cpas_core->streamon_clients);
+
+ for (i = 0; i < cpas_core->num_clients; i++) {
+ if (CAM_CPAS_CLIENT_STARTED(cpas_core, i)) {
+ cpas_client = cpas_core->cpas_client[i];
+ if (cpas_client->data.cam_cpas_client_cb) {
+ CPAS_CDBG("Calling client CB %d : %d 0x%x\n",
+ i, irq_type, irq_data);
+ cpas_client->data.cam_cpas_client_cb(
+ cpas_client->data.client_handle,
+ cpas_client->data.userdata,
+ (enum cam_camnoc_irq_type)irq_type,
+ irq_data);
+ }
+ }
+ }
+}
+
+static void cam_cpastop_work(struct work_struct *work)
+{
+ struct cam_cpas_work_payload *payload;
+ struct cam_hw_info *cpas_hw;
+ struct cam_cpas *cpas_core;
+ struct cam_hw_soc_info *soc_info;
int i;
enum cam_camnoc_hw_irq_type irq_type;
+ uint32_t irq_data;
- irq_status = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->irq_sbm->sbm_status.offset);
+ payload = container_of(work, struct cam_cpas_work_payload, work);
+ if (!payload) {
+ pr_err("NULL payload");
+ return;
+ }
- pr_err("IRQ callback, irq_status=0x%x\n", irq_status);
+ cpas_hw = payload->hw;
+ cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ soc_info = &cpas_hw->soc_info;
for (i = 0; i < camnoc_info->irq_err_size; i++) {
- if ((irq_status & camnoc_info->irq_err[i].sbm_port) &&
+ if ((payload->irq_status & camnoc_info->irq_err[i].sbm_port) &&
(camnoc_info->irq_err[i].enable)) {
irq_type = camnoc_info->irq_err[i].irq_type;
pr_err("Error occurred, type=%d\n", irq_type);
+ irq_data = 0;
switch (irq_type) {
case CAM_CAMNOC_HW_IRQ_SLAVE_ERROR:
- cam_cpastop_handle_errlogger(cpas_core,
- soc_info);
+ irq_data = cam_cpastop_handle_errlogger(
+ cpas_core, soc_info);
break;
case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR:
case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR:
case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
- cam_cpastop_handle_ubwc_err(cpas_core,
- soc_info, i);
+ irq_data = cam_cpastop_handle_ubwc_err(
+ cpas_core, soc_info, i);
break;
case CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT:
- cam_cpastop_handle_ahb_timeout_err(cpas_hw);
+ irq_data = cam_cpastop_handle_ahb_timeout_err(
+ cpas_hw);
break;
case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
CPAS_CDBG("TEST IRQ\n");
break;
default:
+ pr_err("Invalid IRQ type\n");
break;
}
- irq_status &= ~camnoc_info->irq_err[i].sbm_port;
+ cam_cpastop_notify_clients(cpas_core, irq_type,
+ irq_data);
+
+ payload->irq_status &=
+ ~camnoc_info->irq_err[i].sbm_port;
}
}
- if (irq_status)
- pr_err("IRQ not handled, irq_status=0x%x\n", irq_status);
+ if (payload->irq_status)
+ pr_err("IRQ not handled irq_status=0x%x\n",
+ payload->irq_status);
+
+ kfree(payload);
+}
+
+static irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data)
+{
+ struct cam_hw_info *cpas_hw = (struct cam_hw_info *)data;
+ struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+ int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+ struct cam_cpas_work_payload *payload;
+
+ payload = kzalloc(sizeof(struct cam_cpas_work_payload), GFP_ATOMIC);
+ if (!payload)
+ return IRQ_HANDLED;
+
+ payload->irq_status = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->irq_sbm->sbm_status.offset);
+
+ CPAS_CDBG("IRQ callback, irq_status=0x%x\n", payload->irq_status);
+
+ payload->hw = cpas_hw;
+ INIT_WORK((struct work_struct *)&payload->work, cam_cpastop_work);
if (TEST_IRQ_ENABLE)
cam_cpastop_disable_test_irq(cpas_hw);
cam_cpastop_reset_irq(cpas_hw);
+ queue_work(cpas_core->work_queue, &payload->work);
+
return IRQ_HANDLED;
}
-static int cam_cpastop_static_settings(struct cam_hw_info *cpas_hw)
+static int cam_cpastop_poweron(struct cam_hw_info *cpas_hw)
{
int i;
@@ -256,6 +372,38 @@ static int cam_cpastop_static_settings(struct cam_hw_info *cpas_hw)
return 0;
}
+static int cam_cpastop_poweroff(struct cam_hw_info *cpas_hw)
+{
+ struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+ int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+ int rc = 0;
+ struct cam_cpas_hw_errata_wa_list *errata_wa_list =
+ camnoc_info->errata_wa_list;
+
+ if (!errata_wa_list)
+ return 0;
+
+ if (errata_wa_list->camnoc_flush_slave_pending_trans.enable) {
+ struct cam_cpas_hw_errata_wa *errata_wa =
+ &errata_wa_list->camnoc_flush_slave_pending_trans;
+
+ rc = cam_io_poll_value_wmask(
+ soc_info->reg_map[camnoc_index].mem_base +
+ errata_wa->data.reg_info.offset,
+ errata_wa->data.reg_info.value,
+ errata_wa->data.reg_info.mask,
+ CAM_CPAS_POLL_RETRY_CNT,
+ CAM_CPAS_POLL_MIN_USECS, CAM_CPAS_POLL_MAX_USECS);
+ if (rc) {
+ pr_err("camnoc flush slave pending trans failed\n");
+ /* Do not return error, passthrough */
+ }
+ }
+
+ return rc;
+}
+
static int cam_cpastop_init_hw_version(struct cam_hw_info *cpas_hw,
struct cam_cpas_hw_caps *hw_caps)
{
@@ -295,7 +443,8 @@ int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
internal_ops->init_hw_version = cam_cpastop_init_hw_version;
internal_ops->handle_irq = cam_cpastop_handle_irq;
internal_ops->setup_regbase = cam_cpastop_setup_regbase_indices;
- internal_ops->power_on_settings = cam_cpastop_static_settings;
+ internal_ops->power_on = cam_cpastop_poweron;
+ internal_ops->power_off = cam_cpastop_poweroff;
return 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
index 99aae3f..d5bb363 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
@@ -147,6 +147,31 @@ struct cam_camnoc_irq_err {
};
/**
+ * struct cam_cpas_hw_errata_wa : Struct for HW errata workaround info
+ *
+ * @enable: Whether to enable this errata workaround
+ * @data: HW Errata workaround data
+ *
+ */
+struct cam_cpas_hw_errata_wa {
+ bool enable;
+ union {
+ struct cam_cpas_reg reg_info;
+ } data;
+};
+
+/**
+ * struct cam_cpas_hw_errata_wa_list : List of HW Errata workaround info
+ *
+ * @camnoc_flush_slave_pending_trans: Errata workaround info for flushing
+ * camnoc slave pending transactions before turning off CPAS_TOP gdsc
+ *
+ */
+struct cam_cpas_hw_errata_wa_list {
+ struct cam_cpas_hw_errata_wa camnoc_flush_slave_pending_trans;
+};
+
+/**
* struct cam_camnoc_info : Overall CAMNOC settings info
*
* @specific: Pointer to CAMNOC specific settings
@@ -156,6 +181,7 @@ struct cam_camnoc_irq_err {
* @irq_err_size: Array size of IRQ Error settings
* @error_logger: Pointer to CAMNOC IRQ Error logger read registers
* @error_logger_size: Array size of IRQ Error logger
+ * @errata_wa_list: HW Errata workaround info
*
*/
struct cam_camnoc_info {
@@ -166,6 +192,23 @@ struct cam_camnoc_info {
int irq_err_size;
uint32_t *error_logger;
int error_logger_size;
+ struct cam_cpas_hw_errata_wa_list *errata_wa_list;
+};
+
+/**
+ * struct cam_cpas_work_payload : Struct for cpas work payload data
+ *
+ * @hw: Pointer to HW info
+ * @irq_status: IRQ status value
+ * @irq_data: IRQ data
+ * @work: Work handle
+ *
+ */
+struct cam_cpas_work_payload {
+ struct cam_hw_info *hw;
+ uint32_t irq_status;
+ uint32_t irq_data;
+ struct work_struct work;
};
#endif /* _CAM_CPASTOP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
index 12c8e66..b30cd05 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
@@ -279,17 +279,16 @@ static struct cam_camnoc_specific
.value = 3,
},
.danger_lut = {
- .enable = false,
+ .enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
- .masked_value = 0,
.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
- .value = 0x0,
+ .value = 0xFFFFFF00,
},
.safe_lut = {
- .enable = false,
+ .enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
- .value = 0x0,
+ .value = 0x3,
},
.ubwc_ctl = {
.enable = true,
@@ -328,18 +327,16 @@ static struct cam_camnoc_specific
.value = 3,
},
.danger_lut = {
- .enable = false,
+ .enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
- .masked_value = 0,
.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
- .value = 0x0,
+ .value = 0xFFFFFF00,
},
.safe_lut = {
- .enable = false,
+ .enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
- .masked_value = 0,
.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
- .value = 0x0,
+ .value = 0x3,
},
.ubwc_ctl = {
.enable = true,
@@ -516,6 +513,18 @@ uint32_t slave_error_logger[] = {
0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
};
+static struct cam_cpas_hw_errata_wa_list cam170_cpas100_errata_wa_list = {
+ .camnoc_flush_slave_pending_trans = {
+ .enable = true,
+ .data.reg_info = {
+ .access_type = CAM_REG_TYPE_READ,
+ .offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+ .mask = 0xE0000, /* Bits 17, 18, 19 */
+ .value = 0, /* expected to be 0 */
+ },
+ },
+};
+
struct cam_camnoc_info cam170_cpas100_camnoc_info = {
.specific = &cam_cpas100_camnoc_specific[0],
.specific_size = sizeof(cam_cpas100_camnoc_specific) /
@@ -527,6 +536,7 @@ struct cam_camnoc_info cam170_cpas100_camnoc_info = {
.error_logger = &slave_error_logger[0],
.error_logger_size = sizeof(slave_error_logger) /
sizeof(slave_error_logger[0]),
+ .errata_wa_list = &cam170_cpas100_errata_wa_list,
};
#endif /* _CPASTOP100_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
index f6b0729..27b8504 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -312,13 +312,15 @@ int cam_cpas_reg_read(
* @camera_family : Camera family type. One of
* CAM_FAMILY_CAMERA_SS
* CAM_FAMILY_CPAS_SS
- * @camera_version : Camera version
+ * @camera_version : Camera platform version
+ * @cpas_version : Camera cpas version
*
* @return 0 on success.
*
*/
int cam_cpas_get_hw_info(
uint32_t *camera_family,
- struct cam_hw_version *camera_version);
+ struct cam_hw_version *camera_version,
+ struct cam_hw_version *cpas_version);
#endif /* _CAM_CPAS_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 4b2db07..2bc4b00 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -23,6 +23,7 @@
#include "cam_isp_packet_parser.h"
#include "cam_ife_hw_mgr.h"
#include "cam_cdm_intf_api.h"
+#include "cam_packet_util.h"
#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
@@ -1919,6 +1920,13 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
if (rc)
return rc;
+ rc = cam_packet_util_process_patches(prepare->packet,
+ hw_mgr->mgr_common.cmd_iommu_hdl);
+ if (rc) {
+ pr_err("%s: Patch ISP packet failed.\n", __func__);
+ return rc;
+ }
+
prepare->num_hw_update_entries = 0;
prepare->num_in_map_entries = 0;
prepare->num_out_map_entries = 0;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index b608320..3c72279 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -315,15 +315,17 @@ int cam_isp_add_io_buffers(
struct cam_isp_resource_node *res;
struct cam_ife_hw_mgr_res *hw_mgr_res;
struct cam_isp_hw_get_buf_update update_buf;
- uint32_t kmd_buf_remain_size, i, j, k, out_buf, in_buf,
- res_id_out, res_id_in, num_plane, io_cfg_used_bytes, num_ent;
+ uint32_t kmd_buf_remain_size;
+ uint32_t i, j, num_out_buf, num_in_buf;
+ uint32_t res_id_out, res_id_in, plane_id;
+ uint32_t io_cfg_used_bytes, num_ent;
size_t size;
io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
&prepare->packet->payload +
prepare->packet->io_configs_offset);
- out_buf = 0;
- in_buf = 0;
+ num_out_buf = 0;
+ num_in_buf = 0;
io_cfg_used_bytes = 0;
/* Max one hw entries required for each base */
@@ -357,17 +359,18 @@ int cam_isp_add_io_buffers(
CDBG("%s:%d configure output io with fill fence %d\n",
__func__, __LINE__, fill_fence);
if (fill_fence) {
- if (out_buf < prepare->max_out_map_entries) {
- prepare->out_map_entries[out_buf].
+ if (num_out_buf <
+ prepare->max_out_map_entries) {
+ prepare->out_map_entries[num_out_buf].
resource_handle =
io_cfg[i].resource_type;
- prepare->out_map_entries[out_buf].
+ prepare->out_map_entries[num_out_buf].
sync_id = io_cfg[i].fence;
- out_buf++;
+ num_out_buf++;
} else {
pr_err("%s:%d ln_out:%d max_ln:%d\n",
__func__, __LINE__,
- out_buf,
+ num_out_buf,
prepare->max_out_map_entries);
return -EINVAL;
}
@@ -385,23 +388,22 @@ int cam_isp_add_io_buffers(
CDBG("%s:%d configure input io with fill fence %d\n",
__func__, __LINE__, fill_fence);
if (fill_fence) {
- if (in_buf < prepare->max_in_map_entries) {
- prepare->in_map_entries[in_buf].
+ if (num_in_buf < prepare->max_in_map_entries) {
+ prepare->in_map_entries[num_in_buf].
resource_handle =
io_cfg[i].resource_type;
- prepare->in_map_entries[in_buf].
+ prepare->in_map_entries[num_in_buf].
sync_id =
io_cfg[i].fence;
- in_buf++;
+ num_in_buf++;
} else {
pr_err("%s:%d ln_in:%d imax_ln:%d\n",
__func__, __LINE__,
- in_buf,
+ num_in_buf,
prepare->max_in_map_entries);
return -EINVAL;
}
}
- /*TO DO get the input FE address and add to list */
continue;
} else {
pr_err("%s:%d Invalid io config direction :%d\n",
@@ -427,27 +429,36 @@ int cam_isp_add_io_buffers(
}
memset(io_addr, 0, sizeof(io_addr));
- num_plane = 0;
- for (k = 0; k < CAM_PACKET_MAX_PLANES; k++) {
- if (!io_cfg[i].mem_handle[k])
- continue;
- rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[k],
- iommu_hdl, &io_addr[num_plane], &size);
+ for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES;
+ plane_id++) {
+ if (!io_cfg[i].mem_handle[plane_id])
+ break;
+
+ rc = cam_mem_get_io_buf(
+ io_cfg[i].mem_handle[plane_id],
+ iommu_hdl, &io_addr[plane_id], &size);
if (rc) {
pr_err("%s:%d no io addr for plane%d\n",
- __func__, __LINE__, k);
+ __func__, __LINE__, plane_id);
rc = -ENOMEM;
return rc;
}
+
+ if (io_addr[plane_id] >> 32) {
+ pr_err("Invalid mapped address\n");
+ rc = -EINVAL;
+ return rc;
+ }
+
/* need to update with offset */
- io_addr[num_plane] += io_cfg->offsets[k];
+ io_addr[plane_id] +=
+ io_cfg[i].offsets[plane_id];
CDBG("%s: get io_addr for plane %d: 0x%llx\n",
- __func__, num_plane,
- io_addr[num_plane]);
- num_plane++;
+ __func__, plane_id,
+ io_addr[plane_id]);
}
- if (!num_plane) {
+ if (!plane_id) {
pr_err("%s:%d No valid planes for res%d\n",
__func__, __LINE__, res->res_id);
rc = -ENOMEM;
@@ -471,7 +482,8 @@ int cam_isp_add_io_buffers(
io_cfg_used_bytes/4;
update_buf.cdm.size = kmd_buf_remain_size;
update_buf.image_buf = io_addr;
- update_buf.num_buf = num_plane;
+ update_buf.num_buf = plane_id;
+ update_buf.io_cfg = &io_cfg[i];
CDBG("%s:%d: cmd buffer 0x%pK, size %d\n", __func__,
__LINE__, update_buf.cdm.cmd_buf_addr,
@@ -509,8 +521,8 @@ int cam_isp_add_io_buffers(
}
if (fill_fence) {
- prepare->num_out_map_entries = out_buf;
- prepare->num_in_map_entries = in_buf;
+ prepare->num_out_map_entries = num_out_buf;
+ prepare->num_in_map_entries = num_in_buf;
}
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index 3ec9aa6..f09fdc7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -988,8 +988,8 @@ static int cam_ife_csid_config_tpg(struct cam_ife_csid_hw *csid_hw,
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->tpg_reg->csid_tpg_dt_n_cfg_2_addr);
- /* select rotate period as 5 frame */
- val = 5 << 8;
+ /* static frame with split color bar */
+ val = 1 << 5;
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->tpg_reg->csid_tpg_color_bars_cfg_addr);
/* config pix pattern */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index ea34406..6c6f38b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -149,14 +149,16 @@ struct cam_isp_hw_get_cdm_args {
* @Brief: Get cdm commands for buffer updates.
*
* @ cdm: Command buffer information
- * @ image_buf: Contain the image buffer information
+ * @ image_buf: image buffer address array
* @ num_buf: Number of buffers in the image_buf array
+ * @ io_cfg: IO buffer config information sent from UMD
*
*/
struct cam_isp_hw_get_buf_update {
struct cam_isp_hw_get_cdm_args cdm;
uint64_t *image_buf;
uint32_t num_buf;
+ struct cam_buf_io_cfg *io_cfg;
};
#endif /* _CAM_ISP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 6e62dcf..5e629b6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -28,6 +28,16 @@
#define FRAME_BASED_EN 0
+#define MAX_BUF_UPDATE_REG_NUM 20
+#define MAX_REG_VAL_PAIR_SIZE \
+ (MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
+
+#define CAM_VFE_ADD_REG_VAL_PAIR(buf_array, index, offset, val) \
+ do { \
+ buf_array[index++] = offset; \
+ buf_array[index++] = val; \
+ } while (0)
+
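+/*
+ * Quick illustration of the helper above (offsets are placeholders): each
+ * invocation stores one offset/value pair and advances the index by two, so
+ * the accumulated pairs can later be turned into CDM register-write commands.
+ *
+ *   uint32_t reg_val_pair[MAX_REG_VAL_PAIR_SIZE];
+ *   uint32_t j = 0;
+ *
+ *   CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j, STRIDE_OFF, 0x500);
+ *   CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j, HEIGHT_OFF, 0x2d0);
+ *   // j is now 4: { STRIDE_OFF, 0x500, HEIGHT_OFF, 0x2d0 }
+ */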
static uint32_t irq_reg_offset[CAM_IFE_BUS_IRQ_REGISTERS_MAX] = {
0x0000205C,
0x00002060,
@@ -64,6 +74,8 @@ struct cam_vfe_bus_ver2_common_data {
void *bus_irq_controller;
void *vfe_irq_controller;
struct cam_vfe_bus_ver2_reg_offset_common *common_reg;
+ uint32_t io_buf_update[
+ MAX_REG_VAL_PAIR_SIZE];
};
struct cam_vfe_bus_ver2_wm_resource_data {
@@ -73,6 +85,7 @@ struct cam_vfe_bus_ver2_wm_resource_data {
uint32_t irq_enabled;
+ uint32_t init_cfg_done;
uint32_t offset;
uint32_t width;
uint32_t height;
@@ -83,10 +96,21 @@ struct cam_vfe_bus_ver2_wm_resource_data {
uint32_t burst_len;
uint32_t frame_based;
+ uint32_t en_ubwc;
+ uint32_t packer_cfg;
+ uint32_t tile_cfg;
+ uint32_t h_init;
+ uint32_t v_init;
+ uint32_t ubwc_meta_stride;
+ uint32_t ubwc_mode_cfg;
+ uint32_t ubwc_meta_offset;
+
uint32_t irq_subsample_period;
uint32_t irq_subsample_pattern;
uint32_t framedrop_period;
uint32_t framedrop_pattern;
+
+ uint32_t en_cfg;
};
struct cam_vfe_bus_ver2_comp_grp_data {
@@ -598,15 +622,52 @@ static int cam_vfe_bus_acquire_wm(
rsrc_data->width = out_port_info->width;
rsrc_data->height = out_port_info->height;
- if (plane == PLANE_C) {
- switch (rsrc_data->format) {
- case CAM_FORMAT_NV21:
- case CAM_FORMAT_NV12:
- rsrc_data->height /= 2;
+
+ if (rsrc_data->index < 3) {
+ rsrc_data->width = rsrc_data->width * 5/4 * rsrc_data->height;
+ rsrc_data->height = 1;
+ rsrc_data->pack_fmt = 0x0;
+ rsrc_data->en_cfg = 0x3;
+ } else if (rsrc_data->index < 5) {
+ switch (plane) {
+ case PLANE_Y:
+ switch (rsrc_data->format) {
+ case CAM_FORMAT_UBWC_NV12:
+ case CAM_FORMAT_UBWC_NV12_4R:
+ case CAM_FORMAT_UBWC_TP10:
+ rsrc_data->en_ubwc = 1;
+ break;
+ default:
+ break;
+ }
+ break;
+ case PLANE_C:
+ switch (rsrc_data->format) {
+ case CAM_FORMAT_NV21:
+ case CAM_FORMAT_NV12:
+ rsrc_data->height /= 2;
+ break;
+ case CAM_FORMAT_UBWC_NV12:
+ case CAM_FORMAT_UBWC_NV12_4R:
+ case CAM_FORMAT_UBWC_TP10:
+ rsrc_data->height /= 2;
+ rsrc_data->en_ubwc = 1;
+ break;
+ default:
+ break;
+ }
break;
default:
- break;
+ pr_err("Invalid plane type %d\n", plane);
+ return -EINVAL;
}
+ rsrc_data->pack_fmt = 0xE;
+ rsrc_data->en_cfg = 0x1;
+ } else {
+ rsrc_data->width = rsrc_data->width * 4;
+ rsrc_data->height = rsrc_data->height / 2;
+ rsrc_data->pack_fmt = 0x0;
+ rsrc_data->en_cfg = 0x1;
}
if (vfe_out_res_id >= CAM_ISP_IFE_OUT_RES_RDI_0 &&
@@ -638,7 +699,16 @@ static int cam_vfe_bus_release_wm(void *bus_priv,
rsrc_data->irq_subsample_pattern = 0;
rsrc_data->framedrop_period = 0;
rsrc_data->framedrop_pattern = 0;
-
+ rsrc_data->packer_cfg = 0;
+ rsrc_data->en_ubwc = 0;
+ rsrc_data->tile_cfg = 0;
+ rsrc_data->h_init = 0;
+ rsrc_data->v_init = 0;
+ rsrc_data->ubwc_meta_stride = 0;
+ rsrc_data->ubwc_mode_cfg = 0;
+ rsrc_data->ubwc_meta_offset = 0;
+ rsrc_data->init_cfg_done = 0;
+ rsrc_data->en_cfg = 0;
wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
return 0;
@@ -651,52 +721,18 @@ static int cam_vfe_bus_start_wm(struct cam_isp_resource_node *wm_res)
wm_res->res_priv;
struct cam_vfe_bus_ver2_common_data *common_data =
rsrc_data->common_data;
- uint32_t width;
- uint32_t height;
- uint32_t pack_fmt;
- uint32_t stride;
- uint32_t en_cfg;
-
- CDBG("WM res %d width = %d, height = %d\n", rsrc_data->index,
- rsrc_data->width, rsrc_data->height);
- CDBG("WM res %d pk_fmt = %d\n", rsrc_data->index,
- rsrc_data->pack_fmt & PACKER_FMT_MAX);
- CDBG("WM res %d stride = %d, burst len = %d\n",
- rsrc_data->index, rsrc_data->width, 0xf);
cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_addr);
cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_cfg);
cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->frame_inc);
cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit);
- if (rsrc_data->index < 3) {
- width = rsrc_data->width * 5/4 * rsrc_data->height;
- height = 1;
- pack_fmt = 0x0;
- stride = rsrc_data->width * 5/4 * rsrc_data->height;
- en_cfg = 0x3;
- } else if (rsrc_data->index < 5) {
- width = rsrc_data->width;
- height = rsrc_data->height;
- pack_fmt = 0xE;
- stride = rsrc_data->width;
- en_cfg = 0x1;
- } else {
- width = rsrc_data->width * 4;
- height = rsrc_data->height / 2;
- pack_fmt = 0x0;
- stride = rsrc_data->width * 4;
- en_cfg = 0x1;
- }
-
- cam_io_w_mb(width,
+ cam_io_w_mb(rsrc_data->width,
common_data->mem_base + rsrc_data->hw_regs->buffer_width_cfg);
- cam_io_w(height,
+ cam_io_w(rsrc_data->height,
common_data->mem_base + rsrc_data->hw_regs->buffer_height_cfg);
- cam_io_w(pack_fmt,
+ cam_io_w(rsrc_data->pack_fmt,
common_data->mem_base + rsrc_data->hw_regs->packer_cfg);
- cam_io_w(stride,
- common_data->mem_base + rsrc_data->hw_regs->stride);
cam_io_w(0xFFFFFFFF, common_data->mem_base +
rsrc_data->hw_regs->irq_subsample_pattern);
@@ -708,34 +744,14 @@ static int cam_vfe_bus_start_wm(struct cam_isp_resource_node *wm_res)
cam_io_w(0x0,
common_data->mem_base + rsrc_data->hw_regs->framedrop_period);
- /* UBWC registers */
- switch (rsrc_data->format) {
- case CAM_FORMAT_UBWC_NV12:
- /* Program UBWC registers */
- break;
- default:
- break;
- }
-
- /* Subscribe IRQ */
- if (rsrc_data->irq_enabled) {
- /*
- * Currently all WM IRQ are subscribed in one place. Need to
- * make it dynamic later.
- */
- }
-
- /* Enable WM */
- cam_io_w_mb(en_cfg, common_data->mem_base + rsrc_data->hw_regs->cfg);
-
CDBG("WM res %d width = %d, height = %d\n", rsrc_data->index,
- width, height);
+ rsrc_data->width, rsrc_data->height);
CDBG("WM res %d pk_fmt = %d\n", rsrc_data->index,
- pack_fmt & PACKER_FMT_MAX);
+ rsrc_data->pack_fmt & PACKER_FMT_MAX);
CDBG("WM res %d stride = %d, burst len = %d\n",
- rsrc_data->index, stride, 0xf);
+ rsrc_data->index, rsrc_data->stride, 0xf);
CDBG("enable WM res %d offset 0x%x val 0x%x\n", rsrc_data->index,
- (uint32_t) rsrc_data->hw_regs->cfg, en_cfg);
+ (uint32_t) rsrc_data->hw_regs->cfg, rsrc_data->en_cfg);
wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
@@ -1622,10 +1638,11 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
{
struct cam_vfe_bus_ver2_priv *bus_priv;
struct cam_isp_hw_get_buf_update *update_buf;
+ struct cam_buf_io_cfg *io_cfg;
struct cam_vfe_bus_ver2_vfe_out_data *vfe_out_data = NULL;
struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
- uint32_t reg_val_pair[8];
- uint32_t i, size = 0;
+ uint32_t *reg_val_pair;
+ uint32_t i, j, size = 0;
/*
* Need the entire buf io config so we can get the stride info
@@ -1643,14 +1660,181 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
return -EINVAL;
}
- if (update_buf->num_buf < vfe_out_data->num_wm) {
+ if (update_buf->num_buf != vfe_out_data->num_wm) {
pr_err("Failed! Invalid number buffers:%d required:%d\n",
update_buf->num_buf, vfe_out_data->num_wm);
return -ENOMEM;
}
- size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(
- vfe_out_data->num_wm);
+ reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
+ io_cfg = update_buf->io_cfg;
+
+ for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
+ wm_data = vfe_out_data->wm_res[i]->res_priv;
+
+ /* For initial configuration program all bus registers */
+ if (wm_data->stride != io_cfg->planes[i].plane_stride ||
+ !wm_data->init_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->stride,
+ io_cfg->planes[i].plane_stride);
+ wm_data->stride = io_cfg->planes[i].plane_stride;
+ }
+ CDBG("image stride 0x%x\n", wm_data->stride);
+
+ if (wm_data->framedrop_pattern != io_cfg->framedrop_pattern ||
+ !wm_data->init_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->framedrop_pattern,
+ io_cfg->framedrop_pattern);
+ wm_data->framedrop_pattern = io_cfg->framedrop_pattern;
+ }
+ CDBG("framedrop pattern 0x%x\n", wm_data->framedrop_pattern);
+
+ if (wm_data->framedrop_period != io_cfg->framedrop_period ||
+ !wm_data->init_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->framedrop_period,
+ io_cfg->framedrop_period);
+ wm_data->framedrop_period = io_cfg->framedrop_period;
+ }
+ CDBG("framedrop period 0x%x\n", wm_data->framedrop_period);
+
+ if (wm_data->irq_subsample_period != io_cfg->subsample_period
+ || !wm_data->init_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->irq_subsample_period,
+ io_cfg->subsample_period);
+ wm_data->irq_subsample_period =
+ io_cfg->subsample_period;
+ }
+ CDBG("irq subsample period 0x%x\n",
+ wm_data->irq_subsample_period);
+
+ if (wm_data->irq_subsample_pattern != io_cfg->subsample_pattern
+ || !wm_data->init_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->irq_subsample_pattern,
+ io_cfg->subsample_pattern);
+ wm_data->irq_subsample_pattern =
+ io_cfg->subsample_pattern;
+ }
+ CDBG("irq subsample pattern 0x%x\n",
+ wm_data->irq_subsample_pattern);
+
+ if (wm_data->en_ubwc) {
+ if (!wm_data->hw_regs->ubwc_regs) {
+ pr_err("%s: No UBWC register to configure.\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (wm_data->packer_cfg !=
+ io_cfg->planes[i].packer_config ||
+ !wm_data->init_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->packer_cfg,
+ io_cfg->planes[i].packer_config);
+ wm_data->packer_cfg =
+ io_cfg->planes[i].packer_config;
+ }
+ CDBG("packer cfg 0x%x\n", wm_data->packer_cfg);
+
+ if (wm_data->tile_cfg != io_cfg->planes[i].tile_config
+ || !wm_data->init_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->ubwc_regs->tile_cfg,
+ io_cfg->planes[i].tile_config);
+ wm_data->tile_cfg =
+ io_cfg->planes[i].tile_config;
+ }
+ CDBG("tile cfg 0x%x\n", wm_data->tile_cfg);
+
+ if (wm_data->h_init != io_cfg->planes[i].h_init ||
+ !wm_data->init_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->ubwc_regs->h_init,
+ io_cfg->planes[i].h_init);
+ wm_data->h_init = io_cfg->planes[i].h_init;
+ }
+ CDBG("h_init 0x%x\n", wm_data->h_init);
+
+ if (wm_data->v_init != io_cfg->planes[i].v_init ||
+ !wm_data->init_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->ubwc_regs->v_init,
+ io_cfg->planes[i].v_init);
+ wm_data->v_init = io_cfg->planes[i].v_init;
+ }
+ CDBG("v_init 0x%x\n", wm_data->v_init);
+
+ if (wm_data->ubwc_meta_stride !=
+ io_cfg->planes[i].meta_stride ||
+ !wm_data->init_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->ubwc_regs->
+ meta_stride,
+ io_cfg->planes[i].meta_stride);
+ wm_data->ubwc_meta_stride =
+ io_cfg->planes[i].meta_stride;
+ }
+ CDBG("meta stride 0x%x\n", wm_data->ubwc_meta_stride);
+
+ if (wm_data->ubwc_mode_cfg !=
+ io_cfg->planes[i].mode_config ||
+ !wm_data->init_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->ubwc_regs->mode_cfg,
+ io_cfg->planes[i].mode_config);
+ wm_data->ubwc_mode_cfg =
+ io_cfg->planes[i].mode_config;
+ }
+ CDBG("ubwc mode cfg 0x%x\n", wm_data->ubwc_mode_cfg);
+
+ if (wm_data->ubwc_meta_offset !=
+ io_cfg->planes[i].meta_offset ||
+ !wm_data->init_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->ubwc_regs->
+ meta_offset,
+ io_cfg->planes[i].meta_offset);
+ wm_data->ubwc_meta_offset =
+ io_cfg->planes[i].meta_offset;
+ }
+ CDBG("ubwc meta offset 0x%x\n",
+ wm_data->ubwc_meta_offset);
+
+ /* UBWC meta address */
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->ubwc_regs->meta_addr,
+ update_buf->image_buf[i]);
+ CDBG("ubwc meta addr 0x%llx\n",
+ update_buf->image_buf[i]);
+ }
+
+ /* WM Image address */
+ if (wm_data->en_ubwc)
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->image_addr,
+ (update_buf->image_buf[i] +
+ io_cfg->planes[i].meta_size));
+ else
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->image_addr,
+ update_buf->image_buf[i]);
+
+ CDBG("image address 0x%x\n", reg_val_pair[j-1]);
+
+ /* enable the WM */
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->cfg,
+ wm_data->en_cfg);
+
+ /* set initial configuration done */
+ if (!wm_data->init_cfg_done)
+ wm_data->init_cfg_done = 1;
+ }
+
+ size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
/* cdm util returns dwords, need to convert to bytes */
if ((size * 4) > update_buf->cdm.size) {
@@ -1659,18 +1843,9 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
return -ENOMEM;
}
- for (i = 0 ; i < vfe_out_data->num_wm; i++) {
- wm_data = vfe_out_data->wm_res[i]->res_priv;
- reg_val_pair[2 * i] = wm_data->hw_regs->image_addr;
- reg_val_pair[2 * i + 1] = update_buf->image_buf[i];
- CDBG("offset 0x%x, value 0x%llx\n",
- wm_data->hw_regs->image_addr,
- (uint64_t) update_buf->image_buf[i]);
- }
-
vfe_out_data->cdm_util_ops->cdm_write_regrandom(
- update_buf->cdm.cmd_buf_addr,
- vfe_out_data->num_wm, reg_val_pair);
+ update_buf->cdm.cmd_buf_addr, j/2, reg_val_pair);
+
/* cdm util returns dwords, need to convert to bytes */
update_buf->cdm.used_bytes = size * 4;
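
The rewritten buffer-update path above accumulates <register offset, value> pairs through CAM_VFE_ADD_REG_VAL_PAIR, programs a register only when the cached WM state differs from the new io_cfg (or when init_cfg_done is not yet set), and finally hands j/2 pairs to cdm_write_regrandom. The macro body itself is not part of this hunk; a minimal sketch of what such a helper could look like, offered as an assumption only:

#define CAM_VFE_ADD_REG_VAL_PAIR(buf_array, index, offset, val) \
	do {                                                     \
		(buf_array)[(index)++] = (offset);               \
		(buf_array)[(index)++] = (val);                  \
	} while (0)
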
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index c837232..4888e5b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -531,7 +531,8 @@ int cam_sensor_match_id(struct cam_sensor_ctrl_t *s_ctrl)
rc = camera_io_dev_read(
&(s_ctrl->io_master_info),
slave_info->sensor_id_reg_addr,
- &chipid, CAMERA_SENSOR_I2C_TYPE_WORD);
+ &chipid, CAMERA_SENSOR_I2C_TYPE_WORD,
+ CAMERA_SENSOR_I2C_TYPE_WORD);
CDBG("%s:%d read id: 0x%x expected id 0x%x:\n",
__func__, __LINE__, chipid, slave_info->sensor_id);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
index bdae1d1..6292a9f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
@@ -5,4 +5,4 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o cam_sensor_qup_i2c.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
index 1261c4b..06e8104 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
@@ -75,4 +75,64 @@ int32_t cam_cci_i2c_poll(struct cam_sensor_cci_client *client,
enum camera_sensor_i2c_type addr_type,
uint32_t delay_ms);
-#endif /* _CAM_SENSOR_I2C_H_ */
+
+/**
+ * cam_qup_i2c_read : QUP based I2C read
+ * @client : QUP I2C client structure
+ * @addr : I2C register address
+ * @data : I2C data
+ * @addr_type : I2C address type
+ * @data_type : I2C data type
+ *
+ * This API handles QUP I2C read
+ */
+
+int32_t cam_qup_i2c_read(struct i2c_client *client,
+ uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type);
+
+/**
+ * cam_qup_i2c_read_seq : QUP based I2C sequential read
+ * @client : QUP I2C client structure
+ * @addr : I2C register address
+ * @data : I2C data
+ * @addr_type : I2C address type
+ * @num_byte : number of bytes to read
+ *
+ * This API handles QUP I2C sequential read
+ */
+
+int32_t cam_qup_i2c_read_seq(struct i2c_client *client,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ uint32_t num_byte);
+
+/**
+ * cam_qup_i2c_poll : QUP based I2C poll operation
+ * @client : QUP I2C client structure
+ * @addr : I2C address
+ * @data : I2C data
+ * @data_mask : I2C data mask
+ * @data_type : I2C data type
+ * @addr_type : I2C addr type
+ * @delay_ms : Delay in milliseconds
+ *
+ * This API implements QUP based I2C poll
+ */
+
+int32_t cam_qup_i2c_poll(struct i2c_client *client,
+ uint32_t addr, uint16_t data, uint16_t data_mask,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type,
+ uint32_t delay_ms);
+
+/**
+ * cam_qup_i2c_write_table : QUP based I2C random write
+ * @client : QUP I2C client structure
+ * @write_setting : I2C register settings
+ *
+ * This API handles QUP I2C random write
+ */
+
+int32_t cam_qup_i2c_write_table(
+ struct camera_io_master *client,
+ struct cam_sensor_i2c_reg_setting *write_setting);
+#endif /*_CAM_SENSOR_I2C_H*/
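
As a rough illustration of how the QUP helpers declared above could be exercised by a sensor driver, a hedged sketch follows; the register addresses, data values, and helper name are invented for the example and are not part of this patch:

/* Illustrative only: addresses and values below are assumptions. */
static int example_probe_over_qup(struct camera_io_master *io)
{
	uint32_t chip_id = 0;
	struct cam_sensor_i2c_reg_array regs[] = {
		{ .reg_addr = 0x0100, .reg_data = 0x01 },
	};
	struct cam_sensor_i2c_reg_setting setting = {
		.reg_setting = regs,
		.size = ARRAY_SIZE(regs),
		.addr_type = CAMERA_SENSOR_I2C_TYPE_WORD,
		.data_type = CAMERA_SENSOR_I2C_TYPE_BYTE,
		.delay = 0,
	};
	int rc;

	rc = cam_qup_i2c_read(io->client, 0x0016, &chip_id,
		CAMERA_SENSOR_I2C_TYPE_WORD, CAMERA_SENSOR_I2C_TYPE_WORD);
	if (rc < 0)
		return rc;

	return cam_qup_i2c_write_table(io, &setting);
}
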
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index f889abc..3e1b331 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -29,6 +29,10 @@ int32_t camera_io_dev_poll(struct camera_io_master *io_master_info,
if (io_master_info->master_type == CCI_MASTER) {
return cam_cci_i2c_poll(io_master_info->cci_client,
addr, data, mask, data_type, addr_type, delay_ms);
+ } else if (io_master_info->master_type == I2C_MASTER) {
+ return cam_qup_i2c_poll(io_master_info->client,
+ addr, data, data_mask, addr_type, data_type,
+ delay_ms);
} else {
pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
__LINE__, io_master_info->master_type);
@@ -38,6 +42,7 @@ int32_t camera_io_dev_poll(struct camera_io_master *io_master_info,
int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type addr_type,
enum camera_sensor_i2c_type data_type)
{
if (!io_master_info) {
@@ -47,7 +52,10 @@ int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
if (io_master_info->master_type == CCI_MASTER) {
return cam_cci_i2c_read(io_master_info->cci_client,
- addr, data, data_type, data_type);
+ addr, data, addr_type, data_type);
+ } else if (io_master_info->master_type == I2C_MASTER) {
+ return cam_qup_i2c_read(io_master_info->client,
+ addr, data, addr_type, data_type);
} else {
pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
__LINE__, io_master_info->master_type);
@@ -67,6 +75,9 @@ int32_t camera_io_dev_write(struct camera_io_master *io_master_info,
if (io_master_info->master_type == CCI_MASTER) {
return cam_cci_i2c_write_table(io_master_info,
write_setting);
+ } else if (io_master_info->master_type == I2C_MASTER) {
+ return cam_qup_i2c_write_table(io_master_info,
+ write_setting);
} else {
pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
__LINE__, io_master_info->master_type);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
index 757ac17..f721afd 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
@@ -38,16 +38,33 @@ struct camera_io_master {
* @io_master_info: I2C/SPI master information
* @addr: I2C address
* @data: I2C data
+ * @addr_type: I2C addr_type
* @data_type: I2C data type
*
* This API abstracts read functionality based on master type
*/
int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type addr_type,
enum camera_sensor_i2c_type data_type);
/**
* @io_master_info: I2C/SPI master information
+ * @addr: I2C address
+ * @data: I2C data
+ * @addr_type: I2C addr type
+ * @num_bytes: number of bytes
+ *
+ * This API abstracts sequential read functionality based on master type
+ */
+int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ uint32_t num_bytes);
+
+
+/**
+ * @io_master_info: I2C/SPI master information
*
* This API initializes the I2C/SPI master based on master type
*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
new file mode 100644
index 0000000..b25b1855
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sensor_cmn_header.h"
+#include "cam_sensor_i2c.h"
+#include "cam_sensor_io.h"
+
+#define I2C_REG_DATA_MAX (8*1024)
+#define I2C_REG_MAX_BUF_SIZE 8
+
+static int32_t cam_qup_i2c_rxdata(
+ struct i2c_client *dev_client, unsigned char *rxdata,
+ enum camera_sensor_i2c_type addr_type,
+ int data_length)
+{
+ int32_t rc = 0;
+ uint16_t saddr = dev_client->addr >> 1;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = saddr,
+ .flags = 0,
+ .len = addr_type,
+ .buf = rxdata,
+ },
+ {
+ .addr = saddr,
+ .flags = I2C_M_RD,
+ .len = data_length,
+ .buf = rxdata,
+ },
+ };
+ rc = i2c_transfer(dev_client->adapter, msgs, 2);
+ if (rc < 0)
+ pr_err("%s:failed 0x%x\n", __func__, saddr);
+ return rc;
+}
+
+
+static int32_t cam_qup_i2c_txdata(
+ struct camera_io_master *dev_client, unsigned char *txdata,
+ int length)
+{
+ int32_t rc = 0;
+ uint16_t saddr = dev_client->client->addr >> 1;
+ struct i2c_msg msg[] = {
+ {
+ .addr = saddr,
+ .flags = 0,
+ .len = length,
+ .buf = txdata,
+ },
+ };
+ rc = i2c_transfer(dev_client->client->adapter, msg, 1);
+ if (rc < 0)
+ pr_err("%s: failed 0x%x\n", __func__, saddr);
+ return rc;
+}
+
+int32_t cam_qup_i2c_read(struct i2c_client *client,
+ uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type)
+{
+ int32_t rc = -EINVAL;
+ unsigned char *buf = NULL;
+
+ if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+ || addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+ || data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+ || data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+ pr_err("ERR: %s Failed with addr/data_type verification\n",
+ __func__);
+ return rc;
+ }
+
+ buf = kzalloc(addr_type + data_type, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+ buf[0] = addr;
+ } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+ buf[0] = addr >> 8;
+ buf[1] = addr;
+ } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+ buf[0] = addr >> 16;
+ buf[1] = addr >> 8;
+ buf[2] = addr;
+ } else {
+ buf[0] = addr >> 24;
+ buf[1] = addr >> 16;
+ buf[2] = addr >> 8;
+ buf[3] = addr;
+ }
+
+ rc = cam_qup_i2c_rxdata(client, buf, addr_type, data_type);
+ if (rc < 0) {
+ pr_err("%s fail\n", __func__);
+ goto read_fail;
+ }
+
+ if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+ *data = buf[0];
+ else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD)
+ *data = buf[0] << 8 | buf[1];
+ else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B)
+ *data = buf[0] << 16 | buf[1] << 8 | buf[2];
+ else
+ *data = buf[0] << 24 | buf[1] << 16 |
+ buf[2] << 8 | buf[3];
+
+ CDBG("%s addr = 0x%x data: 0x%x\n", __func__, addr, *data);
+read_fail:
+ kfree(buf);
+ buf = NULL;
+ return rc;
+}
+
+int32_t cam_qup_i2c_read_seq(struct i2c_client *client,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ unsigned char *buf = NULL;
+ int i;
+
+ if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+ || addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+ pr_err("ERR: %s Failed with addr_type verification\n",
+ __func__);
+ return rc;
+ }
+
+ if ((num_byte == 0) || (num_byte > I2C_REG_DATA_MAX)) {
+ pr_err("%s: Error num_byte:0x%x max supported:0x%x\n",
+ __func__, num_byte, I2C_REG_DATA_MAX);
+ return rc;
+ }
+
+ buf = kzalloc(addr_type + num_byte, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+ buf[0] = addr;
+ } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+ buf[0] = addr >> BITS_PER_BYTE;
+ buf[1] = addr;
+ } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+ buf[0] = addr >> 16;
+ buf[1] = addr >> 8;
+ buf[2] = addr;
+ } else {
+ buf[0] = addr >> 24;
+ buf[1] = addr >> 16;
+ buf[2] = addr >> 8;
+ buf[3] = addr;
+ }
+
+ rc = cam_qup_i2c_rxdata(client, buf, addr_type, num_byte);
+ if (rc < 0) {
+ pr_err("%s fail\n", __func__);
+ goto read_seq_fail;
+ }
+
+ for (i = 0; i < num_byte; i++)
+ data[i] = buf[i];
+
+read_seq_fail:
+ kfree(buf);
+ buf = NULL;
+ return rc;
+}
+
+static int32_t cam_qup_i2c_compare(struct i2c_client *client,
+ uint32_t addr, uint32_t data, uint16_t data_mask,
+ enum camera_sensor_i2c_type data_type,
+ enum camera_sensor_i2c_type addr_type)
+{
+ int32_t rc;
+ uint32_t reg_data = 0;
+
+ rc = cam_qup_i2c_read(client, addr, &reg_data,
+ addr_type, data_type);
+ if (rc < 0)
+ return rc;
+
+ reg_data = reg_data & 0xFFFF;
+ if (data != (reg_data & ~data_mask))
+ return I2C_COMPARE_MISMATCH;
+
+ return I2C_COMPARE_MATCH;
+}
+
+int32_t cam_qup_i2c_poll(struct i2c_client *client,
+ uint32_t addr, uint16_t data, uint16_t data_mask,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type,
+ uint32_t delay_ms)
+{
+ int32_t rc = 0;
+ int i = 0;
+
+ if ((delay_ms > MAX_POLL_DELAY_MS) || (delay_ms == 0)) {
+ pr_err("%s:%d invalid delay = %d max_delay = %d\n",
+ __func__, __LINE__, delay_ms, MAX_POLL_DELAY_MS);
+ return -EINVAL;
+ }
+
+ if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+ || addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+ || data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+ || data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
+ return -EINVAL;
+
+ for (i = 0; i < delay_ms; i++) {
+ rc = cam_qup_i2c_compare(client,
+ addr, data, data_mask, data_type, addr_type);
+ if (rc == I2C_COMPARE_MATCH)
+ return rc;
+
+ usleep_range(1000, 1010);
+ }
+ /* If rc is MISMATCH, the read succeeded but the polled condition never matched */
+ if (rc == I2C_COMPARE_MISMATCH)
+ pr_err("%s:%d poll failed rc=%d(non-fatal)\n",
+ __func__, __LINE__, rc);
+ if (rc < 0)
+ pr_err("%s:%d poll failed rc=%d\n", __func__, __LINE__, rc);
+
+ return rc;
+}
+
+static int32_t cam_qup_i2c_write(struct camera_io_master *client,
+ struct cam_sensor_i2c_reg_array *reg_setting,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type)
+{
+ int32_t rc = 0;
+ unsigned char buf[I2C_REG_MAX_BUF_SIZE];
+ uint8_t len = 0;
+
+ CDBG("%s reg addr = 0x%x data type: %d\n",
+ __func__, reg_setting->reg_addr, data_type);
+ if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+ buf[0] = reg_setting->reg_addr;
+ CDBG("%s byte %d: 0x%x\n", __func__,
+ len, buf[len]);
+ len = 1;
+ } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+ buf[0] = reg_setting->reg_addr >> 8;
+ buf[1] = reg_setting->reg_addr;
+ CDBG("%s byte %d: 0x%x\n", __func__,
+ len, buf[len]);
+ CDBG("%s byte %d: 0x%x\n", __func__,
+ len+1, buf[len+1]);
+ len = 2;
+ } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+ buf[0] = reg_setting->reg_addr >> 16;
+ buf[1] = reg_setting->reg_addr >> 8;
+ buf[2] = reg_setting->reg_addr;
+ len = 3;
+ } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+ buf[0] = reg_setting->reg_addr >> 24;
+ buf[1] = reg_setting->reg_addr >> 16;
+ buf[2] = reg_setting->reg_addr >> 8;
+ buf[3] = reg_setting->reg_addr;
+ len = 4;
+ } else {
+ pr_err("%s: Invalid I2C addr type\n", __func__);
+ return -EINVAL;
+ }
+
+ CDBG("Data: 0x%x\n", reg_setting->reg_data);
+ if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+ buf[len] = reg_setting->reg_data;
+ CDBG("Byte %d: 0x%x\n", len, buf[len]);
+ len += 1;
+ } else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+ buf[len] = reg_setting->reg_data >> 8;
+ buf[len+1] = reg_setting->reg_data;
+ CDBG("Byte %d: 0x%x\n", len, buf[len]);
+ CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+ len += 2;
+ } else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+ buf[len] = reg_setting->reg_data >> 16;
+ buf[len + 1] = reg_setting->reg_data >> 8;
+ buf[len + 2] = reg_setting->reg_data;
+ CDBG("Byte %d: 0x%x\n", len, buf[len]);
+ CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+ CDBG("Byte %d: 0x%x\n", len+2, buf[len+2]);
+ len += 3;
+ } else if (data_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+ buf[len] = reg_setting->reg_data >> 24;
+ buf[len + 1] = reg_setting->reg_data >> 16;
+ buf[len + 2] = reg_setting->reg_data >> 8;
+ buf[len + 3] = reg_setting->reg_data;
+ CDBG("Byte %d: 0x%x\n", len, buf[len]);
+ CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+ CDBG("Byte %d: 0x%x\n", len+2, buf[len+2]);
+ CDBG("Byte %d: 0x%x\n", len+3, buf[len+3]);
+ len += 4;
+ } else {
+ pr_err("%s: Invalid Data Type\n", __func__);
+ return -EINVAL;
+ }
+
+ rc = cam_qup_i2c_txdata(client, buf, len);
+ if (rc < 0)
+ pr_err("%s fail\n", __func__);
+ return rc;
+}
+
+int32_t cam_qup_i2c_write_table(struct camera_io_master *client,
+ struct cam_sensor_i2c_reg_setting *write_setting)
+{
+ int i;
+ int32_t rc = -EINVAL;
+ struct cam_sensor_i2c_reg_array *reg_setting;
+
+ if (!client || !write_setting)
+ return rc;
+
+ if ((write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+ || write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+ || (write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+ || write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)))
+ return rc;
+
+ reg_setting = write_setting->reg_setting;
+
+ for (i = 0; i < write_setting->size; i++) {
+ CDBG("%s addr 0x%x data 0x%x\n", __func__,
+ reg_setting->reg_addr, reg_setting->reg_data);
+
+ rc = cam_qup_i2c_write(client, reg_setting,
+ write_setting->addr_type, write_setting->data_type);
+ if (rc < 0)
+ break;
+ reg_setting++;
+ }
+
+ if (write_setting->delay > 20)
+ msleep(write_setting->delay);
+ else if (write_setting->delay)
+ usleep_range(write_setting->delay * 1000, (write_setting->delay
+ * 1000) + 1000);
+
+ return rc;
+}
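
For reference, a worked example of the byte packing that cam_qup_i2c_write() above produces for a WORD address with WORD data; the concrete register and value are invented:

/*
 * Example (values are assumptions): reg_addr = 0x3012, reg_data = 0x00A5,
 * addr_type = data_type = CAMERA_SENSOR_I2C_TYPE_WORD.
 *
 *   buf[0] = 0x30   address high byte
 *   buf[1] = 0x12   address low byte
 *   buf[2] = 0x00   data high byte
 *   buf[3] = 0xA5   data low byte
 *
 * cam_qup_i2c_txdata() then sends these four bytes in a single i2c_msg
 * addressed to client->addr >> 1.
 */
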
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index 901632a..96f40e1 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -82,7 +82,7 @@ int cam_sync_register_callback(sync_callback cb_func,
sync_cb->sync_obj = sync_obj;
INIT_WORK(&sync_cb->cb_dispatch_work,
cam_sync_util_cb_dispatch);
-
+ list_add_tail(&sync_cb->list, &row->callback_list);
sync_cb->status = row->state;
queue_work(sync_dev->work_queue,
&sync_cb->cb_dispatch_work);
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
index 1e42f75..0ffea5b 100644
--- a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
@@ -44,6 +44,7 @@ struct hfi_mem_info {
struct hfi_mem msg_q;
struct hfi_mem dbg_q;
struct hfi_mem sec_heap;
+ struct hfi_mem shmem;
void __iomem *icp_base;
};
diff --git a/drivers/media/platform/msm/camera/icp/hfi.c b/drivers/media/platform/msm/camera/icp/hfi.c
index 4315865..15e0315 100644
--- a/drivers/media/platform/msm/camera/icp/hfi.c
+++ b/drivers/media/platform/msm/camera/icp/hfi.c
@@ -19,6 +19,8 @@
#include <asm/errno.h>
#include <linux/timer.h>
#include <media/cam_icp.h>
+#include <linux/iopoll.h>
+
#include "cam_io_util.h"
#include "hfi_reg.h"
#include "hfi_sys_defs.h"
@@ -336,7 +338,7 @@ int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
icp_base + HFI_REG_A5_CSR_A5_CONTROL);
} else {
cam_io_w((uint32_t)ICP_FLAG_CSR_A5_EN |
- ICP_FLAG_CSR_WAKE_UP_EN,
+ ICP_FLAG_CSR_WAKE_UP_EN | ICP_CSR_EN_CLKGATE_WFI,
icp_base + HFI_REG_A5_CSR_A5_CONTROL);
}
@@ -460,8 +462,10 @@ int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
}
cam_io_w((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
- cam_io_w((uint32_t)0x7400000, icp_base + HFI_REG_SHARED_MEM_PTR);
- cam_io_w((uint32_t)0x6400000, icp_base + HFI_REG_SHARED_MEM_SIZE);
+ cam_io_w((uint32_t)hfi_mem->shmem.iova,
+ icp_base + HFI_REG_SHARED_MEM_PTR);
+ cam_io_w((uint32_t)hfi_mem->shmem.len,
+ icp_base + HFI_REG_SHARED_MEM_SIZE);
cam_io_w((uint32_t)hfi_mem->sec_heap.iova,
icp_base + HFI_REG_UNCACHED_HEAP_PTR);
cam_io_w((uint32_t)hfi_mem->sec_heap.len,
@@ -472,25 +476,17 @@ int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
pr_debug("hw version : %u[%x]\n", hw_version, hw_version);
- do {
- msleep(500);
- status = cam_io_r(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
- } while (status != ICP_INIT_RESP_SUCCESS);
-
- if (status == ICP_INIT_RESP_SUCCESS) {
- g_hfi->hfi_state = FW_RESP_DONE;
- rc = 0;
- } else {
- rc = -ENODEV;
- pr_err("FW initialization failed");
+ rc = readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
+ status, status != ICP_INIT_RESP_SUCCESS, 15, 200);
+ if (rc) {
+ pr_err("timed out, status = %u\n", status);
goto regions_fail;
}
fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
g_hfi->hfi_state = FW_START_SENT;
- pr_debug("fw version : %u[%x]\n", fw_version, fw_version);
- pr_debug("hfi init is successful\n");
+ HFI_DBG("fw version : %u[%x]\n", fw_version, fw_version);
cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
return rc;
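
The open-coded msleep()/status loop is replaced above by readw_poll_timeout() from linux/iopoll.h, which re-reads the register, sleeps between attempts, and returns -ETIMEDOUT once the overall budget expires. A hedged, self-contained sketch of the same macro in use; the register offset and ready bit below are invented, not taken from hfi_reg.h:

#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_STATUS_REG	0x0	/* assumption, not a real HFI offset */
#define EXAMPLE_READY_BIT	0x1	/* assumption */

/* Poll every 15 us, give up after 200 us, as in the call above. */
static int example_poll_ready(void __iomem *base)
{
	u32 val;

	return readw_poll_timeout(base + EXAMPLE_STATUS_REG, val,
				  (val & EXAMPLE_READY_BIT), 15, 200);
}
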
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 140542b..43491a9 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -26,6 +26,8 @@
#include <linux/debugfs.h>
#include <media/cam_defs.h>
#include <media/cam_icp.h>
+#include <linux/debugfs.h>
+
#include "cam_sync_api.h"
#include "cam_packet_util.h"
#include "cam_hw.h"
@@ -55,6 +57,23 @@
static struct cam_icp_hw_mgr icp_hw_mgr;
+static int cam_icp_hw_mgr_create_debugfs_entry(void)
+{
+ icp_hw_mgr.dentry = debugfs_create_dir("camera_icp", NULL);
+ if (!icp_hw_mgr.dentry)
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("a5_debug",
+ 0644,
+ icp_hw_mgr.dentry,
+ &icp_hw_mgr.a5_debug)) {
+ debugfs_remove_recursive(icp_hw_mgr.dentry);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int cam_icp_stop_cpas(struct cam_icp_hw_mgr *hw_mgr_priv)
{
struct cam_hw_intf *a5_dev_intf = NULL;
@@ -568,7 +587,12 @@ static int cam_icp_allocate_hfi_mem(void)
uint64_t kvaddr;
size_t len;
- pr_err("Allocating FW for iommu handle: %x\n", icp_hw_mgr.iommu_hdl);
+ rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+ CAM_MEM_MGR_REGION_SHARED,
+ &icp_hw_mgr.hfi_mem.shmem);
+ if (rc)
+ return -ENOMEM;
+
rc = cam_smmu_alloc_firmware(icp_hw_mgr.iommu_hdl,
&iova, &kvaddr, &len);
if (rc < 0) {
@@ -764,7 +788,7 @@ static int cam_icp_mgr_destroy_handle(
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- pr_err("timeout/err in iconfig command: %d\n", rc);
+ pr_err("FW response timeout: %d\n", rc);
}
return rc;
@@ -870,6 +894,7 @@ static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
cam_icp_free_hfi_mem();
hw_mgr->fw_download = false;
+ debugfs_remove_recursive(icp_hw_mgr.dentry);
mutex_unlock(&hw_mgr->hw_mgr_mutex);
return 0;
@@ -886,6 +911,8 @@ static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
struct cam_icp_a5_set_irq_cb irq_cb;
struct cam_icp_a5_set_fw_buf_info fw_buf_info;
struct hfi_mem_info hfi_mem;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
int rc = 0;
if (!hw_mgr) {
@@ -1014,9 +1041,12 @@ static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
hfi_mem.sec_heap.len = icp_hw_mgr.hfi_mem.sec_heap.len;
+ hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
+ hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
+
rc = cam_hfi_init(0, &hfi_mem,
a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
- false);
+ hw_mgr->a5_debug);
if (rc < 0) {
pr_err("hfi_init is failed\n");
goto set_irq_failed;
@@ -1033,7 +1063,13 @@ static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
NULL, 0);
ICP_DBG("Wait for INIT DONE Message\n");
- wait_for_completion(&hw_mgr->a5_complete);
+ rem_jiffies = wait_for_completion_timeout(&icp_hw_mgr.a5_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ pr_err("FW response timed out %d\n", rc);
+ goto set_irq_failed;
+ }
ICP_DBG("Done Waiting for INIT DONE Message\n");
@@ -1041,6 +1077,10 @@ static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
a5_dev_intf->hw_priv,
CAM_ICP_A5_CMD_POWER_COLLAPSE,
NULL, 0);
+ if (rc) {
+ pr_err("icp power collapse failed\n");
+ goto set_irq_failed;
+ }
hw_mgr->fw_download = true;
@@ -1428,6 +1468,8 @@ static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
int rc = 0;
struct hfi_cmd_work_data *task_data;
struct hfi_cmd_ipebps_async ioconfig_cmd;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
ioconfig_cmd.size = sizeof(struct hfi_cmd_ipebps_async);
ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
@@ -1451,7 +1493,13 @@ static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
task->process_cb = cam_icp_mgr_process_cmd;
cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
ICP_DBG("fw_hdl = %x ctx_data = %pK\n", ctx_data->fw_handle, ctx_data);
- wait_for_completion(&ctx_data->wait_complete);
+
+ rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ pr_err("FW response timed out %d\n", rc);
+ }
return rc;
}
@@ -1462,6 +1510,8 @@ static int cam_icp_mgr_create_handle(uint32_t dev_type,
{
struct hfi_cmd_create_handle create_handle;
struct hfi_cmd_work_data *task_data;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
int rc = 0;
create_handle.size = sizeof(struct hfi_cmd_create_handle);
@@ -1479,7 +1529,13 @@ static int cam_icp_mgr_create_handle(uint32_t dev_type,
task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_icp_mgr_process_cmd;
cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
- wait_for_completion(&ctx_data->wait_complete);
+
+ rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ pr_err("FW response timed out %d\n", rc);
+ }
return rc;
}
@@ -1489,6 +1545,8 @@ static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data,
{
struct hfi_cmd_ping_pkt ping_pkt;
struct hfi_cmd_work_data *task_data;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
int rc = 0;
ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
@@ -1505,7 +1563,14 @@ static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data,
task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_icp_mgr_process_cmd;
cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
- wait_for_completion(&ctx_data->wait_complete);
+
+ rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ pr_err("FW response timed out %d\n", rc);
+ }
+
return rc;
}
@@ -1929,6 +1994,9 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
if (!icp_hw_mgr.msg_work_data)
goto msg_work_data_failed;
+ rc = cam_icp_hw_mgr_create_debugfs_entry();
+ if (rc)
+ goto msg_work_data_failed;
for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
icp_hw_mgr.msg_work->task.pool[i].payload =
@@ -1940,7 +2008,6 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
init_completion(&icp_hw_mgr.a5_complete);
- pr_err("Exit\n");
return rc;
msg_work_data_failed:
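
Several hunks above convert unbounded wait_for_completion() calls into bounded waits with a 5000 ms budget. A hedged sketch of the recurring pattern, factored into a helper purely for illustration (the helper name is invented):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static int example_wait_for_fw(struct completion *done)
{
	unsigned long rem_jiffies;

	rem_jiffies = wait_for_completion_timeout(done,
			msecs_to_jiffies(5000));
	if (!rem_jiffies) {
		pr_err("FW response timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
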
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index e5ffa7a..32d796a 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -23,6 +23,8 @@
#include "hfi_session_defs.h"
#include "cam_req_mgr_workq.h"
#include "cam_mem_mgr.h"
+#include "cam_smmu_api.h"
+
#define CAM_ICP_ROLE_PARENT 1
#define CAM_ICP_ROLE_CHILD 2
@@ -56,6 +58,7 @@ struct icp_hfi_mem_info {
struct cam_mem_mgr_memory_desc dbg_q;
struct cam_mem_mgr_memory_desc sec_heap;
struct cam_mem_mgr_memory_desc fw_buf;
+ struct cam_smmu_region_info shmem;
};
/**
@@ -176,6 +179,8 @@ struct cam_icp_hw_mgr {
struct hfi_cmd_work_data *cmd_work_data;
struct hfi_msg_work_data *msg_work_data;
uint32_t ctxt_cnt;
+ struct dentry *dentry;
+ bool a5_debug;
};
#endif /* CAM_ICP_HW_MGR_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 17fa2cc..b582934 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -681,9 +681,13 @@ static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
/**
* sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
* on provided session_id. Each rotator has a different session_id.
+ * @rot: Pointer to rotator hw
+ * @session_id: Identifier for rotator session
+ * @sequence_id: Identifier for rotation request within the session
+ * @q_id: Rotator queue identifier
*/
static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
- struct sde_hw_rotator *rot, u32 session_id,
+ struct sde_hw_rotator *rot, u32 session_id, u32 sequence_id,
enum sde_rot_queue_prio q_id)
{
int i;
@@ -692,10 +696,12 @@ static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
ctx = rot->rotCtx[q_id][i];
- if (ctx && (ctx->session_id == session_id)) {
+ if (ctx && (ctx->session_id == session_id) &&
+ (ctx->sequence_id == sequence_id)) {
SDEROT_DBG(
- "rotCtx sloti[%d][%d] ==> ctx:%p | session-id:%d\n",
- q_id, i, ctx, ctx->session_id);
+ "rotCtx sloti[%d][%d] ==> ctx:%p | session-id:%d | sequence-id:%d\n",
+ q_id, i, ctx, ctx->session_id,
+ ctx->sequence_id);
return ctx;
}
}
@@ -2114,6 +2120,7 @@ static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
* @rot: Pointer to rotator hw
* @hw: Pointer to rotator resource
* @session_id: Session identifier of this context
+ * @sequence_id: Sequence identifier of this request
* @sbuf_mode: true if stream buffer is requested
*
* This function allocates a new rotator context for the given session id.
@@ -2122,6 +2129,7 @@ static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
struct sde_hw_rotator *rot,
struct sde_rot_hw_resource *hw,
u32 session_id,
+ u32 sequence_id,
bool sbuf_mode)
{
struct sde_hw_rotator_context *ctx;
@@ -2136,6 +2144,7 @@ static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
ctx->rot = rot;
ctx->q_id = hw->wb_id;
ctx->session_id = session_id;
+ ctx->sequence_id = sequence_id;
ctx->hwres = hw;
ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
@@ -2226,7 +2235,7 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
item = &entry->item;
ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
- item->output.sbuf);
+ item->sequence_id, item->output.sbuf);
if (!ctx) {
SDEROT_ERR("Failed allocating rotator context!!\n");
return -EINVAL;
@@ -2486,7 +2495,8 @@ static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
rot = resinfo->rot;
/* Lookup rotator context from session-id */
- ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
+ ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
+ entry->item.sequence_id, hw->wb_id);
if (!ctx) {
SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
entry->item.session_id);
@@ -2523,7 +2533,8 @@ static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
rot = resinfo->rot;
/* Lookup rotator context from session-id */
- ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
+ ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
+ entry->item.sequence_id, hw->wb_id);
if (!ctx) {
SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
entry->item.session_id);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
index 22eaa3f..67f7f4b 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
@@ -204,6 +204,7 @@ struct sde_dbg_buf {
* ram segment size allocation. Each rotator context can be any priority. A
* incremental timestamp is used to identify and assigned to each context.
* @list: list of pending context
+ * @sequence_id: unique sequence identifier for rotation request
* @sbuf_mode: true if stream buffer is requested
* @start_ctrl: start control register update value
* @sys_cache_mode: sys cache mode register update value
@@ -216,6 +217,7 @@ struct sde_hw_rotator_context {
struct sde_rot_hw_resource *hwres;
enum sde_rot_queue_prio q_id;
u32 session_id;
+ u32 sequence_id;
u32 *regdma_base;
u32 *regdma_wrptr;
u32 timestamp;
@@ -402,7 +404,7 @@ static inline void sde_hw_rotator_put_ctx(struct sde_hw_rotator_context *ctx)
spin_lock_irqsave(&rot->rotisr_lock, flags);
rot->rotCtx[ctx->q_id][idx] = ctx;
if (ctx->sbuf_mode)
- list_add_tail(&rot->sbuf_ctx[ctx->q_id], &ctx->list);
+ list_add_tail(&ctx->list, &rot->sbuf_ctx[ctx->q_id]);
spin_unlock_irqrestore(&rot->rotisr_lock, flags);
SDEROT_DBG("rotCtx[%d][%d] <== ctx:%p | session-id:%d\n",
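
The one-line fix above corrects a swapped argument order: list_add_tail() expects the new entry first and the list head second, so the earlier code was splicing the per-queue head onto the context's node instead of queueing the context. A minimal hedged illustration with invented names:

#include <linux/list.h>

struct example_ctx {
	struct list_head list;
};

static void example_queue_ctx(struct example_ctx *ctx,
			      struct list_head *queue)
{
	/* New node first, list head second. */
	list_add_tail(&ctx->list, queue);
}
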
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 88250e1..8d54e20 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1369,13 +1369,13 @@ int create_pkt_cmd_session_set_property(
pkt->size += sizeof(u32) * 2;
break;
}
- case HAL_CONFIG_VPE_OPERATIONS:
+ case HAL_PARAM_VPE_ROTATION:
{
- struct hfi_operations_type *hfi;
- struct hal_operations *prop =
- (struct hal_operations *) pdata;
- pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VPE_OPERATIONS;
- hfi = (struct hfi_operations_type *) &pkt->rg_property_data[1];
+ struct hfi_vpe_rotation_type *hfi;
+ struct hal_vpe_rotation *prop =
+ (struct hal_vpe_rotation *) pdata;
+ pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_VPE_ROTATION;
+ hfi = (struct hfi_vpe_rotation_type *)&pkt->rg_property_data[1];
switch (prop->rotate) {
case HAL_ROTATE_NONE:
hfi->rotation = HFI_ROTATE_NONE;
@@ -1411,7 +1411,7 @@ int create_pkt_cmd_session_set_property(
rc = -EINVAL;
break;
}
- pkt->size += sizeof(u32) + sizeof(struct hfi_operations_type);
+ pkt->size += sizeof(u32) + sizeof(struct hfi_vpe_rotation_type);
break;
}
case HAL_PARAM_VENC_INTRA_REFRESH:
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 89e8356..f678f56 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -110,6 +110,8 @@ static int hfi_process_sess_evt_seq_changed(u32 device_id,
struct hfi_profile_level *profile_level;
struct hfi_bit_depth *pixel_depth;
struct hfi_pic_struct *pic_struct;
+ struct hfi_buffer_requirements *buf_req;
+ struct hfi_index_extradata_input_crop_payload *crop_info;
u32 entropy_mode = 0;
u8 *data_ptr;
int prop_id;
@@ -231,6 +233,41 @@ static int hfi_process_sess_evt_seq_changed(u32 device_id,
data_ptr +=
sizeof(u32);
break;
+ case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+ data_ptr = data_ptr + sizeof(u32);
+ buf_req =
+ (struct hfi_buffer_requirements *)
+ data_ptr;
+ event_notify.capture_buf_count =
+ buf_req->buffer_count_min;
+ dprintk(VIDC_DBG,
+ "Capture Count : 0x%x\n",
+ event_notify.capture_buf_count);
+ data_ptr +=
+ sizeof(struct hfi_buffer_requirements);
+ break;
+ case HFI_INDEX_EXTRADATA_INPUT_CROP:
+ data_ptr = data_ptr + sizeof(u32);
+ crop_info = (struct
+ hfi_index_extradata_input_crop_payload *)
+ data_ptr;
+ event_notify.crop_data.left = crop_info->left;
+ event_notify.crop_data.top = crop_info->top;
+ event_notify.crop_data.width = crop_info->width;
+ event_notify.crop_data.height =
+ crop_info->height;
+ dprintk(VIDC_DBG,
+ "CROP info : Left = %d Top = %d\n",
+ crop_info->left,
+ crop_info->top);
+ dprintk(VIDC_DBG,
+ "CROP info : Width = %d Height = %d\n",
+ crop_info->width,
+ crop_info->height);
+ data_ptr +=
+ sizeof(struct
+ hfi_index_extradata_input_crop_payload);
+ break;
default:
dprintk(VIDC_ERR,
"%s cmd: %#x not supported\n",
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 074ea4fa..b116622 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -74,6 +74,14 @@ static int get_device_address(struct smem_client *smem_client,
goto mem_map_failed;
}
+ /* Check if the dmabuf size matches expected size */
+ if (buf->size < *buffer_size) {
+ rc = -EINVAL;
+ dprintk(VIDC_ERR,
+ "Size mismatch! Dmabuf size: %zu Expected Size: %lu",
+ buf->size, *buffer_size);
+ goto mem_buf_size_mismatch;
+ }
/* Prepare a dma buf for dma on the given device */
attach = dma_buf_attach(buf, cb->dev);
if (IS_ERR_OR_NULL(attach)) {
@@ -151,6 +159,7 @@ static int get_device_address(struct smem_client *smem_client,
dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
mem_map_table_failed:
dma_buf_detach(buf, attach);
+mem_buf_size_mismatch:
mem_buf_attach_failed:
dma_buf_put(buf);
mem_map_failed:
@@ -201,12 +210,12 @@ static void put_device_address(struct smem_client *smem_client,
}
}
-static int ion_user_to_kernel(struct smem_client *client, int fd, u32 offset,
+static int ion_user_to_kernel(struct smem_client *client, int fd, u32 size,
struct msm_smem *mem, enum hal_buffer buffer_type)
{
struct ion_handle *hndl = NULL;
ion_phys_addr_t iova = 0;
- unsigned long buffer_size = 0;
+ unsigned long buffer_size = size;
int rc = 0;
unsigned long align = SZ_4K;
unsigned long ion_flags = 0;
@@ -217,10 +226,11 @@ static int ion_user_to_kernel(struct smem_client *client, int fd, u32 offset,
dprintk(VIDC_DBG, "%s ion handle: %pK\n", __func__, hndl);
if (IS_ERR_OR_NULL(hndl)) {
dprintk(VIDC_ERR, "Failed to get handle: %pK, %d, %d, %pK\n",
- client, fd, offset, hndl);
+ client, fd, size, hndl);
rc = -ENOMEM;
goto fail_import_fd;
}
+
mem->kvaddr = NULL;
rc = ion_handle_get_flags(client->clnt, hndl, &ion_flags);
if (rc) {
@@ -441,7 +451,7 @@ static void ion_delete_client(struct smem_client *client)
ion_client_destroy(client->clnt);
}
-struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset,
+struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 size,
enum hal_buffer buffer_type)
{
struct smem_client *client = clt;
@@ -459,7 +469,7 @@ struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset,
}
switch (client->mem_type) {
case SMEM_ION:
- rc = ion_user_to_kernel(clt, fd, offset, mem, buffer_type);
+ rc = ion_user_to_kernel(clt, fd, size, mem, buffer_type);
break;
default:
dprintk(VIDC_ERR, "Mem type not supported\n");
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 7802d31..5c34f28 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -225,6 +225,14 @@ static int msm_v4l2_g_parm(struct file *file, void *fh,
return 0;
}
+static int msm_v4l2_g_crop(struct file *file, void *fh,
+ struct v4l2_crop *a)
+{
+ struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+ return msm_vidc_g_crop(vidc_inst, a);
+}
+
static int msm_v4l2_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
@@ -265,6 +273,7 @@ static const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = {
.vidioc_encoder_cmd = msm_v4l2_encoder_cmd,
.vidioc_s_parm = msm_v4l2_s_parm,
.vidioc_g_parm = msm_v4l2_g_parm,
+ .vidioc_g_crop = msm_v4l2_g_crop,
.vidioc_enum_framesizes = msm_v4l2_enum_framesizes,
};
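
With .vidioc_g_crop now wired to msm_vidc_g_crop(), a userspace client can query the decoder crop rectangle reported in the port-settings-changed event. A hedged userspace sketch; the device node path is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_crop crop;
	int fd = open("/dev/video32", O_RDWR);	/* path is an assumption */

	if (fd < 0)
		return 1;

	memset(&crop, 0, sizeof(crop));
	crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	if (ioctl(fd, VIDIOC_G_CROP, &crop) == 0)
		printf("crop: %ux%u at (%d,%d)\n",
		       crop.c.width, crop.c.height, crop.c.left, crop.c.top);

	return 0;
}
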
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index aa5f18d..d44684e 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1117,7 +1117,7 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
struct hal_h264_entropy_control h264_entropy_control;
struct hal_intra_period intra_period;
struct hal_idr_period idr_period;
- struct hal_operations operations;
+ struct hal_vpe_rotation vpe_rotation;
struct hal_intra_refresh intra_refresh;
struct hal_multi_slice_control multi_slice_control;
struct hal_h264_db_control h264_db_control;
@@ -1345,19 +1345,12 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION:
{
- if (!(inst->capability.pixelprocess_capabilities &
- HAL_VIDEO_ENCODER_ROTATION_CAPABILITY)) {
- dprintk(VIDC_ERR, "Rotation not supported: %#x\n",
- ctrl->id);
- rc = -ENOTSUPP;
- break;
- }
- property_id = HAL_CONFIG_VPE_OPERATIONS;
- operations.rotate = msm_comm_v4l2_to_hal(
+ property_id = HAL_PARAM_VPE_ROTATION;
+ vpe_rotation.rotate = msm_comm_v4l2_to_hal(
V4L2_CID_MPEG_VIDC_VIDEO_ROTATION,
ctrl->val);
- operations.flip = HAL_FLIP_NONE;
- pdata = &operations;
+ vpe_rotation.flip = HAL_FLIP_NONE;
+ pdata = &vpe_rotation;
break;
}
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 6253632..2e952a3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -265,6 +265,29 @@ int msm_vidc_s_ctrl(void *instance, struct v4l2_control *control)
}
EXPORT_SYMBOL(msm_vidc_s_ctrl);
+int msm_vidc_g_crop(void *instance, struct v4l2_crop *crop)
+{
+ struct msm_vidc_inst *inst = instance;
+
+ if (!inst || !crop)
+ return -EINVAL;
+
+ if (inst->session_type == MSM_VIDC_ENCODER) {
+ dprintk(VIDC_ERR,
+ "Session = %pK : Encoder Crop is not implemented yet\n",
+ inst);
+ return -EPERM;
+ }
+
+ crop->c.left = inst->prop.crop_info.left;
+ crop->c.top = inst->prop.crop_info.top;
+ crop->c.width = inst->prop.crop_info.width;
+ crop->c.height = inst->prop.crop_info.height;
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_vidc_g_crop);
+
int msm_vidc_g_ctrl(void *instance, struct v4l2_control *control)
{
struct msm_vidc_inst *inst = instance;
@@ -534,7 +557,7 @@ static struct msm_smem *map_buffer(struct msm_vidc_inst *inst,
handle = msm_comm_smem_user_to_kernel(inst,
p->reserved[0],
- p->reserved[1],
+ p->length,
buffer_type);
if (!handle) {
dprintk(VIDC_ERR,
@@ -605,8 +628,10 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
goto exit;
}
- dprintk(VIDC_DBG, "[MAP] Create binfo = %pK fd = %d type = %d\n",
- binfo, b->m.planes[0].reserved[0], b->type);
+ dprintk(VIDC_DBG,
+ "[MAP] Create binfo = %pK fd = %d size = %d type = %d\n",
+ binfo, b->m.planes[0].reserved[0],
+ b->m.planes[0].length, b->type);
for (i = 0; i < b->length; ++i) {
rc = 0;
@@ -878,6 +903,7 @@ int msm_vidc_release_buffer(void *instance, int buffer_type,
struct buffer_info *bi, *dummy;
int i, rc = 0;
int found_buf = 0;
+ struct vb2_buf_entry *temp, *next;
if (!inst)
return -EINVAL;
@@ -936,6 +962,16 @@ int msm_vidc_release_buffer(void *instance, int buffer_type,
default:
break;
}
+
+ mutex_lock(&inst->pendingq.lock);
+ list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
+ if (temp->vb->type == buffer_type) {
+ list_del(&temp->list);
+ kfree(temp);
+ }
+ }
+ mutex_unlock(&inst->pendingq.lock);
+
return rc;
}
EXPORT_SYMBOL(msm_vidc_release_buffer);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index a52fe05..05af186 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -371,7 +371,7 @@ int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst)
struct msm_vidc_inst *temp;
struct msm_vidc_core *core;
unsigned long max_freq, freq_left, ops_left, load, cycles, freq = 0;
- unsigned long mbs_per_frame;
+ unsigned long mbs_per_second;
if (!inst || !inst->core) {
dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
@@ -394,14 +394,21 @@ int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst)
list_for_each_entry(temp, &core->instances, list) {
- mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
+ if (!temp ||
+ temp->state < MSM_VIDC_START_DONE ||
+ temp->state >= MSM_VIDC_RELEASE_RESOURCES_DONE)
+ continue;
+
+ mbs_per_second = msm_comm_get_inst_load(temp,
+ LOAD_CALC_NO_QUIRKS);
+
cycles = temp->clk_data.entry->vpp_cycles;
- if (inst->session_type == MSM_VIDC_ENCODER)
+ if (temp->session_type == MSM_VIDC_ENCODER)
cycles = temp->flags & VIDC_LOW_POWER ?
- inst->clk_data.entry->low_power_cycles :
+ temp->clk_data.entry->low_power_cycles :
cycles;
- load = cycles * mbs_per_frame;
+ load = cycles * mbs_per_second;
ops_left = load ? (freq_left / load) : 0;
/* Convert remaining operating rate to Q16 format */
@@ -418,7 +425,7 @@ int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst)
ctrl->name, ctrl->default_value, ctrl->val);
v4l2_ctrl_modify_range(ctrl, ctrl->minimum,
ctrl->val + ops_left, ctrl->step,
- ctrl->minimum);
+ ctrl->default_value);
dprintk(VIDC_DBG,
"%s: Updated Range = %lld --> %lld\n",
ctrl->name, ctrl->minimum, ctrl->maximum);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index b1a8e8b..fe61e6f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1166,12 +1166,12 @@ static int wait_for_sess_signal_receipt(struct msm_vidc_inst *inst,
if (!rc) {
dprintk(VIDC_ERR, "Wait interrupted or timed out: %d\n",
SESSION_MSG_INDEX(cmd));
- msm_comm_kill_session(inst);
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
dprintk(VIDC_ERR,
"sess resp timeout can potentially crash the system\n");
msm_comm_print_debug_info(inst);
msm_vidc_handle_hw_error(inst->core);
+ msm_comm_kill_session(inst);
rc = -EIO;
} else {
rc = 0;
@@ -1554,6 +1554,14 @@ static void handle_event_change(enum hal_command_response cmd, void *data)
inst->entropy_mode = event_notify->entropy_mode;
inst->profile = event_notify->profile;
inst->level = event_notify->level;
+ inst->prop.crop_info.left =
+ event_notify->crop_data.left;
+ inst->prop.crop_info.top =
+ event_notify->crop_data.top;
+ inst->prop.crop_info.height =
+ event_notify->crop_data.height;
+ inst->prop.crop_info.width =
+ event_notify->crop_data.width;
ptr = (u32 *)seq_changed_event.u.data;
ptr[0] = event_notify->height;
@@ -1561,6 +1569,10 @@ static void handle_event_change(enum hal_command_response cmd, void *data)
ptr[2] = event_notify->bit_depth;
ptr[3] = event_notify->pic_struct;
ptr[4] = event_notify->colour_space;
+ ptr[5] = event_notify->crop_data.top;
+ ptr[6] = event_notify->crop_data.left;
+ ptr[7] = event_notify->crop_data.height;
+ ptr[8] = event_notify->crop_data.width;
dprintk(VIDC_DBG,
"Event payload: height = %d width = %d\n",
@@ -1571,6 +1583,13 @@ static void handle_event_change(enum hal_command_response cmd, void *data)
event_notify->bit_depth, event_notify->pic_struct,
event_notify->colour_space);
+ dprintk(VIDC_DBG,
+ "Event payload: CROP top = %d left = %d Height = %d Width = %d\n",
+ event_notify->crop_data.top,
+ event_notify->crop_data.left,
+ event_notify->crop_data.height,
+ event_notify->crop_data.width);
+
mutex_lock(&inst->lock);
inst->in_reconfig = true;
inst->reconfig_height = event_notify->height;
@@ -4245,14 +4264,13 @@ int msm_comm_try_get_prop(struct msm_vidc_inst *inst, enum hal_property ptype,
__func__, inst,
SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO));
inst->state = MSM_VIDC_CORE_INVALID;
- msm_comm_kill_session(inst);
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
dprintk(VIDC_ERR,
"SESS_PROP timeout can potentially crash the system\n");
- if (inst->core->resources.debug_timeout)
- msm_comm_print_debug_info(inst);
+ msm_comm_print_debug_info(inst);
msm_vidc_handle_hw_error(inst->core);
+ msm_comm_kill_session(inst);
rc = -ETIMEDOUT;
goto exit;
} else {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 8ffbf50..c197776 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -12,6 +12,7 @@
*/
#define CREATE_TRACE_POINTS
+#define MAX_SSR_STRING_LEN 10
#include "msm_vidc_debug.h"
#include "vidc_hfi_api.h"
@@ -134,21 +135,36 @@ static int trigger_ssr_open(struct inode *inode, struct file *file)
static ssize_t trigger_ssr_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos) {
- u32 ssr_trigger_val;
- int rc;
+ unsigned long ssr_trigger_val = 0;
+ int rc = 0;
struct msm_vidc_core *core = filp->private_data;
+ size_t size = MAX_SSR_STRING_LEN;
+ char kbuf[MAX_SSR_STRING_LEN + 1] = {0};
if (!buf)
return -EINVAL;
- rc = kstrtou32(buf, 0, &ssr_trigger_val);
- if (rc < 0) {
+ if (!count)
+ goto exit;
+
+ if (count < size)
+ size = count;
+
+ if (copy_from_user(kbuf, buf, size)) {
+ dprintk(VIDC_WARN, "%s User memory fault\n", __func__);
+ rc = -EFAULT;
+ goto exit;
+ }
+
+ rc = kstrtoul(kbuf, 0, &ssr_trigger_val);
+ if (rc) {
dprintk(VIDC_WARN, "returning error err %d\n", rc);
rc = -EINVAL;
} else {
msm_vidc_trigger_ssr(core, ssr_trigger_val);
rc = count;
}
+exit:
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
index 8fd895d..f4c851a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
@@ -186,7 +186,7 @@ static inline void msm_vidc_handle_hw_error(struct msm_vidc_core *core)
{
bool enable_fatal;
- enable_fatal = core->resources.debug_timeout;
+ enable_fatal = msm_vidc_debug_timeout;
/* Video driver can decide FATAL handling of HW errors
* based on multiple factors. This condition check will
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 37bccbd..5edd3d5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -175,9 +175,17 @@ struct msm_video_device {
struct video_device vdev;
};
+struct session_crop {
+ u32 left;
+ u32 top;
+ u32 width;
+ u32 height;
+};
+
struct session_prop {
u32 width[MAX_PORT_NUM];
u32 height[MAX_PORT_NUM];
+ struct session_crop crop_info;
u32 fps;
u32 bitrate;
};
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index d259072..19ca561 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -969,7 +969,7 @@ int read_platform_resources_from_dt(
res->debug_timeout = of_property_read_bool(pdev->dev.of_node,
"qcom,debug-timeout");
- res->debug_timeout |= msm_vidc_debug_timeout;
+ msm_vidc_debug_timeout |= res->debug_timeout;
of_property_read_u32(pdev->dev.of_node,
"qcom,pm-qos-latency-us", &res->pm_qos_latency_us);
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 8968764..6139e46 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1049,8 +1049,12 @@ static int venus_hfi_suspend(void *dev)
}
dprintk(VIDC_DBG, "Suspending Venus\n");
- rc = flush_delayed_work(&venus_hfi_pm_work);
+ flush_delayed_work(&venus_hfi_pm_work);
+ mutex_lock(&device->lock);
+ if (device->power_enabled)
+ rc = -EBUSY;
+ mutex_unlock(&device->lock);
return rc;
}
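
The suspend rework above relies on the fact that flush_delayed_work() only reports whether there was pending work to wait for, not whether the device actually power-collapsed; the decisive check is re-reading power_enabled under the device lock and returning -EBUSY if the hardware is still up. A minimal sketch of this flush-then-verify idiom; the device structure and work item names are illustrative, assuming <linux/workqueue.h> and <linux/mutex.h>.

struct sketch_dev {
        struct mutex lock;
        struct delayed_work pm_work;
        bool power_enabled;
};

static int flush_then_verify_suspend(struct sketch_dev *dev)
{
        int rc = 0;

        /* let any deferred power-collapse work run to completion first */
        flush_delayed_work(&dev->pm_work);

        /* then decide based on the actual power state, under the lock */
        mutex_lock(&dev->lock);
        if (dev->power_enabled)
                rc = -EBUSY;    /* still powered: veto the suspend */
        mutex_unlock(&dev->lock);

        return rc;
}
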
@@ -4168,7 +4172,7 @@ static int venus_hfi_get_fw_info(void *dev, struct hal_fw_info *fw_info)
struct venus_hfi_device *device = dev;
u32 smem_block_size = 0;
u8 *smem_table_ptr;
- char version[VENUS_VERSION_LENGTH];
+ char version[VENUS_VERSION_LENGTH] = "";
const u32 smem_image_index_venus = 14 * 128;
if (!device || !fw_info) {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index a2f076b..86e4f42 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -154,7 +154,7 @@ enum hal_property {
HAL_PARAM_VENC_SESSION_QP_RANGE,
HAL_CONFIG_VENC_INTRA_PERIOD,
HAL_CONFIG_VENC_IDR_PERIOD,
- HAL_CONFIG_VPE_OPERATIONS,
+ HAL_PARAM_VPE_ROTATION,
HAL_PARAM_VENC_INTRA_REFRESH,
HAL_PARAM_VENC_MULTI_SLICE_CONTROL,
HAL_SYS_DEBUG_CONFIG,
@@ -634,7 +634,7 @@ enum hal_flip {
HAL_UNUSED_FLIP = 0x10000000,
};
-struct hal_operations {
+struct hal_vpe_rotation {
enum hal_rotate rotate;
enum hal_flip flip;
};
@@ -1019,7 +1019,7 @@ union hal_get_property {
struct hal_quantization_range quantization_range;
struct hal_intra_period intra_period;
struct hal_idr_period idr_period;
- struct hal_operations operations;
+ struct hal_vpe_rotation vpe_rotation;
struct hal_intra_refresh intra_refresh;
struct hal_multi_slice_control multi_slice_control;
struct hal_debug_config debug_config;
@@ -1212,6 +1212,16 @@ struct msm_vidc_cb_cmd_done {
} data;
};
+struct hal_index_extradata_input_crop_payload {
+ u32 size;
+ u32 version;
+ u32 port_index;
+ u32 left;
+ u32 top;
+ u32 width;
+ u32 height;
+};
+
struct msm_vidc_cb_event {
u32 device_id;
void *session_id;
@@ -1227,6 +1237,8 @@ struct msm_vidc_cb_event {
u32 profile;
u32 level;
u32 entropy_mode;
+ u32 capture_buf_count;
+ struct hal_index_extradata_input_crop_payload crop_data;
};
struct msm_vidc_cb_data_done {
@@ -1314,16 +1326,6 @@ struct vidc_clk_scale_data {
int num_sessions;
};
-struct hal_index_extradata_input_crop_payload {
- u32 size;
- u32 version;
- u32 port_index;
- u32 left;
- u32 top;
- u32 width;
- u32 height;
-};
-
struct hal_cmd_sys_get_property_packet {
u32 size;
u32 packet_type;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 2d4a573..616fc09 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -327,8 +327,6 @@ struct hfi_buffer_info {
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x004)
#define HFI_PROPERTY_CONFIG_VENC_SLICE_SIZE \
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x005)
-#define HFI_PROPERTY_PARAM_VPE_COMMON_START \
- (HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000)
#define HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER \
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x008)
#define HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME \
@@ -344,13 +342,15 @@ struct hfi_buffer_info {
#define HFI_PROPERTY_CONFIG_VENC_SESSION_QP \
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x012)
+#define HFI_PROPERTY_PARAM_VPE_COMMON_START \
+ (HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000)
+#define HFI_PROPERTY_PARAM_VPE_ROTATION \
+ (HFI_PROPERTY_PARAM_VPE_COMMON_START + 0x001)
#define HFI_PROPERTY_CONFIG_VPE_COMMON_START \
(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000)
#define HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE \
(HFI_PROPERTY_CONFIG_COMMON_START + 0x010)
-#define HFI_PROPERTY_CONFIG_VPE_OPERATIONS \
- (HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x002)
struct hfi_pic_struct {
u32 progressive_only;
@@ -472,7 +472,7 @@ struct hfi_idr_period {
u32 idr_period;
};
-struct hfi_operations_type {
+struct hfi_vpe_rotation_type {
u32 rotation;
u32 flip;
};
@@ -716,12 +716,7 @@ struct hfi_vpe_color_space_conversion {
#define HFI_FLIP_NONE (HFI_COMMON_BASE + 0x1)
#define HFI_FLIP_HORIZONTAL (HFI_COMMON_BASE + 0x2)
-#define HFI_FLIP_VERTICAL (HFI_COMMON_BASE + 0x3)
-
-struct hfi_operations {
- u32 rotate;
- u32 flip;
-};
+#define HFI_FLIP_VERTICAL (HFI_COMMON_BASE + 0x4)
#define HFI_RESOURCE_SYSCACHE 0x00000002
diff --git a/drivers/mfd/wcd934x-regmap.c b/drivers/mfd/wcd934x-regmap.c
index fbaf05e..e8ba149 100644
--- a/drivers/mfd/wcd934x-regmap.c
+++ b/drivers/mfd/wcd934x-regmap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1926,6 +1926,19 @@ static bool wcd934x_is_volatile_register(struct device *dev, unsigned int reg)
case WCD934X_ANA_MBHC_ELECT:
case WCD934X_ANA_MBHC_ZDET:
case WCD934X_ANA_MICB2:
+ case WCD934X_CODEC_RPM_CLK_MCLK_CFG:
+ case WCD934X_CLK_SYS_MCLK_PRG:
+ case WCD934X_CHIP_TIER_CTRL_EFUSE_CTL:
+ case WCD934X_ANA_BIAS:
+ case WCD934X_ANA_BUCK_CTL:
+ case WCD934X_ANA_RCO:
+ case WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL:
+ case WCD934X_CODEC_RPM_CLK_GATE:
+ case WCD934X_BIAS_VBG_FINE_ADJ:
+ case WCD934X_CODEC_CPR_SVS_CX_VDD:
+ case WCD934X_CODEC_CPR_SVS2_CX_VDD:
+ case WCD934X_CDC_TOP_TOP_CFG1:
+ case WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL:
return true;
}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0ac1cf7..e203ba6 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -776,7 +776,7 @@
config UID_SYS_STATS
bool "Per-UID statistics"
- depends on PROFILING
+ depends on PROFILING && TASK_XACCT && TASK_IO_ACCOUNTING
help
Per UID based cpu time statistics exported to /proc/uid_cputime
Per UID based io statistics exported to /proc/uid_io
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 877c4d1..c1857c7 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1261,7 +1261,7 @@ static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
atomic_read(&data->ioctl_count) <= 1)) {
pr_err("Interrupted from abort\n");
ret = -ERESTARTSYS;
- break;
+ return ret;
}
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 8b1b0a0..1848cdf 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -613,17 +613,39 @@ static int mmc_devfreq_create_freq_table(struct mmc_host *host)
host->card->clk_scaling_lowest,
host->card->clk_scaling_highest);
+ /*
+ * Create the frequency table and initialize it with default values.
+ * Initialize it with platform specific frequencies if the frequency
+ * table supplied by platform driver is present, otherwise initialize
+ * it with min and max frequencies supported by the card.
+ */
if (!clk_scaling->freq_table) {
- pr_debug("%s: no frequency table defined - setting default\n",
- mmc_hostname(host));
+ if (clk_scaling->pltfm_freq_table_sz)
+ clk_scaling->freq_table_sz =
+ clk_scaling->pltfm_freq_table_sz;
+ else
+ clk_scaling->freq_table_sz = 2;
+
clk_scaling->freq_table = kzalloc(
- 2*sizeof(*(clk_scaling->freq_table)), GFP_KERNEL);
+ (clk_scaling->freq_table_sz *
+ sizeof(*(clk_scaling->freq_table))), GFP_KERNEL);
if (!clk_scaling->freq_table)
return -ENOMEM;
- clk_scaling->freq_table[0] = host->card->clk_scaling_lowest;
- clk_scaling->freq_table[1] = host->card->clk_scaling_highest;
- clk_scaling->freq_table_sz = 2;
- goto out;
+
+ if (clk_scaling->pltfm_freq_table) {
+ memcpy(clk_scaling->freq_table,
+ clk_scaling->pltfm_freq_table,
+ (clk_scaling->pltfm_freq_table_sz *
+ sizeof(*(clk_scaling->pltfm_freq_table))));
+ } else {
+ pr_debug("%s: no frequency table defined - setting default\n",
+ mmc_hostname(host));
+ clk_scaling->freq_table[0] =
+ host->card->clk_scaling_lowest;
+ clk_scaling->freq_table[1] =
+ host->card->clk_scaling_highest;
+ goto out;
+ }
}
if (host->card->clk_scaling_lowest >
@@ -840,7 +862,7 @@ int mmc_resume_clk_scaling(struct mmc_host *host)
devfreq_min_clk = host->clk_scaling.freq_table[0];
host->clk_scaling.curr_freq = devfreq_max_clk;
- if (host->ios.clock < host->card->clk_scaling_highest)
+ if (host->ios.clock < host->clk_scaling.freq_table[max_clk_idx])
host->clk_scaling.curr_freq = devfreq_min_clk;
host->clk_scaling.clk_scaling_in_progress = false;
@@ -902,6 +924,10 @@ int mmc_exit_clk_scaling(struct mmc_host *host)
host->clk_scaling.devfreq = NULL;
atomic_set(&host->clk_scaling.devfreq_abort, 1);
+
+ kfree(host->clk_scaling.freq_table);
+ host->clk_scaling.freq_table = NULL;
+
pr_debug("%s: devfreq was removed\n", mmc_hostname(host));
return 0;
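
Taken together with the sdhci-msm hunk further down, the clock-scaling changes above split the frequency table into a platform-supplied pltfm_freq_table (filled from the qcom,devfreq,freq-table DT property by the host driver) and the working freq_table, which the core now sizes accordingly, copies from the platform table when one exists, falls back to the two-point [card lowest, card highest] table otherwise, and frees again in mmc_exit_clk_scaling(). A condensed sketch of that selection policy; the structure and helper names are illustrative, not the mmc core API.

struct scaling_sketch {
        u32 *pltfm_freq_table;
        int pltfm_freq_table_sz;
        u32 *freq_table;
        int freq_table_sz;
};

static int pick_freq_table(struct scaling_sketch *cs, u32 lowest, u32 highest)
{
        cs->freq_table_sz = cs->pltfm_freq_table_sz ?
                                cs->pltfm_freq_table_sz : 2;
        cs->freq_table = kcalloc(cs->freq_table_sz, sizeof(*cs->freq_table),
                                 GFP_KERNEL);
        if (!cs->freq_table)
                return -ENOMEM;

        if (cs->pltfm_freq_table) {     /* DT-supplied table wins */
                memcpy(cs->freq_table, cs->pltfm_freq_table,
                       cs->pltfm_freq_table_sz * sizeof(*cs->freq_table));
        } else {                        /* default: min/max of the card */
                cs->freq_table[0] = lowest;
                cs->freq_table[1] = highest;
        }
        return 0;
}
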
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 0d0d56f..0c8ff86 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -338,10 +338,15 @@ static int mmc_force_err_set(void *data, u64 val)
{
struct mmc_host *host = data;
- if (host && host->ops && host->ops->force_err_irq) {
- mmc_host_clk_hold(host);
+ if (host && host->card && host->ops &&
+ host->ops->force_err_irq) {
+ /*
+ * To access the force error irq reg, we need to make
+ * sure the host is powered up and host clock is ticking.
+ */
+ mmc_get_card(host->card);
host->ops->force_err_irq(host, val);
- mmc_host_clk_release(host);
+ mmc_put_card(host->card);
}
return 0;
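
The switch from mmc_host_clk_hold()/mmc_host_clk_release() to mmc_get_card()/mmc_put_card() above matters because the force-error register is only reachable when the host is both powered and clocked; mmc_get_card() claims the host and runtime-resumes the card, which guarantees both. The resulting access bracket, sketched below, reuses the force_err_irq host op already shown above; the wrapper itself is illustrative.

static void poke_force_err(struct mmc_host *host, u64 val)
{
        mmc_get_card(host->card);       /* claim host + pm_runtime resume */
        host->ops->force_err_irq(host, val);
        mmc_put_card(host->card);       /* release in reverse order */
}
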
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 7262466..50dd6bd 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -157,7 +157,8 @@ static const struct sdhci_ops sdhci_iproc_ops = {
};
static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
- .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
.ops = &sdhci_iproc_ops,
};
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index fe62b69..a819b88 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1838,13 +1838,13 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
}
if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
- &msm_host->mmc->clk_scaling.freq_table,
- &msm_host->mmc->clk_scaling.freq_table_sz, 0))
+ &msm_host->mmc->clk_scaling.pltfm_freq_table,
+ &msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
pr_debug("%s: no clock scaling frequencies were supplied\n",
dev_name(dev));
- else if (!msm_host->mmc->clk_scaling.freq_table ||
- !msm_host->mmc->clk_scaling.freq_table_sz)
- dev_err(dev, "bad dts clock scaling frequencies\n");
+ else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
+ !msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
+ dev_err(dev, "bad dts clock scaling frequencies\n");
/*
* Few hosts can support DDR52 mode at the same lower
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index edc70ff..6dcc42d 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2573,7 +2573,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
return -1;
ad_info->aggregator_id = aggregator->aggregator_identifier;
- ad_info->ports = aggregator->num_of_ports;
+ ad_info->ports = __agg_active_ports(aggregator);
ad_info->actor_key = aggregator->actor_oper_aggregator_key;
ad_info->partner_key = aggregator->partner_oper_aggregator_key;
ether_addr_copy(ad_info->partner_system,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 93aa293..9711ca4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5144,9 +5144,11 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
struct be_adapter *adapter = netdev_priv(dev);
u8 l4_hdr = 0;
- /* The code below restricts offload features for some tunneled packets.
+ /* The code below restricts offload features for some tunneled and
+ * Q-in-Q packets.
* Offload features for normal (non tunnel) packets are unchanged.
*/
+ features = vlan_features_check(skb, features);
if (!skb->encapsulation ||
!(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
return features;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3f51a44..cb45390 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -767,7 +767,7 @@ static void cb_timeout_handler(struct work_struct *work)
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(msg_to_opcode(ent->in)),
msg_to_opcode(ent->in));
- mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
}
static void cmd_work_handler(struct work_struct *work)
@@ -797,6 +797,7 @@ static void cmd_work_handler(struct work_struct *work)
}
cmd->ent_arr[ent->idx] = ent;
+ set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
@@ -818,6 +819,20 @@ static void cmd_work_handler(struct work_struct *work)
if (ent->callback)
schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+ /* Skip sending command to fw if internal error */
+ if (pci_channel_offline(dev->pdev) ||
+ dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ u8 status = 0;
+ u32 drv_synd;
+
+ ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
+ MLX5_SET(mbox_out, ent->out, status, status);
+ MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
+
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+ return;
+ }
+
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
@@ -828,7 +843,7 @@ static void cmd_work_handler(struct work_struct *work)
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
rmb();
- mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
}
}
@@ -872,7 +887,7 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
wait_for_completion(&ent->done);
} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
ent->ret = -ETIMEDOUT;
- mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
}
err = ent->ret;
@@ -1369,7 +1384,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
}
}
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
@@ -1389,6 +1404,19 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
struct semaphore *sem;
ent = cmd->ent_arr[i];
+
+ /* if we already completed the command, ignore it */
+ if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
+ &ent->state)) {
+ /* only real completion can free the cmd slot */
+ if (!forced) {
+ mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
+ ent->idx);
+ free_ent(cmd, ent->idx);
+ }
+ continue;
+ }
+
if (ent->callback)
cancel_delayed_work(&ent->cb_timeout_work);
if (ent->page_queue)
@@ -1411,7 +1439,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
ent->ret, deliv_status_to_str(ent->status), ent->status);
}
- free_ent(cmd, ent->idx);
+
+ /* only real completion will free the entry slot */
+ if (!forced)
+ free_ent(cmd, ent->idx);
if (ent->callback) {
ds = ent->ts2 - ent->ts1;
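
The new forced parameter and the MLX5_CMD_ENT_STATE_PENDING_COMP bit above implement a single-winner completion: the submitter sets the bit before ringing the doorbell, and the real EQE path (forced = false) and the timeout/internal-error paths (forced = true) race to clear it, so each command is completed exactly once and only a real completion ever frees the entry slot. A minimal sketch of the claim step; the type and bit name here are illustrative, assuming <linux/bitops.h>.

struct cmd_ent_sketch {
        unsigned long state;
};

#define ENT_PENDING_COMP 0      /* bit index, set before the doorbell */

/* Returns true for exactly one caller per command; the loser must not
 * complete the entry again, and only a non-forced (real) completion may
 * free a command slot the hardware could still be writing to.
 */
static bool claim_completion(struct cmd_ent_sketch *ent)
{
        return test_and_clear_bit(ENT_PENDING_COMP, &ent->state);
}
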
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 126cfeb..3744e2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -751,7 +751,6 @@ static void get_supported(u32 eth_proto_cap,
ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
ptys2ethtool_supported_link(supported, eth_proto_cap);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
- ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause);
}
static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
@@ -761,7 +760,7 @@ static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
unsigned long *advertising = link_ksettings->link_modes.advertising;
ptys2ethtool_adver_link(advertising, eth_proto_cap);
- if (tx_pause)
+ if (rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
if (tx_pause ^ rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
@@ -806,6 +805,8 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
+ u32 rx_pause = 0;
+ u32 tx_pause = 0;
u32 eth_proto_cap;
u32 eth_proto_admin;
u32 eth_proto_lp;
@@ -828,11 +829,13 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
an_status = MLX5_GET(ptys_reg, out, an_status);
+ mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
+
ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
get_supported(eth_proto_cap, link_ksettings);
- get_advertising(eth_proto_admin, 0, 0, link_ksettings);
+ get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings);
get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
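
The get_advertising() fix above restores the conventional rx/tx pause to link-mode mapping (the same convention as the generic mii helpers such as mii_advertise_flowctrl()): the Pause bit follows the local rx_pause capability, Asym_Pause is set when rx and tx differ, and the actual pause state is now queried from the port instead of being hard-coded to 0/0. A compact sketch of that mapping using the legacy ethtool bit masks from <linux/ethtool.h>; the helper name is illustrative.

static u32 pause_to_advertised(bool rx_pause, bool tx_pause)
{
        u32 adv = 0;

        if (rx_pause)
                adv |= ADVERTISED_Pause;
        if (rx_pause ^ tx_pause)
                adv |= ADVERTISED_Asym_Pause;

        return adv;
}
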
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index aaca090..f86e9ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -234,7 +234,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
break;
case MLX5_EVENT_TYPE_CMD:
- mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
+ mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
break;
case MLX5_EVENT_TYPE_PORT_CHANGE:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 5bcf934..2115c8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -90,7 +90,7 @@ static void trigger_cmd_completions(struct mlx5_core_dev *dev)
spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
- mlx5_cmd_comp_handler(dev, vector);
+ mlx5_cmd_comp_handler(dev, vector, true);
return;
no_trig:
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index c2dcf02..d6a541b 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -240,34 +240,6 @@ static int marvell_config_aneg(struct phy_device *phydev)
{
int err;
- /* The Marvell PHY has an errata which requires
- * that certain registers get written in order
- * to restart autonegotiation */
- err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1d, 0x1f);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1e, 0x200c);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1d, 0x5);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1e, 0);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1e, 0x100);
- if (err < 0)
- return err;
-
err = marvell_set_polarity(phydev, phydev->mdix);
if (err < 0)
return err;
@@ -301,6 +273,42 @@ static int marvell_config_aneg(struct phy_device *phydev)
return 0;
}
+static int m88e1101_config_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ /* This Marvell PHY has an errata which requires
+ * that certain registers get written in order
+ * to restart autonegotiation
+ */
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1d, 0x1f);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1e, 0x200c);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1d, 0x5);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1e, 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1e, 0x100);
+ if (err < 0)
+ return err;
+
+ return marvell_config_aneg(phydev);
+}
+
static int m88e1111_config_aneg(struct phy_device *phydev)
{
int err;
@@ -1491,7 +1499,7 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.flags = PHY_HAS_INTERRUPT,
.config_init = &marvell_config_init,
- .config_aneg = &marvell_config_aneg,
+ .config_aneg = &m88e1101_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 34d997c..2f260c6 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -897,6 +897,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
{QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 51fc0c3..7ca9989 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1456,6 +1456,7 @@ static const struct net_device_ops virtnet_netdev = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = virtnet_busy_poll,
#endif
+ .ndo_features_check = passthru_features_check,
};
static void virtnet_config_changed_work(struct work_struct *work)
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 80ef486..ee02605 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -850,6 +850,7 @@ static u32 vrf_fib_table(const struct net_device *dev)
static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ kfree_skb(skb);
return 0;
}
@@ -859,7 +860,7 @@ static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
{
struct net *net = dev_net(dev);
- if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
+ if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
skb = NULL; /* kfree_skb(skb) handled by nf code */
return skb;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index a83f8f6..9afd6f2 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -15,6 +15,7 @@
*/
#include <linux/etherdevice.h>
+#include <net/netlink.h>
#include "wil6210.h"
#include "wmi.h"
#include "ftm.h"
@@ -55,6 +56,62 @@ static struct ieee80211_channel wil_60ghz_channels[] = {
#define QCA_NL80211_VENDOR_ID 0x001374
+#define WIL_MAX_RF_SECTORS (128)
+#define WIL_CID_ALL (0xff)
+
+enum qca_wlan_vendor_attr_rf_sector {
+ QCA_ATTR_MAC_ADDR = 6,
+ QCA_ATTR_PAD = 13,
+ QCA_ATTR_TSF = 29,
+ QCA_ATTR_DMG_RF_SECTOR_INDEX = 30,
+ QCA_ATTR_DMG_RF_SECTOR_TYPE = 31,
+ QCA_ATTR_DMG_RF_MODULE_MASK = 32,
+ QCA_ATTR_DMG_RF_SECTOR_CFG = 33,
+ QCA_ATTR_DMG_RF_SECTOR_MAX,
+};
+
+enum qca_wlan_vendor_attr_dmg_rf_sector_type {
+ QCA_ATTR_DMG_RF_SECTOR_TYPE_RX,
+ QCA_ATTR_DMG_RF_SECTOR_TYPE_TX,
+ QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX
+};
+
+enum qca_wlan_vendor_attr_dmg_rf_sector_cfg {
+ QCA_ATTR_DMG_RF_SECTOR_CFG_INVALID = 0,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
+
+ /* keep last */
+ QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_MAX =
+ QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST - 1
+};
+
+static const struct
+nla_policy wil_rf_sector_policy[QCA_ATTR_DMG_RF_SECTOR_MAX + 1] = {
+ [QCA_ATTR_MAC_ADDR] = { .len = ETH_ALEN },
+ [QCA_ATTR_DMG_RF_SECTOR_INDEX] = { .type = NLA_U16 },
+ [QCA_ATTR_DMG_RF_SECTOR_TYPE] = { .type = NLA_U8 },
+ [QCA_ATTR_DMG_RF_MODULE_MASK] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG] = { .type = NLA_NESTED },
+};
+
+static const struct
+nla_policy wil_rf_sector_cfg_policy[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1] = {
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] = { .type = NLA_U8 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16] = { .type = NLA_U32 },
+};
+
enum qca_nl80211_vendor_subcmds {
QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA = 128,
QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION = 129,
@@ -65,8 +122,25 @@ enum qca_nl80211_vendor_subcmds {
QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS = 134,
QCA_NL80211_VENDOR_SUBCMD_AOA_ABORT_MEAS = 135,
QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT = 136,
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG = 139,
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG = 140,
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR = 141,
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR = 142,
};
+static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len);
+static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len);
+static int wil_rf_sector_get_selected(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len);
+static int wil_rf_sector_set_selected(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len);
+
/* vendor specific commands */
static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
{
@@ -111,6 +185,36 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wil_aoa_abort_measurement
},
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_rf_sector_get_cfg
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_rf_sector_set_cfg
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd =
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_rf_sector_get_selected
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd =
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_rf_sector_set_selected
+ },
};
/* vendor specific events */
@@ -1665,6 +1769,42 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
return wil_ps_update(wil, ps_profile);
}
+static int wil_cfg80211_suspend(struct wiphy *wiphy,
+ struct cfg80211_wowlan *wow)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ /* Setting the wakeup trigger based on wow is TBD */
+
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_dbg_pm(wil, "trying to suspend while suspended\n");
+ return 0;
+ }
+
+ rc = wil_can_suspend(wil, false);
+ if (rc)
+ goto out;
+
+ wil_dbg_pm(wil, "suspending\n");
+
+ wil_p2p_stop_discovery(wil);
+
+ wil_abort_scan(wil, true);
+
+out:
+ return rc;
+}
+
+static int wil_cfg80211_resume(struct wiphy *wiphy)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_pm(wil, "resuming\n");
+
+ return 0;
+}
+
static struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
@@ -1696,6 +1836,8 @@ static struct cfg80211_ops wil_cfg80211_ops = {
.start_p2p_device = wil_cfg80211_start_p2p_device,
.stop_p2p_device = wil_cfg80211_stop_p2p_device,
.set_power_mgmt = wil_cfg80211_set_power_mgmt,
+ .suspend = wil_cfg80211_suspend,
+ .resume = wil_cfg80211_resume,
};
static void wil_wiphy_init(struct wiphy *wiphy)
@@ -1799,3 +1941,451 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
kfree(p2p_wdev);
}
}
+
+static int wil_rf_sector_status_to_rc(u8 status)
+{
+ switch (status) {
+ case WMI_RF_SECTOR_STATUS_SUCCESS:
+ return 0;
+ case WMI_RF_SECTOR_STATUS_BAD_PARAMETERS_ERROR:
+ return -EINVAL;
+ case WMI_RF_SECTOR_STATUS_BUSY_ERROR:
+ return -EAGAIN;
+ case WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wdev_to_wil(wdev);
+ int rc;
+ struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+ u16 sector_index;
+ u8 sector_type;
+ u32 rf_modules_vec;
+ struct wmi_get_rf_sector_params_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_get_rf_sector_params_done_event evt;
+ } __packed reply;
+ struct sk_buff *msg;
+ struct nlattr *nl_cfgs, *nl_cfg;
+ u32 i;
+ struct wmi_rf_sector_info *si;
+
+ if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+ wil_rf_sector_policy);
+ if (rc) {
+ wil_err(wil, "Invalid rf sector ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+ !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
+ !tb[QCA_ATTR_DMG_RF_MODULE_MASK]) {
+ wil_err(wil, "Invalid rf sector spec\n");
+ return -EINVAL;
+ }
+
+ sector_index = nla_get_u16(
+ tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+ if (sector_index >= WIL_MAX_RF_SECTORS) {
+ wil_err(wil, "Invalid sector index %d\n", sector_index);
+ return -EINVAL;
+ }
+
+ sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+ if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+ wil_err(wil, "Invalid sector type %d\n", sector_type);
+ return -EINVAL;
+ }
+
+ rf_modules_vec = nla_get_u32(
+ tb[QCA_ATTR_DMG_RF_MODULE_MASK]);
+ if (rf_modules_vec >= BIT(WMI_MAX_RF_MODULES_NUM)) {
+ wil_err(wil, "Invalid rf module mask 0x%x\n", rf_modules_vec);
+ return -EINVAL;
+ }
+
+ cmd.sector_idx = cpu_to_le16(sector_index);
+ cmd.sector_type = sector_type;
+ cmd.rf_modules_vec = rf_modules_vec & 0xFF;
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_GET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd),
+ WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID,
+ &reply, sizeof(reply),
+ 500);
+ if (rc)
+ return rc;
+ if (reply.evt.status) {
+ wil_err(wil, "get rf sector cfg failed with status %d\n",
+ reply.evt.status);
+ return wil_rf_sector_status_to_rc(reply.evt.status);
+ }
+
+ msg = cfg80211_vendor_cmd_alloc_reply_skb(
+ wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
+ if (!msg)
+ return -ENOMEM;
+
+ if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
+ le64_to_cpu(reply.evt.tsf),
+ QCA_ATTR_PAD))
+ goto nla_put_failure;
+
+ nl_cfgs = nla_nest_start(msg, QCA_ATTR_DMG_RF_SECTOR_CFG);
+ if (!nl_cfgs)
+ goto nla_put_failure;
+ for (i = 0; i < WMI_MAX_RF_MODULES_NUM; i++) {
+ if (!(rf_modules_vec & BIT(i)))
+ continue;
+ nl_cfg = nla_nest_start(msg, i);
+ if (!nl_cfg)
+ goto nla_put_failure;
+ si = &reply.evt.sectors_info[i];
+ if (nla_put_u8(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
+ i) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
+ le32_to_cpu(si->etype0)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
+ le32_to_cpu(si->etype1)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
+ le32_to_cpu(si->etype2)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
+ le32_to_cpu(si->psh_hi)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
+ le32_to_cpu(si->psh_lo)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
+ le32_to_cpu(si->dtype_swch_off)))
+ goto nla_put_failure;
+ nla_nest_end(msg, nl_cfg);
+ }
+
+ nla_nest_end(msg, nl_cfgs);
+ rc = cfg80211_vendor_cmd_reply(msg);
+ return rc;
+nla_put_failure:
+ kfree_skb(msg);
+ return -ENOBUFS;
+}
+
+static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wdev_to_wil(wdev);
+ int rc, tmp;
+ struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+ struct nlattr *tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1];
+ u16 sector_index, rf_module_index;
+ u8 sector_type;
+ u32 rf_modules_vec = 0;
+ struct wmi_set_rf_sector_params_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_set_rf_sector_params_done_event evt;
+ } __packed reply;
+ struct nlattr *nl_cfg;
+ struct wmi_rf_sector_info *si;
+
+ if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+ wil_rf_sector_policy);
+ if (rc) {
+ wil_err(wil, "Invalid rf sector ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+ !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
+ !tb[QCA_ATTR_DMG_RF_SECTOR_CFG]) {
+ wil_err(wil, "Invalid rf sector spec\n");
+ return -EINVAL;
+ }
+
+ sector_index = nla_get_u16(
+ tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+ if (sector_index >= WIL_MAX_RF_SECTORS) {
+ wil_err(wil, "Invalid sector index %d\n", sector_index);
+ return -EINVAL;
+ }
+
+ sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+ if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+ wil_err(wil, "Invalid sector type %d\n", sector_type);
+ return -EINVAL;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.sector_idx = cpu_to_le16(sector_index);
+ cmd.sector_type = sector_type;
+ nla_for_each_nested(nl_cfg, tb[QCA_ATTR_DMG_RF_SECTOR_CFG],
+ tmp) {
+ rc = nla_parse_nested(tb2, QCA_ATTR_DMG_RF_SECTOR_CFG_MAX,
+ nl_cfg, wil_rf_sector_cfg_policy);
+ if (rc) {
+ wil_err(wil, "invalid sector cfg\n");
+ return -EINVAL;
+ }
+
+ if (!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]) {
+ wil_err(wil, "missing cfg params\n");
+ return -EINVAL;
+ }
+
+ rf_module_index = nla_get_u8(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX]);
+ if (rf_module_index >= WMI_MAX_RF_MODULES_NUM) {
+ wil_err(wil, "invalid RF module index %d\n",
+ rf_module_index);
+ return -EINVAL;
+ }
+ rf_modules_vec |= BIT(rf_module_index);
+ si = &cmd.sectors_info[rf_module_index];
+ si->etype0 = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0]));
+ si->etype1 = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1]));
+ si->etype2 = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2]));
+ si->psh_hi = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI]));
+ si->psh_lo = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO]));
+ si->dtype_swch_off = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]));
+ }
+
+ cmd.rf_modules_vec = rf_modules_vec & 0xFF;
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_SET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd),
+ WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID,
+ &reply, sizeof(reply),
+ 500);
+ if (rc)
+ return rc;
+ return wil_rf_sector_status_to_rc(reply.evt.status);
+}
+
+static int wil_rf_sector_get_selected(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wdev_to_wil(wdev);
+ int rc;
+ struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+ u8 sector_type, mac_addr[ETH_ALEN];
+ int cid = 0;
+ struct wmi_get_selected_rf_sector_index_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_get_selected_rf_sector_index_done_event evt;
+ } __packed reply;
+ struct sk_buff *msg;
+
+ if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+ wil_rf_sector_policy);
+ if (rc) {
+ wil_err(wil, "Invalid rf sector ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
+ wil_err(wil, "Invalid rf sector spec\n");
+ return -EINVAL;
+ }
+ sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+ if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+ wil_err(wil, "Invalid sector type %d\n", sector_type);
+ return -EINVAL;
+ }
+
+ if (tb[QCA_ATTR_MAC_ADDR]) {
+ ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
+ cid = wil_find_cid(wil, mac_addr);
+ if (cid < 0) {
+ wil_err(wil, "invalid MAC address %pM\n", mac_addr);
+ return -ENOENT;
+ }
+ } else {
+ if (test_bit(wil_status_fwconnected, wil->status)) {
+ wil_err(wil, "must specify MAC address when connected\n");
+ return -EINVAL;
+ }
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cid = (u8)cid;
+ cmd.sector_type = sector_type;
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID,
+ &cmd, sizeof(cmd),
+ WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
+ &reply, sizeof(reply),
+ 500);
+ if (rc)
+ return rc;
+ if (reply.evt.status) {
+ wil_err(wil, "get rf selected sector cfg failed with status %d\n",
+ reply.evt.status);
+ return wil_rf_sector_status_to_rc(reply.evt.status);
+ }
+
+ msg = cfg80211_vendor_cmd_alloc_reply_skb(
+ wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
+ if (!msg)
+ return -ENOMEM;
+
+ if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
+ le64_to_cpu(reply.evt.tsf),
+ QCA_ATTR_PAD) ||
+ nla_put_u16(msg, QCA_ATTR_DMG_RF_SECTOR_INDEX,
+ le16_to_cpu(reply.evt.sector_idx)))
+ goto nla_put_failure;
+
+ rc = cfg80211_vendor_cmd_reply(msg);
+ return rc;
+nla_put_failure:
+ kfree_skb(msg);
+ return -ENOBUFS;
+}
+
+static int wil_rf_sector_wmi_set_selected(struct wil6210_priv *wil,
+ u16 sector_index,
+ u8 sector_type, u8 cid)
+{
+ struct wmi_set_selected_rf_sector_index_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_set_selected_rf_sector_index_done_event evt;
+ } __packed reply;
+ int rc;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.sector_idx = cpu_to_le16(sector_index);
+ cmd.sector_type = sector_type;
+ cmd.cid = (u8)cid;
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID,
+ &cmd, sizeof(cmd),
+ WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
+ &reply, sizeof(reply),
+ 500);
+ if (rc)
+ return rc;
+ return wil_rf_sector_status_to_rc(reply.evt.status);
+}
+
+static int wil_rf_sector_set_selected(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wdev_to_wil(wdev);
+ int rc;
+ struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+ u16 sector_index;
+ u8 sector_type, mac_addr[ETH_ALEN], i;
+ int cid = 0;
+
+ if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+ wil_rf_sector_policy);
+ if (rc) {
+ wil_err(wil, "Invalid rf sector ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+ !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
+ wil_err(wil, "Invalid rf sector spec\n");
+ return -EINVAL;
+ }
+
+ sector_index = nla_get_u16(
+ tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+ if (sector_index >= WIL_MAX_RF_SECTORS &&
+ sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
+ wil_err(wil, "Invalid sector index %d\n", sector_index);
+ return -EINVAL;
+ }
+
+ sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+ if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+ wil_err(wil, "Invalid sector type %d\n", sector_type);
+ return -EINVAL;
+ }
+
+ if (tb[QCA_ATTR_MAC_ADDR]) {
+ ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
+ if (!is_broadcast_ether_addr(mac_addr)) {
+ cid = wil_find_cid(wil, mac_addr);
+ if (cid < 0) {
+ wil_err(wil, "invalid MAC address %pM\n",
+ mac_addr);
+ return -ENOENT;
+ }
+ } else {
+ if (sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
+ wil_err(wil, "broadcast MAC valid only with unlocking\n");
+ return -EINVAL;
+ }
+ cid = -1;
+ }
+ } else {
+ if (test_bit(wil_status_fwconnected, wil->status)) {
+ wil_err(wil, "must specify MAC address when connected\n");
+ return -EINVAL;
+ }
+ /* otherwise, using cid=0 for unassociated station */
+ }
+
+ if (cid >= 0) {
+ rc = wil_rf_sector_wmi_set_selected(wil, sector_index,
+ sector_type, cid);
+ } else {
+ /* unlock all cids */
+ rc = wil_rf_sector_wmi_set_selected(
+ wil, WMI_INVALID_RF_SECTOR_INDEX, sector_type,
+ WIL_CID_ALL);
+ if (rc == -EINVAL) {
+ for (i = 0; i < WIL6210_MAX_CID; i++) {
+ rc = wil_rf_sector_wmi_set_selected(
+ wil, WMI_INVALID_RF_SECTOR_INDEX,
+ sector_type, i);
+ /* the FW will silently ignore and return
+ * success for unused cid, so abort the loop
+ * on any other error
+ */
+ if (rc) {
+ wil_err(wil, "unlock cid %d failed with status %d\n",
+ i, rc);
+ break;
+ }
+ }
+ }
+ }
+
+ return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 5648ebb..0ac657d 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -509,6 +509,10 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
void *buf;
size_t ret;
+ if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
+ test_bit(wil_status_suspended, wil_blob->wil->status))
+ return 0;
+
if (pos < 0)
return -EINVAL;
@@ -1604,6 +1608,49 @@ static const struct file_operations fops_fw_version = {
.llseek = seq_lseek,
};
+/*---------suspend_stats---------*/
+static ssize_t wil_write_suspend_stats(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+
+ memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
+
+ return len;
+}
+
+static ssize_t wil_read_suspend_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ static char text[400];
+ int n;
+
+ n = snprintf(text, sizeof(text),
+ "Suspend statistics:\n"
+ "successful suspends:%ld failed suspends:%ld\n"
+ "successful resumes:%ld failed resumes:%ld\n"
+ "rejected by host:%ld rejected by device:%ld\n",
+ wil->suspend_stats.successful_suspends,
+ wil->suspend_stats.failed_suspends,
+ wil->suspend_stats.successful_resumes,
+ wil->suspend_stats.failed_resumes,
+ wil->suspend_stats.rejected_by_host,
+ wil->suspend_stats.rejected_by_device);
+
+ n = min_t(int, n, sizeof(text));
+
+ return simple_read_from_buffer(user_buf, count, ppos, text, n);
+}
+
+static const struct file_operations fops_suspend_stats = {
+ .read = wil_read_suspend_stats,
+ .write = wil_write_suspend_stats,
+ .open = simple_open,
+};
+
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@@ -1656,6 +1703,7 @@ static const struct {
{"led_blink_time", 0644, &fops_led_blink_time},
{"fw_capabilities", 0444, &fops_fw_capabilities},
{"fw_version", 0444, &fops_fw_version},
+ {"suspend_stats", 0644, &fops_suspend_stats},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1702,6 +1750,7 @@ static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(discovery_mode, 0644, doff_u8),
WIL_FIELD(chip_revision, 0444, doff_u8),
WIL_FIELD(abft_len, 0644, doff_u8),
+ WIL_FIELD(wakeup_trigger, 0644, doff_u8),
{},
};
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index cab1e5c..cad8a95c4 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -467,6 +467,12 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
wil6210_unmask_irq_pseudo(wil);
+ if (wil->suspend_resp_rcvd) {
+ wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
+ wil->suspend_resp_comp = true;
+ wake_up_interruptible(&wil->wq);
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
index bbdd232..f8d2c20 100644
--- a/drivers/net/wireless/ath/wil6210/ioctl.c
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -54,7 +54,7 @@ static void __iomem *wil_ioc_addr(struct wil6210_priv *wil, uint32_t addr,
}
off = a - wil->csr;
- if (size >= WIL6210_MEM_SIZE - off) {
+ if (size >= wil->bar_size - off) {
wil_err(wil, "Requested block does not fit into memory: "
"off = 0x%08x size = 0x%08x\n", off, size);
return NULL;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 1fc4580..aff8b1b 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -579,6 +579,9 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT;
+ wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
+ WMI_WAKEUP_TRIGGER_BCAST;
+
return 0;
out_wmi_wq:
@@ -589,8 +592,10 @@ int wil_priv_init(struct wil6210_priv *wil)
void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
{
- if (wil->platform_ops.bus_request)
+ if (wil->platform_ops.bus_request) {
+ wil->bus_request_kbps = kbps;
wil->platform_ops.bus_request(wil->platform_handle, kbps);
+ }
}
/**
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 1afed52..cf3fadc 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -112,8 +112,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
wil_dbg_misc(wil, "if_pcie_enable, wmi_only %d\n", wmi_only);
- pdev->msi_enabled = 0;
-
pci_set_master(pdev);
wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
@@ -200,16 +198,18 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
.ramdump = wil_platform_rop_ramdump,
.fw_recovery = wil_platform_rop_fw_recovery,
};
+ u32 bar_size = pci_resource_len(pdev, 0);
/* check HW */
dev_info(&pdev->dev, WIL_NAME
- " device found [%04x:%04x] (rev %x)\n",
- (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
+ " device found [%04x:%04x] (rev %x) bar size 0x%x\n",
+ (int)pdev->vendor, (int)pdev->device, (int)pdev->revision,
+ bar_size);
- if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
- dev_err(&pdev->dev, "Not " WIL_NAME "? "
- "BAR0 size is %lu while expecting %lu\n",
- (ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
+ if ((bar_size < WIL6210_MIN_MEM_SIZE) ||
+ (bar_size > WIL6210_MAX_MEM_SIZE)) {
+ dev_err(&pdev->dev, "Unexpected BAR0 size 0x%x\n",
+ bar_size);
return -ENODEV;
}
@@ -222,6 +222,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wil->pdev = pdev;
pci_set_drvdata(pdev, wil);
+ wil->bar_size = bar_size;
/* rollback to if_free */
wil->platform_handle =
@@ -249,7 +250,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
rc = pci_enable_device(pdev);
- if (rc) {
+ if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
"pci_enable_device failed, retry with MSI only\n");
/* Work around for platforms that can't allocate IRQ:
@@ -264,6 +265,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_plat;
}
/* rollback to err_disable_pdev */
+ pci_set_power_state(pdev, PCI_D0);
rc = pci_request_region(pdev, 0, WIL_NAME);
if (rc) {
@@ -284,6 +286,15 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wil_set_capabilities(wil);
wil6210_clear_irq(wil);
+ wil->keep_radio_on_during_sleep =
+ wil->platform_ops.keep_radio_on_during_sleep &&
+ wil->platform_ops.keep_radio_on_during_sleep(
+ wil->platform_handle) &&
+ test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
+
+ wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
+ wil->keep_radio_on_during_sleep);
+
/* FW should raise IRQ when ready */
rc = wil_if_pcie_enable(wil);
if (rc) {
@@ -383,15 +394,16 @@ static int wil6210_suspend(struct device *dev, bool is_runtime)
goto out;
rc = wil_suspend(wil, is_runtime);
- if (rc)
- goto out;
+ if (!rc) {
+ wil->suspend_stats.successful_suspends++;
- /* TODO: how do I bring card in low power state? */
-
- /* disable bus mastering */
- pci_clear_master(pdev);
- /* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */
-
+ /* If platform device supports keep_radio_on_during_sleep
+ * it will control PCIe master
+ */
+ if (!wil->keep_radio_on_during_sleep)
+ /* disable bus mastering */
+ pci_clear_master(pdev);
+ }
out:
return rc;
}
@@ -404,12 +416,21 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
- /* allow master */
- pci_set_master(pdev);
-
+ /* If platform device supports keep_radio_on_during_sleep it will
+ * control PCIe master
+ */
+ if (!wil->keep_radio_on_during_sleep)
+ /* allow master */
+ pci_set_master(pdev);
rc = wil_resume(wil, is_runtime);
- if (rc)
- pci_clear_master(pdev);
+ if (rc) {
+ wil_err(wil, "device failed to resume (%d)\n", rc);
+ wil->suspend_stats.failed_resumes++;
+ if (!wil->keep_radio_on_during_sleep)
+ pci_clear_master(pdev);
+ } else {
+ wil->suspend_stats.successful_resumes++;
+ }
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 2ae4fe8..ce1f384 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -15,6 +15,7 @@
*/
#include "wil6210.h"
+#include <linux/jiffies.h>
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
{
@@ -61,20 +62,170 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n",
is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
+ if (rc)
+ wil->suspend_stats.rejected_by_host++;
+
return rc;
}
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+static int wil_resume_keep_radio_on(struct wil6210_priv *wil)
+{
+ int rc = 0;
+
+ /* wil_status_resuming will be cleared when getting
+ * WMI_TRAFFIC_RESUME_EVENTID
+ */
+ set_bit(wil_status_resuming, wil->status);
+ clear_bit(wil_status_suspended, wil->status);
+ wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+ wil_unmask_irq(wil);
+
+ wil6210_bus_request(wil, wil->bus_request_kbps_pre_suspend);
+
+ /* Send WMI resume request to the device */
+ rc = wmi_resume(wil);
+ if (rc) {
+ wil_err(wil, "device failed to resume (%d), resetting\n", rc);
+ rc = wil_down(wil);
+ if (rc) {
+ wil_err(wil, "wil_down failed (%d)\n", rc);
+ goto out;
+ }
+ rc = wil_up(wil);
+ if (rc) {
+ wil_err(wil, "wil_up failed (%d)\n", rc);
+ goto out;
+ }
+ }
+
+ /* Wake all queues */
+ if (test_bit(wil_status_fwconnected, wil->status))
+ wil_update_net_queues_bh(wil, NULL, false);
+
+out:
+ if (rc)
+ set_bit(wil_status_suspended, wil->status);
+ return rc;
+}
+
+static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
+{
+ int rc = 0;
+ unsigned long start, data_comp_to;
+
+ wil_dbg_pm(wil, "suspend keep radio on\n");
+
+ /* Prevent handling of new tx and wmi commands */
+ set_bit(wil_status_suspending, wil->status);
+ wil_update_net_queues_bh(wil, NULL, true);
+
+ if (!wil_is_tx_idle(wil)) {
+ wil_dbg_pm(wil, "Pending TX data, reject suspend\n");
+ wil->suspend_stats.rejected_by_host++;
+ goto reject_suspend;
+ }
+
+ if (!wil_is_rx_idle(wil)) {
+ wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
+ wil->suspend_stats.rejected_by_host++;
+ goto reject_suspend;
+ }
+
+ if (!wil_is_wmi_idle(wil)) {
+ wil_dbg_pm(wil, "Pending WMI events, reject suspend\n");
+ wil->suspend_stats.rejected_by_host++;
+ goto reject_suspend;
+ }
+
+ /* Send WMI suspend request to the device */
+ rc = wmi_suspend(wil);
+ if (rc) {
+ wil_dbg_pm(wil, "wmi_suspend failed, reject suspend (%d)\n",
+ rc);
+ goto reject_suspend;
+ }
+
+ /* Wait for completion of the pending RX packets */
+ start = jiffies;
+ data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
+ if (test_bit(wil_status_napi_en, wil->status)) {
+ while (!wil_is_rx_idle(wil)) {
+ if (time_after(jiffies, data_comp_to)) {
+ if (wil_is_rx_idle(wil))
+ break;
+ wil_err(wil,
+ "TO waiting for idle RX, suspend failed\n");
+ wil->suspend_stats.failed_suspends++;
+ goto resume_after_fail;
+ }
+ wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
+ napi_synchronize(&wil->napi_rx);
+ msleep(20);
+ }
+ }
+
+ /* In case of pending WMI events, reject the suspend
+ * and resume the device.
+ * This can happen if the device sent the WMI events before
+ * approving the suspend.
+ */
+ if (!wil_is_wmi_idle(wil)) {
+ wil_err(wil, "suspend failed due to pending WMI events\n");
+ wil->suspend_stats.failed_suspends++;
+ goto resume_after_fail;
+ }
+
+ wil_mask_irq(wil);
+
+ /* Disable device reset on PERST */
+ wil_s(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+
+ if (wil->platform_ops.suspend) {
+ rc = wil->platform_ops.suspend(wil->platform_handle, true);
+ if (rc) {
+ wil_err(wil, "platform device failed to suspend (%d)\n",
+ rc);
+ wil->suspend_stats.failed_suspends++;
+ wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+ wil_unmask_irq(wil);
+ goto resume_after_fail;
+ }
+ }
+
+ /* Save the current bus request to return to the same in resume */
+ wil->bus_request_kbps_pre_suspend = wil->bus_request_kbps;
+ wil6210_bus_request(wil, 0);
+
+ set_bit(wil_status_suspended, wil->status);
+ clear_bit(wil_status_suspending, wil->status);
+
+ return rc;
+
+resume_after_fail:
+ set_bit(wil_status_resuming, wil->status);
+ clear_bit(wil_status_suspending, wil->status);
+ rc = wmi_resume(wil);
+ /* if resume succeeded, reject the suspend */
+ if (!rc) {
+ rc = -EBUSY;
+ if (test_bit(wil_status_fwconnected, wil->status))
+ wil_update_net_queues_bh(wil, NULL, false);
+ }
+ return rc;
+
+reject_suspend:
+ clear_bit(wil_status_suspending, wil->status);
+ if (test_bit(wil_status_fwconnected, wil->status))
+ wil_update_net_queues_bh(wil, NULL, false);
+ return -EBUSY;
+}
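
The RX-drain loop in wil_suspend_keep_radio_on() above, like wil_is_tx_idle() in txrx.c further down, uses the same bounded-wait idiom: poll an idle predicate, let NAPI drain the ring between polls, and abandon the suspend once a jiffies deadline passes. Reduced to its core below; the wrapper is a sketch, reusing the driver's real wil_is_rx_idle() predicate and napi_rx context for concreteness.

static int wait_for_rx_idle(struct wil6210_priv *wil, unsigned int to_ms)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(to_ms);

        while (!wil_is_rx_idle(wil)) {
                if (time_after(jiffies, deadline))
                        return -EBUSY;  /* caller rejects the suspend */
                /* let any in-flight NAPI poll finish before re-checking */
                napi_synchronize(&wil->napi_rx);
                msleep(20);
        }
        return 0;
}
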
+
+static int wil_suspend_radio_off(struct wil6210_priv *wil)
{
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
- wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
-
- if (test_bit(wil_status_suspended, wil->status)) {
- wil_dbg_pm(wil, "trying to suspend while suspended\n");
- return 0;
- }
+ wil_dbg_pm(wil, "suspend radio off\n");
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
@@ -90,7 +241,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
wil_disable_irq(wil);
if (wil->platform_ops.suspend) {
- rc = wil->platform_ops.suspend(wil->platform_handle);
+ rc = wil->platform_ops.suspend(wil->platform_handle, false);
if (rc) {
wil_enable_irq(wil);
goto out;
@@ -100,6 +251,50 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
set_bit(wil_status_suspended, wil->status);
out:
+ wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
+
+ return rc;
+}
+
+static int wil_resume_radio_off(struct wil6210_priv *wil)
+{
+ int rc = 0;
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
+ wil_enable_irq(wil);
+ /* if netif up, bring hardware up
+	 * During open(), IFF_UP is set after the actual device method
+	 * invocation. This prevents a recursive call to wil_up().
+ * wil_status_suspended will be cleared in wil_reset
+ */
+ if (ndev->flags & IFF_UP)
+ rc = wil_up(wil);
+ else
+ clear_bit(wil_status_suspended, wil->status);
+
+ return rc;
+}
+
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+ int rc = 0;
+ struct net_device *ndev = wil_to_ndev(wil);
+ bool keep_radio_on = ndev->flags & IFF_UP &&
+ wil->keep_radio_on_during_sleep;
+
+ wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
+
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_dbg_pm(wil, "trying to suspend while suspended\n");
+ return 0;
+ }
+
+ if (!keep_radio_on)
+ rc = wil_suspend_radio_off(wil);
+ else
+ rc = wil_suspend_keep_radio_on(wil);
+
wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
@@ -110,29 +305,24 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
{
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
+ bool keep_radio_on = ndev->flags & IFF_UP &&
+ wil->keep_radio_on_during_sleep;
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
if (wil->platform_ops.resume) {
- rc = wil->platform_ops.resume(wil->platform_handle);
+ rc = wil->platform_ops.resume(wil->platform_handle,
+ keep_radio_on);
if (rc) {
wil_err(wil, "platform_ops.resume : %d\n", rc);
goto out;
}
}
- wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
- wil_enable_irq(wil);
-
- /* if netif up, bring hardware up
- * During open(), IFF_UP set after actual device method
- * invocation. This prevent recursive call to wil_up().
- * wil_status_suspended will be cleared in wil_reset
- */
- if (ndev->flags & IFF_UP)
- rc = wil_up(wil);
+ if (keep_radio_on)
+ rc = wil_resume_keep_radio_on(wil);
else
- clear_bit(wil_status_suspended, wil->status);
+ rc = wil_resume_radio_off(wil);
out:
wil_dbg_pm(wil, "resume: %s => %d\n",
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 35bbf3a..8f1e79b4 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -104,6 +104,51 @@ static inline int wil_vring_avail_high(struct vring *vring)
return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
}
+/* returns true when all tx vrings are empty */
+bool wil_is_tx_idle(struct wil6210_priv *wil)
+{
+ int i;
+ unsigned long data_comp_to;
+
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ struct vring *vring = &wil->vring_tx[i];
+ int vring_index = vring - wil->vring_tx;
+ struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+
+ spin_lock(&txdata->lock);
+
+ if (!vring->va || !txdata->enabled) {
+ spin_unlock(&txdata->lock);
+ continue;
+ }
+
+ data_comp_to = jiffies + msecs_to_jiffies(
+ WIL_DATA_COMPLETION_TO_MS);
+ if (test_bit(wil_status_napi_en, wil->status)) {
+ while (!wil_vring_is_empty(vring)) {
+ if (time_after(jiffies, data_comp_to)) {
+ wil_dbg_pm(wil,
+ "TO waiting for idle tx\n");
+ spin_unlock(&txdata->lock);
+ return false;
+ }
+ wil_dbg_ratelimited(wil,
+ "tx vring is not empty -> NAPI\n");
+ spin_unlock(&txdata->lock);
+ napi_synchronize(&wil->napi_tx);
+ msleep(20);
+ spin_lock(&txdata->lock);
+ if (!vring->va || !txdata->enabled)
+ break;
+ }
+ }
+
+ spin_unlock(&txdata->lock);
+ }
+
+ return true;
+}
+
/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
@@ -406,6 +451,18 @@ static inline int wil_is_back_req(u8 fc)
(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
}
+bool wil_is_rx_idle(struct wil6210_priv *wil)
+{
+ struct vring_rx_desc *_d;
+ struct vring *vring = &wil->vring_rx;
+
+ _d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx;
+ if (_d->dma.status & RX_DMA_STATUS_DU)
+ return false;
+
+ return true;
+}
+
/**
* reap 1 frame from @swhead
*
@@ -1812,6 +1869,15 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
spin_lock(&txdata->lock);
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status) ||
+ test_bit(wil_status_resuming, wil->status)) {
+ wil_dbg_txrx(wil,
+ "suspend/resume in progress. drop packet\n");
+ spin_unlock(&txdata->lock);
+ return -EINVAL;
+ }
+
rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
(wil, vring, skb);
@@ -1864,6 +1930,11 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
return;
}
+ /* Do not wake the queues in suspend flow */
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status))
+ return;
+
/* check wake */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
struct vring *cur_vring = &wil->vring_tx[i];
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index ba1c33b..eca5685 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -59,7 +59,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1);
}
-#define WIL6210_MEM_SIZE (2*1024*1024UL)
+#define WIL6210_MIN_MEM_SIZE (2 * 1024 * 1024UL)
+#define WIL6210_MAX_MEM_SIZE (4 * 1024 * 1024UL)
#define WIL_TX_Q_LEN_DEFAULT (4000)
#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
@@ -83,6 +84,15 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
*/
#define WIL_MAX_MPDU_OVERHEAD (62)
+struct wil_suspend_stats {
+ unsigned long successful_suspends;
+ unsigned long failed_suspends;
+ unsigned long successful_resumes;
+ unsigned long failed_resumes;
+ unsigned long rejected_by_device;
+ unsigned long rejected_by_host;
+};
+
/* Calculate MAC buffer size for the firmware. It includes all overhead,
* as it will go over the air, and need to be 8 byte aligned
*/
@@ -293,6 +303,8 @@ enum {
#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT(1)
#define ISR_MISC_FW_ERROR BIT_DMA_EP_MISC_ICR_FW_INT(3)
+#define WIL_DATA_COMPLETION_TO_MS 200
+
/* Hardware definitions end */
struct fw_map {
u32 from; /* linker address - from, inclusive */
@@ -421,7 +433,9 @@ enum { /* for wil6210_priv.status */
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
wil_status_resetting, /* reset in progress */
+ wil_status_suspending, /* suspend in progress */
wil_status_suspended, /* suspend completed, device is suspended */
+ wil_status_resuming, /* resume in progress */
wil_status_last /* keep last */
};
@@ -604,6 +618,7 @@ extern u8 led_polarity;
struct wil6210_priv {
struct pci_dev *pdev;
+ u32 bar_size;
struct wireless_dev *wdev;
void __iomem *csr;
DECLARE_BITMAP(status, wil_status_last);
@@ -686,9 +701,12 @@ struct wil6210_priv {
struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
u8 discovery_mode;
u8 abft_len;
+ u8 wakeup_trigger;
+ struct wil_suspend_stats suspend_stats;
void *platform_handle;
struct wil_platform_ops platform_ops;
+ bool keep_radio_on_during_sleep;
struct pmc_ctx pmc;
@@ -715,6 +733,11 @@ struct wil6210_priv {
struct notifier_block pm_notify;
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM */
+
+ bool suspend_resp_rcvd;
+ bool suspend_resp_comp;
+ u32 bus_request_kbps;
+ u32 bus_request_kbps_pre_suspend;
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -977,6 +1000,11 @@ bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+bool wil_is_wmi_idle(struct wil6210_priv *wil);
+int wmi_resume(struct wil6210_priv *wil);
+int wmi_suspend(struct wil6210_priv *wil);
+bool wil_is_tx_idle(struct wil6210_priv *wil);
+bool wil_is_rx_idle(struct wil6210_priv *wil);
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
void wil_fw_core_dump(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index f8c4117..621005b 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -33,10 +33,11 @@ enum wil_platform_event {
*/
struct wil_platform_ops {
int (*bus_request)(void *handle, uint32_t kbps /* KBytes/Sec */);
- int (*suspend)(void *handle);
- int (*resume)(void *handle);
+ int (*suspend)(void *handle, bool keep_device_power);
+ int (*resume)(void *handle, bool device_powered_on);
void (*uninit)(void *handle);
int (*notify)(void *handle, enum wil_platform_event evt);
+ bool (*keep_radio_on_during_sleep)(void *handle);
};
/**
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 8e1825f..ae0952f 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -38,6 +38,8 @@ module_param(led_id, byte, 0444);
MODULE_PARM_DESC(led_id,
" 60G device led enablement. Set the led ID (0-2) to enable");
+#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
+
/**
* WMI event receiving - theory of operations
*
@@ -158,7 +160,7 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
return NULL;
off = HOSTADDR(ptr);
- if (off > WIL6210_MEM_SIZE - 4)
+ if (off > wil->bar_size - 4)
return NULL;
return wil->csr + off;
@@ -178,7 +180,7 @@ void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
return NULL;
off = HOSTADDR(ptr);
- if (off > WIL6210_MEM_SIZE - 4)
+ if (off > wil->bar_size - 4)
return NULL;
return wil->csr + off;
@@ -234,6 +236,16 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
return -EAGAIN;
}
+	/* Allow sending only suspend/resume commands during suspend flow */
+ if ((test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status) ||
+ test_bit(wil_status_resuming, wil->status)) &&
+ ((cmdid != WMI_TRAFFIC_SUSPEND_CMDID) &&
+ (cmdid != WMI_TRAFFIC_RESUME_CMDID))) {
+ wil_err(wil, "WMI: reject send_command during suspend\n");
+ return -EINVAL;
+ }
+
if (!head) {
wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
return -EINVAL;
@@ -893,6 +905,11 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
return;
}
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_err(wil, "suspended. cannot handle WMI event\n");
+ return;
+ }
+
for (n = 0;; n++) {
u16 len;
bool q;
@@ -945,6 +962,15 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
struct wmi_cmd_hdr *wmi = &evt->event.wmi;
u16 id = le16_to_cpu(wmi->command_id);
u32 tstamp = le32_to_cpu(wmi->fw_timestamp);
+ if (test_bit(wil_status_resuming, wil->status)) {
+ if (id == WMI_TRAFFIC_RESUME_EVENTID)
+ clear_bit(wil_status_resuming,
+ wil->status);
+ else
+ wil_err(wil,
+ "WMI evt %d while resuming\n",
+ id);
+ }
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
if (wil->reply_id && wil->reply_id == id) {
if (wil->reply_buf) {
@@ -952,6 +978,11 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
min(len, wil->reply_size));
immed_reply = true;
}
+ if (id == WMI_TRAFFIC_SUSPEND_EVENTID) {
+ wil_dbg_wmi(wil,
+ "set suspend_resp_rcvd\n");
+ wil->suspend_resp_rcvd = true;
+ }
}
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
@@ -1909,6 +1940,85 @@ int wmi_link_maintain_cfg_write(struct wil6210_priv *wil,
return rc;
}
+int wmi_suspend(struct wil6210_priv *wil)
+{
+ int rc;
+ struct wmi_traffic_suspend_cmd cmd = {
+ .wakeup_trigger = wil->wakeup_trigger,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_traffic_suspend_event evt;
+ } __packed reply;
+ u32 suspend_to = WIL_WAIT_FOR_SUSPEND_RESUME_COMP;
+
+ wil->suspend_resp_rcvd = false;
+ wil->suspend_resp_comp = false;
+
+ reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED;
+
+ rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd),
+ WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
+ suspend_to);
+ if (rc) {
+ wil_err(wil, "wmi_call for suspend req failed, rc=%d\n", rc);
+ if (rc == -ETIME)
+ /* wmi_call TO */
+ wil->suspend_stats.rejected_by_device++;
+ else
+ wil->suspend_stats.rejected_by_host++;
+ goto out;
+ }
+
+ wil_dbg_wmi(wil, "waiting for suspend_response_completed\n");
+
+ rc = wait_event_interruptible_timeout(wil->wq,
+ wil->suspend_resp_comp,
+ msecs_to_jiffies(suspend_to));
+ if (rc == 0) {
+ wil_err(wil, "TO waiting for suspend_response_completed\n");
+ if (wil->suspend_resp_rcvd)
+			/* Device responded but we timed out for another reason */
+ wil->suspend_stats.rejected_by_host++;
+ else
+ wil->suspend_stats.rejected_by_device++;
+ rc = -EBUSY;
+ goto out;
+ }
+
+ wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
+ if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) {
+ wil_dbg_pm(wil, "device rejected the suspend\n");
+ wil->suspend_stats.rejected_by_device++;
+ }
+ rc = reply.evt.status;
+
+out:
+ wil->suspend_resp_rcvd = false;
+ wil->suspend_resp_comp = false;
+
+ return rc;
+}
+
+int wmi_resume(struct wil6210_priv *wil)
+{
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_traffic_resume_event evt;
+ } __packed reply;
+
+ reply.evt.status = WMI_TRAFFIC_RESUME_FAILED;
+
+ rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0,
+ WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
+ WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
+ if (rc)
+ return rc;
+
+ return reply.evt.status;
+}
+
static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
void *d, int len)
{
@@ -1998,3 +2108,36 @@ void wmi_event_worker(struct work_struct *work)
}
wil_dbg_wmi(wil, "event_worker: Finished\n");
}
+
+bool wil_is_wmi_idle(struct wil6210_priv *wil)
+{
+ ulong flags;
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
+ bool rc = false;
+
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
+ /* Check if there are pending WMI events in the events queue */
+ if (!list_empty(&wil->pending_wmi_ev)) {
+ wil_dbg_pm(wil, "Pending WMI events in queue\n");
+ goto out;
+ }
+
+ /* Check if there is a pending WMI call */
+ if (wil->reply_id) {
+ wil_dbg_pm(wil, "Pending WMI call\n");
+ goto out;
+ }
+
+ /* Check if there are pending RX events in mbox */
+ r->head = wil_r(wil, RGF_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.head));
+ if (r->tail != r->head)
+ wil_dbg_pm(wil, "Pending WMI mbox events\n");
+ else
+ rc = true;
+
+out:
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+ return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index f7f5f4f..256f63c 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -59,6 +59,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_DISABLE_AP_SME = 4,
WMI_FW_CAPABILITY_WMI_ONLY = 5,
WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7,
+ WMI_FW_CAPABILITY_D3_SUSPEND = 8,
WMI_FW_CAPABILITY_MAX,
};
@@ -157,7 +158,7 @@ enum wmi_command_id {
WMI_FLASH_READ_CMDID = 0x902,
WMI_FLASH_WRITE_CMDID = 0x903,
/* Power management */
- WMI_TRAFFIC_DEFERRAL_CMDID = 0x904,
+ WMI_TRAFFIC_SUSPEND_CMDID = 0x904,
WMI_TRAFFIC_RESUME_CMDID = 0x905,
/* P2P */
WMI_P2P_CFG_CMDID = 0x910,
@@ -500,8 +501,14 @@ struct wmi_port_delete_cmd {
u8 reserved[3];
} __packed;
-/* WMI_TRAFFIC_DEFERRAL_CMDID */
-struct wmi_traffic_deferral_cmd {
+/* WMI_TRAFFIC_SUSPEND_CMD wakeup trigger bit mask values */
+enum wmi_wakeup_trigger {
+ WMI_WAKEUP_TRIGGER_UCAST = 0x01,
+ WMI_WAKEUP_TRIGGER_BCAST = 0x02,
+};
+
+/* WMI_TRAFFIC_SUSPEND_CMDID */
+struct wmi_traffic_suspend_cmd {
/* Bit vector: bit[0] - wake on Unicast, bit[1] - wake on Broadcast */
u8 wakeup_trigger;
} __packed;
@@ -1084,7 +1091,7 @@ enum wmi_event_id {
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
/* Power management */
- WMI_TRAFFIC_DEFERRAL_EVENTID = 0x1904,
+ WMI_TRAFFIC_SUSPEND_EVENTID = 0x1904,
WMI_TRAFFIC_RESUME_EVENTID = 0x1905,
/* P2P */
WMI_P2P_CFG_DONE_EVENTID = 0x1910,
@@ -1926,14 +1933,14 @@ struct wmi_link_maintain_cfg_read_done_event {
struct wmi_link_maintain_cfg lm_cfg;
} __packed;
-enum wmi_traffic_deferral_status {
- WMI_TRAFFIC_DEFERRAL_APPROVED = 0x0,
- WMI_TRAFFIC_DEFERRAL_REJECTED = 0x1,
+enum wmi_traffic_suspend_status {
+ WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
+ WMI_TRAFFIC_SUSPEND_REJECTED = 0x1,
};
-/* WMI_TRAFFIC_DEFERRAL_EVENTID */
-struct wmi_traffic_deferral_event {
- /* enum wmi_traffic_deferral_status_e */
+/* WMI_TRAFFIC_SUSPEND_EVENTID */
+struct wmi_traffic_suspend_event {
+ /* enum wmi_traffic_suspend_status_e */
u8 status;
} __packed;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5f2feee..fbeca06 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1725,7 +1725,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_attr_group);
del_gendisk(ns->disk);
- blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
}
@@ -2048,8 +2047,16 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
continue;
revalidate_disk(ns->disk);
blk_set_queue_dying(ns->queue);
- blk_mq_abort_requeue_list(ns->queue);
- blk_mq_start_stopped_hw_queues(ns->queue, true);
+
+ /*
+ * Forcibly start all queues to avoid having stuck requests.
+ * Note that we must ensure the queues are not stopped
+ * when the final removal happens.
+ */
+ blk_mq_start_hw_queues(ns->queue);
+
+ /* draining requests in requeue list */
+ blk_mq_kick_requeue_list(ns->queue);
}
mutex_unlock(&ctrl->namespaces_mutex);
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3d25add..3222f3e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1011,6 +1011,19 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
nvme_rdma_wr_error(cq, wc, "SEND");
}
+static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
+{
+ int sig_limit;
+
+ /*
+ * We signal completion every queue depth/2 and also handle the
+	 * degenerate case of a device with queue_depth=1, where we
+ * would need to signal every message.
+ */
+ sig_limit = max(queue->queue_size / 2, 1);
+ return (++queue->sig_count % sig_limit) == 0;
+}
+
static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
struct ib_send_wr *first, bool flush)
@@ -1038,9 +1051,6 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
* Would have been way to obvious to handle this in hardware or
* at least the RDMA stack..
*
- * This messy and racy code sniplet is copy and pasted from the iSER
- * initiator, and the magic '32' comes from there as well.
- *
* Always signal the flushes. The magic request used for the flush
* sequencer is not allocated in our driver's tagset and it's
* triggered to be freed by blk_cleanup_queue(). So we need to
@@ -1048,7 +1058,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
* embeded in request's payload, is not freed when __ib_process_cq()
* calls wr_cqe->done().
*/
- if ((++queue->sig_count % 32) == 0 || flush)
+ if (nvme_rdma_queue_sig_limit(queue) || flush)
wr.send_flags |= IB_SEND_SIGNALED;
if (first)
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 293371b..c5aaac5 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -125,7 +125,6 @@ enum ipa3_usb_state {
IPA_USB_CONNECTED,
IPA_USB_STOPPED,
IPA_USB_SUSPEND_REQUESTED,
- IPA_USB_SUSPEND_IN_PROGRESS,
IPA_USB_SUSPENDED,
IPA_USB_SUSPENDED_NO_RWAKEUP,
IPA_USB_RESUME_IN_PROGRESS
@@ -146,13 +145,6 @@ enum ipa3_usb_transport_type {
#define IPA3_USB_IS_TTYPE_DPL(__ttype) \
((__ttype) == IPA_USB_TRANSPORT_DPL)
-struct finish_suspend_work_context {
- struct work_struct work;
- enum ipa3_usb_transport_type ttype;
- u32 dl_clnt_hdl;
- u32 ul_clnt_hdl;
-};
-
struct ipa3_usb_teth_prot_conn_params {
u32 usb_to_ipa_clnt_hdl;
u32 ipa_to_usb_clnt_hdl;
@@ -168,7 +160,6 @@ struct ipa3_usb_transport_type_ctx {
int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data);
void *user_data;
enum ipa3_usb_state state;
- struct finish_suspend_work_context finish_suspend_work;
struct ipa_usb_xdci_chan_params ch_params;
struct ipa3_usb_teth_prot_conn_params teth_conn_params;
};
@@ -221,16 +212,10 @@ struct ipa3_usb_status_dbg_info {
static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work);
static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work);
-static void ipa3_usb_wq_notify_suspend_completed(struct work_struct *work);
-static void ipa3_usb_wq_dpl_notify_suspend_completed(struct work_struct *work);
static DECLARE_WORK(ipa3_usb_notify_remote_wakeup_work,
ipa3_usb_wq_notify_remote_wakeup);
static DECLARE_WORK(ipa3_usb_dpl_notify_remote_wakeup_work,
ipa3_usb_wq_dpl_notify_remote_wakeup);
-static DECLARE_WORK(ipa3_usb_notify_suspend_completed_work,
- ipa3_usb_wq_notify_suspend_completed);
-static DECLARE_WORK(ipa3_usb_dpl_notify_suspend_completed_work,
- ipa3_usb_wq_dpl_notify_suspend_completed);
struct ipa3_usb_context *ipa3_usb_ctx;
@@ -273,8 +258,6 @@ static char *ipa3_usb_state_to_string(enum ipa3_usb_state state)
return "IPA_USB_STOPPED";
case IPA_USB_SUSPEND_REQUESTED:
return "IPA_USB_SUSPEND_REQUESTED";
- case IPA_USB_SUSPEND_IN_PROGRESS:
- return "IPA_USB_SUSPEND_IN_PROGRESS";
case IPA_USB_SUSPENDED:
return "IPA_USB_SUSPENDED";
case IPA_USB_SUSPENDED_NO_RWAKEUP:
@@ -330,17 +313,11 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
* In case of failure during suspend request
* handling, state is reverted to connected.
*/
- (err_permit && state == IPA_USB_SUSPEND_REQUESTED) ||
- /*
- * In case of failure during suspend completing
- * handling, state is reverted to connected.
- */
- (err_permit && state == IPA_USB_SUSPEND_IN_PROGRESS))
+ (err_permit && state == IPA_USB_SUSPEND_REQUESTED))
state_legal = true;
break;
case IPA_USB_STOPPED:
- if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
- state == IPA_USB_CONNECTED ||
+ if (state == IPA_USB_CONNECTED ||
state == IPA_USB_SUSPENDED ||
state == IPA_USB_SUSPENDED_NO_RWAKEUP)
state_legal = true;
@@ -349,19 +326,8 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
if (state == IPA_USB_CONNECTED)
state_legal = true;
break;
- case IPA_USB_SUSPEND_IN_PROGRESS:
- if (state == IPA_USB_SUSPEND_REQUESTED ||
- /*
- * In case of failure during resume, state is reverted
- * to original, which could be suspend_in_progress.
- * Allow it.
- */
- (err_permit && state == IPA_USB_RESUME_IN_PROGRESS))
- state_legal = true;
- break;
case IPA_USB_SUSPENDED:
if (state == IPA_USB_SUSPEND_REQUESTED ||
- state == IPA_USB_SUSPEND_IN_PROGRESS ||
/*
* In case of failure during resume, state is reverted
* to original, which could be suspended. Allow it
@@ -374,8 +340,7 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
state_legal = true;
break;
case IPA_USB_RESUME_IN_PROGRESS:
- if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
- state == IPA_USB_SUSPENDED)
+ if (state == IPA_USB_SUSPENDED)
state_legal = true;
break;
default:
@@ -452,7 +417,6 @@ static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op,
break;
case IPA_USB_OP_DISCONNECT:
if (state == IPA_USB_CONNECTED ||
- state == IPA_USB_SUSPEND_IN_PROGRESS ||
state == IPA_USB_SUSPENDED ||
state == IPA_USB_SUSPENDED_NO_RWAKEUP)
is_legal = true;
@@ -483,7 +447,6 @@ static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op,
break;
case IPA_USB_OP_RESUME:
if (state == IPA_USB_SUSPENDED ||
- state == IPA_USB_SUSPEND_IN_PROGRESS ||
state == IPA_USB_SUSPENDED_NO_RWAKEUP)
is_legal = true;
break;
@@ -582,71 +545,6 @@ static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work)
ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_REMOTE_WAKEUP);
}
-static void ipa3_usb_wq_notify_suspend_completed(struct work_struct *work)
-{
- ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH, IPA_USB_SUSPEND_COMPLETED);
-}
-
-static void ipa3_usb_wq_dpl_notify_suspend_completed(struct work_struct *work)
-{
- ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_SUSPEND_COMPLETED);
-}
-
-static void ipa3_usb_wq_finish_suspend_work(struct work_struct *work)
-{
- struct finish_suspend_work_context *finish_suspend_work_ctx;
- unsigned long flags;
- int result = -EFAULT;
- struct ipa3_usb_transport_type_ctx *tctx;
-
- mutex_lock(&ipa3_usb_ctx->general_mutex);
- IPA_USB_DBG_LOW("entry\n");
- finish_suspend_work_ctx = container_of(work,
- struct finish_suspend_work_context, work);
- tctx = &ipa3_usb_ctx->ttype_ctx[finish_suspend_work_ctx->ttype];
-
- spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- if (tctx->state != IPA_USB_SUSPEND_IN_PROGRESS) {
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- mutex_unlock(&ipa3_usb_ctx->general_mutex);
- return;
- }
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
- /* Stop DL/DPL channel */
- result = ipa3_stop_gsi_channel(finish_suspend_work_ctx->dl_clnt_hdl);
- if (result) {
- IPAERR("Error stopping DL/DPL channel: %d, resuming channel\n",
- result);
- ipa3_xdci_resume(finish_suspend_work_ctx->ul_clnt_hdl,
- finish_suspend_work_ctx->dl_clnt_hdl,
- IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype));
- /* Change state back to CONNECTED */
- if (!ipa3_usb_set_state(IPA_USB_CONNECTED, true,
- finish_suspend_work_ctx->ttype))
- IPA_USB_ERR("failed to change state to connected\n");
- queue_work(ipa3_usb_ctx->wq,
- IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype) ?
- &ipa3_usb_dpl_notify_remote_wakeup_work :
- &ipa3_usb_notify_remote_wakeup_work);
- mutex_unlock(&ipa3_usb_ctx->general_mutex);
- return;
- }
-
- /* Change ipa_usb state to SUSPENDED */
- if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false,
- finish_suspend_work_ctx->ttype))
- IPA_USB_ERR("failed to change state to suspended\n");
-
- queue_work(ipa3_usb_ctx->wq,
- IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype) ?
- &ipa3_usb_dpl_notify_suspend_completed_work :
- &ipa3_usb_notify_suspend_completed_work);
-
- IPA_USB_DBG_LOW("exit\n");
- mutex_unlock(&ipa3_usb_ctx->general_mutex);
-}
-
static int ipa3_usb_cons_request_resource_cb_do(
enum ipa3_usb_transport_type ttype,
struct work_struct *remote_wakeup_work)
@@ -674,17 +572,6 @@ static int ipa3_usb_cons_request_resource_cb_do(
else
result = -EINPROGRESS;
break;
- case IPA_USB_SUSPEND_IN_PROGRESS:
- /*
- * This case happens due to suspend interrupt.
- * CONS is granted
- */
- if (!rm_ctx->cons_requested) {
- rm_ctx->cons_requested = true;
- queue_work(ipa3_usb_ctx->wq, remote_wakeup_work);
- }
- result = 0;
- break;
case IPA_USB_SUSPENDED:
if (!rm_ctx->cons_requested) {
rm_ctx->cons_requested = true;
@@ -727,15 +614,10 @@ static int ipa3_usb_cons_release_resource_cb_do(
ipa3_usb_state_to_string(
ipa3_usb_ctx->ttype_ctx[ttype].state));
switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
- case IPA_USB_SUSPEND_IN_PROGRESS:
+ case IPA_USB_SUSPENDED:
/* Proceed with the suspend if no DL/DPL data */
if (rm_ctx->cons_requested)
rm_ctx->cons_requested_released = true;
- else {
- queue_work(ipa3_usb_ctx->wq,
- &ipa3_usb_ctx->ttype_ctx[ttype].
- finish_suspend_work.work);
- }
break;
case IPA_USB_SUSPEND_REQUESTED:
if (rm_ctx->cons_requested)
@@ -2311,8 +2193,7 @@ int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
orig_state = ipa3_usb_ctx->ttype_ctx[ttype].state;
if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
- if (orig_state != IPA_USB_SUSPEND_IN_PROGRESS &&
- orig_state != IPA_USB_SUSPENDED) {
+ if (orig_state != IPA_USB_SUSPENDED) {
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
flags);
/* Stop UL channel */
@@ -2340,8 +2221,7 @@ int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
if (result)
goto bad_params;
- if (orig_state != IPA_USB_SUSPEND_IN_PROGRESS &&
- orig_state != IPA_USB_SUSPENDED) {
+ if (orig_state != IPA_USB_SUSPENDED) {
result = ipa3_usb_release_prod(ttype);
if (result) {
IPA_USB_ERR("failed to release PROD.\n");
@@ -2547,7 +2427,6 @@ int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
{
int result = 0;
unsigned long flags;
- enum ipa3_usb_cons_state curr_cons_state;
enum ipa3_usb_transport_type ttype;
mutex_lock(&ipa3_usb_ctx->general_mutex);
@@ -2602,49 +2481,20 @@ int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
goto release_prod_fail;
}
+ /* Check if DL/DPL data pending */
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- curr_cons_state = ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state;
+ if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state ==
+ IPA_USB_CONS_GRANTED &&
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
+
+ IPA_USB_DBG("DL/DPL data pending, invoke remote wakeup\n");
+ queue_work(ipa3_usb_ctx->wq,
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ &ipa3_usb_dpl_notify_remote_wakeup_work :
+ &ipa3_usb_notify_remote_wakeup_work);
+ }
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- if (curr_cons_state == IPA_USB_CONS_GRANTED) {
- /* Change state to SUSPEND_IN_PROGRESS */
- if (!ipa3_usb_set_state(IPA_USB_SUSPEND_IN_PROGRESS,
- false, ttype))
- IPA_USB_ERR("fail set state to suspend_in_progress\n");
- /* Check if DL/DPL data pending */
- spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
- IPA_USB_DBG(
- "DL/DPL data pending, invoke remote wakeup\n");
- queue_work(ipa3_usb_ctx->wq,
- IPA3_USB_IS_TTYPE_DPL(ttype) ?
- &ipa3_usb_dpl_notify_remote_wakeup_work :
- &ipa3_usb_notify_remote_wakeup_work);
- }
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
- ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.ttype =
- ttype;
- ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.dl_clnt_hdl =
- dl_clnt_hdl;
- ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.ul_clnt_hdl =
- ul_clnt_hdl;
- INIT_WORK(&ipa3_usb_ctx->ttype_ctx[ttype].
- finish_suspend_work.work,
- ipa3_usb_wq_finish_suspend_work);
-
- result = -EINPROGRESS;
- IPA_USB_DBG("exit with suspend_in_progress\n");
- goto bad_params;
- }
-
- /* Stop DL channel */
- result = ipa3_stop_gsi_channel(dl_clnt_hdl);
- if (result) {
- IPAERR("Error stopping DL/DPL channel: %d\n", result);
- result = -EFAULT;
- goto release_prod_fail;
- }
/* Change state to SUSPENDED */
if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false, ttype))
IPA_USB_ERR("failed to change state to suspended\n");
@@ -2803,13 +2653,11 @@ int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
}
}
- if (prev_state != IPA_USB_SUSPEND_IN_PROGRESS) {
- /* Start DL/DPL channel */
- result = ipa3_start_gsi_channel(dl_clnt_hdl);
- if (result) {
- IPA_USB_ERR("failed to start DL/DPL channel.\n");
- goto start_dl_fail;
- }
+ /* Start DL/DPL channel */
+ result = ipa3_start_gsi_channel(dl_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to start DL/DPL channel.\n");
+ goto start_dl_fail;
}
/* Change state to CONNECTED */
@@ -2824,12 +2672,10 @@ int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
return 0;
state_change_connected_fail:
- if (prev_state != IPA_USB_SUSPEND_IN_PROGRESS) {
- result = ipa3_stop_gsi_channel(dl_clnt_hdl);
- if (result)
- IPA_USB_ERR("Error stopping DL/DPL channel: %d\n",
- result);
- }
+ result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+ if (result)
+ IPA_USB_ERR("Error stopping DL/DPL channel: %d\n",
+ result);
start_dl_fail:
if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
result = ipa3_stop_gsi_channel(ul_clnt_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 31e530e..837bf38 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -232,6 +232,9 @@ static DECLARE_DELAYED_WORK(ipa3_transport_release_resource_work,
ipa3_transport_release_resource);
static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);
+static void ipa3_post_init_wq(struct work_struct *work);
+static DECLARE_WORK(ipa3_post_init_work, ipa3_post_init_wq);
+
static struct ipa3_plat_drv_res ipa3_res = {0, };
struct msm_bus_scale_pdata *ipa3_bus_scale_table;
@@ -495,63 +498,6 @@ static int ipa3_open(struct inode *inode, struct file *filp)
return 0;
}
-/**
-* ipa3_flow_control() - Enable/Disable flow control on a particular client.
-* Return codes:
-* None
-*/
-void ipa3_flow_control(enum ipa_client_type ipa_client,
- bool enable, uint32_t qmap_id)
-{
- struct ipa_ep_cfg_ctrl ep_ctrl = {0};
- int ep_idx;
- struct ipa3_ep_context *ep;
-
- /* Check if tethered flow control is needed or not.*/
- if (!ipa3_ctx->tethered_flow_control) {
- IPADBG("Apps flow control is not needed\n");
- return;
- }
-
- /* Check if ep is valid. */
- ep_idx = ipa3_get_ep_mapping(ipa_client);
- if (ep_idx == -1) {
- IPADBG("Invalid IPA client\n");
- return;
- }
-
- ep = &ipa3_ctx->ep[ep_idx];
- if (!ep->valid || (ep->client != IPA_CLIENT_USB_PROD)) {
- IPADBG("EP not valid/Not applicable for client.\n");
- return;
- }
-
- spin_lock(&ipa3_ctx->disconnect_lock);
- /* Check if the QMAP_ID matches. */
- if (ep->cfg.meta.qmap_id != qmap_id) {
- IPADBG("Flow control ind not for same flow: %u %u\n",
- ep->cfg.meta.qmap_id, qmap_id);
- spin_unlock(&ipa3_ctx->disconnect_lock);
- return;
- }
- if (!ep->disconnect_in_progress) {
- if (enable) {
- IPADBG("Enabling Flow\n");
- ep_ctrl.ipa_ep_delay = false;
- IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_enable);
- } else {
- IPADBG("Disabling Flow\n");
- ep_ctrl.ipa_ep_delay = true;
- IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_disable);
- }
- ep_ctrl.ipa_ep_suspend = false;
- ipa3_cfg_ep_ctrl(ep_idx, &ep_ctrl);
- } else {
- IPADBG("EP disconnect is in progress\n");
- }
- spin_unlock(&ipa3_ctx->disconnect_lock);
-}
-
static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
{
if (!buff) {
@@ -1863,9 +1809,11 @@ static void ipa3_q6_avoid_holb(void)
IPA_ENDP_INIT_HOL_BLOCK_EN_n,
ep_idx, &ep_holb);
- ipahal_write_reg_n_fields(
- IPA_ENDP_INIT_CTRL_n,
- ep_idx, &ep_suspend);
+			/* from IPA 4.0 onwards, pipe suspend is not supported */
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+ ipahal_write_reg_n_fields(
+ IPA_ENDP_INIT_CTRL_n,
+ ep_idx, &ep_suspend);
}
}
}
@@ -3979,6 +3927,15 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
struct ipa3_flt_tbl *flt_tbl;
int i;
+ if (ipa3_ctx == NULL) {
+ IPADBG("IPA driver haven't initialized\n");
+ return -ENXIO;
+ }
+
+	/* Prevent subsequent calls from trying to load the FW again. */
+ if (ipa3_ctx->ipa_initialization_complete)
+ return 0;
+
/*
* indication whether working in MHI config or non MHI config is given
* in ipa3_write which is launched before ipa3_post_init. i.e. from
@@ -4113,41 +4070,15 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
fail_setup_apps_pipes:
gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
fail_register_device:
- ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
- ipa_rm_exit();
- cdev_del(&ipa3_ctx->cdev);
- device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
- unregister_chrdev_region(ipa3_ctx->dev_num, 1);
- ipa3_free_dma_task_for_gsi();
ipa3_destroy_flt_tbl_idrs();
- idr_destroy(&ipa3_ctx->ipa_idr);
- kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
- kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
- kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
- kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
- kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
- kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
- kmem_cache_destroy(ipa3_ctx->hdr_cache);
- kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
- kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
- destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
- destroy_workqueue(ipa3_ctx->power_mgmt_wq);
- iounmap(ipa3_ctx->mmio);
- ipa3_disable_clks();
- if (ipa3_clk)
- clk_put(ipa3_clk);
- ipa3_clk = NULL;
- msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
- if (ipa3_bus_scale_table) {
- msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
- ipa3_bus_scale_table = NULL;
- }
- kfree(ipa3_ctx->ctrl);
- kfree(ipa3_ctx);
- ipa3_ctx = NULL;
return result;
}
+static void ipa3_post_init_wq(struct work_struct *work)
+{
+ ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
+}
+
static int ipa3_trigger_fw_loading_mdms(void)
{
int result;
@@ -4249,9 +4180,10 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
if (result) {
IPAERR("FW loading process has failed\n");
return result;
- } else
- ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
-
+ } else {
+ queue_work(ipa3_ctx->transport_power_mgmt_wq,
+ &ipa3_post_init_work);
+ }
return count;
}
@@ -4722,20 +4654,6 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
goto fail_device_create;
}
- cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
- ipa3_ctx->cdev.owner = THIS_MODULE;
- ipa3_ctx->cdev.ops = &ipa3_drv_fops; /* from LDD3 */
-
- result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
- if (result) {
- IPAERR(":cdev_add err=%d\n", -result);
- result = -ENODEV;
- goto fail_cdev_add;
- }
- IPADBG("ipa cdev added successful. major:%d minor:%d\n",
- MAJOR(ipa3_ctx->dev_num),
- MINOR(ipa3_ctx->dev_num));
-
if (ipa3_create_nat_device()) {
IPAERR("unable to create nat device\n");
result = -ENODEV;
@@ -4793,16 +4711,28 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
}
}
+ cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
+ ipa3_ctx->cdev.owner = THIS_MODULE;
+ ipa3_ctx->cdev.ops = &ipa3_drv_fops; /* from LDD3 */
+
+ result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
+ if (result) {
+ IPAERR(":cdev_add err=%d\n", -result);
+ result = -ENODEV;
+ goto fail_cdev_add;
+ }
+ IPADBG("ipa cdev added successful. major:%d minor:%d\n",
+ MAJOR(ipa3_ctx->dev_num),
+ MINOR(ipa3_ctx->dev_num));
return 0;
+fail_cdev_add:
fail_ipa_init_interrupts:
ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
fail_create_apps_resource:
ipa_rm_exit();
fail_ipa_rm_init:
fail_nat_dev_add:
- cdev_del(&ipa3_ctx->cdev);
-fail_cdev_add:
device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
fail_device_create:
unregister_chrdev_region(ipa3_ctx->dev_num, 1);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 0b8115f..564397a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -69,13 +69,15 @@ int ipa3_enable_data_path(u32 clnt_hdl)
}
/* Enable the pipe */
- if (IPA_CLIENT_IS_CONS(ep->client) &&
- (ep->keep_ipa_awake ||
- ipa3_ctx->resume_on_connect[ep->client] ||
- !ipa3_should_pipe_be_suspended(ep->client))) {
- memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
- ep_cfg_ctrl.ipa_ep_suspend = false;
- res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ if (IPA_CLIENT_IS_CONS(ep->client) &&
+ (ep->keep_ipa_awake ||
+ ipa3_ctx->resume_on_connect[ep->client] ||
+ !ipa3_should_pipe_be_suspended(ep->client))) {
+ memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = false;
+ res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ }
}
return res;
@@ -97,33 +99,41 @@ int ipa3_disable_data_path(u32 clnt_hdl)
res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
}
- /* Suspend the pipe */
- if (IPA_CLIENT_IS_CONS(ep->client)) {
- /*
- * for RG10 workaround uC needs to be loaded before pipe can
- * be suspended in this case.
- */
- if (ipa3_ctx->apply_rg10_wa && ipa3_uc_state_check()) {
- IPADBG("uC is not loaded yet, waiting...\n");
- res = wait_for_completion_timeout(
- &ipa3_ctx->uc_loaded_completion_obj, 60 * HZ);
- if (res == 0)
- IPADBG("timeout waiting for uC to load\n");
+ /*
+	 * For IPA 4.0 and above, the aggregation frame is closed together
+	 * with channel STOP
+ */
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ /* Suspend the pipe */
+ if (IPA_CLIENT_IS_CONS(ep->client)) {
+ /*
+ * for RG10 workaround uC needs to be loaded before
+ * pipe can be suspended in this case.
+ */
+ if (ipa3_ctx->apply_rg10_wa && ipa3_uc_state_check()) {
+ IPADBG("uC is not loaded yet, waiting...\n");
+ res = wait_for_completion_timeout(
+ &ipa3_ctx->uc_loaded_completion_obj,
+ 60 * HZ);
+ if (res == 0)
+ IPADBG("timeout waiting for uC load\n");
+ }
+
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = true;
+ res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
}
- memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
- ep_cfg_ctrl.ipa_ep_suspend = true;
- res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
- }
-
- udelay(IPA_PKT_FLUSH_TO_US);
- ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, &ep_aggr);
- if (ep_aggr.aggr_en) {
- res = ipa3_tag_aggr_force_close(clnt_hdl);
- if (res) {
- IPAERR("tag process timeout, client:%d err:%d\n",
- clnt_hdl, res);
- BUG();
+ udelay(IPA_PKT_FLUSH_TO_US);
+ ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl,
+ &ep_aggr);
+ if (ep_aggr.aggr_en) {
+ res = ipa3_tag_aggr_force_close(clnt_hdl);
+ if (res) {
+ IPAERR("tag process timeout client:%d err:%d\n",
+ clnt_hdl, res);
+ ipa_assert();
+ }
}
}
@@ -1257,10 +1267,12 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
goto disable_clk_and_exit;
}
- /* Suspend the DL/DPL EP */
- memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
- ep_cfg_ctrl.ipa_ep_suspend = true;
- ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ /* Suspend the DL/DPL EP */
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = true;
+ ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+ }
/*
* Check if DL/DPL channel is empty again, data could enter the channel
@@ -1275,6 +1287,14 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
goto unsuspend_dl_and_exit;
}
+ /* Stop DL channel */
+ result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+ if (result) {
+ IPAERR("Error stopping DL/DPL channel: %d\n", result);
+ result = -EFAULT;
+ goto unsuspend_dl_and_exit;
+ }
+
/* STOP UL channel */
if (!is_dpl) {
source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
@@ -1283,7 +1303,7 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
if (result) {
IPAERR("Error stopping UL channel: result = %d\n",
result);
- goto unsuspend_dl_and_exit;
+ goto start_dl_and_exit;
}
}
@@ -1292,11 +1312,15 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
IPADBG("exit\n");
return 0;
+start_dl_and_exit:
+ gsi_start_channel(dl_ep->gsi_chan_hdl);
unsuspend_dl_and_exit:
- /* Unsuspend the DL EP */
- memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
- ep_cfg_ctrl.ipa_ep_suspend = false;
- ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ /* Unsuspend the DL EP */
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = false;
+ ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+ }
disable_clk_and_exit:
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
return result;
@@ -1340,7 +1364,8 @@ int ipa3_start_gsi_channel(u32 clnt_hdl)
int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
{
- struct ipa3_ep_context *ul_ep, *dl_ep;
+ struct ipa3_ep_context *ul_ep = NULL;
+ struct ipa3_ep_context *dl_ep = NULL;
enum gsi_status gsi_res;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
@@ -1360,10 +1385,17 @@ int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
- /* Unsuspend the DL/DPL EP */
- memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
- ep_cfg_ctrl.ipa_ep_suspend = false;
- ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ /* Unsuspend the DL/DPL EP */
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = false;
+ ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+ }
+
+ /* Start DL channel */
+ gsi_res = gsi_start_channel(dl_ep->gsi_chan_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS)
+ IPAERR("Error starting DL channel: %d\n", gsi_res);
/* Start UL channel */
if (!is_dpl) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 04d807f..915f2b8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -267,7 +267,6 @@ int ipa3_send(struct ipa3_sys_context *sys,
int i = 0;
int j;
int result;
- int fail_dma_wrap = 0;
u32 mem_flag = GFP_ATOMIC;
const struct ipa_gsi_ep_config *gsi_ep_cfg;
@@ -298,7 +297,6 @@ int ipa3_send(struct ipa3_sys_context *sys,
spin_lock_bh(&sys->spinlock);
for (i = 0; i < num_desc; i++) {
- fail_dma_wrap = 0;
tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
mem_flag);
if (!tx_pkt) {
@@ -319,7 +317,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
if (ipa_populate_tag_field(&desc[i], tx_pkt,
&tag_pyld_ret)) {
IPAERR("Failed to populate tag field\n");
- goto failure;
+ goto failure_dma_map;
}
}
@@ -335,11 +333,6 @@ int ipa3_send(struct ipa3_sys_context *sys,
tx_pkt->mem.base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
- if (!tx_pkt->mem.phys_base) {
- IPAERR("failed to do dma map.\n");
- fail_dma_wrap = 1;
- goto failure;
- }
} else {
tx_pkt->mem.phys_base =
desc[i].dma_address;
@@ -355,17 +348,17 @@ int ipa3_send(struct ipa3_sys_context *sys,
desc[i].frag,
0, tx_pkt->mem.size,
DMA_TO_DEVICE);
- if (!tx_pkt->mem.phys_base) {
- IPAERR("dma map failed\n");
- fail_dma_wrap = 1;
- goto failure;
- }
} else {
tx_pkt->mem.phys_base =
desc[i].dma_address;
tx_pkt->no_unmap_dma = true;
}
}
+ if (dma_mapping_error(ipa3_ctx->pdev, tx_pkt->mem.phys_base)) {
+ IPAERR("failed to do dma map.\n");
+ goto failure_dma_map;
+ }
+
tx_pkt->sys = sys;
tx_pkt->callback = desc[i].callback;
tx_pkt->user1 = desc[i].user1;
@@ -426,28 +419,31 @@ int ipa3_send(struct ipa3_sys_context *sys,
return 0;
+failure_dma_map:
+ kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+
failure:
ipahal_destroy_imm_cmd(tag_pyld_ret);
tx_pkt = tx_pkt_first;
for (j = 0; j < i; j++) {
next_pkt = list_next_entry(tx_pkt, link);
list_del(&tx_pkt->link);
- if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
- dma_unmap_single(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
- tx_pkt->mem.size,
- DMA_TO_DEVICE);
- } else {
- dma_unmap_page(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
- tx_pkt->mem.size,
- DMA_TO_DEVICE);
+
+ if (!tx_pkt->no_unmap_dma) {
+ if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
+ dma_unmap_single(ipa3_ctx->pdev,
+ tx_pkt->mem.phys_base,
+ tx_pkt->mem.size, DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(ipa3_ctx->pdev,
+ tx_pkt->mem.phys_base,
+ tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ }
}
kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
tx_pkt = next_pkt;
}
- if (j < num_desc)
- /* last desc failed */
- if (fail_dma_wrap)
- kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
kfree(gsi_xfer_elem_array);
@@ -1444,8 +1440,7 @@ static void ipa3_wq_repl_rx(struct work_struct *work)
rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
sys->rx_buff_sz,
DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
__func__, (void *)rx_pkt->data.dma_addr,
ptr, sys);
@@ -1605,8 +1600,7 @@ static void ipa3_alloc_wlan_rx_common_cache(u32 size)
ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
IPAERR("dma_map_single failure %p for %p\n",
(void *)rx_pkt->data.dma_addr, ptr);
goto fail_dma_mapping;
@@ -1676,8 +1670,7 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
sys->rx_buff_sz,
DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
IPAERR("dma_map_single failure %p for %p\n",
(void *)rx_pkt->data.dma_addr, ptr);
goto fail_dma_mapping;
@@ -1764,8 +1757,8 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa3_ctx->pdev,
+ rx_pkt->data.dma_addr)) {
IPAERR("dma_map_single failure %p for %p\n",
(void *)rx_pkt->data.dma_addr, ptr);
goto fail_dma_mapping;
@@ -1780,8 +1773,8 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa3_ctx->pdev,
+ rx_pkt->data.dma_addr)) {
IPAERR("dma_map_single failure %p for %p\n",
(void *)rx_pkt->data.dma_addr, ptr);
goto fail_dma_mapping;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 410b96a..593d4fc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -490,6 +490,10 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
entry->hdr,
entry->hdr_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(ipa3_ctx->pdev, entry->phys_base)) {
+ IPAERR("dma_map_single failure for entry\n");
+ goto fail_dma_mapping;
+ }
} else {
entry->is_hdr_proc_ctx = false;
if (list_empty(&htbl->head_free_offset_list[bin])) {
@@ -565,6 +569,9 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
list_del(&entry->link);
dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
entry->hdr_len, DMA_TO_DEVICE);
+fail_dma_mapping:
+ entry->is_hdr_proc_ctx = false;
+
bad_hdr_len:
entry->cookie = 0;
kmem_cache_free(ipa3_ctx->hdr_cache, entry);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 86442b1..9a406d6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1970,8 +1970,6 @@ int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple);
int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple);
void ipa3_set_resorce_groups_min_max_limits(void);
void ipa3_suspend_apps_pipes(bool suspend);
-void ipa3_flow_control(enum ipa_client_type ipa_client, bool enable,
- uint32_t qmap_id);
int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
enum ipa_ip_type ip_type,
bool hashable,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 799246b..60dc04f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1600,13 +1600,15 @@ int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
if (IPA_CLIENT_IS_CONS(ep->client)) {
- ep_cfg_ctrl.ipa_ep_suspend = true;
- result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
- if (result)
- IPAERR("client (ep: %d) failed to suspend result=%d\n",
- clnt_hdl, result);
- else
- IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ ep_cfg_ctrl.ipa_ep_suspend = true;
+ result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ if (result)
+ IPAERR("(ep: %d) failed to suspend result=%d\n",
+ clnt_hdl, result);
+ else
+ IPADBG("(ep: %d) suspended\n", clnt_hdl);
+ }
} else {
ep_cfg_ctrl.ipa_ep_delay = true;
result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index ab26893..079481d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1631,6 +1631,16 @@ bool ipa3_should_pipe_be_suspended(enum ipa_client_type client)
ep = &ipa3_ctx->ep[ipa_ep_idx];
+ /*
+	 * Starting with IPA 4.0, a pipe can no longer be suspended. Instead,
+	 * the corresponding GSI channel should be stopped. Usually the client
+	 * driver takes care of stopping the channel. For client drivers that
+	 * do not stop the channel, IPA RM will do so based on
+	 * ipa3_should_pipe_channel_be_stopped().
+ */
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+ return false;
+
if (ep->keep_ipa_awake)
return false;
@@ -1651,6 +1661,41 @@ bool ipa3_should_pipe_be_suspended(enum ipa_client_type client)
}
/**
+ * ipa3_should_pipe_channel_be_stopped() - returns true when the client's
+ * channel should be stopped during a power save scenario. False otherwise.
+ * Most clients already stop the GSI channel on suspend and are not
+ * included in the list below.
+ *
+ * @client: [IN] IPA client
+ */
+static bool ipa3_should_pipe_channel_be_stopped(enum ipa_client_type client)
+{
+ struct ipa3_ep_context *ep;
+ int ipa_ep_idx;
+
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+ return false;
+
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ WARN_ON(1);
+ return false;
+ }
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+ if (ep->keep_ipa_awake)
+ return false;
+
+ if (client == IPA_CLIENT_ODU_EMB_CONS ||
+ client == IPA_CLIENT_ODU_TETH_CONS)
+ return true;
+
+ return false;
+}
+
+/**
* ipa3_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
* resource and decrement active clients counter, which may result in clock
* gating of IPA clocks.
@@ -1695,6 +1740,19 @@ int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource)
pipe_suspended = true;
}
}
+
+ if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+ ipa3_should_pipe_channel_be_stopped(client)) {
+ if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+ /* Stop GSI channel */
+ res = ipa3_stop_gsi_channel(ipa_ep_idx);
+ if (res) {
+ IPAERR("failed stop gsi ch %lu\n",
+ ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+ return res;
+ }
+ }
+ }
}
/* Sleep ~1 msec */
if (pipe_suspended)
@@ -1761,6 +1819,12 @@ int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource)
ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
}
}
+
+ if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+ ipa3_should_pipe_channel_be_stopped(client)) {
+ res = -EPERM;
+ goto bail;
+ }
}
if (res == 0) {
@@ -1824,6 +1888,19 @@ int ipa3_resume_resource(enum ipa_rm_resource_name resource)
ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
}
}
+
+ if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+ ipa3_should_pipe_channel_be_stopped(client)) {
+ if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+ res = gsi_start_channel(
+ ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+ if (res) {
+ IPAERR("failed to start gsi ch %lu\n",
+ ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+ return res;
+ }
+ }
+ }
}
return res;
@@ -2714,6 +2791,12 @@ int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
return -EINVAL;
}
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 && ep_ctrl->ipa_ep_suspend) {
+ IPAERR("pipe suspend is not supported\n");
+ WARN_ON(1);
+ return -EPERM;
+ }
+
IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
clnt_hdl,
ep_ctrl->ipa_ep_suspend,
@@ -4674,6 +4757,7 @@ void ipa3_suspend_apps_pipes(bool suspend)
struct ipa_ep_cfg_ctrl cfg;
int ipa_ep_idx;
struct ipa3_ep_context *ep;
+ int res;
memset(&cfg, 0, sizeof(cfg));
cfg.ipa_ep_suspend = suspend;
@@ -4688,7 +4772,23 @@ void ipa3_suspend_apps_pipes(bool suspend)
if (ep->valid) {
IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
ipa_ep_idx);
- ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ if (suspend) {
+ res = ipa3_stop_gsi_channel(ipa_ep_idx);
+ if (res) {
+ IPAERR("failed to stop LAN channel\n");
+ ipa_assert();
+ }
+ } else {
+ res = gsi_start_channel(ep->gsi_chan_hdl);
+ if (res) {
+ IPAERR("failed to start LAN channel\n");
+ ipa_assert();
+ }
+ }
+ } else {
+ ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+ }
if (suspend)
ipa3_gsi_poll_after_suspend(ep);
else if (!atomic_read(&ep->sys->curr_polling_state))
@@ -4706,7 +4806,23 @@ void ipa3_suspend_apps_pipes(bool suspend)
if (ep->valid) {
IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
ipa_ep_idx);
- ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ if (suspend) {
+ res = ipa3_stop_gsi_channel(ipa_ep_idx);
+ if (res) {
+ IPAERR("failed to stop WAN channel\n");
+ ipa_assert();
+ }
+ } else {
+ res = gsi_start_channel(ep->gsi_chan_hdl);
+ if (res) {
+ IPAERR("failed to start WAN channel\n");
+ ipa_assert();
+ }
+ }
+ } else {
+ ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+ }
if (suspend)
ipa3_gsi_poll_after_suspend(ep);
else if (!atomic_read(&ep->sys->curr_polling_state))
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 1a119b9..3019e4d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -688,6 +688,19 @@ static void ipareg_parse_endp_init_ctrl_n(enum ipahal_reg_name reg,
IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT);
}
+static void ipareg_construct_endp_init_ctrl_n_v4_0(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_ctrl *ep_ctrl =
+ (struct ipa_ep_cfg_ctrl *)fields;
+
+ WARN_ON(ep_ctrl->ipa_ep_suspend);
+
+ IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_delay,
+ IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT,
+ IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK);
+}
+
static void ipareg_construct_endp_init_ctrl_scnd_n(enum ipahal_reg_name reg,
const void *fields, u32 *val)
{
@@ -1444,6 +1457,9 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
ipareg_parse_hps_queue_weights, 0x000005a4, 0},
/* IPAv4.0 */
+ [IPA_HW_v4_0][IPA_ENDP_INIT_CTRL_n] = {
+ ipareg_construct_endp_init_ctrl_n_v4_0, ipareg_parse_dummy,
+ 0x00000800, 0x70 },
[IPA_HW_v4_0][IPA_TX_CFG] = {
ipareg_construct_tx_cfg_v4_0, ipareg_parse_tx_cfg_v4_0,
0x000001FC, 0},
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index b198348..f408f23 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1507,25 +1507,13 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
/* Flow enable */
case RMNET_IOCTL_FLOW_ENABLE:
- IPAWANDBG("Received flow enable\n");
- if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
- sizeof(struct rmnet_ioctl_data_s))) {
- rc = -EFAULT;
- break;
- }
- ipa3_flow_control(IPA_CLIENT_USB_PROD, true,
- ioctl_data.u.tcm_handle);
+ IPAWANERR("RMNET_IOCTL_FLOW_ENABLE not supported\n");
+ rc = -EFAULT;
break;
/* Flow disable */
case RMNET_IOCTL_FLOW_DISABLE:
- IPAWANDBG("Received flow disable\n");
- if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
- sizeof(struct rmnet_ioctl_data_s))) {
- rc = -EFAULT;
- break;
- }
- ipa3_flow_control(IPA_CLIENT_USB_PROD, false,
- ioctl_data.u.tcm_handle);
+ IPAWANERR("RMNET_IOCTL_FLOW_DISABLE not supported\n");
+ rc = -EFAULT;
break;
/* Set flow handle */
case RMNET_IOCTL_FLOW_SET_HNDL:
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 4e9bd64..e76ff14 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -40,9 +40,6 @@
#define SMMU_SIZE ((SZ_1G * 4ULL) - SMMU_BASE)
#define WIGIG_ENABLE_DELAY 50
-#define PM_OPT_SUSPEND (MSM_PCIE_CONFIG_NO_CFG_RESTORE | \
- MSM_PCIE_CONFIG_LINKDOWN)
-#define PM_OPT_RESUME MSM_PCIE_CONFIG_NO_CFG_RESTORE
#define WIGIG_SUBSYS_NAME "WIGIG"
#define WIGIG_RAMDUMP_SIZE 0x200000 /* maximum ramdump size */
@@ -127,6 +124,8 @@ struct msm11ad_ctx {
bool use_cpu_boost;
bool is_cpu_boosted;
struct cpumask boost_cpu;
+
+ bool keep_radio_on_during_sleep;
};
static LIST_HEAD(dev_list);
@@ -523,30 +522,8 @@ int msm_11ad_ctrl_aspm_l1(struct msm11ad_ctx *ctx, bool enable)
return rc;
}
-static int ops_suspend(void *handle)
+static int msm_11ad_turn_device_power_off(struct msm11ad_ctx *ctx)
{
- int rc;
- struct msm11ad_ctx *ctx = handle;
- struct pci_dev *pcidev;
-
- pr_info("%s(%p)\n", __func__, handle);
- if (!ctx) {
- pr_err("No context\n");
- return -ENODEV;
- }
- pcidev = ctx->pcidev;
- rc = pci_save_state(pcidev);
- if (rc) {
- dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
- return rc;
- }
- rc = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
- pcidev, NULL, PM_OPT_SUSPEND);
- if (rc) {
- dev_err(ctx->dev, "msm_pcie_pm_control(SUSPEND) failed :%d\n",
- rc);
- return rc;
- }
if (ctx->gpio_en >= 0)
gpio_direction_output(ctx->gpio_en, 0);
@@ -557,20 +534,12 @@ static int ops_suspend(void *handle)
msm_11ad_disable_vregs(ctx);
- return rc;
+ return 0;
}
-static int ops_resume(void *handle)
+static int msm_11ad_turn_device_power_on(struct msm11ad_ctx *ctx)
{
int rc;
- struct msm11ad_ctx *ctx = handle;
- struct pci_dev *pcidev;
-
- pr_info("%s(%p)\n", __func__, handle);
- if (!ctx) {
- pr_err("No context\n");
- return -ENODEV;
- }
rc = msm_11ad_enable_vregs(ctx);
if (rc) {
@@ -588,25 +557,124 @@ static int ops_resume(void *handle)
if (ctx->sleep_clk_en >= 0)
gpio_direction_output(ctx->sleep_clk_en, 1);
- pcidev = ctx->pcidev;
if (ctx->gpio_en >= 0) {
gpio_direction_output(ctx->gpio_en, 1);
msleep(WIGIG_ENABLE_DELAY);
}
+ return 0;
+
+err_disable_vregs:
+ msm_11ad_disable_vregs(ctx);
+ return rc;
+}
+
+static int msm_11ad_suspend_power_off(void *handle)
+{
+ int rc;
+ struct msm11ad_ctx *ctx = handle;
+ struct pci_dev *pcidev;
+
+ pr_debug("%s\n", __func__);
+
+ if (!ctx) {
+ pr_err("%s: No context\n", __func__);
+ return -ENODEV;
+ }
+
+ pcidev = ctx->pcidev;
+
+ msm_pcie_shadow_control(ctx->pcidev, 0);
+
+ rc = pci_save_state(pcidev);
+ if (rc) {
+ dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
+ goto out;
+ }
+ ctx->pristine_state = pci_store_saved_state(pcidev);
+
+ rc = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
+ pcidev, NULL, 0);
+ if (rc) {
+ dev_err(ctx->dev, "msm_pcie_pm_control(SUSPEND) failed :%d\n",
+ rc);
+ goto out;
+ }
+
+ rc = msm_11ad_turn_device_power_off(ctx);
+
+out:
+ return rc;
+}
+
+static int ops_suspend(void *handle, bool keep_device_power)
+{
+ struct msm11ad_ctx *ctx = handle;
+ struct pci_dev *pcidev;
+ int rc;
+
+ pr_debug("11ad suspend: %s\n", __func__);
+ if (!ctx) {
+ pr_err("11ad suspend: No context\n");
+ return -ENODEV;
+ }
+
+ if (!keep_device_power)
+ return msm_11ad_suspend_power_off(handle);
+
+ pcidev = ctx->pcidev;
+
+ msm_pcie_shadow_control(pcidev, 0);
+
+ dev_dbg(ctx->dev, "disable device and save config\n");
+ pci_disable_device(pcidev);
+ pci_save_state(pcidev);
+ ctx->pristine_state = pci_store_saved_state(pcidev);
+ dev_dbg(ctx->dev, "moving to D3\n");
+ pci_set_power_state(pcidev, PCI_D3hot);
+
+ rc = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
+ pcidev, NULL, 0);
+ if (rc)
+ dev_err(ctx->dev, "msm_pcie_pm_control(SUSPEND) failed :%d\n",
+ rc);
+
+ return rc;
+}
+
+static int msm_11ad_resume_power_on(void *handle)
+{
+ int rc;
+ struct msm11ad_ctx *ctx = handle;
+ struct pci_dev *pcidev;
+
+ pr_debug("%s\n", __func__);
+
+ if (!ctx) {
+ pr_err("%s: No context\n", __func__);
+ return -ENODEV;
+ }
+ pcidev = ctx->pcidev;
+
+ rc = msm_11ad_turn_device_power_on(ctx);
+ if (rc)
+ return rc;
+
rc = msm_pcie_pm_control(MSM_PCIE_RESUME, pcidev->bus->number,
- pcidev, NULL, PM_OPT_RESUME);
+ pcidev, NULL, 0);
if (rc) {
dev_err(ctx->dev, "msm_pcie_pm_control(RESUME) failed :%d\n",
rc);
goto err_disable_power;
}
- rc = msm_pcie_recover_config(pcidev);
- if (rc) {
- dev_err(ctx->dev, "msm_pcie_recover_config failed :%d\n",
- rc);
- goto err_suspend_rc;
- }
+
+ pci_set_power_state(pcidev, PCI_D0);
+
+ if (ctx->pristine_state)
+ pci_load_saved_state(ctx->pcidev, ctx->pristine_state);
+ pci_restore_state(ctx->pcidev);
+
+ msm_pcie_shadow_control(ctx->pcidev, 1);
/* Disable L1, in case it is enabled */
if (ctx->l1_enabled_in_enum) {
@@ -622,18 +690,54 @@ static int ops_resume(void *handle)
err_suspend_rc:
msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
- pcidev, NULL, PM_OPT_SUSPEND);
+ pcidev, NULL, 0);
err_disable_power:
- if (ctx->gpio_en >= 0)
- gpio_direction_output(ctx->gpio_en, 0);
+ msm_11ad_turn_device_power_off(ctx);
+ return rc;
+}
- if (ctx->sleep_clk_en >= 0)
- gpio_direction_output(ctx->sleep_clk_en, 0);
+static int ops_resume(void *handle, bool device_powered_on)
+{
+ struct msm11ad_ctx *ctx = handle;
+ struct pci_dev *pcidev;
+ int rc;
- msm_11ad_disable_clocks(ctx);
-err_disable_vregs:
- msm_11ad_disable_vregs(ctx);
+ pr_debug("11ad resume: %s\n", __func__);
+ if (!ctx) {
+ pr_err("11ad resume: No context\n");
+ return -ENODEV;
+ }
+ pcidev = ctx->pcidev;
+
+ if (!device_powered_on)
+ return msm_11ad_resume_power_on(handle);
+
+ rc = msm_pcie_pm_control(MSM_PCIE_RESUME, pcidev->bus->number,
+ pcidev, NULL, 0);
+ if (rc) {
+ dev_err(ctx->dev, "msm_pcie_pm_control(RESUME) failed :%d\n",
+ rc);
+ return rc;
+ }
+ pci_set_power_state(pcidev, PCI_D0);
+
+ dev_dbg(ctx->dev, "restore state and enable device\n");
+ pci_load_saved_state(pcidev, ctx->pristine_state);
+ pci_restore_state(pcidev);
+
+ rc = pci_enable_device(pcidev);
+ if (rc) {
+ dev_err(ctx->dev, "pci_enable_device failed (%d)\n", rc);
+ goto out;
+ }
+
+ msm_pcie_shadow_control(pcidev, 1);
+
+ dev_dbg(ctx->dev, "pci set master\n");
+ pci_set_master(pcidev);
+
+out:
return rc;
}
@@ -643,9 +747,6 @@ static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
int rc;
int force_pt_coherent = 1;
int smmu_bypass = !ctx->smmu_s1_en;
- dma_addr_t iova_base = 0;
- dma_addr_t iova_end = ctx->smmu_base + ctx->smmu_size - 1;
- struct iommu_domain_geometry geometry;
if (!ctx->use_smmu)
return 0;
@@ -703,17 +804,6 @@ static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
rc);
goto release_mapping;
}
- memset(&geometry, 0, sizeof(geometry));
- geometry.aperture_start = iova_base;
- geometry.aperture_end = iova_end;
- rc = iommu_domain_set_attr(ctx->mapping->domain,
- DOMAIN_ATTR_GEOMETRY,
- &geometry);
- if (rc) {
- dev_err(ctx->dev, "Set geometry attribute to SMMU failed (%d)\n",
- rc);
- goto release_mapping;
- }
}
}
@@ -992,6 +1082,8 @@ static int msm_11ad_probe(struct platform_device *pdev)
return -EINVAL;
}
ctx->use_smmu = of_property_read_bool(of_node, "qcom,smmu-support");
+ ctx->keep_radio_on_during_sleep = of_property_read_bool(of_node,
+ "qcom,keep-radio-on-during-sleep");
ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
@@ -1104,13 +1196,6 @@ static int msm_11ad_probe(struct platform_device *pdev)
}
}
- rc = pci_save_state(pcidev);
- if (rc) {
- dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
- goto out_rc;
- }
- ctx->pristine_state = pci_store_saved_state(pcidev);
-
if (ctx->sleep_clk_en >= 0) {
rc = gpio_request(ctx->sleep_clk_en, "msm_11ad");
if (rc < 0) {
@@ -1146,7 +1231,7 @@ static int msm_11ad_probe(struct platform_device *pdev)
device_disable_async_suspend(&pcidev->dev);
list_add_tail(&ctx->list, &dev_list);
- ops_suspend(ctx);
+ msm_11ad_suspend_power_off(ctx);
return 0;
out_rc:
@@ -1236,6 +1321,17 @@ static void msm_11ad_set_boost_affinity(struct msm11ad_ctx *ctx)
dev_warn(ctx->dev, "failed to set CPU boost affinity\n");
}
+static void msm_11ad_clear_boost_affinity(struct msm11ad_ctx *ctx)
+{
+ int rc;
+
+ irq_modify_status(ctx->pcidev->irq, IRQ_NO_BALANCING, 0);
+ rc = irq_set_affinity_hint(ctx->pcidev->irq, NULL);
+ if (rc)
+ dev_warn(ctx->dev,
+ "Failed clear affinity, rc=%d\n", rc);
+}
+
/* hooks for the wil6210 driver */
static int ops_bus_request(void *handle, u32 kbps /* KBytes/Sec */)
{
@@ -1287,8 +1383,7 @@ static int ops_bus_request(void *handle, u32 kbps /* KBytes/Sec */)
dev_err(ctx->dev,
"Failed disable boost rc=%d\n",
rc);
- irq_modify_status(ctx->pcidev->irq,
- IRQ_NO_BALANCING, 0);
+ msm_11ad_clear_boost_affinity(ctx);
dev_dbg(ctx->dev, "CPU boost disabled\n");
}
ctx->is_cpu_boosted = needs_boost;
@@ -1316,7 +1411,7 @@ static void ops_uninit(void *handle)
memset(&ctx->rops, 0, sizeof(ctx->rops));
ctx->wil_handle = NULL;
- ops_suspend(ctx);
+ msm_11ad_suspend_power_off(ctx);
}
static int msm_11ad_notify_crash(struct msm11ad_ctx *ctx)
@@ -1374,6 +1469,16 @@ static int ops_notify(void *handle, enum wil_platform_event evt)
return rc;
}
+static bool ops_keep_radio_on_during_sleep(void *handle)
+{
+ struct msm11ad_ctx *ctx = (struct msm11ad_ctx *)handle;
+
+ pr_debug("%s: keep radio on during sleep is %s\n", __func__,
+ ctx->keep_radio_on_during_sleep ? "allowed" : "not allowed");
+
+ return ctx->keep_radio_on_during_sleep;
+}
+
void *msm_11ad_dev_init(struct device *dev, struct wil_platform_ops *ops,
const struct wil_platform_rops *rops, void *wil_handle)
{
@@ -1413,6 +1518,7 @@ void *msm_11ad_dev_init(struct device *dev, struct wil_platform_ops *ops,
ops->resume = ops_resume;
ops->uninit = ops_uninit;
ops->notify = ops_notify;
+ ops->keep_radio_on_during_sleep = ops_keep_radio_on_during_sleep;
return ctx;
}
@@ -1429,19 +1535,9 @@ int msm_11ad_modinit(void)
return -EINVAL;
}
- if (ctx->pristine_state) {
- /* in old kernels, pci_load_saved_state() is not exported;
- * so use pci_load_and_free_saved_state()
- * and re-allocate ctx->saved_state again
- */
- pci_load_and_free_saved_state(ctx->pcidev,
- &ctx->pristine_state);
- ctx->pristine_state = pci_store_saved_state(ctx->pcidev);
- }
-
ctx->subsys_handle = subsystem_get(ctx->subsysdesc.name);
- return ops_resume(ctx);
+ return msm_11ad_resume_power_on(ctx);
}
EXPORT_SYMBOL(msm_11ad_modinit);
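The suspend path above now has two flavors, selected by the new keep_device_power argument: the original full power-off, and a lighter path that keeps the radio powered and only saves PCI config space before moving the link to D3hot. A rough sketch of the decision, assuming the context layout from this file; choose_suspend_path() is a hypothetical wrapper, not a function in the patch:

static int choose_suspend_path(struct msm11ad_ctx *ctx, bool keep_device_power)
{
        struct pci_dev *pcidev = ctx->pcidev;

        if (!keep_device_power)
                return msm_11ad_suspend_power_off(ctx); /* cut power entirely */

        /* keep the radio on: save config space and move the device to D3hot */
        pci_disable_device(pcidev);
        pci_save_state(pcidev);
        ctx->pristine_state = pci_store_saved_state(pcidev);
        pci_set_power_state(pcidev, PCI_D3hot);

        return msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
                        pcidev, NULL, 0);
}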
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index cd76ca2..8c43c4e 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -310,6 +310,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(ctm_current_max),
POWER_SUPPLY_ATTR(hw_current_max),
POWER_SUPPLY_ATTR(real_type),
+ POWER_SUPPLY_ATTR(pr_swap),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 4ecf9a5..8641a45 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -39,6 +39,8 @@
#define PL_VOTER "PL_VOTER"
#define RESTRICT_CHG_VOTER "RESTRICT_CHG_VOTER"
#define ICL_CHANGE_VOTER "ICL_CHANGE_VOTER"
+#define PL_INDIRECT_VOTER "PL_INDIRECT_VOTER"
+#define USBIN_I_VOTER "USBIN_I_VOTER"
struct pl_data {
int pl_mode;
@@ -53,7 +55,8 @@ struct pl_data {
struct votable *pl_awake_votable;
struct votable *hvdcp_hw_inov_dis_votable;
struct votable *usb_icl_votable;
- struct work_struct status_change_work;
+ struct votable *pl_enable_votable_indirect;
+ struct delayed_work status_change_work;
struct work_struct pl_disable_forever_work;
struct delayed_work pl_taper_work;
struct power_supply *main_psy;
@@ -491,6 +494,7 @@ static int pl_fv_vote_callback(struct votable *votable, void *data,
}
#define ICL_STEP_UA 25000
+#define PL_DELAY_MS 3000
static int usb_icl_vote_callback(struct votable *votable, void *data,
int icl_ua, const char *client)
{
@@ -512,6 +516,21 @@ static int usb_icl_vote_callback(struct votable *votable, void *data,
*/
vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, true, 0);
+ /*
+ * If the requested ICL is at or below 1400 mA, disable the parallel
+ * charger via USBIN_I_VOTER. Otherwise do not re-enable it here;
+ * status_change_work (triggered on AICL completion, or scheduled
+ * below) re-evaluates USBIN_I_VOTER based on the settled current.
+ */
+ if (icl_ua <= 1400000)
+ vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+ else
+ schedule_delayed_work(&chip->status_change_work,
+ msecs_to_jiffies(PL_DELAY_MS));
+
/* rerun AICL */
/* get the settled current */
rc = power_supply_get_property(chip->main_psy,
@@ -532,8 +551,6 @@ static int usb_icl_vote_callback(struct votable *votable, void *data,
power_supply_set_property(chip->main_psy,
POWER_SUPPLY_PROP_CURRENT_MAX,
&pval);
- /* wait for ICL change */
- msleep(100);
}
/* set the effective ICL */
@@ -541,9 +558,6 @@ static int usb_icl_vote_callback(struct votable *votable, void *data,
power_supply_set_property(chip->main_psy,
POWER_SUPPLY_PROP_CURRENT_MAX,
&pval);
- if (rerun_aicl)
- /* wait for ICL change */
- msleep(100);
vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, false, 0);
@@ -643,6 +657,16 @@ static int pl_disable_vote_callback(struct votable *votable,
return 0;
}
+static int pl_enable_indirect_vote_callback(struct votable *votable,
+ void *data, int pl_enable, const char *client)
+{
+ struct pl_data *chip = data;
+
+ vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, !pl_enable, 0);
+
+ return 0;
+}
+
static int pl_awake_vote_callback(struct votable *votable,
void *data, int awake, const char *client)
{
@@ -775,6 +799,42 @@ static void handle_settled_icl_change(struct pl_data *chip)
union power_supply_propval pval = {0, };
int new_total_settled_ua;
int rc;
+ int main_settled_ua;
+ int main_limited;
+ int total_current_ua;
+
+ total_current_ua = get_effective_result_locked(chip->usb_icl_votable);
+
+ /*
+ * Read the main charger's settled current and input-current-limited
+ * status; these decide whether the parallel path may be enabled.
+ */
+ rc = power_supply_get_property(chip->main_psy,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+ return;
+ }
+ main_settled_ua = pval.intval;
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+ return;
+ }
+ main_limited = pval.intval;
+
+ if ((main_limited && (main_settled_ua + chip->pl_settled_ua) < 1400000)
+ || (main_settled_ua == 0)
+ || ((total_current_ua >= 0) &&
+ (total_current_ua <= 1400000)))
+ vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+ else
+ vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);
+
if (get_effective_result(chip->pl_disable_votable))
return;
@@ -783,17 +843,10 @@ static void handle_settled_icl_change(struct pl_data *chip)
|| chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT) {
/*
* call aicl split only when USBIN_USBIN and enabled
- * and if aicl changed
+ * and if settled current has changed by more than 300mA
*/
- rc = power_supply_get_property(chip->main_psy,
- POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
- &pval);
- if (rc < 0) {
- pr_err("Couldn't get aicl settled value rc=%d\n", rc);
- return;
- }
- new_total_settled_ua = pval.intval + chip->pl_settled_ua;
+ new_total_settled_ua = main_settled_ua + chip->pl_settled_ua;
pl_dbg(chip, PR_PARALLEL,
"total_settled_ua=%d settled_ua=%d new_total_settled_ua=%d\n",
chip->total_settled_ua, pval.intval,
@@ -840,7 +893,7 @@ static void handle_parallel_in_taper(struct pl_data *chip)
static void status_change_work(struct work_struct *work)
{
struct pl_data *chip = container_of(work,
- struct pl_data, status_change_work);
+ struct pl_data, status_change_work.work);
if (!chip->main_psy && is_main_available(chip)) {
/*
@@ -878,7 +931,7 @@ static int pl_notifier_call(struct notifier_block *nb,
if ((strcmp(psy->desc->name, "parallel") == 0)
|| (strcmp(psy->desc->name, "battery") == 0)
|| (strcmp(psy->desc->name, "main") == 0))
- schedule_work(&chip->status_change_work);
+ schedule_delayed_work(&chip->status_change_work, 0);
return NOTIFY_OK;
}
@@ -899,7 +952,7 @@ static int pl_register_notifier(struct pl_data *chip)
static int pl_determine_initial_status(struct pl_data *chip)
{
- status_change_work(&chip->status_change_work);
+ status_change_work(&chip->status_change_work.work);
return 0;
}
@@ -968,7 +1021,18 @@ int qcom_batt_init(void)
goto destroy_votable;
}
- INIT_WORK(&chip->status_change_work, status_change_work);
+ chip->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT",
+ VOTE_SET_ANY,
+ pl_enable_indirect_vote_callback,
+ chip);
+ if (IS_ERR(chip->pl_enable_votable_indirect)) {
+ rc = PTR_ERR(chip->pl_enable_votable_indirect);
+ return rc;
+ }
+
+ vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
+
+ INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
INIT_DELAYED_WORK(&chip->pl_taper_work, pl_taper_work);
INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
@@ -1001,6 +1065,7 @@ int qcom_batt_init(void)
unreg_notifier:
power_supply_unreg_notifier(&chip->nb);
destroy_votable:
+ destroy_votable(chip->pl_enable_votable_indirect);
destroy_votable(chip->pl_awake_votable);
destroy_votable(chip->pl_disable_votable);
destroy_votable(chip->fv_votable);
@@ -1020,11 +1085,12 @@ void qcom_batt_deinit(void)
if (chip == NULL)
return;
- cancel_work_sync(&chip->status_change_work);
+ cancel_delayed_work_sync(&chip->status_change_work);
cancel_delayed_work_sync(&chip->pl_taper_work);
cancel_work_sync(&chip->pl_disable_forever_work);
power_supply_unreg_notifier(&chip->nb);
+ destroy_votable(chip->pl_enable_votable_indirect);
destroy_votable(chip->pl_awake_votable);
destroy_votable(chip->pl_disable_votable);
destroy_votable(chip->fv_votable);
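The parallel-charger rework above moves the USBIN_I_VOTER handling out of smb-lib.c and into battery.c by way of an indirect votable: clients vote on PL_ENABLE_INDIRECT, and its callback translates the aggregated result into a PL_INDIRECT_VOTER vote on the existing pl_disable votable. A simplified sketch of that indirection, using the votable API (create_votable/vote) seen in the diff; the demo_* names and the collapsed threshold check are illustrative:

static int demo_pl_enable_indirect_cb(struct votable *votable, void *data,
        int pl_enable, const char *client)
{
        struct pl_data *chip = data;

        /* any client asserting "enable" removes the indirect disable vote */
        vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, !pl_enable, 0);
        return 0;
}

static void demo_eval_input_strength(struct pl_data *chip, int total_icl_ua,
        int main_settled_ua, bool main_limited)
{
        /* weak input: keep the parallel charger off via USBIN_I_VOTER */
        bool weak = main_limited || main_settled_ua == 0 ||
                (total_icl_ua >= 0 && total_icl_ua <= 1400000);

        vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, !weak, 0);
}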
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index e3ecf49..73d54c6 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -491,7 +491,7 @@ static void fg_encode_default(struct fg_sram_param *sp,
int i, mask = 0xff;
int64_t temp;
- temp = DIV_ROUND_CLOSEST(val * sp[id].numrtr, sp[id].denmtr);
+ temp = (int64_t)div_s64((s64)val * sp[id].numrtr, sp[id].denmtr);
pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val);
for (i = 0; i < sp[id].len; i++) {
buf[i] = temp & mask;
@@ -1320,9 +1320,16 @@ static int fg_cap_learning_process_full_data(struct fg_chip *chip)
return rc;
}
- cc_soc_delta_pct = DIV_ROUND_CLOSEST(
- abs(cc_soc_sw - chip->cl.init_cc_soc_sw) * 100,
- CC_SOC_30BIT);
+ cc_soc_delta_pct =
+ div64_s64((int64_t)(cc_soc_sw - chip->cl.init_cc_soc_sw) * 100,
+ CC_SOC_30BIT);
+
+ /* If the delta is < 50%, then skip processing full data */
+ if (cc_soc_delta_pct < 50) {
+ pr_err("cc_soc_delta_pct: %d\n", cc_soc_delta_pct);
+ return -ERANGE;
+ }
+
delta_cc_uah = div64_s64(chip->cl.learned_cc_uah * cc_soc_delta_pct,
100);
chip->cl.final_cc_uah = chip->cl.init_cc_uah + delta_cc_uah;
@@ -1392,7 +1399,6 @@ static int fg_cap_learning_done(struct fg_chip *chip)
return rc;
}
-#define FULL_SOC_RAW 255
static void fg_cap_learning_update(struct fg_chip *chip)
{
int rc, batt_soc, batt_soc_msb;
@@ -3937,7 +3943,7 @@ static int fg_parse_ki_coefficients(struct fg_chip *chip)
}
#define DEFAULT_CUTOFF_VOLT_MV 3200
-#define DEFAULT_EMPTY_VOLT_MV 2800
+#define DEFAULT_EMPTY_VOLT_MV 2850
#define DEFAULT_RECHARGE_VOLT_MV 4250
#define DEFAULT_CHG_TERM_CURR_MA 100
#define DEFAULT_CHG_TERM_BASE_CURR_MA 75
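Both fuel-gauge changes above replace DIV_ROUND_CLOSEST on a widened product with the 64-bit-safe helpers from <linux/math64.h>, since a plain 64-bit division is not available on 32-bit kernels. A small sketch of the percentage calculation under that assumption; CC_SOC_30BIT is the driver's full-scale constant and the function name is illustrative:

#include <linux/math64.h>

static int cc_soc_delta_percent(int cc_soc_sw, int init_cc_soc_sw)
{
        /* the product (delta * 100) can overflow 32 bits, so widen first */
        return div64_s64((int64_t)(cc_soc_sw - init_cc_soc_sw) * 100,
                        CC_SOC_30BIT);
}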
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 2266a2a..becce31 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -266,8 +266,9 @@ module_param_named(
debug_mask, __debug_mask, int, 0600
);
-#define MICRO_1P5A 1500000
-#define MICRO_P1A 100000
+#define MICRO_1P5A 1500000
+#define MICRO_P1A 100000
+#define OTG_DEFAULT_DEGLITCH_TIME_MS 50
static int smb2_parse_dt(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -304,9 +305,6 @@ static int smb2_parse_dt(struct smb2 *chip)
chip->dt.no_battery = of_property_read_bool(node,
"qcom,batteryless-platform");
- chg->external_vconn = of_property_read_bool(node,
- "qcom,external-vconn");
-
rc = of_property_read_u32(node,
"qcom,fcc-max-ua", &chg->batt_profile_fcc_ua);
if (rc < 0)
@@ -400,6 +398,12 @@ static int smb2_parse_dt(struct smb2 *chip)
chg->suspend_input_on_debug_batt = of_property_read_bool(node,
"qcom,suspend-input-on-debug-batt");
+
+ rc = of_property_read_u32(node, "qcom,otg-deglitch-time-ms",
+ &chg->otg_delay_ms);
+ if (rc < 0)
+ chg->otg_delay_ms = OTG_DEFAULT_DEGLITCH_TIME_MS;
+
return 0;
}
@@ -428,6 +432,7 @@ static enum power_supply_property smb2_usb_props[] = {
POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
POWER_SUPPLY_PROP_HW_CURRENT_MAX,
POWER_SUPPLY_PROP_REAL_TYPE,
+ POWER_SUPPLY_PROP_PR_SWAP,
};
static int smb2_usb_get_prop(struct power_supply *psy,
@@ -450,8 +455,7 @@ static int smb2_usb_get_prop(struct power_supply *psy,
if (!val->intval)
break;
- rc = smblib_get_prop_typec_mode(chg, val);
- if ((val->intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
+ if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
chg->micro_usb_mode) &&
chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
val->intval = 0;
@@ -488,7 +492,7 @@ static int smb2_usb_get_prop(struct power_supply *psy,
else if (chip->bad_part)
val->intval = POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
else
- rc = smblib_get_prop_typec_mode(chg, val);
+ val->intval = chg->typec_mode;
break;
case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
if (chg->micro_usb_mode)
@@ -532,6 +536,9 @@ static int smb2_usb_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_HW_CURRENT_MAX:
rc = smblib_get_charge_current(chg, &val->intval);
break;
+ case POWER_SUPPLY_PROP_PR_SWAP:
+ rc = smblib_get_prop_pr_swap_in_progress(chg, val);
+ break;
default:
pr_err("get prop %d is not supported in usb\n", psp);
rc = -EINVAL;
@@ -590,6 +597,9 @@ static int smb2_usb_set_prop(struct power_supply *psy,
rc = vote(chg->usb_icl_votable, CTM_VOTER,
val->intval >= 0, val->intval);
break;
+ case POWER_SUPPLY_PROP_PR_SWAP:
+ rc = smblib_set_prop_pr_swap_in_progress(chg, val);
+ break;
default:
pr_err("set prop %d is not supported\n", psp);
rc = -EINVAL;
@@ -667,8 +677,7 @@ static int smb2_usb_port_get_prop(struct power_supply *psy,
if (!val->intval)
break;
- rc = smblib_get_prop_typec_mode(chg, val);
- if ((val->intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
+ if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
chg->micro_usb_mode) &&
chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
val->intval = 1;
@@ -1043,7 +1052,8 @@ static int smb2_batt_get_prop(struct power_supply *psy,
rc = smblib_get_prop_batt_voltage_now(chg, val);
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
- val->intval = get_client_vote(chg->fv_votable, DEFAULT_VOTER);
+ val->intval = get_client_vote(chg->fv_votable,
+ BATT_PROFILE_VOTER);
break;
case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
rc = smblib_get_prop_charge_qnovo_enable(chg, val);
@@ -1061,7 +1071,7 @@ static int smb2_batt_get_prop(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
val->intval = get_client_vote(chg->fcc_votable,
- DEFAULT_VOTER);
+ BATT_PROFILE_VOTER);
break;
case POWER_SUPPLY_PROP_TEMP:
rc = smblib_get_prop_batt_temp(chg, val);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index b1070e8..6ead522 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -637,6 +637,7 @@ static void smblib_uusb_removal(struct smb_charger *chg)
/* reset both usbin current and voltage votes */
vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+ vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
@@ -839,7 +840,6 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
{
int rc = 0;
bool override;
- union power_supply_propval pval;
/* suspend and return if 25mA or less is requested */
if (icl_ua < USBIN_25MA)
@@ -849,14 +849,8 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
if (icl_ua == INT_MAX)
goto override_suspend_config;
- rc = smblib_get_prop_typec_mode(chg, &pval);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get typeC mode rc = %d\n", rc);
- goto enable_icl_changed_interrupt;
- }
-
/* configure current */
- if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+ if (chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
&& (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)) {
rc = set_sdp_current(chg, icl_ua);
if (rc < 0) {
@@ -864,6 +858,7 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
goto enable_icl_changed_interrupt;
}
} else {
+ set_sdp_current(chg, 100000);
rc = smblib_set_charge_param(chg, &chg->param.usb_icl, icl_ua);
if (rc < 0) {
smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc);
@@ -877,7 +872,7 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
if (icl_ua == INT_MAX) {
/* remove override if no voters - hw defaults is desired */
override = false;
- } else if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
+ } else if (chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
/* For std cable with type = SDP never override */
override = false;
@@ -917,15 +912,8 @@ int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua)
int rc = 0;
u8 load_cfg;
bool override;
- union power_supply_propval pval;
- rc = smblib_get_prop_typec_mode(chg, &pval);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get typeC mode rc = %d\n", rc);
- return rc;
- }
-
- if ((pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+ if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
|| chg->micro_usb_mode)
&& (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)) {
rc = get_sdp_current(chg, icl_ua);
@@ -1046,16 +1034,6 @@ static int smblib_chg_disable_vote_callback(struct votable *votable, void *data,
return 0;
}
-static int smblib_pl_enable_indirect_vote_callback(struct votable *votable,
- void *data, int chg_enable, const char *client)
-{
- struct smb_charger *chg = data;
-
- vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, !chg_enable, 0);
-
- return 0;
-}
-
static int smblib_hvdcp_enable_vote_callback(struct votable *votable,
void *data,
int hvdcp_enable, const char *client)
@@ -1212,36 +1190,13 @@ static int smblib_typec_irq_disable_vote_callback(struct votable *votable,
static int _smblib_vconn_regulator_enable(struct regulator_dev *rdev)
{
struct smb_charger *chg = rdev_get_drvdata(rdev);
- u8 otg_stat, val;
- int rc = 0, i;
-
- if (!chg->external_vconn) {
- /*
- * Hardware based OTG soft start should complete within 1ms, so
- * wait for 2ms in the worst case.
- */
- for (i = 0; i < MAX_OTG_SS_TRIES; ++i) {
- usleep_range(1000, 1100);
- rc = smblib_read(chg, OTG_STATUS_REG, &otg_stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read OTG status rc=%d\n",
- rc);
- return rc;
- }
-
- if (otg_stat & BOOST_SOFTSTART_DONE_BIT)
- break;
- }
-
- if (!(otg_stat & BOOST_SOFTSTART_DONE_BIT)) {
- smblib_err(chg, "Couldn't enable VCONN; OTG soft start failed\n");
- return -EAGAIN;
- }
- }
+ int rc = 0;
+ u8 val;
/*
- * VCONN_EN_ORIENTATION is overloaded with overriding the CC pin used
- * for Vconn, and it should be set with reverse polarity of CC_OUT.
+ * When enabling VCONN via the command register, the CC pin to use must
+ * be selected explicitly. VCONN should be supplied on the inactive CC
+ * pin, hence the opposite of CC_ORIENTATION_BIT is used.
*/
smblib_dbg(chg, PR_OTG, "enabling VCONN\n");
val = chg->typec_status[3] &
@@ -1262,7 +1217,7 @@ int smblib_vconn_regulator_enable(struct regulator_dev *rdev)
struct smb_charger *chg = rdev_get_drvdata(rdev);
int rc = 0;
- mutex_lock(&chg->otg_oc_lock);
+ mutex_lock(&chg->vconn_oc_lock);
if (chg->vconn_en)
goto unlock;
@@ -1271,7 +1226,7 @@ int smblib_vconn_regulator_enable(struct regulator_dev *rdev)
chg->vconn_en = true;
unlock:
- mutex_unlock(&chg->otg_oc_lock);
+ mutex_unlock(&chg->vconn_oc_lock);
return rc;
}
@@ -1294,7 +1249,7 @@ int smblib_vconn_regulator_disable(struct regulator_dev *rdev)
struct smb_charger *chg = rdev_get_drvdata(rdev);
int rc = 0;
- mutex_lock(&chg->otg_oc_lock);
+ mutex_lock(&chg->vconn_oc_lock);
if (!chg->vconn_en)
goto unlock;
@@ -1303,7 +1258,7 @@ int smblib_vconn_regulator_disable(struct regulator_dev *rdev)
chg->vconn_en = false;
unlock:
- mutex_unlock(&chg->otg_oc_lock);
+ mutex_unlock(&chg->vconn_oc_lock);
return rc;
}
@@ -1312,9 +1267,9 @@ int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev)
struct smb_charger *chg = rdev_get_drvdata(rdev);
int ret;
- mutex_lock(&chg->otg_oc_lock);
+ mutex_lock(&chg->vconn_oc_lock);
ret = chg->vconn_en;
- mutex_unlock(&chg->otg_oc_lock);
+ mutex_unlock(&chg->vconn_oc_lock);
return ret;
}
@@ -1417,13 +1372,6 @@ static int _smblib_vbus_regulator_disable(struct regulator_dev *rdev)
struct smb_charger *chg = rdev_get_drvdata(rdev);
int rc;
- if (!chg->external_vconn && chg->vconn_en) {
- smblib_dbg(chg, PR_OTG, "Killing VCONN before disabling OTG\n");
- rc = _smblib_vconn_regulator_disable(rdev);
- if (rc < 0)
- smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
- }
-
if (chg->wa_flags & OTG_WA) {
/* set OTG current limit to minimum value */
rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
@@ -1651,6 +1599,7 @@ int smblib_get_prop_batt_health(struct smb_charger *chg,
{
union power_supply_propval pval;
int rc;
+ int effective_fv_uv;
u8 stat;
rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat);
@@ -1669,10 +1618,11 @@ int smblib_get_prop_batt_health(struct smb_charger *chg,
* If Vbatt is within 40mV above Vfloat, then don't
* treat it as overvoltage.
*/
- if (pval.intval >=
- get_effective_result(chg->fv_votable) + 40000) {
+ effective_fv_uv = get_effective_result(chg->fv_votable);
+ if (pval.intval >= effective_fv_uv + 40000) {
val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- smblib_err(chg, "battery over-voltage\n");
+ smblib_err(chg, "battery over-voltage vbat_fg = %duV, fv = %duV\n",
+ pval.intval, effective_fv_uv);
goto done;
}
}
@@ -1933,38 +1883,18 @@ int smblib_rerun_aicl(struct smb_charger *chg)
return rc;
smblib_dbg(chg, PR_MISC, "re-running AICL\n");
- switch (chg->smb_version) {
- case PMI8998_SUBTYPE:
- rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
- &settled_icl_ua);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
- return rc;
- }
-
- vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
- max(settled_icl_ua - chg->param.usb_icl.step_u,
- chg->param.usb_icl.step_u));
- vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);
- break;
- case PM660_SUBTYPE:
- /*
- * Use restart_AICL instead of trigger_AICL as it runs the
- * complete AICL instead of starting from the last settled
- * value.
- */
- rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
- RESTART_AICL_BIT, RESTART_AICL_BIT);
- if (rc < 0)
- smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
- rc);
- break;
- default:
- smblib_dbg(chg, PR_PARALLEL, "unknown SMB chip %d\n",
- chg->smb_version);
- return -EINVAL;
+ rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+ &settled_icl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
+ return rc;
}
+ vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
+ max(settled_icl_ua - chg->param.usb_icl.step_u,
+ chg->param.usb_icl.step_u));
+ vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);
+
return 0;
}
@@ -1999,6 +1929,7 @@ static int smblib_dm_pulse(struct smb_charger *chg)
int smblib_dp_dm(struct smb_charger *chg, int val)
{
int target_icl_ua, rc = 0;
+ union power_supply_propval pval;
switch (val) {
case POWER_SUPPLY_DP_DM_DP_PULSE:
@@ -2016,10 +1947,35 @@ int smblib_dp_dm(struct smb_charger *chg, int val)
rc, chg->pulse_cnt);
break;
case POWER_SUPPLY_DP_DM_ICL_DOWN:
- chg->usb_icl_delta_ua -= 100000;
target_icl_ua = get_effective_result(chg->usb_icl_votable);
+ if (target_icl_ua < 0) {
+ /* no client vote, get the ICL from charger */
+ rc = power_supply_get_property(chg->usb_psy,
+ POWER_SUPPLY_PROP_HW_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't get max current rc=%d\n",
+ rc);
+ return rc;
+ }
+ target_icl_ua = pval.intval;
+ }
+
+ /*
+ * Check if any other voter voted on USB_ICL in case of
+ * voter other than SW_QC3_VOTER reset and restart reduction
+ * again.
+ */
+ if (target_icl_ua != get_client_vote(chg->usb_icl_votable,
+ SW_QC3_VOTER))
+ chg->usb_icl_delta_ua = 0;
+
+ chg->usb_icl_delta_ua += 100000;
vote(chg->usb_icl_votable, SW_QC3_VOTER, true,
- target_icl_ua + chg->usb_icl_delta_ua);
+ target_icl_ua - 100000);
+ smblib_dbg(chg, PR_PARALLEL, "ICL DOWN ICL=%d reduction=%d\n",
+ target_icl_ua, chg->usb_icl_delta_ua);
break;
case POWER_SUPPLY_DP_DM_ICL_UP:
default:
@@ -2255,8 +2211,6 @@ static const char * const smblib_typec_mode_name[] = {
static int smblib_get_prop_ufp_mode(struct smb_charger *chg)
{
switch (chg->typec_status[0]) {
- case 0:
- return POWER_SUPPLY_TYPEC_NONE;
case UFP_TYPEC_RDSTD_BIT:
return POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
case UFP_TYPEC_RD1P5_BIT:
@@ -2267,7 +2221,7 @@ static int smblib_get_prop_ufp_mode(struct smb_charger *chg)
break;
}
- return POWER_SUPPLY_TYPEC_NON_COMPLIANT;
+ return POWER_SUPPLY_TYPEC_NONE;
}
static int smblib_get_prop_dfp_mode(struct smb_charger *chg)
@@ -2281,8 +2235,6 @@ static int smblib_get_prop_dfp_mode(struct smb_charger *chg)
return POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE;
case DFP_RD_OPEN_BIT:
return POWER_SUPPLY_TYPEC_SINK;
- case DFP_RA_OPEN_BIT:
- return POWER_SUPPLY_TYPEC_POWERED_CABLE_ONLY;
default:
break;
}
@@ -2290,20 +2242,12 @@ static int smblib_get_prop_dfp_mode(struct smb_charger *chg)
return POWER_SUPPLY_TYPEC_NONE;
}
-int smblib_get_prop_typec_mode(struct smb_charger *chg,
- union power_supply_propval *val)
+static int smblib_get_prop_typec_mode(struct smb_charger *chg)
{
- if (!(chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT)) {
- val->intval = POWER_SUPPLY_TYPEC_NONE;
- return 0;
- }
-
if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
- val->intval = smblib_get_prop_dfp_mode(chg);
+ return smblib_get_prop_dfp_mode(chg);
else
- val->intval = smblib_get_prop_ufp_mode(chg);
-
- return 0;
+ return smblib_get_prop_ufp_mode(chg);
}
int smblib_get_prop_typec_power_role(struct smb_charger *chg,
@@ -2591,24 +2535,12 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
const union power_supply_propval *val)
{
int rc;
- bool orientation, cc_debounced, sink_attached, hvdcp;
+ bool orientation, sink_attached, hvdcp;
u8 stat;
if (!get_effective_result(chg->pd_allowed_votable))
return -EINVAL;
- rc = smblib_read(chg, APSD_STATUS_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read APSD status rc=%d\n", rc);
- return rc;
- }
-
- cc_debounced = (bool)
- (chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
- sink_attached = (bool)
- (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT);
- hvdcp = stat & QC_CHARGER_BIT;
-
chg->pd_active = val->intval;
if (chg->pd_active) {
vote(chg->apsd_disable_votable, PD_VOTER, true, 0);
@@ -2660,6 +2592,14 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
if (rc < 0)
smblib_err(chg, "Couldn't unvote USB_PSY rc=%d\n", rc);
} else {
+ rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read APSD status rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ hvdcp = stat & QC_CHARGER_BIT;
vote(chg->apsd_disable_votable, PD_VOTER, false, 0);
vote(chg->pd_allowed_votable, PD_VOTER, true, 0);
vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0);
@@ -2679,8 +2619,8 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
* and data could be interrupted. Non-legacy DCP could also draw
* more, but it may impact compliance.
*/
- if (!chg->typec_legacy_valid && cc_debounced &&
- !sink_attached && hvdcp)
+ sink_attached = chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT;
+ if (!chg->typec_legacy_valid && !sink_attached && hvdcp)
schedule_work(&chg->legacy_detection_work);
}
@@ -2802,6 +2742,7 @@ static int smblib_cc2_sink_removal_enter(struct smb_charger *chg)
smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
return rc;
}
+
ccout = (stat & CC_ATTACHED_BIT) ?
(!!(stat & CC_ORIENTATION_BIT) + 1) : 0;
ufp_mode = (stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT) ?
@@ -3638,6 +3579,7 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+ vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
/* reset hvdcp voters */
vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
@@ -3668,6 +3610,11 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
chg->pd_hard_reset = 0;
chg->typec_legacy_valid = false;
+ /* reset back to 120mS tCC debounce */
+ rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set 120mS tCC debounce rc=%d\n", rc);
+
/* enable APSD CC trigger for next insertion */
rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
APSD_START_ON_CC_BIT, APSD_START_ON_CC_BIT);
@@ -3708,12 +3655,29 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
if (rc < 0)
smblib_err(chg, "Couldn't restore crude sensor rc=%d\n", rc);
+ mutex_lock(&chg->vconn_oc_lock);
+ if (!chg->vconn_en)
+ goto unlock;
+
+ smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ VCONN_EN_VALUE_BIT, 0);
+ chg->vconn_en = false;
+
+unlock:
+ mutex_unlock(&chg->vconn_oc_lock);
+
+ /* clear exit sink based on cc */
+ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ EXIT_SNK_BASED_ON_CC_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't clear exit_sink_based_on_cc rc=%d\n",
+ rc);
+
typec_sink_removal(chg);
smblib_update_usb_type(chg);
}
-static void smblib_handle_typec_insertion(struct smb_charger *chg,
- bool sink_attached)
+static void smblib_handle_typec_insertion(struct smb_charger *chg)
{
int rc;
@@ -3725,65 +3689,37 @@ static void smblib_handle_typec_insertion(struct smb_charger *chg,
smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
rc);
- if (sink_attached)
+ if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
typec_sink_insertion(chg);
else
typec_sink_removal(chg);
}
-static void smblib_handle_typec_debounce_done(struct smb_charger *chg,
- bool rising, bool sink_attached)
+static void smblib_handle_typec_cc_state_change(struct smb_charger *chg)
{
- int rc;
- union power_supply_propval pval = {0, };
+ if (chg->pr_swap_in_progress)
+ return;
- if (rising) {
- if (!chg->typec_present) {
- chg->typec_present = true;
- smblib_dbg(chg, PR_MISC, "TypeC insertion\n");
- smblib_handle_typec_insertion(chg, sink_attached);
- }
- } else {
- if (chg->typec_present) {
- chg->typec_present = false;
- smblib_dbg(chg, PR_MISC, "TypeC removal\n");
- smblib_handle_typec_removal(chg);
- }
+ chg->typec_mode = smblib_get_prop_typec_mode(chg);
+ if (!chg->typec_present && chg->typec_mode != POWER_SUPPLY_TYPEC_NONE) {
+ chg->typec_present = true;
+ smblib_dbg(chg, PR_MISC, "TypeC %s insertion\n",
+ smblib_typec_mode_name[chg->typec_mode]);
+ smblib_handle_typec_insertion(chg);
+ } else if (chg->typec_present &&
+ chg->typec_mode == POWER_SUPPLY_TYPEC_NONE) {
+ chg->typec_present = false;
+ smblib_dbg(chg, PR_MISC, "TypeC removal\n");
+ smblib_handle_typec_removal(chg);
}
- rc = smblib_get_prop_typec_mode(chg, &pval);
- if (rc < 0)
- smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc);
-
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: debounce-done %s; Type-C %s detected\n",
- rising ? "rising" : "falling",
- smblib_typec_mode_name[pval.intval]);
-}
-
-irqreturn_t smblib_handle_usb_typec_change_for_uusb(struct smb_charger *chg)
-{
- int rc;
- u8 stat;
-
- rc = smblib_read(chg, TYPE_C_STATUS_3_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_3 rc=%d\n", rc);
- return IRQ_HANDLED;
- }
- smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_3 = 0x%02x OTG=%d\n",
- stat, !!(stat & (U_USB_GND_NOVBUS_BIT | U_USB_GND_BIT)));
-
- extcon_set_cable_state_(chg->extcon, EXTCON_USB_HOST,
- !!(stat & (U_USB_GND_NOVBUS_BIT | U_USB_GND_BIT)));
- power_supply_changed(chg->usb_psy);
-
- return IRQ_HANDLED;
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: cc-state-change; Type-C %s detected\n",
+ smblib_typec_mode_name[chg->typec_mode]);
}
static void smblib_usb_typec_change(struct smb_charger *chg)
{
int rc;
- bool debounce_done, sink_attached;
rc = smblib_multibyte_read(chg, TYPE_C_STATUS_1_REG,
chg->typec_status, 5);
@@ -3792,12 +3728,7 @@ static void smblib_usb_typec_change(struct smb_charger *chg)
return;
}
- debounce_done =
- (bool)(chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
- sink_attached =
- (bool)(chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT);
-
- smblib_handle_typec_debounce_done(chg, debounce_done, sink_attached);
+ smblib_handle_typec_cc_state_change(chg);
if (chg->typec_status[3] & TYPEC_VBUS_ERROR_STATUS_BIT)
smblib_dbg(chg, PR_INTERRUPT, "IRQ: vbus-error\n");
@@ -3814,7 +3745,11 @@ irqreturn_t smblib_handle_usb_typec_change(int irq, void *data)
struct smb_charger *chg = irq_data->parent_data;
if (chg->micro_usb_mode) {
- smblib_handle_usb_typec_change_for_uusb(chg);
+ cancel_delayed_work_sync(&chg->uusb_otg_work);
+ vote(chg->awake_votable, OTG_DELAY_VOTER, true, 0);
+ smblib_dbg(chg, PR_INTERRUPT, "Scheduling OTG work\n");
+ schedule_delayed_work(&chg->uusb_otg_work,
+ msecs_to_jiffies(chg->otg_delay_ms));
return IRQ_HANDLED;
}
@@ -3896,9 +3831,63 @@ irqreturn_t smblib_handle_wdog_bark(int irq, void *data)
return IRQ_HANDLED;
}
+/**************
+ * Additional USB PSY getters/setters
+ * that call interrupt functions
+ ***************/
+
+int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ val->intval = chg->pr_swap_in_progress;
+ return 0;
+}
+
+int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc;
+
+ chg->pr_swap_in_progress = val->intval;
+ /*
+ * Re-run the CC-changed handling to pick up a real cable removal
+ * that may have happened while the power-role swap was in progress.
+ */
+ smblib_usb_typec_change(chg);
+ rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT,
+ val->intval ? TCC_DEBOUNCE_20MS_BIT : 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set tCC debounce rc=%d\n", rc);
+ return 0;
+}
+
/***************
* Work Queues *
***************/
+static void smblib_uusb_otg_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ uusb_otg_work.work);
+ int rc;
+ u8 stat;
+ bool otg;
+
+ rc = smblib_read(chg, TYPE_C_STATUS_3_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_3 rc=%d\n", rc);
+ goto out;
+ }
+
+ otg = !!(stat & (U_USB_GND_NOVBUS_BIT | U_USB_GND_BIT));
+ extcon_set_cable_state_(chg->extcon, EXTCON_USB_HOST, otg);
+ smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_3 = 0x%02x OTG=%d\n",
+ stat, otg);
+ power_supply_changed(chg->usb_psy);
+
+out:
+ vote(chg->awake_votable, OTG_DELAY_VOTER, false, 0);
+}
+
static void smblib_hvdcp_detect_work(struct work_struct *work)
{
@@ -4035,19 +4024,6 @@ static void smblib_otg_oc_exit(struct smb_charger *chg, bool success)
QUICKSTART_OTG_FASTROLESWAP_BIT, 0);
if (rc < 0)
smblib_err(chg, "Couldn't enable VBUS < 1V check rc=%d\n", rc);
-
- if (!chg->external_vconn && chg->vconn_en) {
- chg->vconn_attempts = 0;
- if (success) {
- rc = _smblib_vconn_regulator_enable(
- chg->vconn_vreg->rdev);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable VCONN rc=%d\n",
- rc);
- } else {
- chg->vconn_en = false;
- }
- }
}
#define MAX_OC_FALLING_TRIES 10
@@ -4136,7 +4112,7 @@ static void smblib_vconn_oc_work(struct work_struct *work)
if (!chg->vconn_vreg || !chg->vconn_vreg->rdev)
return;
- mutex_lock(&chg->otg_oc_lock);
+ mutex_lock(&chg->vconn_oc_lock);
rc = _smblib_vconn_regulator_disable(chg->vconn_vreg->rdev);
if (rc < 0) {
smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
@@ -4185,7 +4161,7 @@ static void smblib_vconn_oc_work(struct work_struct *work)
}
unlock:
- mutex_unlock(&chg->otg_oc_lock);
+ mutex_unlock(&chg->vconn_oc_lock);
}
static void smblib_otg_ss_done_work(struct work_struct *work)
@@ -4220,8 +4196,6 @@ static void smblib_icl_change_work(struct work_struct *work)
}
power_supply_changed(chg->usb_main_psy);
- vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER,
- settled_ua >= USB_WEAK_INPUT_UA, 0);
smblib_dbg(chg, PR_INTERRUPT, "icl_settled=%d\n", settled_ua);
}
@@ -4275,14 +4249,14 @@ static void smblib_legacy_detection_work(struct work_struct *work)
chg->typec_legacy_valid = true;
vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
legacy = stat & TYPEC_LEGACY_CABLE_STATUS_BIT;
- rp_high = smblib_get_prop_ufp_mode(chg) ==
- POWER_SUPPLY_TYPEC_SOURCE_HIGH;
+ rp_high = chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH;
if (!legacy || !rp_high)
vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
false, 0);
unlock:
chg->typec_en_dis_active = 0;
+ smblib_usb_typec_change(chg);
mutex_unlock(&chg->lock);
}
@@ -4317,7 +4291,16 @@ static int smblib_create_votables(struct smb_charger *chg)
smblib_err(chg, "Couldn't find votable PL_DISABLE rc=%d\n", rc);
return rc;
}
- vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
+
+ chg->pl_enable_votable_indirect = find_votable("PL_ENABLE_INDIRECT");
+ if (chg->pl_enable_votable_indirect == NULL) {
+ rc = -EINVAL;
+ smblib_err(chg,
+ "Couldn't find votable PL_ENABLE_INDIRECT rc=%d\n",
+ rc);
+ return rc;
+ }
+
vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
chg->dc_suspend_votable = create_votable("DC_SUSPEND", VOTE_SET_ANY,
@@ -4367,14 +4350,6 @@ static int smblib_create_votables(struct smb_charger *chg)
return rc;
}
- chg->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT",
- VOTE_SET_ANY,
- smblib_pl_enable_indirect_vote_callback,
- chg);
- if (IS_ERR(chg->pl_enable_votable_indirect)) {
- rc = PTR_ERR(chg->pl_enable_votable_indirect);
- return rc;
- }
chg->hvdcp_disable_votable_indirect = create_votable(
"HVDCP_DISABLE_INDIRECT",
@@ -4450,8 +4425,6 @@ static void smblib_destroy_votables(struct smb_charger *chg)
destroy_votable(chg->awake_votable);
if (chg->chg_disable_votable)
destroy_votable(chg->chg_disable_votable);
- if (chg->pl_enable_votable_indirect)
- destroy_votable(chg->pl_enable_votable_indirect);
if (chg->apsd_disable_votable)
destroy_votable(chg->apsd_disable_votable);
if (chg->hvdcp_hw_inov_dis_votable)
@@ -4481,6 +4454,7 @@ int smblib_init(struct smb_charger *chg)
mutex_init(&chg->lock);
mutex_init(&chg->write_lock);
mutex_init(&chg->otg_oc_lock);
+ mutex_init(&chg->vconn_oc_lock);
INIT_WORK(&chg->bms_update_work, bms_update_work);
INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
@@ -4492,6 +4466,7 @@ int smblib_init(struct smb_charger *chg)
INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
INIT_WORK(&chg->legacy_detection_work, smblib_legacy_detection_work);
+ INIT_DELAYED_WORK(&chg->uusb_otg_work, smblib_uusb_otg_work);
chg->fake_capacity = -EINVAL;
chg->fake_input_current_limited = -EINVAL;
@@ -4546,6 +4521,7 @@ int smblib_deinit(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->icl_change_work);
cancel_delayed_work_sync(&chg->pl_enable_work);
cancel_work_sync(&chg->legacy_detection_work);
+ cancel_delayed_work_sync(&chg->uusb_otg_work);
power_supply_unreg_notifier(&chg->nb);
smblib_destroy_votables(chg);
qcom_batt_deinit();
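For micro-USB mode, the type-C change interrupt above no longer toggles the OTG extcon state directly; it re-arms a delayed work so the ID line is only sampled after it has been stable for otg_delay_ms. A sketch of that deglitch pattern using the symbols from the diff (uusb_otg_work, awake_votable, OTG_DELAY_VOTER); the wrapper name is illustrative:

static void demo_schedule_uusb_otg(struct smb_charger *chg)
{
        /* restart the deglitch window on every CC/ID change */
        cancel_delayed_work_sync(&chg->uusb_otg_work);

        /* stay awake so the delayed work can run and settle the OTG state */
        vote(chg->awake_votable, OTG_DELAY_VOTER, true, 0);
        schedule_delayed_work(&chg->uusb_otg_work,
                        msecs_to_jiffies(chg->otg_delay_ms));
}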
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 42b357e..f39f2c9 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -36,9 +36,7 @@ enum print_reason {
#define PL_USBIN_USBIN_VOTER "PL_USBIN_USBIN_VOTER"
#define USB_PSY_VOTER "USB_PSY_VOTER"
#define PL_TAPER_WORK_RUNNING_VOTER "PL_TAPER_WORK_RUNNING_VOTER"
-#define PL_INDIRECT_VOTER "PL_INDIRECT_VOTER"
#define PL_QNOVO_VOTER "PL_QNOVO_VOTER"
-#define USBIN_I_VOTER "USBIN_I_VOTER"
#define USBIN_V_VOTER "USBIN_V_VOTER"
#define CHG_STATE_VOTER "CHG_STATE_VOTER"
#define TYPEC_SRC_VOTER "TYPEC_SRC_VOTER"
@@ -64,6 +62,8 @@ enum print_reason {
#define CC2_WA_VOTER "CC2_WA_VOTER"
#define QNOVO_VOTER "QNOVO_VOTER"
#define BATT_PROFILE_VOTER "BATT_PROFILE_VOTER"
+#define OTG_DELAY_VOTER "OTG_DELAY_VOTER"
+#define USBIN_I_VOTER "USBIN_I_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
@@ -227,15 +227,16 @@ struct smb_charger {
struct smb_iio iio;
int *debug_mask;
enum smb_mode mode;
- bool external_vconn;
struct smb_chg_freq chg_freq;
int smb_version;
+ int otg_delay_ms;
/* locks */
struct mutex lock;
struct mutex write_lock;
struct mutex ps_change_lock;
struct mutex otg_oc_lock;
+ struct mutex vconn_oc_lock;
/* power supplies */
struct power_supply *batt_psy;
@@ -290,6 +291,7 @@ struct smb_charger {
struct delayed_work icl_change_work;
struct delayed_work pl_enable_work;
struct work_struct legacy_detection_work;
+ struct delayed_work uusb_otg_work;
/* cached status */
int voltage_min_uv;
@@ -319,6 +321,8 @@ struct smb_charger {
u8 typec_status[5];
bool typec_legacy_valid;
int fake_input_current_limited;
+ bool pr_swap_in_progress;
+ int typec_mode;
/* workaround flag */
u32 wa_flags;
@@ -452,8 +456,6 @@ int smblib_get_prop_usb_current_now(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
union power_supply_propval *val);
-int smblib_get_prop_typec_mode(struct smb_charger *chg,
- union power_supply_propval *val);
int smblib_get_prop_typec_power_role(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_pd_allowed(struct smb_charger *chg,
@@ -506,6 +508,10 @@ int smblib_rerun_aicl(struct smb_charger *chg);
int smblib_set_icl_current(struct smb_charger *chg, int icl_ua);
int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua);
int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua);
+int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
+ const union power_supply_propval *val);
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index 167666a..d8671ab 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -486,11 +486,11 @@ enum {
#define UFP_TYPEC_OPEN_OPEN_BIT BIT(0)
#define TYPE_C_STATUS_2_REG (USBIN_BASE + 0x0C)
-#define DFP_TYPEC_MASK 0x8F
#define DFP_RA_OPEN_BIT BIT(7)
#define TIMER_STAGE_BIT BIT(6)
#define EXIT_UFP_MODE_BIT BIT(5)
#define EXIT_DFP_MODE_BIT BIT(4)
+#define DFP_TYPEC_MASK GENMASK(3, 0)
#define DFP_RD_OPEN_BIT BIT(3)
#define DFP_RD_RA_VCONN_BIT BIT(2)
#define DFP_RD_RD_BIT BIT(1)
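The DFP_TYPEC_MASK change above narrows the mask from 0x8F to GENMASK(3, 0), i.e. bits 3..0, so it now covers only the low four DFP status bits and no longer includes DFP_RA_OPEN_BIT (bit 7). As a quick illustration of the macro (available via <linux/bitops.h> on this kernel):

#include <linux/bitops.h>

#define DEMO_DFP_TYPEC_MASK     GENMASK(3, 0)   /* == 0x0F: bits 3..0 */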
diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c
index b92a482..a464a81 100644
--- a/drivers/power/supply/qcom/smb1351-charger.c
+++ b/drivers/power/supply/qcom/smb1351-charger.c
@@ -1416,6 +1416,7 @@ static enum power_supply_property smb1351_parallel_properties[] = {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_PARALLEL_MODE,
+ POWER_SUPPLY_PROP_INPUT_SUSPEND,
};
static int smb1351_parallel_set_chg_suspend(struct smb1351_charger *chip,
@@ -1702,6 +1703,9 @@ static int smb1351_parallel_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_PARALLEL_MODE:
val->intval = chip->parallel_mode;
break;
+ case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+ val->intval = chip->parallel_charger_suspended;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 83374bb..ca0a2c6 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -248,7 +248,7 @@ static int smb138x_usb_get_prop(struct power_supply *psy,
val->intval = chg->usb_psy_desc.type;
break;
case POWER_SUPPLY_PROP_TYPEC_MODE:
- rc = smblib_get_prop_typec_mode(chg, val);
+ val->intval = chg->typec_mode;
break;
case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
rc = smblib_get_prop_typec_power_role(chg, val);
@@ -941,13 +941,6 @@ static int smb138x_init_slave_hw(struct smb138x *chip)
return rc;
}
- rc = smblib_write(chg, THERMREG_SRC_CFG_REG,
- THERMREG_SKIN_ADC_SRC_EN_BIT);
- if (rc < 0) {
- pr_err("Couldn't enable connector thermreg source rc=%d\n", rc);
- return rc;
- }
-
return 0;
}
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6d4b68c4..f3756ca 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -718,6 +718,7 @@ enum qeth_discipline_id {
};
struct qeth_discipline {
+ const struct device_type *devtype;
void (*start_poll)(struct ccw_device *, int, unsigned long);
qdio_handler_t *input_handler;
qdio_handler_t *output_handler;
@@ -893,6 +894,9 @@ extern struct qeth_discipline qeth_l2_discipline;
extern struct qeth_discipline qeth_l3_discipline;
extern const struct attribute_group *qeth_generic_attr_groups[];
extern const struct attribute_group *qeth_osn_attr_groups[];
+extern const struct attribute_group qeth_device_attr_group;
+extern const struct attribute_group qeth_device_blkt_group;
+extern const struct device_type qeth_generic_devtype;
extern struct workqueue_struct *qeth_wq;
int qeth_card_hw_is_reachable(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 20cf296..e8c4830 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5462,10 +5462,12 @@ void qeth_core_free_discipline(struct qeth_card *card)
card->discipline = NULL;
}
-static const struct device_type qeth_generic_devtype = {
+const struct device_type qeth_generic_devtype = {
.name = "qeth_generic",
.groups = qeth_generic_attr_groups,
};
+EXPORT_SYMBOL_GPL(qeth_generic_devtype);
+
static const struct device_type qeth_osn_devtype = {
.name = "qeth_osn",
.groups = qeth_osn_attr_groups,
@@ -5591,23 +5593,22 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
goto err_card;
}
- if (card->info.type == QETH_CARD_TYPE_OSN)
- gdev->dev.type = &qeth_osn_devtype;
- else
- gdev->dev.type = &qeth_generic_devtype;
-
switch (card->info.type) {
case QETH_CARD_TYPE_OSN:
case QETH_CARD_TYPE_OSM:
rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
if (rc)
goto err_card;
+
+ gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
+ ? card->discipline->devtype
+ : &qeth_osn_devtype;
rc = card->discipline->setup(card->gdev);
if (rc)
goto err_disc;
- case QETH_CARD_TYPE_OSD:
- case QETH_CARD_TYPE_OSX:
+ break;
default:
+ gdev->dev.type = &qeth_generic_devtype;
break;
}
@@ -5663,8 +5664,10 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
if (rc)
goto err;
rc = card->discipline->setup(card->gdev);
- if (rc)
+ if (rc) {
+ qeth_core_free_discipline(card);
goto err;
+ }
}
rc = card->discipline->set_online(gdev);
err:
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 75b29fd2..db6a285 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -413,12 +413,16 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
if (card->options.layer2 == newdis)
goto out;
- else {
- card->info.mac_bits = 0;
- if (card->discipline) {
- card->discipline->remove(card->gdev);
- qeth_core_free_discipline(card);
- }
+ if (card->info.type == QETH_CARD_TYPE_OSM) {
+ /* fixed layer, can't switch */
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ card->info.mac_bits = 0;
+ if (card->discipline) {
+ card->discipline->remove(card->gdev);
+ qeth_core_free_discipline(card);
}
rc = qeth_core_load_discipline(card, newdis);
@@ -426,6 +430,8 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
goto out;
rc = card->discipline->setup(card->gdev);
+ if (rc)
+ qeth_core_free_discipline(card);
out:
mutex_unlock(&card->discipline_mutex);
return rc ? rc : count;
@@ -703,10 +709,11 @@ static struct attribute *qeth_blkt_device_attrs[] = {
&dev_attr_inter_jumbo.attr,
NULL,
};
-static struct attribute_group qeth_device_blkt_group = {
+const struct attribute_group qeth_device_blkt_group = {
.name = "blkt",
.attrs = qeth_blkt_device_attrs,
};
+EXPORT_SYMBOL_GPL(qeth_device_blkt_group);
static struct attribute *qeth_device_attrs[] = {
&dev_attr_state.attr,
@@ -726,9 +733,10 @@ static struct attribute *qeth_device_attrs[] = {
&dev_attr_switch_attrs.attr,
NULL,
};
-static struct attribute_group qeth_device_attr_group = {
+const struct attribute_group qeth_device_attr_group = {
.attrs = qeth_device_attrs,
};
+EXPORT_SYMBOL_GPL(qeth_device_attr_group);
const struct attribute_group *qeth_generic_attr_groups[] = {
&qeth_device_attr_group,
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 29d9fb3..0d59f9a 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -8,6 +8,8 @@
#include "qeth_core.h"
+extern const struct attribute_group *qeth_l2_attr_groups[];
+
int qeth_l2_create_device_attributes(struct device *);
void qeth_l2_remove_device_attributes(struct device *);
void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index bb27058..5d010aa 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1021,11 +1021,21 @@ static int qeth_l2_stop(struct net_device *dev)
return 0;
}
+static const struct device_type qeth_l2_devtype = {
+ .name = "qeth_layer2",
+ .groups = qeth_l2_attr_groups,
+};
+
static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
- qeth_l2_create_device_attributes(&gdev->dev);
+ if (gdev->dev.type == &qeth_generic_devtype) {
+ rc = qeth_l2_create_device_attributes(&gdev->dev);
+ if (rc)
+ return rc;
+ }
INIT_LIST_HEAD(&card->vid_list);
hash_init(card->mac_htable);
card->options.layer2 = 1;
@@ -1037,7 +1047,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
- qeth_l2_remove_device_attributes(&cgdev->dev);
+ if (cgdev->dev.type == &qeth_generic_devtype)
+ qeth_l2_remove_device_attributes(&cgdev->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
@@ -1095,7 +1106,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
case QETH_CARD_TYPE_OSN:
card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN,
ether_setup);
- card->dev->flags |= IFF_NOARP;
break;
default:
card->dev = alloc_etherdev(0);
@@ -1108,9 +1118,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
card->dev->mtu = card->info.initial_mtu;
card->dev->netdev_ops = &qeth_l2_netdev_ops;
- card->dev->ethtool_ops =
- (card->info.type != QETH_CARD_TYPE_OSN) ?
- &qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
+ if (card->info.type == QETH_CARD_TYPE_OSN) {
+ card->dev->ethtool_ops = &qeth_l2_osn_ops;
+ card->dev->flags |= IFF_NOARP;
+ } else {
+ card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
+ }
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
card->dev->hw_features = NETIF_F_SG;
@@ -1434,6 +1447,7 @@ static int qeth_l2_control_event(struct qeth_card *card,
}
struct qeth_discipline qeth_l2_discipline = {
+ .devtype = &qeth_l2_devtype,
.start_poll = qeth_qdio_start_poll,
.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 692db49..a48ed9e 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -272,3 +272,11 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
} else
qeth_bridgeport_an_set(card, 0);
}
+
+const struct attribute_group *qeth_l2_attr_groups[] = {
+ &qeth_device_attr_group,
+ &qeth_device_blkt_group,
+ /* l2 specific, see l2_{create,remove}_device_attributes(): */
+ &qeth_l2_bridgeport_attr_group,
+ NULL,
+};
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 272d9e7..171be5e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3157,8 +3157,13 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
- qeth_l3_create_device_attributes(&gdev->dev);
+ rc = qeth_l3_create_device_attributes(&gdev->dev);
+ if (rc)
+ return rc;
+ hash_init(card->ip_htable);
+ hash_init(card->ip_mc_htable);
card->options.layer2 = 0;
card->info.hwtrap = 0;
return 0;
@@ -3450,6 +3455,7 @@ static int qeth_l3_control_event(struct qeth_card *card,
}
struct qeth_discipline qeth_l3_discipline = {
+ .devtype = &qeth_generic_devtype,
.start_poll = qeth_qdio_start_poll,
.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 904422f..0414843 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1169,6 +1169,8 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
cmd = list_first_entry_or_null(&vscsi->free_cmd,
struct ibmvscsis_cmd, list);
if (cmd) {
+ if (cmd->abort_cmd)
+ cmd->abort_cmd = NULL;
cmd->flags &= ~(DELAY_SEND);
list_del(&cmd->list);
cmd->iue = iue;
@@ -1773,6 +1775,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
if (cmd->abort_cmd) {
retry = true;
cmd->abort_cmd->flags &= ~(DELAY_SEND);
+ cmd->abort_cmd = NULL;
}
/*
@@ -1787,6 +1790,25 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
list_del(&cmd->list);
ibmvscsis_free_cmd_resources(vscsi,
cmd);
+ /*
+ * With a successfully aborted op
+ * through LIO we want to increment
+ * the vscsi credit so that when we do
+ * not send a rsp to the original scsi
+ * abort op (h_send_crq), but the tm rsp
+ * to the abort is sent, the credit is
+ * correctly sent with the abort tm rsp.
+ * We would need 1 for the abort tm rsp
+ * and 1 credit for the aborted scsi op.
+ * Thus we need to increment here. We
+ * also want to increment the credit
+ * here to make sure the cmd is
+ * actually released first; otherwise
+ * the client will think it can send a
+ * new cmd, and we could find ourselves
+ * short of cmd elements.
+ */
+ vscsi->credit += 1;
} else {
iue = cmd->iue;
@@ -2961,10 +2983,7 @@ static long srp_build_response(struct scsi_info *vscsi,
rsp->opcode = SRP_RSP;
- if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
- rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
- else
- rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
+ rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
rsp->tag = cmd->rsp.tag;
rsp->flags = 0;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8a7941b..289374c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4634,6 +4634,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
unsigned long flags;
+ unsigned int sector_sz;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
@@ -4692,6 +4693,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
}
xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
+
+ /* In case of bogus fw or device, we could end up having
+ * unaligned partial completion. We can force alignment here,
+ * then scsi-ml does not need to handle this misbehavior.
+ */
+ sector_sz = scmd->device->sector_size;
+ if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz &&
+ xfer_cnt % sector_sz)) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
+ xfer_cnt, sector_sz);
+ xfer_cnt = round_down(xfer_cnt, sector_sz);
+ }
+
scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c2ac982..967bb0d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2792,10 +2792,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (sdkp->opt_xfer_blocks &&
sdkp->opt_xfer_blocks <= dev_max &&
sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
- logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
- q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
- rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
- } else
+ sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
+ rw_max = q->limits.io_opt =
+ sdkp->opt_xfer_blocks * sdp->sector_size;
+ else
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index c8d9863..4446ed2 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -151,11 +151,6 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo
return blocks << (ilog2(sdev->sector_size) - 9);
}
-static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
-{
- return blocks * sdev->sector_size;
-}
-
/*
* Look up the DIX operation based on whether the command is read or
* write and whether dix and dif are enabled.
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 1b283b2..db4e7bb 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -83,6 +83,8 @@
tristate "QCOM specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
select PHY_QCOM_UFS
+ select EXTCON
+ select EXTCON_GPIO
help
This selects the QCOM specific additions to UFSHCD platform driver.
UFS host on QCOM needs some vendor specific configuration before
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index de6ecbd..7c5a1bc 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -327,6 +327,20 @@ static int ufshcd_parse_pinctrl_info(struct ufs_hba *hba)
return ret;
}
+static int ufshcd_parse_extcon_info(struct ufs_hba *hba)
+{
+ struct extcon_dev *extcon;
+
+ extcon = extcon_get_edev_by_phandle(hba->dev, 0);
+ if (IS_ERR(extcon) && PTR_ERR(extcon) != -ENODEV)
+ return PTR_ERR(extcon);
+
+ if (!IS_ERR(extcon))
+ hba->extcon = extcon;
+
+ return 0;
+}
+
#ifdef CONFIG_SMP
/**
* ufshcd_pltfrm_suspend - suspend power management function
@@ -449,6 +463,9 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
ufshcd_parse_pm_levels(hba);
ufshcd_parse_gear_limits(hba);
ufshcd_parse_cmd_timeout(hba);
+ err = ufshcd_parse_extcon_info(hba);
+ if (err)
+ goto dealloc_host;
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 6418c11..1707556 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -381,6 +381,8 @@ static int ufshcd_disable_clocks(struct ufs_hba *hba,
bool is_gating_context);
static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
bool is_gating_context);
+static void ufshcd_hold_all(struct ufs_hba *hba);
+static void ufshcd_release_all(struct ufs_hba *hba);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
@@ -2055,6 +2057,22 @@ static void ufshcd_hibern8_enter_work(struct work_struct *work)
return;
}
+static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba,
+ unsigned long delay_ms)
+{
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold_all(hba);
+ ufshcd_scsi_block_requests(hba);
+ down_write(&hba->lock);
+ /* wait for all the outstanding requests to finish */
+ ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+ ufshcd_set_auto_hibern8_timer(hba, delay_ms);
+ up_write(&hba->lock);
+ ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release_all(hba);
+ pm_runtime_put_sync(hba->dev);
+}
+
static void ufshcd_hibern8_exit_work(struct work_struct *work)
{
int ret;
@@ -2106,19 +2124,32 @@ static ssize_t ufshcd_hibern8_on_idle_delay_store(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned long flags, value;
+ bool change = true;
if (kstrtoul(buf, 0, &value))
return -EINVAL;
spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->hibern8_on_idle.delay_ms == value)
+ change = false;
+
+ if (value >= hba->clk_gating.delay_ms_pwr_save ||
+ value >= hba->clk_gating.delay_ms_perf) {
+ dev_err(hba->dev, "hibern8_on_idle_delay (%lu) cannot be >= clkgate_delay_ms_pwr_save (%lu) or clkgate_delay_ms_perf (%lu)\n",
+ value, hba->clk_gating.delay_ms_pwr_save,
+ hba->clk_gating.delay_ms_perf);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return -EINVAL;
+ }
+
hba->hibern8_on_idle.delay_ms = value;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Update auto hibern8 timer value if supported */
- if (ufshcd_is_auto_hibern8_supported(hba) &&
+ if (change && ufshcd_is_auto_hibern8_supported(hba) &&
hba->hibern8_on_idle.is_enabled)
- ufshcd_set_auto_hibern8_timer(hba,
- hba->hibern8_on_idle.delay_ms);
+ __ufshcd_set_auto_hibern8_timer(hba,
+ hba->hibern8_on_idle.delay_ms);
return count;
}
@@ -2148,7 +2179,7 @@ static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev,
/* Update auto hibern8 timer value if supported */
if (ufshcd_is_auto_hibern8_supported(hba)) {
- ufshcd_set_auto_hibern8_timer(hba,
+ __ufshcd_set_auto_hibern8_timer(hba,
value ? hba->hibern8_on_idle.delay_ms : value);
goto update;
}
@@ -6934,6 +6965,23 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
return err;
}
+static int ufshcd_detect_device(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ err = ufshcd_vops_full_reset(hba);
+ if (err)
+ dev_warn(hba->dev, "%s: full reset returned %d\n",
+ __func__, err);
+
+ err = ufshcd_reset_device(hba);
+ if (err)
+ dev_warn(hba->dev, "%s: device reset failed. err %d\n",
+ __func__, err);
+
+ return ufshcd_host_reset_and_restore(hba);
+}
+
/**
* ufshcd_reset_and_restore - reset and re-initialize host/device
* @hba: per-adapter instance
@@ -6950,26 +6998,10 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
int retries = MAX_HOST_RESET_RETRIES;
do {
- err = ufshcd_vops_full_reset(hba);
- if (err)
- dev_warn(hba->dev, "%s: full reset returned %d\n",
- __func__, err);
-
- err = ufshcd_reset_device(hba);
- if (err)
- dev_warn(hba->dev, "%s: device reset failed. err %d\n",
- __func__, err);
-
- err = ufshcd_host_reset_and_restore(hba);
+ err = ufshcd_detect_device(hba);
} while (err && --retries);
/*
- * There is no point proceeding even after failing
- * to recover after multiple retries.
- */
- if (err)
- BUG();
- /*
* After reset the door-bell might be cleared, complete
* outstanding requests in s/w here.
*/
@@ -7672,10 +7704,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
* If we failed to initialize the device or the device is not
* present, turn off the power/clocks etc.
*/
- if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+ if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress)
pm_runtime_put_sync(hba->dev);
- ufshcd_hba_exit(hba);
- }
trace_ufshcd_init(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
@@ -7683,6 +7713,70 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
return ret;
}
+static void ufshcd_card_detect_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+
+ hba = container_of(work, struct ufs_hba, card_detect_work);
+ if (hba->card_detect_event &&
+ (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
+ dev_dbg(hba->dev, "%s: card detect notification received\n",
+ __func__);
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_detect_device(hba);
+ pm_runtime_put_sync(hba->dev);
+ } else {
+ dev_dbg(hba->dev, "%s: card removed notification received\n",
+ __func__);
+ /* TODO: remove the scsi device instances */
+ }
+}
+
+static int ufshcd_card_detect_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct ufs_hba *hba = container_of(nb, struct ufs_hba, card_detect_nb);
+
+ hba->card_detect_event = event;
+ schedule_work(&hba->card_detect_work);
+
+ return NOTIFY_DONE;
+}
+
+static int ufshcd_extcon_register(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (!hba->extcon)
+ return 0;
+
+ hba->card_detect_nb.notifier_call = ufshcd_card_detect_notifier;
+ ret = extcon_register_notifier(hba->extcon,
+ EXTCON_MECHANICAL,
+ &hba->card_detect_nb);
+ if (ret)
+ dev_err(hba->dev, "%s: extcon_register_notifier() failed, ret %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int ufshcd_extcon_unregister(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (!hba->extcon)
+ return 0;
+
+ ret = extcon_unregister_notifier(hba->extcon, EXTCON_MECHANICAL,
+ &hba->card_detect_nb);
+ if (ret)
+ dev_err(hba->dev, "%s: extcon_unregister_notifier() failed, ret %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
/**
* ufshcd_async_scan - asynchronous execution for probing hba
* @data: data pointer to pass to this function
@@ -7699,6 +7793,8 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
ufshcd_hold_all(hba);
ufshcd_probe_hba(hba);
ufshcd_release_all(hba);
+
+ ufshcd_extcon_register(hba);
}
/**
@@ -8408,20 +8504,9 @@ static int ufshcd_variant_hba_init(struct ufs_hba *hba)
err = ufshcd_vops_init(hba);
if (err)
- goto out;
-
- err = ufshcd_vops_setup_regulators(hba, true);
- if (err)
- goto out_exit;
-
- goto out;
-
-out_exit:
- ufshcd_vops_exit(hba);
-out:
- if (err)
dev_err(hba->dev, "%s: variant %s init failed err %d\n",
__func__, ufshcd_get_var_name(hba), err);
+out:
return err;
}
@@ -8430,8 +8515,6 @@ static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
if (!hba->var || !hba->var->vops)
return;
- ufshcd_vops_setup_regulators(hba, false);
-
ufshcd_vops_exit(hba);
}
@@ -8490,6 +8573,7 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
if (hba->is_powered) {
+ ufshcd_extcon_unregister(hba);
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
if (ufshcd_is_clkscaling_supported(hba)) {
@@ -8794,10 +8878,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto enable_gating;
/* UFS device & link must be active before we enter in this function */
- if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
- ret = -EINVAL;
- goto enable_gating;
- }
+ if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba))
+ goto set_vreg_lpm;
if (ufshcd_is_runtime_pm(pm_op)) {
if (ufshcd_can_autobkops_during_suspend(hba)) {
@@ -8833,6 +8915,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_is_hibern8_on_idle_allowed(hba))
hba->hibern8_on_idle.state = HIBERN8_ENTERED;
+set_vreg_lpm:
ufshcd_vreg_set_lpm(hba);
disable_clks:
@@ -8935,6 +9018,9 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ret)
goto disable_vreg;
+ if (ufshcd_is_link_off(hba))
+ goto skip_dev_ops;
+
if (ufshcd_is_link_hibern8(hba)) {
ret = ufshcd_uic_hibern8_exit(hba);
if (!ret) {
@@ -8982,6 +9068,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
+skip_dev_ops:
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release_all(hba);
goto out;
@@ -10043,6 +10130,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Initialize work queues */
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+ INIT_WORK(&hba->card_detect_work, ufshcd_card_detect_handler);
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 77ccc39..a485885 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -57,6 +57,7 @@
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
+#include <linux/extcon.h>
#include "unipro.h"
#include <asm/irq.h>
@@ -724,6 +725,10 @@ struct ufshcd_cmd_log {
* @ufs_stats: ufshcd statistics to be used via debugfs
* @debugfs_files: debugfs files associated with the ufs stats
* @ufshcd_dbg_print: Bitmask for enabling debug prints
+ * @extcon: pointer to external connector device
+ * @card_detect_nb: card detector notifier registered with @extcon
+ * @card_detect_work: work to execute the card detect function
+ * @card_detect_event: card detect event, 0 = removed, 1 = inserted
* @vreg_info: UFS device voltage regulator information
* @clk_list_head: UFS host controller clocks list node head
* @pwr_info: holds current power mode
@@ -896,6 +901,11 @@ struct ufs_hba {
/* Bitmask for enabling debug prints */
u32 ufshcd_dbg_print;
+ struct extcon_dev *extcon;
+ struct notifier_block card_detect_nb;
+ struct work_struct card_detect_work;
+ unsigned long card_detect_event;
+
struct ufs_pa_layer_attr pwr_info;
struct ufs_pwr_mode_info max_pwr_info;
@@ -1289,8 +1299,8 @@ static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
- if (hba->var && hba->var->vops && hba->var->vops->apply_dev_quirks)
- return hba->var->vops->apply_dev_quirks(hba);
+ if (hba->var && hba->var->vops && hba->var->vops->suspend)
+ return hba->var->vops->suspend(hba, op);
return 0;
}
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 3c4759c..453faa8 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -702,7 +702,8 @@ static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id,
err = true;
} else if (intent->data == NULL) {
if (einfo->intentless) {
- intent->data = kmalloc(cmd.frag_size, GFP_ATOMIC);
+ intent->data = kmalloc(cmd.frag_size,
+ __GFP_ATOMIC | __GFP_HIGH);
if (!intent->data) {
err = true;
GLINK_ERR(
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index e3b5826..b5bb719 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -191,8 +191,8 @@ enum icnss_driver_state {
ICNSS_FW_TEST_MODE,
ICNSS_PM_SUSPEND,
ICNSS_PM_SUSPEND_NOIRQ,
- ICNSS_SSR_ENABLED,
- ICNSS_PDR_ENABLED,
+ ICNSS_SSR_REGISTERED,
+ ICNSS_PDR_REGISTERED,
ICNSS_PD_RESTART,
ICNSS_MSA0_ASSIGNED,
ICNSS_WLFW_EXISTS,
@@ -2347,7 +2347,7 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
if (code != SUBSYS_BEFORE_SHUTDOWN)
return NOTIFY_OK;
- if (test_bit(ICNSS_PDR_ENABLED, &priv->state))
+ if (test_bit(ICNSS_PDR_REGISTERED, &priv->state))
return NOTIFY_OK;
icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
@@ -2388,14 +2388,14 @@ static int icnss_modem_ssr_register_notifier(struct icnss_priv *priv)
icnss_pr_err("Modem register notifier failed: %d\n", ret);
}
- set_bit(ICNSS_SSR_ENABLED, &priv->state);
+ set_bit(ICNSS_SSR_REGISTERED, &priv->state);
return ret;
}
static int icnss_modem_ssr_unregister_notifier(struct icnss_priv *priv)
{
- if (!test_and_clear_bit(ICNSS_SSR_ENABLED, &priv->state))
+ if (!test_and_clear_bit(ICNSS_SSR_REGISTERED, &priv->state))
return 0;
subsys_notif_unregister_notifier(priv->modem_notify_handler,
@@ -2409,7 +2409,7 @@ static int icnss_pdr_unregister_notifier(struct icnss_priv *priv)
{
int i;
- if (!test_and_clear_bit(ICNSS_PDR_ENABLED, &priv->state))
+ if (!test_and_clear_bit(ICNSS_PDR_REGISTERED, &priv->state))
return 0;
for (i = 0; i < priv->total_domains; i++)
@@ -2533,9 +2533,10 @@ static int icnss_get_service_location_notify(struct notifier_block *nb,
priv->service_notifier = notifier;
priv->total_domains = pd->total_domains;
- set_bit(ICNSS_PDR_ENABLED, &priv->state);
+ set_bit(ICNSS_PDR_REGISTERED, &priv->state);
- icnss_pr_dbg("PD restart enabled, state: 0x%lx\n", priv->state);
+ icnss_pr_dbg("PD notification registration happened, state: 0x%lx\n",
+ priv->state);
return NOTIFY_OK;
@@ -3190,7 +3191,7 @@ int icnss_trigger_recovery(struct device *dev)
goto out;
}
- if (!test_bit(ICNSS_PDR_ENABLED, &priv->state)) {
+ if (!test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
icnss_pr_err("PD restart not enabled to trigger recovery: state: 0x%lx\n",
priv->state);
ret = -EOPNOTSUPP;
@@ -3644,11 +3645,11 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
case ICNSS_PM_SUSPEND_NOIRQ:
seq_puts(s, "PM SUSPEND NOIRQ");
continue;
- case ICNSS_SSR_ENABLED:
- seq_puts(s, "SSR ENABLED");
+ case ICNSS_SSR_REGISTERED:
+ seq_puts(s, "SSR REGISTERED");
continue;
- case ICNSS_PDR_ENABLED:
- seq_puts(s, "PDR ENABLED");
+ case ICNSS_PDR_REGISTERED:
+ seq_puts(s, "PDR REGISTERED");
continue;
case ICNSS_PD_RESTART:
seq_puts(s, "PD RESTART");
@@ -4262,6 +4263,11 @@ static int icnss_probe(struct platform_device *pdev)
icnss_debugfs_create(priv);
+ ret = device_init_wakeup(&priv->pdev->dev, true);
+ if (ret)
+ icnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
+ ret);
+
penv = priv;
icnss_pr_info("Platform driver probed successfully\n");
@@ -4282,6 +4288,8 @@ static int icnss_remove(struct platform_device *pdev)
{
icnss_pr_info("Removing driver: state: 0x%lx\n", penv->state);
+ device_init_wakeup(&penv->pdev->dev, false);
+
icnss_debugfs_destroy(penv);
icnss_modem_ssr_unregister_notifier(penv);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
index 9d0adbb..e38c53e 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -706,8 +706,6 @@ int bcm_remove_handoff_req(struct device *dev, void *data)
struct msm_bus_node_device_type *cur_rsc = NULL;
int ret = 0;
- rt_mutex_lock(&msm_bus_adhoc_lock);
-
bus_dev = to_msm_bus_node(dev);
if (bus_dev->node_info->is_bcm_dev ||
bus_dev->node_info->is_fab_dev ||
@@ -730,7 +728,6 @@ int bcm_remove_handoff_req(struct device *dev, void *data)
}
exit_bcm_remove_handoff_req:
- rt_mutex_unlock(&msm_bus_adhoc_lock);
return ret;
}
@@ -857,14 +854,18 @@ static void commit_data(void)
INIT_LIST_HEAD(&commit_list);
}
-void commit_late_init_data(void)
+int commit_late_init_data(void)
{
+ int rc;
rt_mutex_lock(&msm_bus_adhoc_lock);
+ rc = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+ bcm_remove_handoff_req);
msm_bus_commit_data(&late_init_clist);
INIT_LIST_HEAD(&late_init_clist);
rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return rc;
}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index 458cf0d..144b1a1 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -1689,15 +1689,9 @@ int __init msm_bus_device_init_driver(void)
int __init msm_bus_device_late_init(void)
{
- int rc;
-
MSM_BUS_ERR("msm_bus_late_init: Remove handoff bw requests\n");
init_time = false;
- rc = bus_for_each_dev(&msm_bus_type, NULL, NULL,
- bcm_remove_handoff_req);
-
- commit_late_init_data();
- return rc;
+ return commit_late_init_data();
}
subsys_initcall(msm_bus_device_init_driver);
late_initcall_sync(msm_bus_device_late_init);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
index f7f17c3..cd5281a 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -205,7 +205,7 @@ int msm_bus_enable_limiter(struct msm_bus_node_device_type *nodedev,
int throttle_en, uint64_t lim_bw);
int msm_bus_commit_data(struct list_head *clist);
int bcm_remove_handoff_req(struct device *dev, void *data);
-void commit_late_init_data(void);
+int commit_late_init_data(void);
int msm_bus_query_gen(struct list_head *qlist,
struct msm_bus_tcs_usecase *tcs_usecase);
void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index df0c609c..7984dfe 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -37,7 +37,7 @@
#define MAX_VDD_MSS_UV 1150000
#define PROXY_TIMEOUT_MS 10000
-#define MAX_SSR_REASON_LEN 81U
+#define MAX_SSR_REASON_LEN 256U
#define STOP_ACK_TIMEOUT_MS 1000
#define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc)
@@ -276,6 +276,10 @@ static int pil_mss_loadable_init(struct modem_data *drv,
if (!res) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"restart_reg_sec");
+ if (!res) {
+ dev_err(&pdev->dev, "No restart register defined\n");
+ return -ENOMEM;
+ }
q6->restart_reg_sec = true;
}
diff --git a/drivers/soc/qcom/qdsp6v2/audio_notifier.c b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
index 414c123..2320fea 100644
--- a/drivers/soc/qcom/qdsp6v2/audio_notifier.c
+++ b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
@@ -626,9 +626,11 @@ static int __init audio_notifier_late_init(void)
* If pdr registration failed, register clients on next service
* Do in late init to ensure that SSR subsystem is initialized
*/
+ mutex_lock(&notifier_mutex);
if (!audio_notifer_is_service_enabled(AUDIO_NOTIFIER_PDR_SERVICE))
audio_notifer_reg_all_clients();
+ mutex_unlock(&notifier_mutex);
return 0;
}
late_initcall(audio_notifier_late_init);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 5ca0fe5..306510f 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -34,17 +34,21 @@
#define RPMH_MAX_REQ_IN_BATCH 10
#define DEFINE_RPMH_MSG_ONSTACK(rc, s, q, c, name) \
- struct rpmh_msg name = { \
- .msg = { 0 }, \
- .msg.state = s, \
- .msg.is_complete = true, \
- .msg.payload = name.cmd, \
- .msg.num_payload = 0, \
- .cmd = { { 0 } }, \
- .waitq = q, \
- .wait_count = c, \
- .rc = rc, \
- .bit = -1, \
+ struct rpmh_msg name = { \
+ .msg = { \
+ .state = s, \
+ .payload = name.cmd, \
+ .num_payload = 0, \
+ .is_read = false, \
+ .is_control = false, \
+ .is_complete = true, \
+ .invalidate = false, \
+ }, \
+ .cmd = { { 0 } }, \
+ .completion = q, \
+ .wait_count = c, \
+ .rc = rc, \
+ .bit = -1, \
}
struct rpmh_req {
@@ -57,7 +61,7 @@ struct rpmh_req {
struct rpmh_msg {
struct tcs_mbox_msg msg;
struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
- wait_queue_head_t *waitq;
+ struct completion *completion;
atomic_t *wait_count;
struct rpmh_client *rc;
int bit;
@@ -106,21 +110,31 @@ static struct rpmh_msg *get_msg_from_pool(struct rpmh_client *rc)
return msg;
}
+static void free_msg_to_pool(struct rpmh_msg *rpm_msg)
+{
+ struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
+ unsigned long flags;
+
+ /* If we allocated the pool, set it as available */
+ if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
+ spin_lock_irqsave(&rpm->lock, flags);
+ bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
+ spin_unlock_irqrestore(&rpm->lock, flags);
+ }
+}
+
static void rpmh_rx_cb(struct mbox_client *cl, void *msg)
{
struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
atomic_dec(rpm_msg->wait_count);
- wake_up(rpm_msg->waitq);
}
static void rpmh_tx_done(struct mbox_client *cl, void *msg, int r)
{
struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
- struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
atomic_t *wc = rpm_msg->wait_count;
- wait_queue_head_t *waitq = rpm_msg->waitq;
- unsigned long flags;
+ struct completion *compl = rpm_msg->completion;
rpm_msg->err = r;
@@ -144,18 +158,12 @@ static void rpmh_tx_done(struct mbox_client *cl, void *msg, int r)
* into an issue that the stack allocated parent object may be
* invalid before we can check the ->bit value.
*/
-
- /* If we allocated the pool, set it as available */
- if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
- spin_lock_irqsave(&rpm->lock, flags);
- bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
- spin_unlock_irqrestore(&rpm->lock, flags);
- }
+ free_msg_to_pool(rpm_msg);
/* Signal the blocking thread we are done */
if (wc && atomic_dec_and_test(wc))
- if (waitq)
- wake_up(waitq);
+ if (compl)
+ complete(compl);
}
static struct rpmh_req *__find_req(struct rpmh_client *rc, u32 addr)
@@ -312,9 +320,9 @@ EXPORT_SYMBOL(rpmh_write_single_async);
int rpmh_write_single(struct rpmh_client *rc, enum rpmh_state state,
u32 addr, u32 data)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+ DECLARE_COMPLETION_ONSTACK(compl);
atomic_t wait_count = ATOMIC_INIT(1);
- DEFINE_RPMH_MSG_ONSTACK(rc, state, &waitq, &wait_count, rpm_msg);
+ DEFINE_RPMH_MSG_ONSTACK(rc, state, &compl, &wait_count, rpm_msg);
int ret;
if (IS_ERR_OR_NULL(rc))
@@ -333,7 +341,7 @@ int rpmh_write_single(struct rpmh_client *rc, enum rpmh_state state,
if (ret < 0)
return ret;
- wait_event(waitq, atomic_read(&wait_count) == 0);
+ wait_for_completion(&compl);
return rpm_msg.err;
}
@@ -408,9 +416,9 @@ EXPORT_SYMBOL(rpmh_write_async);
int rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
struct tcs_cmd *cmd, int n)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+ DECLARE_COMPLETION_ONSTACK(compl);
atomic_t wait_count = ATOMIC_INIT(1);
- DEFINE_RPMH_MSG_ONSTACK(rc, state, &waitq, &wait_count, rpm_msg);
+ DEFINE_RPMH_MSG_ONSTACK(rc, state, &compl, &wait_count, rpm_msg);
int ret;
if (IS_ERR_OR_NULL(rc) || !cmd || n <= 0 || n > MAX_RPMH_PAYLOAD)
@@ -428,7 +436,7 @@ int rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
if (ret)
return ret;
- wait_event(waitq, atomic_read(&wait_count) == 0);
+ wait_for_completion(&compl);
return rpm_msg.err;
}
@@ -454,7 +462,7 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
struct tcs_cmd *cmd, int *n)
{
struct rpmh_msg *rpm_msg[RPMH_MAX_REQ_IN_BATCH];
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+ DECLARE_COMPLETION_ONSTACK(compl);
atomic_t wait_count = ATOMIC_INIT(0); /* overwritten */
int count = 0;
int ret, i, j, k;
@@ -507,9 +515,8 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
for (i = 0; i < count; i++) {
rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i]);
if (IS_ERR_OR_NULL(rpm_msg[i])) {
- /* Clean up our call by spoofing tx_done */
for (j = 0 ; j < i; j++)
- rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, 0);
+ free_msg_to_pool(rpm_msg[j]);
return PTR_ERR(rpm_msg[i]);
}
cmd += n[i];
@@ -520,7 +527,7 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
might_sleep();
atomic_set(&wait_count, count);
for (i = 0; i < count; i++) {
- rpm_msg[i]->waitq = &waitq;
+ rpm_msg[i]->completion = &compl;
rpm_msg[i]->wait_count = &wait_count;
/* Bypass caching and write to mailbox directly */
ret = mbox_send_message(rc->chan, &rpm_msg[i]->msg);
@@ -530,15 +537,17 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
break;
}
}
- wait_event(waitq, atomic_read(&wait_count) == (count - i));
+ /* For those unsent requests, spoof tx_done */
+ for (j = i; j < count; j++)
+ rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, ret);
+ wait_for_completion(&compl);
} else {
/* Send Sleep requests to the controller, expect no response */
for (i = 0; i < count; i++) {
- rpm_msg[i]->waitq = NULL;
+ rpm_msg[i]->completion = NULL;
ret = mbox_send_controller_data(rc->chan,
&rpm_msg[i]->msg);
- /* Clean up our call by spoofing tx_done */
- rpmh_tx_done(&rc->client, &rpm_msg[i]->msg, ret);
+ free_msg_to_pool(rpm_msg[i]);
}
return 0;
}
@@ -660,10 +669,10 @@ EXPORT_SYMBOL(rpmh_invalidate);
int rpmh_read(struct rpmh_client *rc, u32 addr, u32 *resp)
{
int ret;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+ DECLARE_COMPLETION_ONSTACK(compl);
atomic_t wait_count = ATOMIC_INIT(2); /* wait for rx_cb and tx_done */
DEFINE_RPMH_MSG_ONSTACK(rc, RPMH_ACTIVE_ONLY_STATE,
- &waitq, &wait_count, rpm_msg);
+ &compl, &wait_count, rpm_msg);
if (IS_ERR_OR_NULL(rc) || !resp)
return -EINVAL;
@@ -684,7 +693,7 @@ int rpmh_read(struct rpmh_client *rc, u32 addr, u32 *resp)
return ret;
/* Wait until the response is received from RPMH */
- wait_event(waitq, atomic_read(&wait_count) == 0);
+ wait_for_completion(&compl);
/* Read the data back from the tcs_mbox_msg structure */
*resp = rpm_msg.cmd[0].data;
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index f381f16..1c7c4a1 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -53,28 +53,25 @@
/* Uncomment the line below to test spcom against modem rather than SP */
/* #define SPCOM_TEST_HLOS_WITH_MODEM 1 */
-/* Uncomment the line below to enable debug messages */
-/* #define DEBUG 1 */
-
#define pr_fmt(fmt) "spcom [%s]: " fmt, __func__
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/errno.h>
-#include <linux/printk.h>
-#include <linux/bitops.h>
-#include <linux/completion.h>
-#include <linux/poll.h>
-#include <linux/dma-mapping.h>
+#include <linux/kernel.h> /* min() */
+#include <linux/module.h> /* MODULE_LICENSE */
+#include <linux/device.h> /* class_create() */
+#include <linux/slab.h> /* kzalloc() */
+#include <linux/fs.h> /* file_operations */
+#include <linux/cdev.h> /* cdev_add() */
+#include <linux/errno.h> /* EINVAL, ETIMEDOUT */
+#include <linux/printk.h> /* pr_err() */
+#include <linux/bitops.h> /* BIT(x) */
+#include <linux/completion.h> /* wait_for_completion_timeout() */
+#include <linux/poll.h> /* POLLOUT */
+#include <linux/dma-mapping.h> /* dma_alloc_coherent() */
#include <linux/platform_device.h>
-#include <linux/of.h>
+#include <linux/of.h> /* of_property_count_strings() */
#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <linux/msm_ion.h>
+#include <linux/delay.h> /* msleep() */
+#include <linux/msm_ion.h> /* msm_ion_client_create() */
#include <soc/qcom/glink.h>
#include <soc/qcom/smem.h>
@@ -82,7 +79,7 @@
#include <uapi/linux/spcom.h>
-#include "glink_private.h"
+#include "glink_private.h" /* glink_ssr() */
/* "SPCM" string */
#define SPCOM_MAGIC_ID ((uint32_t)(0x5350434D))
@@ -220,9 +217,9 @@ struct spcom_channel {
bool tx_abort;
/* rx data info */
- int rx_buf_size; /* allocated rx buffer size */
+ size_t rx_buf_size; /* allocated rx buffer size */
bool rx_buf_ready;
- int actual_rx_size; /* actual data size received */
+ size_t actual_rx_size; /* actual data size received */
const void *glink_rx_buf;
/* ION lock/unlock support */
@@ -276,6 +273,7 @@ static void spcom_notify_rx_abort(void *handle, const void *priv,
const void *pkt_priv);
static struct spcom_channel *spcom_find_channel_by_name(const char *name);
static int spcom_unlock_ion_buf(struct spcom_channel *ch, int fd);
+static void spcom_rx_abort_pending_server(void);
/**
* spcom_is_ready() - driver is initialized and ready.
@@ -301,6 +299,10 @@ static inline bool spcom_is_channel_open(struct spcom_channel *ch)
*/
static inline bool spcom_is_channel_connected(struct spcom_channel *ch)
{
+ /* Channel must be open before it gets connected */
+ if (!spcom_is_channel_open(ch))
+ return false;
+
return (ch->glink_state == GLINK_CONNECTED);
}
@@ -316,6 +318,10 @@ static int spcom_create_predefined_channels_chardev(void)
{
int i;
int ret;
+ static bool is_predefined_created;
+
+ if (is_predefined_created)
+ return 0;
for (i = 0; i < SPCOM_MAX_CHANNELS; i++) {
const char *name = spcom_dev->predefined_ch_name[i];
@@ -330,6 +336,8 @@ static int spcom_create_predefined_channels_chardev(void)
}
}
+ is_predefined_created = true;
+
return 0;
}
@@ -352,6 +360,16 @@ static void spcom_link_state_notif_cb(struct glink_link_state_cb_info *cb_info,
struct spcom_channel *ch = NULL;
const char *ch_name = "sp_kernel";
+ if (!cb_info) {
+ pr_err("invalid NULL cb_info param\n");
+ return;
+ }
+
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return;
+ }
+
spcom_dev->link_state = cb_info->link_state;
pr_debug("spcom_link_state_notif_cb called. transport = %s edge = %s\n",
@@ -375,6 +393,9 @@ static void spcom_link_state_notif_cb(struct glink_link_state_cb_info *cb_info,
pr_err("failed to find channel [%s].\n", ch_name);
else
spcom_unlock_ion_buf(ch, SPCOM_ION_FD_UNLOCK_ALL);
+
+ pr_debug("Rx-Abort pending servers.\n");
+ spcom_rx_abort_pending_server();
break;
default:
pr_err("unknown link_state [%d].\n", cb_info->link_state);
@@ -396,13 +417,17 @@ static void spcom_notify_rx(void *handle,
struct spcom_channel *ch = (struct spcom_channel *) priv;
if (!ch) {
- pr_err("invalid ch parameter.\n");
+ pr_err("invalid NULL channel param\n");
+ return;
+ }
+ if (!buf) {
+ pr_err("invalid NULL buf param\n");
return;
}
- pr_debug("ch [%s] rx size [%d].\n", ch->name, (int) size);
+ pr_debug("ch [%s] rx size [%zu]\n", ch->name, size);
- ch->actual_rx_size = (int) size;
+ ch->actual_rx_size = size;
ch->glink_rx_buf = (void *) buf;
complete_all(&ch->rx_done);
@@ -421,7 +446,11 @@ static void spcom_notify_tx_done(void *handle,
int *tx_buf = (int *) buf;
if (!ch) {
- pr_err("invalid ch parameter.\n");
+ pr_err("invalid NULL channel param\n");
+ return;
+ }
+ if (!buf) {
+ pr_err("invalid NULL buf param\n");
return;
}
@@ -446,11 +475,15 @@ static void spcom_notify_state(void *handle, const void *priv,
int ret;
struct spcom_channel *ch = (struct spcom_channel *) priv;
+ if (!ch) {
+ pr_err("invalid NULL channel param\n");
+ return;
+ }
+
switch (event) {
case GLINK_CONNECTED:
pr_debug("GLINK_CONNECTED, ch name [%s].\n", ch->name);
- complete_all(&ch->connect);
-
+ ch->glink_state = event;
/*
* if spcom_notify_state() is called within glink_open()
* then ch->glink_handle is not updated yet.
@@ -466,10 +499,11 @@ static void spcom_notify_state(void *handle, const void *priv,
if (ret) {
pr_err("glink_queue_rx_intent() err [%d]\n", ret);
} else {
- pr_debug("rx buf is ready, size [%d].\n",
+ pr_debug("rx buf is ready, size [%zu].\n",
ch->rx_buf_size);
ch->rx_buf_ready = true;
}
+ complete_all(&ch->connect);
break;
case GLINK_LOCAL_DISCONNECTED:
/*
@@ -477,6 +511,7 @@ static void spcom_notify_state(void *handle, const void *priv,
* only after *both* sides closed the channel.
*/
pr_debug("GLINK_LOCAL_DISCONNECTED, ch [%s].\n", ch->name);
+ ch->glink_state = event;
complete_all(&ch->disconnect);
break;
case GLINK_REMOTE_DISCONNECTED:
@@ -487,6 +522,8 @@ static void spcom_notify_state(void *handle, const void *priv,
*/
pr_err("GLINK_REMOTE_DISCONNECTED, ch [%s].\n", ch->name);
+ ch->glink_state = event;
+
/*
* Abort any blocking read() operation.
* The glink notification might be after REMOTE_DISCONNECT.
@@ -504,8 +541,6 @@ static void spcom_notify_state(void *handle, const void *priv,
(int) event, ch->name);
return;
}
-
- ch->glink_state = event;
}
/**
@@ -539,9 +574,14 @@ static void spcom_notify_rx_abort(void *handle, const void *priv,
{
struct spcom_channel *ch = (struct spcom_channel *) priv;
+ if (!ch) {
+ pr_err("invalid NULL channel param\n");
+ return;
+ }
+
pr_debug("ch [%s] pending rx aborted.\n", ch->name);
- if (spcom_is_channel_connected(ch) && (!ch->rx_abort)) {
+ if (spcom_is_channel_open(ch) && (!ch->rx_abort)) {
ch->rx_abort = true;
complete_all(&ch->rx_done);
}
@@ -559,6 +599,11 @@ static void spcom_notify_tx_abort(void *handle, const void *priv,
{
struct spcom_channel *ch = (struct spcom_channel *) priv;
+ if (!ch) {
+ pr_err("invalid NULL channel param\n");
+ return;
+ }
+
pr_debug("ch [%s] pending tx aborted.\n", ch->name);
if (spcom_is_channel_connected(ch) && (!ch->tx_abort)) {
@@ -672,12 +717,11 @@ static int spcom_open(struct spcom_channel *ch, unsigned int timeout_msec)
/* only one client/server may use the channel */
if (ch->ref_count) {
- pr_err("channel [%s] already in use.\n", name);
- goto exit_err;
+ pr_err("channel [%s] is BUSY, already in use by pid [%d].\n",
+ name, ch->pid);
+ mutex_unlock(&ch->lock);
+ return -EBUSY;
}
- ch->ref_count++;
- ch->pid = current_pid();
- ch->txn_id = INITIAL_TXN_ID;
pr_debug("ch [%s] opened by PID [%d], count [%d]\n",
name, ch->pid, ch->ref_count);
@@ -702,7 +746,12 @@ static int spcom_open(struct spcom_channel *ch, unsigned int timeout_msec)
} else {
pr_debug("glink_open [%s] ok.\n", name);
}
+
+ /* init channel context after successful open */
ch->glink_handle = handle;
+ ch->ref_count++;
+ ch->pid = current_pid();
+ ch->txn_id = INITIAL_TXN_ID;
pr_debug("Wait for connection on channel [%s] timeout_msec [%d].\n",
name, timeout_msec);
@@ -776,6 +825,8 @@ static int spcom_close(struct spcom_channel *ch)
* @size: buffer size
*
* ACK is expected within a very short time (few msec).
+ *
+ * Return: 0 on successful operation, negative value otherwise.
*/
static int spcom_tx(struct spcom_channel *ch,
void *buf,
@@ -840,13 +891,15 @@ static int spcom_tx(struct spcom_channel *ch,
* @size: buffer size
*
* ACK is expected within a very short time (few msec).
+ *
+ * Return: size in bytes on success, negative value on failure.
*/
static int spcom_rx(struct spcom_channel *ch,
void *buf,
uint32_t size,
uint32_t timeout_msec)
{
- int ret;
+ int ret = -1;
unsigned long jiffies = msecs_to_jiffies(timeout_msec);
long timeleft = 1;
@@ -854,7 +907,7 @@ static int spcom_rx(struct spcom_channel *ch,
/* check for already pending data */
if (ch->actual_rx_size) {
- pr_debug("already pending data size [%d].\n",
+ pr_debug("already pending data size [%zu]\n",
ch->actual_rx_size);
goto copy_buf;
}
@@ -871,23 +924,24 @@ static int spcom_rx(struct spcom_channel *ch,
if (timeleft == 0) {
pr_err("rx_done timeout [%d] msec expired.\n", timeout_msec);
- goto exit_err;
+ mutex_unlock(&ch->lock);
+ return -ETIMEDOUT;
} else if (ch->rx_abort) {
- pr_err("rx aborted.\n");
- goto exit_err;
+ mutex_unlock(&ch->lock);
+ return -ERESTART; /* probably SSR */
} else if (ch->actual_rx_size) {
- pr_debug("actual_rx_size is [%d].\n", ch->actual_rx_size);
+ pr_debug("actual_rx_size is [%zu]\n", ch->actual_rx_size);
} else {
pr_err("actual_rx_size is zero.\n");
goto exit_err;
}
+copy_buf:
if (!ch->glink_rx_buf) {
pr_err("invalid glink_rx_buf.\n");
goto exit_err;
}
-copy_buf:
/* Copy from glink buffer to spcom buffer */
size = min_t(int, ch->actual_rx_size, size);
memcpy(buf, ch->glink_rx_buf, size);
@@ -905,7 +959,7 @@ static int spcom_rx(struct spcom_channel *ch,
pr_err("glink_queue_rx_intent() failed, ret [%d]", ret);
goto exit_err;
} else {
- pr_debug("queue rx_buf, size [%d].\n", ch->rx_buf_size);
+ pr_debug("queue rx_buf, size [%zu]\n", ch->rx_buf_size);
}
mutex_unlock(&ch->lock);
@@ -925,6 +979,8 @@ static int spcom_rx(struct spcom_channel *ch,
* Server needs the size of the next request to allocate a request buffer.
* Initially used intent-request, however this complicated the remote side,
* so both sides are not using glink_tx() with INTENT_REQ anymore.
+ *
+ * Return: size in bytes on success, negative value on failure.
*/
static int spcom_get_next_request_size(struct spcom_channel *ch)
{
@@ -936,15 +992,22 @@ static int spcom_get_next_request_size(struct spcom_channel *ch)
/* check if already got it via callback */
if (ch->actual_rx_size) {
- pr_debug("next-req-size already ready ch [%s] size [%d].\n",
+ pr_debug("next-req-size already ready ch [%s] size [%zu]\n",
ch->name, ch->actual_rx_size);
goto exit_ready;
}
pr_debug("Wait for Rx Done, ch [%s].\n", ch->name);
wait_for_completion(&ch->rx_done);
+
+ /* Check Rx Abort on SP reset */
+ if (ch->rx_abort) {
+ pr_err("rx aborted.\n");
+ goto exit_error;
+ }
+
if (ch->actual_rx_size <= 0) {
- pr_err("invalid rx size [%d] ch [%s].\n",
+ pr_err("invalid rx size [%zu] ch [%s]\n",
ch->actual_rx_size, ch->name);
goto exit_error;
}
@@ -968,6 +1031,27 @@ static int spcom_get_next_request_size(struct spcom_channel *ch)
}
+/**
+ * spcom_rx_abort_pending_server() - abort pending server rx on SSR.
+ *
+ * A server that is waiting for a request but has no client connected
+ * will not get the RX-ABORT or REMOTE-DISCONNECT notification
+ * that should cancel its pending rx operation.
+ */
+static void spcom_rx_abort_pending_server(void)
+{
+ int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(spcom_dev->channels); i++) {
+ struct spcom_channel *ch = &spcom_dev->channels[i];
+
+ if (ch->is_server) {
+ pr_debug("rx-abort server on ch [%s].\n", ch->name);
+ spcom_notify_rx_abort(NULL, ch, NULL);
+ }
+ }
+}
+
/*======================================================================*/
/* General API for kernel drivers */
/*======================================================================*/
@@ -979,6 +1063,9 @@ static int spcom_get_next_request_size(struct spcom_channel *ch)
*/
bool spcom_is_sp_subsystem_link_up(void)
{
+ if (spcom_dev == NULL)
+ return false;
+
return (spcom_dev->link_state == GLINK_LINK_STATE_UP);
}
EXPORT_SYMBOL(spcom_is_sp_subsystem_link_up);
@@ -1001,6 +1088,11 @@ struct spcom_client *spcom_register_client(struct spcom_client_info *info)
struct spcom_channel *ch;
struct spcom_client *client;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return NULL;
+ }
+
if (!info) {
pr_err("Invalid parameter.\n");
return NULL;
@@ -1042,17 +1134,26 @@ int spcom_unregister_client(struct spcom_client *client)
{
struct spcom_channel *ch;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return -ENODEV;
+ }
+
if (!client) {
- pr_err("Invalid parameter.\n");
+ pr_err("Invalid client parameter.\n");
return -EINVAL;
}
ch = client->ch;
-
- kfree(client);
+ if (!ch) {
+ pr_err("Invalid channel.\n");
+ return -EINVAL;
+ }
spcom_close(ch);
+ kfree(client);
+
return 0;
}
EXPORT_SYMBOL(spcom_unregister_client);
@@ -1069,6 +1170,8 @@ EXPORT_SYMBOL(spcom_unregister_client);
* @timeout_msec: timeout waiting for response.
*
* The timeout depends on the specific request handling time at the remote side.
+ *
+ * Return: number of rx bytes on success, negative value on failure.
*/
int spcom_client_send_message_sync(struct spcom_client *client,
void *req_ptr,
@@ -1080,12 +1183,21 @@ int spcom_client_send_message_sync(struct spcom_client *client,
int ret;
struct spcom_channel *ch;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return -ENODEV;
+ }
+
if (!client || !req_ptr || !resp_ptr) {
pr_err("Invalid parameter.\n");
return -EINVAL;
}
ch = client->ch;
+ if (!ch) {
+ pr_err("Invalid channel.\n");
+ return -EINVAL;
+ }
/* Check if remote side connect */
if (!spcom_is_channel_connected(ch)) {
@@ -1120,13 +1232,25 @@ EXPORT_SYMBOL(spcom_client_send_message_sync);
bool spcom_client_is_server_connected(struct spcom_client *client)
{
bool connected;
+ struct spcom_channel *ch;
+
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return false;
+ }
if (!client) {
pr_err("Invalid parameter.\n");
+ return false;
+ }
+
+ ch = client->ch;
+ if (!ch) {
+ pr_err("Invalid channel.\n");
return -EINVAL;
}
- connected = spcom_is_channel_connected(client->ch);
+ connected = spcom_is_channel_connected(ch);
return connected;
}
@@ -1150,6 +1274,11 @@ struct spcom_server *spcom_register_service(struct spcom_service_info *info)
struct spcom_channel *ch;
struct spcom_server *server;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return NULL;
+ }
+
if (!info) {
pr_err("Invalid parameter.\n");
return NULL;
@@ -1188,17 +1317,26 @@ int spcom_unregister_service(struct spcom_server *server)
{
struct spcom_channel *ch;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return -ENODEV;
+ }
+
if (!server) {
- pr_err("Invalid parameter.\n");
+ pr_err("Invalid server parameter.\n");
return -EINVAL;
}
ch = server->ch;
-
- kfree(server);
+ if (!ch) {
+ pr_err("Invalid channel parameter.\n");
+ return -EINVAL;
+ }
spcom_close(ch);
+ kfree(server);
+
return 0;
}
EXPORT_SYMBOL(spcom_unregister_service);
@@ -1208,7 +1346,7 @@ EXPORT_SYMBOL(spcom_unregister_service);
*
* @server: server handle
*
- * Return: request size in bytes.
+ * Return: size in bytes on success, negative value on failure.
*/
int spcom_server_get_next_request_size(struct spcom_server *server)
{
@@ -1221,6 +1359,10 @@ int spcom_server_get_next_request_size(struct spcom_server *server)
}
ch = server->ch;
+ if (!ch) {
+ pr_err("Invalid channel.\n");
+ return -EINVAL;
+ }
/* Check if remote side connect */
if (!spcom_is_channel_connected(ch)) {
@@ -1243,7 +1385,7 @@ EXPORT_SYMBOL(spcom_server_get_next_request_size);
* @req_ptr: request buffer pointer
* @req_size: max request size
*
- * Return: request size in bytes.
+ * Return: size in bytes on success, negative value on failure.
*/
int spcom_server_wait_for_request(struct spcom_server *server,
void *req_ptr,
@@ -1252,12 +1394,21 @@ int spcom_server_wait_for_request(struct spcom_server *server,
int ret;
struct spcom_channel *ch;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return -ENODEV;
+ }
+
if (!server || !req_ptr) {
pr_err("Invalid parameter.\n");
return -EINVAL;
}
ch = server->ch;
+ if (!ch) {
+ pr_err("Invalid channel.\n");
+ return -EINVAL;
+ }
/* Check if remote side connect */
if (!spcom_is_channel_connected(ch)) {
@@ -1285,12 +1436,21 @@ int spcom_server_send_response(struct spcom_server *server,
int ret;
struct spcom_channel *ch;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return -ENODEV;
+ }
+
if (!server || !resp_ptr) {
pr_err("Invalid parameter.\n");
return -EINVAL;
}
ch = server->ch;
+ if (!ch) {
+ pr_err("Invalid channel.\n");
+ return -EINVAL;
+ }
/* Check if remote side connect */
if (!spcom_is_channel_connected(ch)) {
@@ -1322,6 +1482,7 @@ static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
int ret = 0;
struct spcom_user_create_channel_command *cmd = cmd_buf;
const char *ch_name;
+ const size_t maxlen = sizeof(cmd->ch_name);
if (cmd_size != sizeof(*cmd)) {
pr_err("cmd_size [%d] , expected [%d].\n",
@@ -1330,6 +1491,10 @@ static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
}
ch_name = cmd->ch_name;
+ if (strnlen(cmd->ch_name, maxlen) == maxlen) {
+ pr_err("channel name is not NULL terminated\n");
+ return -EINVAL;
+ }
pr_debug("ch_name [%s].\n", ch_name);
@@ -1468,7 +1633,7 @@ static int modify_ion_addr(void *buf,
/* Get ION handle from fd */
handle = ion_import_dma_buf_fd(spcom_dev->ion_client, fd);
- if (handle == NULL) {
+ if (IS_ERR_OR_NULL(handle)) {
pr_err("fail to get ion handle.\n");
return -EINVAL;
}
@@ -1629,18 +1794,23 @@ static int spcom_handle_lock_ion_buf_command(struct spcom_channel *ch,
/* Get ION handle from fd - this increments the ref count */
ion_handle = ion_import_dma_buf_fd(spcom_dev->ion_client, fd);
- if (ion_handle == NULL) {
+ if (IS_ERR_OR_NULL(ion_handle)) {
pr_err("fail to get ion handle.\n");
return -EINVAL;
}
+
pr_debug("ion handle ok.\n");
+ /* ION buf lock doesn't involve any rx/tx data to SP. */
+ mutex_lock(&ch->lock);
+
/* Check if this ION buffer is already locked */
for (i = 0 ; i < ARRAY_SIZE(ch->ion_handle_table) ; i++) {
if (ch->ion_handle_table[i] == ion_handle) {
- pr_debug("fd [%d] ion buf is already locked.\n", fd);
+ pr_err("fd [%d] ion buf is already locked.\n", fd);
/* decrement back the ref count */
ion_free(spcom_dev->ion_client, ion_handle);
+ mutex_unlock(&ch->lock);
return -EINVAL;
}
}
@@ -1650,11 +1820,19 @@ static int spcom_handle_lock_ion_buf_command(struct spcom_channel *ch,
if (ch->ion_handle_table[i] == NULL) {
ch->ion_handle_table[i] = ion_handle;
ch->ion_fd_table[i] = fd;
- pr_debug("locked ion buf#[%d], fd [%d].\n", i, fd);
+ pr_debug("ch [%s] locked ion buf #%d, fd [%d].\n",
+ ch->name, i, fd);
+ mutex_unlock(&ch->lock);
return 0;
}
}
+ pr_err("no free entry to store ion handle of fd [%d].\n", fd);
+ /* decrement back the ref count */
+ ion_free(spcom_dev->ion_client, ion_handle);
+
+ mutex_unlock(&ch->lock);
+
return -EFAULT;
}
@@ -1684,20 +1862,24 @@ static int spcom_unlock_ion_buf(struct spcom_channel *ch, int fd)
/* unlock all ION buf */
for (i = 0 ; i < ARRAY_SIZE(ch->ion_handle_table) ; i++) {
if (ch->ion_handle_table[i] != NULL) {
+ pr_debug("unlocked ion buf #%d fd [%d].\n",
+ i, ch->ion_fd_table[i]);
ion_free(ion_client, ch->ion_handle_table[i]);
ch->ion_handle_table[i] = NULL;
ch->ion_fd_table[i] = -1;
- pr_debug("unlocked ion buf#[%d].\n", i);
}
}
} else {
/* unlock specific ION buf */
for (i = 0 ; i < ARRAY_SIZE(ch->ion_handle_table) ; i++) {
+ if (ch->ion_handle_table[i] == NULL)
+ continue;
if (ch->ion_fd_table[i] == fd) {
+ pr_debug("unlocked ion buf #%d fd [%d].\n",
+ i, ch->ion_fd_table[i]);
ion_free(ion_client, ch->ion_handle_table[i]);
ch->ion_handle_table[i] = NULL;
ch->ion_fd_table[i] = -1;
- pr_debug("unlocked ion buf#[%d].\n", i);
found = true;
break;
}
@@ -1731,8 +1913,13 @@ static int spcom_handle_unlock_ion_buf_command(struct spcom_channel *ch,
return -EINVAL;
}
+ /* ION buf unlock doesn't involve any rx/tx data to SP. */
+ mutex_lock(&ch->lock);
+
ret = spcom_unlock_ion_buf(ch, fd);
+ mutex_unlock(&ch->lock);
+
return ret;
}
@@ -1766,9 +1953,9 @@ static int spcom_handle_write(struct spcom_channel *ch,
int swap_id;
char cmd_name[5] = {0}; /* debug only */
- /* opcode field is the minimum length of cmd */
- if (buf_size < sizeof(cmd->cmd_id)) {
- pr_err("Invalid argument user buffer size %d.\n", buf_size);
+ /* Minimal command should have command-id and argument */
+ if (buf_size < sizeof(struct spcom_user_command)) {
+ pr_err("Command buffer size [%d] too small\n", buf_size);
return -EINVAL;
}
@@ -1813,7 +2000,7 @@ static int spcom_handle_write(struct spcom_channel *ch,
* @buf: command buffer.
* @size: command buffer size.
*
- * Return: size in bytes.
+ * Return: size in bytes on success, negative value on failure.
*/
static int spcom_handle_get_req_size(struct spcom_channel *ch,
void *buf,
@@ -1841,7 +2028,7 @@ static int spcom_handle_get_req_size(struct spcom_channel *ch,
* @buf: command buffer.
* @size: command buffer size.
*
- * Return: size in bytes.
+ * Return: size in bytes on success, negative value on failure.
*/
static int spcom_handle_read_req_resp(struct spcom_channel *ch,
void *buf,
@@ -1861,7 +2048,7 @@ static int spcom_handle_read_req_resp(struct spcom_channel *ch,
/* Check param validity */
if (size > SPCOM_MAX_RESPONSE_SIZE) {
- pr_err("ch [%s] inavlid size [%d].\n",
+ pr_err("ch [%s] invalid size [%d].\n",
ch->name, size);
return -EINVAL;
}
@@ -1884,7 +2071,8 @@ static int spcom_handle_read_req_resp(struct spcom_channel *ch,
ret = spcom_rx(ch, rx_buf, rx_buf_size, timeout_msec);
if (ret < 0) {
pr_err("rx error %d.\n", ret);
- goto exit_err;
+ kfree(rx_buf);
+ return ret;
} else {
size = ret; /* actual_rx_size */
}
@@ -1924,7 +2112,7 @@ static int spcom_handle_read_req_resp(struct spcom_channel *ch,
* A special size SPCOM_GET_NEXT_REQUEST_SIZE, which is bigger than the max
* response/request tells the kernel that user space only need the size.
*
- * Return: size in bytes.
+ * Return: size in bytes on success, negative value on failure.
*/
static int spcom_handle_read(struct spcom_channel *ch,
void *buf,
@@ -1932,8 +2120,8 @@ static int spcom_handle_read(struct spcom_channel *ch,
{
if (size == SPCOM_GET_NEXT_REQUEST_SIZE) {
pr_debug("get next request size, ch [%s].\n", ch->name);
- size = spcom_handle_get_req_size(ch, buf, size);
ch->is_server = true;
+ size = spcom_handle_get_req_size(ch, buf, size);
} else {
pr_debug("get request/response, ch [%s].\n", ch->name);
size = spcom_handle_read_req_resp(ch, buf, size);
@@ -1988,6 +2176,10 @@ static int spcom_device_open(struct inode *inode, struct file *filp)
struct spcom_channel *ch;
const char *name = file_to_filename(filp);
+ /* silently return an error until the spss link is up */
+ if (!spcom_is_sp_subsystem_link_up())
+ return -ENODEV;
+
pr_debug("Open file [%s].\n", name);
if (strcmp(name, DEVICE_NAME) == 0) {
@@ -2006,8 +2198,6 @@ static int spcom_device_open(struct inode *inode, struct file *filp)
return -ENODEV;
}
- filp->private_data = ch;
-
ret = spcom_open(ch, OPEN_CHANNEL_TIMEOUT_MSEC);
if (ret == -ETIMEDOUT) {
pr_err("Connection timeout channel [%s].\n", name);
@@ -2016,6 +2206,8 @@ static int spcom_device_open(struct inode *inode, struct file *filp)
return ret;
}
+ filp->private_data = ch;
+
pr_debug("finished.\n");
return 0;
@@ -2036,7 +2228,6 @@ static int spcom_device_release(struct inode *inode, struct file *filp)
{
struct spcom_channel *ch;
const char *name = file_to_filename(filp);
- bool connected = false;
pr_debug("Close file [%s].\n", name);
@@ -2058,19 +2249,18 @@ static int spcom_device_release(struct inode *inode, struct file *filp)
}
/* channel might be already closed or disconnected */
- if (spcom_is_channel_open(ch) && spcom_is_channel_connected(ch))
- connected = true;
+ if (!spcom_is_channel_open(ch)) {
+ pr_err("ch [%s] already closed.\n", name);
+ return 0;
+ }
reinit_completion(&ch->disconnect);
spcom_close(ch);
- if (connected) {
- pr_debug("Wait for event GLINK_LOCAL_DISCONNECTED, ch [%s].\n",
- name);
- wait_for_completion(&ch->disconnect);
- pr_debug("GLINK_LOCAL_DISCONNECTED signaled, ch [%s].\n", name);
- }
+ pr_debug("Wait for event GLINK_LOCAL_DISCONNECTED, ch [%s].\n", name);
+ wait_for_completion(&ch->disconnect);
+ pr_debug("GLINK_LOCAL_DISCONNECTED signaled, ch [%s].\n", name);
return 0;
}
@@ -2102,8 +2292,8 @@ static ssize_t spcom_device_write(struct file *filp,
ch = filp->private_data;
if (!ch) {
- pr_debug("invalid ch pointer.\n");
- /* Allow some special commands via /dev/spcom and /dev/sp_ssr */
+ pr_err("invalid ch pointer, command not allowed.\n");
+ return -EINVAL;
} else {
/* Check if remote side connect */
if (!spcom_is_channel_connected(ch)) {
@@ -2147,7 +2337,7 @@ static ssize_t spcom_device_write(struct file *filp,
}
/**
- * spcom_device_read() - handle channel file write() from user space.
+ * spcom_device_read() - handle channel file read() from user space.
*
* @filp: file pointer
*
@@ -2173,12 +2363,28 @@ static ssize_t spcom_device_read(struct file *filp, char __user *user_buff,
ch = filp->private_data;
+ if (ch == NULL) {
+ pr_err("invalid ch pointer, file [%s].\n", name);
+ return -EINVAL;
+ }
+
+ if (!spcom_is_channel_open(ch)) {
+ pr_err("ch is not open, file [%s].\n", name);
+ return -EINVAL;
+ }
+
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- actual_size = spcom_handle_read(ch, buf, size);
- if ((actual_size <= 0) || (actual_size > size)) {
+ ret = spcom_handle_read(ch, buf, size);
+ if (ret < 0) {
+ pr_err("read error [%d].\n", ret);
+ kfree(buf);
+ return ret;
+ }
+ actual_size = ret;
+ if ((actual_size == 0) || (actual_size > size)) {
pr_err("invalid actual_size [%d].\n", actual_size);
kfree(buf);
return -EFAULT;
@@ -2254,6 +2460,10 @@ static unsigned int spcom_device_poll(struct file *filp,
done = (spcom_dev->link_state == GLINK_LINK_STATE_UP);
break;
case SPCOM_POLL_CH_CONNECT:
+ if (ch == NULL) {
+ pr_err("invalid ch pointer, file [%s].\n", name);
+ return -EINVAL;
+ }
pr_debug("ch [%s] SPCOM_POLL_CH_CONNECT.\n", name);
if (wait) {
reinit_completion(&ch->connect);
@@ -2329,7 +2539,7 @@ static int spcom_create_channel_chardev(const char *name)
devt = spcom_dev->device_no + spcom_dev->channel_count;
priv = ch;
dev = device_create(cls, parent, devt, priv, name);
- if (!dev) {
+ if (IS_ERR(dev)) {
pr_err("device_create failed.\n");
kfree(cdev);
return -ENODEV;
@@ -2382,7 +2592,7 @@ static int __init spcom_register_chardev(void)
spcom_dev->device_no, priv,
DEVICE_NAME);
- if (!spcom_dev->class_dev) {
+ if (IS_ERR(spcom_dev->class_dev)) {
pr_err("class_device_create failed %d\n", ret);
ret = -ENOMEM;
goto exit_destroy_class;
@@ -2435,6 +2645,11 @@ static int spcom_parse_dt(struct device_node *np)
pr_debug("num of predefined channels [%d].\n", num_ch);
+ if (num_ch > ARRAY_SIZE(spcom_dev->predefined_ch_name)) {
+ pr_err("too many predefined channels [%d].\n", num_ch);
+ return -EINVAL;
+ }
+
for (i = 0; i < num_ch; i++) {
ret = of_property_read_string_index(np, propname, i, &name);
if (ret) {
@@ -2500,21 +2715,23 @@ static int spcom_probe(struct platform_device *pdev)
pr_debug("register_link_state_cb(), transport [%s] edge [%s]\n",
link_info.transport, link_info.edge);
notif_handle = glink_register_link_state_cb(&link_info, spcom_dev);
- if (!notif_handle) {
+ if (IS_ERR(notif_handle)) {
pr_err("glink_register_link_state_cb(), err [%d]\n", ret);
goto fail_reg_chardev;
}
spcom_dev->ion_client = msm_ion_client_create(DEVICE_NAME);
- if (spcom_dev->ion_client == NULL) {
+ if (IS_ERR(spcom_dev->ion_client)) {
pr_err("fail to create ion client.\n");
- goto fail_reg_chardev;
+ goto fail_ion_client;
}
pr_info("Driver Initialization ok.\n");
return 0;
+fail_ion_client:
+ glink_unregister_link_state_cb(notif_handle);
fail_reg_chardev:
pr_err("Failed to init driver.\n");
spcom_unregister_chrdev();
@@ -2552,7 +2769,7 @@ static int __init spcom_init(void)
if (ret)
pr_err("spcom_driver register failed %d\n", ret);
- return 0;
+ return ret;
}
module_init(spcom_init);
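The spcom hunks above converge on one pattern: every ION handle imported from a user fd is stored in a small per-channel table under ch->lock, and the extra reference is dropped again on the duplicate and table-full paths before unlocking. A minimal sketch of that pattern, with hypothetical get_buf()/put_buf() helpers standing in for the real ION import/free calls:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mutex.h>

#define MAX_LOCKED_BUFS 4

struct buf_table {
	struct mutex lock;
	void *handle[MAX_LOCKED_BUFS];	/* NULL means the slot is free */
	int fd[MAX_LOCKED_BUFS];
};

/* hypothetical stand-ins for the ION import/free calls used above */
void *get_buf(int fd);
void put_buf(void *handle);

static int lock_buf(struct buf_table *t, int fd)
{
	void *handle = get_buf(fd);
	int i;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	mutex_lock(&t->lock);
	for (i = 0; i < MAX_LOCKED_BUFS; i++) {
		if (t->handle[i] == handle) {
			put_buf(handle);	/* duplicate: drop the extra ref */
			mutex_unlock(&t->lock);
			return -EINVAL;
		}
	}
	for (i = 0; i < MAX_LOCKED_BUFS; i++) {
		if (!t->handle[i]) {
			t->handle[i] = handle;	/* store in first free slot */
			t->fd[i] = fd;
			mutex_unlock(&t->lock);
			return 0;
		}
	}
	put_buf(handle);			/* table full: drop the ref */
	mutex_unlock(&t->lock);
	return -EFAULT;
}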
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 6ff39de..f8f6829 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -36,7 +36,7 @@
#define XO_FREQ 19200000
#define PROXY_TIMEOUT_MS 10000
-#define MAX_SSR_REASON_LEN 81U
+#define MAX_SSR_REASON_LEN 256U
#define STOP_ACK_TIMEOUT_MS 1000
#define CRASH_STOP_ACK_TO_MS 200
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 40e50f2..01ea228 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3798,6 +3798,8 @@ int iscsi_target_tx_thread(void *arg)
{
int ret = 0;
struct iscsi_conn *conn = arg;
+ bool conn_freed = false;
+
/*
* Allow ourselves to be interrupted by SIGINT so that a
* connection recovery / failure event can be triggered externally.
@@ -3823,12 +3825,14 @@ int iscsi_target_tx_thread(void *arg)
goto transport_err;
ret = iscsit_handle_response_queue(conn);
- if (ret == 1)
+ if (ret == 1) {
goto get_immediate;
- else if (ret == -ECONNRESET)
+ } else if (ret == -ECONNRESET) {
+ conn_freed = true;
goto out;
- else if (ret < 0)
+ } else if (ret < 0) {
goto transport_err;
+ }
}
transport_err:
@@ -3838,8 +3842,13 @@ int iscsi_target_tx_thread(void *arg)
* responsible for cleaning up the early connection failure.
*/
if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
- iscsit_take_action_for_connection_exit(conn);
+ iscsit_take_action_for_connection_exit(conn, &conn_freed);
out:
+ if (!conn_freed) {
+ while (!kthread_should_stop()) {
+ msleep(100);
+ }
+ }
return 0;
}
@@ -4012,6 +4021,7 @@ int iscsi_target_rx_thread(void *arg)
{
int rc;
struct iscsi_conn *conn = arg;
+ bool conn_freed = false;
/*
* Allow ourselves to be interrupted by SIGINT so that a
@@ -4024,7 +4034,7 @@ int iscsi_target_rx_thread(void *arg)
*/
rc = wait_for_completion_interruptible(&conn->rx_login_comp);
if (rc < 0 || iscsi_target_check_conn_state(conn))
- return 0;
+ goto out;
if (!conn->conn_transport->iscsit_get_rx_pdu)
return 0;
@@ -4033,7 +4043,15 @@ int iscsi_target_rx_thread(void *arg)
if (!signal_pending(current))
atomic_set(&conn->transport_failed, 1);
- iscsit_take_action_for_connection_exit(conn);
+ iscsit_take_action_for_connection_exit(conn, &conn_freed);
+
+out:
+ if (!conn_freed) {
+ while (!kthread_should_stop()) {
+ msleep(100);
+ }
+ }
+
return 0;
}
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index b54e72c..efc453e 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
}
}
-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
{
+ *conn_freed = false;
+
spin_lock_bh(&conn->state_lock);
if (atomic_read(&conn->connection_exit)) {
spin_unlock_bh(&conn->state_lock);
@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
spin_unlock_bh(&conn->state_lock);
iscsit_close_connection(conn);
+ *conn_freed = true;
return;
}
@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
spin_unlock_bh(&conn->state_lock);
iscsit_handle_connection_cleanup(conn);
+ *conn_freed = true;
}
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
index a9e2f94..fbc1d84 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.h
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -9,6 +9,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
#endif /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 96c55bc..6128e8e 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1460,5 +1460,9 @@ int iscsi_target_login_thread(void *arg)
break;
}
+ while (!kthread_should_stop()) {
+ msleep(100);
+ }
+
return 0;
}
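The iscsi hunks above make the tx/rx/login threads park instead of returning when the connection they serviced was not freed on their own exit path, so a later kthread_stop() from the teardown code cannot race with a thread that has already exited. A minimal sketch of the parking idiom, with example_thread() as a hypothetical thread function:

#include <linux/delay.h>
#include <linux/kthread.h>

static int example_thread(void *arg)
{
	/* ... normal processing until an error or disconnect ... */

	/*
	 * Park until whoever tears the connection down calls
	 * kthread_stop(); returning early would race with that call.
	 */
	while (!kthread_should_stop())
		msleep(100);

	return 0;
}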
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index 432adbc..1ab5b0c 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/thermal.h>
#include "tsens.h"
+#include "qcom/qti_virtual_sensor.h"
LIST_HEAD(tsens_device_list);
@@ -172,6 +173,9 @@ static int tsens_thermal_zone_register(struct tsens_device *tmdev)
return -ENODEV;
}
+ /* Register virtual thermal sensors. */
+ qti_virtual_sensor_register(&tmdev->pdev->dev);
+
return 0;
}
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 984241f9..65d8fd7 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -33,6 +33,9 @@
#include <linux/thermal.h>
#include <linux/list.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/thermal_virtual.h>
+
#include "thermal_core.h"
/*** Private data structures to represent thermal device tree data ***/
@@ -107,11 +110,82 @@ struct __thermal_zone {
struct __sensor_param *senps;
};
+/**
+ * struct virtual_sensor - internal representation of a virtual thermal zone
+ * @num_sensors - number of sensors this virtual sensor will reference to
+ * estimate temperature
+ * @tz - Array of thermal zones of the sensors this virtual sensor will use
+ * to estimate temperature
+ * @virt_tz - Virtual thermal zone pointer
+ * @logic - aggregation logic to be used to estimate the temperature
+ * @last_reading - last estimated temperature
+ * @coefficients - array of coefficients to be used for weighted aggregation
+ * logic
+ * @avg_offset - offset value to be used for the weighted aggregation logic
+ * @avg_denominator - denominator value to be used for the weighted aggregation
+ * logic
+ */
+struct virtual_sensor {
+ int num_sensors;
+ struct thermal_zone_device *tz[THERMAL_MAX_VIRT_SENSORS];
+ struct thermal_zone_device *virt_tz;
+ enum aggregation_logic logic;
+ int last_reading;
+ int coefficients[THERMAL_MAX_VIRT_SENSORS];
+ int avg_offset;
+ int avg_denominator;
+};
+
static int of_thermal_aggregate_trip_types(struct thermal_zone_device *tz,
unsigned int trip_type_mask, int *low, int *high);
/*** DT thermal zone device callbacks ***/
+static int virt_sensor_read_temp(void *data, int *val)
+{
+ struct virtual_sensor *sens = data;
+ int idx, temp = 0, ret = 0;
+
+ for (idx = 0; idx < sens->num_sensors; idx++) {
+ int sens_temp = 0;
+
+ ret = thermal_zone_get_temp(sens->tz[idx], &sens_temp);
+ if (ret) {
+ pr_err("virt zone: sensor[%s] read error:%d\n",
+ sens->tz[idx]->type, ret);
+ return ret;
+ }
+ switch (sens->logic) {
+ case VIRT_WEIGHTED_AVG:
+ temp += sens_temp * sens->coefficients[idx];
+ if (idx == (sens->num_sensors - 1))
+ temp = (temp + sens->avg_offset)
+ / sens->avg_denominator;
+ break;
+ case VIRT_MAXIMUM:
+ if (idx == 0)
+ temp = INT_MIN;
+ if (sens_temp > temp)
+ temp = sens_temp;
+ break;
+ case VIRT_MINIMUM:
+ if (idx == 0)
+ temp = INT_MAX;
+ if (sens_temp < temp)
+ temp = sens_temp;
+ break;
+ default:
+ break;
+ }
+ trace_virtual_temperature(sens->virt_tz, sens->tz[idx],
+ sens_temp, temp);
+ }
+
+ sens->last_reading = *val = temp;
+
+ return 0;
+}
+
static int of_thermal_get_temp(struct thermal_zone_device *tz,
int *temp)
{
@@ -516,6 +590,10 @@ static struct thermal_zone_device_ops of_thermal_ops = {
.unbind = of_thermal_unbind,
};
+static struct thermal_zone_of_device_ops of_virt_ops = {
+ .get_temp = virt_sensor_read_temp,
+};
+
/*** sensor API ***/
static struct thermal_zone_device *
@@ -727,6 +805,135 @@ static int devm_thermal_zone_of_sensor_match(struct device *dev, void *res,
}
/**
+ * devm_thermal_of_virtual_sensor_register - Register a virtual sensor.
+ * Three types of virtual sensors are supported.
+ * 1. Weighted aggregation type:
+ * Virtual sensor of this type calculates the weighted aggregation
+ * of sensor temperatures using the below formula,
+ * temp = ((sensor_1_temp * coeff_1 + ... + sensor_n_temp * coeff_n)
+ * + avg_offset) / avg_denominator
+ * So the sensor drivers have to specify n+2 values: n coefficients
+ * plus avg_offset and avg_denominator.
+ * 2. Maximum type:
+ * Virtual sensors of this type will report the maximum of all
+ * sensor temperatures.
+ * 3. Minimum type:
+ * Virtual sensors of this type will report the minimum of all
+ * sensor temperatures.
+ *
+ * @input arguments:
+ * @dev: Virtual sensor driver device pointer.
+ * @sensor_data: Virtual sensor data supported for the device.
+ *
+ * @return: Returns a virtual thermal zone pointer. Returns error if thermal
+ * zone is not created. Returns -EAGAIN, if the sensor that is required for
+ * this virtual sensor temperature estimation is not registered yet. The
+ * sensor driver can try again later.
+ */
+struct thermal_zone_device *devm_thermal_of_virtual_sensor_register(
+ struct device *dev,
+ const struct virtual_sensor_data *sensor_data)
+{
+ int sens_idx = 0;
+ struct virtual_sensor *sens;
+ struct __thermal_zone *tz;
+ struct thermal_zone_device **ptr;
+ struct thermal_zone_device *tzd;
+ struct __sensor_param *sens_param = NULL;
+ enum thermal_device_mode mode;
+
+ if (!dev || !sensor_data)
+ return ERR_PTR(-EINVAL);
+
+ tzd = thermal_zone_get_zone_by_name(
+ sensor_data->virt_zone_name);
+ if (IS_ERR(tzd)) {
+ dev_err(dev, "sens:%s not available err: %ld\n",
+ sensor_data->virt_zone_name,
+ PTR_ERR(tzd));
+ return tzd;
+ }
+
+ mutex_lock(&tzd->lock);
+ /*
+ * Check if the virtual zone is registered and enabled.
+ * If so, return the registered thermal zone.
+ */
+ tzd->ops->get_mode(tzd, &mode);
+ mutex_unlock(&tzd->lock);
+ if (mode == THERMAL_DEVICE_ENABLED)
+ return tzd;
+
+ sens = devm_kzalloc(dev, sizeof(*sens), GFP_KERNEL);
+ if (!sens)
+ return ERR_PTR(-ENOMEM);
+
+ sens->virt_tz = tzd;
+ sens->logic = sensor_data->logic;
+ sens->num_sensors = sensor_data->num_sensors;
+ if (sens->logic == VIRT_WEIGHTED_AVG) {
+ int coeff_ct = sensor_data->coefficient_ct;
+
+ /*
+ * For weighted aggregation, sensor drivers have to specify
+ * n+2 coefficients.
+ */
+ if (coeff_ct != sens->num_sensors) {
+ dev_err(dev, "sens:%s Invalid coefficient\n",
+ sensor_data->virt_zone_name);
+ return ERR_PTR(-EINVAL);
+ }
+ memcpy(sens->coefficients, sensor_data->coefficients,
+ coeff_ct * sizeof(*sens->coefficients));
+ sens->avg_offset = sensor_data->avg_offset;
+ sens->avg_denominator = sensor_data->avg_denominator;
+ }
+
+ for (sens_idx = 0; sens_idx < sens->num_sensors; sens_idx++) {
+ sens->tz[sens_idx] = thermal_zone_get_zone_by_name(
+ sensor_data->sensor_names[sens_idx]);
+ if (IS_ERR(sens->tz[sens_idx])) {
+ dev_err(dev, "sens:%s sensor[%s] fetch err:%ld\n",
+ sensor_data->virt_zone_name,
+ sensor_data->sensor_names[sens_idx],
+ PTR_ERR(sens->tz[sens_idx]));
+ break;
+ }
+ }
+ if (sens->num_sensors != sens_idx)
+ return ERR_PTR(-EAGAIN);
+
+ sens_param = kzalloc(sizeof(*sens_param), GFP_KERNEL);
+ if (!sens_param)
+ return ERR_PTR(-ENOMEM);
+ sens_param->sensor_data = sens;
+ sens_param->ops = &of_virt_ops;
+ INIT_LIST_HEAD(&sens_param->first_tz);
+ sens_param->trip_high = INT_MAX;
+ sens_param->trip_low = INT_MIN;
+ mutex_init(&sens_param->lock);
+
+ mutex_lock(&tzd->lock);
+ tz = tzd->devdata;
+ tz->senps = sens_param;
+ tzd->ops->get_temp = of_thermal_get_temp;
+ list_add_tail(&tz->list, &sens_param->first_tz);
+ mutex_unlock(&tzd->lock);
+
+ ptr = devres_alloc(devm_thermal_zone_of_sensor_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ *ptr = tzd;
+ devres_add(dev, ptr);
+
+ tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED);
+
+ return tzd;
+}
+EXPORT_SYMBOL(devm_thermal_of_virtual_sensor_register);
+
+/**
* devm_thermal_zone_of_sensor_register - Resource managed version of
* thermal_zone_of_sensor_register()
* @dev: a valid struct device pointer of a sensor device. Must contain
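The virt_sensor_read_temp() hunk above accumulates sensor_temp * coefficient per sensor and, on the last sensor, applies the offset and denominator. A worked example of that VIRT_WEIGHTED_AVG arithmetic with made-up numbers (two sensors at 40000 and 50000 millidegrees, weights 60 and 40, offset 0, denominator 100):

static int weighted_avg_example(void)
{
	const int readings[] = { 40000, 50000 };	/* millidegree C, assumed */
	const int coefficients[] = { 60, 40 };		/* assumed weights */
	const int avg_offset = 0, avg_denominator = 100;
	int i, temp = 0;

	for (i = 0; i < 2; i++)
		temp += readings[i] * coefficients[i];

	/* (40000 * 60 + 50000 * 40 + 0) / 100 = 44000 */
	return (temp + avg_offset) / avg_denominator;
}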
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index 819c6d5..57cdd49 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,7 @@
* GNU General Public License for more details.
*/
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>
@@ -29,13 +30,17 @@
#define QPNP_TM_REG_ALARM_CTRL 0x46
#define QPNP_TM_TYPE 0x09
-#define QPNP_TM_SUBTYPE 0x08
+#define QPNP_TM_SUBTYPE_GEN1 0x08
+#define QPNP_TM_SUBTYPE_GEN2 0x09
-#define STATUS_STAGE_MASK 0x03
+#define STATUS_GEN1_STAGE_MASK GENMASK(1, 0)
+#define STATUS_GEN2_STATE_MASK GENMASK(6, 4)
+#define STATUS_GEN2_STATE_SHIFT 4
-#define SHUTDOWN_CTRL1_THRESHOLD_MASK 0x03
+#define SHUTDOWN_CTRL1_OVERRIDE_MASK GENMASK(7, 6)
+#define SHUTDOWN_CTRL1_THRESHOLD_MASK GENMASK(1, 0)
-#define ALARM_CTRL_FORCE_ENABLE 0x80
+#define ALARM_CTRL_FORCE_ENABLE BIT(7)
/*
* Trip point values based on threshold control
@@ -58,6 +63,7 @@
struct qpnp_tm_chip {
struct regmap *map;
struct thermal_zone_device *tz_dev;
+ unsigned int subtype;
long temp;
unsigned int thresh;
unsigned int stage;
@@ -66,6 +72,9 @@ struct qpnp_tm_chip {
struct iio_channel *adc;
};
+/* This array maps from GEN2 alarm state to GEN1 alarm stage */
+static const unsigned int alarm_state_map[8] = {0, 1, 1, 2, 2, 3, 3, 3};
+
static int qpnp_tm_read(struct qpnp_tm_chip *chip, u16 addr, u8 *data)
{
unsigned int val;
@@ -84,13 +93,14 @@ static int qpnp_tm_write(struct qpnp_tm_chip *chip, u16 addr, u8 data)
return regmap_write(chip->map, chip->base + addr, data);
}
-/*
- * This function updates the internal temp value based on the
- * current thermal stage and threshold as well as the previous stage
+/**
+ * qpnp_tm_get_temp_stage() - return over-temperature stage
+ * @chip: Pointer to the qpnp_tm chip
+ *
+ * Return: stage (GEN1) or state (GEN2) on success, or errno on failure.
*/
-static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
+static int qpnp_tm_get_temp_stage(struct qpnp_tm_chip *chip)
{
- unsigned int stage;
int ret;
u8 reg = 0;
@@ -98,16 +108,44 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
if (ret < 0)
return ret;
- stage = reg & STATUS_STAGE_MASK;
+ if (chip->subtype == QPNP_TM_SUBTYPE_GEN1)
+ ret = reg & STATUS_GEN1_STAGE_MASK;
+ else
+ ret = (reg & STATUS_GEN2_STATE_MASK) >> STATUS_GEN2_STATE_SHIFT;
- if (stage > chip->stage) {
+ return ret;
+}
+
+/*
+ * This function updates the internal temp value based on the
+ * current thermal stage and threshold as well as the previous stage
+ */
+static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
+{
+ unsigned int stage, stage_new, stage_old;
+ int ret;
+
+ ret = qpnp_tm_get_temp_stage(chip);
+ if (ret < 0)
+ return ret;
+ stage = ret;
+
+ if (chip->subtype == QPNP_TM_SUBTYPE_GEN1) {
+ stage_new = stage;
+ stage_old = chip->stage;
+ } else {
+ stage_new = alarm_state_map[stage];
+ stage_old = alarm_state_map[chip->stage];
+ }
+
+ if (stage_new > stage_old) {
/* increasing stage, use lower bound */
- chip->temp = (stage - 1) * TEMP_STAGE_STEP +
+ chip->temp = (stage_new - 1) * TEMP_STAGE_STEP +
chip->thresh * TEMP_THRESH_STEP +
TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
- } else if (stage < chip->stage) {
+ } else if (stage_new < stage_old) {
/* decreasing stage, use upper bound */
- chip->temp = stage * TEMP_STAGE_STEP +
+ chip->temp = stage_new * TEMP_STAGE_STEP +
chip->thresh * TEMP_THRESH_STEP -
TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
}
@@ -162,28 +200,37 @@ static irqreturn_t qpnp_tm_isr(int irq, void *data)
*/
static int qpnp_tm_init(struct qpnp_tm_chip *chip)
{
+ unsigned int stage;
int ret;
- u8 reg;
+ u8 reg = 0;
- chip->thresh = THRESH_MIN;
- chip->temp = DEFAULT_TEMP;
-
- ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg);
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg);
if (ret < 0)
return ret;
- chip->stage = reg & STATUS_STAGE_MASK;
+ chip->thresh = reg & SHUTDOWN_CTRL1_THRESHOLD_MASK;
+ chip->temp = DEFAULT_TEMP;
- if (chip->stage)
+ ret = qpnp_tm_get_temp_stage(chip);
+ if (ret < 0)
+ return ret;
+ chip->stage = ret;
+
+ stage = chip->subtype == QPNP_TM_SUBTYPE_GEN1
+ ? chip->stage : alarm_state_map[chip->stage];
+
+ if (stage)
chip->temp = chip->thresh * TEMP_THRESH_STEP +
- (chip->stage - 1) * TEMP_STAGE_STEP +
+ (stage - 1) * TEMP_STAGE_STEP +
TEMP_THRESH_MIN;
/*
* Set threshold and disable software override of stage 2 and 3
* shutdowns.
*/
- reg = chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
+ chip->thresh = THRESH_MIN;
+ reg &= ~(SHUTDOWN_CTRL1_OVERRIDE_MASK | SHUTDOWN_CTRL1_THRESHOLD_MASK);
+ reg |= chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
ret = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
if (ret < 0)
return ret;
@@ -242,13 +289,16 @@ static int qpnp_tm_probe(struct platform_device *pdev)
goto fail;
}
- if (type != QPNP_TM_TYPE || subtype != QPNP_TM_SUBTYPE) {
+ if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1
+ && subtype != QPNP_TM_SUBTYPE_GEN2)) {
dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n",
type, subtype);
ret = -ENODEV;
goto fail;
}
+ chip->subtype = subtype;
+
ret = qpnp_tm_init(chip);
if (ret < 0) {
dev_err(&pdev->dev, "init failed\n");
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index 473d15a..f6e1b86 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -30,3 +30,13 @@
each cluster can be used to perform quick thermal mitigations by
tracking temperatures of the CPUs and taking thermal action in the
hardware without s/w intervention.
+
+config QTI_VIRTUAL_SENSOR
+ bool "QTI Virtual Sensor driver"
+ depends on THERMAL_OF
+ help
+ This driver has the information about the virtual sensors used by
+ QTI chipsets and registers the virtual sensors with the thermal framework.
+ The virtual sensor information includes the underlying thermal
+ sensors to query for temperature and the aggregation logic to
+ determine the virtual sensor temperature.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index d1a53b0..8859380 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -2,3 +2,4 @@
qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o
obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o
obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o
+obj-$(CONFIG_QTI_VIRTUAL_SENSOR) += qti_virtual_sensor.o
diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c
new file mode 100644
index 0000000..3064c74
--- /dev/null
+++ b/drivers/thermal/qcom/qti_virtual_sensor.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+#include "qti_virtual_sensor.h"
+
+static const struct virtual_sensor_data qti_virtual_sensors[] = {
+ {
+ .virt_zone_name = "gpu-virt-max-step",
+ .num_sensors = 2,
+ .sensor_names = {"gpu0-usr",
+ "gpu1-usr"},
+ .logic = VIRT_MAXIMUM,
+ },
+ {
+ .virt_zone_name = "silver-virt-max-usr",
+ .num_sensors = 4,
+ .sensor_names = {"cpu0-silver-usr",
+ "cpu1-silver-usr",
+ "cpu2-silver-usr",
+ "cpu3-silver-usr"},
+ .logic = VIRT_MAXIMUM,
+ },
+ {
+ .virt_zone_name = "gold-virt-max-usr",
+ .num_sensors = 4,
+ .sensor_names = {"cpu0-gold-usr",
+ "cpu1-gold-usr",
+ "cpu2-gold-usr",
+ "cpu3-gold-usr"},
+ .logic = VIRT_MAXIMUM,
+ },
+};
+
+int qti_virtual_sensor_register(struct device *dev)
+{
+ int sens_ct = 0;
+ static int idx;
+ struct thermal_zone_device *tz;
+
+ sens_ct = ARRAY_SIZE(qti_virtual_sensors);
+ for (; idx < sens_ct; idx++) {
+ tz = devm_thermal_of_virtual_sensor_register(dev,
+ &qti_virtual_sensors[idx]);
+ if (IS_ERR(tz))
+ dev_dbg(dev, "sensor:%d register error:%ld\n",
+ idx, PTR_ERR(tz));
+ else
+ dev_dbg(dev, "sensor:%d registered\n", idx);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qti_virtual_sensor_register);
diff --git a/drivers/thermal/qcom/qti_virtual_sensor.h b/drivers/thermal/qcom/qti_virtual_sensor.h
new file mode 100644
index 0000000..371b794
--- /dev/null
+++ b/drivers/thermal/qcom/qti_virtual_sensor.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QTI_VIRT_SENS_H__
+#define __QTI_VIRT_SENS_H__
+
+#ifdef CONFIG_QTI_VIRTUAL_SENSOR
+
+int qti_virtual_sensor_register(struct device *dev);
+
+#else
+
+static inline int qti_virtual_sensor_register(struct device *dev)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_QTI_VIRTUAL_SENSOR */
+
+#endif /* __QTI_VIRT_SENS_H__ */
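The qti_virtual_sensors[] table above only uses the maximum logic; a weighted-average entry would also fill in the coefficient fields consumed by devm_thermal_of_virtual_sensor_register(). A hypothetical entry for illustration; the zone and sensor names are made up:

static const struct virtual_sensor_data example_virtual_sensor = {
	.virt_zone_name  = "skin-virt-avg",			/* assumed */
	.num_sensors     = 2,
	.sensor_names    = {"xo-therm-usr", "pa-therm-usr"},	/* assumed */
	.logic           = VIRT_WEIGHTED_AVG,
	.coefficient_ct  = 2,
	.coefficients    = {60, 40},
	.avg_offset      = 0,
	.avg_denominator = 100,
};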
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index ecfc4ef..6b05b7b 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -76,6 +76,14 @@ static unsigned long get_target_state(struct thermal_instance *instance,
return next_target;
}
+ /*
+ * If there is no new throttle request and if the thermal zone
+ * wasn't requesting any previous mitigation, then skip the
+ * evaluation.
+ */
+ if (instance->target == THERMAL_NO_TARGET && !throttle)
+ return next_target;
+
switch (trend) {
case THERMAL_TREND_RAISING:
if (throttle) {
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 080d5a5..f24d303 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1320,7 +1320,7 @@ static void autoconfig(struct uart_8250_port *up)
/*
* Check if the device is a Fintek F81216A
*/
- if (port->type == PORT_16550A)
+ if (port->type == PORT_16550A && port->iotype == UPIO_PORT)
fintek_8250_probe(up);
if (up->capabilities != old_capabilities) {
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 2e12c3f..94ba2c3 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -327,6 +327,11 @@ static void msm_geni_serial_break_ctl(struct uart_port *uport, int ctl)
mb();
}
+static unsigned int msm_geni_cons_get_mctrl(struct uart_port *uport)
+{
+ return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+}
+
static unsigned int msm_geni_serial_get_mctrl(struct uart_port *uport)
{
u32 geni_ios = 0;
@@ -610,6 +615,8 @@ static void msm_geni_serial_console_write(struct console *co, const char *s,
{
struct uart_port *uport;
struct msm_geni_serial_port *port;
+ int locked = 1;
+ unsigned long flags;
WARN_ON(co->index < 0 || co->index >= GENI_UART_NR_PORTS);
@@ -618,9 +625,15 @@ static void msm_geni_serial_console_write(struct console *co, const char *s,
return;
uport = &port->uport;
- spin_lock(&uport->lock);
- __msm_geni_serial_console_write(uport, s, count);
- spin_unlock(&uport->lock);
+ if (oops_in_progress)
+ locked = spin_trylock_irqsave(&uport->lock, flags);
+ else
+ spin_lock_irqsave(&uport->lock, flags);
+
+ if (locked) {
+ __msm_geni_serial_console_write(uport, s, count);
+ spin_unlock_irqrestore(&uport->lock, flags);
+ }
}
static int handle_rx_console(struct uart_port *uport,
@@ -1014,13 +1027,20 @@ static void set_rfr_wm(struct msm_geni_serial_port *port)
static void msm_geni_serial_shutdown(struct uart_port *uport)
{
struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+ unsigned long flags;
+ /* Stop the console before stopping the current tx */
+ if (uart_console(uport))
+ console_stop(uport->cons);
+
+ spin_lock_irqsave(&uport->lock, flags);
msm_geni_serial_stop_tx(uport);
msm_geni_serial_stop_rx(uport);
+ spin_unlock_irqrestore(&uport->lock, flags);
+
disable_irq(uport->irq);
free_irq(uport->irq, msm_port);
if (uart_console(uport)) {
- console_stop(uport->cons);
se_geni_resources_off(&msm_port->serial_rsc);
} else {
if (msm_port->wakeup_irq > 0) {
@@ -1572,6 +1592,7 @@ static const struct uart_ops msm_geni_console_pops = {
.shutdown = msm_geni_serial_shutdown,
.type = msm_geni_serial_get_type,
.set_mctrl = msm_geni_cons_set_mctrl,
+ .get_mctrl = msm_geni_cons_get_mctrl,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = msm_geni_serial_get_char,
.poll_put_char = msm_geni_serial_poll_put_char,
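The console write hunk above switches to the standard pattern for console output during a crash: take the port lock with interrupts saved, but only try-lock when oops_in_progress is set, since the crashing CPU may already hold the lock. A minimal sketch of that idiom (example_console_write() and the character emission are placeholders):

#include <linux/serial_core.h>

static void example_console_write(struct uart_port *uport,
				  const char *s, unsigned int count)
{
	unsigned long flags;
	int locked = 1;

	if (oops_in_progress)
		locked = spin_trylock_irqsave(&uport->lock, flags);
	else
		spin_lock_irqsave(&uport->lock, flags);

	if (locked) {
		/* ... emit the characters while the lock is held ... */
		spin_unlock_irqrestore(&uport->lock, flags);
	}
}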
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 38614fa..228d8af 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -3008,8 +3008,9 @@ static int dwc3_msm_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
msm_dwc3_pwr_irq,
msm_dwc3_pwr_irq_thread,
- IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
- | IRQF_ONESHOT, "ss_phy_irq", mdwc);
+ IRQF_TRIGGER_HIGH | IRQ_TYPE_LEVEL_HIGH
+ | IRQF_EARLY_RESUME | IRQF_ONESHOT,
+ "ss_phy_irq", mdwc);
if (ret) {
dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
ret);
@@ -3519,13 +3520,16 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
mdwc->hs_phy->flags |= PHY_HOST_MODE;
- if (dwc->maximum_speed == USB_SPEED_SUPER)
+ if (dwc->maximum_speed == USB_SPEED_SUPER) {
mdwc->ss_phy->flags |= PHY_HOST_MODE;
+ usb_phy_notify_connect(mdwc->ss_phy,
+ USB_SPEED_SUPER);
+ }
+ usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
pm_runtime_get_sync(mdwc->dev);
dbg_event(0xFF, "StrtHost gync",
atomic_read(&mdwc->dev->power.usage_count));
- usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
if (!IS_ERR(mdwc->vbus_reg))
ret = regulator_enable(mdwc->vbus_reg);
if (ret) {
@@ -3614,8 +3618,13 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
dbg_event(0xFF, "StopHost gsync",
atomic_read(&mdwc->dev->power.usage_count));
usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
+ if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
+ usb_phy_notify_disconnect(mdwc->ss_phy,
+ USB_SPEED_SUPER);
+ mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+ }
+
mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
- mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
platform_device_del(dwc->xhci);
usb_unregister_notify(&mdwc->host_nb);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index bb32978..b062d58 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -815,7 +815,13 @@ static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
dwc->delayed_status = true;
out:
- if (ret < 0) {
+ /*
+ * Don't try to halt ep0 if ret is -ESHUTDOWN.
+ * A return value of -ESHUTDOWN means a response to the setup packet
+ * is available but queueing it on ep0 failed, most likely because
+ * ep0 is already disabled.
+ */
+ if (ret < 0 && ret != -ESHUTDOWN) {
dbg_event(0x0, "ERRSTAL", ret);
dwc3_ep0_stall_and_restart(dwc);
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 026ff6c..df0427c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1443,6 +1443,11 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
struct dwc3 *dwc = dep->dwc;
int ret;
+ if (!dep->endpoint.desc) {
+ dev_dbg(dwc->dev, "(%s)'s desc is NULL.\n", dep->name);
+ return -EINVAL;
+ }
+
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
return -EINVAL;
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index a50e327..f383e32 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -29,6 +29,11 @@
#include <linux/usb/usbpd.h>
#include "usbpd.h"
+/* To start USB stack for USB3.1 compliance testing */
+static bool usb_compliance_mode;
+module_param(usb_compliance_mode, bool, 0644);
+MODULE_PARM_DESC(usb_compliance_mode, "Start USB stack for USB3.1 compliance testing");
+
enum usbpd_state {
PE_UNKNOWN,
PE_ERROR_RECOVERY,
@@ -187,6 +192,8 @@ static void *usbpd_ipc_log;
#define PD_MAX_MSG_ID 7
+#define PD_MAX_DATA_OBJ 7
+
#define PD_MSG_HDR(type, dr, pr, id, cnt, rev) \
(((type) & 0xF) | ((dr) << 5) | (rev << 6) | \
((pr) << 8) | ((id) << 9) | ((cnt) << 12))
@@ -308,7 +315,7 @@ struct usbpd {
struct list_head rx_q;
spinlock_t rx_lock;
- u32 received_pdos[7];
+ u32 received_pdos[PD_MAX_DATA_OBJ];
u16 src_cap_id;
u8 selected_pdo;
u8 requested_pdo;
@@ -490,13 +497,12 @@ static int pd_send_msg(struct usbpd *pd, u8 hdr_type, const u32 *data,
ret = pd_phy_write(hdr, (u8 *)data, num_data * sizeof(u32), type, 15);
/* TODO figure out timeout. based on tReceive=1.1ms x nRetryCount? */
- /* MessageID incremented regardless of Tx error */
- pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
-
if (ret < 0)
return ret;
else if (ret != num_data * sizeof(u32))
return -EIO;
+
+ pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
return 0;
}
@@ -556,6 +562,7 @@ static int pd_select_pdo(struct usbpd *pd, int pdo_pos, int uv, int ua)
static int pd_eval_src_caps(struct usbpd *pd)
{
+ int obj_cnt;
union power_supply_propval val;
u32 first_pdo = pd->received_pdos[0];
@@ -572,6 +579,13 @@ static int pd_eval_src_caps(struct usbpd *pd)
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED, &val);
+ for (obj_cnt = 1; obj_cnt < PD_MAX_DATA_OBJ; obj_cnt++) {
+ if ((PD_SRC_PDO_TYPE(pd->received_pdos[obj_cnt]) ==
+ PD_SRC_PDO_TYPE_AUGMENTED) &&
+ !PD_APDO_PPS(pd->received_pdos[obj_cnt]))
+ pd->spec_rev = USBPD_REV_30;
+ }
+
/* Select the first PDO (vSafe5V) immediately. */
pd_select_pdo(pd, 1, 0, 0);
@@ -580,6 +594,8 @@ static int pd_eval_src_caps(struct usbpd *pd)
static void pd_send_hard_reset(struct usbpd *pd)
{
+ union power_supply_propval val = {0};
+
usbpd_dbg(&pd->dev, "send hard reset");
/* Force CC logic to source/sink to keep Rp/Rd unchanged */
@@ -587,6 +603,7 @@ static void pd_send_hard_reset(struct usbpd *pd)
pd->hard_reset_count++;
pd_phy_signal(HARD_RESET_SIG, 5); /* tHardResetComplete */
pd->in_pr_swap = false;
+ power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_PR_SWAP, &val);
}
static void kick_sm(struct usbpd *pd, int ms)
@@ -602,6 +619,8 @@ static void kick_sm(struct usbpd *pd, int ms)
static void phy_sig_received(struct usbpd *pd, enum pd_sig_type type)
{
+ union power_supply_propval val = {1};
+
if (type != HARD_RESET_SIG) {
usbpd_err(&pd->dev, "invalid signal (%d) received\n", type);
return;
@@ -612,6 +631,9 @@ static void phy_sig_received(struct usbpd *pd, enum pd_sig_type type)
/* Force CC logic to source/sink to keep Rp/Rd unchanged */
set_power_role(pd, pd->current_pr);
pd->hard_reset_recvd = true;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
kick_sm(pd, 0);
}
@@ -662,12 +684,6 @@ static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type,
return;
}
- /* if spec rev differs (i.e. is older), update PHY */
- if (PD_MSG_HDR_REV(header) < pd->spec_rev) {
- pd->spec_rev = PD_MSG_HDR_REV(header);
- pd_phy_update_spec_rev(pd->spec_rev);
- }
-
rx_msg = kzalloc(sizeof(*rx_msg), GFP_KERNEL);
if (!rx_msg)
return;
@@ -710,7 +726,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
.shutdown_cb = phy_shutdown,
.frame_filter_val = FRAME_FILTER_EN_SOP |
FRAME_FILTER_EN_HARD_RESET,
- .spec_rev = USBPD_REV_20,
};
union power_supply_propval val = {0};
unsigned long flags;
@@ -732,6 +747,15 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
break;
/* Source states */
+ case PE_SRC_DISABLED:
+ /* are we still connected? */
+ if (pd->typec_mode == POWER_SUPPLY_TYPEC_NONE) {
+ pd->current_pr = PR_NONE;
+ kick_sm(pd, 0);
+ }
+
+ break;
+
case PE_SRC_STARTUP:
if (pd->current_dr == DR_NONE) {
pd->current_dr = DR_DFP;
@@ -748,8 +772,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
- /* support only PD 2.0 as a source */
- pd->spec_rev = USBPD_REV_20;
pd_reset_protocol(pd);
if (!pd->in_pr_swap) {
@@ -760,7 +782,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
phy_params.data_role = pd->current_dr;
phy_params.power_role = pd->current_pr;
- phy_params.spec_rev = pd->spec_rev;
ret = pd_phy_open(&phy_params);
if (ret) {
@@ -772,14 +793,15 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
}
pd->pd_phy_opened = true;
- } else {
- pd_phy_update_spec_rev(pd->spec_rev);
}
pd->current_state = PE_SRC_SEND_CAPABILITIES;
if (pd->in_pr_swap) {
kick_sm(pd, SWAP_SOURCE_START_TIME);
pd->in_pr_swap = false;
+ val.intval = 0;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PR_SWAP, &val);
break;
}
@@ -862,6 +884,10 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
case PE_SRC_HARD_RESET:
case PE_SNK_HARD_RESET:
+ /* are we still connected? */
+ if (pd->typec_mode == POWER_SUPPLY_TYPEC_NONE)
+ pd->current_pr = PR_NONE;
+
/* hard reset may sleep; handle it in the workqueue */
kick_sm(pd, 0);
break;
@@ -888,7 +914,8 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
pd->current_dr = DR_UFP;
if (pd->psy_type == POWER_SUPPLY_TYPE_USB ||
- pd->psy_type == POWER_SUPPLY_TYPE_USB_CDP)
+ pd->psy_type == POWER_SUPPLY_TYPE_USB_CDP ||
+ usb_compliance_mode)
start_usb_peripheral(pd);
}
@@ -905,11 +932,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
if (!val.intval)
break;
- /*
- * support up to PD 3.0 as a sink; if source is 2.0,
- * phy_msg_received() will handle the downgrade.
- */
- pd->spec_rev = USBPD_REV_30;
pd_reset_protocol(pd);
if (!pd->in_pr_swap) {
@@ -920,7 +942,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
phy_params.data_role = pd->current_dr;
phy_params.power_role = pd->current_pr;
- phy_params.spec_rev = pd->spec_rev;
ret = pd_phy_open(&phy_params);
if (ret) {
@@ -932,8 +953,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
}
pd->pd_phy_opened = true;
- } else {
- pd_phy_update_spec_rev(pd->spec_rev);
}
pd->current_voltage = pd->requested_voltage = 5000000;
@@ -1548,6 +1567,11 @@ static void usbpd_sm(struct work_struct *w)
if (pd->current_state == PE_UNKNOWN)
goto sm_done;
+ if (pd->vconn_enabled) {
+ regulator_disable(pd->vconn);
+ pd->vconn_enabled = false;
+ }
+
usbpd_info(&pd->dev, "USB Type-C disconnect\n");
if (pd->pd_phy_opened) {
@@ -1567,7 +1591,6 @@ static void usbpd_sm(struct work_struct *w)
memset(&pd->received_pdos, 0, sizeof(pd->received_pdos));
rx_msg_cleanup(pd);
- val.intval = 0;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
@@ -1583,11 +1606,6 @@ static void usbpd_sm(struct work_struct *w)
pd->vbus_enabled = false;
}
- if (pd->vconn_enabled) {
- regulator_disable(pd->vconn);
- pd->vconn_enabled = false;
- }
-
reset_vdm_state(pd);
if (pd->current_dr == DR_UFP)
stop_usb_peripheral(pd);
@@ -1602,6 +1620,10 @@ static void usbpd_sm(struct work_struct *w)
usleep_range(ERROR_RECOVERY_TIME * USEC_PER_MSEC,
(ERROR_RECOVERY_TIME + 5) * USEC_PER_MSEC);
+ val.intval = 0;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PR_SWAP, &val);
+
/* set due to dual_role class "mode" change */
if (pd->forced_pr != POWER_SUPPLY_TYPEC_PR_NONE)
val.intval = pd->forced_pr;
@@ -1625,11 +1647,22 @@ static void usbpd_sm(struct work_struct *w)
if (pd->hard_reset_recvd) {
pd->hard_reset_recvd = false;
- val.intval = 1;
+ if (pd->requested_current) {
+ val.intval = pd->requested_current = 0;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
+ }
+
+ pd->requested_voltage = 5000000;
+ val.intval = pd->requested_voltage;
power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+ POWER_SUPPLY_PROP_VOLTAGE_MIN, &val);
pd->in_pr_swap = false;
+ val.intval = 0;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PR_SWAP, &val);
+
pd->in_explicit_contract = false;
pd->selected_pdo = pd->requested_pdo = 0;
pd->rdo = 0;
@@ -1733,14 +1766,8 @@ static void usbpd_sm(struct work_struct *w)
case PE_SRC_READY:
if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP)) {
- ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES,
- default_src_caps,
- ARRAY_SIZE(default_src_caps), SOP_MSG);
- if (ret) {
- usbpd_err(&pd->dev, "Error sending SRC CAPs\n");
- usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
- break;
- }
+ pd->current_state = PE_SRC_SEND_CAPABILITIES;
+ kick_sm(pd, 0);
} else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) {
ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
pd->sink_caps, pd->num_sink_caps,
@@ -1896,6 +1923,9 @@ static void usbpd_sm(struct work_struct *w)
case PE_SNK_WAIT_FOR_CAPABILITIES:
pd->in_pr_swap = false;
+ val.intval = 0;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PR_SWAP, &val);
if (IS_DATA(rx_msg, MSG_SOURCE_CAPABILITIES)) {
val.intval = 0;
@@ -1915,15 +1945,6 @@ static void usbpd_sm(struct work_struct *w)
POWER_SUPPLY_PROP_PD_ACTIVE, &val);
} else if (pd->hard_reset_count < 3) {
usbpd_set_state(pd, PE_SNK_HARD_RESET);
- } else if (pd->pd_connected) {
- usbpd_info(&pd->dev, "Sink hard reset count exceeded, forcing reconnect\n");
-
- val.intval = 0;
- power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
- &val);
-
- usbpd_set_state(pd, PE_ERROR_RECOVERY);
} else {
usbpd_dbg(&pd->dev, "Sink hard reset count exceeded, disabling PD\n");
@@ -2071,6 +2092,9 @@ static void usbpd_sm(struct work_struct *w)
}
pd->in_pr_swap = true;
+ val.intval = 1;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PR_SWAP, &val);
usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
break;
} else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) {
@@ -2214,6 +2238,9 @@ static void usbpd_sm(struct work_struct *w)
case PE_PRS_SRC_SNK_TRANSITION_TO_OFF:
pd->in_pr_swap = true;
+ val.intval = 1;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PR_SWAP, &val);
pd->in_explicit_contract = false;
if (pd->vbus_enabled) {
@@ -2254,6 +2281,9 @@ static void usbpd_sm(struct work_struct *w)
}
pd->in_pr_swap = true;
+ val.intval = 1;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PR_SWAP, &val);
usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
break;
@@ -2313,6 +2343,14 @@ static void usbpd_sm(struct work_struct *w)
sm_done:
kfree(rx_msg);
+ spin_lock_irqsave(&pd->rx_lock, flags);
+ ret = list_empty(&pd->rx_q);
+ spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+ /* requeue if there are any new/pending RX messages */
+ if (!ret)
+ kick_sm(pd, 0);
+
if (!pd->sm_queued)
pm_relax(&pd->dev);
}
@@ -3187,7 +3225,7 @@ struct usbpd *usbpd_create(struct device *parent)
if (ret)
goto free_pd;
- pd->wq = alloc_ordered_workqueue("usbpd_wq", WQ_FREEZABLE);
+ pd->wq = alloc_ordered_workqueue("usbpd_wq", WQ_FREEZABLE | WQ_HIGHPRI);
if (!pd->wq) {
ret = -ENOMEM;
goto del_pd;
@@ -3309,6 +3347,8 @@ struct usbpd *usbpd_create(struct device *parent)
pd->dual_role->drv_data = pd;
}
+ /* default support as PD 2.0 source or sink */
+ pd->spec_rev = USBPD_REV_20;
pd->current_pr = PR_NONE;
pd->current_dr = DR_NONE;
list_add_tail(&pd->instance, &_usbpd);
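One behavioral change above is easy to miss: pd_send_msg() used to advance the MessageID counter even when the PHY write failed, while now it only advances on success so the retry reuses the same ID. A minimal sketch of that counter handling; example_pd and example_phy_write() are hypothetical stand-ins:

#include <linux/types.h>

struct example_pd {
	u8 tx_msgid;		/* 3-bit rolling MessageID counter */
};

int example_phy_write(const u32 *data, size_t len);	/* hypothetical */

static int example_send(struct example_pd *pd, const u32 *data, size_t len)
{
	int ret = example_phy_write(data, len);

	if (ret < 0)
		return ret;	/* keep tx_msgid so the retry reuses it */

	pd->tx_msgid = (pd->tx_msgid + 1) & 0x7;	/* wrap at PD_MAX_MSG_ID */
	return 0;
}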
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
index 4caee72..1f5306f 100644
--- a/drivers/usb/pd/qpnp-pdphy.c
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -108,6 +108,7 @@ struct usb_pdphy {
int tx_status;
u8 frame_filter_val;
bool in_test_data_mode;
+ bool rx_busy;
enum data_role data_role;
enum power_role power_role;
@@ -334,15 +335,6 @@ int pd_phy_update_roles(enum data_role dr, enum power_role pr)
}
EXPORT_SYMBOL(pd_phy_update_roles);
-int pd_phy_update_spec_rev(enum pd_spec_rev rev)
-{
- struct usb_pdphy *pdphy = __pdphy;
-
- return pdphy_masked_write(pdphy, USB_PDPHY_MSG_CONFIG,
- MSG_CONFIG_SPEC_REV_MASK, rev);
-}
-EXPORT_SYMBOL(pd_phy_update_spec_rev);
-
int pd_phy_open(struct pd_phy_params *params)
{
int ret;
@@ -377,7 +369,9 @@ int pd_phy_open(struct pd_phy_params *params)
if (ret)
return ret;
- ret = pd_phy_update_spec_rev(params->spec_rev);
+ /* PD 2.0 phy */
+ ret = pdphy_masked_write(pdphy, USB_PDPHY_MSG_CONFIG,
+ MSG_CONFIG_SPEC_REV_MASK, USBPD_REV_20);
if (ret)
return ret;
@@ -492,6 +486,12 @@ int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
return -EINVAL;
}
+ ret = pdphy_reg_read(pdphy, &val, USB_PDPHY_RX_ACKNOWLEDGE, 1);
+ if (ret || val || pdphy->rx_busy) {
+ dev_err(pdphy->dev, "%s: RX message pending\n", __func__);
+ return -EBUSY;
+ }
+
pdphy->tx_status = -EINPROGRESS;
/* write 2 byte SOP message header */
@@ -664,6 +664,15 @@ static int pd_phy_bist_mode(u8 bist_mode)
BIST_MODE_MASK | BIST_ENABLE, bist_mode | BIST_ENABLE);
}
+static irqreturn_t pdphy_msg_rx_irq(int irq, void *data)
+{
+ struct usb_pdphy *pdphy = data;
+
+ pdphy->rx_busy = true;
+
+ return IRQ_WAKE_THREAD;
+}
+
static irqreturn_t pdphy_msg_rx_irq_thread(int irq, void *data)
{
u8 size, rx_status, frame_type;
@@ -720,6 +729,7 @@ static irqreturn_t pdphy_msg_rx_irq_thread(int irq, void *data)
false);
pdphy->rx_bytes += size + 1;
done:
+ pdphy->rx_busy = false;
return IRQ_HANDLED;
}
@@ -805,7 +815,7 @@ static int pdphy_probe(struct platform_device *pdev)
return ret;
ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
- &pdphy->msg_rx_irq, "msg-rx", NULL,
+ &pdphy->msg_rx_irq, "msg-rx", pdphy_msg_rx_irq,
pdphy_msg_rx_irq_thread, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
if (ret < 0)
return ret;
diff --git a/drivers/usb/pd/usbpd.h b/drivers/usb/pd/usbpd.h
index b2663ad..1087017 100644
--- a/drivers/usb/pd/usbpd.h
+++ b/drivers/usb/pd/usbpd.h
@@ -68,7 +68,6 @@ struct pd_phy_params {
enum data_role data_role;
enum power_role power_role;
u8 frame_filter_val;
- u8 spec_rev;
};
#if IS_ENABLED(CONFIG_QPNP_USB_PDPHY)
@@ -77,7 +76,6 @@ int pd_phy_signal(enum pd_sig_type type, unsigned int timeout_ms);
int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
enum pd_msg_type type, unsigned int timeout_ms);
int pd_phy_update_roles(enum data_role dr, enum power_role pr);
-int pd_phy_update_spec_rev(enum pd_spec_rev rev);
void pd_phy_close(void);
#else
static inline int pd_phy_open(struct pd_phy_params *params)
@@ -101,11 +99,6 @@ static inline int pd_phy_update_roles(enum data_role dr, enum power_role pr)
return -ENODEV;
}
-static inline int pd_phy_update_spec_rev(enum pd_spec_rev rev)
-{
- return -ENODEV;
-}
-
static inline void pd_phy_close(void)
{
}
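The qpnp-pdphy hunks above split the message-rx interrupt into a hard-IRQ half that only marks the receiver busy and a threaded half that clears the flag once the frame is consumed; pd_phy_write() then refuses to transmit while the flag is set. A minimal sketch of that handshake; example_dev and the handlers are hypothetical:

#include <linux/interrupt.h>
#include <linux/types.h>

struct example_dev {
	bool rx_busy;
};

static irqreturn_t example_rx_irq(int irq, void *data)
{
	struct example_dev *d = data;

	d->rx_busy = true;		/* a frame is arriving */
	return IRQ_WAKE_THREAD;		/* defer the real work */
}

static irqreturn_t example_rx_irq_thread(int irq, void *data)
{
	struct example_dev *d = data;

	/* ... drain the frame from the hardware FIFO ... */
	d->rx_busy = false;		/* the transmit path may proceed */
	return IRQ_HANDLED;
}

Both handlers would be installed together with request_threaded_irq() and IRQF_ONESHOT, mirroring the pdphy_request_irq() change above.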
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 8bdd9fd..59f5379 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -201,14 +201,16 @@ static void msm_ssusb_qmp_enable_autonomous(struct msm_ssphy_qmp *phy,
if (enable) {
msm_ssusb_qmp_clr_lfps_rxterm_int(phy);
+ val = readb_relaxed(phy->base + autonomous_mode_offset);
+ val |= ARCVR_DTCT_EN;
if (phy->phy.flags & DEVICE_IN_SS_MODE) {
- val =
- readb_relaxed(phy->base + autonomous_mode_offset);
- val |= ARCVR_DTCT_EN;
val |= ALFPS_DTCT_EN;
val &= ~ARCVR_DTCT_EVENT_SEL;
- writeb_relaxed(val, phy->base + autonomous_mode_offset);
+ } else {
+ val &= ~ALFPS_DTCT_EN;
+ val |= ARCVR_DTCT_EVENT_SEL;
}
+ writeb_relaxed(val, phy->base + autonomous_mode_offset);
msm_ssusb_qmp_clamp_enable(phy, true);
} else {
msm_ssusb_qmp_clamp_enable(phy, false);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a7943f8..74a2b44 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1805,6 +1805,8 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
+ struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
+ struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
int type = CURSEG_HOT_DATA;
int err;
@@ -1831,6 +1833,11 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
return err;
}
+ /* sanity check for summary blocks */
+ if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
+ sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES)
+ return -EINVAL;
+
return 0;
}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 7e0c002..b81998e 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1424,6 +1424,8 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
unsigned int total, fsmeta;
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ unsigned int main_segs, blocks_per_seg;
+ int i;
total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -1435,6 +1437,22 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
if (unlikely(fsmeta >= total))
return 1;
+ main_segs = le32_to_cpu(sbi->raw_super->segment_count_main);
+ blocks_per_seg = sbi->blocks_per_seg;
+
+ for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
+ if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
+ le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) {
+ return 1;
+ }
+ }
+ for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
+ if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
+ le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) {
+ return 1;
+ }
+ }
+
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
return 1;
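The sanity_check_ckpt() additions above reject a checkpoint whose current-segment slots point outside the main area before any of those values are used. A minimal sketch of the per-slot bound that each loop enforces:

#include <linux/types.h>

static bool curseg_slot_valid(u32 segno, u16 blkoff,
			      u32 main_segs, u32 blocks_per_seg)
{
	/* segment must lie in the main area, offset inside the segment */
	return segno < main_segs && blkoff < blocks_per_seg;
}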
diff --git a/fs/pnode.c b/fs/pnode.c
index b5f97c6..e4e428d 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -504,9 +504,14 @@ static struct mount *next_descendent(struct mount *root, struct mount *cur)
if (!IS_MNT_NEW(cur) && !list_empty(&cur->mnt_slave_list))
return first_slave(cur);
do {
- if (cur->mnt_slave.next != &cur->mnt_master->mnt_slave_list)
- return next_slave(cur);
- cur = cur->mnt_master;
+ struct mount *master = cur->mnt_master;
+
+ if (!master || cur->mnt_slave.next != &master->mnt_slave_list) {
+ struct mount *next = next_slave(cur);
+
+ return (next == root) ? NULL : next;
+ }
+ cur = master;
} while (cur != root);
return NULL;
}
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index f04ab23..f3469ad 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -812,9 +812,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
uspi->s_dirblksize = UFS_SECTOR_SIZE;
super_block_offset=UFS_SBLOCK;
- /* Keep 2Gig file limit. Some UFS variants need to override
- this but as I don't know which I'll let those in the know loosen
- the rules */
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+
switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
case UFS_MOUNT_UFSTYPE_44BSD:
UFSD("ufstype=44bsd\n");
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 5a508b0..2a8cbd1 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -2208,8 +2208,10 @@ xfs_bmap_add_extent_delay_real(
}
temp = xfs_bmap_worst_indlen(bma->ip, temp);
temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
- diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
- (bma->cur ? bma->cur->bc_private.b.allocated : 0));
+ diff = (int)(temp + temp2 -
+ (startblockval(PREV.br_startblock) -
+ (bma->cur ?
+ bma->cur->bc_private.b.allocated : 0)));
if (diff > 0) {
error = xfs_mod_fdblocks(bma->ip->i_mount,
-((int64_t)diff), false);
@@ -2266,7 +2268,6 @@ xfs_bmap_add_extent_delay_real(
temp = da_new;
if (bma->cur)
temp += bma->cur->bc_private.b.allocated;
- ASSERT(temp <= da_old);
if (temp < da_old)
xfs_mod_fdblocks(bma->ip->i_mount,
(int64_t)(da_old - temp), false);
@@ -3964,7 +3965,7 @@ xfs_bmap_remap_alloc(
{
struct xfs_trans *tp = ap->tp;
struct xfs_mount *mp = tp->t_mountp;
- xfs_agblock_t bno;
+ xfs_fsblock_t bno;
struct xfs_alloc_arg args;
int error;
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 2849d3f..91c6891 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -4376,7 +4376,7 @@ xfs_btree_visit_blocks(
xfs_btree_readahead_ptr(cur, ptr, 1);
/* save for the next iteration of the loop */
- lptr = *ptr;
+ xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
}
/* for each buffer in the level */
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index ef9f6ea..699a51b 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -126,6 +126,7 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
+extern int xfs_dir2_sf_verify(struct xfs_inode *ip);
/* xfs_dir2_readdir.c */
extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index c6809ff..e84af09 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -629,6 +629,112 @@ xfs_dir2_sf_check(
}
#endif /* DEBUG */
+/* Verify the consistency of an inline directory. */
+int
+xfs_dir2_sf_verify(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_dir2_sf_hdr *sfp;
+ struct xfs_dir2_sf_entry *sfep;
+ struct xfs_dir2_sf_entry *next_sfep;
+ char *endp;
+ const struct xfs_dir_ops *dops;
+ struct xfs_ifork *ifp;
+ xfs_ino_t ino;
+ int i;
+ int i8count;
+ int offset;
+ int size;
+ int error;
+ __uint8_t filetype;
+
+ ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
+ /*
+ * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops,
+ * so we can only trust the mountpoint to have the right pointer.
+ */
+ dops = xfs_dir_get_ops(mp, NULL);
+
+ ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data;
+ size = ifp->if_bytes;
+
+ /*
+ * Give up if the directory is way too short.
+ */
+ if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) ||
+ size < xfs_dir2_sf_hdr_size(sfp->i8count))
+ return -EFSCORRUPTED;
+
+ endp = (char *)sfp + size;
+
+ /* Check .. entry */
+ ino = dops->sf_get_parent_ino(sfp);
+ i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
+ error = xfs_dir_ino_validate(mp, ino);
+ if (error)
+ return error;
+ offset = dops->data_first_offset;
+
+ /* Check all reported entries */
+ sfep = xfs_dir2_sf_firstentry(sfp);
+ for (i = 0; i < sfp->count; i++) {
+ /*
+ * struct xfs_dir2_sf_entry has a variable length.
+ * Check the fixed-offset parts of the structure are
+ * within the data buffer.
+ */
+ if (((char *)sfep + sizeof(*sfep)) >= endp)
+ return -EFSCORRUPTED;
+
+ /* Don't allow names with known bad length. */
+ if (sfep->namelen == 0)
+ return -EFSCORRUPTED;
+
+ /*
+ * Check that the variable-length part of the structure is
+ * within the data buffer. The next entry starts after the
+ * name component, so nextentry is an acceptable test.
+ */
+ next_sfep = dops->sf_nextentry(sfp, sfep);
+ if (endp < (char *)next_sfep)
+ return -EFSCORRUPTED;
+
+ /* Check that the offsets always increase. */
+ if (xfs_dir2_sf_get_offset(sfep) < offset)
+ return -EFSCORRUPTED;
+
+ /* Check the inode number. */
+ ino = dops->sf_get_ino(sfp, sfep);
+ i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
+ error = xfs_dir_ino_validate(mp, ino);
+ if (error)
+ return error;
+
+ /* Check the file type. */
+ filetype = dops->sf_get_ftype(sfep);
+ if (filetype >= XFS_DIR3_FT_MAX)
+ return -EFSCORRUPTED;
+
+ offset = xfs_dir2_sf_get_offset(sfep) +
+ dops->data_entsize(sfep->namelen);
+
+ sfep = next_sfep;
+ }
+ if (i8count != sfp->i8count)
+ return -EFSCORRUPTED;
+ if ((void *)sfep != (void *)endp)
+ return -EFSCORRUPTED;
+
+ /* Make sure this whole thing ought to be in local format. */
+ if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+ (uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize)
+ return -EFSCORRUPTED;
+
+ return 0;
+}
+
/*
* Create a new (shortform) directory.
*/
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 25c1e07..8a37efe 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -33,6 +33,8 @@
#include "xfs_trace.h"
#include "xfs_attr_sf.h"
#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_priv.h"
kmem_zone_t *xfs_ifork_zone;
@@ -210,6 +212,16 @@ xfs_iformat_fork(
if (error)
return error;
+ /* Check inline dir contents. */
+ if (S_ISDIR(VFS_I(ip)->i_mode) &&
+ dip->di_format == XFS_DINODE_FMT_LOCAL) {
+ error = xfs_dir2_sf_verify(ip);
+ if (error) {
+ xfs_idestroy_fork(ip, XFS_DATA_FORK);
+ return error;
+ }
+ }
+
if (xfs_is_reflink_inode(ip)) {
ASSERT(ip->i_cowfp == NULL);
xfs_ifork_init_cow(ip);
@@ -320,7 +332,6 @@ xfs_iformat_local(
int whichfork,
int size)
{
-
/*
* If the size is unreasonable, then something
* is wrong and we just bail out rather than crash in
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index b177ef3..82a38d8 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1629,13 +1629,28 @@ xfs_refcount_recover_cow_leftovers(
if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START)
return -EOPNOTSUPP;
- error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+ INIT_LIST_HEAD(&debris);
+
+ /*
+ * In this first part, we use an empty transaction to gather up
+ * all the leftover CoW extents so that we can subsequently
+ * delete them. The empty transaction is used to avoid
+ * a buffer lock deadlock if there happens to be a loop in the
+ * refcountbt because we're allowed to re-grab a buffer that is
+ * already attached to our transaction. When we're done
+ * recording the CoW debris we cancel the (empty) transaction
+ * and everything goes away cleanly.
+ */
+ error = xfs_trans_alloc_empty(mp, &tp);
if (error)
return error;
- cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);
+
+ error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+ if (error)
+ goto out_trans;
+ cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
/* Find all the leftover CoW staging extents. */
- INIT_LIST_HEAD(&debris);
memset(&low, 0, sizeof(low));
memset(&high, 0, sizeof(high));
low.rc.rc_startblock = XFS_REFC_COW_START;
@@ -1645,10 +1660,11 @@ xfs_refcount_recover_cow_leftovers(
if (error)
goto out_cursor;
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
- xfs_buf_relse(agbp);
+ xfs_trans_brelse(tp, agbp);
+ xfs_trans_cancel(tp);
/* Now iterate the list to free the leftovers */
- list_for_each_entry(rr, &debris, rr_list) {
+ list_for_each_entry_safe(rr, n, &debris, rr_list) {
/* Set up transaction. */
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
if (error)
@@ -1676,8 +1692,16 @@ xfs_refcount_recover_cow_leftovers(
error = xfs_trans_commit(tp);
if (error)
goto out_free;
+
+ list_del(&rr->rr_list);
+ kmem_free(rr);
}
+ return error;
+out_defer:
+ xfs_defer_cancel(&dfops);
+out_trans:
+ xfs_trans_cancel(tp);
out_free:
/* Free the leftover list */
list_for_each_entry_safe(rr, n, &debris, rr_list) {
@@ -1688,11 +1712,6 @@ xfs_refcount_recover_cow_leftovers(
out_cursor:
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
- xfs_buf_relse(agbp);
- goto out_free;
-
-out_defer:
- xfs_defer_cancel(&dfops);
- xfs_trans_cancel(tp);
- goto out_free;
+ xfs_trans_brelse(tp, agbp);
+ goto out_trans;
}
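
Illustrative sketch (not part of the patch): the recovery loop above now frees each debris entry inside the walk, which is why it switches from list_for_each_entry() to list_for_each_entry_safe() — the iterator must cache the next pointer before the current node is released. A standalone C stand-in with an invented singly linked list shows the same idiom:

/* safe_iter.c - deletion during traversal; all names are invented */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *cur, *next;
	int i;

	for (i = 0; i < 3; i++) {		/* build 2 -> 1 -> 0 */
		cur = malloc(sizeof(*cur));
		cur->val = i;
		cur->next = head;
		head = cur;
	}

	/* "safe" traversal: grab ->next before freeing cur */
	for (cur = head; cur; cur = next) {
		next = cur->next;
		printf("freeing %d\n", cur->val);
		free(cur);			/* cur is now invalid; next is not */
	}
	return 0;
}
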
diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h
index 7917f6e..d787c67 100644
--- a/fs/xfs/libxfs/xfs_trans_space.h
+++ b/fs/xfs/libxfs/xfs_trans_space.h
@@ -21,8 +21,20 @@
/*
* Components of space reservations.
*/
+
+/* Worst case number of rmaps that can be held in a block. */
#define XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) \
(((mp)->m_rmap_mxr[0]) - ((mp)->m_rmap_mnr[0]))
+
+/* Adding one rmap could split every level up to the top of the tree. */
+#define XFS_RMAPADD_SPACE_RES(mp) ((mp)->m_rmap_maxlevels)
+
+/* Blocks we might need to add "b" rmaps to a tree. */
+#define XFS_NRMAPADD_SPACE_RES(mp, b)\
+ (((b + XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) - 1) / \
+ XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)) * \
+ XFS_RMAPADD_SPACE_RES(mp))
+
#define XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) \
(((mp)->m_alloc_mxr[0]) - ((mp)->m_alloc_mnr[0]))
#define XFS_EXTENTADD_SPACE_RES(mp,w) (XFS_BM_MAXLEVELS(mp,w) - 1)
@@ -30,13 +42,12 @@
(((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \
XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \
XFS_EXTENTADD_SPACE_RES(mp,w))
+
+/* Blocks we might need to add "b" mappings & rmappings to a file. */
#define XFS_SWAP_RMAP_SPACE_RES(mp,b,w)\
- (((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \
- XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \
- XFS_EXTENTADD_SPACE_RES(mp,w) + \
- ((b + XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) - 1) / \
- XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)) * \
- (mp)->m_rmap_maxlevels)
+ (XFS_NEXTENTADD_SPACE_RES((mp), (b), (w)) + \
+ XFS_NRMAPADD_SPACE_RES((mp), (b)))
+
#define XFS_DAENTER_1B(mp,w) \
((w) == XFS_DATA_FORK ? (mp)->m_dir_geo->fsbcount : 1)
#define XFS_DAENTER_DBS(mp,w) \
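
Illustrative sketch (not part of the patch): the new XFS_NRMAPADD_SPACE_RES macro above uses the usual ceil-division reservation pattern — (b + per_block - 1) / per_block worst-case insertions, each of which may split one block per tree level. A minimal userspace sketch with invented names:

/* resv.c - worst-case block reservation arithmetic; names are invented */
#include <stdio.h>

static unsigned long blocks_needed(unsigned long records,
				   unsigned long records_per_block,
				   unsigned long levels_per_split)
{
	/* round up: 1..per_block records -> 1 split, per_block+1 -> 2, ... */
	unsigned long splits =
		(records + records_per_block - 1) / records_per_block;

	return splits * levels_per_split;
}

int main(void)
{
	/* e.g. 100 rmaps, 42 fit per block, tree is 3 levels deep */
	printf("%lu\n", blocks_needed(100, 42, 3));	/* ceil(100/42)=3 -> 9 */
	return 0;
}
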
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 0457abe..6df0a7c 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -116,11 +116,11 @@ xfs_finish_page_writeback(
bsize = bh->b_size;
do {
+ if (off > end)
+ break;
next = bh->b_this_page;
if (off < bvec->bv_offset)
goto next_bh;
- if (off > end)
- break;
bh->b_end_io(bh, !error);
next_bh:
off += bsize;
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index 9bf57c7..c4b90e7 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -34,6 +34,8 @@
#include "xfs_bmap.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_trans_space.h"
kmem_zone_t *xfs_bui_zone;
@@ -446,7 +448,8 @@ xfs_bui_recover(
return -EIO;
}
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
+ XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
if (error)
return error;
budp = xfs_trans_get_bud(tp, buip);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5328ecd..87b495e 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -588,9 +588,13 @@ xfs_getbmap(
}
break;
default:
+ /* Local format data forks report no extents. */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+ bmv->bmv_entries = 0;
+ return 0;
+ }
if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
- ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
- ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+ ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
return -EINVAL;
if (xfs_get_extsz_hint(ip) ||
@@ -718,7 +722,7 @@ xfs_getbmap(
* extents.
*/
if (map[i].br_startblock == DELAYSTARTBLOCK &&
- map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
+ map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
ASSERT((iflags & BMV_IF_DELALLOC) != 0);
if (map[i].br_startblock == HOLESTARTBLOCK &&
@@ -911,9 +915,9 @@ xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
}
/*
- * This is called by xfs_inactive to free any blocks beyond eof
- * when the link count isn't zero and by xfs_dm_punch_hole() when
- * punching a hole to EOF.
+ * This is called to free any blocks beyond eof. The caller must hold
+ * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
+ * reference to the inode.
*/
int
xfs_free_eofblocks(
@@ -928,8 +932,6 @@ xfs_free_eofblocks(
struct xfs_bmbt_irec imap;
struct xfs_mount *mp = ip->i_mount;
- ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
-
/*
* Figure out if there are any blocks beyond the end
* of the file. If not, then there is nothing to do.
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index d7a67d7..1626927 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -96,12 +96,16 @@ static inline void
xfs_buf_ioacct_inc(
struct xfs_buf *bp)
{
- if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT))
+ if (bp->b_flags & XBF_NO_IOACCT)
return;
ASSERT(bp->b_flags & XBF_ASYNC);
- bp->b_flags |= _XBF_IN_FLIGHT;
- percpu_counter_inc(&bp->b_target->bt_io_count);
+ spin_lock(&bp->b_lock);
+ if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
+ bp->b_state |= XFS_BSTATE_IN_FLIGHT;
+ percpu_counter_inc(&bp->b_target->bt_io_count);
+ }
+ spin_unlock(&bp->b_lock);
}
/*
@@ -109,14 +113,24 @@ xfs_buf_ioacct_inc(
* freed and unaccount from the buftarg.
*/
static inline void
+__xfs_buf_ioacct_dec(
+ struct xfs_buf *bp)
+{
+ ASSERT(spin_is_locked(&bp->b_lock));
+
+ if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
+ bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
+ percpu_counter_dec(&bp->b_target->bt_io_count);
+ }
+}
+
+static inline void
xfs_buf_ioacct_dec(
struct xfs_buf *bp)
{
- if (!(bp->b_flags & _XBF_IN_FLIGHT))
- return;
-
- bp->b_flags &= ~_XBF_IN_FLIGHT;
- percpu_counter_dec(&bp->b_target->bt_io_count);
+ spin_lock(&bp->b_lock);
+ __xfs_buf_ioacct_dec(bp);
+ spin_unlock(&bp->b_lock);
}
/*
@@ -148,9 +162,9 @@ xfs_buf_stale(
* unaccounted (released to LRU) before that occurs. Drop in-flight
* status now to preserve accounting consistency.
*/
- xfs_buf_ioacct_dec(bp);
-
spin_lock(&bp->b_lock);
+ __xfs_buf_ioacct_dec(bp);
+
atomic_set(&bp->b_lru_ref, 0);
if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
(list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -953,12 +967,12 @@ xfs_buf_rele(
* ensures the decrement occurs only once per-buf.
*/
if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
- xfs_buf_ioacct_dec(bp);
+ __xfs_buf_ioacct_dec(bp);
goto out_unlock;
}
/* the last reference has been dropped ... */
- xfs_buf_ioacct_dec(bp);
+ __xfs_buf_ioacct_dec(bp);
if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
/*
* If the buffer is added to the LRU take a new reference to the
@@ -1052,6 +1066,8 @@ void
xfs_buf_unlock(
struct xfs_buf *bp)
{
+ ASSERT(xfs_buf_islocked(bp));
+
XB_CLEAR_OWNER(bp);
up(&bp->b_sema);
@@ -1790,6 +1806,28 @@ xfs_alloc_buftarg(
}
/*
+ * Cancel a delayed write list.
+ *
+ * Remove each buffer from the list, clear the delwri queue flag and drop the
+ * associated buffer reference.
+ */
+void
+xfs_buf_delwri_cancel(
+ struct list_head *list)
+{
+ struct xfs_buf *bp;
+
+ while (!list_empty(list)) {
+ bp = list_first_entry(list, struct xfs_buf, b_list);
+
+ xfs_buf_lock(bp);
+ bp->b_flags &= ~_XBF_DELWRI_Q;
+ list_del_init(&bp->b_list);
+ xfs_buf_relse(bp);
+ }
+}
+
+/*
* Add a buffer to the delayed write list.
*
* This queues a buffer for writeout if it hasn't already been. Note that
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 1c2e52b..ad514a8 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -63,7 +63,6 @@ typedef enum {
#define _XBF_KMEM (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */
#define _XBF_COMPOUND (1 << 23)/* compound buffer */
-#define _XBF_IN_FLIGHT (1 << 25) /* I/O in flight, for accounting purposes */
typedef unsigned int xfs_buf_flags_t;
@@ -83,14 +82,14 @@ typedef unsigned int xfs_buf_flags_t;
{ _XBF_PAGES, "PAGES" }, \
{ _XBF_KMEM, "KMEM" }, \
{ _XBF_DELWRI_Q, "DELWRI_Q" }, \
- { _XBF_COMPOUND, "COMPOUND" }, \
- { _XBF_IN_FLIGHT, "IN_FLIGHT" }
+ { _XBF_COMPOUND, "COMPOUND" }
/*
* Internal state flags.
*/
#define XFS_BSTATE_DISPOSE (1 << 0) /* buffer being discarded */
+#define XFS_BSTATE_IN_FLIGHT (1 << 1) /* I/O in flight */
/*
* The xfs_buftarg contains 2 notions of "sector size" -
@@ -330,6 +329,7 @@ extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);
/* Delayed Write Buffer Routines */
+extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 2981698..eba6316 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -71,22 +71,11 @@ xfs_dir2_sf_getdents(
struct xfs_da_geometry *geo = args->geo;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
- /*
- * Give up if the directory is way too short.
- */
- if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return -EIO;
- }
-
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
- if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count))
- return -EFSCORRUPTED;
-
/*
* If the block number in the offset is out of range, we're done.
*/
@@ -405,6 +394,7 @@ xfs_dir2_leaf_readbuf(
/*
* Do we need more readahead?
+ * Each loop tries to process 1 full dir blk; last may be partial.
*/
blk_start_plug(&plug);
for (mip->ra_index = mip->ra_offset = i = 0;
@@ -415,7 +405,8 @@ xfs_dir2_leaf_readbuf(
* Read-ahead a contiguous directory block.
*/
if (i > mip->ra_current &&
- map[mip->ra_index].br_blockcount >= geo->fsbcount) {
+ (map[mip->ra_index].br_blockcount - mip->ra_offset) >=
+ geo->fsbcount) {
xfs_dir3_data_readahead(dp,
map[mip->ra_index].br_startoff + mip->ra_offset,
XFS_FSB_TO_DADDR(dp->i_mount,
@@ -436,14 +427,19 @@ xfs_dir2_leaf_readbuf(
}
/*
- * Advance offset through the mapping table.
+ * Advance offset through the mapping table, processing a full
+ * dir block even if it is fragmented into several extents.
+ * But stop if we have consumed all valid mappings, even if
+ * it's not yet a full directory block.
*/
- for (j = 0; j < geo->fsbcount; j += length ) {
+ for (j = 0;
+ j < geo->fsbcount && mip->ra_index < mip->map_valid;
+ j += length ) {
/*
* The rest of this extent but not more than a dir
* block.
*/
- length = min_t(int, geo->fsbcount,
+ length = min_t(int, geo->fsbcount - j,
map[mip->ra_index].br_blockcount -
mip->ra_offset);
mip->ra_offset += length;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 1209ad2..a90ec3f 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1130,13 +1130,13 @@ xfs_find_get_desired_pgoff(
index = startoff >> PAGE_SHIFT;
endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
- end = endoff >> PAGE_SHIFT;
+ end = (endoff - 1) >> PAGE_SHIFT;
do {
int want;
unsigned nr_pages;
unsigned int i;
- want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+ want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
want);
/*
@@ -1163,17 +1163,6 @@ xfs_find_get_desired_pgoff(
break;
}
- /*
- * At lease we found one page. If this is the first time we
- * step into the loop, and if the first page index offset is
- * greater than the given search offset, a hole was found.
- */
- if (type == HOLE_OFF && lastoff == startoff &&
- lastoff < page_offset(pvec.pages[0])) {
- found = true;
- break;
- }
-
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
loff_t b_offset;
@@ -1185,18 +1174,18 @@ xfs_find_get_desired_pgoff(
* file mapping. However, page->index will not change
* because we have a reference on the page.
*
- * Searching done if the page index is out of range.
- * If the current offset is not reaches the end of
- * the specified search range, there should be a hole
- * between them.
+ * If current page offset is beyond where we've ended,
+ * we've found a hole.
*/
- if (page->index > end) {
- if (type == HOLE_OFF && lastoff < endoff) {
- *offset = lastoff;
- found = true;
- }
+ if (type == HOLE_OFF && lastoff < endoff &&
+ lastoff < page_offset(pvec.pages[i])) {
+ found = true;
+ *offset = lastoff;
goto out;
}
+ /* Searching done if the page index is out of range. */
+ if (page->index > end)
+ goto out;
lock_page(page);
/*
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 3fb1f3f..74304b6 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -264,6 +264,22 @@ xfs_inode_clear_reclaim_tag(
xfs_perag_clear_reclaim_tag(pag);
}
+static void
+xfs_inew_wait(
+ struct xfs_inode *ip)
+{
+ wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
+ DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
+
+ do {
+ prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+ if (!xfs_iflags_test(ip, XFS_INEW))
+ break;
+ schedule();
+ } while (true);
+ finish_wait(wq, &wait.wait);
+}
+
/*
* When we recycle a reclaimable inode, we need to re-initialise the VFS inode
* part of the structure. This is made more complex by the fact we store
@@ -368,14 +384,17 @@ xfs_iget_cache_hit(
error = xfs_reinit_inode(mp, inode);
if (error) {
+ bool wake;
/*
* Re-initializing the inode failed, and we are in deep
* trouble. Try to re-add it to the reclaim list.
*/
rcu_read_lock();
spin_lock(&ip->i_flags_lock);
-
+ wake = !!__xfs_iflags_test(ip, XFS_INEW);
ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
+ if (wake)
+ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
trace_xfs_iget_reclaim_fail(ip);
goto out_error;
@@ -625,9 +644,11 @@ xfs_iget(
STATIC int
xfs_inode_ag_walk_grab(
- struct xfs_inode *ip)
+ struct xfs_inode *ip,
+ int flags)
{
struct inode *inode = VFS_I(ip);
+ bool newinos = !!(flags & XFS_AGITER_INEW_WAIT);
ASSERT(rcu_read_lock_held());
@@ -645,7 +666,8 @@ xfs_inode_ag_walk_grab(
goto out_unlock_noent;
/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
- if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+ if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
+ __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
goto out_unlock_noent;
spin_unlock(&ip->i_flags_lock);
@@ -673,7 +695,8 @@ xfs_inode_ag_walk(
void *args),
int flags,
void *args,
- int tag)
+ int tag,
+ int iter_flags)
{
uint32_t first_index;
int last_error = 0;
@@ -715,7 +738,7 @@ xfs_inode_ag_walk(
for (i = 0; i < nr_found; i++) {
struct xfs_inode *ip = batch[i];
- if (done || xfs_inode_ag_walk_grab(ip))
+ if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
batch[i] = NULL;
/*
@@ -743,6 +766,9 @@ xfs_inode_ag_walk(
for (i = 0; i < nr_found; i++) {
if (!batch[i])
continue;
+ if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
+ xfs_iflags_test(batch[i], XFS_INEW))
+ xfs_inew_wait(batch[i]);
error = execute(batch[i], flags, args);
IRELE(batch[i]);
if (error == -EAGAIN) {
@@ -822,12 +848,13 @@ xfs_cowblocks_worker(
}
int
-xfs_inode_ag_iterator(
+xfs_inode_ag_iterator_flags(
struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags,
void *args),
int flags,
- void *args)
+ void *args,
+ int iter_flags)
{
struct xfs_perag *pag;
int error = 0;
@@ -837,7 +864,8 @@ xfs_inode_ag_iterator(
ag = 0;
while ((pag = xfs_perag_get(mp, ag))) {
ag = pag->pag_agno + 1;
- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
+ error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
+ iter_flags);
xfs_perag_put(pag);
if (error) {
last_error = error;
@@ -849,6 +877,17 @@ xfs_inode_ag_iterator(
}
int
+xfs_inode_ag_iterator(
+ struct xfs_mount *mp,
+ int (*execute)(struct xfs_inode *ip, int flags,
+ void *args),
+ int flags,
+ void *args)
+{
+ return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
+}
+
+int
xfs_inode_ag_iterator_tag(
struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags,
@@ -865,7 +904,8 @@ xfs_inode_ag_iterator_tag(
ag = 0;
while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
ag = pag->pag_agno + 1;
- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
+ error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
+ 0);
xfs_perag_put(pag);
if (error) {
last_error = error;
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 8a7c849..9183f77 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -48,6 +48,11 @@ struct xfs_eofblocks {
#define XFS_IGET_UNTRUSTED 0x2
#define XFS_IGET_DONTCACHE 0x4
+/*
+ * flags for AG inode iterator
+ */
+#define XFS_AGITER_INEW_WAIT 0x1 /* wait on new inodes */
+
int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino,
uint flags, uint lock_flags, xfs_inode_t **ipp);
@@ -79,6 +84,9 @@ void xfs_cowblocks_worker(struct work_struct *);
int xfs_inode_ag_iterator(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags, void *args),
int flags, void *args);
+int xfs_inode_ag_iterator_flags(struct xfs_mount *mp,
+ int (*execute)(struct xfs_inode *ip, int flags, void *args),
+ int flags, void *args, int iter_flags);
int xfs_inode_ag_iterator_tag(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags, void *args),
int flags, void *args, int tag);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index e50636c..7a0b4ee 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -50,6 +50,7 @@
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
+#include "xfs_dir2_priv.h"
kmem_zone_t *xfs_inode_zone;
@@ -1914,12 +1915,13 @@ xfs_inactive(
* force is true because we are evicting an inode from the
* cache. Post-eof blocks must be freed, lest we end up with
* broken free space accounting.
+ *
+ * Note: don't bother with iolock here since lockdep complains
+ * about acquiring it in reclaim context. We have the only
+ * reference to the inode at this point anyways.
*/
- if (xfs_can_free_eofblocks(ip, true)) {
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
+ if (xfs_can_free_eofblocks(ip, true))
xfs_free_eofblocks(ip);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- }
return;
}
@@ -3562,6 +3564,12 @@ xfs_iflush_int(
if (ip->i_d.di_version < 3)
ip->i_d.di_flushiter++;
+ /* Check the inline directory data. */
+ if (S_ISDIR(VFS_I(ip)->i_mode) &&
+ ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
+ xfs_dir2_sf_verify(ip))
+ goto corrupt_out;
+
/*
* Copy the dirty parts of the inode into the on-disk inode. We always
* copy out the core of the inode, because if the inode is dirty at all
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 71e8a81..c038f6e 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -217,7 +217,8 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
#define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */
#define XFS_ISTALE (1 << 1) /* inode has been staled */
#define XFS_IRECLAIMABLE (1 << 2) /* inode can be reclaimed */
-#define XFS_INEW (1 << 3) /* inode has just been allocated */
+#define __XFS_INEW_BIT 3 /* inode has just been allocated */
+#define XFS_INEW (1 << __XFS_INEW_BIT)
#define XFS_ITRUNCATED (1 << 5) /* truncated down so flush-on-close */
#define XFS_IDIRTY_RELEASE (1 << 6) /* dirty release already seen */
#define __XFS_IFLOCK_BIT 7 /* inode is being flushed right now */
@@ -467,6 +468,7 @@ static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
xfs_iflags_clear(ip, XFS_INEW);
barrier();
unlock_new_inode(VFS_I(ip));
+ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
}
static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
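
Illustrative sketch (not part of the patch): the __XFS_INEW_BIT changes above implement a "sleep until the flag clears, the clearer wakes waiters" handshake — xfs_inew_wait() re-checks the flag after every wakeup, and xfs_finish_inode_setup() clears it and calls wake_up_bit(). A userspace analog using pthreads, with all names invented:

/* inew_wait.c - wait-for-flag-to-clear pattern; build with -pthread */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int inew = 1;			/* "inode is still being set up" */

static void *waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (inew)			/* like xfs_inew_wait(): re-check after wakeup */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("inode is ready\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);

	pthread_mutex_lock(&lock);
	inew = 0;			/* like clearing XFS_INEW */
	pthread_cond_broadcast(&cond);	/* like wake_up_bit() */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}
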
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index a391975..73cfc71 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1542,10 +1542,11 @@ xfs_ioc_getbmap(
unsigned int cmd,
void __user *arg)
{
- struct getbmapx bmx;
+ struct getbmapx bmx = { 0 };
int error;
- if (copy_from_user(&bmx, arg, sizeof(struct getbmapx)))
+ /* struct getbmap is a strict subset of struct getbmapx. */
+ if (copy_from_user(&bmx, arg, offsetof(struct getbmapx, bmv_iflags)))
return -EFAULT;
if (bmx.bmv_count < 2)
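
Illustrative sketch (not part of the patch): the getbmap hunk above copies only the fields shared with struct getbmapx by using offsetof() of the first getbmapx-only member as the copy length, with the rest of the struct zero-initialised. A minimal standalone sketch of that prefix-copy trick, with invented struct names:

/* prefix_copy.c - copy only the common leading fields of a superset struct */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct small {			/* old/user-visible layout */
	long offset;
	long length;
};

struct big {			/* superset: shares a common prefix with small */
	long offset;
	long length;
	unsigned flags;		/* extra field, must stay zeroed here */
};

int main(void)
{
	struct small in = { 10, 20 };
	struct big out = { 0 };	/* zero-init so the tail fields are defined */

	/* copy only up to (not including) the first field that differs */
	memcpy(&out, &in, offsetof(struct big, flags));

	printf("%ld %ld %u\n", out.offset, out.length, out.flags);
	return 0;
}
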
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 3605624..65740d1 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1151,10 +1151,10 @@ xfs_xattr_iomap_begin(
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- lockmode = xfs_ilock_data_map_shared(ip);
+ lockmode = xfs_ilock_attr_map_shared(ip);
/* if there are no attribute fork or extents, return ENOENT */
- if (XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
+ if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
error = -ENOENT;
goto out_unlock;
}
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 66e8817..d8a77db 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -585,7 +585,7 @@ xfs_inumbers(
return error;
bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
- buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
+ buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
do {
struct xfs_inobt_rec_incore r;
int stat;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 4017aa9..b57ab34 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1293,7 +1293,7 @@ void
xfs_log_work_queue(
struct xfs_mount *mp)
{
- queue_delayed_work(mp->m_log_workqueue, &mp->m_log->l_work,
+ queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
msecs_to_jiffies(xfs_syncd_centisecs * 10));
}
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 1bf878b..5415f90 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -183,6 +183,7 @@ typedef struct xfs_mount {
struct workqueue_struct *m_reclaim_workqueue;
struct workqueue_struct *m_log_workqueue;
struct workqueue_struct *m_eofblocks_workqueue;
+ struct workqueue_struct *m_sync_workqueue;
/*
* Generation of the filesysyem layout. This is incremented by each
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index b669b12..8b9a9f1 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1384,12 +1384,7 @@ xfs_qm_quotacheck(
mp->m_qflags |= flags;
error_return:
- while (!list_empty(&buffer_list)) {
- struct xfs_buf *bp =
- list_first_entry(&buffer_list, struct xfs_buf, b_list);
- list_del_init(&bp->b_list);
- xfs_buf_relse(bp);
- }
+ xfs_buf_delwri_cancel(&buffer_list);
if (error) {
xfs_warn(mp,
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 475a388..9cb5c38 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -759,5 +759,6 @@ xfs_qm_dqrele_all_inodes(
uint flags)
{
ASSERT(mp->m_quotainfo);
- xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL);
+ xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
+ XFS_AGITER_INEW_WAIT);
}
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 2252f16..29a75ec 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -736,8 +736,22 @@ xfs_reflink_end_cow(
offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
- /* Start a rolling transaction to switch the mappings */
- resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
+ /*
+ * Start a rolling transaction to switch the mappings. We're
+ * unlikely ever to have to remap 16T worth of single-block
+ * extents, so just cap the worst case extent count to 2^32-1.
+ * Stick a warning in just in case, and avoid 64-bit division.
+ */
+ BUILD_BUG_ON(MAX_RW_COUNT > UINT_MAX);
+ if (end_fsb - offset_fsb > UINT_MAX) {
+ error = -EFSCORRUPTED;
+ xfs_force_shutdown(ip->i_mount, SHUTDOWN_CORRUPT_INCORE);
+ ASSERT(0);
+ goto out;
+ }
+ resblks = XFS_NEXTENTADD_SPACE_RES(ip->i_mount,
+ (unsigned int)(end_fsb - offset_fsb),
+ XFS_DATA_FORK);
error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
resblks, 0, 0, &tp);
if (error)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index dbbd3f1..882fb85 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -872,8 +872,15 @@ xfs_init_mount_workqueues(
if (!mp->m_eofblocks_workqueue)
goto out_destroy_log;
+ mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
+ mp->m_fsname);
+ if (!mp->m_sync_workqueue)
+ goto out_destroy_eofb;
+
return 0;
+out_destroy_eofb:
+ destroy_workqueue(mp->m_eofblocks_workqueue);
out_destroy_log:
destroy_workqueue(mp->m_log_workqueue);
out_destroy_reclaim:
@@ -894,6 +901,7 @@ STATIC void
xfs_destroy_mount_workqueues(
struct xfs_mount *mp)
{
+ destroy_workqueue(mp->m_sync_workqueue);
destroy_workqueue(mp->m_eofblocks_workqueue);
destroy_workqueue(mp->m_log_workqueue);
destroy_workqueue(mp->m_reclaim_workqueue);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 70f42ea..a280e12 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -263,6 +263,28 @@ xfs_trans_alloc(
}
/*
+ * Create an empty transaction with no reservation. This is a defensive
+ * mechanism for routines that query metadata without actually modifying
+ * them -- if the metadata being queried is somehow cross-linked (think a
+ * btree block pointer that points higher in the tree), we risk deadlock.
+ * However, blocks grabbed as part of a transaction can be re-grabbed.
+ * The verifiers will notice the corrupt block and the operation will fail
+ * back to userspace without deadlocking.
+ *
+ * Note the zero-length reservation; this transaction MUST be cancelled
+ * without any dirty data.
+ */
+int
+xfs_trans_alloc_empty(
+ struct xfs_mount *mp,
+ struct xfs_trans **tpp)
+{
+ struct xfs_trans_res resv = {0};
+
+ return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
+}
+
+/*
* Record the indicated change to the given field for application
* to the file system's superblock when the transaction commits.
* For now, just store the change in the transaction structure.
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 61b7fbd..98024cb 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -159,6 +159,8 @@ typedef struct xfs_trans {
int xfs_trans_alloc(struct xfs_mount *mp, struct xfs_trans_res *resp,
uint blocks, uint rtextents, uint flags,
struct xfs_trans **tpp);
+int xfs_trans_alloc_empty(struct xfs_mount *mp,
+ struct xfs_trans **tpp);
void xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
struct xfs_buf *xfs_trans_get_buf_map(struct xfs_trans *tp,
diff --git a/include/dt-bindings/clock/mdss-10nm-pll-clk.h b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
index c1350ce..75ddcfa 100644
--- a/include/dt-bindings/clock/mdss-10nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
@@ -34,4 +34,12 @@
#define PCLK_SRC_MUX_1_CLK 15
#define PCLK_SRC_1_CLK 16
#define PCLK_MUX_1_CLK 17
+
+/* DP PLL clocks */
+#define DP_VCO_CLK 0
+#define DP_LINK_CLK_DIVSEL_TEN 1
+#define DP_VCO_DIVIDED_TWO_CLK_SRC 2
+#define DP_VCO_DIVIDED_FOUR_CLK_SRC 3
+#define DP_VCO_DIVIDED_SIX_CLK_SRC 4
+#define DP_VCO_DIVIDED_CLK_SRC_MUX 5
#endif
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index be2210c..9d52d2e 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -43,6 +43,7 @@
#define MSM_BUS_FAB_MC_VIRT 6151
#define MSM_BUS_FAB_MEM_NOC 6152
#define MSM_BUS_FAB_IPA_VIRT 6153
+#define MSM_BUS_FAB_CAMNOC_VIRT 6154
#define MSM_BUS_FAB_MC_VIRT_DISPLAY 26000
#define MSM_BUS_FAB_MEM_NOC_DISPLAY 26001
@@ -236,7 +237,7 @@
#define MSM_BUS_MASTER_MNOC_SF_MEM_NOC 133
#define MSM_BUS_MASTER_SNOC_GC_MEM_NOC 134
#define MSM_BUS_MASTER_SNOC_SF_MEM_NOC 135
-#define MSM_BUS_MASTER_CAMNOC_HF 136
+#define MSM_BUS_MASTER_CAMNOC_HF0 136
#define MSM_BUS_MASTER_CAMNOC_SF 137
#define MSM_BUS_MASTER_VIDEO_PROC 138
#define MSM_BUS_MASTER_GNOC_SNOC 139
@@ -245,7 +246,11 @@
#define MSM_BUS_MASTER_MEM_NOC_SNOC 142
#define MSM_BUS_MASTER_IPA_CORE 143
#define MSM_BUS_MASTER_ALC 144
-#define MSM_BUS_MASTER_MASTER_LAST 145
+#define MSM_BUS_MASTER_CAMNOC_HF1 145
+#define MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP 146
+#define MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP 147
+#define MSM_BUS_MASTER_CAMNOC_SF_UNCOMP 148
+#define MSM_BUS_MASTER_MASTER_LAST 149
#define MSM_BUS_MASTER_LLCC_DISPLAY 20000
#define MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001
@@ -584,7 +589,8 @@
#define MSM_BUS_SLAVE_SNOC_MEM_NOC_SF 775
#define MSM_BUS_SLAVE_MEM_NOC_SNOC 776
#define MSM_BUS_SLAVE_IPA 777
-#define MSM_BUS_SLAVE_LAST 778
+#define MSM_BUS_SLAVE_CAMNOC_UNCOMP 778
+#define MSM_BUS_SLAVE_LAST 779
#define MSM_BUS_SLAVE_EBI_CH0_DISPLAY 20512
#define MSM_BUS_SLAVE_LLCC_DISPLAY 20513
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index ec80d0c..ace92fc 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -1,7 +1,6 @@
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H
-#include <linux/bpf.h>
#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>
@@ -22,20 +21,19 @@ struct cgroup_bpf {
*/
struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
struct bpf_prog *effective[MAX_BPF_ATTACH_TYPE];
+ bool disallow_override[MAX_BPF_ATTACH_TYPE];
};
void cgroup_bpf_put(struct cgroup *cgrp);
void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
-void __cgroup_bpf_update(struct cgroup *cgrp,
- struct cgroup *parent,
- struct bpf_prog *prog,
- enum bpf_attach_type type);
+int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
+ struct bpf_prog *prog, enum bpf_attach_type type,
+ bool overridable);
/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
-void cgroup_bpf_update(struct cgroup *cgrp,
- struct bpf_prog *prog,
- enum bpf_attach_type type);
+int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
+ enum bpf_attach_type type, bool overridable);
int __cgroup_bpf_run_filter(struct sock *sk,
struct sk_buff *skb,
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 0e1e050..cf86f52 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -440,6 +440,13 @@ union map_info *dm_get_rq_mapinfo(struct request *rq);
struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
+void dm_lock_md_type(struct mapped_device *md);
+void dm_unlock_md_type(struct mapped_device *md);
+void dm_set_md_type(struct mapped_device *md, unsigned type);
+unsigned dm_get_md_type(struct mapped_device *md);
+int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
+unsigned dm_table_get_type(struct dm_table *t);
+
/*
* Geometry functions.
*/
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 3319d97..8feecd5 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -630,14 +630,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
netdev_features_t features)
{
- if (skb_vlan_tagged_multi(skb))
- features = netdev_intersect_features(features,
- NETIF_F_SG |
- NETIF_F_HIGHDMA |
- NETIF_F_FRAGLIST |
- NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX);
+ if (skb_vlan_tagged_multi(skb)) {
+ /* In the case of multi-tagged packets, use a direct mask
+ * instead of using netdev_intersect_features(), to make
+ * sure that only devices supporting NETIF_F_HW_CSUM will
+ * have checksum offloading support.
+ */
+ features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+ NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ }
return features;
}
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e3d181e..7bdddb3 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -244,6 +244,8 @@ struct iommu_ops {
void (*tlbi_domain)(struct iommu_domain *domain);
int (*enable_config_clocks)(struct iommu_domain *domain);
void (*disable_config_clocks)(struct iommu_domain *domain);
+ uint64_t (*iova_to_pte)(struct iommu_domain *domain,
+ dma_addr_t iova);
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
@@ -331,6 +333,9 @@ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t offset, u64 size,
int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
+
+extern uint64_t iommu_iova_to_pte(struct iommu_domain *domain,
+ dma_addr_t iova);
/**
* report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
* @domain: the iommu domain where the fault has happened
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f7033fa..d6ebc01 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -424,12 +424,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
}
#endif
+extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+ phys_addr_t end_addr);
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
return 0;
}
+static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+ phys_addr_t end_addr)
+{
+ return 0;
+}
+
#endif /* CONFIG_HAVE_MEMBLOCK */
#endif /* __KERNEL__ */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ecc451d..e1a903a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -640,7 +640,12 @@ enum {
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
+enum {
+ MLX5_CMD_ENT_STATE_PENDING_COMP,
+};
+
struct mlx5_cmd_work_ent {
+ unsigned long state;
struct mlx5_cmd_msg *in;
struct mlx5_cmd_msg *out;
void *uout;
@@ -838,7 +843,7 @@ void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
#endif
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, struct mlx5_uar *uar);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 0e6a54c..2f943a0 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -347,7 +347,9 @@ struct mmc_devfeq_clk_scaling {
atomic_t devfreq_abort;
bool skip_clk_scale_freq_update;
int freq_table_sz;
+ int pltfm_freq_table_sz;
u32 *freq_table;
+ u32 *pltfm_freq_table;
unsigned long total_busy_time_us;
unsigned long target_freq;
unsigned long curr_freq;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7e273e2..6744eb4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -672,6 +672,7 @@ typedef struct pglist_data {
* is the first PFN that needs to be initialised.
*/
unsigned long first_deferred_pfn;
+ unsigned long static_init_size;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 72f9211..4381570 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -248,6 +248,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
POWER_SUPPLY_PROP_HW_CURRENT_MAX,
POWER_SUPPLY_PROP_REAL_TYPE,
+ POWER_SUPPLY_PROP_PR_SWAP,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index decb943..6c6ae4d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -178,7 +178,9 @@ extern u64 nr_running_integral(unsigned int cpu);
#endif
extern void sched_update_nr_prod(int cpu, long delta, bool inc);
-extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg);
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
+ unsigned int *max_nr,
+ unsigned int *big_max_nr);
extern unsigned int sched_get_cpu_util(int cpu);
extern void calc_global_load(unsigned long ticks);
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 8491bdc..9b0d5cb 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -43,6 +43,9 @@
/* Default weight of a bound cooling device */
#define THERMAL_WEIGHT_DEFAULT 0
+/* Max sensors that can be used for a single virtual thermalzone */
+#define THERMAL_MAX_VIRT_SENSORS 5
+
/* use value, which < 0K, to indicate an invalid/uninitialized temperature */
#define THERMAL_TEMP_INVALID -274000
@@ -405,6 +408,39 @@ struct thermal_trip {
enum thermal_trip_type type;
};
+/* Different aggregation logic supported for virtual sensors */
+enum aggregation_logic {
+ VIRT_WEIGHTED_AVG,
+ VIRT_MAXIMUM,
+ VIRT_MINIMUM,
+ VIRT_AGGREGATION_NR,
+};
+
+/*
+ * struct virtual_sensor_data - Data structure used to provide
+ * information about the virtual zone.
+ * @virt_zone_name - Virtual thermal zone name
+ * @num_sensors - Number of sensors this virtual zone uses to compute
+ * temperature
+ * @sensor_names - Array of sensor names
+ * @logic - Temperature aggregation logic to be used
+ * @coefficients - Coefficients to be used for weighted average logic
+ * @coefficient_ct - number of coefficients provided as input
+ * @avg_offset - offset value to be used for the weighted aggregation logic
+ * @avg_denominator - denominator value to be used for the weighted aggregation
+ * logic
+ */
+struct virtual_sensor_data {
+ int num_sensors;
+ char virt_zone_name[THERMAL_NAME_LENGTH];
+ char *sensor_names[THERMAL_MAX_VIRT_SENSORS];
+ enum aggregation_logic logic;
+ int coefficients[THERMAL_MAX_VIRT_SENSORS];
+ int coefficient_ct;
+ int avg_offset;
+ int avg_denominator;
+};
+
/* Function declarations */
#ifdef CONFIG_THERMAL_OF
struct thermal_zone_device *
@@ -417,6 +453,9 @@ struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
const struct thermal_zone_of_device_ops *ops);
void devm_thermal_zone_of_sensor_unregister(struct device *dev,
struct thermal_zone_device *tz);
+struct thermal_zone_device *devm_thermal_of_virtual_sensor_register(
+ struct device *dev,
+ const struct virtual_sensor_data *sensor_data);
#else
static inline struct thermal_zone_device *
thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
@@ -444,6 +483,14 @@ void devm_thermal_zone_of_sensor_unregister(struct device *dev,
{
}
+static inline
+struct thermal_zone_device *devm_thermal_of_virtual_sensor_register(
+ struct device *dev,
+ const struct virtual_sensor_data *sensor_data)
+{
+ return ERR_PTR(-ENODEV);
+}
+
#endif
#if IS_ENABLED(CONFIG_THERMAL)
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
new file mode 100644
index 0000000..f2322f3
--- /dev/null
+++ b/include/linux/usb/audio-v3.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file holds USB constants and structures defined
+ * by the USB Device Class Definition for Audio Devices in version 3.0.
+ * Comments below reference relevant sections of the documents contained
+ * in http://www.usb.org/developers/docs/devclass_docs/USB_Audio_v3.0.zip
+ */
+
+#ifndef __LINUX_USB_AUDIO_V3_H
+#define __LINUX_USB_AUDIO_V3_H
+
+#include <linux/types.h>
+
+#define UAC3_MIXER_UNIT_V3 0x05
+#define UAC3_FEATURE_UNIT_V3 0x07
+#define UAC3_CLOCK_SOURCE 0x0b
+
+#define BADD_MAXPSIZE_SYNC_MONO_16 0x0060
+#define BADD_MAXPSIZE_SYNC_MONO_24 0x0090
+#define BADD_MAXPSIZE_SYNC_STEREO_16 0x00c0
+#define BADD_MAXPSIZE_SYNC_STEREO_24 0x0120
+
+#define BADD_MAXPSIZE_ASYNC_MONO_16 0x0062
+#define BADD_MAXPSIZE_ASYNC_MONO_24 0x0093
+#define BADD_MAXPSIZE_ASYNC_STEREO_16 0x00c4
+#define BADD_MAXPSIZE_ASYNC_STEREO_24 0x0126
+
+#define BIT_RES_16_BIT 0x10
+#define BIT_RES_24_BIT 0x18
+
+#define SUBSLOTSIZE_16_BIT 0x02
+#define SUBSLOTSIZE_24_BIT 0x03
+
+#define BADD_SAMPLING_RATE 48000
+
+#define NUM_CHANNELS_MONO 1
+#define NUM_CHANNELS_STEREO 2
+#define BADD_CH_CONFIG_MONO 0
+#define BADD_CH_CONFIG_STEREO 3
+#define CLUSTER_ID_MONO 0x0001
+#define CLUSTER_ID_STEREO 0x0002
+
+#define FULL_ADC_PROFILE 0x01
+
+/* BADD Profile IDs */
+#define PROF_GENERIC_IO 0x20
+#define PROF_HEADPHONE 0x21
+#define PROF_SPEAKER 0x22
+#define PROF_MICROPHONE 0x23
+#define PROF_HEADSET 0x24
+#define PROF_HEADSET_ADAPTER 0x25
+#define PROF_SPEAKERPHONE 0x26
+
+/* BADD Entity IDs */
+#define BADD_OUT_TERM_ID_BAOF 0x03
+#define BADD_OUT_TERM_ID_BAIF 0x06
+#define BADD_IN_TERM_ID_BAOF 0x01
+#define BADD_IN_TERM_ID_BAIF 0x04
+#define BADD_FU_ID_BAOF 0x02
+#define BADD_FU_ID_BAIF 0x05
+#define BADD_CLOCK_SOURCE 0x09
+#define BADD_FU_ID_BAIOF 0x07
+#define BADD_MU_ID_BAIOF 0x08
+
+#define UAC_BIDIR_TERMINAL_HEADSET 0x0402
+#define UAC_BIDIR_TERMINAL_SPEAKERPHONE 0x0403
+
+#define NUM_BADD_DESCS 7
+
+struct uac3_input_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalID;
+ __u16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bCSourceID;
+ __u32 bmControls;
+ __u16 wClusterDescrID;
+ __u16 wExTerminalDescrID;
+ __u16 wConnectorsDescrID;
+ __u16 wTerminalDescrStr;
+} __packed;
+
+#define UAC3_DT_INPUT_TERMINAL_SIZE 0x14
+
+extern struct uac3_input_terminal_descriptor badd_baif_in_term_desc;
+extern struct uac3_input_terminal_descriptor badd_baof_in_term_desc;
+
+struct uac3_output_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalID;
+ __u16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bSourceID;
+ __u8 bCSourceID;
+ __u32 bmControls;
+ __u16 wExTerminalDescrID;
+ __u16 wConnectorsDescrID;
+ __u16 wTerminalDescrStr;
+} __packed;
+
+#define UAC3_DT_OUTPUT_TERMINAL_SIZE 0x13
+
+extern struct uac3_output_terminal_descriptor badd_baif_out_term_desc;
+extern struct uac3_output_terminal_descriptor badd_baof_out_term_desc;
+
+extern __u8 monoControls[];
+extern __u8 stereoControls[];
+extern __u8 badd_mu_src_ids[];
+
+struct uac3_mixer_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUnitID;
+ __u8 bNrInPins;
+ __u8 *baSourceID;
+ __u16 wClusterDescrID;
+ __u8 bmMixerControls;
+ __u32 bmControls;
+ __u16 wMixerDescrStr;
+} __packed;
+
+#define UAC3_DT_MIXER_UNIT_SIZE 0x10
+
+extern struct uac3_mixer_unit_descriptor badd_baiof_mu_desc;
+
+struct uac3_feature_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUnitID;
+ __u8 bSourceID;
+ __u8 *bmaControls;
+ __u16 wFeatureDescrStr;
+} __packed;
+
+extern struct uac3_feature_unit_descriptor badd_baif_fu_desc;
+extern struct uac3_feature_unit_descriptor badd_baof_fu_desc;
+extern struct uac3_feature_unit_descriptor badd_baiof_fu_desc;
+
+struct uac3_clock_source_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bClockID;
+ __u8 bmAttributes;
+ __u32 bmControls;
+ __u8 bReferenceTerminal;
+ __u16 wClockSourceStr;
+} __packed;
+
+#define UAC3_DT_CLOCK_SRC_SIZE 0x0c
+
+extern struct uac3_clock_source_descriptor badd_clock_desc;
+
+extern void *badd_desc_list[];
+
+#endif /* __LINUX_USB_AUDIO_V3_H */
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index 0583431..8053c8a 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -121,5 +121,6 @@ int msm_vidc_subscribe_event(void *instance,
int msm_vidc_unsubscribe_event(void *instance,
const struct v4l2_event_subscription *sub);
int msm_vidc_dqevent(void *instance, struct v4l2_event *event);
+int msm_vidc_g_crop(void *instance, struct v4l2_crop *a);
int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize);
#endif
diff --git a/include/net/dst.h b/include/net/dst.h
index 6835d22..ddcff17 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -107,10 +107,16 @@ struct dst_entry {
};
};
+struct dst_metrics {
+ u32 metrics[RTAX_MAX];
+ atomic_t refcnt;
+};
+extern const struct dst_metrics dst_default_metrics;
+
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
-extern const u32 dst_default_metrics[];
#define DST_METRICS_READ_ONLY 0x1UL
+#define DST_METRICS_REFCOUNTED 0x2UL
#define DST_METRICS_FLAGS 0x3UL
#define __DST_METRICS_PTR(Y) \
((u32 *)((Y) & ~DST_METRICS_FLAGS))
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index f390c3b..aa75828 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -114,11 +114,11 @@ struct fib_info {
__be32 fib_prefsrc;
u32 fib_tb_id;
u32 fib_priority;
- u32 *fib_metrics;
-#define fib_mtu fib_metrics[RTAX_MTU-1]
-#define fib_window fib_metrics[RTAX_WINDOW-1]
-#define fib_rtt fib_metrics[RTAX_RTT-1]
-#define fib_advmss fib_metrics[RTAX_ADVMSS-1]
+ struct dst_metrics *fib_metrics;
+#define fib_mtu fib_metrics->metrics[RTAX_MTU-1]
+#define fib_window fib_metrics->metrics[RTAX_WINDOW-1]
+#define fib_rtt fib_metrics->metrics[RTAX_RTT-1]
+#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1]
int fib_nhs;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_weight;
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 4a9c625..8c1746a 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1834,24 +1834,30 @@ TRACE_EVENT(sched_overutilized,
TRACE_EVENT(sched_get_nr_running_avg,
- TP_PROTO(int avg, int big_avg, int iowait_avg),
+ TP_PROTO(int avg, int big_avg, int iowait_avg,
+ unsigned int max_nr, unsigned int big_max_nr),
- TP_ARGS(avg, big_avg, iowait_avg),
+ TP_ARGS(avg, big_avg, iowait_avg, max_nr, big_max_nr),
TP_STRUCT__entry(
__field( int, avg )
__field( int, big_avg )
__field( int, iowait_avg )
+ __field( unsigned int, max_nr )
+ __field( unsigned int, big_max_nr )
),
TP_fast_assign(
__entry->avg = avg;
__entry->big_avg = big_avg;
__entry->iowait_avg = iowait_avg;
+ __entry->max_nr = max_nr;
+ __entry->big_max_nr = big_max_nr;
),
- TP_printk("avg=%d big_avg=%d iowait_avg=%d",
- __entry->avg, __entry->big_avg, __entry->iowait_avg)
+ TP_printk("avg=%d big_avg=%d iowait_avg=%d max_nr=%u big_max_nr=%u",
+ __entry->avg, __entry->big_avg, __entry->iowait_avg,
+ __entry->max_nr, __entry->big_max_nr)
);
TRACE_EVENT(core_ctl_eval_need,
diff --git a/include/trace/events/thermal_virtual.h b/include/trace/events/thermal_virtual.h
new file mode 100644
index 0000000..4c9ce51
--- /dev/null
+++ b/include/trace/events/thermal_virtual.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thermal_virtual
+
+#if !defined(_TRACE_VIRTUAL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VIRTUAL_H
+
+#include <linux/thermal.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(virtual_temperature,
+
+ TP_PROTO(struct thermal_zone_device *virt_tz,
+ struct thermal_zone_device *tz, int sens_temp,
+ int est_temp),
+
+ TP_ARGS(virt_tz, tz, sens_temp, est_temp),
+
+ TP_STRUCT__entry(
+ __string(virt_zone, virt_tz->type)
+ __string(therm_zone, tz->type)
+ __field(int, sens_temp)
+ __field(int, est_temp)
+ ),
+
+ TP_fast_assign(
+ __assign_str(virt_zone, virt_tz->type);
+ __assign_str(therm_zone, tz->type);
+ __entry->sens_temp = sens_temp;
+ __entry->est_temp = est_temp;
+ ),
+
+ TP_printk("virt_zone=%s zone=%s temp=%d virtual zone estimated temp=%d",
+ __get_str(virt_zone), __get_str(therm_zone),
+ __entry->sens_temp,
+ __entry->est_temp)
+);
+
+#endif /* _TRACE_VIRTUAL_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 67d632f..2d078c2 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -92,4 +92,6 @@
#define SO_CNX_ADVICE 53
+#define SO_COOKIE 57
+
#endif /* __ASM_GENERIC_SOCKET_H */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 14eaf2d..b2d5be9 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -109,6 +109,12 @@ enum bpf_attach_type {
#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
+/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command
+ * to the given target_fd cgroup the descendent cgroup will be able to
+ * override effective bpf program that was inherited from this cgroup
+ */
+#define BPF_F_ALLOW_OVERRIDE (1U << 0)
+
#define BPF_PSEUDO_MAP_FD 1
/* flags for BPF_MAP_UPDATE_ELEM command */
@@ -157,6 +163,7 @@ union bpf_attr {
__u32 target_fd; /* container object to attach to */
__u32 attach_bpf_fd; /* eBPF program to attach */
__u32 attach_type;
+ __u32 attach_flags;
};
} __attribute__((aligned(8)));
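A sketch of how the new attach_flags field might be used from user space,
assuming prog_fd holds an already-loaded BPF_PROG_TYPE_CGROUP_SKB program and
cgroup_fd is an open cgroup directory; attach_overridable() is an illustrative
name, only the uapi fields and the flag come from this patch.

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Attach prog_fd to cgroup_fd, allowing descendant cgroups to override it. */
static int attach_overridable(int cgroup_fd, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cgroup_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = BPF_CGROUP_INET_INGRESS;
	attr.attach_flags = BPF_F_ALLOW_OVERRIDE;

	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}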
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index d2314be..c6f5b09 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -26,6 +26,7 @@
/* bInterfaceProtocol values to denote the version of the standard used */
#define UAC_VERSION_1 0x00
#define UAC_VERSION_2 0x20
+#define UAC_VERSION_3 0x30
/* A.2 Audio Interface Subclass Codes */
#define USB_SUBCLASS_AUDIOCONTROL 0x01
diff --git a/include/uapi/media/cam_cpas.h b/include/uapi/media/cam_cpas.h
index 300bd87..c5cbac8 100644
--- a/include/uapi/media/cam_cpas.h
+++ b/include/uapi/media/cam_cpas.h
@@ -11,13 +11,15 @@
*
* @camera_family : Camera family type
* @reserved : Reserved field for alignment
- * @camera_version : Camera version
+ * @camera_version : Camera platform version
+ * @cpas_version : Camera CPAS version within camera platform
*
*/
struct cam_cpas_query_cap {
uint32_t camera_family;
uint32_t reserved;
struct cam_hw_version camera_version;
+ struct cam_hw_version cpas_version;
};
#endif /* __UAPI_CAM_CPAS_H__ */
diff --git a/init/Kconfig b/init/Kconfig
index d8a5868..954de19 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1270,6 +1270,10 @@
Say N.
+config SOCK_CGROUP_DATA
+ bool
+ default n
+
endif # CGROUPS
config SCHED_HMP
diff --git a/init/do_mounts_dm.c b/init/do_mounts_dm.c
index a557c5e..7760705 100644
--- a/init/do_mounts_dm.c
+++ b/init/do_mounts_dm.c
@@ -5,13 +5,17 @@
*
* This file is released under the GPL.
*/
+#include <linux/async.h>
+#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/fs.h>
#include <linux/string.h>
+#include <linux/delay.h>
#include "do_mounts.h"
-#include "../drivers/md/dm.h"
+#define DM_MAX_DEVICES 256
+#define DM_MAX_TARGETS 256
#define DM_MAX_NAME 32
#define DM_MAX_UUID 129
#define DM_NO_UUID "none"
@@ -19,14 +23,47 @@
#define DM_MSG_PREFIX "init"
/* Separators used for parsing the dm= argument. */
-#define DM_FIELD_SEP ' '
-#define DM_LINE_SEP ','
+#define DM_FIELD_SEP " "
+#define DM_LINE_SEP ","
+#define DM_ANY_SEP DM_FIELD_SEP DM_LINE_SEP
/*
* When the device-mapper and any targets are compiled into the kernel
- * (not a module), one target may be created and used as the root device at
- * boot time with the parameters given with the boot line dm=...
- * The code for that is here.
+ * (not a module), one or more device-mappers may be created and used
+ * as the root device at boot time with the parameters given with the
+ * boot line dm=...
+ *
+ * Multiple device-mappers can be stacked by specifying the number of
+ * devices. A device can have multiple targets if the number of
+ * targets is specified.
+ *
+ * TODO(taysom:defect 32847)
+ * In the future, the <num> field will be mandatory.
+ *
+ * <device> ::= [<num>] <device-mapper>+
+ * <device-mapper> ::= <head> "," <target>+
+ * <head> ::= <name> <uuid> <mode> [<num>]
+ * <target> ::= <start> <length> <type> <options> ","
+ * <mode> ::= "ro" | "rw"
+ * <uuid> ::= xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx | "none"
+ * <type> ::= "verity" | "bootcache" | ...
+ *
+ * Example:
+ * 2 vboot none ro 1,
+ * 0 1768000 bootcache
+ * device=aa55b119-2a47-8c45-946a-5ac57765011f+1
+ * signature=76e9be054b15884a9fa85973e9cb274c93afadb6
+ * cache_start=1768000 max_blocks=100000 size_limit=23 max_trace=20000,
+ * vroot none ro 1,
+ * 0 1740800 verity payload=254:0 hashtree=254:0 hashstart=1740800 alg=sha1
+ * root_hexdigest=76e9be054b15884a9fa85973e9cb274c93afadb6
+ * salt=5b3549d54d6c7a3837b9b81ed72e49463a64c03680c47835bef94d768e5646fe
+ *
+ * Notes:
+ * 1. uuid is a label for the device and we set it to "none".
+ * 2. The <num> field will be optional initially and assumed to be 1.
+ * Once all the scripts that set these fields have been updated, it will
+ * be made mandatory.
*/
struct dm_setup_target {
@@ -38,381 +75,388 @@ struct dm_setup_target {
struct dm_setup_target *next;
};
-static struct {
+struct dm_device {
int minor;
int ro;
char name[DM_MAX_NAME];
char uuid[DM_MAX_UUID];
- char *targets;
+ unsigned long num_targets;
struct dm_setup_target *target;
int target_count;
+ struct dm_device *next;
+};
+
+struct dm_option {
+ char *start;
+ char *next;
+ size_t len;
+ char delim;
+};
+
+static struct {
+ unsigned long num_devices;
+ char *str;
} dm_setup_args __initdata;
static __initdata int dm_early_setup;
-static size_t __init get_dm_option(char *str, char **next, char sep)
+static int __init get_dm_option(struct dm_option *opt, const char *accept)
{
- size_t len = 0;
- char *endp = NULL;
+ char *str = opt->next;
+ char *endp;
if (!str)
return 0;
- endp = strchr(str, sep);
+ str = skip_spaces(str);
+ opt->start = str;
+ endp = strpbrk(str, accept);
if (!endp) { /* act like strchrnul */
- len = strlen(str);
- endp = str + len;
+ opt->len = strlen(str);
+ endp = str + opt->len;
} else {
- len = endp - str;
+ opt->len = endp - str;
}
-
- if (endp == str)
- return 0;
-
- if (!next)
- return len;
-
+ opt->delim = *endp;
if (*endp == 0) {
/* Don't advance past the nul. */
- *next = endp;
+ opt->next = endp;
} else {
- *next = endp + 1;
+ opt->next = endp + 1;
}
- return len;
+ return opt->len != 0;
}
-static int __init dm_setup_args_init(void)
+static int __init dm_setup_cleanup(struct dm_device *devices)
{
- dm_setup_args.minor = 0;
- dm_setup_args.ro = 0;
- dm_setup_args.target = NULL;
- dm_setup_args.target_count = 0;
+ struct dm_device *dev = devices;
+
+ while (dev) {
+ struct dm_device *old_dev = dev;
+ struct dm_setup_target *target = dev->target;
+ while (target) {
+ struct dm_setup_target *old_target = target;
+ kfree(target->type);
+ kfree(target->params);
+ target = target->next;
+ kfree(old_target);
+ dev->target_count--;
+ }
+ BUG_ON(dev->target_count);
+ dev = dev->next;
+ kfree(old_dev);
+ }
return 0;
}
-static int __init dm_setup_cleanup(void)
+static char * __init dm_parse_device(struct dm_device *dev, char *str)
{
- struct dm_setup_target *target = dm_setup_args.target;
- struct dm_setup_target *old_target = NULL;
- while (target) {
- kfree(target->type);
- kfree(target->params);
- old_target = target;
- target = target->next;
- kfree(old_target);
- dm_setup_args.target_count--;
- }
- BUG_ON(dm_setup_args.target_count);
- return 0;
-}
-
-static char * __init dm_setup_parse_device_args(char *str)
-{
- char *next = NULL;
- size_t len = 0;
+ struct dm_option opt;
+ size_t len;
/* Grab the logical name of the device to be exported to udev */
- len = get_dm_option(str, &next, DM_FIELD_SEP);
- if (!len) {
+ opt.next = str;
+ if (!get_dm_option(&opt, DM_FIELD_SEP)) {
DMERR("failed to parse device name");
goto parse_fail;
}
- len = min(len + 1, sizeof(dm_setup_args.name));
- strlcpy(dm_setup_args.name, str, len); /* includes nul */
- str = skip_spaces(next);
+ len = min(opt.len + 1, sizeof(dev->name));
+ strlcpy(dev->name, opt.start, len); /* includes nul */
/* Grab the UUID value or "none" */
- len = get_dm_option(str, &next, DM_FIELD_SEP);
- if (!len) {
+ if (!get_dm_option(&opt, DM_FIELD_SEP)) {
DMERR("failed to parse device uuid");
goto parse_fail;
}
- len = min(len + 1, sizeof(dm_setup_args.uuid));
- strlcpy(dm_setup_args.uuid, str, len);
- str = skip_spaces(next);
+ len = min(opt.len + 1, sizeof(dev->uuid));
+ strlcpy(dev->uuid, opt.start, len);
/* Determine if the table/device will be read only or read-write */
- if (!strncmp("ro,", str, 3)) {
- dm_setup_args.ro = 1;
- } else if (!strncmp("rw,", str, 3)) {
- dm_setup_args.ro = 0;
+ get_dm_option(&opt, DM_ANY_SEP);
+ if (!strncmp("ro", opt.start, opt.len)) {
+ dev->ro = 1;
+ } else if (!strncmp("rw", opt.start, opt.len)) {
+ dev->ro = 0;
} else {
DMERR("failed to parse table mode");
goto parse_fail;
}
- str = skip_spaces(str + 3);
- return str;
+ /* Optional number field */
+ /* XXX: The <num> field will be mandatory in the next round */
+ if (opt.delim == DM_FIELD_SEP[0]) {
+ if (!get_dm_option(&opt, DM_LINE_SEP))
+ return NULL;
+ dev->num_targets = simple_strtoul(opt.start, NULL, 10);
+ } else {
+ dev->num_targets = 1;
+ }
+ if (dev->num_targets > DM_MAX_TARGETS) {
+ DMERR("too many targets %lu > %d",
+ dev->num_targets, DM_MAX_TARGETS);
+ }
+ return opt.next;
parse_fail:
return NULL;
}
-static void __init dm_substitute_devices(char *str, size_t str_len)
+static char * __init dm_parse_targets(struct dm_device *dev, char *str)
{
- char *candidate = str;
- char *candidate_end = str;
- char old_char;
- size_t len = 0;
- dev_t dev;
-
- if (str_len < 3)
- return;
-
- while (str && *str) {
- candidate = strchr(str, '/');
- if (!candidate)
- break;
-
- /* Avoid embedded slashes */
- if (candidate != str && *(candidate - 1) != DM_FIELD_SEP) {
- str = strchr(candidate, DM_FIELD_SEP);
- continue;
- }
-
- len = get_dm_option(candidate, &candidate_end, DM_FIELD_SEP);
- str = skip_spaces(candidate_end);
- if (len < 3 || len > 37) /* name_to_dev_t max; maj:mix min */
- continue;
-
- /* Temporarily terminate with a nul */
- if (*candidate_end)
- candidate_end--;
- old_char = *candidate_end;
- *candidate_end = '\0';
-
- DMDEBUG("converting candidate device '%s' to dev_t", candidate);
- /* Use the boot-time specific device naming */
- dev = name_to_dev_t(candidate);
- *candidate_end = old_char;
-
- DMDEBUG(" -> %u", dev);
- /* No suitable replacement found */
- if (!dev)
- continue;
-
- /* Rewrite the /dev/path as a major:minor */
- len = snprintf(candidate, len, "%u:%u", MAJOR(dev), MINOR(dev));
- if (!len) {
- DMERR("error substituting device major/minor.");
- break;
- }
- candidate += len;
- /* Pad out with spaces (fixing our nul) */
- while (candidate < candidate_end)
- *(candidate++) = DM_FIELD_SEP;
- }
-}
-
-static int __init dm_setup_parse_targets(char *str)
-{
- char *next = NULL;
- size_t len = 0;
- struct dm_setup_target **target = NULL;
+ struct dm_option opt;
+ struct dm_setup_target **target = &dev->target;
+ unsigned long num_targets = dev->num_targets;
+ unsigned long i;
/* Targets are defined as per the table format but with a
* comma as a newline separator. */
- target = &dm_setup_args.target;
- while (str && *str) {
+ opt.next = str;
+ for (i = 0; i < num_targets; i++) {
*target = kzalloc(sizeof(struct dm_setup_target), GFP_KERNEL);
if (!*target) {
- DMERR("failed to allocate memory for target %d",
- dm_setup_args.target_count);
+ DMERR("failed to allocate memory for target %s<%ld>",
+ dev->name, i);
goto parse_fail;
}
- dm_setup_args.target_count++;
+ dev->target_count++;
- (*target)->begin = simple_strtoull(str, &next, 10);
- if (!next || *next != DM_FIELD_SEP) {
- DMERR("failed to parse starting sector for target %d",
- dm_setup_args.target_count - 1);
+ if (!get_dm_option(&opt, DM_FIELD_SEP)) {
+ DMERR("failed to parse starting sector"
+ " for target %s<%ld>", dev->name, i);
goto parse_fail;
}
- str = skip_spaces(next + 1);
+ (*target)->begin = simple_strtoull(opt.start, NULL, 10);
- (*target)->length = simple_strtoull(str, &next, 10);
- if (!next || *next != DM_FIELD_SEP) {
- DMERR("failed to parse length for target %d",
- dm_setup_args.target_count - 1);
+ if (!get_dm_option(&opt, DM_FIELD_SEP)) {
+ DMERR("failed to parse length for target %s<%ld>",
+ dev->name, i);
goto parse_fail;
}
- str = skip_spaces(next + 1);
+ (*target)->length = simple_strtoull(opt.start, NULL, 10);
- len = get_dm_option(str, &next, DM_FIELD_SEP);
- if (!len ||
- !((*target)->type = kstrndup(str, len, GFP_KERNEL))) {
- DMERR("failed to parse type for target %d",
- dm_setup_args.target_count - 1);
+ if (get_dm_option(&opt, DM_FIELD_SEP))
+ (*target)->type = kstrndup(opt.start, opt.len,
+ GFP_KERNEL);
+ if (!((*target)->type)) {
+ DMERR("failed to parse type for target %s<%ld>",
+ dev->name, i);
goto parse_fail;
}
- str = skip_spaces(next);
-
- len = get_dm_option(str, &next, DM_LINE_SEP);
- if (!len ||
- !((*target)->params = kstrndup(str, len, GFP_KERNEL))) {
- DMERR("failed to parse params for target %d",
- dm_setup_args.target_count - 1);
+ if (get_dm_option(&opt, DM_LINE_SEP))
+ (*target)->params = kstrndup(opt.start, opt.len,
+ GFP_KERNEL);
+ if (!((*target)->params)) {
+ DMERR("failed to parse params for target %s<%ld>",
+ dev->name, i);
goto parse_fail;
}
- str = skip_spaces(next);
-
- /* Before moving on, walk through the copied target and
- * attempt to replace all /dev/xxx with the major:minor number.
- * It may not be possible to resolve them traditionally at
- * boot-time. */
- dm_substitute_devices((*target)->params, len);
-
target = &((*target)->next);
}
- DMDEBUG("parsed %d targets", dm_setup_args.target_count);
+ DMDEBUG("parsed %d targets", dev->target_count);
- return 0;
+ return opt.next;
parse_fail:
- return 1;
+ return NULL;
+}
+
+static struct dm_device * __init dm_parse_args(void)
+{
+ struct dm_device *devices = NULL;
+ struct dm_device **tail = &devices;
+ struct dm_device *dev;
+ char *str = dm_setup_args.str;
+ unsigned long num_devices = dm_setup_args.num_devices;
+ unsigned long i;
+
+ if (!str)
+ return NULL;
+ for (i = 0; i < num_devices; i++) {
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ DMERR("failed to allocated memory for dev");
+ goto error;
+ }
+ *tail = dev;
+ tail = &dev->next;
+ /*
+ * devices are given minor numbers 0 - n-1
+ * in the order they are found in the arg
+ * string.
+ */
+ dev->minor = i;
+ str = dm_parse_device(dev, str);
+ if (!str) /* NULL indicates error in parsing, bail */
+ goto error;
+
+ str = dm_parse_targets(dev, str);
+ if (!str)
+ goto error;
+ }
+ return devices;
+error:
+ dm_setup_cleanup(devices);
+ return NULL;
}
/*
* Parse the command-line parameters given our kernel, but do not
* actually try to invoke the DM device now; that is handled by
- * dm_setup_drive after the low-level disk drivers have initialised.
- * dm format is as follows:
- * dm="name uuid fmode,[table line 1],[table line 2],..."
- * May be used with root=/dev/dm-0 as it always uses the first dm minor.
+ * dm_setup_drives after the low-level disk drivers have initialised.
+ * dm format is described at the top of the file.
+ *
+ * Because dm minor numbers are assigned in ascending order starting with 0,
+ * you can assume the first device is /dev/dm-0, the next device is /dev/dm-1,
+ * and so forth.
*/
-
static int __init dm_setup(char *str)
{
- dm_setup_args_init();
+ struct dm_option opt;
+ unsigned long num_devices;
- str = dm_setup_parse_device_args(str);
if (!str) {
DMDEBUG("str is NULL");
goto parse_fail;
}
-
- /* Target parsing is delayed until we have dynamic memory */
- dm_setup_args.targets = str;
-
- printk(KERN_INFO "dm: will configure '%s' on dm-%d\n",
- dm_setup_args.name, dm_setup_args.minor);
-
+ opt.next = str;
+ if (!get_dm_option(&opt, DM_FIELD_SEP))
+ goto parse_fail;
+ if (isdigit(opt.start[0])) { /* XXX: Optional number field */
+ num_devices = simple_strtoul(opt.start, NULL, 10);
+ str = opt.next;
+ } else {
+ num_devices = 1;
+ /* Don't advance str */
+ }
+ if (num_devices > DM_MAX_DEVICES) {
+ DMDEBUG("too many devices %lu > %d",
+ num_devices, DM_MAX_DEVICES);
+ }
+ dm_setup_args.str = str;
+ dm_setup_args.num_devices = num_devices;
+ DMINFO("will configure %lu devices", num_devices);
dm_early_setup = 1;
return 1;
parse_fail:
- printk(KERN_WARNING "dm: Invalid arguments supplied to dm=.\n");
+ DMWARN("Invalid arguments supplied to dm=.");
return 0;
}
-
-static void __init dm_setup_drive(void)
+static void __init dm_setup_drives(void)
{
struct mapped_device *md = NULL;
struct dm_table *table = NULL;
struct dm_setup_target *target;
- char *uuid = dm_setup_args.uuid;
+ struct dm_device *dev;
+ char *uuid = NULL;
fmode_t fmode = FMODE_READ;
+ struct dm_device *devices;
- /* Finish parsing the targets. */
- if (dm_setup_parse_targets(dm_setup_args.targets))
- goto parse_fail;
+ devices = dm_parse_args();
- if (dm_create(dm_setup_args.minor, &md)) {
- DMDEBUG("failed to create the device");
- goto dm_create_fail;
- }
- DMDEBUG("created device '%s'", dm_device_name(md));
-
- /* In addition to flagging the table below, the disk must be
- * set explicitly ro/rw. */
- set_disk_ro(dm_disk(md), dm_setup_args.ro);
-
- if (!dm_setup_args.ro)
- fmode |= FMODE_WRITE;
- if (dm_table_create(&table, fmode, dm_setup_args.target_count, md)) {
- DMDEBUG("failed to create the table");
- goto dm_table_create_fail;
- }
-
- dm_lock_md_type(md);
- target = dm_setup_args.target;
- while (target) {
- DMINFO("adding target '%llu %llu %s %s'",
- (unsigned long long) target->begin,
- (unsigned long long) target->length, target->type,
- target->params);
- if (dm_table_add_target(table, target->type, target->begin,
- target->length, target->params)) {
- DMDEBUG("failed to add the target to the table");
- goto add_target_fail;
+ for (dev = devices; dev; dev = dev->next) {
+ if (dm_create(dev->minor, &md)) {
+ DMDEBUG("failed to create the device");
+ goto dm_create_fail;
}
- target = target->next;
- }
+ DMDEBUG("created device '%s'", dm_device_name(md));
- if (dm_table_complete(table)) {
- DMDEBUG("failed to complete the table");
- goto table_complete_fail;
- }
+ /*
+ * In addition to flagging the table below, the disk must be
+ * set explicitly ro/rw.
+ */
+ set_disk_ro(dm_disk(md), dev->ro);
- if (dm_get_md_type(md) == DM_TYPE_NONE) {
+ if (!dev->ro)
+ fmode |= FMODE_WRITE;
+ if (dm_table_create(&table, fmode, dev->target_count, md)) {
+ DMDEBUG("failed to create the table");
+ goto dm_table_create_fail;
+ }
+
+ dm_lock_md_type(md);
+
+ for (target = dev->target; target; target = target->next) {
+ DMINFO("adding target '%llu %llu %s %s'",
+ (unsigned long long) target->begin,
+ (unsigned long long) target->length,
+ target->type, target->params);
+ if (dm_table_add_target(table, target->type,
+ target->begin,
+ target->length,
+ target->params)) {
+ DMDEBUG("failed to add the target"
+ " to the table");
+ goto add_target_fail;
+ }
+ }
+ if (dm_table_complete(table)) {
+ DMDEBUG("failed to complete the table");
+ goto table_complete_fail;
+ }
+
+ /* Suspend the device so that we can bind it to the table. */
+ if (dm_suspend(md, 0)) {
+ DMDEBUG("failed to suspend the device pre-bind");
+ goto suspend_fail;
+ }
+
+ /* Initial table load: acquire type of table. */
dm_set_md_type(md, dm_table_get_type(table));
+
+ /* Setup md->queue to reflect md's type. */
if (dm_setup_md_queue(md, table)) {
DMWARN("unable to set up device queue for new table.");
goto setup_md_queue_fail;
}
- } else if (dm_get_md_type(md) != dm_table_get_type(table)) {
- DMWARN("can't change device type after initial table load.");
- goto setup_md_queue_fail;
- }
- /* Suspend the device so that we can bind it to the table. */
- if (dm_suspend(md, 0)) {
- DMDEBUG("failed to suspend the device pre-bind");
- goto suspend_fail;
+ /*
+ * Bind the table to the device. This is the only way
+ * to associate md->map with the table and set the disk
+ * capacity directly.
+ */
+ if (dm_swap_table(md, table)) { /* should return NULL. */
+ DMDEBUG("failed to bind the device to the table");
+ goto table_bind_fail;
+ }
+
+ /* Finally, resume and the device should be ready. */
+ if (dm_resume(md)) {
+ DMDEBUG("failed to resume the device");
+ goto resume_fail;
+ }
+
+ /* Export the dm device via the ioctl interface */
+ if (!strcmp(DM_NO_UUID, dev->uuid))
+ uuid = NULL;
+ if (dm_ioctl_export(md, dev->name, uuid)) {
+ DMDEBUG("failed to export device with given"
+ " name and uuid");
+ goto export_fail;
+ }
+
+ dm_unlock_md_type(md);
+
+ DMINFO("dm-%d is ready", dev->minor);
}
-
- /* Bind the table to the device. This is the only way to associate
- * md->map with the table and set the disk capacity directly. */
- if (dm_swap_table(md, table)) { /* should return NULL. */
- DMDEBUG("failed to bind the device to the table");
- goto table_bind_fail;
- }
-
- /* Finally, resume and the device should be ready. */
- if (dm_resume(md)) {
- DMDEBUG("failed to resume the device");
- goto resume_fail;
- }
-
- /* Export the dm device via the ioctl interface */
- if (!strcmp(DM_NO_UUID, dm_setup_args.uuid))
- uuid = NULL;
- if (dm_ioctl_export(md, dm_setup_args.name, uuid)) {
- DMDEBUG("failed to export device with given name and uuid");
- goto export_fail;
- }
- printk(KERN_INFO "dm: dm-%d is ready\n", dm_setup_args.minor);
-
- dm_unlock_md_type(md);
- dm_setup_cleanup();
+ dm_setup_cleanup(devices);
return;
export_fail:
resume_fail:
table_bind_fail:
-suspend_fail:
setup_md_queue_fail:
+suspend_fail:
table_complete_fail:
add_target_fail:
dm_unlock_md_type(md);
dm_table_create_fail:
dm_put(md);
dm_create_fail:
- dm_setup_cleanup();
-parse_fail:
- printk(KERN_WARNING "dm: starting dm-%d (%s) failed\n",
- dm_setup_args.minor, dm_setup_args.name);
+ DMWARN("starting dm-%d (%s) failed",
+ dev->minor, dev->name);
+ dm_setup_cleanup(devices);
}
__setup("dm=", dm_setup);
@@ -421,6 +465,6 @@ void __init dm_run_setup(void)
{
if (!dm_early_setup)
return;
- printk(KERN_INFO "dm: attempting early device configuration.\n");
- dm_setup_drive();
+ DMINFO("attempting early device configuration.");
+ dm_setup_drives();
}
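The separators are now strings so that get_dm_option() can stop at either a
field or a line boundary with a single strpbrk() call. A stand-alone sketch of
the same pattern, illustrative only (no skip_spaces() and simplified
bookkeeping; next_token() and struct token are invented names):

#include <stddef.h>
#include <string.h>

struct token {
	const char *start;
	size_t len;
	char delim;	/* separator that ended the token, or '\0' */
};

/* Return the next token from *str, advancing *str past the separator. */
static int next_token(char **str, const char *seps, struct token *tok)
{
	char *s = *str;
	char *end;

	if (!s)
		return 0;
	tok->start = s;
	end = strpbrk(s, seps);
	if (!end)			/* act like strchrnul() */
		end = s + strlen(s);
	tok->len = end - s;
	tok->delim = *end;
	*str = *end ? end + 1 : end;	/* don't advance past the nul */
	return tok->len != 0;
}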
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index d05c292..a44a7e4 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -52,6 +52,7 @@ void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
e = rcu_dereference_protected(parent->bpf.effective[type],
lockdep_is_held(&cgroup_mutex));
rcu_assign_pointer(cgrp->bpf.effective[type], e);
+ cgrp->bpf.disallow_override[type] = parent->bpf.disallow_override[type];
}
}
@@ -66,8 +67,8 @@ void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
* Each cgroup has a set of two pointers for bpf programs; one for eBPF
* programs it owns, and which is effective for execution.
*
- * If @prog is %NULL, this function attaches a new program to the cgroup and
- * releases the one that is currently attached, if any. @prog is then made
+ * If @prog is not %NULL, this function attaches a new program to the cgroup
+ * and releases the one that is currently attached, if any. @prog is then made
* the effective program of type @type in that cgroup.
*
* If @prog is %NULL, the currently attached program of type @type is released,
@@ -82,30 +83,63 @@ void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
*
* Must be called with cgroup_mutex held.
*/
-void __cgroup_bpf_update(struct cgroup *cgrp,
- struct cgroup *parent,
- struct bpf_prog *prog,
- enum bpf_attach_type type)
+int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
+ struct bpf_prog *prog, enum bpf_attach_type type,
+ bool new_overridable)
{
- struct bpf_prog *old_prog, *effective;
+ struct bpf_prog *old_prog, *effective = NULL;
struct cgroup_subsys_state *pos;
+ bool overridable = true;
- old_prog = xchg(cgrp->bpf.prog + type, prog);
+ if (parent) {
+ overridable = !parent->bpf.disallow_override[type];
+ effective = rcu_dereference_protected(parent->bpf.effective[type],
+ lockdep_is_held(&cgroup_mutex));
+ }
- effective = (!prog && parent) ?
- rcu_dereference_protected(parent->bpf.effective[type],
- lockdep_is_held(&cgroup_mutex)) :
- prog;
+ if (prog && effective && !overridable)
+ /* if parent has non-overridable prog attached, disallow
+ * attaching new programs to the descendant cgroup
+ */
+ return -EPERM;
+
+ if (prog && effective && overridable != new_overridable)
+ /* if parent has overridable prog attached, only
+ * allow overridable programs in the descendant cgroup
+ */
+ return -EPERM;
+
+ old_prog = cgrp->bpf.prog[type];
+
+ if (prog) {
+ overridable = new_overridable;
+ effective = prog;
+ if (old_prog &&
+ cgrp->bpf.disallow_override[type] == new_overridable)
+ /* disallow attaching non-overridable on top
+ * of existing overridable in this cgroup
+ * and vice versa
+ */
+ return -EPERM;
+ }
+
+ if (!prog && !old_prog)
+ /* report error when trying to detach and nothing is attached */
+ return -ENOENT;
+
+ cgrp->bpf.prog[type] = prog;
css_for_each_descendant_pre(pos, &cgrp->self) {
struct cgroup *desc = container_of(pos, struct cgroup, self);
/* skip the subtree if the descendant has its own program */
- if (desc->bpf.prog[type] && desc != cgrp)
+ if (desc->bpf.prog[type] && desc != cgrp) {
pos = css_rightmost_descendant(pos);
- else
+ } else {
rcu_assign_pointer(desc->bpf.effective[type],
effective);
+ desc->bpf.disallow_override[type] = !overridable;
+ }
}
if (prog)
@@ -115,6 +149,7 @@ void __cgroup_bpf_update(struct cgroup *cgrp,
bpf_prog_put(old_prog);
static_branch_dec(&cgroup_bpf_enabled_key);
}
+ return 0;
}
/**
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index e13157f..5e668da 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -747,7 +747,9 @@ static int bpf_prog_load(union bpf_attr *attr)
attr->kern_version != LINUX_VERSION_CODE)
return -EINVAL;
- if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
+ if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
+ type != BPF_PROG_TYPE_CGROUP_SKB &&
+ !capable(CAP_SYS_ADMIN))
return -EPERM;
/* plain bpf_prog allocation */
@@ -826,12 +828,13 @@ static int bpf_obj_get(const union bpf_attr *attr)
#ifdef CONFIG_CGROUP_BPF
-#define BPF_PROG_ATTACH_LAST_FIELD attach_type
+#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
static int bpf_prog_attach(const union bpf_attr *attr)
{
struct bpf_prog *prog;
struct cgroup *cgrp;
+ int ret;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -839,6 +842,9 @@ static int bpf_prog_attach(const union bpf_attr *attr)
if (CHECK_ATTR(BPF_PROG_ATTACH))
return -EINVAL;
+ if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
+ return -EINVAL;
+
switch (attr->attach_type) {
case BPF_CGROUP_INET_INGRESS:
case BPF_CGROUP_INET_EGRESS:
@@ -853,7 +859,10 @@ static int bpf_prog_attach(const union bpf_attr *attr)
return PTR_ERR(cgrp);
}
- cgroup_bpf_update(cgrp, prog, attr->attach_type);
+ ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
+ attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
+ if (ret)
+ bpf_prog_put(prog);
cgroup_put(cgrp);
break;
@@ -861,7 +870,7 @@ static int bpf_prog_attach(const union bpf_attr *attr)
return -EINVAL;
}
- return 0;
+ return ret;
}
#define BPF_PROG_DETACH_LAST_FIELD attach_type
@@ -869,6 +878,7 @@ static int bpf_prog_attach(const union bpf_attr *attr)
static int bpf_prog_detach(const union bpf_attr *attr)
{
struct cgroup *cgrp;
+ int ret;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -883,7 +893,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
if (IS_ERR(cgrp))
return PTR_ERR(cgrp);
- cgroup_bpf_update(cgrp, NULL, attr->attach_type);
+ ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
cgroup_put(cgrp);
break;
@@ -891,7 +901,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
return -EINVAL;
}
- return 0;
+ return ret;
}
#endif /* CONFIG_CGROUP_BPF */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 44c17f4..fe158bd 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2164,6 +2164,7 @@ static bool may_access_skb(enum bpf_prog_type type)
case BPF_PROG_TYPE_SOCKET_FILTER:
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
+ case BPF_PROG_TYPE_CGROUP_SKB:
return true;
default:
return false;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0fab276..02e367a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -6520,15 +6520,16 @@ static __init int cgroup_namespaces_init(void)
subsys_initcall(cgroup_namespaces_init);
#ifdef CONFIG_CGROUP_BPF
-void cgroup_bpf_update(struct cgroup *cgrp,
- struct bpf_prog *prog,
- enum bpf_attach_type type)
+int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
+ enum bpf_attach_type type, bool overridable)
{
struct cgroup *parent = cgroup_parent(cgrp);
+ int ret;
mutex_lock(&cgroup_mutex);
- __cgroup_bpf_update(cgrp, parent, prog, type);
+ ret = __cgroup_bpf_update(cgrp, parent, prog, type, overridable);
mutex_unlock(&cgroup_mutex);
+ return ret;
}
#endif /* CONFIG_CGROUP_BPF */
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index bc615c6..fb6017e 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -3,6 +3,8 @@
# CONFIG_DEVMEM is not set
# CONFIG_FHANDLE is not set
# CONFIG_INET_LRO is not set
+# CONFIG_NFSD is not set
+# CONFIG_NFS_FS is not set
# CONFIG_OABI_COMPAT is not set
# CONFIG_SYSVIPC is not set
# CONFIG_USELIB is not set
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b6fb796..de1b3b7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3355,6 +3355,7 @@ void scheduler_tick(void)
curr->sched_class->task_tick(rq, curr, 0);
cpu_load_update_active(rq);
calc_global_load_tick(rq);
+ sched_freq_tick(cpu);
cpufreq_update_util(rq, 0);
early_notif = early_detection_notify(rq, wallclock);
@@ -3380,8 +3381,6 @@ void scheduler_tick(void)
if (curr->sched_class == &fair_sched_class)
check_for_migration(rq, curr);
-
- sched_freq_tick(cpu);
}
#ifdef CONFIG_NO_HZ_FULL
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index e594804..b140e55 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -39,6 +39,7 @@ struct cluster_data {
cpumask_t cpu_mask;
unsigned int need_cpus;
unsigned int task_thres;
+ unsigned int max_nr;
s64 need_ts;
struct list_head lru;
bool pending;
@@ -458,47 +459,25 @@ static struct kobj_type ktype_core_ctl = {
/* ==================== runqueue based core count =================== */
-#define NR_RUNNING_TOLERANCE 5
-
static void update_running_avg(void)
{
int avg, iowait_avg, big_avg;
+ int max_nr, big_max_nr;
struct cluster_data *cluster;
unsigned int index = 0;
- sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);
-
- /*
- * Round up to the next integer if the average nr running tasks
- * is within NR_RUNNING_TOLERANCE/100 of the next integer.
- * If normal rounding up is used, it will allow a transient task
- * to trigger online event. By the time core is onlined, the task
- * has finished.
- * Rounding to closest suffers same problem because scheduler
- * might only provide running stats per jiffy, and a transient
- * task could skew the number for one jiffy. If core control
- * samples every 2 jiffies, it will observe 0.5 additional running
- * average which rounds up to 1 task.
- */
- avg = (avg + NR_RUNNING_TOLERANCE) / 100;
- big_avg = (big_avg + NR_RUNNING_TOLERANCE) / 100;
+ sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg,
+ &max_nr, &big_max_nr);
for_each_cluster(cluster, index) {
if (!cluster->inited)
continue;
- /*
- * Big cluster only need to take care of big tasks, but if
- * there are not enough big cores, big tasks need to be run
- * on little as well. Thus for little's runqueue stat, it
- * has to use overall runqueue average, or derive what big
- * tasks would have to be run on little. The latter approach
- * is not easy to get given core control reacts much slower
- * than scheduler, and can't predict scheduler's behavior.
- */
cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
+ cluster->max_nr = cluster->is_big_cluster ? big_max_nr : max_nr;
}
}
+#define MAX_NR_THRESHOLD 4
/* adjust needed CPUs based on current runqueue information */
static unsigned int apply_task_need(const struct cluster_data *cluster,
unsigned int new_need)
@@ -509,7 +488,15 @@ static unsigned int apply_task_need(const struct cluster_data *cluster,
/* only unisolate more cores if there are tasks to run */
if (cluster->nrrun > new_need)
- return new_need + 1;
+ new_need = new_need + 1;
+
+ /*
+ * We don't want any CPU in a cluster to be overcrowded with tasks.
+ * If any CPU had more than MAX_NR_THRESHOLD tasks in the last
+ * window, bring another CPU to help out.
+ */
+ if (cluster->max_nr > MAX_NR_THRESHOLD)
+ new_need = new_need + 1;
return new_need;
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6ccd3a7..cd406da 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5553,6 +5553,9 @@ static int group_idle_state(struct sched_group *sg)
for_each_cpu(i, sched_group_cpus(sg))
state = min(state, idle_get_state_idx(cpu_rq(i)));
+ if (unlikely(state == INT_MAX))
+ return -EINVAL;
+
/* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
state++;
@@ -5638,6 +5641,9 @@ static int sched_group_energy(struct energy_env *eenv)
}
idle_idx = group_idle_state(sg);
+ if (unlikely(idle_idx < 0))
+ return idle_idx;
+
group_util = group_norm_util(eenv, sg);
sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f344c88..28d660b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2456,6 +2456,11 @@ static inline bool hmp_capable(void)
return max_possible_capacity != min_max_possible_capacity;
}
+static inline bool is_max_capacity_cpu(int cpu)
+{
+ return cpu_max_possible_capacity(cpu) == max_possible_capacity;
+}
+
/*
* 'load' is in reference to "best cpu" at its best frequency.
* Scale that in reference to a given cpu, accounting for how bad it is
@@ -2681,6 +2686,15 @@ extern void sched_boost_parse_dt(void);
extern void clear_ed_task(struct task_struct *p, struct rq *rq);
extern bool early_detection_notify(struct rq *rq, u64 wallclock);
+#ifdef CONFIG_SCHED_HMP
+extern unsigned int power_cost(int cpu, u64 demand);
+#else
+static inline unsigned int power_cost(int cpu, u64 demand)
+{
+ return cpu_max_possible_capacity(cpu);
+}
+#endif
+
#else /* CONFIG_SCHED_WALT */
struct hmp_sched_stats;
@@ -2724,6 +2738,8 @@ static inline int is_task_migration_throttled(struct task_struct *p)
return 0;
}
+static inline bool is_max_capacity_cpu(int cpu) { return true; }
+
static inline void
inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
@@ -2833,6 +2849,11 @@ static inline bool early_detection_notify(struct rq *rq, u64 wallclock)
return 0;
}
+static inline unsigned int power_cost(int cpu, u64 demand)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+
#endif /* CONFIG_SCHED_WALT */
#ifdef CONFIG_SCHED_HMP
@@ -2847,7 +2868,6 @@ extern void
check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
struct task_struct *p, s64 delta);
-extern unsigned int power_cost(int cpu, u64 demand);
extern unsigned int cpu_temp(int cpu);
extern void pre_big_task_count_change(const struct cpumask *cpus);
extern void post_big_task_count_change(const struct cpumask *cpus);
@@ -2904,11 +2924,6 @@ check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
static inline void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
struct task_struct *p, s64 delta) { }
-static inline unsigned int power_cost(int cpu, u64 demand)
-{
- return SCHED_CAPACITY_SCALE;
-}
-
static inline unsigned int cpu_temp(int cpu)
{
return 0;
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index f820094..7f86c0b 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -27,11 +27,13 @@ static DEFINE_PER_CPU(u64, nr_prod_sum);
static DEFINE_PER_CPU(u64, last_time);
static DEFINE_PER_CPU(u64, nr_big_prod_sum);
static DEFINE_PER_CPU(u64, nr);
+static DEFINE_PER_CPU(u64, nr_max);
static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
static s64 last_get_time;
+#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
/**
* sched_get_nr_running_avg
* @return: Average nr_running, iowait and nr_big_tasks value since last poll.
@@ -41,7 +43,8 @@ static s64 last_get_time;
* Obtains the average nr_running value since the last poll.
* This function may not be called concurrently with itself
*/
-void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
+void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
+ unsigned int *max_nr, unsigned int *big_max_nr)
{
int cpu;
u64 curr_time = sched_clock();
@@ -51,6 +54,8 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
*avg = 0;
*iowait_avg = 0;
*big_avg = 0;
+ *max_nr = 0;
+ *big_max_nr = 0;
if (!diff)
return;
@@ -79,17 +84,35 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
per_cpu(nr_big_prod_sum, cpu) = 0;
per_cpu(iowait_prod_sum, cpu) = 0;
+ if (*max_nr < per_cpu(nr_max, cpu))
+ *max_nr = per_cpu(nr_max, cpu);
+
+ if (is_max_capacity_cpu(cpu)) {
+ if (*big_max_nr < per_cpu(nr_max, cpu))
+ *big_max_nr = per_cpu(nr_max, cpu);
+ }
+
+ per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
}
diff = curr_time - last_get_time;
last_get_time = curr_time;
- *avg = (int)div64_u64(tmp_avg * 100, diff);
- *big_avg = (int)div64_u64(tmp_big_avg * 100, diff);
- *iowait_avg = (int)div64_u64(tmp_iowait * 100, diff);
+ /*
+ * Any task running on the BIG cluster, and BIG tasks running on the
+ * little cluster, contribute to big_avg. Small or medium tasks can
+ * also run on the BIG cluster when the co-location and scheduler
+ * boost features are activated. We don't want these tasks to
+ * downmigrate to the little cluster when BIG CPUs are available but
+ * isolated. Round up the average values so that core_ctl aggressively
+ * unisolates BIG CPUs.
+ */
+ *avg = (int)DIV64_U64_ROUNDUP(tmp_avg, diff);
+ *big_avg = (int)DIV64_U64_ROUNDUP(tmp_big_avg, diff);
+ *iowait_avg = (int)DIV64_U64_ROUNDUP(tmp_iowait, diff);
- trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg);
+ trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg,
+ *max_nr, *big_max_nr);
BUG_ON(*avg < 0 || *big_avg < 0 || *iowait_avg < 0);
pr_debug("%s - avg:%d big_avg:%d iowait_avg:%d\n",
@@ -122,6 +145,9 @@ void sched_update_nr_prod(int cpu, long delta, bool inc)
BUG_ON((s64)per_cpu(nr, cpu) < 0);
+ if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
+ per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
+
per_cpu(nr_prod_sum, cpu) += nr_running * diff;
per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
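The averages returned by sched_get_nr_running_avg() are now rounded-up task
counts rather than the old x100-scaled values. DIV64_U64_ROUNDUP() is plain
ceiling division; a small illustration with made-up numbers:

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as DIV64_U64_ROUNDUP(X, Y): ceil(X / Y). */
static inline uint64_t div_round_up(uint64_t x, uint64_t y)
{
	return (x + (y - 1)) / y;
}

int main(void)
{
	/* Even a tiny residual load over the window reports one task... */
	assert(div_round_up(1, 4) == 1);
	/* ...while an exact multiple is not inflated. */
	assert(div_round_up(8, 4) == 2);
	return 0;
}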
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index b89abbd..65f4148 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -152,6 +152,8 @@ __read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
* IMPORTANT: Initialize both copies to same value!!
*/
+static __read_mostly bool sched_predl;
+
__read_mostly unsigned int sched_ravg_hist_size = 5;
__read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
@@ -231,6 +233,16 @@ static int __init set_sched_ravg_window(char *str)
early_param("sched_ravg_window", set_sched_ravg_window);
+static int __init set_sched_predl(char *str)
+{
+ unsigned int predl;
+
+ get_option(&str, &predl);
+ sched_predl = !!predl;
+ return 0;
+}
+early_param("sched_predl", set_sched_predl);
+
void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
{
inc_nr_big_task(&rq->hmp_stats, p);
@@ -402,7 +414,7 @@ unsigned int nr_eligible_big_tasks(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- if (cpu_max_possible_capacity(cpu) != max_possible_capacity)
+ if (!is_max_capacity_cpu(cpu))
return rq->hmp_stats.nr_big_tasks;
return rq->nr_running;
@@ -923,7 +935,7 @@ void set_window_start(struct rq *rq)
if (!sync_cpu_available) {
rq->window_start = 1;
sync_cpu_available = 1;
- atomic_set(&walt_irq_work_lastq_ws, rq->window_start);
+ atomic64_set(&walt_irq_work_lastq_ws, rq->window_start);
} else {
struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
@@ -1096,6 +1108,9 @@ void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
{
u32 new, old;
+ if (!sched_predl)
+ return;
+
if (is_idle_task(p) || exiting_task(p))
return;
@@ -1618,6 +1633,9 @@ static inline u32 predict_and_update_buckets(struct rq *rq,
int bidx;
u32 pred_demand;
+ if (!sched_predl)
+ return 0;
+
bidx = busy_to_bucket(runtime);
pred_demand = get_pred_busy(rq, p, bidx, runtime);
bucket_increase(p->ravg.busy_buckets, bidx);
@@ -1916,7 +1934,7 @@ static inline void run_walt_irq_work(u64 old_window_start, struct rq *rq)
if (old_window_start == rq->window_start)
return;
- result = atomic_cmpxchg(&walt_irq_work_lastq_ws, old_window_start,
+ result = atomic64_cmpxchg(&walt_irq_work_lastq_ws, old_window_start,
rq->window_start);
if (result == old_window_start)
irq_work_queue(&rq->irq_work);
diff --git a/mm/ksm.c b/mm/ksm.c
index 56e92dc..5f1855b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1020,8 +1020,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
goto out;
if (PageTransCompound(page)) {
- err = split_huge_page(page);
- if (err)
+ if (split_huge_page(page))
goto out_unlock;
}
diff --git a/mm/memblock.c b/mm/memblock.c
index 49b7c1e..f1eabcc 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1725,6 +1725,29 @@ static void __init_memblock memblock_dump(struct memblock_type *type, char *name
}
}
+extern unsigned long __init_memblock
+memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
+{
+ struct memblock_region *rgn;
+ unsigned long size = 0;
+ int idx;
+
+ for_each_memblock_type((&memblock.reserved), rgn) {
+ phys_addr_t start, end;
+
+ if (rgn->base + rgn->size < start_addr)
+ continue;
+ if (rgn->base > end_addr)
+ continue;
+
+ start = rgn->base;
+ end = start + rgn->size;
+ size += end - start;
+ }
+
+ return size;
+}
+
void __init_memblock __memblock_dump_all(void)
{
pr_info("MEMBLOCK configuration:\n");
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 19e796d..4bd4480 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1587,12 +1587,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
if (ret) {
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
- /*
- * We know that soft_offline_huge_page() tries to migrate
- * only one hugepage pointed to by hpage, so we need not
- * run through the pagelist here.
- */
- putback_active_hugepage(hpage);
+ if (!list_empty(&pagelist))
+ putback_movable_pages(&pagelist);
if (ret > 0)
ret = -EIO;
} else {
diff --git a/mm/mlock.c b/mm/mlock.c
index 4feee1d..9cdd063 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -285,7 +285,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
int i;
int nr = pagevec_count(pvec);
- int delta_munlocked;
+ int delta_munlocked = -nr;
struct pagevec pvec_putback;
int pgrescued = 0;
@@ -305,6 +305,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
continue;
else
__munlock_isolation_failed(page);
+ } else {
+ delta_munlocked++;
}
/*
@@ -316,7 +318,6 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
pagevec_add(&pvec_putback, pvec->pages[i]);
pvec->pages[i] = NULL;
}
- delta_munlocked = -nr + pagevec_count(&pvec_putback);
__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
spin_unlock_irq(zone_lru_lock(zone));
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ca9565..27ddaae 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -286,6 +286,26 @@ int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
+ unsigned long max_initialise;
+ unsigned long reserved_lowmem;
+
+ /*
+ * Initialise at least 2G of a node, but also take into account the
+ * two large system hashes that can take up 1GB for 0.25TB/node.
+ */
+ max_initialise = max(2UL << (30 - PAGE_SHIFT),
+ (pgdat->node_spanned_pages >> 8));
+
+ /*
+ * Compensate for all the memblock reservations (e.g. crash kernel)
+ * from the initial estimation to make sure we will initialize enough
+ * memory to boot.
+ */
+ reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+ pgdat->node_start_pfn + max_initialise);
+ max_initialise += reserved_lowmem;
+
+ pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
pgdat->first_deferred_pfn = ULONG_MAX;
}
@@ -308,20 +328,11 @@ static inline bool update_defer_init(pg_data_t *pgdat,
unsigned long pfn, unsigned long zone_end,
unsigned long *nr_initialised)
{
- unsigned long max_initialise;
-
/* Always populate low zones for address-constrained allocations */
if (zone_end < pgdat_end_pfn(pgdat))
return true;
- /*
- * Initialise at least 2G of a node but also take into account that
- * two large system hashes that can take up 1GB for 0.25TB/node.
- */
- max_initialise = max(2UL << (30 - PAGE_SHIFT),
- (pgdat->node_spanned_pages >> 8));
-
(*nr_initialised)++;
- if ((*nr_initialised > max_initialise) &&
+ if ((*nr_initialised > pgdat->static_init_size) &&
(pfn & (PAGES_PER_SECTION - 1)) == 0) {
pgdat->first_deferred_pfn = pfn;
return false;
@@ -5940,7 +5951,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
/* pg_data_t should be reset to zero when it's allocated */
WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
- reset_deferred_meminit(pgdat);
pgdat->node_id = nid;
pgdat->node_start_pfn = node_start_pfn;
pgdat->per_cpu_nodestats = NULL;
@@ -5962,6 +5972,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
(unsigned long)pgdat->node_mem_map);
#endif
+ reset_deferred_meminit(pgdat);
free_area_init_core(pgdat);
}
diff --git a/mm/slub.c b/mm/slub.c
index 30be24b..7341005 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s,
return 1;
}
-static void print_section(char *text, u8 *addr, unsigned int length)
+static void print_section(char *level, char *text, u8 *addr,
+ unsigned int length)
{
metadata_access_enable();
- print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+ print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
length, 1);
metadata_access_disable();
}
@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
p, p - addr, get_freepointer(s, p));
if (s->flags & SLAB_RED_ZONE)
- print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+ print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+ s->red_left_pad);
else if (p > addr + 16)
- print_section("Bytes b4 ", p - 16, 16);
+ print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
- print_section("Object ", p, min_t(unsigned long, s->object_size,
- PAGE_SIZE));
+ print_section(KERN_ERR, "Object ", p,
+ min_t(unsigned long, s->object_size, PAGE_SIZE));
if (s->flags & SLAB_RED_ZONE)
- print_section("Redzone ", p + s->object_size,
+ print_section(KERN_ERR, "Redzone ", p + s->object_size,
s->inuse - s->object_size);
if (s->offset)
@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
if (off != size_from_object(s))
/* Beginning of the filler is the free pointer */
- print_section("Padding ", p + off, size_from_object(s) - off);
+ print_section(KERN_ERR, "Padding ", p + off,
+ size_from_object(s) - off);
dump_stack();
}
@@ -832,7 +835,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
end--;
slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
- print_section("Padding ", end - remainder, remainder);
+ print_section(KERN_ERR, "Padding ", end - remainder, remainder);
restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
return 0;
@@ -985,7 +988,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
page->freelist);
if (!alloc)
- print_section("Object ", (void *)object,
+ print_section(KERN_INFO, "Object ", (void *)object,
s->object_size);
dump_stack();
@@ -5466,6 +5469,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
char mbuf[64];
char *buf;
struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
+ ssize_t len;
if (!attr || !attr->store || !attr->show)
continue;
@@ -5490,8 +5494,9 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
buf = buffer;
}
- attr->show(root_cache, buf);
- attr->store(s, buf, strlen(buf));
+ len = attr->show(root_cache, buf);
+ if (len > 0)
+ attr->store(s, buf, len);
}
if (buffer)
diff --git a/net/Kconfig b/net/Kconfig
index d5ff4f7..0b8c255 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -265,10 +265,6 @@
config HWBM
bool
-config SOCK_CGROUP_DATA
- bool
- default n
-
config CGROUP_NET_PRIO
bool "Network priority cgroup"
depends on CGROUPS
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 0474106..7625ec8 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -776,6 +776,13 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
return -EPROTONOSUPPORT;
}
}
+
+ if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
+ __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
+
+ if (defpvid >= VLAN_VID_MASK)
+ return -EINVAL;
+ }
#endif
return 0;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index d8ad73b3..5a782f5 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -185,6 +185,7 @@ static void br_stp_start(struct net_bridge *br)
br_debug(br, "using kernel STP\n");
/* To start timers on any ports left in blocking */
+ mod_timer(&br->hello_timer, jiffies + br->hello_time);
br_port_state_selection(br);
}
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index da058b8..15826fd 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,7 @@ static void br_hello_timer_expired(unsigned long arg)
if (br->dev->flags & IFF_UP) {
br_config_bpdu_generation(br);
- if (br->stp_enabled != BR_USER_STP)
+ if (br->stp_enabled == BR_KERNEL_STP)
mod_timer(&br->hello_timer,
round_jiffies(jiffies + br->hello_time));
}
diff --git a/net/core/dst.c b/net/core/dst.c
index b5cbbe0..656b70d 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -151,13 +151,13 @@ int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(dst_discard_out);
-const u32 dst_default_metrics[RTAX_MAX + 1] = {
+const struct dst_metrics dst_default_metrics = {
/* This initializer is needed to force linker to place this variable
* into const section. Otherwise it might end into bss section.
* We really want to avoid false sharing on this variable, and catch
* any writes on it.
*/
- [RTAX_MAX] = 0xdeadbeef,
+ .refcnt = ATOMIC_INIT(1),
};
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
@@ -169,7 +169,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
if (dev)
dev_hold(dev);
dst->ops = ops;
- dst_init_metrics(dst, dst_default_metrics, true);
+ dst_init_metrics(dst, dst_default_metrics.metrics, true);
dst->expires = 0UL;
dst->path = dst;
dst->from = NULL;
@@ -315,25 +315,30 @@ EXPORT_SYMBOL(dst_release);
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
- u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+ struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
if (p) {
- u32 *old_p = __DST_METRICS_PTR(old);
+ struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
unsigned long prev, new;
- memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+ atomic_set(&p->refcnt, 1);
+ memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
new = (unsigned long) p;
prev = cmpxchg(&dst->_metrics, old, new);
if (prev != old) {
kfree(p);
- p = __DST_METRICS_PTR(prev);
+ p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
if (prev & DST_METRICS_READ_ONLY)
p = NULL;
+ } else if (prev & DST_METRICS_REFCOUNTED) {
+ if (atomic_dec_and_test(&old_p->refcnt))
+ kfree(old_p);
}
}
- return p;
+ BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
+ return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
@@ -342,7 +347,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
unsigned long prev, new;
- new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
+ new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
prev = cmpxchg(&dst->_metrics, old, new);
if (prev == old)
kfree(__DST_METRICS_PTR(old));
diff --git a/net/core/filter.c b/net/core/filter.c
index 2cb4f0f..5e42e0e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -95,8 +95,8 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
skb->sk = sk;
pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
- err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
skb->sk = save_sk;
+ err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
}
rcu_read_unlock();
@@ -2208,6 +2208,7 @@ bool bpf_helper_changes_skb_data(void *func)
func == bpf_skb_change_proto ||
func == bpf_skb_change_tail ||
func == bpf_skb_pull_data ||
+ func == bpf_clone_redirect ||
func == bpf_l3_csum_replace ||
func == bpf_l4_csum_replace)
return true;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b490af6..1d91607 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1617,13 +1617,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
cb->nlh->nlmsg_seq, 0,
flags,
ext_filter_mask);
- /* If we ran out of room on the first message,
- * we're in trouble
- */
- WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
- if (err < 0)
- goto out;
+ if (err < 0) {
+ if (likely(skb->len))
+ goto out;
+
+ goto out_err;
+ }
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
@@ -1631,10 +1631,12 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
}
}
out:
+ err = skb->len;
+out_err:
cb->args[1] = idx;
cb->args[0] = h;
- return skb->len;
+ return err;
}
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
@@ -3413,8 +3415,12 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
err = br_dev->netdev_ops->ndo_bridge_getlink(
skb, portid, seq, dev,
filter_mask, NLM_F_MULTI);
- if (err < 0 && err != -EOPNOTSUPP)
- break;
+ if (err < 0 && err != -EOPNOTSUPP) {
+ if (likely(skb->len))
+ break;
+
+ goto out_err;
+ }
}
idx++;
}
@@ -3425,16 +3431,22 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
seq, dev,
filter_mask,
NLM_F_MULTI);
- if (err < 0 && err != -EOPNOTSUPP)
- break;
+ if (err < 0 && err != -EOPNOTSUPP) {
+ if (likely(skb->len))
+ break;
+
+ goto out_err;
+ }
}
idx++;
}
}
+ err = skb->len;
+out_err:
rcu_read_unlock();
cb->args[0] = idx;
- return skb->len;
+ return err;
}
static inline size_t bridge_nlmsg_size(void)
diff --git a/net/core/sock.c b/net/core/sock.c
index 19562f7..f07eaea 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -138,10 +138,7 @@
#include <trace/events/sock.h>
-#ifdef CONFIG_INET
#include <net/tcp.h>
-#endif
-
#include <net/busy_poll.h>
static DEFINE_MUTEX(proto_list_mutex);
@@ -1034,6 +1031,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
union {
int val;
+ u64 val64;
struct linger ling;
struct timeval tm;
} v;
@@ -1264,6 +1262,13 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = sk->sk_incoming_cpu;
break;
+
+ case SO_COOKIE:
+ lv = sizeof(u64);
+ if (len < lv)
+ return -EINVAL;
+ v.val64 = sock_gen_cookie(sk);
+ break;
default:
/* We implement the SO_SNDLOWAT etc to not be settable
* (1003.1g 7).
@@ -1687,28 +1692,24 @@ EXPORT_SYMBOL(skb_set_owner_w);
* delay queue. We want to allow the owner socket to send more
* packets, as if they were already TX completed by a typical driver.
* But we also want to keep skb->sk set because some packet schedulers
- * rely on it (sch_fq for example). So we set skb->truesize to a small
- * amount (1) and decrease sk_wmem_alloc accordingly.
+ * rely on it (sch_fq for example).
*/
void skb_orphan_partial(struct sk_buff *skb)
{
- /* If this skb is a TCP pure ACK or already went here,
- * we have nothing to do. 2 is already a very small truesize.
- */
- if (skb->truesize <= 2)
+ if (skb_is_tcp_pure_ack(skb))
return;
- /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
- * so we do not completely orphan skb, but transfert all
- * accounted bytes but one, to avoid unexpected reorders.
- */
if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
|| skb->destructor == tcp_wfree
#endif
) {
- atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
- skb->truesize = 1;
+ struct sock *sk = skb->sk;
+
+ if (atomic_inc_not_zero(&sk->sk_refcnt)) {
+ atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+ skb->destructor = sock_efree;
+ }
} else {
skb_orphan(skb);
}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 237d62c..2ac9d2a 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -426,6 +426,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
newsk->sk_backlog_rcv = dccp_v4_do_rcv;
newnp->pktoptions = NULL;
newnp->opt = NULL;
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
@@ -490,6 +493,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
/* Clone RX bits */
newnp->rxopt.all = np->rxopt.all;
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 98fd2f7..37f4578 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -759,7 +759,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
unsigned int e = 0, s_e;
struct fib_table *tb;
struct hlist_head *head;
- int dumped = 0;
+ int dumped = 0, err;
if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
@@ -779,20 +779,27 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
if (dumped)
memset(&cb->args[2], 0, sizeof(cb->args) -
2 * sizeof(cb->args[0]));
- if (fib_table_dump(tb, skb, cb) < 0)
- goto out;
+ err = fib_table_dump(tb, skb, cb);
+ if (err < 0) {
+ if (likely(skb->len))
+ goto out;
+
+ goto out_err;
+ }
dumped = 1;
next:
e++;
}
}
out:
+ err = skb->len;
+out_err:
rcu_read_unlock();
cb->args[1] = e;
cb->args[0] = h;
- return skb->len;
+ return err;
}
/* Prepare and feed intra-kernel routing request.
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 6a40680..7563831 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -204,6 +204,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
static void free_fib_info_rcu(struct rcu_head *head)
{
struct fib_info *fi = container_of(head, struct fib_info, rcu);
+ struct dst_metrics *m;
change_nexthops(fi) {
if (nexthop_nh->nh_dev)
@@ -214,8 +215,9 @@ static void free_fib_info_rcu(struct rcu_head *head)
rt_fibinfo_free(&nexthop_nh->nh_rth_input);
} endfor_nexthops(fi);
- if (fi->fib_metrics != (u32 *) dst_default_metrics)
- kfree(fi->fib_metrics);
+ m = fi->fib_metrics;
+ if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
+ kfree(m);
kfree(fi);
}
@@ -982,11 +984,11 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
val = 255;
if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
return -EINVAL;
- fi->fib_metrics[type - 1] = val;
+ fi->fib_metrics->metrics[type - 1] = val;
}
if (ecn_ca)
- fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
+ fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
return 0;
}
@@ -1044,11 +1046,12 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
goto failure;
fib_info_cnt++;
if (cfg->fc_mx) {
- fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+ fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
if (!fi->fib_metrics)
goto failure;
+ atomic_set(&fi->fib_metrics->refcnt, 1);
} else
- fi->fib_metrics = (u32 *) dst_default_metrics;
+ fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
fi->fib_net = net;
fi->fib_protocol = cfg->fc_protocol;
@@ -1252,7 +1255,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
if (fi->fib_priority &&
nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
goto nla_put_failure;
- if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
+ if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
goto nla_put_failure;
if (fi->fib_prefsrc &&
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index e3665bf..ef40bb6 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1932,6 +1932,8 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
/* rcu_read_lock is hold by caller */
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
+ int err;
+
if (i < s_i) {
i++;
continue;
@@ -1942,17 +1944,14 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
continue;
}
- if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWROUTE,
- tb->tb_id,
- fa->fa_type,
- xkey,
- KEYLENGTH - fa->fa_slen,
- fa->fa_tos,
- fa->fa_info, NLM_F_MULTI) < 0) {
+ err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+ tb->tb_id, fa->fa_type,
+ xkey, KEYLENGTH - fa->fa_slen,
+ fa->fa_tos, fa->fa_info, NLM_F_MULTI);
+ if (err < 0) {
cb->args[4] = i;
- return -1;
+ return err;
}
i++;
}
@@ -1974,10 +1973,13 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
t_key key = cb->args[3];
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
- if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
+ int err;
+
+ err = fn_trie_dump_leaf(l, tb, skb, cb);
+ if (err < 0) {
cb->args[3] = key;
cb->args[2] = count;
- return -1;
+ return err;
}
++count;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index d5d3ead..ceae0ea 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -665,6 +665,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
/* listeners have SOCK_RCU_FREE, not the children */
sock_reset_flag(newsk, SOCK_RCU_FREE);
+ inet_sk(newsk)->mc_list = NULL;
+
newsk->sk_mark = inet_rsk(req)->ir_mark;
atomic64_set(&newsk->sk_cookie,
atomic64_read(&inet_rsk(req)->ir_cookie));
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 70c40ba2..18c6e79 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1370,8 +1370,12 @@ static void rt_add_uncached_list(struct rtable *rt)
static void ipv4_dst_destroy(struct dst_entry *dst)
{
+ struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
struct rtable *rt = (struct rtable *) dst;
+ if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
+ kfree(p);
+
if (!list_empty(&rt->rt_uncached)) {
struct uncached_list *ul = rt->rt_uncached_list;
@@ -1423,7 +1427,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
rt->rt_gateway = nh->nh_gw;
rt->rt_uses_gateway = 1;
}
- dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+ dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
+ if (fi->fib_metrics != &dst_default_metrics) {
+ rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+ atomic_inc(&fi->fib_metrics->refcnt);
+ }
#ifdef CONFIG_IP_ROUTE_CLASSID
rt->dst.tclassid = nh->nh_tclassid;
#endif
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index eb142ca..86fbf0f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1078,9 +1078,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
int *copied, size_t size)
{
struct tcp_sock *tp = tcp_sk(sk);
+ struct sockaddr *uaddr = msg->msg_name;
int err, flags;
- if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
+ if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
+ (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
+ uaddr->sa_family == AF_UNSPEC))
return -EOPNOTSUPP;
if (tp->fastopen_req)
return -EALREADY; /* Another Fast Open is in progress */
@@ -1093,7 +1096,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
tp->fastopen_req->size = size;
flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
- err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+ err = __inet_stream_connect(sk->sk_socket, uaddr,
msg->msg_namelen, flags);
*copied = tp->fastopen_req->copied;
tcp_free_fastopen_req(tp);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e074816..a03f1e8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1178,13 +1178,14 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
*/
if (pkt_len > mss) {
unsigned int new_len = (pkt_len / mss) * mss;
- if (!in_sack && new_len < pkt_len) {
+ if (!in_sack && new_len < pkt_len)
new_len += mss;
- if (new_len >= skb->len)
- return 0;
- }
pkt_len = new_len;
}
+
+ if (pkt_len >= skb->len && !in_sack)
+ return 0;
+
err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
if (err < 0)
return err;
@@ -3233,7 +3234,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
int delta;
/* Non-retransmitted hole got filled? That's reordering */
- if (reord < prior_fackets)
+ if (reord < prior_fackets && reord <= tp->fackets_out)
tcp_update_reordering(sk, tp->fackets_out - reord, 0);
delta = tcp_is_fack(tp) ? pkts_acked :
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 33b04ec..013086b 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -63,7 +63,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
const struct net_offload *ops;
int proto;
struct frag_hdr *fptr;
- unsigned int unfrag_ip6hlen;
unsigned int payload_len;
u8 *prevhdr;
int offset = 0;
@@ -116,8 +115,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
skb->network_header = (u8 *)ipv6h - skb->head;
if (udpfrag) {
- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
- fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
+ int err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
+ return ERR_PTR(err);
+ fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
fptr->frag_off = htons(offset);
if (skb->next)
fptr->frag_off |= htons(IP6_MF);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a215802..d472a5f 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -595,7 +595,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int ptr, offset = 0, err = 0;
u8 *prevhdr, nexthdr = 0;
- hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
+ goto fail;
+ hlen = err;
nexthdr = *prevhdr;
mtu = ip6_skb_dst_mtu(skb);
@@ -1453,6 +1456,11 @@ static int __ip6_append_data(struct sock *sk,
*/
alloclen += sizeof(struct frag_hdr);
+ copy = datalen - transhdrlen - fraggap;
+ if (copy < 0) {
+ err = -EINVAL;
+ goto error;
+ }
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
alloclen + hh_len,
@@ -1502,13 +1510,9 @@ static int __ip6_append_data(struct sock *sk,
data += fraggap;
pskb_trim_unique(skb_prev, maxfraglen);
}
- copy = datalen - transhdrlen - fraggap;
-
- if (copy < 0) {
- err = -EINVAL;
- kfree_skb(skb);
- goto error;
- } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
+ if (copy > 0 &&
+ getfrag(from, data + transhdrlen, offset,
+ copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index cd42523..e9065b8 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -79,14 +79,13 @@ EXPORT_SYMBOL(ipv6_select_ident);
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
u16 offset = sizeof(struct ipv6hdr);
- struct ipv6_opt_hdr *exthdr =
- (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
unsigned int packet_len = skb_tail_pointer(skb) -
skb_network_header(skb);
int found_rhdr = 0;
*nexthdr = &ipv6_hdr(skb)->nexthdr;
- while (offset + 1 <= packet_len) {
+ while (offset <= packet_len) {
+ struct ipv6_opt_hdr *exthdr;
switch (**nexthdr) {
@@ -107,13 +106,16 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
return offset;
}
- offset += ipv6_optlen(exthdr);
- *nexthdr = &exthdr->nexthdr;
+ if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
+ return -EINVAL;
+
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
+ offset += ipv6_optlen(exthdr);
+ *nexthdr = &exthdr->nexthdr;
}
- return offset;
+ return -EINVAL;
}
EXPORT_SYMBOL(ip6_find_1stfragopt);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1c3bc0a..368c23a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1048,6 +1048,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
+ newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
@@ -1117,6 +1118,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
First: no IPv4 options.
*/
newinet->inet_opt = NULL;
+ newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index ac858c4..a2267f8 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -29,6 +29,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
u8 frag_hdr_sz = sizeof(struct frag_hdr);
__wsum csum;
int tnl_hlen;
+ int err;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
@@ -90,7 +91,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
/* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header.
*/
- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
+ return ERR_PTR(err);
+ unfrag_ip6hlen = err;
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index cb76ff3..6a563e6 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2652,13 +2652,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
}
- sockc.tsflags = po->sk.sk_tsflags;
- if (msg->msg_controllen) {
- err = sock_cmsg_send(&po->sk, msg, &sockc);
- if (unlikely(err))
- goto out;
- }
-
err = -ENXIO;
if (unlikely(dev == NULL))
goto out;
@@ -2666,6 +2659,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
if (unlikely(!(dev->flags & IFF_UP)))
goto out_put;
+ sockc.tsflags = po->sk.sk_tsflags;
+ if (msg->msg_controllen) {
+ err = sock_cmsg_send(&po->sk, msg, &sockc);
+ if (unlikely(err))
+ goto out_put;
+ }
+
if (po->sk.sk_socket->type == SOCK_RAW)
reserve = dev->hard_header_len;
size_max = po->tx_ring.frame_size
diff --git a/net/sctp/input.c b/net/sctp/input.c
index a01a56e..6c79915 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -473,15 +473,14 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
struct sctp_association **app,
struct sctp_transport **tpp)
{
+ struct sctp_init_chunk *chunkhdr, _chunkhdr;
union sctp_addr saddr;
union sctp_addr daddr;
struct sctp_af *af;
struct sock *sk = NULL;
struct sctp_association *asoc;
struct sctp_transport *transport = NULL;
- struct sctp_init_chunk *chunkhdr;
__u32 vtag = ntohl(sctphdr->vtag);
- int len = skb->len - ((void *)sctphdr - (void *)skb->data);
*app = NULL; *tpp = NULL;
@@ -516,13 +515,16 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
* discard the packet.
*/
if (vtag == 0) {
- chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
- if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
- + sizeof(__be32) ||
+ /* chunk header + first 4 octets of init header */
+ chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
+ sizeof(struct sctphdr),
+ sizeof(struct sctp_chunkhdr) +
+ sizeof(__be32), &_chunkhdr);
+ if (!chunkhdr ||
chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
- ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
+ ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
goto out;
- }
+
} else if (vtag != asoc->c.peer_vtag) {
goto out;
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 6a2532d..0c09060 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -240,12 +240,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
struct sctp_bind_addr *bp;
struct ipv6_pinfo *np = inet6_sk(sk);
struct sctp_sockaddr_entry *laddr;
- union sctp_addr *baddr = NULL;
union sctp_addr *daddr = &t->ipaddr;
union sctp_addr dst_saddr;
struct in6_addr *final_p, final;
__u8 matchlen = 0;
- __u8 bmatchlen;
sctp_scope_t scope;
memset(fl6, 0, sizeof(struct flowi6));
@@ -312,23 +310,37 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
*/
rcu_read_lock();
list_for_each_entry_rcu(laddr, &bp->address_list, list) {
- if (!laddr->valid)
+ struct dst_entry *bdst;
+ __u8 bmatchlen;
+
+ if (!laddr->valid ||
+ laddr->state != SCTP_ADDR_SRC ||
+ laddr->a.sa.sa_family != AF_INET6 ||
+ scope > sctp_scope(&laddr->a))
continue;
- if ((laddr->state == SCTP_ADDR_SRC) &&
- (laddr->a.sa.sa_family == AF_INET6) &&
- (scope <= sctp_scope(&laddr->a))) {
- bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
- if (!baddr || (matchlen < bmatchlen)) {
- baddr = &laddr->a;
- matchlen = bmatchlen;
- }
- }
- }
- if (baddr) {
- fl6->saddr = baddr->v6.sin6_addr;
- fl6->fl6_sport = baddr->v6.sin6_port;
+
+ fl6->saddr = laddr->a.v6.sin6_addr;
+ fl6->fl6_sport = laddr->a.v6.sin6_port;
final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
- dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+ bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
+
+ if (!IS_ERR(bdst) &&
+ ipv6_chk_addr(dev_net(bdst->dev),
+ &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
+ if (!IS_ERR_OR_NULL(dst))
+ dst_release(dst);
+ dst = bdst;
+ break;
+ }
+
+ bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
+ if (matchlen > bmatchlen)
+ continue;
+
+ if (!IS_ERR_OR_NULL(dst))
+ dst_release(dst);
+ dst = bdst;
+ matchlen = bmatchlen;
}
rcu_read_unlock();
@@ -666,6 +678,9 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
rcu_read_lock();
opt = rcu_dereference(np->opt);
diff --git a/samples/bpf/libbpf.c b/samples/bpf/libbpf.c
index 9ce707b..9cbc786 100644
--- a/samples/bpf/libbpf.c
+++ b/samples/bpf/libbpf.c
@@ -104,12 +104,14 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}
-int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
+int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
+ unsigned int flags)
{
union bpf_attr attr = {
.target_fd = target_fd,
.attach_bpf_fd = prog_fd,
.attach_type = type,
+ .attach_flags = flags,
};
return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
index d0a799a..b06cf5a 100644
--- a/samples/bpf/libbpf.h
+++ b/samples/bpf/libbpf.h
@@ -15,7 +15,8 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, int insn_len,
const char *license, int kern_version);
-int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type);
+int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type,
+ unsigned int flags);
int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
int bpf_obj_pin(int fd, const char *pathname);
diff --git a/samples/bpf/test_cgrp2_attach.c b/samples/bpf/test_cgrp2_attach.c
index 63ef208..9de4896 100644
--- a/samples/bpf/test_cgrp2_attach.c
+++ b/samples/bpf/test_cgrp2_attach.c
@@ -124,7 +124,7 @@ int main(int argc, char **argv)
ret = bpf_prog_detach(cg_fd, type);
printf("bpf_prog_detach() returned '%s' (%d)\n", strerror(errno), errno);
- ret = bpf_prog_attach(prog_fd, cg_fd, type);
+ ret = bpf_prog_attach(prog_fd, cg_fd, type, 0);
if (ret < 0) {
printf("Failed to attach prog to cgroup: '%s'\n",
strerror(errno));
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 37b70f8..0abab79 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1537,6 +1537,8 @@ static const struct snd_pci_quirk stac9200_fixup_tbl[] = {
"Dell Inspiron 1501", STAC_9200_DELL_M26),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6,
"unknown Dell", STAC_9200_DELL_M26),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0201,
+ "Dell Latitude D430", STAC_9200_DELL_M22),
/* Panasonic */
SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC),
/* Gateway machines needs EAPD to be set on resume */
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
index 502aa4f..0942d4a 100644
--- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
@@ -1662,6 +1662,7 @@ static int msm_sdw_notifier_service_cb(struct notifier_block *nb,
mutex_lock(&msm_sdw->codec_mutex);
switch (opcode) {
case AUDIO_NOTIFIER_SERVICE_DOWN:
+ msm_sdw->int_mclk1_enabled = false;
msm_sdw->dev_up = false;
for (i = 0; i < msm_sdw->nr; i++)
swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index 5f8e3fd..125ce7a 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -37,9 +37,10 @@
#define DRV_NAME "pmic_analog_codec"
#define SDM660_CDC_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
- SNDRV_PCM_RATE_48000)
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_192000)
#define SDM660_CDC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
- SNDRV_PCM_FMTBIT_S24_LE)
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_3LE)
#define MSM_DIG_CDC_STRING_LEN 80
#define MSM_ANLG_CDC_VERSION_ENTRY_SIZE 32
@@ -3796,6 +3797,9 @@ static int msm_anlg_cdc_device_up(struct snd_soc_codec *codec)
msm_anlg_cdc_configure_cap(codec, false, false);
wcd_mbhc_stop(&sdm660_cdc_priv->mbhc);
wcd_mbhc_deinit(&sdm660_cdc_priv->mbhc);
+ /* Disable mechanical detection and set type to insertion */
+ snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1,
+ 0xA0, 0x20);
ret = wcd_mbhc_init(&sdm660_cdc_priv->mbhc, codec, &mbhc_cb,
&intr_ids, wcd_mbhc_registers, true);
if (ret)
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
index f140b19..b6e0ec6 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
@@ -1929,8 +1929,12 @@ static struct snd_soc_dai_driver msm_codec_dais[] = {
.stream_name = "AIF1 Playback",
.channels_min = 1,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .rate_max = 192000,
+ .rate_min = 8000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE,
},
.ops = &msm_dig_dai_ops,
},
@@ -2012,7 +2016,7 @@ static struct snd_soc_codec_driver soc_msm_dig_codec = {
const struct regmap_config msm_digital_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
- .val_bits = 32,
+ .val_bits = 8,
.lock = enable_digital_callback,
.unlock = disable_digital_callback,
.cache_type = REGCACHE_FLAT,
@@ -2085,10 +2089,18 @@ static int msm_dig_cdc_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int msm_dig_suspend(struct device *dev)
{
- struct msm_asoc_mach_data *pdata =
- snd_soc_card_get_drvdata(registered_digcodec->component.card);
+ struct msm_asoc_mach_data *pdata;
struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(dev);
+ if (!registered_digcodec || !msm_dig_cdc) {
+ pr_debug("%s:digcodec not initialized, return\n", __func__);
+ return 0;
+ }
+ pdata = snd_soc_card_get_drvdata(registered_digcodec->component.card);
+ if (!pdata) {
+ pr_debug("%s:card not initialized, return\n", __func__);
+ return 0;
+ }
if (msm_dig_cdc->dapm_bias_off) {
pr_debug("%s: mclk cnt = %d, mclk_enabled = %d\n",
__func__, atomic_read(&pdata->int_mclk0_rsc_ref),
diff --git a/sound/soc/codecs/wcd-mbhc-adc.c b/sound/soc/codecs/wcd-mbhc-adc.c
index 7278431..e44eec9 100644
--- a/sound/soc/codecs/wcd-mbhc-adc.c
+++ b/sound/soc/codecs/wcd-mbhc-adc.c
@@ -729,7 +729,8 @@ static void wcd_correct_swch_plug(struct work_struct *work)
* otherwise report unsupported plug
*/
if (mbhc->mbhc_cfg->swap_gnd_mic &&
- mbhc->mbhc_cfg->swap_gnd_mic(codec)) {
+ mbhc->mbhc_cfg->swap_gnd_mic(codec,
+ true)) {
pr_debug("%s: US_EU gpio present,flip switch\n"
, __func__);
continue;
diff --git a/sound/soc/codecs/wcd-mbhc-legacy.c b/sound/soc/codecs/wcd-mbhc-legacy.c
index 83023bc..745e2e8 100644
--- a/sound/soc/codecs/wcd-mbhc-legacy.c
+++ b/sound/soc/codecs/wcd-mbhc-legacy.c
@@ -633,7 +633,8 @@ static void wcd_correct_swch_plug(struct work_struct *work)
* otherwise report unsupported plug
*/
if (mbhc->mbhc_cfg->swap_gnd_mic &&
- mbhc->mbhc_cfg->swap_gnd_mic(codec)) {
+ mbhc->mbhc_cfg->swap_gnd_mic(codec,
+ true)) {
pr_debug("%s: US_EU gpio present,flip switch\n"
, __func__);
continue;
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index 510a8dc..ebcb413 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -1460,18 +1460,12 @@ static int wcd_mbhc_usb_c_analog_setup_gpios(struct wcd_mbhc *mbhc,
if (config->usbc_en1_gpio_p)
rc = msm_cdc_pinctrl_select_active_state(
config->usbc_en1_gpio_p);
- if (rc == 0 && config->usbc_en2n_gpio_p)
- rc = msm_cdc_pinctrl_select_active_state(
- config->usbc_en2n_gpio_p);
if (rc == 0 && config->usbc_force_gpio_p)
rc = msm_cdc_pinctrl_select_active_state(
config->usbc_force_gpio_p);
mbhc->usbc_mode = POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER;
} else {
/* no delay is required when disabling GPIOs */
- if (config->usbc_en2n_gpio_p)
- msm_cdc_pinctrl_select_sleep_state(
- config->usbc_en2n_gpio_p);
if (config->usbc_en1_gpio_p)
msm_cdc_pinctrl_select_sleep_state(
config->usbc_en1_gpio_p);
@@ -1490,6 +1484,8 @@ static int wcd_mbhc_usb_c_analog_setup_gpios(struct wcd_mbhc *mbhc,
}
mbhc->usbc_mode = POWER_SUPPLY_TYPEC_NONE;
+ if (mbhc->mbhc_cfg->swap_gnd_mic)
+ mbhc->mbhc_cfg->swap_gnd_mic(mbhc->codec, false);
}
return rc;
@@ -1675,19 +1671,12 @@ int wcd_mbhc_start(struct wcd_mbhc *mbhc, struct wcd_mbhc_config *mbhc_cfg)
dev_dbg(mbhc->codec->dev, "%s: usbc analog enabled\n",
__func__);
rc = wcd_mbhc_init_gpio(mbhc, mbhc_cfg,
- "qcom,usbc-analog-en1_gpio",
+ "qcom,usbc-analog-en1-gpio",
&config->usbc_en1_gpio,
&config->usbc_en1_gpio_p);
if (rc)
goto err;
- rc = wcd_mbhc_init_gpio(mbhc, mbhc_cfg,
- "qcom,usbc-analog-en2_n_gpio",
- &config->usbc_en2n_gpio,
- &config->usbc_en2n_gpio_p);
- if (rc)
- goto err;
-
if (of_find_property(card->dev->of_node,
"qcom,usbc-analog-force_detect_gpio",
NULL)) {
@@ -1734,12 +1723,6 @@ int wcd_mbhc_start(struct wcd_mbhc *mbhc, struct wcd_mbhc_config *mbhc_cfg)
gpio_free(config->usbc_en1_gpio);
config->usbc_en1_gpio = 0;
}
- if (config->usbc_en2n_gpio > 0) {
- dev_dbg(card->dev, "%s free usb_en2 gpio %d\n",
- __func__, config->usbc_en2n_gpio);
- gpio_free(config->usbc_en2n_gpio);
- config->usbc_en2n_gpio = 0;
- }
if (config->usbc_force_gpio > 0) {
dev_dbg(card->dev, "%s free usb_force gpio %d\n",
__func__, config->usbc_force_gpio);
@@ -1748,8 +1731,6 @@ int wcd_mbhc_start(struct wcd_mbhc *mbhc, struct wcd_mbhc_config *mbhc_cfg)
}
if (config->usbc_en1_gpio_p)
of_node_put(config->usbc_en1_gpio_p);
- if (config->usbc_en2n_gpio_p)
- of_node_put(config->usbc_en2n_gpio_p);
if (config->usbc_force_gpio_p)
of_node_put(config->usbc_force_gpio_p);
dev_dbg(mbhc->codec->dev, "%s: leave %d\n", __func__, rc);
@@ -1790,15 +1771,11 @@ void wcd_mbhc_stop(struct wcd_mbhc *mbhc)
/* free GPIOs */
if (config->usbc_en1_gpio > 0)
gpio_free(config->usbc_en1_gpio);
- if (config->usbc_en2n_gpio > 0)
- gpio_free(config->usbc_en2n_gpio);
if (config->usbc_force_gpio)
gpio_free(config->usbc_force_gpio);
if (config->usbc_en1_gpio_p)
of_node_put(config->usbc_en1_gpio_p);
- if (config->usbc_en2n_gpio_p)
- of_node_put(config->usbc_en2n_gpio_p);
if (config->usbc_force_gpio_p)
of_node_put(config->usbc_force_gpio_p);
}
diff --git a/sound/soc/codecs/wcd-mbhc-v2.h b/sound/soc/codecs/wcd-mbhc-v2.h
index 4ea4401..7ed06c3 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.h
+++ b/sound/soc/codecs/wcd-mbhc-v2.h
@@ -404,10 +404,10 @@ enum mbhc_moisture_rref {
struct usbc_ana_audio_config {
int usbc_en1_gpio;
- int usbc_en2n_gpio;
+ int usbc_en2_gpio;
int usbc_force_gpio;
struct device_node *usbc_en1_gpio_p; /* used by pinctrl API */
- struct device_node *usbc_en2n_gpio_p; /* used by pinctrl API */
+ struct device_node *usbc_en2_gpio_p; /* used by pinctrl API */
struct device_node *usbc_force_gpio_p; /* used by pinctrl API */
};
@@ -416,7 +416,7 @@ struct wcd_mbhc_config {
void *calibration;
bool detect_extn_cable;
bool mono_stero_detection;
- bool (*swap_gnd_mic)(struct snd_soc_codec *codec);
+ bool (*swap_gnd_mic)(struct snd_soc_codec *codec, bool active);
bool hs_ext_micbias;
bool gnd_det_en;
int key_code[WCD_MBHC_KEYCODE_NUM];
diff --git a/sound/soc/codecs/wcd-spi.c b/sound/soc/codecs/wcd-spi.c
index 7e217a6..a08b598 100644
--- a/sound/soc/codecs/wcd-spi.c
+++ b/sound/soc/codecs/wcd-spi.c
@@ -82,8 +82,15 @@
#define WCD_SPI_WORD_BYTE_CNT (4)
#define WCD_SPI_RW_MULTI_MIN_LEN (16)
-/* Max size is closest multiple of 16 less than 64Kbytes */
-#define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 16)
+/* Max size is 32 bytes less than 64Kbytes */
+#define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 32)
+
+/*
+ * Max size for the pre-allocated buffers is the max
+ * possible read/write length + 32 bytes for the SPI
+ * read/write command header itself.
+ */
+#define WCD_SPI_RW_MAX_BUF_SIZE (WCD_SPI_RW_MULTI_MAX_LEN + 32)
/* Alignment requirements */
#define WCD_SPI_RW_MIN_ALIGN WCD_SPI_WORD_BYTE_CNT
@@ -149,6 +156,10 @@ struct wcd_spi_priv {
/* Completion object to indicate system resume completion */
struct completion resume_comp;
+
+ /* Buffers to hold memory used for transfers */
+ void *tx_buf;
+ void *rx_buf;
};
enum xfer_request {
@@ -230,17 +241,18 @@ static int wcd_spi_read_single(struct spi_device *spi,
struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
- u8 *tx_buf;
+ u8 *tx_buf = wcd_spi->tx_buf;
u32 frame = 0;
int ret;
dev_dbg(&spi->dev, "%s: remote_addr = 0x%x\n",
__func__, remote_addr);
- tx_buf = kzalloc(WCD_SPI_READ_SINGLE_LEN,
- GFP_KERNEL | GFP_DMA);
- if (!tx_buf)
+ if (!tx_buf) {
+ dev_err(&spi->dev, "%s: tx_buf not allocated\n",
+ __func__);
return -ENOMEM;
+ }
frame |= WCD_SPI_READ_FRAME_OPCODE;
frame |= remote_addr & WCD_CMD_ADDR_MASK;
@@ -256,7 +268,6 @@ static int wcd_spi_read_single(struct spi_device *spi,
rx_xfer->len = sizeof(*val);
ret = spi_sync(spi, &wcd_spi->msg2);
- kfree(tx_buf);
return ret;
}
@@ -267,8 +278,8 @@ static int wcd_spi_read_multi(struct spi_device *spi,
{
struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
struct spi_transfer *xfer = &wcd_spi->xfer1;
- u8 *tx_buf;
- u8 *rx_buf;
+ u8 *tx_buf = wcd_spi->tx_buf;
+ u8 *rx_buf = wcd_spi->rx_buf;
u32 frame = 0;
int ret;
@@ -278,15 +289,9 @@ static int wcd_spi_read_multi(struct spi_device *spi,
frame |= WCD_SPI_FREAD_FRAME_OPCODE;
frame |= remote_addr & WCD_CMD_ADDR_MASK;
- tx_buf = kzalloc(WCD_SPI_CMD_FREAD_LEN + len,
- GFP_KERNEL | GFP_DMA);
- if (!tx_buf)
- return -ENOMEM;
-
- rx_buf = kzalloc(WCD_SPI_CMD_FREAD_LEN + len,
- GFP_KERNEL | GFP_DMA);
- if (!rx_buf) {
- kfree(tx_buf);
+ if (!tx_buf || !rx_buf) {
+ dev_err(&spi->dev, "%s: %s not allocated\n", __func__,
+ (!tx_buf) ? "tx_buf" : "rx_buf");
return -ENOMEM;
}
@@ -306,8 +311,6 @@ static int wcd_spi_read_multi(struct spi_device *spi,
memcpy(data, rx_buf + WCD_SPI_CMD_FREAD_LEN, len);
done:
- kfree(tx_buf);
- kfree(rx_buf);
return ret;
}
@@ -344,7 +347,7 @@ static int wcd_spi_write_multi(struct spi_device *spi,
struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
struct spi_transfer *xfer = &wcd_spi->xfer1;
u32 frame = 0;
- u8 *tx_buf;
+ u8 *tx_buf = wcd_spi->tx_buf;
int xfer_len, ret;
dev_dbg(&spi->dev, "%s: addr = 0x%x len = %zd\n",
@@ -356,9 +359,11 @@ static int wcd_spi_write_multi(struct spi_device *spi,
frame = cpu_to_be32(frame);
xfer_len = len + sizeof(frame);
- tx_buf = kzalloc(xfer_len, GFP_KERNEL);
- if (!tx_buf)
+ if (!tx_buf) {
+ dev_err(&spi->dev, "%s: tx_buf not allocated\n",
+ __func__);
return -ENOMEM;
+ }
memcpy(tx_buf, &frame, sizeof(frame));
memcpy(tx_buf + sizeof(frame), data, len);
@@ -372,8 +377,6 @@ static int wcd_spi_write_multi(struct spi_device *spi,
dev_err(&spi->dev,
"%s: Failed, addr = 0x%x, len = %zd\n",
__func__, remote_addr, len);
- kfree(tx_buf);
-
return ret;
}
@@ -1331,6 +1334,23 @@ static int wcd_spi_component_bind(struct device *dev,
spi_message_init(&wcd_spi->msg2);
spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);
+
+ /* Pre-allocate the buffers */
+ wcd_spi->tx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!wcd_spi->tx_buf) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ wcd_spi->rx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!wcd_spi->rx_buf) {
+ kfree(wcd_spi->tx_buf);
+ wcd_spi->tx_buf = NULL;
+ ret = -ENOMEM;
+ goto done;
+ }
done:
return ret;
}
@@ -1348,6 +1368,11 @@ static void wcd_spi_component_unbind(struct device *dev,
spi_transfer_del(&wcd_spi->xfer1);
spi_transfer_del(&wcd_spi->xfer2[0]);
spi_transfer_del(&wcd_spi->xfer2[1]);
+
+ kfree(wcd_spi->tx_buf);
+ kfree(wcd_spi->rx_buf);
+ wcd_spi->tx_buf = NULL;
+ wcd_spi->rx_buf = NULL;
}
static const struct component_ops wcd_spi_component_ops = {
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index dedf4dc..f8fa43b 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -188,7 +188,7 @@ module_param(sido_buck_svs_voltage, int, 0664);
MODULE_PARM_DESC(sido_buck_svs_voltage,
"setting for SVS voltage for SIDO BUCK");
-#define TASHA_TX_UNMUTE_DELAY_MS 25
+#define TASHA_TX_UNMUTE_DELAY_MS 40
static int tx_unmute_delay = TASHA_TX_UNMUTE_DELAY_MS;
module_param(tx_unmute_delay, int, 0664);
@@ -5902,8 +5902,6 @@ static int tasha_codec_enable_dec(struct snd_soc_dapm_widget *w,
CF_MIN_3DB_150HZ << 5);
/* Enable TX PGA Mute */
snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
- /* Enable APC */
- snd_soc_update_bits(codec, dec_cfg_reg, 0x08, 0x08);
break;
case SND_SOC_DAPM_POST_PMU:
snd_soc_update_bits(codec, hpf_gate_reg, 0x01, 0x00);
@@ -5930,7 +5928,6 @@ static int tasha_codec_enable_dec(struct snd_soc_dapm_widget *w,
hpf_cut_off_freq =
tasha->tx_hpf_work[decimator].hpf_cut_off_freq;
snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
- snd_soc_update_bits(codec, dec_cfg_reg, 0x08, 0x00);
if (cancel_delayed_work_sync(
&tasha->tx_hpf_work[decimator].dwork)) {
if (hpf_cut_off_freq != CF_MIN_3DB_150HZ) {
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index c0a32f3..f7fb325 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -505,7 +505,7 @@ static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
-#define WCD934X_TX_UNMUTE_DELAY_MS 25
+#define WCD934X_TX_UNMUTE_DELAY_MS 40
static int tx_unmute_delay = WCD934X_TX_UNMUTE_DELAY_MS;
module_param(tx_unmute_delay, int, 0664);
@@ -795,11 +795,13 @@ int wcd934x_bringup(struct wcd9xxx *wcd9xxx)
regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x01);
regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x19);
regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_D_STARTUP, 0x15);
+ /* Add 1msec delay for VOUT to settle */
+ usleep_range(1000, 1100);
regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
- regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x3);
regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x7);
+ regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
return 0;
}
@@ -8274,6 +8276,9 @@ static int __tavil_cdc_mclk_enable(struct tavil_priv *tavil,
WCD9XXX_V2_BG_CLK_LOCK(tavil->resmgr);
ret = __tavil_cdc_mclk_enable_locked(tavil, enable);
+ if (enable)
+ wcd_resmgr_set_sido_input_src(tavil->resmgr,
+ SIDO_SOURCE_RCO_BG);
WCD9XXX_V2_BG_CLK_UNLOCK(tavil->resmgr);
return ret;
@@ -8411,6 +8416,8 @@ static int __tavil_codec_internal_rco_ctrl(struct snd_soc_codec *codec,
__func__, ret);
goto done;
}
+ wcd_resmgr_set_sido_input_src(tavil->resmgr,
+ SIDO_SOURCE_RCO_BG);
ret = wcd_resmgr_enable_clk_block(tavil->resmgr,
WCD_CLK_RCO);
ret |= tavil_cdc_req_mclk_enable(tavil, false);
@@ -9814,18 +9821,23 @@ static int __tavil_enable_efuse_sensing(struct tavil_priv *tavil)
{
int val, rc;
- __tavil_cdc_mclk_enable(tavil, true);
+ WCD9XXX_V2_BG_CLK_LOCK(tavil->resmgr);
+ __tavil_cdc_mclk_enable_locked(tavil, true);
regmap_update_bits(tavil->wcd9xxx->regmap,
WCD934X_CHIP_TIER_CTRL_EFUSE_CTL, 0x1E, 0x10);
regmap_update_bits(tavil->wcd9xxx->regmap,
WCD934X_CHIP_TIER_CTRL_EFUSE_CTL, 0x01, 0x01);
-
/*
* 5ms sleep required after enabling efuse control
* before checking the status.
*/
usleep_range(5000, 5500);
+ wcd_resmgr_set_sido_input_src(tavil->resmgr,
+ SIDO_SOURCE_RCO_BG);
+
+ WCD9XXX_V2_BG_CLK_UNLOCK(tavil->resmgr);
+
rc = regmap_read(tavil->wcd9xxx->regmap,
WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS, &val);
if (rc || (!(val & 0x01)))
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.c b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
index 8780888..825aaee 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
@@ -25,8 +25,7 @@
#define WCD93XX_CDC_CLK_RST_CTRL_MCLK_CONTROL 0x0d41
#define WCD93XX_CDC_CLK_RST_CTRL_FS_CNT_CONTROL 0x0d42
-static void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
- int sido_src);
+
static const char *wcd_resmgr_clk_type_to_str(enum wcd_clock_type clk_type)
{
if (clk_type == WCD_CLK_OFF)
@@ -267,8 +266,6 @@ static int wcd_resmgr_enable_clk_mclk(struct wcd9xxx_resmgr_v2 *resmgr)
0x01, 0x01);
wcd_resmgr_codec_reg_update_bits(resmgr,
WCD934X_CODEC_RPM_CLK_GATE, 0x03, 0x00);
- wcd_resmgr_set_sido_input_src(resmgr,
- SIDO_SOURCE_RCO_BG);
} else {
wcd_resmgr_codec_reg_update_bits(resmgr,
WCD93XX_CDC_CLK_RST_CTRL_FS_CNT_CONTROL,
@@ -515,7 +512,7 @@ int wcd_resmgr_enable_clk_block(struct wcd9xxx_resmgr_v2 *resmgr,
return ret;
}
-static void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
+void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
int sido_src)
{
if (!resmgr)
@@ -553,6 +550,7 @@ static void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
pr_debug("%s: sido input src to external\n", __func__);
}
}
+EXPORT_SYMBOL(wcd_resmgr_set_sido_input_src);
/*
* wcd_resmgr_set_sido_input_src_locked:
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.h b/sound/soc/codecs/wcd9xxx-resmgr-v2.h
index f605a24..e831ba6 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.h
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -87,4 +87,7 @@ int wcd_resmgr_get_clk_type(struct wcd9xxx_resmgr_v2 *resmgr);
void wcd_resmgr_post_ssr_v2(struct wcd9xxx_resmgr_v2 *resmgr);
void wcd_resmgr_set_sido_input_src_locked(struct wcd9xxx_resmgr_v2 *resmgr,
int sido_src);
+void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
+ int sido_src);
+
#endif
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index 062bae2..4189e59 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -1273,6 +1273,7 @@ static int wsa881x_swr_remove(struct swr_device *pdev)
return -EINVAL;
}
debugfs_remove_recursive(debugfs_wsa881x_dent);
+ debugfs_wsa881x_dent = NULL;
snd_soc_unregister_codec(&pdev->dev);
if (wsa881x->pd_gpio)
gpio_free(wsa881x->pd_gpio);
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index b75ba98..7bc4051 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -496,6 +496,8 @@ static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_rx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_rx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_chs, mi2s_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(mi2s_rx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(mi2s_tx_format, bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(hifi_function, hifi_text);
static struct platform_device *spdev;
@@ -2263,6 +2265,54 @@ static int mi2s_get_sample_rate(int value)
return sample_rate;
}
+static int mi2s_get_format(int value)
+{
+ int format;
+
+ switch (value) {
+ case 0:
+ format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ case 1:
+ format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 2:
+ format = SNDRV_PCM_FORMAT_S24_3LE;
+ break;
+ case 3:
+ format = SNDRV_PCM_FORMAT_S32_LE;
+ break;
+ default:
+ format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ return format;
+}
+
+static int mi2s_get_format_value(int format)
+{
+ int value;
+
+ switch (format) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ value = 0;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ value = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S24_3LE:
+ value = 2;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ value = 3;
+ break;
+ default:
+ value = 0;
+ break;
+ }
+ return value;
+}
+
static int mi2s_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -2395,6 +2445,78 @@ static int msm_mi2s_tx_ch_put(struct snd_kcontrol *kcontrol,
return 1;
}
+static int msm_mi2s_rx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_get_format_value(mi2s_rx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+ idx, mi2s_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_mi2s_rx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ mi2s_rx_cfg[idx].bit_format =
+ mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+ idx, mi2s_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_mi2s_tx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_get_format_value(mi2s_tx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+ idx, mi2s_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_mi2s_tx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ mi2s_tx_cfg[idx].bit_format =
+ mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+ idx, mi2s_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
static int msm_hifi_ctrl(struct snd_soc_codec *codec)
{
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
@@ -2647,6 +2769,22 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
SOC_ENUM_EXT("QUAT_MI2S_TX Channels", quat_mi2s_tx_chs,
msm_mi2s_tx_ch_get, msm_mi2s_tx_ch_put),
+ SOC_ENUM_EXT("PRIM_MI2S_RX Format", mi2s_rx_format,
+ msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+ SOC_ENUM_EXT("PRIM_MI2S_TX Format", mi2s_tx_format,
+ msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_RX Format", mi2s_rx_format,
+ msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_TX Format", mi2s_tx_format,
+ msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+ SOC_ENUM_EXT("TERT_MI2S_RX Format", mi2s_rx_format,
+ msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+ SOC_ENUM_EXT("TERT_MI2S_TX Format", mi2s_tx_format,
+ msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+ SOC_ENUM_EXT("QUAT_MI2S_RX Format", mi2s_rx_format,
+ msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+ SOC_ENUM_EXT("QUAT_MI2S_TX Format", mi2s_tx_format,
+ msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
SOC_ENUM_EXT("HiFi Function", hifi_function, msm_hifi_get,
msm_hifi_put),
};
@@ -3111,48 +3249,64 @@ static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
break;
case MSM_BACKEND_DAI_PRI_MI2S_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[PRIM_MI2S].bit_format);
rate->min = rate->max = mi2s_rx_cfg[PRIM_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[PRIM_MI2S].channels;
break;
case MSM_BACKEND_DAI_PRI_MI2S_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[PRIM_MI2S].bit_format);
rate->min = rate->max = mi2s_tx_cfg[PRIM_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[PRIM_MI2S].channels;
break;
case MSM_BACKEND_DAI_SECONDARY_MI2S_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[SEC_MI2S].bit_format);
rate->min = rate->max = mi2s_rx_cfg[SEC_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[SEC_MI2S].channels;
break;
case MSM_BACKEND_DAI_SECONDARY_MI2S_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[SEC_MI2S].bit_format);
rate->min = rate->max = mi2s_tx_cfg[SEC_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[SEC_MI2S].channels;
break;
case MSM_BACKEND_DAI_TERTIARY_MI2S_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[TERT_MI2S].bit_format);
rate->min = rate->max = mi2s_rx_cfg[TERT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[TERT_MI2S].channels;
break;
case MSM_BACKEND_DAI_TERTIARY_MI2S_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[TERT_MI2S].bit_format);
rate->min = rate->max = mi2s_tx_cfg[TERT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[TERT_MI2S].channels;
break;
case MSM_BACKEND_DAI_QUATERNARY_MI2S_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[QUAT_MI2S].bit_format);
rate->min = rate->max = mi2s_rx_cfg[QUAT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[QUAT_MI2S].channels;
break;
case MSM_BACKEND_DAI_QUATERNARY_MI2S_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[QUAT_MI2S].bit_format);
rate->min = rate->max = mi2s_tx_cfg[QUAT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[QUAT_MI2S].channels;
@@ -3980,6 +4134,7 @@ static u32 get_mi2s_bits_per_sample(u32 bit_format)
u32 bit_per_sample;
switch (bit_format) {
+ case SNDRV_PCM_FORMAT_S32_LE:
case SNDRV_PCM_FORMAT_S24_3LE:
case SNDRV_PCM_FORMAT_S24_LE:
bit_per_sample = 32;
@@ -4059,6 +4214,13 @@ static int msm_set_pinctrl(struct msm_pinctrl_info *pinctrl_info,
ret = -EINVAL;
goto err;
}
+
+ if (pinctrl_info->pinctrl == NULL) {
+ pr_err("%s: pinctrl_info->pinctrl is NULL\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
curr_state = pinctrl_info->curr_state;
pinctrl_info->curr_state = new_state;
pr_debug("%s: curr_state = %s new_state = %s\n", __func__,
@@ -4327,6 +4489,7 @@ static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
struct snd_soc_card *card = rtd->card;
struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+ int ret_pinctrl = 0;
dev_dbg(rtd->card->dev,
"%s: substream = %s stream = %d, dai name %s, dai ID %d\n",
@@ -4341,11 +4504,10 @@ static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
goto done;
}
if (index == QUAT_MI2S) {
- ret = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
- if (ret) {
+ ret_pinctrl = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
+ if (ret_pinctrl) {
pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
- __func__, ret);
- goto done;
+ __func__, ret_pinctrl);
}
}
@@ -4404,6 +4566,7 @@ static void msm_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
struct snd_soc_card *card = rtd->card;
struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+ int ret_pinctrl = 0;
pr_debug("%s(): substream = %s stream = %d\n", __func__,
substream->name, substream->stream);
@@ -4424,10 +4587,10 @@ static void msm_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
mutex_unlock(&mi2s_intf_conf[index].lock);
if (index == QUAT_MI2S) {
- ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
- if (ret)
+ ret_pinctrl = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+ if (ret_pinctrl)
pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
- __func__, ret);
+ __func__, ret_pinctrl);
}
}
@@ -7266,10 +7429,12 @@ static int msm_asoc_machine_remove(struct platform_device *pdev)
struct msm_asoc_mach_data *pdata =
snd_soc_card_get_drvdata(card);
- gpio_free(pdata->us_euro_gpio);
+ if (gpio_is_valid(pdata->us_euro_gpio))
+ gpio_free(pdata->us_euro_gpio);
i2s_auxpcm_deinit();
snd_soc_unregister_card(card);
+ audio_notifier_deregister("msm8998");
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 0c46763..dfac5fd 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -4131,12 +4131,13 @@ static struct snd_soc_dai_driver msm_dai_q6_mi2s_dai[] = {
.stream_name = "INT0 MI2S Playback",
.aif_name = "INT0_MI2S_RX",
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
- SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_44100,
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S24_3LE,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 192000,
},
.capture = {
.stream_name = "INT0 MI2S Capture",
@@ -4235,12 +4236,13 @@ static struct snd_soc_dai_driver msm_dai_q6_mi2s_dai[] = {
.stream_name = "INT4 MI2S Playback",
.aif_name = "INT4_MI2S_RX",
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
- SNDRV_PCM_RATE_16000,
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S24_3LE,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 192000,
},
.capture = {
.stream_name = "INT4 MI2S Capture",
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 465634b..cbee6b6 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -5378,6 +5378,57 @@ static const struct snd_kcontrol_new tert_tdm_rx_3_mixer_controls[] = {
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new tert_tdm_rx_4_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new quat_tdm_rx_0_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -11176,6 +11227,9 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
SND_SOC_DAPM_MIXER("TERT_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
tert_tdm_rx_3_mixer_controls,
ARRAY_SIZE(tert_tdm_rx_3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("TERT_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+ tert_tdm_rx_4_mixer_controls,
+ ARRAY_SIZE(tert_tdm_rx_4_mixer_controls)),
SND_SOC_DAPM_MIXER("QUAT_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
quat_tdm_rx_0_mixer_controls,
ARRAY_SIZE(quat_tdm_rx_0_mixer_controls)),
@@ -12200,6 +12254,24 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"TERT_TDM_RX_3", NULL, "TERT_TDM_RX_3 Audio Mixer"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia9", "MM_DL9"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia10", "MM_DL10"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia11", "MM_DL11"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia12", "MM_DL12"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia13", "MM_DL13"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia14", "MM_DL14"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia15", "MM_DL15"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"TERT_TDM_RX_4", NULL, "TERT_TDM_RX_4 Audio Mixer"},
+
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -12361,6 +12433,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia2 Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
{"MultiMedia1 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
{"MultiMedia1 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"MultiMedia2 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
{"MultiMedia6 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia6 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
{"MultiMedia3 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
@@ -12954,6 +13027,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"VOIP_UL", NULL, "VOC_EXT_EC MUX"},
{"VoLTE_UL", NULL, "VOC_EXT_EC MUX"},
{"VOICE2_UL", NULL, "VOC_EXT_EC MUX"},
+ {"VoWLAN_UL", NULL, "VOC_EXT_EC MUX"},
{"VOICEMMODE1_UL", NULL, "VOC_EXT_EC MUX"},
{"VOICEMMODE2_UL", NULL, "VOC_EXT_EC MUX"},
@@ -13907,6 +13981,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"BE_OUT", NULL, "TERT_TDM_RX_1"},
{"BE_OUT", NULL, "TERT_TDM_RX_2"},
{"BE_OUT", NULL, "TERT_TDM_RX_3"},
+ {"BE_OUT", NULL, "TERT_TDM_RX_4"},
{"BE_OUT", NULL, "QUAT_TDM_RX_0"},
{"BE_OUT", NULL, "QUAT_TDM_RX_1"},
{"BE_OUT", NULL, "QUAT_TDM_RX_2"},
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 176b8aa..05d123b 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -6695,8 +6695,6 @@ static int afe_set_cal_fb_spkr_prot(int32_t cal_type, size_t data_size,
mutex_lock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
memcpy(&this_afe.prot_cfg, &cal_data->cal_info,
sizeof(this_afe.prot_cfg));
- this_afe.th_ftm_cfg.mode = this_afe.prot_cfg.mode;
- this_afe.ex_ftm_cfg.mode = this_afe.prot_cfg.mode;
mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
done:
return ret;
@@ -6839,8 +6837,6 @@ static int afe_get_cal_fb_spkr_prot(int32_t cal_type, size_t data_size,
cal_data->cal_info.r0[SP_V2_SPKR_1] = -1;
cal_data->cal_info.r0[SP_V2_SPKR_2] = -1;
}
- this_afe.th_ftm_cfg.mode = this_afe.prot_cfg.mode;
- this_afe.ex_ftm_cfg.mode = this_afe.prot_cfg.mode;
mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
__pm_relax(&wl.ws);
done:
diff --git a/sound/soc/msm/sdm660-external.c b/sound/soc/msm/sdm660-external.c
index b603b8a..2c3d7fc 100644
--- a/sound/soc/msm/sdm660-external.c
+++ b/sound/soc/msm/sdm660-external.c
@@ -1609,6 +1609,9 @@ int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
snd_soc_dapm_ignore_suspend(dapm, "ANC HPHR");
snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT1");
snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT2");
+ } else {
+ snd_soc_dapm_ignore_suspend(dapm, "MAD_CPE_OUT1");
+ snd_soc_dapm_ignore_suspend(dapm, "MAD_CPE_OUT2");
}
snd_soc_dapm_sync(dapm);
diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c
index 304bf47..22d6e0e 100644
--- a/sound/soc/msm/sdm845.c
+++ b/sound/soc/msm/sdm845.c
@@ -173,7 +173,9 @@ struct msm_pinctrl_info {
struct msm_asoc_mach_data {
u32 mclk_freq;
int us_euro_gpio; /* used by gpio driver API */
+ int usbc_en2_gpio; /* used by gpio driver API */
struct device_node *us_euro_gpio_p; /* used by pinctrl API */
+ struct pinctrl *usbc_en2_gpio_p; /* used by pinctrl API */
struct device_node *hph_en1_gpio_p; /* used by pinctrl API */
struct device_node *hph_en0_gpio_p; /* used by pinctrl API */
struct snd_info_entry *codec_root;
@@ -3106,27 +3108,126 @@ static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
return rc;
}
-static bool msm_swap_gnd_mic(struct snd_soc_codec *codec)
+static bool msm_usbc_swap_gnd_mic(struct snd_soc_codec *codec, bool active)
{
- struct snd_soc_card *card = codec->component.card;
- struct msm_asoc_mach_data *pdata =
- snd_soc_card_get_drvdata(card);
int value = 0;
+ bool ret = false;
+ struct snd_soc_card *card = codec->component.card;
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct pinctrl_state *en2_pinctrl_active;
+ struct pinctrl_state *en2_pinctrl_sleep;
- if (pdata->us_euro_gpio_p) {
- value = msm_cdc_pinctrl_get_state(pdata->us_euro_gpio_p);
- if (value)
- msm_cdc_pinctrl_select_sleep_state(
- pdata->us_euro_gpio_p);
- else
- msm_cdc_pinctrl_select_active_state(
- pdata->us_euro_gpio_p);
- } else if (pdata->us_euro_gpio >= 0) {
- value = gpio_get_value_cansleep(pdata->us_euro_gpio);
- gpio_set_value_cansleep(pdata->us_euro_gpio, !value);
+ if (!pdata->usbc_en2_gpio_p) {
+ if (active) {
+ /* if active and usbc_en2_gpio undefined, get pin */
+ pdata->usbc_en2_gpio_p = devm_pinctrl_get(card->dev);
+ if (IS_ERR_OR_NULL(pdata->usbc_en2_gpio_p)) {
+ dev_err(card->dev,
+ "%s: Can't get EN2 gpio pinctrl:%ld\n",
+ __func__,
+ PTR_ERR(pdata->usbc_en2_gpio_p));
+ pdata->usbc_en2_gpio_p = NULL;
+ return false;
+ }
+ } else {
+ /* if not active and usbc_en2_gpio undefined, return */
+ return false;
+ }
}
- pr_debug("%s: swap select switch %d to %d\n", __func__, value, !value);
- return true;
+
+ pdata->usbc_en2_gpio = of_get_named_gpio(card->dev->of_node,
+ "qcom,usbc-analog-en2-gpio", 0);
+ if (!gpio_is_valid(pdata->usbc_en2_gpio)) {
+ dev_err(card->dev, "%s: property %s not in node %s\n",
+ __func__, "qcom,usbc-analog-en2-gpio",
+ card->dev->of_node->full_name);
+ return false;
+ }
+
+ en2_pinctrl_active = pinctrl_lookup_state(
+ pdata->usbc_en2_gpio_p, "aud_active");
+ if (IS_ERR_OR_NULL(en2_pinctrl_active)) {
+ dev_err(card->dev,
+ "%s: Cannot get aud_active pinctrl state:%ld\n",
+ __func__, PTR_ERR(en2_pinctrl_active));
+ ret = false;
+ goto err_lookup_state;
+ }
+
+ en2_pinctrl_sleep = pinctrl_lookup_state(
+ pdata->usbc_en2_gpio_p, "aud_sleep");
+ if (IS_ERR_OR_NULL(en2_pinctrl_sleep)) {
+ dev_err(card->dev,
+ "%s: Cannot get aud_sleep pinctrl state:%ld\n",
+ __func__, PTR_ERR(en2_pinctrl_sleep));
+ ret = false;
+ goto err_lookup_state;
+ }
+
+ /* if active and usbc_en2_gpio_p defined, swap using usbc_en2_gpio_p */
+ if (active) {
+ dev_dbg(codec->dev, "%s: enter\n", __func__);
+ if (pdata->usbc_en2_gpio_p) {
+ value = gpio_get_value_cansleep(pdata->usbc_en2_gpio);
+ if (value)
+ pinctrl_select_state(pdata->usbc_en2_gpio_p,
+ en2_pinctrl_sleep);
+ else
+ pinctrl_select_state(pdata->usbc_en2_gpio_p,
+ en2_pinctrl_active);
+ } else if (pdata->usbc_en2_gpio >= 0) {
+ value = gpio_get_value_cansleep(pdata->usbc_en2_gpio);
+ gpio_set_value_cansleep(pdata->usbc_en2_gpio, !value);
+ }
+ pr_debug("%s: swap select switch %d to %d\n", __func__,
+ value, !value);
+ ret = true;
+ } else {
+ /* if not active, release usbc_en2_gpio_p pin */
+ pinctrl_select_state(pdata->usbc_en2_gpio_p,
+ en2_pinctrl_sleep);
+ }
+
+err_lookup_state:
+ devm_pinctrl_put(pdata->usbc_en2_gpio_p);
+ pdata->usbc_en2_gpio_p = NULL;
+ return ret;
+}
+
+static bool msm_swap_gnd_mic(struct snd_soc_codec *codec, bool active)
+{
+ int value = 0;
+ bool ret = false;
+ struct snd_soc_card *card = codec->component.card;
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+
+ if (!pdata)
+ return false;
+
+ if (!wcd_mbhc_cfg.enable_usbc_analog) {
+ /* if usbc is not defined, swap using us_euro_gpio_p */
+ if (pdata->us_euro_gpio_p) {
+ value = msm_cdc_pinctrl_get_state(
+ pdata->us_euro_gpio_p);
+ if (value)
+ msm_cdc_pinctrl_select_sleep_state(
+ pdata->us_euro_gpio_p);
+ else
+ msm_cdc_pinctrl_select_active_state(
+ pdata->us_euro_gpio_p);
+ } else if (pdata->us_euro_gpio >= 0) {
+ value = gpio_get_value_cansleep(
+ pdata->us_euro_gpio);
+ gpio_set_value_cansleep(
+ pdata->us_euro_gpio, !value);
+ }
+ pr_debug("%s: swap select switch %d to %d\n", __func__,
+ value, !value);
+ ret = true;
+ } else {
+ /* if usbc is defined, swap using usbc_en2 */
+ ret = msm_usbc_swap_gnd_mic(codec, active);
+ }
+ return ret;
}
static int msm_afe_set_config(struct snd_soc_codec *codec)
@@ -3833,6 +3934,13 @@ static int msm_set_pinctrl(struct msm_pinctrl_info *pinctrl_info,
ret = -EINVAL;
goto err;
}
+
+ if (pinctrl_info->pinctrl == NULL) {
+ pr_err("%s: pinctrl_info->pinctrl is NULL\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
curr_state = pinctrl_info->curr_state;
pinctrl_info->curr_state = new_state;
pr_debug("%s: curr_state = %s new_state = %s\n", __func__,
@@ -4101,6 +4209,7 @@ static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
struct snd_soc_card *card = rtd->card;
struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+ int ret_pinctrl = 0;
dev_dbg(rtd->card->dev,
"%s: substream = %s stream = %d, dai name %s, dai ID %d\n",
@@ -4115,12 +4224,10 @@ static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
goto err;
}
if (index == QUAT_MI2S) {
- ret = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
- if (ret) {
+ ret_pinctrl = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
+ if (ret_pinctrl)
pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
- __func__, ret);
- goto err;
- }
+ __func__, ret_pinctrl);
}
/*
* Mutex protection in case the same MI2S
@@ -4177,6 +4284,7 @@ static void msm_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
struct snd_soc_card *card = rtd->card;
struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+ int ret_pinctrl = 0;
pr_debug("%s(): substream = %s stream = %d\n", __func__,
substream->name, substream->stream);
@@ -4197,10 +4305,10 @@ static void msm_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
mutex_unlock(&mi2s_intf_conf[index].lock);
if (index == QUAT_MI2S) {
- ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
- if (ret)
+ ret_pinctrl = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+ if (ret_pinctrl)
pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
- __func__, ret);
+ __func__, ret_pinctrl);
}
}
@@ -6454,6 +6562,7 @@ static int msm_asoc_machine_probe(struct platform_device *pdev)
char *mclk_freq_prop_name;
const struct of_device_id *match;
int ret;
+ const char *usb_c_dt = "qcom,msm-mbhc-usbc-audio-supported";
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No platform supplied from device tree\n");
@@ -6601,6 +6710,9 @@ static int msm_asoc_machine_probe(struct platform_device *pdev)
wcd_mbhc_cfg.swap_gnd_mic = msm_swap_gnd_mic;
}
+ if (of_find_property(pdev->dev.of_node, usb_c_dt, NULL))
+ wcd_mbhc_cfg.swap_gnd_mic = msm_swap_gnd_mic;
+
ret = msm_prepare_us_euro(card);
if (ret)
dev_dbg(&pdev->dev, "msm_prepare_us_euro failed (%d)\n",
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index d2ac038..083887b 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -13,7 +13,8 @@
pcm.o \
proc.o \
quirks.o \
- stream.o
+ stream.o \
+ badd.o
snd-usbmidi-lib-objs := midi.o
diff --git a/sound/usb/badd.c b/sound/usb/badd.c
new file mode 100644
index 0000000..cc6c26c
--- /dev/null
+++ b/sound/usb/badd.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/usb.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
+
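+/*
+ * UAC3 BADD (Basic Audio Device Definition) devices imply their topology
+ * from the BADD profile rather than carrying a full set of class-specific
+ * descriptors, so the inferred units are modeled here as static UAC3
+ * descriptors.
+ */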
+struct uac3_input_terminal_descriptor badd_baif_in_term_desc = {
+ .bLength = UAC3_DT_INPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_INPUT_TERMINAL,
+ .bTerminalID = BADD_IN_TERM_ID_BAIF,
+ .bCSourceID = BADD_CLOCK_SOURCE,
+ .wExTerminalDescrID = 0x0000,
+ .wTerminalDescrStr = 0x0000
+};
+
+struct uac3_input_terminal_descriptor badd_baof_in_term_desc = {
+ .bLength = UAC3_DT_INPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_INPUT_TERMINAL,
+ .bTerminalID = BADD_IN_TERM_ID_BAOF,
+ .wTerminalType = UAC_TERMINAL_STREAMING,
+ .bAssocTerminal = 0x00,
+ .bCSourceID = BADD_CLOCK_SOURCE,
+ .bmControls = 0x00000000,
+ .wExTerminalDescrID = 0x0000,
+ .wConnectorsDescrID = 0x0000,
+ .wTerminalDescrStr = 0x0000
+};
+
+struct uac3_output_terminal_descriptor badd_baif_out_term_desc = {
+ .bLength = UAC3_DT_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+ .bTerminalID = BADD_OUT_TERM_ID_BAIF,
+ .wTerminalType = UAC_TERMINAL_STREAMING,
+ .bAssocTerminal = 0x00, /* No associated terminal */
+ .bSourceID = BADD_FU_ID_BAIF,
+ .bCSourceID = BADD_CLOCK_SOURCE,
+ .bmControls = 0x00000000, /* No controls */
+ .wExTerminalDescrID = 0x0000,
+ .wConnectorsDescrID = 0x0000,
+ .wTerminalDescrStr = 0x0000
+};
+
+struct uac3_output_terminal_descriptor badd_baof_out_term_desc = {
+ .bLength = UAC3_DT_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+ .bTerminalID = BADD_OUT_TERM_ID_BAOF,
+ .bSourceID = BADD_FU_ID_BAOF,
+ .bCSourceID = BADD_CLOCK_SOURCE,
+ .wExTerminalDescrID = 0x0000,
+ .wTerminalDescrStr = 0x0000
+};
+
+__u8 monoControls[] = {
+ 0x03, 0x00, 0x00, 0x00,
+ 0x0c, 0x00, 0x00, 0x00};
+
+__u8 stereoControls[] = {
+ 0x03, 0x00, 0x00, 0x00,
+ 0x0c, 0x00, 0x00, 0x00,
+ 0x0c, 0x00, 0x00, 0x00
+};
+
+__u8 badd_mu_src_ids[] = {BADD_IN_TERM_ID_BAOF, BADD_FU_ID_BAIOF};
+
+struct uac3_mixer_unit_descriptor badd_baiof_mu_desc = {
+ .bLength = UAC3_DT_MIXER_UNIT_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC3_MIXER_UNIT_V3,
+ .bUnitID = BADD_MU_ID_BAIOF,
+ .bNrInPins = 0x02,
+ .baSourceID = badd_mu_src_ids,
+ .bmMixerControls = 0x00,
+ .bmControls = 0x00000000,
+ .wMixerDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baif_fu_desc = {
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+ .bUnitID = BADD_FU_ID_BAIF,
+ .bSourceID = BADD_IN_TERM_ID_BAIF,
+ .wFeatureDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baof_fu_desc = {
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+ .bUnitID = BADD_FU_ID_BAOF,
+ .wFeatureDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baiof_fu_desc = {
+ .bLength = 0x0f,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+ .bUnitID = BADD_FU_ID_BAIOF,
+ .bSourceID = BADD_IN_TERM_ID_BAIF,
+ .bmaControls = monoControls,
+ .wFeatureDescrStr = 0x0000
+};
+
+struct uac3_clock_source_descriptor badd_clock_desc = {
+ .bLength = UAC3_DT_CLOCK_SRC_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC3_CLOCK_SOURCE,
+ .bClockID = BADD_CLOCK_SOURCE,
+ .bmControls = 0x00000001,
+ .bReferenceTerminal = 0x00,
+ .wClockSourceStr = 0x0000
+};
+
+void *badd_desc_list[] = {
+ &badd_baif_in_term_desc,
+ &badd_baof_in_term_desc,
+ &badd_baiof_mu_desc,
+ &badd_baif_fu_desc,
+ &badd_baof_fu_desc,
+ &badd_baiof_fu_desc,
+ &badd_clock_desc
+};
+
diff --git a/sound/usb/card.c b/sound/usb/card.c
index ccf06de..eaf18aa 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -45,6 +45,7 @@
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
#include <linux/module.h>
+#include <linux/usb/audio-v3.h>
#include <sound/control.h>
#include <sound/core.h>
@@ -285,7 +286,6 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
struct usb_host_interface *host_iface;
struct usb_interface_descriptor *altsd;
struct usb_interface *usb_iface;
- void *control_header;
int i, protocol;
usb_iface = usb_ifnum_to_if(dev, ctrlif);
@@ -302,16 +302,13 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
return -EINVAL;
}
- control_header = snd_usb_find_csint_desc(host_iface->extra,
- host_iface->extralen,
- NULL, UAC_HEADER);
altsd = get_iface_desc(host_iface);
protocol = altsd->bInterfaceProtocol;
- if (!control_header) {
- dev_err(&dev->dev, "cannot find UAC_HEADER\n");
- return -EINVAL;
- }
+ /*
+ * UAC 1.0 devices use AC HEADER Desc for linking AS interfaces;
+ * UAC 2.0 and 3.0 devices use IAD for linking AS interfaces
+ */
switch (protocol) {
default:
@@ -321,8 +318,17 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
/* fall through */
case UAC_VERSION_1: {
- struct uac1_ac_header_descriptor *h1 = control_header;
+ void *control_header;
+ struct uac1_ac_header_descriptor *h1;
+ control_header = snd_usb_find_csint_desc(host_iface->extra,
+ host_iface->extralen, NULL, UAC_HEADER);
+ if (!control_header) {
+ dev_err(&dev->dev, "cannot find UAC_HEADER\n");
+ return -EINVAL;
+ }
+
+ h1 = control_header;
if (!h1->bInCollection) {
dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
return -EINVAL;
@@ -339,7 +345,8 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
break;
}
- case UAC_VERSION_2: {
+ case UAC_VERSION_2:
+ case UAC_VERSION_3: {
struct usb_interface_assoc_descriptor *assoc =
usb_iface->intf_assoc;
if (!assoc) {
@@ -358,7 +365,8 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
}
if (!assoc) {
- dev_err(&dev->dev, "Audio class v2 interfaces need an interface association\n");
+ dev_err(&dev->dev, "Audio class V%d interfaces need an interface association\n",
+ protocol);
return -EINVAL;
}
@@ -606,6 +614,15 @@ static int usb_audio_probe(struct usb_interface *intf,
struct usb_host_interface *alts;
int ifnum;
u32 id;
+ struct usb_interface_assoc_descriptor *assoc;
+
+ assoc = intf->intf_assoc;
+ if (assoc && assoc->bFunctionClass == USB_CLASS_AUDIO &&
+ assoc->bFunctionProtocol == UAC_VERSION_3 &&
+ assoc->bFunctionSubClass == FULL_ADC_PROFILE) {
+ dev_info(&dev->dev, "No support for full-fledged ADC 3.0 yet!!\n");
+ return -EINVAL;
+ }
alts = &intf->altsetting[0];
ifnum = get_iface_desc(alts)->bInterfaceNumber;
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 26dd5f2..8238180 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -428,6 +428,10 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
case UAC_VERSION_2:
return set_sample_rate_v2(chip, iface, alts, fmt, rate);
+
+ /* Clock rate is fixed at 48 kHz for BADD devices */
+ case UAC_VERSION_3:
+ return 0;
}
}
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 2c44386..eaf2615 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -20,6 +20,7 @@
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -69,6 +70,34 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
format <<= 1;
break;
}
+
+ case UAC_VERSION_3: {
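+ /* BADD: sample format is implied by the endpoint's wMaxPacketSize */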
+ switch (fp->maxpacksize) {
+ case BADD_MAXPSIZE_SYNC_MONO_16:
+ case BADD_MAXPSIZE_SYNC_STEREO_16:
+ case BADD_MAXPSIZE_ASYNC_MONO_16:
+ case BADD_MAXPSIZE_ASYNC_STEREO_16: {
+ sample_width = BIT_RES_16_BIT;
+ sample_bytes = SUBSLOTSIZE_16_BIT;
+ break;
+ }
+
+ case BADD_MAXPSIZE_SYNC_MONO_24:
+ case BADD_MAXPSIZE_SYNC_STEREO_24:
+ case BADD_MAXPSIZE_ASYNC_MONO_24:
+ case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+ sample_width = BIT_RES_24_BIT;
+ sample_bytes = SUBSLOTSIZE_24_BIT;
+ break;
+ }
+ default:
+ usb_audio_err(chip, "%u:%d : Invalid wMaxPacketSize\n",
+ fp->iface, fp->altsetting);
+ return pcm_formats;
+ }
+ format = 1 << format;
+ break;
+ }
}
if ((pcm_formats == 0) &&
@@ -364,17 +393,34 @@ static int parse_audio_format_rates_v2(struct snd_usb_audio *chip,
return ret;
}
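+/* BADD devices expose a single fixed sample rate (48 kHz) */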
+static int badd_set_audio_rate_v3(struct snd_usb_audio *chip,
+ struct audioformat *fp)
+{
+ unsigned int rate;
+
+ fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL);
+ if (fp->rate_table == NULL)
+ return -ENOMEM;
+
+ fp->nr_rates = 1;
+ rate = BADD_SAMPLING_RATE;
+ fp->rate_min = fp->rate_max = fp->rate_table[0] = rate;
+ fp->rates |= snd_pcm_rate_to_rate_bit(rate);
+ return 0;
+}
+
/*
* parse the format type I and III descriptors
*/
static int parse_audio_format_i(struct snd_usb_audio *chip,
struct audioformat *fp, unsigned int format,
+ u8 format_type,
struct uac_format_type_i_continuous_descriptor *fmt)
{
snd_pcm_format_t pcm_format;
int ret;
- if (fmt->bFormatType == UAC_FORMAT_TYPE_III) {
+ if (format_type == UAC_FORMAT_TYPE_III) {
/* FIXME: the format type is really IECxxx
* but we give normal PCM format to get the existing
* apps working...
@@ -413,6 +459,9 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
/* fp->channels is already set in this case */
ret = parse_audio_format_rates_v2(chip, fp);
break;
+ case UAC_VERSION_3:
+ ret = badd_set_audio_rate_v3(chip, fp);
+ break;
}
if (fp->channels < 1) {
@@ -484,11 +533,18 @@ int snd_usb_parse_audio_format(struct snd_usb_audio *chip,
int stream)
{
int err;
+ int format_type = -EINVAL;
- switch (fmt->bFormatType) {
+ if ((fp->protocol == UAC_VERSION_1) ||
+ (fp->protocol == UAC_VERSION_2))
+ format_type = fmt->bFormatType;
+ else
+ format_type = UAC_FORMAT_TYPE_I; /* only BADD is supported */
+
+ switch (format_type) {
case UAC_FORMAT_TYPE_I:
case UAC_FORMAT_TYPE_III:
- err = parse_audio_format_i(chip, fp, format, fmt);
+ err = parse_audio_format_i(chip, fp, format, format_type, fmt);
break;
case UAC_FORMAT_TYPE_II:
err = parse_audio_format_ii(chip, fp, format, fmt);
@@ -497,10 +553,10 @@ int snd_usb_parse_audio_format(struct snd_usb_audio *chip,
usb_audio_info(chip,
"%u:%d : format type %d is not supported yet\n",
fp->iface, fp->altsetting,
- fmt->bFormatType);
+ format_type);
return -ENOTSUPP;
}
- fp->fmt_type = fmt->bFormatType;
+ fp->fmt_type = format_type;
if (err < 0)
return err;
#if 1
@@ -511,7 +567,7 @@ int snd_usb_parse_audio_format(struct snd_usb_audio *chip,
if (chip->usb_id == USB_ID(0x041e, 0x3000) ||
chip->usb_id == USB_ID(0x041e, 0x3020) ||
chip->usb_id == USB_ID(0x041e, 0x3061)) {
- if (fmt->bFormatType == UAC_FORMAT_TYPE_I &&
+ if (format_type == UAC_FORMAT_TYPE_I &&
fp->rates != SNDRV_PCM_RATE_48000 &&
fp->rates != SNDRV_PCM_RATE_96000)
return -ENOTSUPP;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 932ce3e..c3bf5ff 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -51,6 +51,7 @@
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -185,6 +186,17 @@ static void *find_audio_control_unit(struct mixer_build *state,
/* we just parse the header */
struct uac_feature_unit_descriptor *hdr = NULL;
+ if (state->mixer->protocol == UAC_VERSION_3) {
+ int i;
+
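+ /* BADD: look the unit up in the static descriptor list */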
+ for (i = 0; i < NUM_BADD_DESCS; i++) {
+ hdr = (void *)badd_desc_list[i];
+ if (hdr->bUnitID == unit)
+ return hdr;
+ }
+
+ return NULL;
+ }
while ((hdr = snd_usb_find_desc(state->buffer, state->buflen, hdr,
USB_DT_CS_INTERFACE)) != NULL) {
if (hdr->bLength >= 4 &&
@@ -718,7 +730,7 @@ static int check_input_term(struct mixer_build *state, int id,
term->channels = d->bNrChannels;
term->chconfig = le16_to_cpu(d->wChannelConfig);
term->name = d->iTerminal;
- } else { /* UAC_VERSION_2 */
+ } else if (state->mixer->protocol == UAC_VERSION_2) {
struct uac2_input_terminal_descriptor *d = p1;
/* call recursively to verify that the
@@ -735,6 +747,24 @@ static int check_input_term(struct mixer_build *state, int id,
term->channels = d->bNrChannels;
term->chconfig = le32_to_cpu(d->bmChannelConfig);
term->name = d->iTerminal;
+ } else { /* UAC_VERSION_3 */
+ struct uac3_input_terminal_descriptor *d = p1;
+
+ err = check_input_term(state,
+ d->bCSourceID, term);
+ if (err < 0)
+ return err;
+
+ term->id = id;
+ term->type = d->wTerminalType;
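+ /* BADD clusters are either mono or front left/right stereo */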
+ if (d->wClusterDescrID == CLUSTER_ID_MONO) {
+ term->channels = NUM_CHANNELS_MONO;
+ term->chconfig = BADD_CH_CONFIG_MONO;
+ } else {
+ term->channels = NUM_CHANNELS_STEREO;
+ term->chconfig = BADD_CH_CONFIG_STEREO;
+ }
+ term->name = d->wTerminalDescrStr;
}
return 0;
case UAC_FEATURE_UNIT: {
@@ -752,41 +782,81 @@ static int check_input_term(struct mixer_build *state, int id,
return 0;
}
case UAC_SELECTOR_UNIT:
- case UAC2_CLOCK_SELECTOR: {
- struct uac_selector_unit_descriptor *d = p1;
- /* call recursively to retrieve the channel info */
- err = check_input_term(state, d->baSourceID[0], term);
- if (err < 0)
- return err;
- term->type = d->bDescriptorSubtype << 16; /* virtual type */
- term->id = id;
- term->name = uac_selector_unit_iSelector(d);
+ /* UAC3_MIXER_UNIT_V3 */
+ case UAC2_CLOCK_SELECTOR:
+ /* UAC3_CLOCK_SOURCE */ {
+ if (state->mixer->protocol == UAC_VERSION_3
+ && hdr[2] == UAC3_CLOCK_SOURCE) {
+ struct uac3_clock_source_descriptor *d = p1;
+
+ term->type = d->bDescriptorSubtype << 16;
+ term->id = id;
+ term->name = d->wClockSourceStr;
+ } else if (state->mixer->protocol == UAC_VERSION_3
+ && hdr[2] == UAC3_MIXER_UNIT_V3) {
+ struct uac3_mixer_unit_descriptor *d = p1;
+
+ term->type = d->bDescriptorSubtype << 16;
+ if (d->wClusterDescrID == CLUSTER_ID_MONO) {
+ term->channels = NUM_CHANNELS_MONO;
+ term->chconfig = BADD_CH_CONFIG_MONO;
+ } else {
+ term->channels = NUM_CHANNELS_STEREO;
+ term->chconfig = BADD_CH_CONFIG_STEREO;
+ }
+ term->name = d->wMixerDescrStr;
+ } else {
+ struct uac_selector_unit_descriptor *d = p1;
+ /* call recursively to retrieve channel info */
+ err = check_input_term(state,
+ d->baSourceID[0], term);
+ if (err < 0)
+ return err;
+ /* virtual type */
+ term->type = d->bDescriptorSubtype << 16;
+ term->id = id;
+ term->name = uac_selector_unit_iSelector(d);
+ }
return 0;
}
case UAC1_PROCESSING_UNIT:
case UAC1_EXTENSION_UNIT:
/* UAC2_PROCESSING_UNIT_V2 */
/* UAC2_EFFECT_UNIT */
+ /* UAC3_FEATURE_UNIT_V3 */
case UAC2_EXTENSION_UNIT_V2: {
- struct uac_processing_unit_descriptor *d = p1;
+ if (state->mixer->protocol == UAC_VERSION_3) {
+ struct uac_feature_unit_descriptor *d = p1;
- if (state->mixer->protocol == UAC_VERSION_2 &&
- hdr[2] == UAC2_EFFECT_UNIT) {
- /* UAC2/UAC1 unit IDs overlap here in an
- * uncompatible way. Ignore this unit for now.
- */
+ id = d->bSourceID;
+ } else {
+ struct uac_processing_unit_descriptor *d = p1;
+
+ if (state->mixer->protocol == UAC_VERSION_2 &&
+ hdr[2] == UAC2_EFFECT_UNIT) {
+ /* UAC2/UAC1 unit IDs overlap here in an
+ * incompatible way. Ignore this unit
+ * for now.
+ */
+ return 0;
+ }
+
+ if (d->bNrInPins) {
+ id = d->baSourceID[0];
+ break; /* continue to parse */
+ }
+ /* virtual type */
+ term->type = d->bDescriptorSubtype << 16;
+ term->channels =
+ uac_processing_unit_bNrChannels(d);
+ term->chconfig =
+ uac_processing_unit_wChannelConfig(
+ d, state->mixer->protocol);
+ term->name = uac_processing_unit_iProcessing(
+ d, state->mixer->protocol);
return 0;
}
-
- if (d->bNrInPins) {
- id = d->baSourceID[0];
- break; /* continue to parse */
- }
- term->type = d->bDescriptorSubtype << 16; /* virtual type */
- term->channels = uac_processing_unit_bNrChannels(d);
- term->chconfig = uac_processing_unit_wChannelConfig(d, state->mixer->protocol);
- term->name = uac_processing_unit_iProcessing(d, state->mixer->protocol);
- return 0;
+ break;
}
case UAC2_CLOCK_SOURCE: {
struct uac_clock_source_descriptor *d = p1;
@@ -1233,12 +1303,18 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
struct usb_feature_control_info *ctl_info;
unsigned int len = 0;
int mapped_name = 0;
- int nameid = uac_feature_unit_iFeature(desc);
+ int nameid;
struct snd_kcontrol *kctl;
struct usb_mixer_elem_info *cval;
const struct usbmix_name_map *map;
unsigned int range;
+ if (state->mixer->protocol == UAC_VERSION_3)
+ nameid = ((struct uac3_feature_unit_descriptor *)
+ raw_desc)->wFeatureDescrStr;
+ else
+ nameid = uac_feature_unit_iFeature(desc);
+
control++; /* change from zero-based to 1-based value */
if (control == UAC_FU_GRAPHIC_EQUALIZER) {
@@ -1259,7 +1335,7 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
ctl_info = &audio_feature_info[control-1];
if (state->mixer->protocol == UAC_VERSION_1)
cval->val_type = ctl_info->type;
- else /* UAC_VERSION_2 */
+ else /* UAC_VERSION_2 or UAC_VERSION_3 */
cval->val_type = ctl_info->type_uac2 >= 0 ?
ctl_info->type_uac2 : ctl_info->type;
@@ -1447,6 +1523,62 @@ static int parse_clock_source_unit(struct mixer_build *state, int unitid,
return snd_usb_mixer_add_control(&cval->head, kctl);
}
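+/*
+ * Infer mono vs. stereo for a BADD streaming interface in the given
+ * direction from the wMaxPacketSize of its alternate settings.
+ */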
+static int find_num_channels(struct mixer_build *state, int dir)
+{
+ int num_ch = -EINVAL, num, i, j, wMaxPacketSize;
+ int ctrlif = get_iface_desc(state->mixer->hostif)->bInterfaceNumber;
+ struct usb_interface *usb_iface =
+ usb_ifnum_to_if(state->mixer->chip->dev, ctrlif);
+ struct usb_interface_assoc_descriptor *assoc = usb_iface->intf_assoc;
+ struct usb_host_interface *alts;
+
+ for (i = 0; i < assoc->bInterfaceCount; i++) {
+ int intf = assoc->bFirstInterface + i;
+
+ if (intf != ctrlif) {
+ struct usb_interface *iface =
+ usb_ifnum_to_if(state->mixer->chip->dev, intf);
+
+ alts = &iface->altsetting[1];
+ if (dir == USB_DIR_OUT &&
+ get_endpoint(alts, 0)->bEndpointAddress &
+ USB_DIR_IN)
+ continue;
+ if (dir == USB_DIR_IN &&
+ !(get_endpoint(alts, 0)->bEndpointAddress &
+ USB_DIR_IN))
+ continue;
+ num = iface->num_altsetting;
+ for (j = 1; j < num; j++) {
+ num_ch = NUM_CHANNELS_MONO;
+ alts = &iface->altsetting[j];
+ wMaxPacketSize = le16_to_cpu(
+ get_endpoint(alts, 0)->
+ wMaxPacketSize);
+ switch (wMaxPacketSize) {
+ case BADD_MAXPSIZE_SYNC_MONO_16:
+ case BADD_MAXPSIZE_SYNC_MONO_24:
+ case BADD_MAXPSIZE_ASYNC_MONO_16:
+ case BADD_MAXPSIZE_ASYNC_MONO_24:
+ break;
+ case BADD_MAXPSIZE_SYNC_STEREO_16:
+ case BADD_MAXPSIZE_SYNC_STEREO_24:
+ case BADD_MAXPSIZE_ASYNC_STEREO_16:
+ case BADD_MAXPSIZE_ASYNC_STEREO_24:
+ num_ch = NUM_CHANNELS_STEREO;
+ break;
+ }
+ if (num_ch == NUM_CHANNELS_MONO)
+ continue;
+ else
+ break;
+ }
+ }
+ }
+
+ return num_ch;
+}
+
/*
* parse a feature unit
*
@@ -1478,7 +1610,7 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
unitid);
return -EINVAL;
}
- } else {
+ } else if (state->mixer->protocol == UAC_VERSION_2) {
struct uac2_feature_unit_descriptor *ftr = _ftr;
csize = 4;
channels = (hdr->bLength - 6) / 4 - 1;
@@ -1489,11 +1621,118 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
unitid);
return -EINVAL;
}
+ } else {
+ struct usb_interface *usb_iface =
+ usb_ifnum_to_if(state->mixer->chip->dev,
+ get_iface_desc(state->mixer->hostif)->bInterfaceNumber);
+ struct usb_interface_assoc_descriptor *assoc =
+ usb_iface->intf_assoc;
+
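+ /*
+ * BADD: derive the channel count and control bitmap from the
+ * unit ID and the device's BADD profile (bFunctionSubClass).
+ */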
+ csize = 4;
+ switch (unitid) {
+ case BADD_FU_ID_BAIOF:
+ channels = NUM_CHANNELS_MONO;
+ bmaControls = monoControls;
+ badd_baif_in_term_desc.wClusterDescrID =
+ CLUSTER_ID_MONO;
+ break;
+
+ case BADD_FU_ID_BAOF:
+ switch (assoc->bFunctionSubClass) {
+ case PROF_HEADPHONE:
+ case PROF_HEADSET_ADAPTER:
+ channels = NUM_CHANNELS_STEREO;
+ bmaControls = stereoControls;
+ badd_baiof_mu_desc.wClusterDescrID =
+ CLUSTER_ID_MONO;
+ break;
+ case PROF_SPEAKERPHONE:
+ channels = NUM_CHANNELS_MONO;
+ bmaControls = monoControls;
+ badd_baof_in_term_desc.wClusterDescrID =
+ CLUSTER_ID_MONO;
+ break;
+ default:
+ channels = find_num_channels(state,
+ USB_DIR_OUT);
+ if (channels < 0) {
+ usb_audio_err(state->chip,
+ "unit %u: Cant find num of channels\n",
+ unitid);
+ return channels;
+ }
+
+ bmaControls = (channels == NUM_CHANNELS_MONO) ?
+ monoControls : stereoControls;
+ badd_baof_in_term_desc.wClusterDescrID =
+ (channels == NUM_CHANNELS_MONO) ?
+ CLUSTER_ID_MONO : CLUSTER_ID_STEREO;
+ break;
+ }
+ break;
+
+ case BADD_FU_ID_BAIF:
+ switch (assoc->bFunctionSubClass) {
+ case PROF_HEADSET:
+ case PROF_HEADSET_ADAPTER:
+ case PROF_SPEAKERPHONE:
+ channels = NUM_CHANNELS_MONO;
+ bmaControls = monoControls;
+ badd_baif_in_term_desc.wClusterDescrID =
+ CLUSTER_ID_MONO;
+ break;
+ default:
+ channels = find_num_channels(state, USB_DIR_IN);
+ if (channels < 0) {
+ usb_audio_err(state->chip,
+ "unit %u: Cant find num of channels\n",
+ unitid);
+ return channels;
+ }
+
+ bmaControls = (channels == NUM_CHANNELS_MONO) ?
+ monoControls : stereoControls;
+ badd_baif_in_term_desc.wClusterDescrID =
+ (channels == NUM_CHANNELS_MONO) ?
+ CLUSTER_ID_MONO : CLUSTER_ID_STEREO;
+ break;
+ }
+ break;
+
+ default:
+ usb_audio_err(state->chip, "Invalid unit %u\n", unitid);
+ return -EINVAL;
+ }
}
/* parse the source unit */
- if ((err = parse_audio_unit(state, hdr->bSourceID)) < 0)
- return err;
+ if (state->mixer->protocol != UAC_VERSION_3) {
+ err = parse_audio_unit(state, hdr->bSourceID);
+ if (err < 0)
+ return err;
+ } else {
+ struct usb_interface *usb_iface =
+ usb_ifnum_to_if(state->mixer->chip->dev,
+ get_iface_desc(state->mixer->hostif)->bInterfaceNumber);
+ struct usb_interface_assoc_descriptor *assoc =
+ usb_iface->intf_assoc;
+
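+ /*
+ * BADD: the BAOF feature unit's source depends on the profile;
+ * headset profiles route through the BAIOF mixer unit, others
+ * connect the BAOF input terminal directly.
+ */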
+ switch (unitid) {
+ case BADD_FU_ID_BAOF:
+ switch (assoc->bFunctionSubClass) {
+ case PROF_HEADSET:
+ case PROF_HEADSET_ADAPTER:
+ hdr->bSourceID = BADD_MU_ID_BAIOF;
+ break;
+ default:
+ hdr->bSourceID = BADD_IN_TERM_ID_BAOF;
+ break;
+ }
+ }
+ err = parse_audio_unit(state, hdr->bSourceID);
+ if (err < 0)
+ return err;
+ }
/* determine the input source type and name */
err = check_input_term(state, hdr->bSourceID, &iterm);
@@ -1547,7 +1786,7 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
build_feature_ctl(state, _ftr, 0, i, &iterm,
unitid, 0);
}
- } else { /* UAC_VERSION_2 */
+ } else { /* UAC_VERSION_2 or UAC_VERSION_3 */
for (i = 0; i < ARRAY_SIZE(audio_feature_info); i++) {
unsigned int ch_bits = 0;
unsigned int ch_read_only = 0;
@@ -1665,12 +1904,20 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
int input_pins, num_ins, num_outs;
int pin, ich, err;
- if (desc->bLength < 11 || !(input_pins = desc->bNrInPins) ||
- !(num_outs = uac_mixer_unit_bNrChannels(desc))) {
- usb_audio_err(state->chip,
- "invalid MIXER UNIT descriptor %d\n",
- unitid);
- return -EINVAL;
+ if (state->mixer->protocol == UAC_VERSION_3) {
+ input_pins = badd_baiof_mu_desc.bNrInPins;
+ num_outs =
+ (badd_baiof_mu_desc.wClusterDescrID == CLUSTER_ID_MONO) ?
+ NUM_CHANNELS_MONO : NUM_CHANNELS_STEREO;
+ } else {
+ input_pins = desc->bNrInPins;
+ num_outs = uac_mixer_unit_bNrChannels(desc);
+ if (desc->bLength < 11 || !input_pins || !num_outs) {
+ usb_audio_err(state->chip,
+ "invalid MIXER UNIT descriptor %d\n",
+ unitid);
+ return -EINVAL;
+ }
}
num_ins = 0;
@@ -1690,9 +1937,14 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
int och, ich_has_controls = 0;
for (och = 0; och < num_outs; och++) {
- __u8 *c = uac_mixer_unit_bmControls(desc,
- state->mixer->protocol);
+ __u8 *c = NULL;
+ if (state->mixer->protocol == UAC_VERSION_3)
+ c =
+ &(badd_baiof_mu_desc.bmMixerControls);
+ else
+ c = uac_mixer_unit_bmControls(desc,
+ state->mixer->protocol);
if (check_matrix_bitmap(c, ich, och, num_outs)) {
ich_has_controls = 1;
break;
@@ -2201,16 +2453,28 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
case UAC2_CLOCK_SOURCE:
return parse_clock_source_unit(state, unitid, p1);
case UAC_SELECTOR_UNIT:
+ /* UAC3_MIXER_UNIT_V3 has the same value */
case UAC2_CLOCK_SELECTOR:
- return parse_audio_selector_unit(state, unitid, p1);
+ /* UAC3_CLOCK_SOURCE has the same value */
+ if (state->mixer->protocol == UAC_VERSION_3 &&
+ p1[2] == UAC3_CLOCK_SOURCE)
+ return 0; /* NOP */
+ else if (state->mixer->protocol == UAC_VERSION_3
+ && p1[2] == UAC3_MIXER_UNIT_V3)
+ return parse_audio_mixer_unit(state, unitid, p1);
+ else
+ return parse_audio_selector_unit(state, unitid, p1);
case UAC_FEATURE_UNIT:
return parse_audio_feature_unit(state, unitid, p1);
case UAC1_PROCESSING_UNIT:
/* UAC2_EFFECT_UNIT has the same value */
+ /* UAC3_FEATURE_UNIT_V3 has the same value */
if (state->mixer->protocol == UAC_VERSION_1)
return parse_audio_processing_unit(state, unitid, p1);
- else
+ else if (state->mixer->protocol == UAC_VERSION_2)
return 0; /* FIXME - effect units not implemented yet */
+ else
+ return parse_audio_feature_unit(state, unitid, p1);
case UAC1_EXTENSION_UNIT:
/* UAC2_PROCESSING_UNIT_V2 has the same value */
if (state->mixer->protocol == UAC_VERSION_1)
@@ -2245,6 +2509,23 @@ static int snd_usb_mixer_dev_free(struct snd_device *device)
return 0;
}
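+/*
+ * Build the output terminal implied by the BADD profile and parse the
+ * unit chain feeding it.
+ */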
+static int make_out_term(struct mixer_build state, int wTerminalType)
+{
+ struct uac3_output_terminal_descriptor *desc = NULL;
+
+ if (wTerminalType == UAC_TERMINAL_STREAMING)
+ desc = &badd_baif_out_term_desc;
+ else {
+ desc = &badd_baof_out_term_desc;
+ desc->wTerminalType = wTerminalType;
+ }
+ set_bit(desc->bTerminalID, state.unitbitmap);
+ state.oterm.id = desc->bTerminalID;
+ state.oterm.type = desc->wTerminalType;
+ state.oterm.name = desc->wTerminalDescrStr;
+ return parse_audio_unit(&state, desc->bSourceID);
+}
+
/*
* create mixer controls
*
@@ -2253,9 +2534,8 @@ static int snd_usb_mixer_dev_free(struct snd_device *device)
static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
{
struct mixer_build state;
- int err;
+ int err = -EINVAL;
const struct usbmix_ctl_map *map;
- void *p;
memset(&state, 0, sizeof(state));
state.chip = mixer->chip;
@@ -2273,44 +2553,108 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
}
}
- p = NULL;
- while ((p = snd_usb_find_csint_desc(mixer->hostif->extra,
- mixer->hostif->extralen,
- p, UAC_OUTPUT_TERMINAL)) != NULL) {
- if (mixer->protocol == UAC_VERSION_1) {
- struct uac1_output_terminal_descriptor *desc = p;
+ if (mixer->protocol == UAC_VERSION_3) {
+ struct usb_interface *usb_iface =
+ usb_ifnum_to_if(mixer->chip->dev,
+ get_iface_desc(mixer->hostif)->bInterfaceNumber);
+ struct usb_interface_assoc_descriptor *assoc =
+ usb_iface->intf_assoc;
- if (desc->bLength < sizeof(*desc))
- continue; /* invalid descriptor? */
- /* mark terminal ID as visited */
- set_bit(desc->bTerminalID, state.unitbitmap);
- state.oterm.id = desc->bTerminalID;
- state.oterm.type = le16_to_cpu(desc->wTerminalType);
- state.oterm.name = desc->iTerminal;
- err = parse_audio_unit(&state, desc->bSourceID);
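+ /*
+ * BADD: there are no OUTPUT_TERMINAL descriptors to walk; infer
+ * the output terminal(s) from the BADD profile (bFunctionSubClass).
+ */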
+ switch (assoc->bFunctionSubClass) {
+ case PROF_GENERIC_IO: {
+ if (assoc->bInterfaceCount == 0x02) {
+ if (get_endpoint(mixer->hostif,
+ 0)->bEndpointAddress & USB_DIR_IN)
+ err = make_out_term(state,
+ UAC_TERMINAL_STREAMING);
+ else
+ err = make_out_term(state,
+ UAC_OUTPUT_TERMINAL_UNDEFINED);
+ } else {
+ err = make_out_term(state,
+ UAC_OUTPUT_TERMINAL_UNDEFINED);
+ if (err < 0 && err != -EINVAL)
+ return err;
+ err = make_out_term(state,
+ UAC_TERMINAL_STREAMING);
+ }
+ break;
+ }
+
+ case PROF_HEADPHONE:
+ err = make_out_term(state,
+ UAC_OUTPUT_TERMINAL_HEADPHONES);
+ break;
+ case PROF_SPEAKER:
+ err = make_out_term(state, UAC_OUTPUT_TERMINAL_SPEAKER);
+ break;
+ case PROF_MICROPHONE:
+ err = make_out_term(state, UAC_TERMINAL_STREAMING);
+ break;
+ case PROF_HEADSET:
+ case PROF_HEADSET_ADAPTER:
+ err = make_out_term(state, UAC_BIDIR_TERMINAL_HEADSET);
if (err < 0 && err != -EINVAL)
return err;
- } else { /* UAC_VERSION_2 */
- struct uac2_output_terminal_descriptor *desc = p;
-
- if (desc->bLength < sizeof(*desc))
- continue; /* invalid descriptor? */
- /* mark terminal ID as visited */
- set_bit(desc->bTerminalID, state.unitbitmap);
- state.oterm.id = desc->bTerminalID;
- state.oterm.type = le16_to_cpu(desc->wTerminalType);
- state.oterm.name = desc->iTerminal;
- err = parse_audio_unit(&state, desc->bSourceID);
+ err = make_out_term(state, UAC_TERMINAL_STREAMING);
+ break;
+ case PROF_SPEAKERPHONE:
+ err = make_out_term(state,
+ UAC_BIDIR_TERMINAL_SPEAKERPHONE);
if (err < 0 && err != -EINVAL)
return err;
+ err = make_out_term(state, UAC_TERMINAL_STREAMING);
+ break;
+ }
+ if (err < 0 && err != -EINVAL)
+ return err;
+ } else {
+ void *p;
- /*
- * For UAC2, use the same approach to also add the
- * clock selectors
- */
- err = parse_audio_unit(&state, desc->bCSourceID);
- if (err < 0 && err != -EINVAL)
- return err;
+ p = NULL;
+ while ((p = snd_usb_find_csint_desc(mixer->hostif->extra,
+ mixer->hostif->extralen, p,
+ UAC_OUTPUT_TERMINAL)) != NULL) {
+ if (mixer->protocol == UAC_VERSION_1) {
+ struct uac1_output_terminal_descriptor *desc =
+ p;
+
+ if (desc->bLength < sizeof(*desc))
+ continue; /* invalid descriptor? */
+ /* mark terminal ID as visited */
+ set_bit(desc->bTerminalID, state.unitbitmap);
+ state.oterm.id = desc->bTerminalID;
+ state.oterm.type =
+ le16_to_cpu(desc->wTerminalType);
+ state.oterm.name = desc->iTerminal;
+ err = parse_audio_unit(&state, desc->bSourceID);
+ if (err < 0 && err != -EINVAL)
+ return err;
+ } else { /* UAC_VERSION_2 */
+ struct uac2_output_terminal_descriptor *desc =
+ p;
+
+ if (desc->bLength < sizeof(*desc))
+ continue; /* invalid descriptor? */
+ /* mark terminal ID as visited */
+ set_bit(desc->bTerminalID, state.unitbitmap);
+ state.oterm.id = desc->bTerminalID;
+ state.oterm.type =
+ le16_to_cpu(desc->wTerminalType);
+ state.oterm.name = desc->iTerminal;
+ err = parse_audio_unit(&state, desc->bSourceID);
+ if (err < 0 && err != -EINVAL)
+ return err;
+
+ /*
+ * For UAC2, use the same approach to also add
+ * the clock selectors
+ */
+ err = parse_audio_unit(&state,
+ desc->bCSourceID);
+ if (err < 0 && err != -EINVAL)
+ return err;
+ }
}
}
@@ -2552,6 +2896,9 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
case UAC_VERSION_2:
mixer->protocol = UAC_VERSION_2;
break;
+ case UAC_VERSION_3:
+ mixer->protocol = UAC_VERSION_3;
+ break;
}
if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 7437cd5..5bc84b4 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -20,6 +20,7 @@
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -282,8 +283,6 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
0 /* terminator */
};
struct snd_pcm_chmap_elem *chmap;
- const unsigned int *maps;
- int c;
if (channels > ARRAY_SIZE(chmap->map))
return NULL;
@@ -292,26 +291,41 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
if (!chmap)
return NULL;
- maps = protocol == UAC_VERSION_2 ? uac2_maps : uac1_maps;
chmap->channels = channels;
- c = 0;
- if (bits) {
- for (; bits && *maps; maps++, bits >>= 1)
- if (bits & 1)
- chmap->map[c++] = *maps;
+ if (protocol == UAC_VERSION_3) {
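+ /* BADD channel maps are limited to mono or front left/right */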
+ switch (channels) {
+ case 1:
+ chmap->map[0] = SNDRV_CHMAP_MONO;
+ break;
+ case 2:
+ chmap->map[0] = SNDRV_CHMAP_FL;
+ chmap->map[1] = SNDRV_CHMAP_FR;
+ break;
+ }
} else {
- /* If we're missing wChannelConfig, then guess something
- to make sure the channel map is not skipped entirely */
- if (channels == 1)
- chmap->map[c++] = SNDRV_CHMAP_MONO;
- else
- for (; c < channels && *maps; maps++)
- chmap->map[c++] = *maps;
- }
+ int c = 0;
+ const unsigned int *maps =
+ protocol == UAC_VERSION_2 ? uac2_maps : uac1_maps;
- for (; c < channels; c++)
- chmap->map[c] = SNDRV_CHMAP_UNKNOWN;
+ if (bits) {
+ for (; bits && *maps; maps++, bits >>= 1)
+ if (bits & 1)
+ chmap->map[c++] = *maps;
+ } else {
+ /*
+ * If we're missing wChannelConfig, then guess something
+ * to make sure the channel map is not skipped entirely
+ */
+ if (channels == 1)
+ chmap->map[c++] = SNDRV_CHMAP_MONO;
+ else
+ for (; c < channels && *maps; maps++)
+ chmap->map[c++] = *maps;
+ }
+ for (; c < channels; c++)
+ chmap->map[c] = SNDRV_CHMAP_UNKNOWN;
+ }
return chmap;
}
@@ -409,6 +423,9 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
struct usb_interface_descriptor *altsd = get_iface_desc(alts);
int attributes = 0;
+ if (protocol == UAC_VERSION_3)
+ return 0;
+
csep = snd_usb_find_desc(alts->endpoint[0].extra, alts->endpoint[0].extralen, NULL, USB_DT_CS_ENDPOINT);
/* Creamware Noah has this descriptor after the 2nd endpoint */
@@ -492,7 +509,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
unsigned int format = 0, num_channels = 0;
struct audioformat *fp = NULL;
int num, protocol, clock = 0;
- struct uac_format_type_i_continuous_descriptor *fmt;
+ struct uac_format_type_i_continuous_descriptor *fmt = NULL;
unsigned int chconfig;
dev = chip->dev;
@@ -629,38 +646,78 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
iface_no, altno, as->bTerminalLink);
continue;
}
+
+ case UAC_VERSION_3: {
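+ /*
+ * BADD: the clock source is fixed and the channel count/layout
+ * is implied by the endpoint's wMaxPacketSize.
+ */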
+ int wMaxPacketSize;
+
+ format = UAC_FORMAT_TYPE_I_PCM;
+ clock = BADD_CLOCK_SOURCE;
+ wMaxPacketSize = le16_to_cpu(get_endpoint(alts, 0)
+ ->wMaxPacketSize);
+ switch (wMaxPacketSize) {
+ case BADD_MAXPSIZE_SYNC_MONO_16:
+ case BADD_MAXPSIZE_SYNC_MONO_24:
+ case BADD_MAXPSIZE_ASYNC_MONO_16:
+ case BADD_MAXPSIZE_ASYNC_MONO_24: {
+ num_channels = NUM_CHANNELS_MONO;
+ chconfig = BADD_CH_CONFIG_MONO;
+ break;
+ }
+
+ case BADD_MAXPSIZE_SYNC_STEREO_16:
+ case BADD_MAXPSIZE_SYNC_STEREO_24:
+ case BADD_MAXPSIZE_ASYNC_STEREO_16:
+ case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+ num_channels = NUM_CHANNELS_STEREO;
+ chconfig = BADD_CH_CONFIG_STEREO;
+ break;
+ }
+ default:
+ dev_err(&dev->dev,
+ "%u:%d: invalid wMaxPacketSize\n",
+ iface_no, altno);
+ continue;
+ }
+ }
}
- /* get format type */
- fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_FORMAT_TYPE);
- if (!fmt) {
- dev_err(&dev->dev,
- "%u:%d : no UAC_FORMAT_TYPE desc\n",
- iface_no, altno);
- continue;
- }
- if (((protocol == UAC_VERSION_1) && (fmt->bLength < 8)) ||
- ((protocol == UAC_VERSION_2) && (fmt->bLength < 6))) {
- dev_err(&dev->dev,
- "%u:%d : invalid UAC_FORMAT_TYPE desc\n",
- iface_no, altno);
- continue;
- }
+ if ((protocol == UAC_VERSION_1) ||
+ (protocol == UAC_VERSION_2)) {
+ /* get format type */
+ fmt = snd_usb_find_csint_desc(alts->extra,
+ alts->extralen, NULL, UAC_FORMAT_TYPE);
+ if (!fmt) {
+ dev_err(&dev->dev,
+ "%u:%d : no UAC_FORMAT_TYPE desc\n",
+ iface_no, altno);
+ continue;
+ }
+ if (((protocol == UAC_VERSION_1) && (fmt->bLength < 8))
+ || ((protocol == UAC_VERSION_2) &&
+ (fmt->bLength < 6))) {
+ dev_err(&dev->dev,
+ "%u:%d :invalid UAC_FORMAT_TYPE desc\n",
+ iface_no, altno);
+ continue;
+ }
- /*
- * Blue Microphones workaround: The last altsetting is identical
- * with the previous one, except for a larger packet size, but
- * is actually a mislabeled two-channel setting; ignore it.
- */
- if (fmt->bNrChannels == 1 &&
- fmt->bSubframeSize == 2 &&
- altno == 2 && num == 3 &&
- fp && fp->altsetting == 1 && fp->channels == 1 &&
- fp->formats == SNDRV_PCM_FMTBIT_S16_LE &&
- protocol == UAC_VERSION_1 &&
- le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize) ==
+ /*
+ * Blue Microphones workaround: The last altsetting is
+ * identical with the previous one, except for a larger
+ * packet size, but is actually a mislabeled two-channel
+ * setting; ignore it.
+ */
+ if (fmt->bNrChannels == 1 &&
+ fmt->bSubframeSize == 2 &&
+ altno == 2 && num == 3 &&
+ fp && fp->altsetting == 1 && fp->channels == 1 &&
+ fp->formats == SNDRV_PCM_FMTBIT_S16_LE &&
+ protocol == UAC_VERSION_1 &&
+ le16_to_cpu(
+ get_endpoint(alts, 0)->wMaxPacketSize) ==
fp->maxpacksize * 2)
- continue;
+ continue;
+ }
fp = kzalloc(sizeof(*fp), GFP_KERNEL);
if (! fp) {
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 5a1974e..801508c 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -27,6 +27,7 @@
#include <soc/qcom/msm_qmi_interface.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
+#include <linux/usb/audio-v3.h>
#include "usbaudio.h"
#include "card.h"
@@ -427,12 +428,14 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
protocol = altsd->bInterfaceProtocol;
/* get format type */
- fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
- UAC_FORMAT_TYPE);
- if (!fmt) {
- pr_err("%s: %u:%d : no UAC_FORMAT_TYPE desc\n", __func__,
- subs->interface, subs->altset_idx);
- goto err;
+ if (protocol != UAC_VERSION_3) {
+ fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
+ UAC_FORMAT_TYPE);
+ if (!fmt) {
+ pr_err("%s: %u:%d : no UAC_FORMAT_TYPE desc\n",
+ __func__, subs->interface, subs->altset_idx);
+ goto err;
+ }
}
if (!uadev[card_num].ctrl_intf) {
@@ -440,12 +443,15 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
goto err;
}
- hdr_ptr = snd_usb_find_csint_desc(uadev[card_num].ctrl_intf->extra,
- uadev[card_num].ctrl_intf->extralen,
- NULL, UAC_HEADER);
- if (!hdr_ptr) {
- pr_err("%s: no UAC_HEADER desc\n", __func__);
- goto err;
+ if (protocol != UAC_VERSION_3) {
+ hdr_ptr = snd_usb_find_csint_desc(
+ uadev[card_num].ctrl_intf->extra,
+ uadev[card_num].ctrl_intf->extralen,
+ NULL, UAC_HEADER);
+ if (!hdr_ptr) {
+ pr_err("%s: no UAC_HEADER desc\n", __func__);
+ goto err;
+ }
}
if (protocol == UAC_VERSION_1) {
@@ -473,6 +479,31 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
resp->usb_audio_spec_revision =
((struct uac2_ac_header_descriptor *)hdr_ptr)->bcdADC;
resp->usb_audio_spec_revision_valid = 1;
+ } else if (protocol == UAC_VERSION_3) {
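+ /* BADD: subslot size is implied by the endpoint's wMaxPacketSize */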
+ switch (le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize)) {
+ case BADD_MAXPSIZE_SYNC_MONO_16:
+ case BADD_MAXPSIZE_SYNC_STEREO_16:
+ case BADD_MAXPSIZE_ASYNC_MONO_16:
+ case BADD_MAXPSIZE_ASYNC_STEREO_16: {
+ resp->usb_audio_subslot_size = SUBSLOTSIZE_16_BIT;
+ break;
+ }
+
+ case BADD_MAXPSIZE_SYNC_MONO_24:
+ case BADD_MAXPSIZE_SYNC_STEREO_24:
+ case BADD_MAXPSIZE_ASYNC_MONO_24:
+ case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+ resp->usb_audio_subslot_size = SUBSLOTSIZE_24_BIT;
+ break;
+ }
+
+ default:
+ pr_err("%d: %u: Invalid wMaxPacketSize\n",
+ subs->interface, subs->altset_idx);
+ ret = -EINVAL;
+ goto err;
+ }
+ resp->usb_audio_subslot_size_valid = 1;
} else {
pr_err("%s: unknown protocol version %x\n", __func__, protocol);
goto err;