Merge "usb: dwc3: Use high priority worker as bottom half handler" into msm-4.9
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
index 90ddc27..50488b4 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
@@ -49,6 +49,16 @@
Value Type: <stringlist>
Definition: Address names. Must be "llcc"
+- qcom,llcc-banks-off:
+ Usage: required
+ Value Type: <u32 array>
+ Definition: Offsets of llcc banks from llcc base address starting from
+ LLCC bank0.
+- qcom,llcc-broadcast-off:
+ Usage: required
+ Value Type: <u32>
+ Definition: Offset of broadcast register from LLCC bank0 address.
+
- #cache-cells:
Usage: required
Value Type: <u32>
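For reference, the llcc consumer node added to sdm845.dtsi later in this patch exercises these offsets as follows (abbreviated):

	qcom,llcc@1100000 {
		compatible = "qcom,llcc-core", "syscon", "simple-mfd";
		reg = <0x1100000 0x250000>;
		reg-names = "llcc_base";
		qcom,llcc-banks-off = <0x0 0x80000 0x100000 0x180000>;
		qcom,llcc-broadcast-off = <0x200000>;
		...
	};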
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 7405115..d95aa59 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -17,6 +17,7 @@
"qcom,gcc-msm8996"
"qcom,gcc-mdm9615"
"qcom,gcc-sdm845"
+ "qcom,debugcc-sdm845"
- reg : shall contain base register location and length
- #clock-cells : shall contain 1
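The new "qcom,debugcc-sdm845" compatible is consumed by the clock_debug node added to sdm845.dtsi later in this patch; in abbreviated form:

	clock_debug: qcom,cc-debug@100000 {
		compatible = "qcom,debugcc-sdm845";
		qcom,cc-count = <5>;
		qcom,gcc = <&clock_gcc>;
		...
		clock-names = "xo_clk_src";
		clocks = <&clock_rpmh RPMH_CXO_CLK>;
		#clock-cells = <1>;
	};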
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index b6544961..f4b6013 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -152,6 +152,11 @@
baseAddr - base address of the gpu channels in the qdss stm memory region
size - size of the gpu stm region
+- qcom,gpu-qtimer:
+ <baseAddr size>
+ baseAddr - base address of the qtimer memory region
+ size - size of the qtimer region
+
- qcom,tsens-name:
Specify the name of GPU temperature sensor. This name will be used
to get the temperature from the thermal driver API.
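For the qcom,gpu-qtimer property above, a minimal sketch of an entry (the base address and size shown are placeholders, not taken from any target):

	qcom,gpu-qtimer = <0x17c00000 0x1000>; /* placeholder base/size */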
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index ef6c04a..2d971b7a 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -25,6 +25,8 @@
- reg : Base address and size of the SMMU.
+- reg-names : For the "qcom,qsmmu-v500" device, "tcu-base" is expected.
+
- #global-interrupts : The number of global interrupts exposed by the
device.
@@ -176,6 +178,9 @@
"base" is the main TBU register region.
"status-reg" indicates whether hw can process a new request.
+- qcom,stream-id-range:
+ Pair of values describing the smallest supported stream-id
+ and the size of the entire set.
Example:
smmu {
@@ -186,5 +191,6 @@
<0x2000 0x8>;
reg-names = "base",
"status-reg";
+ qcom,stream-id-range = <0x800 0x400>;
};
};
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
index a77a291..1e6aac5 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
@@ -75,6 +75,9 @@
- qcom,lcd-auto-pfm-thresh : Specify the auto-pfm threshold, if the headroom voltage level
falls below this threshold and auto PFM is enabled, boost
controller will enter into PFM mode automatically.
+- qcom,lcd-psm-ctrl : A boolean property to specify if PSM needs to be
+ controlled dynamically when WLED module is enabled
+ or disabled.
Optional properties if 'qcom,disp-type-amoled' is mentioned in DT:
- qcom,loop-comp-res-kohm : control to select the compensation resistor in kohm. default is 320.
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
index 521c783..c01036d 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
@@ -43,6 +43,17 @@
the first cell will be used to define gpio number and the
second denotes the flags for this gpio
+- qcom,gpios-disallowed:
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: Array of the GPIO hardware numbers corresponding to GPIOs
+ which the APSS processor is not allowed to configure.
+ The hardware numbers are indexed from 1.
+ The interrupt resources for these GPIOs must not be defined
+ in "interrupts" and "interrupt-names" properties.
+ GPIOs defined in this array won't be registered as pins
+ in the pinctrl device or gpios in the gpio chip.
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -233,6 +244,7 @@
gpio-controller;
#gpio-cells = <2>;
+ qcom,gpios-disallowed = <1 20>;
pm8921_gpio_keys: gpio-keys {
volume-keys {
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
index d08ca95..c9cfc88 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
@@ -149,6 +149,8 @@
already. If it is not specified, then
output voltage can be configured to
any value in the allowed limit.
+- qcom,notify-lab-vreg-ok-sts: A boolean property which upon set will
+ poll and notify the lab_vreg_ok status.
Following properties are available only for PM660A:
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
index 8b3a38da0..63da8ec 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
@@ -26,6 +26,13 @@
Value type: <prop-encoded-array>
Definition: Base address of the LCDB SPMI peripheral.
+- qcom,force-module-reenable
+ Usage: required if using SW mode for module enable
+ Value type: <bool>
+ Definition: This enables the workaround to force enable
+ the vph_pwr_2p5_ok signal required for
+ turning on the LCDB module.
+
Touch-to-wake (TTW) properties:
TTW supports 2 modes of operation - HW and SW. In the HW mode the enable/disable
@@ -59,7 +66,6 @@
Definition: ON time (in ms) for the VDISP/VDISN signals.
Possible values are 4, 8, 16, 32.
-
========================================
Second Level Nodes - LDO/NCP/BOOST block
========================================
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-oledb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-oledb-regulator.txt
index 5d80a04..38f599b 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-oledb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-oledb-regulator.txt
@@ -44,12 +44,12 @@
Value type: <bool>
Definition: Enables the voltage programming through SWIRE signal.
- qcom,ext-pin-control
+- qcom,ext-pin-control
Usage: optional
Value type: <bool>
Definition: Configures the OLED module to be enabled by an external pin.
- qcom,dynamic-ext-pinctl-config
+- qcom,dynamic-ext-pinctl-config
Usage: optional
Value type: <bool>
Definition: Used to dynamically enable/disable the OLEDB module
@@ -57,13 +57,27 @@
rail. This property is applicable only if qcom,ext-pin-ctl
property is specified and it is specific to PM660A.
- qcom,pbs-control
+- qcom,force-pd-control
+ Usage: optional
+ Value type: <bool>
+ Definition: Used to forcibly enable the pull-down control via SPMI by
+ disabling the pull-down configuration done automatically by
+ hardware through SWIRE pulses.
+
+- qcom,pbs-client
+ Usage: optional
+ Value type: <phandle>
+ Definition: Used to send the PBS trigger to the specified PBS client.
+ This property is applicable only if qcom,force-pd-control
+ property is specified.
+
+- qcom,pbs-control
Usage: optional
Value type: <bool>
Definition: PMIC PBS logic directly configures the output voltage update
and pull down control.
- qcom,oledb-init-voltage-mv
+- qcom,oledb-init-voltage-mv
Usage: optional
Value type: <u32>
Definition: Sets the AVDD bias voltage (in mV) when the module is
@@ -71,53 +85,53 @@
property is not specified. Supported values are from 5.0V
to 8.1V with a step of 100mV.
-qcom,oledb-default-voltage-mv
+- qcom,oledb-default-voltage-mv
Usage: optional
Value type: <u32>
Definition: Sets the default AVDD bias voltage (in mV) before module
enable. Supported values are from 5.0V to 8.1V with the
step of 100mV.
-qcom,bias-gen-warmup-delay-ns
+- qcom,bias-gen-warmup-delay-ns
Usage: optional
Value type: <u32>
Definition: Bias generator warm-up time (ns). Supported values are
6700, 13300, 267000, 534000.
-qcom,peak-curr-limit-ma
+- qcom,peak-curr-limit-ma
Usage: optional
Value type: <u32>
Definition: Peak current limit (in mA). Supported values are 115, 265,
415, 570, 720, 870, 1020, 1170.
-qcom,pull-down-enable
+- qcom,pull-down-enable
Usage: optional
Value type: <u32>
Definition: Pull down configuration of OLEDB.
1 - Enable pull-down
0 - Disable pull-down
-qcom,negative-curr-limit-enable
+- qcom,negative-curr-limit-enable
Usage: optional
Value type: <u32>
Definition: negative current limit enable/disable.
1 = enable negative current limit
0 = disable negative current limit
-qcom,negative-curr-limit-ma
+- qcom,negative-curr-limit-ma
Usage: optional
Value type: <u32>
Definition: Negative current limit (in mA). Supported values are
170, 300, 420, 550.
-qcom,enable-short-circuit
+- qcom,enable-short-circuit
Usage: optional
Value type: <u32>
Definition: Short circuit protection enable/disable.
1 = enable short circuit protection
0 = disable short circuit protection
-qcom,short-circuit-dbnc-time
+- qcom,short-circuit-dbnc-time
Usage: optional
Value type: <u32>
Definition: Short circuit debounce time (in Fsw). Supported
@@ -126,26 +140,26 @@
Fast precharge properties:
-------------------------
-qcom,fast-precharge-ppulse-enable
+- qcom,fast-precharge-ppulse-enable
Usage: optional
Value type: <u32>
Definition: Fast precharge pfet pulsing enable/disable.
1 = enable fast precharge pfet pulsing
0 = disable fast precharge pfet pulsing
-qcom,precharge-debounce-time-ms
+- qcom,precharge-debounce-time-ms
Usage: optional
Value type: <u32>
Definition: Fast precharge debounce time (in ms). Supported
values are 1, 2, 4, 8.
-qcom,precharge-pulse-period-us
+- qcom,precharge-pulse-period-us
Usage: optional
Value type: <u32>
Definition: Fast precharge pulse period (in us). Supported
values are 3, 6, 9, 12.
-qcom,precharge-pulse-on-time-us
+- qcom,precharge-pulse-on-time-us
Usage: optional
Value type: <u32>
Definition: Fast precharge pulse on time (in us). Supported
@@ -154,20 +168,20 @@
Pulse Skip Modulation (PSM) properties:
--------------------------------------
-qcom,psm-enable
+- qcom,psm-enable
Usage: optional
Value type: <u32>
Definition: Pulse Skip Modulation mode.
1 - Enable PSM mode
0 - Disable PSM mode
-qcom,psm-hys-mv
+- qcom,psm-hys-mv
Usage: optional
Value type: <u32>
Definition: PSM hysteresis voltage (in mV).
Supported values are 13mV and 26mV.
-qcom,psm-vref-mv
+- qcom,psm-vref-mv
Usage: optional
Value type: <u32>
Definition: Reference voltage (in mV) control for PSM comparator.
@@ -177,26 +191,26 @@
Pulse Frequency Modulation (PFM) properties:
-------------------------------------------
-qcom,pfm-enable
+- qcom,pfm-enable
Usage: optional
Value type: <u32>
Definition: Pulse Frequency Modulation mode.
1 - Enable PFM mode
0 - Disable PFM mode
-qcom,pfm-hys-mv
+- qcom,pfm-hys-mv
Usage: optional
Value type: <u32>
Definition: PFM hysteresis voltage (in mV).
Supported values are 13mV and 26mV.
-qcom,pfm-curr-limit-ma
+- qcom,pfm-curr-limit-ma
Usage: optional
Value type: <u32>
Definition: PFM current limit (in mA).
Supported values are 130, 200, 270, 340.
-qcom,pfm-off-time-ns
+- qcom,pfm-off-time-ns
Usage: optional
Value type: <u32>
Definition: NFET off time at PFM (in ns).
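As a minimal sketch, the two new pull-down properties described above would typically appear together; the &pbs_client phandle label here is hypothetical:

	qcom,force-pd-control;
	qcom,pbs-client = <&pbs_client>; /* hypothetical PBS client node */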
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index 2cb5419..123a65b 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -174,6 +174,10 @@
"fair_share": Use fair share governor.
"user_space": Use user space governor.
"power_allocator": Use power allocator governor.
+ "low_limits_floor": Use the low limits floor
+ mitigation governor.
+ "low_limits_cap": Use the low limits cap
+ mitigation governor.
Type: string
- sustainable-power: An estimate of the sustainable power (in mW) that the
diff --git a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
index af1ba92..af754fe 100644
--- a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
+++ b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
@@ -7,11 +7,12 @@
contain a phandle reference to UFS PHY node.
Required properties:
-- compatible : compatible list, contains one of the following:
+- compatible : compatible list, contains one of the following
+ according to the relevant phy in use:
"qcom,ufs-phy-qmp-14nm"
"qcom,ufs-phy-qmp-v3"
"qcom,ufs-phy-qrbtc-sdm845"
-according to the relevant phy in use.
+ "qcom,ufs-phy-qmp-v3-660"
- reg : should contain PHY register address space (mandatory),
- reg-names : indicates various resources passed to driver (via reg property) by name.
Required "reg-names" is "phy_mem".
@@ -27,11 +28,12 @@
Optional properties:
- vdda-phy-max-microamp : specifies max. load that can be drawn from phy supply
- vdda-pll-max-microamp : specifies max. load that can be drawn from pll supply
-- vddp-ref-clk-supply : phandle to UFS device ref_clk pad power supply
-- vddp-ref-clk-max-microamp : specifies max. load that can be drawn from this supply
-- vddp-ref-clk-always-on : specifies if this supply needs to be kept always on
- qcom,disable-lpm : disable various LPM mechanisms in UFS for platform compatibility
(limit link to PWM Gear-1, 1-lane slow mode; disable hibernate, and avoid suspend/resume)
+- lanes-per-direction: number of lanes available per direction - either 1 or 2.
+ Note that it is assumed that the same number of lanes is
+ used in both directions at once.
+ If not specified, default is 2 lanes per direction.
Example:
@@ -40,6 +42,7 @@
reg = <0xfc597000 0x800>;
reg-names = "phy_mem";
#phy-cells = <0>;
+ lanes-per-direction = <1>;
vdda-phy-supply = <&pma8084_l4>;
vdda-pll-supply = <&pma8084_l12>;
vdda-phy-max-microamp = <50000>;
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 81c74c5..958194b 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -13,6 +13,9 @@
- reg : <registers mapping>
first entry should contain UFS host controller register address space (mandatory),
second entry is the device ref. clock control register map (optional).
+- resets : reset specifier pair, consisting of a phandle to the reset provider
+ and the reset lines used by this controller.
+- reset-names : reset signal name strings, sorted in the same order as the resets property.
Optional properties:
- phys : phandle to UFS PHY node
@@ -52,6 +55,8 @@
- lanes-per-direction: number of lanes available per direction - either 1 or 2.
Note that it is assumed that the same number of lanes is used in both directions at once.
If not specified, default is 2 lanes per direction.
+- pinctrl-names, pinctrl-0, pinctrl-1, ..., pinctrl-n: Refer to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+ for a description of these optional properties.
- limit-tx-hs-gear : Specify the max. limit on the TX HS gear.
Valid range: 1-3. 1 => HS-G1, 2 => HS-G2, 3 => HS-G3
- limit-rx-hs-gear : Specify the max. limit on the RX HS gear. Refer "limit-tx-hs-gear" for expected values.
@@ -89,6 +94,8 @@
clocks = <&core 0>, <&ref 0>, <&iface 0>;
clock-names = "core_clk", "ref_clk", "iface_clk";
freq-table-hz = <100000000 200000000>, <0 0>, <0 0>;
+ resets = <clock_gcc GCC_UFS_BCR>;
+ reset-names = "core_reset";
phys = <&ufsphy1>;
phy-names = "ufsphy";
rpm-level = <3>;
@@ -146,6 +153,9 @@
- qcom,pm-qos-default-cpu: PM QoS voting is based on the cpu associated with each IO request by the block layer.
This defines the default cpu used for PM QoS voting in case a specific cpu value is not available.
+- qcom,vddp-ref-clk-supply : phandle to the UFS device ref_clk pad power supply, controlled by the host driver.
+- qcom,vddp-ref-clk-max-microamp : specifies max. load that can be drawn from
+ the ref-clk supply.
Example:
ufshc@0xfc598000 {
...
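Taken together, the new reset, pinctrl and ref-clk supply properties are used by the ufshc_mem node in the sdm845 dtsi changes later in this patch roughly as follows:

	resets = <&clock_gcc GCC_UFS_PHY_BCR>;
	reset-names = "core_reset";
	pinctrl-names = "dev-reset-assert", "dev-reset-deassert";
	pinctrl-0 = <&ufs_dev_reset_assert>;
	pinctrl-1 = <&ufs_dev_reset_deassert>;
	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
	qcom,vddp-ref-clk-max-microamp = <100>;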
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index 3497e50..a94a716 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -49,7 +49,9 @@
apps_smmu: apps-smmu@0x15000000 {
compatible = "qcom,qsmmu-v500";
- reg = <0x15000000 0x80000>;
+ reg = <0x15000000 0x80000>,
+ <0x150c2000 0x20>;
+ reg-names = "base", "tcu-base";
#iommu-cells = <1>;
qcom,skip-init;
#global-interrupts = <1>;
@@ -128,6 +130,7 @@
reg = <0x150c5000 0x1000>,
<0x150c2200 0x8>;
reg-names = "base", "status-reg";
+ qcom,stream-id-range = <0x0 0x400>;
qcom,regulator-names = "vdd";
vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu1_gdsc>;
};
@@ -138,6 +141,7 @@
reg = <0x150c9000 0x1000>,
<0x150c2208 0x8>;
reg-names = "base", "status-reg";
+ qcom,stream-id-range = <0x400 0x400>;
qcom,regulator-names = "vdd";
vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu2_gdsc>;
};
@@ -148,6 +152,7 @@
reg = <0x150cd000 0x1000>,
<0x150c2210 0x8>;
reg-names = "base", "status-reg";
+ qcom,stream-id-range = <0x800 0x400>;
qcom,regulator-names = "vdd";
vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc>;
};
@@ -158,6 +163,7 @@
reg = <0x150d1000 0x1000>,
<0x150c2218 0x8>;
reg-names = "base", "status-reg";
+ qcom,stream-id-range = <0xc00 0x400>;
qcom,regulator-names = "vdd";
vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc>;
};
@@ -168,6 +174,7 @@
reg = <0x150d5000 0x1000>,
<0x150c2220 0x8>;
reg-names = "base", "status-reg";
+ qcom,stream-id-range = <0x1000 0x400>;
qcom,regulator-names = "vdd";
vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>;
};
@@ -178,6 +185,7 @@
reg = <0x150d9000 0x1000>,
<0x150c2228 0x8>;
reg-names = "base", "status-reg";
+ qcom,stream-id-range = <0x1400 0x400>;
/* No GDSC */
};
@@ -187,6 +195,7 @@
reg = <0x150dd000 0x1000>,
<0x150c2230 0x8>;
reg-names = "base", "status-reg";
+ qcom,stream-id-range = <0x1800 0x400>;
qcom,regulator-names = "vdd";
vdd-supply = <&hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc>;
};
@@ -197,6 +206,7 @@
reg = <0x150e1000 0x1000>,
<0x150c2238 0x8>;
reg-names = "base", "status-reg";
+ qcom,stream-id-range = <0x1c00 0x400>;
qcom,regulator-names = "vdd";
vdd-supply = <&hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc>;
};
diff --git a/arch/arm64/boot/dts/qcom/pm8005.dtsi b/arch/arm64/boot/dts/qcom/pm8005.dtsi
index 241864f..1f8d20e 100644
--- a/arch/arm64/boot/dts/qcom/pm8005.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8005.dtsi
@@ -32,37 +32,15 @@
label = "pm8005_tz";
};
- pm8005_gpios: gpios {
- compatible = "qcom,qpnp-pin";
+ pm8005_gpios: pinctrl@c000 {
+ compatible = "qcom,spmi-gpio";
+ reg = <0xc000 0x400>;
+ interrupts = <0x4 0xc0 0 IRQ_TYPE_NONE>,
+ <0x4 0xc1 0 IRQ_TYPE_NONE>;
+ interrupt-names = "pm8005_gpio1", "pm8005_gpio2";
gpio-controller;
#gpio-cells = <2>;
- #address-cells = <1>;
- #size-cells = <1>;
- label = "pm8005-gpio";
-
- gpio@c000 {
- reg = <0xc000 0x100>;
- qcom,pin-num = <1>;
- status = "disabled";
- };
-
- gpio@c100 {
- reg = <0xc100 0x100>;
- qcom,pin-num = <2>;
- status = "disabled";
- };
-
- gpio@c200 {
- reg = <0xc200 0x100>;
- qcom,pin-num = <3>;
- status = "disabled";
- };
-
- gpio@c300 {
- reg = <0xc300 0x100>;
- qcom,pin-num = <4>;
- status = "disabled";
- };
+ qcom,gpios-disallowed = <3 4>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index ed4fdde..5290f46 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -72,169 +72,41 @@
label = "pm8998_tz";
};
- pm8998_gpios: gpios {
- compatible = "qcom,qpnp-pin";
+ pm8998_gpios: pinctrl@c000 {
+ compatible = "qcom,spmi-gpio";
+ reg = <0xc000 0x1a00>;
+ interrupts = <0x0 0xc0 0 IRQ_TYPE_NONE>,
+ <0x0 0xc1 0 IRQ_TYPE_NONE>,
+ <0x0 0xc3 0 IRQ_TYPE_NONE>,
+ <0x0 0xc4 0 IRQ_TYPE_NONE>,
+ <0x0 0xc5 0 IRQ_TYPE_NONE>,
+ <0x0 0xc6 0 IRQ_TYPE_NONE>,
+ <0x0 0xc7 0 IRQ_TYPE_NONE>,
+ <0x0 0xc8 0 IRQ_TYPE_NONE>,
+ <0x0 0xc9 0 IRQ_TYPE_NONE>,
+ <0x0 0xca 0 IRQ_TYPE_NONE>,
+ <0x0 0xcb 0 IRQ_TYPE_NONE>,
+ <0x0 0xcc 0 IRQ_TYPE_NONE>,
+ <0x0 0xcd 0 IRQ_TYPE_NONE>,
+ <0x0 0xcf 0 IRQ_TYPE_NONE>,
+ <0x0 0xd0 0 IRQ_TYPE_NONE>,
+ <0x0 0xd1 0 IRQ_TYPE_NONE>,
+ <0x0 0xd2 0 IRQ_TYPE_NONE>,
+ <0x0 0xd4 0 IRQ_TYPE_NONE>,
+ <0x0 0xd6 0 IRQ_TYPE_NONE>;
+ interrupt-names = "pm8998_gpio1", "pm8998_gpio2",
+ "pm8998_gpio4", "pm8998_gpio5",
+ "pm8998_gpio6", "pm8998_gpio7",
+ "pm8998_gpio8", "pm8998_gpio9",
+ "pm8998_gpio10", "pm8998_gpio11",
+ "pm8998_gpio12", "pm8998_gpio13",
+ "pm8998_gpio14", "pm8998_gpio16",
+ "pm8998_gpio17", "pm8998_gpio18",
+ "pm8998_gpio19", "pm8998_gpio21",
+ "pm8998_gpio23";
gpio-controller;
#gpio-cells = <2>;
- #address-cells = <1>;
- #size-cells = <1>;
- label = "pm8998-gpio";
-
- gpio@c000 {
- reg = <0xc000 0x100>;
- qcom,pin-num = <1>;
- status = "disabled";
- };
-
- gpio@c100 {
- reg = <0xc100 0x100>;
- qcom,pin-num = <2>;
- status = "disabled";
- };
-
- gpio@c200 {
- reg = <0xc200 0x100>;
- qcom,pin-num = <3>;
- status = "disabled";
- };
-
- gpio@c300 {
- reg = <0xc300 0x100>;
- qcom,pin-num = <4>;
- status = "disabled";
- };
-
- gpio@c400 {
- reg = <0xc400 0x100>;
- qcom,pin-num = <5>;
- status = "disabled";
- };
-
- gpio@c500 {
- reg = <0xc500 0x100>;
- qcom,pin-num = <6>;
- status = "disabled";
- };
-
- gpio@c600 {
- reg = <0xc600 0x100>;
- qcom,pin-num = <7>;
- status = "disabled";
- };
-
- gpio@c700 {
- reg = <0xc700 0x100>;
- qcom,pin-num = <8>;
- status = "disabled";
- };
-
- gpio@c800 {
- reg = <0xc800 0x100>;
- qcom,pin-num = <9>;
- status = "disabled";
- };
-
- gpio@c900 {
- reg = <0xc900 0x100>;
- qcom,pin-num = <10>;
- status = "disabled";
- };
-
- gpio@ca00 {
- reg = <0xca00 0x100>;
- qcom,pin-num = <11>;
- status = "disabled";
- };
-
- gpio@cb00 {
- reg = <0xcb00 0x100>;
- qcom,pin-num = <12>;
- status = "disabled";
- };
-
- gpio@cc00 {
- reg = <0xcc00 0x100>;
- qcom,pin-num = <13>;
- status = "disabled";
- };
-
- gpio@cd00 {
- reg = <0xcd00 0x100>;
- qcom,pin-num = <14>;
- status = "disabled";
- };
-
- gpio@ce00 {
- reg = <0xce00 0x100>;
- qcom,pin-num = <15>;
- status = "disabled";
- };
-
- gpio@cf00 {
- reg = <0xcf00 0x100>;
- qcom,pin-num = <16>;
- status = "disabled";
- };
-
- gpio@d000 {
- reg = <0xd000 0x100>;
- qcom,pin-num = <17>;
- status = "disabled";
- };
-
- gpio@d100 {
- reg = <0xd100 0x100>;
- qcom,pin-num = <18>;
- status = "disabled";
- };
-
- gpio@d200 {
- reg = <0xd200 0x100>;
- qcom,pin-num = <19>;
- status = "disabled";
- };
-
- gpio@d300 {
- reg = <0xd300 0x100>;
- qcom,pin-num = <20>;
- status = "disabled";
- };
-
- gpio@d400 {
- reg = <0xd400 0x100>;
- qcom,pin-num = <21>;
- status = "disabled";
- };
-
- gpio@d500 {
- reg = <0xd500 0x100>;
- qcom,pin-num = <22>;
- status = "disabled";
- };
-
- gpio@d600 {
- reg = <0xd600 0x100>;
- qcom,pin-num = <23>;
- status = "disabled";
- };
-
- gpio@d700 {
- reg = <0xd700 0x100>;
- qcom,pin-num = <24>;
- status = "disabled";
- };
-
- gpio@d800 {
- reg = <0xd800 0x100>;
- qcom,pin-num = <25>;
- status = "disabled";
- };
-
- gpio@d900 {
- reg = <0xd900 0x100>;
- qcom,pin-num = <26>;
- status = "disabled";
- };
+ qcom,gpios-disallowed = <3 15 20 22 24 25 26>;
};
pm8998_coincell: qcom,coincell@2800 {
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 1659706..1f27b21 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -38,97 +38,29 @@
label = "pmi8998_tz";
};
- pmi8998_gpios: gpios {
- compatible = "qcom,qpnp-pin";
+ pmi8998_gpios: pinctrl@c000 {
+ compatible = "qcom,spmi-gpio";
+ reg = <0xc000 0xe00>;
+ interrupts = <0x2 0xc0 0 IRQ_TYPE_NONE>,
+ <0x2 0xc1 0 IRQ_TYPE_NONE>,
+ <0x2 0xc2 0 IRQ_TYPE_NONE>,
+ <0x2 0xc4 0 IRQ_TYPE_NONE>,
+ <0x2 0xc5 0 IRQ_TYPE_NONE>,
+ <0x2 0xc7 0 IRQ_TYPE_NONE>,
+ <0x2 0xc8 0 IRQ_TYPE_NONE>,
+ <0x2 0xc9 0 IRQ_TYPE_NONE>,
+ <0x2 0xca 0 IRQ_TYPE_NONE>,
+ <0x2 0xcb 0 IRQ_TYPE_NONE>,
+ <0x2 0xcd 0 IRQ_TYPE_NONE>;
+ interrupt-names = "pmi8998_gpio1", "pmi8998_gpio2",
+ "pmi8998_gpio3", "pmi8998_gpio5",
+ "pmi8998_gpio6", "pmi8998_gpio8",
+ "pmi8998_gpio9", "pmi8998_gpio10",
+ "pmi8998_gpio11", "pmi8998_gpio12",
+ "pmi8998_gpio14";
gpio-controller;
#gpio-cells = <2>;
- #address-cells = <1>;
- #size-cells = <1>;
- label = "pmi8998-gpio";
-
- gpio@c000 {
- reg = <0xc000 0x100>;
- qcom,pin-num = <1>;
- status = "disabled";
- };
-
- gpio@c100 {
- reg = <0xc100 0x100>;
- qcom,pin-num = <2>;
- status = "disabled";
- };
-
- gpio@c200 {
- reg = <0xc200 0x100>;
- qcom,pin-num = <3>;
- status = "disabled";
- };
-
- gpio@c300 {
- reg = <0xc300 0x100>;
- qcom,pin-num = <4>;
- status = "disabled";
- };
-
- gpio@c400 {
- reg = <0xc400 0x100>;
- qcom,pin-num = <5>;
- status = "disabled";
- };
-
- gpio@c500 {
- reg = <0xc500 0x100>;
- qcom,pin-num = <6>;
- status = "disabled";
- };
-
- gpio@c600 {
- reg = <0xc600 0x100>;
- qcom,pin-num = <7>;
- status = "disabled";
- };
-
- gpio@c700 {
- reg = <0xc700 0x100>;
- qcom,pin-num = <8>;
- status = "disabled";
- };
-
- gpio@c800 {
- reg = <0xc800 0x100>;
- qcom,pin-num = <9>;
- status = "disabled";
- };
-
- gpio@c900 {
- reg = <0xc900 0x100>;
- qcom,pin-num = <10>;
- status = "disabled";
- };
-
- gpio@ca00 {
- reg = <0xca00 0x100>;
- qcom,pin-num = <11>;
- status = "disabled";
- };
-
- gpio@cb00 {
- reg = <0xcb00 0x100>;
- qcom,pin-num = <12>;
- status = "disabled";
- };
-
- gpio@cc00 {
- reg = <0xcc00 0x100>;
- qcom,pin-num = <13>;
- status = "disabled";
- };
-
- gpio@cd00 {
- reg = <0xcd00 0x100>;
- qcom,pin-num = <14>;
- status = "disabled";
- };
+ qcom,gpios-disallowed = <4 7 13>;
};
pmi8998_rradc: rradc@4500 {
@@ -372,7 +304,7 @@
qcom,en-ext-pfet-sc-pro;
qcom,pmic-revid = <&pmi8998_revid>;
qcom,loop-auto-gm-en;
- status = "okay";
+ status = "disabled";
};
flash_led: qcom,leds@d300 {
diff --git a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
index a8d559c..4b3fa93 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
@@ -11,9 +11,9 @@
*/
&soc {
- tlmm: pinctrl@03800000 {
+ tlmm: pinctrl@03400000 {
compatible = "qcom,sdm830-pinctrl";
- reg = <0x03800000 0xc00000>;
+ reg = <0x03400000 0xc00000>;
interrupts = <0 208 0>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index 4088f3b..a51f411 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -24,12 +24,13 @@
<0x1380000 0x40000>,
<0x1380000 0x40000>,
<0x1740000 0x40000>,
+ <0x1620000 0x40000>,
<0x1620000 0x40000>;
reg-names = "aggre1_noc-base", "aggre2_noc-base",
"config_noc-base", "dc_noc-base",
"gladiator_noc-base", "mc_virt-base", "mem_noc-base",
- "mmss_noc-base", "system_noc-base";
+ "mmss_noc-base", "system_noc-base", "ipa_virt-base";
mbox-names = "apps_rsc", "disp_rsc";
mboxes = <&apps_rsc 0 &disp_rsc 0>;
@@ -154,14 +155,6 @@
qcom,bcm-dev;
};
- bcm_mm5: bcm-mm5 {
- cell-id = <MSM_BUS_BCM_MM5>;
- label = "MM5";
- qcom,bcm-name = "MM5";
- qcom,rscs = <&rsc_apps>;
- qcom,bcm-dev;
- };
-
bcm_sn0: bcm-sn0 {
cell-id = <MSM_BUS_BCM_SN0>;
label = "SN0";
@@ -194,6 +187,13 @@
qcom,bcm-dev;
};
+ bcm_qup0: bcm-qup0 {
+ cell-id = <MSM_BUS_BCM_QUP0>;
+ label = "QUP0";
+ qcom,bcm-name = "QUP0";
+ qcom,bcm-dev;
+ };
+
bcm_sn1: bcm-sn1 {
cell-id = <MSM_BUS_BCM_SN1>;
label = "SN1";
@@ -397,6 +397,15 @@
clocks = <>;
};
+ fab_ipa_virt: fab-ipa_virt {
+ cell-id = <MSM_BUS_FAB_IPA_VIRT>;
+ label = "fab-ipa_virt";
+ qcom,fab-dev;
+ qcom,base-name = "ipa_virt-base";
+ qcom,bypass-qos-prg;
+ clocks = <>;
+ };
+
fab_mc_virt: fab-mc_virt {
cell-id = <MSM_BUS_FAB_MC_VIRT>;
label = "fab-mc_virt";
@@ -483,6 +492,7 @@
qcom,agg-ports = <1>;
qcom,connections = <&slv_qns_a1noc_snoc>;
qcom,bus-dev = <&fab_aggre1_noc>;
+ qcom,bcms = <&bcm_qup0>;
};
mas_qhm_tsif: mas-qhm-tsif {
@@ -559,6 +569,7 @@
qcom,agg-ports = <1>;
qcom,connections = <&slv_qns_a2noc_snoc>;
qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,bcms = <&bcm_qup0>;
};
mas_qnm_cnoc: mas-qnm-cnoc {
@@ -590,7 +601,6 @@
qcom,qport = <2>;
qcom,connections = <&slv_qns_a2noc_snoc>;
qcom,bus-dev = <&fab_aggre2_noc>;
- qcom,bcms = <&bcm_ip0>;
};
mas_xm_pcie3_1: mas-xm-pcie3-1 {
@@ -776,6 +786,15 @@
qcom,bus-dev = <&fab_gladiator_noc>;
};
+ mas_ipa_core: mas-ipa-core {
+ cell-id = <MSM_BUS_MASTER_IPA_CORE>;
+ label = "mas-ipa-core";
+ qcom,buswidth = <1>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_ipa_core>;
+ qcom,bus-dev = <&fab_ipa_virt>;
+ };
+
mas_llcc_mc: mas-llcc-mc {
cell-id = <MSM_BUS_MASTER_LLCC>;
label = "mas-llcc-mc";
@@ -878,7 +897,6 @@
qcom,agg-ports = <1>;
qcom,connections = <&slv_srvc_mnoc>;
qcom,bus-dev = <&fab_mmss_noc>;
- qcom,bcms = <&bcm_mm5>;
};
mas_qxm_camnoc_hf: mas-qxm-camnoc-hf {
@@ -1601,6 +1619,15 @@
qcom,bus-dev = <&fab_gladiator_noc>;
};
+ slv_ipa_core:slv-ipa-core {
+ cell-id = <MSM_BUS_SLAVE_IPA>;
+ label = "slv-ipa-core";
+ qcom,buswidth = <1>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_ipa_virt>;
+ qcom,bcms = <&bcm_ip0>;
+ };
+
slv_ebi:slv-ebi {
cell-id = <MSM_BUS_SLAVE_EBI_CH0>;
label = "slv-ebi";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index d47dd36..06f620b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -10,8 +10,111 @@
* GNU General Public License for more details.
*/
+#include <dt-bindings/gpio/gpio.h>
+
&soc {
sound-tavil {
qcom,us-euro-gpios = <&tavil_us_euro_sw>;
};
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ label = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&key_home_default
+ &key_vol_up_default
+ &key_cam_snapshot_default
+ &key_cam_focus_default>;
+
+ home {
+ label = "home";
+ gpios = <&pm8998_gpios 5 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <102>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ linux,can-disable;
+ };
+
+ vol_up {
+ label = "volume_up";
+ gpios = <&pm8998_gpios 6 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <115>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ linux,can-disable;
+ };
+
+ cam_snapshot {
+ label = "cam_snapshot";
+ gpios = <&pm8998_gpios 7 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <766>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ linux,can-disable;
+ };
+
+ cam_focus {
+ label = "cam_focus";
+ gpios = <&pm8998_gpios 8 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <528>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ linux,can-disable;
+ };
+ };
+};
+
+&ufsphy_mem {
+ compatible = "qcom,ufs-phy-qmp-v3";
+
+ vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+ vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+ vdda-phy-max-microamp = <62900>;
+ vdda-pll-max-microamp = <18300>;
+
+ status = "ok";
+};
+
+&ufshc_mem {
+ vdd-hba-supply = <&ufs_phy_gdsc>;
+ vdd-hba-fixed-regulator;
+ vcc-supply = <&pm8998_l20>;
+ vccq2-supply = <&pm8998_s4>;
+ vcc-max-microamp = <600000>;
+ vccq2-max-microamp = <600000>;
+
+ qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+ qcom,vddp-ref-clk-max-microamp = <100>;
+
+ status = "ok";
+};
+
+&ufsphy_card {
+ compatible = "qcom,ufs-phy-qmp-v3";
+
+ vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+ vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+ vdda-phy-max-microamp = <62900>;
+ vdda-pll-max-microamp = <18300>;
+
+ status = "ok";
+};
+
+&ufshc_card {
+ vdd-hba-supply = <&ufs_card_gdsc>;
+ vdd-hba-fixed-regulator;
+ vcc-supply = <&pm8998_l21>;
+ vccq2-supply = <&pm8998_s4>;
+ vcc-max-microamp = <300000>;
+ vccq2-max-microamp = <300000>;
+
+ qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+ qcom,vddp-ref-clk-max-microamp = <100>;
+
+ status = "ok";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index cfba6f4..734b6a9 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -9,3 +9,97 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+
+#include <dt-bindings/gpio/gpio.h>
+
+&soc {
+ gpio_keys {
+ compatible = "gpio-keys";
+ label = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&key_vol_up_default
+ &key_cam_snapshot_default
+ &key_cam_focus_default>;
+
+ vol_up {
+ label = "volume_up";
+ gpios = <&pm8998_gpios 6 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <115>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ linux,can-disable;
+ };
+
+ cam_snapshot {
+ label = "cam_snapshot";
+ gpios = <&pm8998_gpios 7 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <766>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ linux,can-disable;
+ };
+
+ cam_focus {
+ label = "cam_focus";
+ gpios = <&pm8998_gpios 8 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <528>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ linux,can-disable;
+ };
+ };
+};
+
+&ufsphy_mem {
+ compatible = "qcom,ufs-phy-qmp-v3";
+
+ vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+ vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+ vdda-phy-max-microamp = <62900>;
+ vdda-pll-max-microamp = <18300>;
+
+ status = "ok";
+};
+
+&ufshc_mem {
+ vdd-hba-supply = <&ufs_phy_gdsc>;
+ vdd-hba-fixed-regulator;
+ vcc-supply = <&pm8998_l20>;
+ vccq2-supply = <&pm8998_s4>;
+ vcc-max-microamp = <600000>;
+ vccq2-max-microamp = <600000>;
+
+ qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+ qcom,vddp-ref-clk-max-microamp = <100>;
+
+ status = "ok";
+};
+
+&ufsphy_card {
+ compatible = "qcom,ufs-phy-qmp-v3";
+
+ vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+ vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+ vdda-phy-max-microamp = <62900>;
+ vdda-pll-max-microamp = <18300>;
+
+ status = "ok";
+};
+
+&ufshc_card {
+ vdd-hba-supply = <&ufs_card_gdsc>;
+ vdd-hba-fixed-regulator;
+ vcc-supply = <&pm8998_l21>;
+ vccq2-supply = <&pm8998_s4>;
+ vcc-max-microamp = <300000>;
+ vccq2-max-microamp = <300000>;
+
+ qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+ qcom,vddp-ref-clk-max-microamp = <100>;
+
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index c5b53b8..b82e4d0 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -11,15 +11,62 @@
*/
&soc {
- tlmm: pinctrl@03800000 {
+ tlmm: pinctrl@03400000 {
compatible = "qcom,sdm845-pinctrl";
- reg = <0x03800000 0xc00000>;
+ reg = <0x03400000 0xc00000>;
interrupts = <0 208 0>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ ufs_dev_reset_assert: ufs_dev_reset_assert {
+ config {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * UFS_RESET driver strengths are having
+ * different values/steps compared to typical
+ * GPIO drive strengths.
+ *
+ * Following table clarifies:
+ *
+ * HDRV value | UFS_RESET | Typical GPIO
+ * (dec) | (mA) | (mA)
+ * 0 | 0.8 | 2
+ * 1 | 1.55 | 4
+ * 2 | 2.35 | 6
+ * 3 | 3.1 | 8
+ * 4 | 3.9 | 10
+ * 5 | 4.65 | 12
+ * 6 | 5.4 | 14
+ * 7 | 6.15 | 16
+ *
+ * POR value for UFS_RESET HDRV is 3 which means
+ * 3.1mA and we want to use that. Hence just
+ * specify 8mA to "drive-strength" binding and
+ * that should result into writing 3 to HDRV
+ * field.
+ */
+ drive-strength = <8>; /* default: 3.1 mA */
+ output-low; /* active low reset */
+ };
+ };
+
+ ufs_dev_reset_deassert: ufs_dev_reset_deassert {
+ config {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * default: 3.1 mA
+ * check comments under ufs_dev_reset_assert
+ */
+ drive-strength = <8>;
+ output-high; /* active low reset */
+ };
+ };
+
+
wcd9xxx_intr {
wcd_intr_default: wcd_intr_default{
mux {
@@ -1113,3 +1160,45 @@
};
};
};
+
+&pm8998_gpios {
+ key_home {
+ key_home_default: key_home_default {
+ pins = "gpio5";
+ function = "normal";
+ input-enable;
+ bias-pull-up;
+ power-source = <0>;
+ };
+ };
+
+ key_vol_up {
+ key_vol_up_default: key_vol_up_default {
+ pins = "gpio6";
+ function = "normal";
+ input-enable;
+ bias-pull-up;
+ power-source = <0>;
+ };
+ };
+
+ key_cam_snapshot {
+ key_cam_snapshot_default: key_cam_snapshot_default {
+ pins = "gpio7";
+ function = "normal";
+ input-enable;
+ bias-pull-up;
+ power-source = <0>;
+ };
+ };
+
+ key_cam_focus {
+ key_cam_focus_default: key_cam_focus_default {
+ pins = "gpio8";
+ function = "normal";
+ input-enable;
+ bias-pull-up;
+ power-source = <0>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
index 124ed99..80f34bf 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
@@ -17,11 +17,8 @@
vdda-phy-supply = <&pm8998_l1>;
vdda-pll-supply = <&pm8998_l2>;
- vddp-ref-clk-supply = <&pm8998_l26>;
vdda-phy-max-microamp = <44000>;
vdda-pll-max-microamp = <14600>;
- vddp-ref-clk-max-microamp = <100>;
- vddp-ref-clk-always-on;
status = "ok";
};
@@ -38,6 +35,9 @@
vcc-max-microamp = <600000>;
vccq2-max-microamp = <600000>;
+ qcom,vddp-ref-clk-supply = <&pm8998_l26>;
+ qcom,vddp-ref-clk-max-microamp = <100>;
+
qcom,disable-lpm;
rpm-level = <0>;
spm-level = <0>;
@@ -122,11 +122,8 @@
vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
- vddp-ref-clk-supply = <&pm8998_l2>;
vdda-phy-max-microamp = <62900>;
vdda-pll-max-microamp = <18300>;
- vddp-ref-clk-max-microamp = <100>;
- vddp-ref-clk-always-on;
status = "ok";
};
@@ -142,6 +139,9 @@
vcc-max-microamp = <300000>;
vccq2-max-microamp = <300000>;
+ qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+ qcom,vddp-ref-clk-max-microamp = <100>;
+
qcom,disable-lpm;
rpm-level = <0>;
spm-level = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 1b697fb..2ff9b2f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -23,7 +23,6 @@
#include "dsi-panel-sharp-1080p-cmd.dtsi"
#include "dsi-panel-sharp-dualmipi-1080p-120hz.dtsi"
#include "dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi"
-#include "sdm845-pinctrl.dtsi"
&soc {
dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@@ -294,3 +293,45 @@
&mdss_mdp {
connectors = <&sde_wb>;
};
+
+&dsi_dual_nt35597_truly_video {
+ qcom,mdss-dsi-panel-timings = [00 1c 07 07 23 21 07 07 05 03 04];
+ qcom,mdss-dsi-t-clk-post = <0x0D>;
+ qcom,mdss-dsi-t-clk-pre = <0x2D>;
+};
+
+&dsi_dual_nt35597_truly_cmd {
+ qcom,mdss-dsi-panel-timings = [00 1c 07 07 23 21 07 07 05 03 04];
+ qcom,mdss-dsi-t-clk-post = <0x0D>;
+ qcom,mdss-dsi-t-clk-pre = <0x2D>;
+};
+
+&dsi_nt35597_truly_dsc_cmd {
+ qcom,mdss-dsi-panel-timings = [00 15 05 05 20 1f 05 05 03 03 04];
+ qcom,mdss-dsi-t-clk-post = <0x0b>;
+ qcom,mdss-dsi-t-clk-pre = <0x23>;
+};
+
+&dsi_nt35597_truly_dsc_video {
+ qcom,mdss-dsi-panel-timings = [00 15 05 05 20 1f 05 05 03 03 04];
+ qcom,mdss-dsi-t-clk-post = <0x0b>;
+ qcom,mdss-dsi-t-clk-pre = <0x23>;
+};
+
+&dsi_sharp_4k_dsc_video {
+ qcom,mdss-dsi-panel-timings = [00 12 04 04 1e 1e 04 04 02 03 04];
+ qcom,mdss-dsi-t-clk-post = <0x0a>;
+ qcom,mdss-dsi-t-clk-pre = <0x1e>;
+};
+
+&dsi_sharp_4k_dsc_cmd {
+ qcom,mdss-dsi-panel-timings = [00 12 04 04 1e 1e 04 04 02 03 04];
+ qcom,mdss-dsi-t-clk-post = <0x0a>;
+ qcom,mdss-dsi-t-clk-pre = <0x1e>;
+};
+
+&dsi_dual_sharp_1080_120hz_cmd {
+ qcom,mdss-dsi-panel-timings = [00 24 09 09 26 24 09 09 06 03 04];
+ qcom,mdss-dsi-t-clk-post = <0x0f>;
+ qcom,mdss-dsi-t-clk-pre = <0x36>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index c4184c4..dcfb09e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -707,7 +707,7 @@
};
clock_gcc: qcom,gcc@100000 {
- compatible = "qcom,gcc-sdm845";
+ compatible = "qcom,gcc-sdm845", "syscon";
reg = <0x100000 0x1f0000>;
reg-names = "cc_base";
vdd_cx-supply = <&pm8998_s9_level>;
@@ -717,7 +717,7 @@
};
clock_videocc: qcom,videocc@ab00000 {
- compatible = "qcom,video_cc-sdm845";
+ compatible = "qcom,video_cc-sdm845", "syscon";
reg = <0xab00000 0x10000>;
reg-names = "cc_base";
vdd_cx-supply = <&pm8998_s9_level>;
@@ -726,7 +726,7 @@
};
clock_camcc: qcom,camcc@ad00000 {
- compatible = "qcom,cam_cc-sdm845";
+ compatible = "qcom,cam_cc-sdm845", "syscon";
reg = <0xad00000 0x10000>;
reg-names = "cc_base";
vdd_cx-supply = <&pm8998_s9_level>;
@@ -736,7 +736,7 @@
};
clock_dispcc: qcom,dispcc@af00000 {
- compatible = "qcom,dispcc-sdm845";
+ compatible = "qcom,dispcc-sdm845", "syscon";
reg = <0xaf00000 0x100000>;
reg-names = "cc_base";
vdd_cx-supply = <&pm8998_s9_level>;
@@ -744,16 +744,138 @@
#reset-cells = <1>;
};
- clock_gpucc: qcom,gpucc {
- compatible = "qcom,dummycc";
- clock-output-names = "gpucc_clocks";
+ clock_gpucc: qcom,gpucc@5090000 {
+ compatible = "qcom,gpucc-sdm845", "syscon";
+ reg = <0x5090000 0x9000>;
+ reg-names = "cc_base";
+ vdd_cx-supply = <&pm8998_s9_level>;
#clock-cells = <1>;
#reset-cells = <1>;
};
- clock_cpucc: qcom,cpucc {
- compatible = "qcom,dummycc";
- clock-output-names = "cpucc_clocks";
+ clock_gfx: qcom,gfxcc@5090000 {
+ compatible = "qcom,gfxcc-sdm845";
+ reg = <0x5090000 0x9000>;
+ reg-names = "cc_base";
+ vdd_gfx-supply = <&pm8005_s1_level>;
+ vdd_mx-supply = <&pm8998_s6_level>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ clock_cpucc: qcom,cpucc@0x17d41000 {
+ compatible = "qcom,clk-cpu-osm";
+ reg = <0x17d41000 0x1400>,
+ <0x17d43000 0x1400>,
+ <0x17d45800 0x1400>,
+ <0x178d0000 0x1000>,
+ <0x178c0000 0x1000>,
+ <0x178b0000 0x1000>,
+ <0x17d42400 0x0c00>,
+ <0x17d44400 0x0c00>,
+ <0x17d46c00 0x0c00>,
+ <0x17810090 0x8>;
+ reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
+ "l3_pll", "pwrcl_pll", "perfcl_pll",
+ "l3_sequencer", "pwrcl_sequencer",
+ "perfcl_sequencer", "apps_itm_ctl";
+
+ vdd-l3-supply = <&apc0_l3_vreg>;
+ vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
+ vdd-perfcl-supply = <&apc1_perfcl_vreg>;
+
+ qcom,l3-speedbin0-v0 =
+ < 300000000 0x000c000f 0x00002020 0x1 1 >,
+ < 422400000 0x50140116 0x00002020 0x1 2 >,
+ < 499200000 0x5014021a 0x00002020 0x1 3 >,
+ < 576000000 0x5014031e 0x00002020 0x1 4 >,
+ < 652800000 0x501c0422 0x00002020 0x1 5 >,
+ < 729600000 0x501c0526 0x00002020 0x1 6 >,
+ < 806400000 0x501c062a 0x00002222 0x1 7 >;
+
+ qcom,pwrcl-speedbin0-v0 =
+ < 300000000 0x000c000f 0x00002020 0x1 1 >,
+ < 422400000 0x50140116 0x00002020 0x1 2 >,
+ < 499200000 0x5014021a 0x00002020 0x1 3 >,
+ < 576000000 0x5014031e 0x00002020 0x1 4 >,
+ < 652800000 0x501c0422 0x00002020 0x1 5 >,
+ < 748800000 0x501c0527 0x00002020 0x1 6 >,
+ < 825600000 0x401c062b 0x00002222 0x1 7 >,
+ < 902400000 0x4024072f 0x00002626 0x1 8 >,
+ < 979200000 0x40240833 0x00002929 0x1 9 >,
+ < 1056000000 0x402c0937 0x00002c2c 0x1 10 >,
+ < 1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
+ < 1209600000 0x402c0b3f 0x00003333 0x1 12 >;
+
+ qcom,perfcl-speedbin0-v0 =
+ < 300000000 0x000c000f 0x00002020 0x1 1 >,
+ < 422400000 0x50140116 0x00002020 0x1 2 >,
+ < 499200000 0x5014021a 0x00002020 0x1 3 >,
+ < 576000000 0x5014031e 0x00002020 0x1 4 >,
+ < 652800000 0x501c0422 0x00002020 0x1 5 >,
+ < 729600000 0x501c0526 0x00002020 0x1 6 >,
+ < 806400000 0x501c062a 0x00002222 0x1 7 >,
+ < 883200000 0x4024072b 0x00002525 0x1 8 >,
+ < 960000000 0x40240832 0x00002828 0x1 9 >,
+ < 1036800000 0x40240936 0x00002b2b 0x1 10 >,
+ < 1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
+ < 1190400000 0x402c0b3e 0x00003232 0x1 12 >;
+
+ qcom,l3-min-cpr-vc-bin0 = <7>;
+ qcom,pwrcl-min-cpr-vc-bin0 = <6>;
+ qcom,perfcl-min-cpr-vc-bin0 = <7>;
+
+ qcom,up-timer =
+ <1000 1000 1000>;
+ qcom,down-timer =
+ <100000 100000 100000>;
+ qcom,pc-override-index =
+ <0 0 0>;
+ qcom,set-ret-inactive;
+ qcom,enable-llm-freq-vote;
+ qcom,llm-freq-up-timer =
+ <1000 1000 1000>;
+ qcom,llm-freq-down-timer =
+ <327675 327675 327675>;
+ qcom,enable-llm-volt-vote;
+ qcom,llm-volt-up-timer =
+ <1000 1000 1000>;
+ qcom,llm-volt-down-timer =
+ <327675 327675 327675>;
+ qcom,cc-reads = <10>;
+ qcom,cc-delay = <5>;
+ qcom,cc-factor = <100>;
+ qcom,osm-clk-rate = <100000000>;
+ qcom,xo-clk-rate = <19200000>;
+
+ qcom,l-val-base =
+ <0x178d0004 0x178c0004 0x178b0004>;
+ qcom,apcs-pll-user-ctl =
+ <0x178d000c 0x178c000c 0x178b000c>;
+ qcom,apcs-pll-min-freq =
+ <0x17d41094 0x17d43094 0x17d45894>;
+ qcom,apm-mode-ctl =
+ <0x0 0x0 0x17d20010>;
+ qcom,apm-status-ctrl =
+ <0x0 0x0 0x17d20000>;
+ qcom,perfcl-isense-addr = <0x17871480>;
+ qcom,l3-mem-acc-addr = <0x17990170 0x17990170 0x17990170>;
+ qcom,pwrcl-mem-acc-addr = <0x17990160 0x17990164 0x17990164>;
+ qcom,perfcl-mem-acc-addr = <0x17990168 0x1799016c 0x1799016c>;
+ qcom,cfg-gfmux-addr = <0x178d0084 0x178c0084 0x178b0084>;
+ qcom,apcs-cbc-addr = <0x178d008c 0x178c008c 0x178b008c>;
+ qcom,apcs-ramp-ctl-addr = <0x17840904 0x17840904 0x17830904>;
+
+ qcom,perfcl-apcs-apm-threshold-voltage = <800000>;
+ qcom,perfcl-apcs-mem-acc-threshold-voltage = <852000>;
+ qcom,boost-fsm-en;
+ qcom,safe-fsm-en;
+ qcom,ps-fsm-en;
+ qcom,droop-fsm-en;
+ qcom,osm-pll-setup;
+
+ clock-names = "xo_ao";
+ clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
#clock-cells = <1>;
#reset-cells = <1>;
};
@@ -765,15 +887,31 @@
mbox-names = "apps";
};
+ clock_debug: qcom,cc-debug@100000 {
+ compatible = "qcom,debugcc-sdm845";
+ qcom,cc-count = <5>;
+ qcom,gcc = <&clock_gcc>;
+ qcom,videocc = <&clock_videocc>;
+ qcom,camcc = <&clock_camcc>;
+ qcom,dispcc = <&clock_dispcc>;
+ qcom,gpucc = <&clock_gpucc>;
+ clock-names = "xo_clk_src";
+ clocks = <&clock_rpmh RPMH_CXO_CLK>;
+ #clock-cells = <1>;
+ };
+
ufsphy_mem: ufsphy_mem@1d87000 {
reg = <0x1d87000 0xda8>; /* PHY regs */
reg-names = "phy_mem";
#phy-cells = <0>;
- /* TODO: add "ref_clk_src" */
- clock-names = "ref_clk",
+ lanes-per-direction = <2>;
+
+ clock-names = "ref_clk_src",
+ "ref_clk",
"ref_aux_clk";
- clocks = <&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
+ clocks = <&clock_rpmh RPMH_LN_BB_CLK1>,
+ <&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
<&clock_gcc GCC_UFS_PHY_PHY_AUX_CLK>;
status = "disabled";
@@ -789,13 +927,13 @@
lanes-per-direction = <2>;
dev-ref-clk-freq = <0>; /* 19.2 MHz */
- /* TODO: add "ref_clk" */
clock-names =
"core_clk",
"bus_aggr_clk",
"iface_clk",
"core_clk_unipro",
"core_clk_ice",
+ "ref_clk",
"tx_lane0_sync_clk",
"rx_lane0_sync_clk",
"rx_lane1_sync_clk";
@@ -806,6 +944,7 @@
<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
<&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>,
+ <&clock_rpmh RPMH_LN_BB_CLK1>,
<&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
@@ -817,34 +956,46 @@
<75000000 300000000>,
<0 0>,
<0 0>,
+ <0 0>,
<0 0>;
qcom,msm-bus,name = "ufshc_mem";
qcom,msm-bus,num-cases = <22>;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
- <95 512 0 0>, <1 650 0 0>, /* No vote */
- <95 512 922 0>, <1 650 1000 0>, /* PWM G1 */
- <95 512 1844 0>, <1 650 1000 0>, /* PWM G2 */
- <95 512 3688 0>, <1 650 1000 0>, /* PWM G3 */
- <95 512 7376 0>, <1 650 1000 0>, /* PWM G4 */
- <95 512 1844 0>, <1 650 1000 0>, /* PWM G1 L2 */
- <95 512 3688 0>, <1 650 1000 0>, /* PWM G2 L2 */
- <95 512 7376 0>, <1 650 1000 0>, /* PWM G3 L2 */
- <95 512 14752 0>, <1 650 1000 0>, /* PWM G4 L2 */
- <95 512 127796 0>, <1 650 1000 0>, /* HS G1 RA */
- <95 512 255591 0>, <1 650 1000 0>, /* HS G2 RA */
- <95 512 511181 0>, <1 650 1000 0>, /* HS G3 RA */
- <95 512 255591 0>, <1 650 1000 0>, /* HS G1 RA L2 */
- <95 512 511181 0>, <1 650 1000 0>, /* HS G2 RA L2 */
- <95 512 1022362 0>, <1 650 1000 0>, /* HS G3 RA L2 */
- <95 512 149422 0>, <1 650 1000 0>, /* HS G1 RB */
- <95 512 298189 0>, <1 650 1000 0>, /* HS G2 RB */
- <95 512 596378 0>, <1 650 1000 0>, /* HS G3 RB */
- <95 512 298189 0>, <1 650 1000 0>, /* HS G1 RB L2 */
- <95 512 596378 0>, <1 650 1000 0>, /* HS G2 RB L2 */
- <95 512 1192756 0>, <1 650 1000 0>, /* HS G3 RB L2 */
- <95 512 4096000 0>, <1 650 1000 0>; /* Max. bandwidth */
+ /*
+ * During HS G3 UFS runs at nominal voltage corner, vote
+ * higher bandwidth to push other buses in the data path
+ * to run at nominal to achieve max throughput.
+ * 4GBps pushes BIMC to run at nominal.
+ * 200MBps pushes CNOC to run at nominal.
+ * Vote for half of this bandwidth for HS G3 1-lane.
+ * For max bandwidth, vote high enough to push the buses
+ * to run in turbo voltage corner.
+ */
+ <123 512 0 0>, <1 757 0 0>, /* No vote */
+ <123 512 922 0>, <1 757 1000 0>, /* PWM G1 */
+ <123 512 1844 0>, <1 757 1000 0>, /* PWM G2 */
+ <123 512 3688 0>, <1 757 1000 0>, /* PWM G3 */
+ <123 512 7376 0>, <1 757 1000 0>, /* PWM G4 */
+ <123 512 1844 0>, <1 757 1000 0>, /* PWM G1 L2 */
+ <123 512 3688 0>, <1 757 1000 0>, /* PWM G2 L2 */
+ <123 512 7376 0>, <1 757 1000 0>, /* PWM G3 L2 */
+ <123 512 14752 0>, <1 757 1000 0>, /* PWM G4 L2 */
+ <123 512 127796 0>, <1 757 1000 0>, /* HS G1 RA */
+ <123 512 255591 0>, <1 757 1000 0>, /* HS G2 RA */
+ <123 512 2097152 0>, <1 757 102400 0>, /* HS G3 RA */
+ <123 512 255591 0>, <1 757 1000 0>, /* HS G1 RA L2 */
+ <123 512 511181 0>, <1 757 1000 0>, /* HS G2 RA L2 */
+ <123 512 4194304 0>, <1 757 204800 0>, /* HS G3 RA L2 */
+ <123 512 149422 0>, <1 757 1000 0>, /* HS G1 RB */
+ <123 512 298189 0>, <1 757 1000 0>, /* HS G2 RB */
+ <123 512 2097152 0>, <1 757 102400 0>, /* HS G3 RB */
+ <123 512 298189 0>, <1 757 1000 0>, /* HS G1 RB L2 */
+ <123 512 596378 0>, <1 757 1000 0>, /* HS G2 RB L2 */
+ <123 512 4194304 0>, <1 757 204800 0>, /* HS G3 RB L2 */
+ <123 512 7643136 0>, <1 757 307200 0>; /* Max. bandwidth */
+
qcom,bus-vector-names = "MIN",
"PWM_G1_L1", "PWM_G2_L1", "PWM_G3_L1", "PWM_G4_L1",
"PWM_G1_L2", "PWM_G2_L2", "PWM_G3_L2", "PWM_G4_L2",
@@ -854,6 +1005,18 @@
"HS_RB_G1_L2", "HS_RB_G2_L2", "HS_RB_G3_L2",
"MAX";
+ /* PM QoS */
+ qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
+ qcom,pm-qos-cpu-group-latency-us = <70 70>;
+ qcom,pm-qos-default-cpu = <0>;
+
+ pinctrl-names = "dev-reset-assert", "dev-reset-deassert";
+ pinctrl-0 = <&ufs_dev_reset_assert>;
+ pinctrl-1 = <&ufs_dev_reset_deassert>;
+
+ resets = <&clock_gcc GCC_UFS_PHY_BCR>;
+ reset-names = "core_reset";
+
status = "disabled";
};
@@ -862,10 +1025,13 @@
reg-names = "phy_mem";
#phy-cells = <0>;
- /* TODO: add "ref_clk_src" */
- clock-names = "ref_clk",
+ lanes-per-direction = <1>;
+
+ clock-names = "ref_clk_src",
+ "ref_clk",
"ref_aux_clk";
- clocks = <&clock_gcc GCC_UFS_CARD_CLKREF_CLK>,
+ clocks = <&clock_rpmh RPMH_LN_BB_CLK1>,
+ <&clock_gcc GCC_UFS_CARD_CLKREF_CLK>,
<&clock_gcc GCC_UFS_CARD_PHY_AUX_CLK>;
status = "disabled";
@@ -881,13 +1047,13 @@
lanes-per-direction = <1>;
dev-ref-clk-freq = <0>; /* 19.2 MHz */
- /* TODO: add "ref_clk" */
clock-names =
"core_clk",
"bus_aggr_clk",
"iface_clk",
"core_clk_unipro",
"core_clk_ice",
+ "ref_clk",
"tx_lane0_sync_clk",
"rx_lane0_sync_clk";
/* TODO: add HW CTL clocks when available */
@@ -897,6 +1063,7 @@
<&clock_gcc GCC_UFS_CARD_AHB_CLK>,
<&clock_gcc GCC_UFS_CARD_UNIPRO_CORE_CLK>,
<&clock_gcc GCC_UFS_CARD_ICE_CORE_CLK>,
+ <&clock_rpmh RPMH_LN_BB_CLK1>,
<&clock_gcc GCC_UFS_CARD_TX_SYMBOL_0_CLK>,
<&clock_gcc GCC_UFS_CARD_RX_SYMBOL_0_CLK>;
freq-table-hz =
@@ -906,27 +1073,41 @@
<37500000 150000000>,
<75000000 300000000>,
<0 0>,
+ <0 0>,
<0 0>;
qcom,msm-bus,name = "ufshc_card";
qcom,msm-bus,num-cases = <9>;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
- <95 512 0 0>, <1 650 0 0>, /* No vote */
- <95 512 922 0>, <1 650 1000 0>, /* PWM G1 */
- <95 512 127796 0>, <1 650 1000 0>, /* HS G1 RA */
- <95 512 255591 0>, <1 650 1000 0>, /* HS G2 RA */
- <95 512 511181 0>, <1 650 1000 0>, /* HS G3 RA */
- <95 512 149422 0>, <1 650 1000 0>, /* HS G1 RB */
- <95 512 298189 0>, <1 650 1000 0>, /* HS G2 RB */
- <95 512 596378 0>, <1 650 1000 0>, /* HS G3 RB */
- <95 512 4096000 0>, <1 650 1000 0>; /* Max. bandwidth */
+ <122 512 0 0>, <1 756 0 0>, /* No vote */
+ <122 512 922 0>, <1 756 1000 0>, /* PWM G1 */
+ <122 512 127796 0>, <1 756 1000 0>, /* HS G1 RA */
+ <122 512 255591 0>, <1 756 1000 0>, /* HS G2 RA */
+ <122 512 2097152 0>, <1 756 102400 0>, /* HS G3 RA */
+ <122 512 149422 0>, <1 756 1000 0>, /* HS G1 RB */
+ <122 512 298189 0>, <1 756 1000 0>, /* HS G2 RB */
+ <122 512 2097152 0>, <1 756 102400 0>, /* HS G3 RB */
+ <122 512 7643136 0>, <1 756 307200 0>; /* Max. bandwidth */
qcom,bus-vector-names = "MIN",
"PWM_G1_L1",
"HS_RA_G1_L1", "HS_RA_G2_L1", "HS_RA_G3_L1",
"HS_RB_G1_L1", "HS_RB_G2_L1", "HS_RB_G3_L1",
"MAX";
+ /* PM QoS */
+ qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
+ qcom,pm-qos-cpu-group-latency-us = <70 70>;
+ qcom,pm-qos-default-cpu = <0>;
+
+ /*
+ * Note: this instance doesn't have control over UFS device
+ * reset
+ */
+
+ resets = <&clock_gcc GCC_UFS_CARD_BCR>;
+ reset-names = "core_reset";
+
status = "disabled";
};
@@ -1346,10 +1527,12 @@
"l3-scu-faultirq";
};
- qcom,llcc@1300000 {
+ qcom,llcc@1100000 {
compatible = "qcom,llcc-core", "syscon", "simple-mfd";
- reg = <0x1300000 0x50000>;
+ reg = <0x1100000 0x250000>;
reg-names = "llcc_base";
+ qcom,llcc-banks-off = <0x0 0x80000 0x100000 0x180000>;
+ qcom,llcc-broadcast-off = <0x200000>;
llcc: qcom,sdm845-llcc {
compatible = "qcom,sdm845-llcc";
@@ -1419,6 +1602,7 @@
apps_rsc: mailbox@179e0000 {
compatible = "qcom,tcs-drv";
+ label = "apps_rsc";
reg = <0x179e0000 0x100>, <0x179e0d00 0x3000>;
interrupts = <0 5 0>;
#mbox-cells = <1>;
@@ -1431,6 +1615,7 @@
disp_rsc: mailbox@af20000 {
compatible = "qcom,tcs-drv";
+ label = "display_rsc";
reg = <0xaf20000 0x100>, <0xaf21c00 0x3000>;
interrupts = <0 129 0>;
#mbox-cells = <1>;
@@ -1670,7 +1855,6 @@
qcom,ipa-hw-ver = <13>; /* IPA core version = IPAv3.5.1 */
qcom,ipa-hw-mode = <1>;
qcom,ee = <0>;
- qcom,use-gsi;
qcom,use-ipa-tethering-bridge;
qcom,modem-cfg-emb-pipe-flt;
qcom,ipa-wdi2;
@@ -1825,6 +2009,11 @@
<0x10ae000 0x2000>;
reg-names = "dcc-base", "dcc-ram-base";
};
+
+ qcom,msm-core@780000 {
+ compatible = "qcom,apss-core-ea";
+ reg = <0x780000 0x1000>;
+ };
};
&pcie_0_gdsc {
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index bf8082e..b75b46f 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -272,18 +272,21 @@
CONFIG_HW_RANDOM=y
CONFIG_MSM_ADSPRPC=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
CONFIG_SOUNDWIRE=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
CONFIG_SPI_SPIDEV=y
CONFIG_SLIMBUS_MSM_NGD=y
CONFIG_SPMI=y
CONFIG_PINCTRL_SDM845=y
CONFIG_PINCTRL_SDM830=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_QPNP_PIN=y
CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_QPNP_FG_GEN3=y
@@ -403,6 +406,8 @@
CONFIG_MSM_DISPCC_SDM845=y
CONFIG_CLOCK_QPNP_DIV=y
CONFIG_MSM_CLK_RPMH=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_MSM_GPUCC_SDM845=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_ARM_SMMU=y
@@ -414,6 +419,8 @@
CONFIG_QCOM_EUD=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
CONFIG_QCOM_SECURE_BUFFER=y
CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_MSM_SMEM=y
@@ -437,6 +444,7 @@
CONFIG_QCOM_COMMAND_DB=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
CONFIG_QCOMCCI_HWMON=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 4b73772..38d2ce8 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -13,13 +13,11 @@
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
-CONFIG_CGROUPS=y
CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
-CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_SCHED_HMP=y
CONFIG_SCHED_HMP_CSTATE_AWARE=y
@@ -27,6 +25,7 @@
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_XZ is not set
@@ -284,18 +283,21 @@
CONFIG_HW_RANDOM=y
CONFIG_MSM_ADSPRPC=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
CONFIG_SOUNDWIRE=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
CONFIG_SPI_SPIDEV=y
CONFIG_SLIMBUS_MSM_NGD=y
CONFIG_SPMI=y
CONFIG_PINCTRL_SDM845=y
CONFIG_PINCTRL_SDM830=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_QPNP_PIN=y
CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_QPNP_FG_GEN3=y
@@ -423,6 +425,8 @@
CONFIG_MSM_DISPCC_SDM845=y
CONFIG_CLOCK_QPNP_DIV=y
CONFIG_MSM_CLK_RPMH=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_MSM_GPUCC_SDM845=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_ARM_SMMU=y
@@ -437,6 +441,8 @@
CONFIG_QCOM_EUD=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
CONFIG_QCOM_SECURE_BUFFER=y
CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_MSM_SMEM=y
@@ -461,6 +467,7 @@
CONFIG_QCOM_COMMAND_DB=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
CONFIG_QCOM_DCC_V2=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
@@ -522,6 +529,8 @@
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
CONFIG_SCHEDSTATS=y
CONFIG_SCHED_STACK_END_CHECK=y
CONFIG_TIMER_STATS=y
@@ -539,7 +548,8 @@
CONFIG_QCOM_RTB=y
CONFIG_QCOM_RTB_SEPARATE_CPUS=y
CONFIG_FUNCTION_TRACER=y
-CONFIG_TRACER_SNAPSHOT=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_CPU_FREQ_SWITCH_PROFILER=y
CONFIG_LKDTM=y
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d8e6635..860c3b6 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -976,7 +976,7 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
* device, and allocated the default domain for that group.
*/
if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
- pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+ pr_debug("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
dev_name(dev));
return false;
}
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 79902e7..d7eb419 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2830,7 +2830,7 @@ static int binder_thread_read(struct binder_proc *proc,
} break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
cmd = BR_TRANSACTION_COMPLETE;
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
+ if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
@@ -2872,14 +2872,14 @@ static int binder_thread_read(struct binder_proc *proc,
node->has_weak_ref = 0;
}
if (cmd != BR_NOOP) {
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
+ if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (put_user_preempt_disabled(node->ptr, (binder_uintptr_t __user *)
+ if (put_user_preempt_disabled(node->ptr, (binder_uintptr_t __user *)
(binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- if (put_user_preempt_disabled(node->cookie, (binder_uintptr_t __user *)
+ if (put_user_preempt_disabled(node->cookie, (binder_uintptr_t __user *)
(binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
@@ -2923,7 +2923,7 @@ static int binder_thread_read(struct binder_proc *proc,
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
+ if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (put_user_preempt_disabled(death->cookie, (binder_uintptr_t __user *) ptr))
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index d82ce17..4609244 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -416,6 +416,7 @@ static struct cpu_attr cpu_attrs[] = {
_CPU_ATTR(online, &__cpu_online_mask),
_CPU_ATTR(possible, &__cpu_possible_mask),
_CPU_ATTR(present, &__cpu_present_mask),
+ _CPU_ATTR(core_ctl_isolated, &__cpu_isolated_mask),
};
/*
@@ -651,6 +652,7 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[0].attr.attr,
&cpu_attrs[1].attr.attr,
&cpu_attrs[2].attr.attr,
+ &cpu_attrs[3].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
&dev_attr_isolated.attr,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 83db1416..ece2f00 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -2101,7 +2101,7 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
- if (core->parent == parent)
+ if (core->parent == parent && !(core->flags & CLK_IS_MEASURE))
goto out;
/* verify ops for for multi-parent clks */
@@ -2599,7 +2599,7 @@ static const struct file_operations clk_enabled_list_fops = {
.release = seq_release,
};
-static void clk_debug_print_hw(struct clk_core *clk, struct seq_file *f)
+void clk_debug_print_hw(struct clk_core *clk, struct seq_file *f)
{
if (IS_ERR_OR_NULL(clk))
return;
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index 331e086..f0db049 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -23,6 +23,7 @@ void __clk_free_clk(struct clk *clk);
/* Debugfs API to print the enabled clocks */
void clock_debug_print_enabled(void);
+void clk_debug_print_hw(struct clk_core *clk, struct seq_file *f);
#else
/* All these casts to avoid ifdefs in clkdev... */
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 836f0c7..6e13562 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -10,7 +10,7 @@
clk-qcom-y += clk-regmap-divider.o
clk-qcom-y += clk-regmap-mux.o
clk-qcom-y += reset.o clk-voter.o
-clk-qcom-y += clk-dummy.o
+clk-qcom-y += clk-dummy.o clk-debug.o
clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o gdsc-regulator.o
# Keep alphabetically sorted by config
@@ -31,10 +31,10 @@
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
-obj-$(CONFIG_MSM_GCC_SDM845) += gcc-sdm845.o
+obj-$(CONFIG_MSM_GCC_SDM845) += gcc-sdm845.o debugcc-sdm845.o
obj-$(CONFIG_MSM_GPUCC_SDM845) += gpucc-sdm845.o
obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
-obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
\ No newline at end of file
+obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index c49eddf..a274975 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -107,9 +107,11 @@ static struct clk_alpha_pll cam_cc_pll0 = {
.parent_names = (const char *[]){ "bi_tcxo" },
.num_parents = 1,
.ops = &clk_fabia_pll_ops,
- VDD_CX_FMAX_MAP2(
- MIN, 19200000,
- LOWER, 600000000),
+ VDD_CX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
},
},
};
@@ -152,9 +154,11 @@ static struct clk_alpha_pll cam_cc_pll1 = {
.parent_names = (const char *[]){ "bi_tcxo" },
.num_parents = 1,
.ops = &clk_fabia_pll_ops,
- VDD_CX_FMAX_MAP2(
- MIN, 19200000,
- LOW, 808000000),
+ VDD_CX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
},
},
};
@@ -189,9 +193,11 @@ static struct clk_alpha_pll cam_cc_pll2 = {
.parent_names = (const char *[]){ "bi_tcxo" },
.num_parents = 1,
.ops = &clk_fabia_pll_ops,
- VDD_MX_FMAX_MAP2(
- MIN, 19200000,
- LOWER, 960000000),
+ VDD_MX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
},
},
};
@@ -248,9 +254,11 @@ static struct clk_alpha_pll cam_cc_pll3 = {
.parent_names = (const char *[]){ "bi_tcxo" },
.num_parents = 1,
.ops = &clk_fabia_pll_ops,
- VDD_CX_FMAX_MAP2(
- MIN, 19200000,
- LOWER, 384000000),
+ VDD_CX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
},
},
};
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 53f736c..51a5e0b 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -23,6 +23,7 @@
#include "clk-branch.h"
#include "clk-regmap.h"
+#include "clk-debug.h"
static bool clk_branch_in_hwcg_mode(const struct clk_branch *br)
{
@@ -338,6 +339,7 @@ const struct clk_ops clk_branch2_ops = {
.recalc_rate = clk_branch2_recalc_rate,
.set_flags = clk_branch_set_flags,
.list_registers = clk_branch2_list_registers,
+ .debug_init = clk_debug_measure_add,
};
EXPORT_SYMBOL_GPL(clk_branch2_ops);
@@ -393,6 +395,7 @@ const struct clk_ops clk_gate2_ops = {
.disable = clk_gate2_disable,
.is_enabled = clk_is_enabled_regmap,
.list_registers = clk_gate2_list_registers,
+ .debug_init = clk_debug_measure_add,
};
EXPORT_SYMBOL_GPL(clk_gate2_ops);
diff --git a/drivers/clk/qcom/clk-debug.c b/drivers/clk/qcom/clk-debug.c
new file mode 100644
index 0000000..53288f7
--- /dev/null
+++ b/drivers/clk/qcom/clk-debug.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+
+#include "clk-regmap.h"
+#include "clk-debug.h"
+#include "common.h"
+
+static struct clk_hw *measure;
+
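+ /* clk_reg_lock guards the measurement hardware; clk_debug_lock
+  * serialises debugfs measurement requests.
+  */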
+static DEFINE_SPINLOCK(clk_reg_lock);
+static DEFINE_MUTEX(clk_debug_lock);
+
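+ /* XO (19.2 MHz) divided by 4, in Hz. */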
+#define TCXO_DIV_4_HZ 4800000
+#define SAMPLE_TICKS_1_MS 0x1000
+#define SAMPLE_TICKS_14_MS 0x10000
+
+#define XO_DIV4_CNT_DONE BIT(25)
+#define CNT_EN BIT(20)
+#define MEASURE_CNT BM(24, 0)
+
+/* Sample clock for 'ticks' reference clock ticks. */
+static u32 run_measurement(unsigned int ticks, struct regmap *regmap,
+ u32 ctl_reg, u32 status_reg)
+{
+ u32 regval;
+
+ /* Stop counters and set the XO4 counter start value. */
+ regmap_write(regmap, ctl_reg, ticks);
+
+ regmap_read(regmap, status_reg, &regval);
+
+ /* Wait for timer to become ready. */
+ while ((regval & XO_DIV4_CNT_DONE) != 0) {
+ cpu_relax();
+ regmap_read(regmap, status_reg, &regval);
+ }
+
+ /* Run measurement and wait for completion. */
+ regmap_write(regmap, ctl_reg, (CNT_EN|ticks));
+
+ regmap_read(regmap, status_reg, &regval);
+
+ while ((regval & XO_DIV4_CNT_DONE) == 0) {
+ cpu_relax();
+ regmap_read(regmap, status_reg, &regval);
+ }
+
+ /* Return measured ticks. */
+ regmap_read(regmap, status_reg, &regval);
+ regval &= MEASURE_CNT;
+
+ /* Stop the counters */
+ regmap_write(regmap, ctl_reg, ticks);
+
+ return regval;
+}
+
+/*
+ * Perform a hardware rate measurement for a given clock.
+ * FOR DEBUG USE ONLY: Measurements take ~15 ms!
+ */
+static unsigned long clk_debug_mux_measure_rate(struct clk_hw *hw)
+{
+ unsigned long flags, ret = 0;
+ u32 gcc_xo4_reg, multiplier = 1;
+ u64 raw_count_short, raw_count_full;
+ struct clk_debug_mux *meas = to_clk_measure(hw);
+ struct measure_clk_data *data = meas->priv;
+
+ clk_prepare_enable(data->cxo);
+
+ spin_lock_irqsave(&clk_reg_lock, flags);
+
+ /* Enable CXO/4 and RINGOSC branch. */
+ regmap_read(meas->regmap[GCC], data->xo_div4_cbcr, &gcc_xo4_reg);
+ gcc_xo4_reg |= BIT(0);
+ regmap_write(meas->regmap[GCC], data->xo_div4_cbcr, gcc_xo4_reg);
+
+ /*
+ * The ring oscillator counter will not reset if the measured clock
+ * is not running. To detect this, run a short measurement before
+ * the full measurement. If the raw results of the two are the same
+ * then the clock must be off.
+ */
+
+ /* Run a short measurement. (~1 ms) */
+ raw_count_short = run_measurement(SAMPLE_TICKS_1_MS, meas->regmap[GCC],
+ data->ctl_reg, data->status_reg);
+
+ /* Run a full measurement. (~14 ms) */
+ raw_count_full = run_measurement(SAMPLE_TICKS_14_MS, meas->regmap[GCC],
+ data->ctl_reg, data->status_reg);
+
+ gcc_xo4_reg &= ~BIT(0);
+ regmap_write(meas->regmap[GCC], data->xo_div4_cbcr, gcc_xo4_reg);
+
+ /* Return 0 if the clock is off. */
+ if (raw_count_full == raw_count_short)
+ ret = 0;
+ else {
+ /* Compute rate in Hz. */
+ raw_count_full = ((raw_count_full * 10) + 15) * TCXO_DIV_4_HZ;
+ do_div(raw_count_full, ((SAMPLE_TICKS_14_MS * 10) + 35));
+ ret = (raw_count_full * multiplier);
+ }
+
+ spin_unlock_irqrestore(&clk_reg_lock, flags);
+
+ clk_disable_unprepare(data->cxo);
+
+ return ret;
+}
+
+static u8 clk_debug_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_debug_mux *meas = to_clk_measure(hw);
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ for (i = 0; i < num_parents; i++) {
+ if (!strcmp(meas->parent[i].parents,
+ hw->init->parent_names[i])) {
+ pr_debug("%s: Clock name %s index %d\n", __func__,
+ hw->init->name, i);
+ return i;
+ }
+ }
+
+ return 0;
+}
+
+static int clk_debug_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_debug_mux *meas = to_clk_measure(hw);
+ u32 regval = 0;
+ int dbg_cc = 0;
+
+ dbg_cc = meas->parent[index].dbg_cc;
+
+ if (dbg_cc != GCC) {
+ /* Update the recursive debug mux */
+ regmap_read(meas->regmap[dbg_cc],
+ meas->parent[index].mux_offset, &regval);
+ regval &= ~meas->parent[index].mux_sel_mask <<
+ meas->parent[index].mux_sel_shift;
+ regval |= (meas->parent[index].dbg_cc_mux_sel &
+ meas->parent[index].mux_sel_mask) <<
+ meas->parent[index].mux_sel_shift;
+ regmap_write(meas->regmap[dbg_cc],
+ meas->parent[index].mux_offset, regval);
+
+ regmap_read(meas->regmap[dbg_cc],
+ meas->parent[index].post_div_offset, &regval);
+ regval &= ~meas->parent[index].post_div_mask <<
+ meas->parent[index].post_div_shift;
+ regval |= ((meas->parent[index].post_div_val - 1) &
+ meas->parent[index].post_div_mask) <<
+ meas->parent[index].post_div_shift;
+ regmap_write(meas->regmap[dbg_cc],
+ meas->parent[index].post_div_offset, regval);
+
+ regmap_read(meas->regmap[dbg_cc],
+ meas->parent[index].cbcr_offset, &regval);
+ regval |= BIT(0);
+ regmap_write(meas->regmap[dbg_cc],
+ meas->parent[index].cbcr_offset, regval);
+ }
+
+ /* Update the debug sel for GCC */
+ regmap_read(meas->regmap[GCC], meas->debug_offset, &regval);
+ regval &= ~meas->src_sel_mask << meas->src_sel_shift;
+ regval |= (meas->parent[index].prim_mux_sel & meas->src_sel_mask) <<
+ meas->src_sel_shift;
+ regmap_write(meas->regmap[GCC], meas->debug_offset, regval);
+
+ /* Set the GCC mux's post divider bits */
+ regmap_read(meas->regmap[GCC], meas->post_div_offset, &regval);
+ regval &= ~meas->post_div_mask << meas->post_div_shift;
+ regval |= ((meas->parent[index].prim_mux_div_val - 1) &
+ meas->post_div_mask) << meas->post_div_shift;
+ regmap_write(meas->regmap[GCC], meas->post_div_offset, regval);
+
+ /* Turn on the GCC_DEBUG_CBCR */
+ regmap_read(meas->regmap[GCC], meas->cbcr_offset, &regval);
+ regval |= BIT(0);
+ regmap_write(meas->regmap[GCC], meas->cbcr_offset, regval);
+
+ return 0;
+}
+
+const struct clk_ops clk_debug_mux_ops = {
+ .get_parent = clk_debug_mux_get_parent,
+ .set_parent = clk_debug_mux_set_parent,
+};
+EXPORT_SYMBOL(clk_debug_mux_ops);
+
+static int clk_debug_measure_get(void *data, u64 *val)
+{
+ struct clk_hw *hw = data, *par;
+ struct clk_debug_mux *meas = to_clk_measure(measure);
+ int index;
+ int ret = 0;
+ unsigned long meas_rate, sw_rate;
+
+ mutex_lock(&clk_debug_lock);
+
+ ret = clk_set_parent(measure->clk, hw->clk);
+ if (!ret) {
+ par = measure;
+ index = clk_debug_mux_get_parent(measure);
+ while (par && par != hw) {
+ if (par->init->ops->enable)
+ par->init->ops->enable(par);
+ par = clk_hw_get_parent(par);
+ }
+ *val = clk_debug_mux_measure_rate(measure);
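+ /* Undo the post dividers programmed along the debug mux path. */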
+ if (meas->parent[index].dbg_cc != GCC)
+ *val *= meas->parent[index].post_div_val;
+ *val *= meas->parent[index].prim_mux_div_val;
+ }
+
+ meas_rate = clk_get_rate(hw->clk);
+ par = clk_hw_get_parent(measure);
+ if (!par) {
+ mutex_unlock(&clk_debug_lock);
+ return -EINVAL;
+ }
+
+ sw_rate = clk_get_rate(par->clk);
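+ /* If the clock's framework rate is at least twice the rate seen at
+  * the mux input, scale the measured value by the implied divider.
+  */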
+ if (sw_rate && meas_rate >= (sw_rate * 2))
+ *val *= DIV_ROUND_CLOSEST(meas_rate, sw_rate);
+
+ mutex_unlock(&clk_debug_lock);
+
+ return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clk_measure_fops, clk_debug_measure_get,
+ NULL, "%lld\n");
+
+int clk_debug_measure_add(struct clk_hw *hw, struct dentry *dentry)
+{
+ if (IS_ERR_OR_NULL(measure)) {
+ pr_err_once("Please check if `measure` clk is registered.\n");
+ return 0;
+ }
+
+ if (clk_set_parent(measure->clk, hw->clk))
+ return 0;
+
+ debugfs_create_file("clk_measure", 0444, dentry, hw,
+ &clk_measure_fops);
+ return 0;
+}
+EXPORT_SYMBOL(clk_debug_measure_add);
+
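+ /* The first clk_hw registered with CLK_IS_MEASURE becomes the global
+  * debug mux used for all measurements.
+  */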
+int clk_debug_measure_register(struct clk_hw *hw)
+{
+ if (IS_ERR_OR_NULL(measure)) {
+ if (hw->init->flags & CLK_IS_MEASURE) {
+ measure = hw;
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(clk_debug_measure_register);
+
diff --git a/drivers/clk/qcom/clk-debug.h b/drivers/clk/qcom/clk-debug.h
new file mode 100644
index 0000000..280704e
--- /dev/null
+++ b/drivers/clk/qcom/clk-debug.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_DEBUG_H__
+#define __QCOM_CLK_DEBUG_H__
+
+#include "../clk.h"
+
+/* Debugfs Measure Clocks */
+
+/**
+ * struct measure_clk_data - Structure of clk measure
+ *
+ * @cxo: XO clock.
+ * @xo_div4_cbcr: offset of debug XO/4 div register.
+ * @ctl_reg: offset of debug control register.
+ * @status_reg: offset of debug status register.
+ */
+struct measure_clk_data {
+ struct clk *cxo;
+ u32 ctl_reg;
+ u32 status_reg;
+ u32 xo_div4_cbcr;
+};
+
+/**
+ * List of Debug clock controllers.
+ */
+enum debug_cc {
+ GCC,
+ CAM_CC,
+ DISP_CC,
+ GPU_CC,
+ VIDEO_CC,
+ CPU,
+};
+
+/**
+ * struct clk_src - Structure of clock source for debug mux
+ *
+ * @parents: clock name to be used as parent for debug mux.
+ * @prim_mux_sel: debug mux index at global clock controller.
+ * @prim_mux_div_val: post-divider setting for the primary (GCC) debug mux.
+ * @dbg_cc: indicates the clock controller for recursive debug
+ * clock controllers.
+ * @dbg_cc_mux_sel: indicates the debug mux index at recursive debug mux.
+ * @mux_sel_mask: indicates the mask for the mux selection.
+ * @mux_sel_shift: indicates the shift required for mux selection.
+ * @post_div_mask: indicates the post div mask to be used at recursive
+ * debug mux.
+ * @post_div_shift: indicates the shift required for post divider
+ * configuration.
+ * @post_div_val: indicates the post div value to be used at recursive
+ * debug mux.
+ * @mux_offset: the debug mux offset.
+ * @post_div_offset: register with post-divider settings for the debug mux.
+ * @cbcr_offset: branch register to turn on debug mux.
+ */
+struct clk_src {
+ const char *parents;
+ int prim_mux_sel;
+ u32 prim_mux_div_val;
+ enum debug_cc dbg_cc;
+ int dbg_cc_mux_sel;
+ u32 mux_sel_mask;
+ u32 mux_sel_shift;
+ u32 post_div_mask;
+ u32 post_div_shift;
+ u32 post_div_val;
+ u32 mux_offset;
+ u32 post_div_offset;
+ u32 cbcr_offset;
+};
+
+#define MUX_SRC_LIST(...) \
+ .parent = (struct clk_src[]){__VA_ARGS__}, \
+ .num_parents = ARRAY_SIZE(((struct clk_src[]){__VA_ARGS__}))
+
+/**
+ * struct clk_debug_mux - Structure of clock debug mux
+ *
+ * @parent: structure of clk_src
+ * @num_parents: number of parents
+ * @regmap: regmaps of debug mux
+ * @priv: private measure_clk_data to be used by debug mux
+ * @debug_offset: debug mux offset.
+ * @post_div_offset: register with post-divider settings for the debug mux.
+ * @cbcr_offset: branch register to turn on debug mux.
+ * @src_sel_mask: indicates the mask to be used for src selection in
+ primary mux.
+ * @src_sel_shift: indicates the shift required for source selection in
+ primary mux.
+ * @post_div_mask: indicates the post div mask to be used for the primary
+ mux.
+ * @post_div_shift: indicates the shift required for post divider
+ selection in primary mux.
+ * @hw: handle between common and hardware-specific interfaces.
+ */
+struct clk_debug_mux {
+ struct clk_src *parent;
+ int num_parents;
+ struct regmap **regmap;
+ void *priv;
+ u32 debug_offset;
+ u32 post_div_offset;
+ u32 cbcr_offset;
+ u32 src_sel_mask;
+ u32 src_sel_shift;
+ u32 post_div_mask;
+ u32 post_div_shift;
+ struct clk_hw hw;
+};
+
+#define to_clk_measure(_hw) container_of((_hw), struct clk_debug_mux, hw)
+
+extern const struct clk_ops clk_debug_mux_ops;
+
+int clk_debug_measure_register(struct clk_hw *hw);
+int clk_debug_measure_add(struct clk_hw *hw, struct dentry *dentry);
+
+#endif
diff --git a/drivers/clk/qcom/clk-dummy.c b/drivers/clk/qcom/clk-dummy.c
index e2465c4..3435999 100644
--- a/drivers/clk/qcom/clk-dummy.c
+++ b/drivers/clk/qcom/clk-dummy.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include "common.h"
+#include "clk-debug.h"
#define to_clk_dummy(_hw) container_of(_hw, struct clk_dummy, hw)
@@ -60,6 +61,7 @@ struct clk_ops clk_dummy_ops = {
.round_rate = dummy_clk_round_rate,
.recalc_rate = dummy_clk_recalc_rate,
.set_flags = dummy_clk_set_flags,
+ .debug_init = clk_debug_measure_add,
};
EXPORT_SYMBOL_GPL(clk_dummy_ops);
diff --git a/drivers/clk/qcom/debugcc-sdm845.c b/drivers/clk/qcom/debugcc-sdm845.c
new file mode 100644
index 0000000..d74db61
--- /dev/null
+++ b/drivers/clk/qcom/debugcc-sdm845.c
@@ -0,0 +1,885 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-debug.h"
+
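+ /* GCC debug counter control/status and XO/4 branch register offsets. */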
+static struct measure_clk_data debug_mux_priv = {
+ .ctl_reg = 0x62024,
+ .status_reg = 0x62028,
+ .xo_div4_cbcr = 0x43008,
+};
+
+static const char *const debug_mux_parent_names[] = {
+ "cam_cc_bps_ahb_clk",
+ "cam_cc_bps_areg_clk",
+ "cam_cc_bps_axi_clk",
+ "cam_cc_bps_clk",
+ "cam_cc_camnoc_atb_clk",
+ "cam_cc_camnoc_axi_clk",
+ "cam_cc_cci_clk",
+ "cam_cc_cpas_ahb_clk",
+ "cam_cc_csi0phytimer_clk",
+ "cam_cc_csi1phytimer_clk",
+ "cam_cc_csi2phytimer_clk",
+ "cam_cc_csiphy0_clk",
+ "cam_cc_csiphy1_clk",
+ "cam_cc_csiphy2_clk",
+ "cam_cc_fd_core_clk",
+ "cam_cc_fd_core_uar_clk",
+ "cam_cc_icp_apb_clk",
+ "cam_cc_icp_atb_clk",
+ "cam_cc_icp_clk",
+ "cam_cc_icp_cti_clk",
+ "cam_cc_icp_ts_clk",
+ "cam_cc_ife_0_axi_clk",
+ "cam_cc_ife_0_clk",
+ "cam_cc_ife_0_cphy_rx_clk",
+ "cam_cc_ife_0_csid_clk",
+ "cam_cc_ife_0_dsp_clk",
+ "cam_cc_ife_1_axi_clk",
+ "cam_cc_ife_1_clk",
+ "cam_cc_ife_1_cphy_rx_clk",
+ "cam_cc_ife_1_csid_clk",
+ "cam_cc_ife_1_dsp_clk",
+ "cam_cc_ife_lite_clk",
+ "cam_cc_ife_lite_cphy_rx_clk",
+ "cam_cc_ife_lite_csid_clk",
+ "cam_cc_ipe_0_ahb_clk",
+ "cam_cc_ipe_0_areg_clk",
+ "cam_cc_ipe_0_axi_clk",
+ "cam_cc_ipe_0_clk",
+ "cam_cc_ipe_1_ahb_clk",
+ "cam_cc_ipe_1_areg_clk",
+ "cam_cc_ipe_1_axi_clk",
+ "cam_cc_ipe_1_clk",
+ "cam_cc_jpeg_clk",
+ "cam_cc_lrme_clk",
+ "cam_cc_mclk0_clk",
+ "cam_cc_mclk1_clk",
+ "cam_cc_mclk2_clk",
+ "cam_cc_mclk3_clk",
+ "cam_cc_soc_ahb_clk",
+ "cam_cc_sys_tmr_clk",
+ "disp_cc_mdss_ahb_clk",
+ "disp_cc_mdss_axi_clk",
+ "disp_cc_mdss_byte0_clk",
+ "disp_cc_mdss_byte0_intf_clk",
+ "disp_cc_mdss_byte1_clk",
+ "disp_cc_mdss_byte1_intf_clk",
+ "disp_cc_mdss_dp_aux_clk",
+ "disp_cc_mdss_dp_crypto_clk",
+ "disp_cc_mdss_dp_link_clk",
+ "disp_cc_mdss_dp_link_intf_clk",
+ "disp_cc_mdss_dp_pixel1_clk",
+ "disp_cc_mdss_dp_pixel_clk",
+ "disp_cc_mdss_esc0_clk",
+ "disp_cc_mdss_esc1_clk",
+ "disp_cc_mdss_mdp_clk",
+ "disp_cc_mdss_mdp_lut_clk",
+ "disp_cc_mdss_pclk0_clk",
+ "disp_cc_mdss_pclk1_clk",
+ "disp_cc_mdss_qdss_at_clk",
+ "disp_cc_mdss_qdss_tsctr_div8_clk",
+ "disp_cc_mdss_rot_clk",
+ "disp_cc_mdss_rscc_ahb_clk",
+ "disp_cc_mdss_rscc_vsync_clk",
+ "disp_cc_mdss_spdm_debug_clk",
+ "disp_cc_mdss_spdm_dp_crypto_clk",
+ "disp_cc_mdss_spdm_dp_pixel1_clk",
+ "disp_cc_mdss_spdm_dp_pixel_clk",
+ "disp_cc_mdss_spdm_mdp_clk",
+ "disp_cc_mdss_spdm_pclk0_clk",
+ "disp_cc_mdss_spdm_pclk1_clk",
+ "disp_cc_mdss_spdm_rot_clk",
+ "disp_cc_mdss_vsync_clk",
+ "gcc_aggre_noc_pcie_tbu_clk",
+ "gcc_aggre_ufs_card_axi_clk",
+ "gcc_aggre_ufs_phy_axi_clk",
+ "gcc_aggre_usb3_prim_axi_clk",
+ "gcc_aggre_usb3_sec_axi_clk",
+ "gcc_boot_rom_ahb_clk",
+ "gcc_camera_ahb_clk",
+ "gcc_camera_axi_clk",
+ "gcc_camera_xo_clk",
+ "gcc_ce1_ahb_clk",
+ "gcc_ce1_axi_clk",
+ "gcc_ce1_clk",
+ "gcc_cfg_noc_usb3_prim_axi_clk",
+ "gcc_cfg_noc_usb3_sec_axi_clk",
+ "gcc_cpuss_ahb_clk",
+ "gcc_cpuss_dvm_bus_clk",
+ "gcc_cpuss_gnoc_clk",
+ "gcc_cpuss_rbcpr_clk",
+ "gcc_ddrss_gpu_axi_clk",
+ "gcc_disp_ahb_clk",
+ "gcc_disp_axi_clk",
+ "gcc_disp_gpll0_clk_src",
+ "gcc_disp_gpll0_div_clk_src",
+ "gcc_disp_xo_clk",
+ "gcc_gp1_clk",
+ "gcc_gp2_clk",
+ "gcc_gp3_clk",
+ "gcc_gpu_cfg_ahb_clk",
+ "gcc_gpu_gpll0_clk_src",
+ "gcc_gpu_gpll0_div_clk_src",
+ "gcc_gpu_memnoc_gfx_clk",
+ "gcc_gpu_snoc_dvm_gfx_clk",
+ "gcc_mss_axis2_clk",
+ "gcc_mss_cfg_ahb_clk",
+ "gcc_mss_gpll0_div_clk_src",
+ "gcc_mss_mfab_axis_clk",
+ "gcc_mss_q6_memnoc_axi_clk",
+ "gcc_mss_snoc_axi_clk",
+ "gcc_pcie_0_aux_clk",
+ "gcc_pcie_0_cfg_ahb_clk",
+ "gcc_pcie_0_mstr_axi_clk",
+ "gcc_pcie_0_pipe_clk",
+ "gcc_pcie_0_slv_axi_clk",
+ "gcc_pcie_0_slv_q2a_axi_clk",
+ "gcc_pcie_1_aux_clk",
+ "gcc_pcie_1_cfg_ahb_clk",
+ "gcc_pcie_1_mstr_axi_clk",
+ "gcc_pcie_1_pipe_clk",
+ "gcc_pcie_1_slv_axi_clk",
+ "gcc_pcie_1_slv_q2a_axi_clk",
+ "gcc_pcie_phy_aux_clk",
+ "gcc_pcie_phy_refgen_clk",
+ "gcc_pdm2_clk",
+ "gcc_pdm_ahb_clk",
+ "gcc_pdm_xo4_clk",
+ "gcc_prng_ahb_clk",
+ "gcc_qmip_camera_ahb_clk",
+ "gcc_qmip_disp_ahb_clk",
+ "gcc_qmip_video_ahb_clk",
+ "gcc_qupv3_wrap0_core_2x_clk",
+ "gcc_qupv3_wrap0_core_clk",
+ "gcc_qupv3_wrap0_s0_clk",
+ "gcc_qupv3_wrap0_s1_clk",
+ "gcc_qupv3_wrap0_s2_clk",
+ "gcc_qupv3_wrap0_s3_clk",
+ "gcc_qupv3_wrap0_s4_clk",
+ "gcc_qupv3_wrap0_s5_clk",
+ "gcc_qupv3_wrap0_s6_clk",
+ "gcc_qupv3_wrap0_s7_clk",
+ "gcc_qupv3_wrap1_core_2x_clk",
+ "gcc_qupv3_wrap1_core_clk",
+ "gcc_qupv3_wrap1_s0_clk",
+ "gcc_qupv3_wrap1_s1_clk",
+ "gcc_qupv3_wrap1_s2_clk",
+ "gcc_qupv3_wrap1_s3_clk",
+ "gcc_qupv3_wrap1_s4_clk",
+ "gcc_qupv3_wrap1_s5_clk",
+ "gcc_qupv3_wrap1_s6_clk",
+ "gcc_qupv3_wrap1_s7_clk",
+ "gcc_qupv3_wrap_0_m_ahb_clk",
+ "gcc_qupv3_wrap_0_s_ahb_clk",
+ "gcc_qupv3_wrap_1_m_ahb_clk",
+ "gcc_qupv3_wrap_1_s_ahb_clk",
+ "gcc_sdcc2_ahb_clk",
+ "gcc_sdcc2_apps_clk",
+ "gcc_sdcc4_ahb_clk",
+ "gcc_sdcc4_apps_clk",
+ "gcc_sys_noc_cpuss_ahb_clk",
+ "gcc_tsif_ahb_clk",
+ "gcc_tsif_inactivity_timers_clk",
+ "gcc_tsif_ref_clk",
+ "gcc_ufs_card_ahb_clk",
+ "gcc_ufs_card_axi_clk",
+ "gcc_ufs_card_ice_core_clk",
+ "gcc_ufs_card_phy_aux_clk",
+ "gcc_ufs_card_rx_symbol_0_clk",
+ "gcc_ufs_card_rx_symbol_1_clk",
+ "gcc_ufs_card_tx_symbol_0_clk",
+ "gcc_ufs_card_unipro_core_clk",
+ "gcc_ufs_phy_ahb_clk",
+ "gcc_ufs_phy_axi_clk",
+ "gcc_ufs_phy_ice_core_clk",
+ "gcc_ufs_phy_phy_aux_clk",
+ "gcc_ufs_phy_rx_symbol_0_clk",
+ "gcc_ufs_phy_rx_symbol_1_clk",
+ "gcc_ufs_phy_tx_symbol_0_clk",
+ "gcc_ufs_phy_unipro_core_clk",
+ "gcc_usb30_prim_master_clk",
+ "gcc_usb30_prim_mock_utmi_clk",
+ "gcc_usb30_prim_sleep_clk",
+ "gcc_usb30_sec_master_clk",
+ "gcc_usb30_sec_mock_utmi_clk",
+ "gcc_usb30_sec_sleep_clk",
+ "gcc_usb3_prim_phy_aux_clk",
+ "gcc_usb3_prim_phy_com_aux_clk",
+ "gcc_usb3_prim_phy_pipe_clk",
+ "gcc_usb3_sec_phy_aux_clk",
+ "gcc_usb3_sec_phy_com_aux_clk",
+ "gcc_usb3_sec_phy_pipe_clk",
+ "gcc_usb_phy_cfg_ahb2phy_clk",
+ "gcc_video_ahb_clk",
+ "gcc_video_axi_clk",
+ "gcc_video_xo_clk",
+ "gpu_cc_acd_cxo_clk",
+ "gpu_cc_ahb_clk",
+ "gpu_cc_crc_ahb_clk",
+ "gpu_cc_cx_apb_clk",
+ "gpu_cc_cx_gfx3d_clk",
+ "gpu_cc_cx_gfx3d_slv_clk",
+ "gpu_cc_cx_gmu_clk",
+ "gpu_cc_cx_qdss_at_clk",
+ "gpu_cc_cx_qdss_trig_clk",
+ "gpu_cc_cx_qdss_tsctr_clk",
+ "gpu_cc_cx_snoc_dvm_clk",
+ "gpu_cc_cxo_aon_clk",
+ "gpu_cc_cxo_clk",
+ "gpu_cc_gx_cxo_clk",
+ "gpu_cc_gx_gmu_clk",
+ "gpu_cc_gx_qdss_tsctr_clk",
+ "gpu_cc_gx_vsense_clk",
+ "gpu_cc_rbcpr_ahb_clk",
+ "gpu_cc_rbcpr_clk",
+ "gpu_cc_sleep_clk",
+ "gpu_cc_spdm_gx_gfx3d_div_clk",
+ "video_cc_apb_clk",
+ "video_cc_at_clk",
+ "video_cc_qdss_trig_clk",
+ "video_cc_qdss_tsctr_div8_clk",
+ "video_cc_vcodec0_axi_clk",
+ "video_cc_vcodec0_core_clk",
+ "video_cc_vcodec1_axi_clk",
+ "video_cc_vcodec1_core_clk",
+ "video_cc_venus_ahb_clk",
+ "video_cc_venus_ctl_axi_clk",
+ "video_cc_venus_ctl_core_clk",
+};
+
+static struct clk_debug_mux gcc_debug_mux = {
+ .priv = &debug_mux_priv,
+ .debug_offset = 0x62008,
+ .post_div_offset = 0x62000,
+ .cbcr_offset = 0x62004,
+ .src_sel_mask = 0x3FF,
+ .src_sel_shift = 0,
+ .post_div_mask = 0xF,
+ .post_div_shift = 0,
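+ /* Each entry follows struct clk_src: name, prim_mux_sel,
+  * prim_mux_div_val, dbg_cc, dbg_cc_mux_sel, mux_sel_mask,
+  * mux_sel_shift, post_div_mask, post_div_shift, post_div_val,
+  * mux_offset, post_div_offset, cbcr_offset.
+  */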
+ MUX_SRC_LIST(
+ { "cam_cc_bps_ahb_clk", 0x46, 4, CAM_CC,
+ 0xE, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_bps_areg_clk", 0x46, 4, CAM_CC,
+ 0xD, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_bps_axi_clk", 0x46, 4, CAM_CC,
+ 0xC, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_bps_clk", 0x46, 4, CAM_CC,
+ 0xB, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_camnoc_atb_clk", 0x46, 4, CAM_CC,
+ 0x34, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_camnoc_axi_clk", 0x46, 4, CAM_CC,
+ 0x2D, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_cci_clk", 0x46, 4, CAM_CC,
+ 0x2A, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_cpas_ahb_clk", 0x46, 4, CAM_CC,
+ 0x2C, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_csi0phytimer_clk", 0x46, 4, CAM_CC,
+ 0x5, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_csi1phytimer_clk", 0x46, 4, CAM_CC,
+ 0x7, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_csi2phytimer_clk", 0x46, 4, CAM_CC,
+ 0x9, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_csiphy0_clk", 0x46, 4, CAM_CC,
+ 0x6, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_csiphy1_clk", 0x46, 4, CAM_CC,
+ 0x8, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_csiphy2_clk", 0x46, 4, CAM_CC,
+ 0xA, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_fd_core_clk", 0x46, 4, CAM_CC,
+ 0x28, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_fd_core_uar_clk", 0x46, 4, CAM_CC,
+ 0x29, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_icp_apb_clk", 0x46, 4, CAM_CC,
+ 0x32, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_icp_atb_clk", 0x46, 4, CAM_CC,
+ 0x2F, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_icp_clk", 0x46, 4, CAM_CC,
+ 0x26, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_icp_cti_clk", 0x46, 4, CAM_CC,
+ 0x30, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_icp_ts_clk", 0x46, 4, CAM_CC,
+ 0x31, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ife_0_axi_clk", 0x46, 4, CAM_CC,
+ 0x1B, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ife_0_clk", 0x46, 4, CAM_CC,
+ 0x17, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ife_0_cphy_rx_clk", 0x46, 4, CAM_CC,
+ 0x1A, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ife_0_csid_clk", 0x46, 4, CAM_CC,
+ 0x19, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ife_0_dsp_clk", 0x46, 4, CAM_CC,
+ 0x18, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ife_1_axi_clk", 0x46, 4, CAM_CC,
+ 0x21, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ife_1_clk", 0x46, 4, CAM_CC,
+ 0x1D, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ife_1_cphy_rx_clk", 0x46, 4, CAM_CC,
+ 0x20, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ife_1_csid_clk", 0x46, 4, CAM_CC,
+ 0x1F, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ife_1_dsp_clk", 0x46, 4, CAM_CC,
+ 0x1E, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ife_lite_clk", 0x46, 4, CAM_CC,
+ 0x22, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ife_lite_cphy_rx_clk", 0x46, 4, CAM_CC,
+ 0x24, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ife_lite_csid_clk", 0x46, 4, CAM_CC,
+ 0x23, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ipe_0_ahb_clk", 0x46, 4, CAM_CC,
+ 0x12, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ipe_0_areg_clk", 0x46, 4, CAM_CC,
+ 0x11, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ipe_0_axi_clk", 0x46, 4, CAM_CC,
+ 0x10, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ipe_0_clk", 0x46, 4, CAM_CC,
+ 0xF, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ipe_1_ahb_clk", 0x46, 4, CAM_CC,
+ 0x16, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ipe_1_areg_clk", 0x46, 4, CAM_CC,
+ 0x15, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ipe_1_axi_clk", 0x46, 4, CAM_CC,
+ 0x14, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_ipe_1_clk", 0x46, 4, CAM_CC,
+ 0x13, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_jpeg_clk", 0x46, 4, CAM_CC,
+ 0x25, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_lrme_clk", 0x46, 4, CAM_CC,
+ 0x2B, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_mclk0_clk", 0x46, 4, CAM_CC,
+ 0x1, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_mclk1_clk", 0x46, 4, CAM_CC,
+ 0x2, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_mclk2_clk", 0x46, 4, CAM_CC,
+ 0x3, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_mclk3_clk", 0x46, 4, CAM_CC,
+ 0x4, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_soc_ahb_clk", 0x46, 4, CAM_CC,
+ 0x2E, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "cam_cc_sys_tmr_clk", 0x46, 4, CAM_CC,
+ 0x33, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+ { "disp_cc_mdss_ahb_clk", 0x47, 4, DISP_CC,
+ 0x13, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_axi_clk", 0x47, 4, DISP_CC,
+ 0x14, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_byte0_clk", 0x47, 4, DISP_CC,
+ 0x7, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_byte0_intf_clk", 0x47, 4, DISP_CC,
+ 0x8, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_byte1_clk", 0x47, 4, DISP_CC,
+ 0x9, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_byte1_intf_clk", 0x47, 4, DISP_CC,
+ 0xA, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_dp_aux_clk", 0x47, 4, DISP_CC,
+ 0x12, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_dp_crypto_clk", 0x47, 4, DISP_CC,
+ 0xF, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_dp_link_clk", 0x47, 4, DISP_CC,
+ 0xD, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_dp_link_intf_clk", 0x47, 4, DISP_CC,
+ 0xE, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_dp_pixel1_clk", 0x47, 4, DISP_CC,
+ 0x11, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_dp_pixel_clk", 0x47, 4, DISP_CC,
+ 0x10, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_esc0_clk", 0x47, 4, DISP_CC,
+ 0xB, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_esc1_clk", 0x47, 4, DISP_CC,
+ 0xC, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_mdp_clk", 0x47, 4, DISP_CC,
+ 0x3, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_mdp_lut_clk", 0x47, 4, DISP_CC,
+ 0x5, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_pclk0_clk", 0x47, 4, DISP_CC,
+ 0x1, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_pclk1_clk", 0x47, 4, DISP_CC,
+ 0x2, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_qdss_at_clk", 0x47, 4, DISP_CC,
+ 0x15, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_qdss_tsctr_div8_clk", 0x47, 4, DISP_CC,
+ 0x16, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_rot_clk", 0x47, 4, DISP_CC,
+ 0x4, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_rscc_ahb_clk", 0x47, 4, DISP_CC,
+ 0x17, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_rscc_vsync_clk", 0x47, 4, DISP_CC,
+ 0x18, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_spdm_debug_clk", 0x47, 4, DISP_CC,
+ 0x20, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_spdm_dp_crypto_clk", 0x47, 4, DISP_CC,
+ 0x1D, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_spdm_dp_pixel1_clk", 0x47, 4, DISP_CC,
+ 0x1F, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_spdm_dp_pixel_clk", 0x47, 4, DISP_CC,
+ 0x1E, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_spdm_mdp_clk", 0x47, 4, DISP_CC,
+ 0x1B, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_spdm_pclk0_clk", 0x47, 4, DISP_CC,
+ 0x19, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_spdm_pclk1_clk", 0x47, 4, DISP_CC,
+ 0x1A, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_spdm_rot_clk", 0x47, 4, DISP_CC,
+ 0x1C, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "disp_cc_mdss_vsync_clk", 0x47, 4, DISP_CC,
+ 0x6, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "gcc_aggre_noc_pcie_tbu_clk", 0x2D, 4, GCC,
+ 0x2D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_aggre_ufs_card_axi_clk", 0x11E, 4, GCC,
+ 0x11E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_aggre_ufs_phy_axi_clk", 0x11D, 4, GCC,
+ 0x11D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_aggre_usb3_prim_axi_clk", 0x11B, 4, GCC,
+ 0x11B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_aggre_usb3_sec_axi_clk", 0x11C, 4, GCC,
+ 0x11C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_boot_rom_ahb_clk", 0x94, 4, GCC,
+ 0x94, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_camera_ahb_clk", 0x3A, 4, GCC,
+ 0x3A, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_camera_axi_clk", 0x40, 4, GCC,
+ 0x40, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_camera_xo_clk", 0x43, 4, GCC,
+ 0x43, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ce1_ahb_clk", 0xA9, 4, GCC,
+ 0xA9, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ce1_axi_clk", 0xA8, 4, GCC,
+ 0xA8, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ce1_clk", 0xA7, 4, GCC,
+ 0xA7, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_cfg_noc_usb3_prim_axi_clk", 0x1D, 4, GCC,
+ 0x1D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_cfg_noc_usb3_sec_axi_clk", 0x1E, 4, GCC,
+ 0x1E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_cpuss_ahb_clk", 0xCE, 4, GCC,
+ 0xCE, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_cpuss_dvm_bus_clk", 0xD3, 4, GCC,
+ 0xD3, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_cpuss_gnoc_clk", 0xCF, 4, GCC,
+ 0xCF, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_cpuss_rbcpr_clk", 0xD0, 4, GCC,
+ 0xD0, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ddrss_gpu_axi_clk", 0xBB, 4, GCC,
+ 0xBB, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_disp_ahb_clk", 0x3B, 4, GCC,
+ 0x3B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_disp_axi_clk", 0x41, 4, GCC,
+ 0x41, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_disp_gpll0_clk_src", 0x4C, 4, GCC,
+ 0x4C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_disp_gpll0_div_clk_src", 0x4D, 4, GCC,
+ 0x4D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_disp_xo_clk", 0x44, 4, GCC,
+ 0x44, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_gp1_clk", 0xDE, 4, GCC,
+ 0xDE, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_gp2_clk", 0xDF, 4, GCC,
+ 0xDF, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_gp3_clk", 0xE0, 4, GCC,
+ 0xE0, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_gpu_cfg_ahb_clk", 0x142, 4, GCC,
+ 0x142, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_gpu_gpll0_clk_src", 0x148, 4, GCC,
+ 0x148, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_gpu_gpll0_div_clk_src", 0x149, 4, GCC,
+ 0x149, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_gpu_memnoc_gfx_clk", 0x145, 4, GCC,
+ 0x145, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_gpu_snoc_dvm_gfx_clk", 0x147, 4, GCC,
+ 0x147, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_mss_axis2_clk", 0x12F, 4, GCC,
+ 0x12F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_mss_cfg_ahb_clk", 0x12D, 4, GCC,
+ 0x12D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_mss_gpll0_div_clk_src", 0x133, 4, GCC,
+ 0x133, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_mss_mfab_axis_clk", 0x12E, 4, GCC,
+ 0x12E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_mss_q6_memnoc_axi_clk", 0x135, 4, GCC,
+ 0x135, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_mss_snoc_axi_clk", 0x134, 4, GCC,
+ 0x134, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pcie_0_aux_clk", 0xE5, 4, GCC,
+ 0xE5, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pcie_0_cfg_ahb_clk", 0xE4, 4, GCC,
+ 0xE4, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pcie_0_mstr_axi_clk", 0xE3, 4, GCC,
+ 0xE3, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pcie_0_pipe_clk", 0xE6, 4, GCC,
+ 0xE6, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pcie_0_slv_axi_clk", 0xE2, 4, GCC,
+ 0xE2, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pcie_0_slv_q2a_axi_clk", 0xE1, 4, GCC,
+ 0xE1, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pcie_1_aux_clk", 0xEC, 4, GCC,
+ 0xEC, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pcie_1_cfg_ahb_clk", 0xEB, 4, GCC,
+ 0xEB, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pcie_1_mstr_axi_clk", 0xEA, 4, GCC,
+ 0xEA, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pcie_1_pipe_clk", 0xED, 4, GCC,
+ 0xED, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pcie_1_slv_axi_clk", 0xE9, 4, GCC,
+ 0xE9, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pcie_1_slv_q2a_axi_clk", 0xE8, 4, GCC,
+ 0xE8, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pcie_phy_aux_clk", 0xEF, 4, GCC,
+ 0xEF, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pcie_phy_refgen_clk", 0x160, 4, GCC,
+ 0x160, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pdm2_clk", 0x8E, 4, GCC,
+ 0x8E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pdm_ahb_clk", 0x8C, 4, GCC,
+ 0x8C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_pdm_xo4_clk", 0x8D, 4, GCC,
+ 0x8D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_prng_ahb_clk", 0x8F, 4, GCC,
+ 0x8F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qmip_camera_ahb_clk", 0x3D, 4, GCC,
+ 0x3D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qmip_disp_ahb_clk", 0x3E, 4, GCC,
+ 0x3E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qmip_video_ahb_clk", 0x3C, 4, GCC,
+ 0x3C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap0_core_2x_clk", 0x77, 4, GCC,
+ 0x77, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap0_core_clk", 0x76, 4, GCC,
+ 0x76, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap0_s0_clk", 0x78, 4, GCC,
+ 0x78, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap0_s1_clk", 0x79, 4, GCC,
+ 0x79, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap0_s2_clk", 0x7A, 4, GCC,
+ 0x7A, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap0_s3_clk", 0x7B, 4, GCC,
+ 0x7B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap0_s4_clk", 0x7C, 4, GCC,
+ 0x7C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap0_s5_clk", 0x7D, 4, GCC,
+ 0x7D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap0_s6_clk", 0x7E, 4, GCC,
+ 0x7E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap0_s7_clk", 0x7F, 4, GCC,
+ 0x7F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap1_core_2x_clk", 0x80, 4, GCC,
+ 0x80, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap1_core_clk", 0x81, 4, GCC,
+ 0x81, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap1_s0_clk", 0x84, 4, GCC,
+ 0x84, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap1_s1_clk", 0x85, 4, GCC,
+ 0x85, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap1_s2_clk", 0x86, 4, GCC,
+ 0x86, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap1_s3_clk", 0x87, 4, GCC,
+ 0x87, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap1_s4_clk", 0x88, 4, GCC,
+ 0x88, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap1_s5_clk", 0x89, 4, GCC,
+ 0x89, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap1_s6_clk", 0x8A, 4, GCC,
+ 0x8A, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap1_s7_clk", 0x8B, 4, GCC,
+ 0x8B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap_0_m_ahb_clk", 0x74, 4, GCC,
+ 0x74, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap_0_s_ahb_clk", 0x75, 4, GCC,
+ 0x75, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap_1_m_ahb_clk", 0x82, 4, GCC,
+ 0x82, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_qupv3_wrap_1_s_ahb_clk", 0x83, 4, GCC,
+ 0x83, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_sdcc2_ahb_clk", 0x71, 4, GCC,
+ 0x71, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_sdcc2_apps_clk", 0x70, 4, GCC,
+ 0x70, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_sdcc4_ahb_clk", 0x73, 4, GCC,
+ 0x73, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_sdcc4_apps_clk", 0x72, 4, GCC,
+ 0x72, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_sys_noc_cpuss_ahb_clk", 0xC, 4, GCC,
+ 0xC, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_tsif_ahb_clk", 0x90, 4, GCC,
+ 0x90, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_tsif_inactivity_timers_clk", 0x92, 4, GCC,
+ 0x92, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_tsif_ref_clk", 0x91, 4, GCC,
+ 0x91, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_card_ahb_clk", 0xF1, 4, GCC,
+ 0xF1, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_card_axi_clk", 0xF0, 4, GCC,
+ 0xF0, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_card_ice_core_clk", 0xF7, 4, GCC,
+ 0xF7, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_card_phy_aux_clk", 0xF8, 4, GCC,
+ 0xF8, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_card_rx_symbol_0_clk", 0xF3, 4, GCC,
+ 0xF3, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_card_rx_symbol_1_clk", 0xF9, 4, GCC,
+ 0xF9, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_card_tx_symbol_0_clk", 0xF2, 4, GCC,
+ 0xF2, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_card_unipro_core_clk", 0xF6, 4, GCC,
+ 0xF6, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_phy_ahb_clk", 0xFC, 4, GCC,
+ 0xFC, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_phy_axi_clk", 0xFB, 4, GCC,
+ 0xFB, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_phy_ice_core_clk", 0x102, 4, GCC,
+ 0x102, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_phy_phy_aux_clk", 0x103, 4, GCC,
+ 0x103, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_phy_rx_symbol_0_clk", 0xFE, 4, GCC,
+ 0xFE, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_phy_rx_symbol_1_clk", 0x104, 4, GCC,
+ 0x104, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_phy_tx_symbol_0_clk", 0xFD, 4, GCC,
+ 0xFD, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_ufs_phy_unipro_core_clk", 0x101, 4, GCC,
+ 0x101, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_usb30_prim_master_clk", 0x5F, 4, GCC,
+ 0x5F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_usb30_prim_mock_utmi_clk", 0x61, 4, GCC,
+ 0x61, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_usb30_prim_sleep_clk", 0x60, 4, GCC,
+ 0x60, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_usb30_sec_master_clk", 0x65, 4, GCC,
+ 0x65, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_usb30_sec_mock_utmi_clk", 0x67, 4, GCC,
+ 0x67, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_usb30_sec_sleep_clk", 0x66, 4, GCC,
+ 0x66, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_usb3_prim_phy_aux_clk", 0x62, 4, GCC,
+ 0x62, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_usb3_prim_phy_com_aux_clk", 0x63, 4, GCC,
+ 0x63, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_usb3_prim_phy_pipe_clk", 0x64, 4, GCC,
+ 0x64, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_usb3_sec_phy_aux_clk", 0x68, 4, GCC,
+ 0x68, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_usb3_sec_phy_com_aux_clk", 0x69, 4, GCC,
+ 0x69, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_usb3_sec_phy_pipe_clk", 0x6A, 4, GCC,
+ 0x6A, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_usb_phy_cfg_ahb2phy_clk", 0x6F, 4, GCC,
+ 0x6F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_video_ahb_clk", 0x39, 4, GCC,
+ 0x39, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_video_axi_clk", 0x3F, 4, GCC,
+ 0x3F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gcc_video_xo_clk", 0x42, 4, GCC,
+ 0x42, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "gpu_cc_acd_cxo_clk", 0x144, 4, GPU_CC,
+ 0x1F, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_ahb_clk", 0x144, 4, GPU_CC,
+ 0x11, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_crc_ahb_clk", 0x144, 4, GPU_CC,
+ 0x12, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_cx_apb_clk", 0x144, 4, GPU_CC,
+ 0x15, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_cx_gfx3d_clk", 0x144, 4, GPU_CC,
+ 0x1A, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_cx_gfx3d_slv_clk", 0x144, 4, GPU_CC,
+ 0x1B, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_cx_gmu_clk", 0x144, 4, GPU_CC,
+ 0x19, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_cx_qdss_at_clk", 0x144, 4, GPU_CC,
+ 0x13, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_cx_qdss_trig_clk", 0x144, 4, GPU_CC,
+ 0x18, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_cx_qdss_tsctr_clk", 0x144, 4, GPU_CC,
+ 0x14, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_cx_snoc_dvm_clk", 0x144, 4, GPU_CC,
+ 0x16, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_cxo_aon_clk", 0x144, 4, GPU_CC,
+ 0xB, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_cxo_clk", 0x144, 4, GPU_CC,
+ 0xA, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_gx_cxo_clk", 0x144, 4, GPU_CC,
+ 0xF, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_gx_gmu_clk", 0x144, 4, GPU_CC,
+ 0x10, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_gx_qdss_tsctr_clk", 0x144, 4, GPU_CC,
+ 0xE, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_gx_vsense_clk", 0x144, 4, GPU_CC,
+ 0xD, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_rbcpr_ahb_clk", 0x144, 4, GPU_CC,
+ 0x1D, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_rbcpr_clk", 0x144, 4, GPU_CC,
+ 0x1C, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_sleep_clk", 0x144, 4, GPU_CC,
+ 0x17, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "gpu_cc_spdm_gx_gfx3d_div_clk", 0x144, 4, GPU_CC,
+ 0x1E, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+ { "video_cc_apb_clk", 0x48, 4, VIDEO_CC,
+ 0x8, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+ { "video_cc_at_clk", 0x48, 4, VIDEO_CC,
+ 0xB, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+ { "video_cc_qdss_trig_clk", 0x48, 4, VIDEO_CC,
+ 0x7, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+ { "video_cc_qdss_tsctr_div8_clk", 0x48, 4, VIDEO_CC,
+ 0xA, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+ { "video_cc_vcodec0_axi_clk", 0x48, 4, VIDEO_CC,
+ 0x5, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+ { "video_cc_vcodec0_core_clk", 0x48, 4, VIDEO_CC,
+ 0x2, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+ { "video_cc_vcodec1_axi_clk", 0x48, 4, VIDEO_CC,
+ 0x6, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+ { "video_cc_vcodec1_core_clk", 0x48, 4, VIDEO_CC,
+ 0x3, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+ { "video_cc_venus_ahb_clk", 0x48, 4, VIDEO_CC,
+ 0x9, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+ { "video_cc_venus_ctl_axi_clk", 0x48, 4, VIDEO_CC,
+ 0x4, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+ { "video_cc_venus_ctl_core_clk", 0x48, 4, VIDEO_CC,
+ 0x1, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+ ),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_debug_mux",
+ .ops = &clk_debug_mux_ops,
+ .parent_names = debug_mux_parent_names,
+ .num_parents = ARRAY_SIZE(debug_mux_parent_names),
+ .flags = CLK_IS_MEASURE,
+ },
+};
+
+static const struct of_device_id clk_debug_match_table[] = {
+ { .compatible = "qcom,debugcc-sdm845" },
+ {}
+};
+
+static int clk_debug_845_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ int ret = 0, count;
+
+ clk = devm_clk_get(&pdev->dev, "xo_clk_src");
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get xo clock\n");
+ return PTR_ERR(clk);
+ }
+
+ debug_mux_priv.cxo = clk;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,cc-count",
+ &count);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Number of debug clock controllers not specified\n");
+ return ret;
+ }
+
+ if (!count) {
+ dev_err(&pdev->dev, "Count of CC cannot be zero\n");
+ return -EINVAL;
+ }
+
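+ /* One regmap slot per clock controller, indexed by enum debug_cc. */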
+ gcc_debug_mux.regmap = devm_kzalloc(&pdev->dev,
+ sizeof(struct regmap *) * count, GFP_KERNEL);
+ if (!gcc_debug_mux.regmap)
+ return -ENOMEM;
+
+ if (of_get_property(pdev->dev.of_node, "qcom,gcc", NULL)) {
+ gcc_debug_mux.regmap[GCC] =
+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "qcom,gcc");
+ if (IS_ERR(gcc_debug_mux.regmap[GCC])) {
+ pr_err("Failed to map qcom,gcc\n");
+ return PTR_ERR(gcc_debug_mux.regmap[GCC]);
+ }
+ }
+
+ if (of_get_property(pdev->dev.of_node, "qcom,dispcc", NULL)) {
+ gcc_debug_mux.regmap[DISP_CC] =
+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "qcom,dispcc");
+ if (IS_ERR(gcc_debug_mux.regmap[DISP_CC])) {
+ pr_err("Failed to map qcom,dispcc\n");
+ return PTR_ERR(gcc_debug_mux.regmap[DISP_CC]);
+ }
+ }
+
+ if (of_get_property(pdev->dev.of_node, "qcom,videocc", NULL)) {
+ gcc_debug_mux.regmap[VIDEO_CC] =
+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "qcom,videocc");
+ if (IS_ERR(gcc_debug_mux.regmap[VIDEO_CC])) {
+ pr_err("Failed to map qcom,videocc\n");
+ return PTR_ERR(gcc_debug_mux.regmap[VIDEO_CC]);
+ }
+ }
+
+ if (of_get_property(pdev->dev.of_node, "qcom,camcc", NULL)) {
+ gcc_debug_mux.regmap[CAM_CC] =
+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "qcom,camcc");
+ if (IS_ERR(gcc_debug_mux.regmap[CAM_CC])) {
+ pr_err("Failed to map qcom,camcc\n");
+ return PTR_ERR(gcc_debug_mux.regmap[CAM_CC]);
+ }
+ }
+
+ if (of_get_property(pdev->dev.of_node, "qcom,gpucc", NULL)) {
+ gcc_debug_mux.regmap[GPU_CC] =
+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "qcom,gpucc");
+ if (IS_ERR(gcc_debug_mux.regmap[GPU_CC])) {
+ pr_err("Failed to map qcom,gpucc\n");
+ return PTR_ERR(gcc_debug_mux.regmap[GPU_CC]);
+ }
+ }
+
+ clk = devm_clk_register(&pdev->dev, &gcc_debug_mux.hw);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Unable to register GCC debug mux\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_debug_measure_register(&gcc_debug_mux.hw);
+ if (ret)
+ dev_err(&pdev->dev, "Could not register Measure clock\n");
+ else
+ dev_info(&pdev->dev, "Registered debug mux successfully\n");
+
+ return ret;
+}
+
+static struct platform_driver clk_debug_driver = {
+ .probe = clk_debug_845_probe,
+ .driver = {
+ .name = "debugcc-sdm845",
+ .of_match_table = clk_debug_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init clk_debug_845_init(void)
+{
+ return platform_driver_register(&clk_debug_driver);
+}
+fs_initcall(clk_debug_845_init);
+
+MODULE_DESCRIPTION("QTI DEBUG CC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:debugcc-sdm845");
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 44c9b48..6b1eca8 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -146,10 +146,11 @@ static struct clk_alpha_pll disp_cc_pll0 = {
.parent_names = (const char *[]){ "bi_tcxo" },
.num_parents = 1,
.ops = &clk_fabia_pll_ops,
- VDD_CX_FMAX_MAP2(
- MIN, 430000000,
- LOW, 860000000
- ),
+ VDD_CX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
},
},
};
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 0f6039e..08dce3f 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -168,6 +168,11 @@ static struct clk_alpha_pll gpll0 = {
.parent_names = (const char *[]){ "bi_tcxo" },
.num_parents = 1,
.ops = &clk_fabia_fixed_pll_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
},
},
};
@@ -207,7 +212,11 @@ static struct clk_alpha_pll gpll1 = {
.parent_names = (const char *[]){ "bi_tcxo" },
.num_parents = 1,
.ops = &clk_fabia_fixed_pll_ops,
- VDD_CX_FMAX_MAP1(MIN, 1066000000),
+ VDD_CX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
},
},
};
diff --git a/drivers/clk/qcom/vdd-level-sdm845.h b/drivers/clk/qcom/vdd-level-sdm845.h
index 6f876ba..a8d08b3 100644
--- a/drivers/clk/qcom/vdd-level-sdm845.h
+++ b/drivers/clk/qcom/vdd-level-sdm845.h
@@ -90,14 +90,6 @@
}, \
.num_rate_max = VDD_CX_NUM
-#define VDD_MX_FMAX_MAP2(l1, f1, l2, f2) \
- .vdd_class = &vdd_mx, \
- .rate_max = (unsigned long[VDD_CX_NUM]) { \
- [VDD_CX_##l1] = (f1), \
- [VDD_CX_##l2] = (f2), \
- }, \
- .num_rate_max = VDD_CX_NUM
-
#define VDD_MX_FMAX_MAP4(l1, f1, l2, f2, l3, f3, l4, f4) \
.vdd_class = &vdd_mx, \
.rate_max = (unsigned long[VDD_CX_NUM]) { \
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 0e9cf88..8b63979 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -83,13 +83,11 @@ static struct clk_alpha_pll video_pll0 = {
.parent_names = (const char *[]){ "bi_tcxo" },
.num_parents = 1,
.ops = &clk_fabia_pll_ops,
- VDD_CX_FMAX_MAP5(
- MIN, 200000000,
- LOW, 640000000,
- LOW_L1, 760000000,
- NOMINAL, 1332000000,
- HIGH, 1599000000),
-
+ VDD_CX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
},
},
};
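
As an aside on the VDD_CX_FMAX_MAP4() uses added above: the CX variant of the
macro is not shown in this hunk, but from the VDD_MX_FMAX_MAP2/MAP4 pattern in
vdd-level-sdm845.h it presumably expands to a designated-initializer rate_max
table indexed by voltage corner. The standalone sketch below mirrors that
assumed expansion; the enum values and the macro body are illustrative
stand-ins, not the kernel's definitions.

/* Sketch only: the VDD_CX_* corner names and the macro body are assumptions
 * modelled on the VDD_MX_FMAX_MAP2 definition removed by this patch.
 */
#include <stdio.h>

enum vdd_cx_level {
	VDD_CX_NONE, VDD_CX_MIN, VDD_CX_LOW, VDD_CX_LOW_L1,
	VDD_CX_NOMINAL, VDD_CX_HIGH, VDD_CX_NUM
};

#define VDD_CX_FMAX_MAP4(l1, f1, l2, f2, l3, f3, l4, f4)	\
	(unsigned long[VDD_CX_NUM]) {				\
		[VDD_CX_##l1] = (f1),				\
		[VDD_CX_##l2] = (f2),				\
		[VDD_CX_##l3] = (f3),				\
		[VDD_CX_##l4] = (f4),				\
	}

int main(void)
{
	/* the fmax table this patch installs for gpll0/gpll1/disp_cc_pll0 */
	unsigned long *fmax = VDD_CX_FMAX_MAP4(MIN, 615000000,
					       LOW, 1066000000,
					       LOW_L1, 1600000000,
					       NOMINAL, 2000000000);
	int i;

	for (i = 0; i < VDD_CX_NUM; i++)
		printf("corner %d -> %lu Hz\n", i, fmax[i]);
	return 0;
}
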
diff --git a/drivers/esoc/esoc_dev.c b/drivers/esoc/esoc_dev.c
index 39090dc..0c9e428 100644
--- a/drivers/esoc/esoc_dev.c
+++ b/drivers/esoc/esoc_dev.c
@@ -215,7 +215,7 @@ static long esoc_dev_ioctl(struct file *file, unsigned int cmd,
esoc_clink->name);
return -EIO;
}
- put_user(req, (unsigned long __user *)uarg);
+ put_user(req, (unsigned int __user *)uarg);
}
return err;
@@ -227,7 +227,7 @@ static long esoc_dev_ioctl(struct file *file, unsigned int cmd,
err = clink_ops->get_status(&status, esoc_clink);
if (err)
return err;
- put_user(status, (unsigned long __user *)uarg);
+ put_user(status, (unsigned int __user *)uarg);
break;
case ESOC_WAIT_FOR_CRASH:
err = wait_event_interruptible(esoc_udev->evt_wait,
@@ -241,7 +241,7 @@ static long esoc_dev_ioctl(struct file *file, unsigned int cmd,
esoc_clink->name);
return -EIO;
}
- put_user(evt, (unsigned long __user *)uarg);
+ put_user(evt, (unsigned int __user *)uarg);
}
return err;
default:
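
The esoc_dev.c change above narrows the put_user() destination cast because
put_user() stores sizeof(*ptr) bytes: with an (unsigned long __user *) cast, a
64-bit kernel writes 8 bytes into what userspace passed as a 32-bit variable.
A minimal user-space illustration of that size mismatch follows; the struct
and values are made up for the demo, and the 8-byte case assumes an LP64
target.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* stand-in for a caller that passed a pointer to a 32-bit status */
	struct { unsigned int status; unsigned int neighbour; } arg =
		{ 0, 0xdeadbeef };
	unsigned long val = 1;	/* stand-in for the req/status/evt value */

	/* old cast: an 8-byte store also clobbers the neighbouring field */
	memcpy(&arg, &val, sizeof(unsigned long));
	printf("8-byte store: status=%u neighbour=0x%x\n",
	       arg.status, arg.neighbour);

	/* new cast: a 4-byte store touches only the status word */
	arg.neighbour = 0xdeadbeef;
	memcpy(&arg, &val, sizeof(unsigned int));
	printf("4-byte store: status=%u neighbour=0x%x\n",
	       arg.status, arg.neighbour);
	return 0;
}
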
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 600b250..9d2e95b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1065,6 +1065,262 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
return ret;
}
+static int msm_drm_object_supports_event(struct drm_device *dev,
+ struct drm_msm_event_req *req)
+{
+ int ret = -EINVAL;
+ struct drm_mode_object *arg_obj;
+
+ arg_obj = drm_mode_object_find(dev, req->object_id, req->object_type);
+ if (!arg_obj)
+ return -ENOENT;
+
+ switch (arg_obj->type) {
+ case DRM_MODE_OBJECT_CRTC:
+ case DRM_MODE_OBJECT_CONNECTOR:
+ ret = 0;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int msm_register_event(struct drm_device *dev,
+ struct drm_msm_event_req *req, struct drm_file *file, bool en)
+{
+ int ret = -EINVAL;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct drm_mode_object *arg_obj;
+
+ arg_obj = drm_mode_object_find(dev, req->object_id, req->object_type);
+ if (!arg_obj)
+ return -ENOENT;
+
+ ret = kms->funcs->register_events(kms, arg_obj, req->event, en);
+ return ret;
+}
+
+static int msm_event_client_count(struct drm_device *dev,
+ struct drm_msm_event_req *req_event, bool locked)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned long flag = 0;
+ struct msm_drm_event *node;
+ int count = 0;
+
+ if (!locked)
+ spin_lock_irqsave(&dev->event_lock, flag);
+ list_for_each_entry(node, &priv->client_event_list, base.link) {
+ if (node->event.type == req_event->event &&
+ node->info.object_id == req_event->object_id)
+ count++;
+ }
+ if (!locked)
+ spin_unlock_irqrestore(&dev->event_lock, flag);
+
+ return count;
+}
+
+static int msm_ioctl_register_event(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_event_req *req_event = data;
+ struct msm_drm_event *client, *node;
+ unsigned long flag = 0;
+ bool dup_request = false;
+ int ret = 0, count = 0;
+
+ ret = msm_drm_object_supports_event(dev, req_event);
+ if (ret) {
+ DRM_ERROR("unsupported event %x object %x object id %d\n",
+ req_event->event, req_event->object_type,
+ req_event->object_id);
+ return ret;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flag);
+ list_for_each_entry(node, &priv->client_event_list, base.link) {
+ if (node->base.file_priv != file)
+ continue;
+ if (node->event.type == req_event->event &&
+ node->info.object_id == req_event->object_id) {
+ DRM_DEBUG("duplicate request for event %x obj id %d\n",
+ node->event.type, node->info.object_id);
+ dup_request = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flag);
+
+ if (dup_request)
+ return -EALREADY;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->base.file_priv = file;
+ client->base.pid = current->pid;
+ client->base.event = &client->event;
+ client->event.type = req_event->event;
+ memcpy(&client->info, req_event, sizeof(client->info));
+
+ /* Get the count of clients that have registered for this event.
+ * The event is enabled only for the first client; subsequent
+ * registrations simply add to the client list and return.
+ */
+ count = msm_event_client_count(dev, req_event, false);
+ /* Add current client to list */
+ spin_lock_irqsave(&dev->event_lock, flag);
+ list_add_tail(&client->base.link, &priv->client_event_list);
+ spin_unlock_irqrestore(&dev->event_lock, flag);
+
+ if (count)
+ return 0;
+
+ ret = msm_register_event(dev, req_event, file, true);
+ if (ret) {
+ DRM_ERROR("failed to enable event %x object %x object id %d\n",
+ req_event->event, req_event->object_type,
+ req_event->object_id);
+ spin_lock_irqsave(&dev->event_lock, flag);
+ list_del(&client->base.link);
+ spin_unlock_irqrestore(&dev->event_lock, flag);
+ kfree(client);
+ }
+ return ret;
+}
+
+static int msm_ioctl_deregister_event(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_event_req *req_event = data;
+ struct msm_drm_event *client = NULL, *node, *temp;
+ unsigned long flag = 0;
+ int count = 0;
+ bool found = false;
+ int ret = 0;
+
+ ret = msm_drm_object_supports_event(dev, req_event);
+ if (ret) {
+ DRM_ERROR("unsupported event %x object %x object id %d\n",
+ req_event->event, req_event->object_type,
+ req_event->object_id);
+ return ret;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flag);
+ list_for_each_entry_safe(node, temp, &priv->client_event_list,
+ base.link) {
+ if (node->event.type == req_event->event &&
+ node->info.object_id == req_event->object_id &&
+ node->base.file_priv == file) {
+ client = node;
+ list_del(&client->base.link);
+ found = true;
+ kfree(client);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flag);
+
+ if (!found)
+ return -ENOENT;
+
+ count = msm_event_client_count(dev, req_event, false);
+ if (!count)
+ ret = msm_register_event(dev, req_event, file, false);
+
+ return ret;
+}
+
+void msm_send_crtc_notification(struct drm_crtc *crtc,
+ struct drm_event *event, u8 *payload)
+{
+ struct drm_device *dev = NULL;
+ struct msm_drm_private *priv = NULL;
+ unsigned long flags;
+ struct msm_drm_event *notify, *node;
+ int len = 0, ret;
+
+ if (!crtc || !event || !event->length || !payload) {
+ DRM_ERROR("err param crtc %pK event %pK len %d payload %pK\n",
+ crtc, event, ((event) ? (event->length) : -1),
+ payload);
+ return;
+ }
+ dev = crtc->dev;
+ priv = (dev) ? dev->dev_private : NULL;
+ if (!dev || !priv) {
+ DRM_ERROR("invalid dev %pK priv %pK\n", dev, priv);
+ return;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_for_each_entry(node, &priv->client_event_list, base.link) {
+ if (node->event.type != event->type ||
+ crtc->base.id != node->info.object_id)
+ continue;
+ len = event->length + sizeof(struct drm_msm_event_resp);
+ if (node->base.file_priv->event_space < len) {
+ DRM_ERROR("Insufficient space to notify\n");
+ continue;
+ }
+ notify = kzalloc(len, GFP_ATOMIC);
+ if (!notify)
+ continue;
+ notify->base.file_priv = node->base.file_priv;
+ notify->base.event = &notify->event;
+ notify->base.pid = node->base.pid;
+ notify->event.type = node->event.type;
+ notify->event.length = len;
+ memcpy(&notify->info, &node->info, sizeof(notify->info));
+ memcpy(notify->data, payload, event->length);
+ ret = drm_event_reserve_init_locked(dev, node->base.file_priv,
+ &notify->base, &notify->event);
+ if (ret) {
+ kfree(notify);
+ continue;
+ }
+ drm_send_event_locked(dev, &notify->base);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int msm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_minor *minor = file_priv->minor;
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_event *node, *temp;
+ u32 count;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_for_each_entry_safe(node, temp, &priv->client_event_list,
+ base.link) {
+ if (node->base.file_priv != file_priv)
+ continue;
+ list_del(&node->base.link);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ count = msm_event_client_count(dev, &node->info, true);
+ if (!count)
+ msm_register_event(dev, &node->info, file_priv, false);
+ kfree(node);
+ spin_lock_irqsave(&dev->event_lock, flags);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return drm_release(inode, filp);
+}
+
static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -1075,6 +1331,10 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(SDE_WB_CONFIG, sde_wb_config, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_REGISTER_EVENT, msm_ioctl_register_event,
+ DRM_UNLOCKED|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_DEREGISTER_EVENT, msm_ioctl_deregister_event,
+ DRM_UNLOCKED|DRM_CONTROL_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
@@ -1086,7 +1346,7 @@ static const struct vm_operations_struct vm_ops = {
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = drm_open,
- .release = drm_release,
+ .release = msm_release,
.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
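
For context on how these new ioctls reach clients: msm_ioctl_register_event()
only arms the event, while msm_send_crtc_notification() queues a payload on
the standard DRM event queue via drm_event_reserve_init_locked() and
drm_send_event_locked(), so user space picks notifications up by read()ing the
DRM fd. A hedged sketch of that read side follows; it assumes only the generic
struct drm_event header from the DRM uapi (the include path may be
<libdrm/drm.h> depending on how the headers are installed) and treats the
msm-specific payload, whose uapi layout is not shown in this hunk, as opaque
bytes. Registration via the MSM_REGISTER_EVENT ioctl is omitted for the same
reason.

/* Hedged sketch: drain DRM events (including the custom MSM notifications)
 * from an already-registered DRM file descriptor.
 */
#include <drm/drm.h>		/* struct drm_event: __u32 type, __u32 length */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[1024];
	int fd = open("/dev/dri/card0", O_RDWR);	/* node name may differ */

	if (fd < 0)
		return 1;

	for (;;) {
		ssize_t n = read(fd, buf, sizeof(buf));	/* blocks until events */
		ssize_t off = 0;

		if (n <= 0)
			break;
		while (off < n) {
			struct drm_event *e = (struct drm_event *)(buf + off);

			/* e->length covers the header plus the msm payload */
			printf("event type 0x%x, length %u\n",
			       e->type, e->length);
			off += e->length;
		}
	}
	close(fd);
	return 0;
}
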
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index a904dcd..f2fccd7 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -374,6 +374,7 @@ struct msm_display_info {
struct msm_drm_event {
struct drm_pending_event base;
struct drm_event event;
+ struct drm_msm_event_req info;
u8 data[];
};
@@ -623,6 +624,15 @@ enum msm_dsi_encoder_id {
MSM_DSI_CMD_ENCODER_ID = 1,
MSM_DSI_ENCODER_NUM = 2
};
+
+/**
+ * msm_send_crtc_notification - notify user-space clients of crtc events.
+ * @crtc: crtc that is generating the event.
+ * @event: event that needs to be notified.
+ * @payload: payload for the event.
+ */
+void msm_send_crtc_notification(struct drm_crtc *crtc,
+ struct drm_event *event, u8 *payload);
#ifdef CONFIG_DRM_MSM_DSI
void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 4ebbc58..aa1b090 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -83,6 +83,8 @@ struct msm_kms_funcs {
void (*preclose)(struct msm_kms *kms, struct drm_file *file);
void (*postclose)(struct msm_kms *kms, struct drm_file *file);
void (*lastclose)(struct msm_kms *kms);
+ int (*register_events)(struct msm_kms *kms,
+ struct drm_mode_object *obj, u32 event, bool en);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
};
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 5d4648e..1f39180 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -846,3 +846,9 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
return ERR_PTR(rc);
}
+
+int sde_connector_register_custom_event(struct sde_kms *kms,
+ struct drm_connector *conn_drm, u32 event, bool val)
+{
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 0ece0d2..9d36851 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -392,5 +392,16 @@ int sde_connector_register_event(struct drm_connector *connector,
void sde_connector_unregister_event(struct drm_connector *connector,
uint32_t event_idx);
+/**
+ * sde_connector_register_custom_event - register for async events
+ * @kms: Pointer to sde_kms
+ * @conn_drm: Pointer to drm connector object
+ * @event: Event for which request is being sent
+ * @en: Flag to enable/disable the event
+ * Returns: Zero on success
+ */
+int sde_connector_register_custom_event(struct sde_kms *kms,
+ struct drm_connector *conn_drm, u32 event, bool en);
+
#endif /* _SDE_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index ec15215..acb5695 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -37,6 +37,14 @@
#include "sde_power_handle.h"
#include "sde_core_perf.h"
+struct sde_crtc_irq_info {
+ struct sde_irq_callback irq;
+ u32 event;
+ int (*func)(struct drm_crtc *crtc, bool en,
+ struct sde_irq_callback *irq);
+ struct list_head list;
+};
+
/* default input fence timeout, in ms */
#define SDE_CRTC_INPUT_FENCE_TIMEOUT 2000
@@ -51,6 +59,8 @@
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
+#define MISR_BUFF_SIZE 256
+
static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv;
@@ -68,6 +78,35 @@ static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
return to_sde_kms(priv->kms);
}
+static inline int _sde_crtc_power_enable(struct sde_crtc *sde_crtc, bool enable)
+{
+ struct drm_crtc *crtc;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+
+ if (!sde_crtc) {
+ SDE_ERROR("invalid sde crtc\n");
+ return -EINVAL;
+ }
+
+ crtc = &sde_crtc->base;
+ if (!crtc->dev || !crtc->dev->dev_private) {
+ SDE_ERROR("invalid drm device\n");
+ return -EINVAL;
+ }
+
+ priv = crtc->dev->dev_private;
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
+
+ return sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+ enable);
+}
+
static void _sde_crtc_deinit_events(struct sde_crtc *sde_crtc)
{
if (!sde_crtc)
@@ -1096,8 +1135,6 @@ static void _sde_crtc_vblank_enable_nolock(
struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_encoder *enc;
- struct msm_drm_private *priv;
- struct sde_kms *sde_kms;
if (!sde_crtc) {
SDE_ERROR("invalid crtc\n");
@@ -1106,17 +1143,11 @@ static void _sde_crtc_vblank_enable_nolock(
crtc = &sde_crtc->base;
dev = crtc->dev;
- priv = dev->dev_private;
-
- if (!priv->kms) {
- SDE_ERROR("invalid kms\n");
- return;
- }
- sde_kms = to_sde_kms(priv->kms);
if (enable) {
- sde_power_resource_enable(&priv->phandle,
- sde_kms->core_client, true);
+ if (_sde_crtc_power_enable(sde_crtc, true))
+ return;
+
list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
if (enc->crtc != crtc)
continue;
@@ -1135,8 +1166,7 @@ static void _sde_crtc_vblank_enable_nolock(
sde_encoder_register_vblank_callback(enc, NULL, NULL);
}
- sde_power_resource_enable(&priv->phandle,
- sde_kms->core_client, false);
+ _sde_crtc_power_enable(sde_crtc, false);
}
}
@@ -1273,6 +1303,9 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
struct drm_encoder *encoder;
+ unsigned long flags;
+ struct sde_crtc_irq_info *node = NULL;
+ int ret;
if (!crtc || !crtc->dev || !crtc->state) {
SDE_ERROR("invalid crtc\n");
@@ -1324,6 +1357,18 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
sde_crtc->num_mixers = 0;
+
+ spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+ list_for_each_entry(node, &sde_crtc->user_event_list, list) {
+ ret = 0;
+ if (node->func)
+ ret = node->func(crtc, false, &node->irq);
+ if (ret)
+ SDE_ERROR("%s failed to disable event %x\n",
+ sde_crtc->name, node->event);
+ }
+ spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+
mutex_unlock(&sde_crtc->crtc_lock);
}
@@ -1334,7 +1379,9 @@ static void sde_crtc_enable(struct drm_crtc *crtc)
struct sde_hw_mixer *lm;
struct drm_display_mode *mode;
struct drm_encoder *encoder;
- int i;
+ unsigned long flags;
+ struct sde_crtc_irq_info *node = NULL;
+ int i, ret;
if (!crtc) {
SDE_ERROR("invalid crtc\n");
@@ -1369,6 +1416,17 @@ static void sde_crtc_enable(struct drm_crtc *crtc)
lm->cfg.flags = 0;
lm->ops.setup_mixer_out(lm, &lm->cfg);
}
+
+ spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+ list_for_each_entry(node, &sde_crtc->user_event_list, list) {
+ ret = 0;
+ if (node->func)
+ ret = node->func(crtc, true, &node->irq);
+ if (ret)
+ SDE_ERROR("%s failed to enable event %x\n",
+ sde_crtc->name, node->event);
+ }
+ spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
}
struct plane_state {
@@ -2023,7 +2081,108 @@ static int _sde_debugfs_status_open(struct inode *inode, struct file *file)
return single_open(file, _sde_debugfs_status_show, inode->i_private);
}
-#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix) \
+static ssize_t _sde_crtc_misr_setup(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_mixer *m;
+ int i = 0, rc;
+ char buf[MISR_BUFF_SIZE + 1];
+ u32 frame_count, enable;
+ size_t buff_copy;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ sde_crtc = file->private_data;
+ buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+ if (copy_from_user(buf, user_buf, buff_copy)) {
+ SDE_ERROR("buffer copy failed\n");
+ return -EINVAL;
+ }
+
+ buf[buff_copy] = 0; /* end of string */
+
+ if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+ return -EINVAL;
+
+ rc = _sde_crtc_power_enable(sde_crtc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&sde_crtc->crtc_lock);
+ sde_crtc->misr_enable = enable;
+ for (i = 0; i < sde_crtc->num_mixers; ++i) {
+ m = &sde_crtc->mixers[i];
+ if (!m->hw_lm)
+ continue;
+
+ m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
+ }
+ mutex_unlock(&sde_crtc->crtc_lock);
+ _sde_crtc_power_enable(sde_crtc, false);
+
+ return count;
+}
+
+static ssize_t _sde_crtc_misr_read(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_mixer *m;
+ int i = 0, rc;
+ ssize_t len = 0;
+ char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+
+ if (*ppos)
+ return 0;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ sde_crtc = file->private_data;
+ rc = _sde_crtc_power_enable(sde_crtc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&sde_crtc->crtc_lock);
+ if (!sde_crtc->misr_enable) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "disabled\n");
+ goto buff_check;
+ }
+
+ for (i = 0; i < sde_crtc->num_mixers; ++i) {
+ m = &sde_crtc->mixers[i];
+ if (!m->hw_lm)
+ continue;
+
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n",
+ m->hw_lm->idx - LM_0);
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+ m->hw_lm->ops.collect_misr(m->hw_lm));
+ }
+
+buff_check:
+ if (count <= len) {
+ len = 0;
+ goto end;
+ }
+
+ if (copy_to_user(user_buff, buf, len)) {
+ len = -EFAULT;
+ goto end;
+ }
+
+ *ppos += len; /* increase offset */
+
+end:
+ mutex_unlock(&sde_crtc->crtc_lock);
+ _sde_crtc_power_enable(sde_crtc, false);
+ return len;
+}
+
+#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, __prefix ## _show, inode->i_private); \
@@ -2064,6 +2223,11 @@ static int _sde_crtc_init_debugfs(struct drm_crtc *crtc)
.llseek = seq_lseek,
.release = single_release,
};
+ static const struct file_operations debugfs_misr_fops = {
+ .open = simple_open,
+ .read = _sde_crtc_misr_read,
+ .write = _sde_crtc_misr_setup,
+ };
if (!crtc)
return -EINVAL;
@@ -2086,6 +2250,8 @@ static int _sde_crtc_init_debugfs(struct drm_crtc *crtc)
sde_crtc->debugfs_root,
&sde_crtc->base,
&sde_crtc_debugfs_state_fops);
+ debugfs_create_file("misr_data", 0644, sde_crtc->debugfs_root,
+ sde_crtc, &debugfs_misr_fops);
return 0;
}
@@ -2263,6 +2429,7 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
atomic_set(&sde_crtc->frame_pending, 0);
INIT_LIST_HEAD(&sde_crtc->frame_event_list);
+ INIT_LIST_HEAD(&sde_crtc->user_event_list);
for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) {
INIT_LIST_HEAD(&sde_crtc->frame_events[i].list);
list_add(&sde_crtc->frame_events[i].list,
@@ -2306,3 +2473,9 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
SDE_DEBUG("%s: successfully initialized crtc\n", sde_crtc->name);
return crtc;
}
+
+int sde_crtc_register_custom_event(struct sde_kms *kms,
+ struct drm_crtc *crtc_drm, u32 event, bool val)
+{
+ return -EINVAL;
+}
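
The misr_data debugfs node added above takes "<enable> <frame_count>" on write
(parsed by the sscanf("%u %u") in _sde_crtc_misr_setup()) and on read prints
either "disabled" or an "lm idx:N" line followed by the collected signature
for each active mixer. A small user-space sketch of exercising it is shown
below; the debugfs path is a placeholder, since the actual directory comes
from the crtc's debugfs root created elsewhere in the driver.

/* Hedged sketch: poke the crtc misr_data debugfs node. MISR_NODE is a
 * hypothetical path, not the real one.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define MISR_NODE "/sys/kernel/debug/dri/0/crtc0/misr_data"	/* placeholder */

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd = open(MISR_NODE, O_WRONLY);

	if (fd < 0)
		return 1;
	/* enable MISR and latch one frame: "<enable> <frame_count>" */
	write(fd, "1 1", strlen("1 1"));
	close(fd);

	fd = open(MISR_NODE, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);	/* "disabled" or per-LM signatures */
	}
	close(fd);
	return 0;
}
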
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 0647ff4..5934405 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -136,6 +136,7 @@ struct sde_crtc_event {
* @event_cache : Local cache of event worker structures
* @event_free_list : List of available event structures
* @event_lock : Spinlock around event handling code
+ * @misr_enable : boolean entry indicates misr enable/disable status.
*/
struct sde_crtc {
struct drm_crtc base;
@@ -169,6 +170,7 @@ struct sde_crtc {
struct list_head dirty_list;
struct list_head ad_dirty;
struct list_head ad_active;
+ struct list_head user_event_list;
struct mutex crtc_lock;
@@ -183,6 +185,7 @@ struct sde_crtc {
struct sde_crtc_event event_cache[SDE_CRTC_MAX_EVENT_COUNT];
struct list_head event_free_list;
spinlock_t event_lock;
+ bool misr_enable;
};
#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -316,7 +319,17 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane);
void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
/**
- * sde_crtc_get_intf_mode - get primary interface mode of the given crtc
+ * sde_crtc_register_custom_event - api for enabling/disabling crtc event
+ * @kms: Pointer to sde_kms
+ * @crtc_drm: Pointer to crtc object
+ * @event: Event that the client is interested in
+ * @en: Flag to enable/disable the event
+ */
+int sde_crtc_register_custom_event(struct sde_kms *kms,
+ struct drm_crtc *crtc_drm, u32 event, bool en);
+
+/**
+ * sde_crtc_get_intf_mode - get interface mode of the given crtc
* @crtc: Pointer to crtc
*/
enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 274dc6f..7ab4f8d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -57,6 +57,8 @@
#define MAX_CHANNELS_PER_ENC 2
+#define MISR_BUFF_SIZE 256
+
/**
* struct sde_encoder_virt - virtual encoder. Container of one or more physical
* encoders. Virtual encoder manages one "logical" display. Physical
@@ -90,6 +92,7 @@
* @crtc_frame_event: callback event
* @frame_done_timeout: frame done timeout in Hz
* @frame_done_timer: watchdog timer for frame done event
+ * @misr_enable: misr enable/disable status
*/
struct sde_encoder_virt {
struct drm_encoder base;
@@ -120,6 +123,7 @@ struct sde_encoder_virt {
struct sde_rsc_client *rsc_client;
struct msm_display_info disp_info;
bool rsc_state_update;
+ bool misr_enable;
};
#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -131,6 +135,36 @@ inline bool _sde_is_dsc_enabled(struct sde_encoder_virt *sde_enc)
return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
}
+static inline int _sde_encoder_power_enable(struct sde_encoder_virt *sde_enc,
+ bool enable)
+{
+ struct drm_encoder *drm_enc;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+
+ if (!sde_enc) {
+ SDE_ERROR("invalid sde enc\n");
+ return -EINVAL;
+ }
+
+ drm_enc = &sde_enc->base;
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ SDE_ERROR("drm device invalid\n");
+ return -EINVAL;
+ }
+
+ priv = drm_enc->dev->dev_private;
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
+
+ return sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+ enable);
+}
+
void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc,
struct sde_encoder_hw_resources *hw_res,
struct drm_connector_state *conn_state)
@@ -706,8 +740,6 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc = NULL;
- struct msm_drm_private *priv;
- struct sde_kms *sde_kms;
int i = 0;
int ret = 0;
@@ -723,13 +755,13 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
}
sde_enc = to_sde_encoder_virt(drm_enc);
- priv = drm_enc->dev->dev_private;
- sde_kms = to_sde_kms(priv->kms);
SDE_DEBUG_ENC(sde_enc, "\n");
SDE_EVT32(DRMID(drm_enc));
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+ ret = _sde_encoder_power_enable(sde_enc, true);
+ if (ret)
+ return;
sde_enc->cur_master = NULL;
@@ -810,7 +842,7 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
sde_rm_release(&sde_kms->rm, drm_enc);
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+ _sde_encoder_power_enable(sde_enc, false);
}
static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog,
@@ -1389,105 +1421,108 @@ static int _sde_encoder_debugfs_status_open(struct inode *inode,
return single_open(file, _sde_encoder_status_show, inode->i_private);
}
-static void _sde_set_misr_params(struct sde_encoder_phys *phys, u32 enable,
- u32 frame_count)
-{
- int j;
-
- if (!phys->misr_map)
- return;
-
- phys->misr_map->enable = enable;
-
- if (frame_count <= SDE_CRC_BATCH_SIZE)
- phys->misr_map->frame_count = frame_count;
- else if (frame_count <= 0)
- phys->misr_map->frame_count = 0;
- else
- phys->misr_map->frame_count = SDE_CRC_BATCH_SIZE;
-
- if (!enable) {
- phys->misr_map->last_idx = 0;
- phys->misr_map->frame_count = 0;
- for (j = 0; j < SDE_CRC_BATCH_SIZE; j++)
- phys->misr_map->crc_value[j] = 0;
- }
-}
-
-static ssize_t _sde_encoder_misr_set(struct file *file,
+static ssize_t _sde_encoder_misr_setup(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct sde_encoder_virt *sde_enc;
- struct drm_encoder *drm_enc;
- int i = 0;
- char buf[10];
- u32 enable, frame_count;
+ int i = 0, rc;
+ char buf[MISR_BUFF_SIZE + 1];
+ size_t buff_copy;
+ u32 frame_count, enable;
- drm_enc = file->private_data;
- sde_enc = to_sde_encoder_virt(drm_enc);
+ if (!file || !file->private_data)
+ return -EINVAL;
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
+ sde_enc = file->private_data;
- buf[count] = 0; /* end of string */
+ buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+ if (copy_from_user(buf, user_buf, buff_copy))
+ return -EINVAL;
+
+ buf[buff_copy] = 0; /* end of string */
if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
- return -EFAULT;
+ return -EINVAL;
+
+ rc = _sde_encoder_power_enable(sde_enc, true);
+ if (rc)
+ return rc;
mutex_lock(&sde_enc->enc_lock);
+ sde_enc->misr_enable = enable;
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
- if (!phys || !phys->misr_map || !phys->ops.setup_misr)
+ if (!phys || !phys->ops.setup_misr)
continue;
- _sde_set_misr_params(phys, enable, frame_count);
- phys->ops.setup_misr(phys, phys->misr_map);
+ phys->ops.setup_misr(phys, enable, frame_count);
}
mutex_unlock(&sde_enc->enc_lock);
+ _sde_encoder_power_enable(sde_enc, false);
+
return count;
}
-static ssize_t _sde_encoder_misr_read(
- struct file *file,
- char __user *buff, size_t count, loff_t *ppos)
+static ssize_t _sde_encoder_misr_read(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
{
struct sde_encoder_virt *sde_enc;
- struct drm_encoder *drm_enc;
- int i = 0, j = 0, len = 0;
- char buf[512] = {'\0'};
+ int i = 0, len = 0;
+ char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+ int rc;
if (*ppos)
return 0;
- drm_enc = file->private_data;
- sde_enc = to_sde_encoder_virt(drm_enc);
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ sde_enc = file->private_data;
+
+ rc = _sde_encoder_power_enable(sde_enc, true);
+ if (rc)
+ return rc;
mutex_lock(&sde_enc->enc_lock);
- for (i = 0; i < sde_enc->num_phys_encs; i++) {
- struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
- struct sde_misr_params *misr_map;
-
- if (!phys || !phys->misr_map)
- continue;
-
- misr_map = phys->misr_map;
-
- len += snprintf(buf+len, sizeof(buf), "INTF%d\n", i);
- for (j = 0; j < SDE_CRC_BATCH_SIZE; j++)
- len += snprintf(buf+len, sizeof(buf), "%x\n",
- misr_map->crc_value[j]);
+ if (!sde_enc->misr_enable) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "disabled\n");
+ goto buff_check;
+ } else if (sde_enc->disp_info.capabilities &
+ ~MSM_DISPLAY_CAP_VID_MODE) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "unsupported\n");
+ goto buff_check;
}
- if (len < 0 || len >= sizeof(buf))
- return 0;
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+ if (!phys || !phys->ops.collect_misr)
+ continue;
- if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
- return -EFAULT;
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "Intf idx:%d\n", phys->intf_idx - INTF_0);
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+ phys->ops.collect_misr(phys));
+ }
+
+buff_check:
+ if (count <= len) {
+ len = 0;
+ goto end;
+ }
+
+ if (copy_to_user(user_buff, buf, len)) {
+ len = -EFAULT;
+ goto end;
+ }
*ppos += len; /* increase offset */
- mutex_unlock(&sde_enc->enc_lock);
+end:
+ mutex_unlock(&sde_enc->enc_lock);
+ _sde_encoder_power_enable(sde_enc, false);
return len;
}
@@ -1507,7 +1542,7 @@ static int _sde_encoder_init_debugfs(struct drm_encoder *drm_enc)
static const struct file_operations debugfs_misr_fops = {
.open = simple_open,
.read = _sde_encoder_misr_read,
- .write = _sde_encoder_misr_set,
+ .write = _sde_encoder_misr_setup,
};
char name[SDE_NAME_SIZE];
@@ -1534,7 +1569,7 @@ static int _sde_encoder_init_debugfs(struct drm_encoder *drm_enc)
sde_enc->debugfs_root, sde_enc, &debugfs_status_fops);
debugfs_create_file("misr_data", 0644,
- sde_enc->debugfs_root, drm_enc, &debugfs_misr_fops);
+ sde_enc->debugfs_root, sde_enc, &debugfs_misr_fops);
return 0;
}
@@ -1896,10 +1931,6 @@ int sde_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
if (ret)
return ret;
}
-
- if (phys && phys->ops.collect_misr)
- if (phys->misr_map && phys->misr_map->enable)
- phys->ops.collect_misr(phys, phys->misr_map);
}
return ret;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index b9e802f..da155b0 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -147,9 +147,8 @@ struct sde_encoder_phys_ops {
bool (*needs_single_flush)(struct sde_encoder_phys *phys_enc);
void (*setup_misr)(struct sde_encoder_phys *phys_encs,
- struct sde_misr_params *misr_map);
- void (*collect_misr)(struct sde_encoder_phys *phys_enc,
- struct sde_misr_params *misr_map);
+ bool enable, u32 frame_count);
+ u32 (*collect_misr)(struct sde_encoder_phys *phys_enc);
void (*hw_reset)(struct sde_encoder_phys *phys_enc);
};
@@ -183,7 +182,6 @@ enum sde_intr_idx {
* @hw_pp: Hardware interface to the ping pong registers
* @sde_kms: Pointer to the sde_kms top level
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
- * @misr_map: Interface for setting and collecting MISR data
* @enabled: Whether the encoder has enabled and running a mode
* @split_role: Role to play in a split-panel configuration
* @intf_mode: Interface mode
@@ -212,7 +210,6 @@ struct sde_encoder_phys {
struct sde_hw_pingpong *hw_pp;
struct sde_kms *sde_kms;
struct drm_display_mode cached_mode;
- struct sde_misr_params *misr_map;
enum sde_enc_split_role split_role;
enum sde_intf_mode intf_mode;
enum sde_intf intf_idx;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 82d32dc..39dfd5d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -830,23 +830,29 @@ static void sde_encoder_phys_vid_handle_post_kickoff(
}
static void sde_encoder_phys_vid_setup_misr(struct sde_encoder_phys *phys_enc,
- struct sde_misr_params *misr_map)
+ bool enable, u32 frame_count)
{
- struct sde_encoder_phys_vid *vid_enc =
- to_sde_encoder_phys_vid(phys_enc);
+ struct sde_encoder_phys_vid *vid_enc;
- if (vid_enc && vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
- vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf, misr_map);
+ if (!phys_enc)
+ return;
+ vid_enc = to_sde_encoder_phys_vid(phys_enc);
+
+ if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
+ vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
+ enable, frame_count);
}
-static void sde_encoder_phys_vid_collect_misr(struct sde_encoder_phys *phys_enc,
- struct sde_misr_params *misr_map)
+static u32 sde_encoder_phys_vid_collect_misr(struct sde_encoder_phys *phys_enc)
{
- struct sde_encoder_phys_vid *vid_enc =
- to_sde_encoder_phys_vid(phys_enc);
+ struct sde_encoder_phys_vid *vid_enc;
- if (vid_enc && vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr)
- vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf, misr_map);
+ if (!phys_enc)
+ return 0;
+ vid_enc = to_sde_encoder_phys_vid(phys_enc);
+
+ return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
+ vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
}
static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
@@ -919,13 +925,6 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init(
goto fail;
}
- phys_enc->misr_map = kzalloc(sizeof(struct sde_misr_params),
- GFP_KERNEL);
- if (!phys_enc->misr_map) {
- ret = -ENOMEM;
- goto fail;
- }
-
SDE_DEBUG_VIDENC(vid_enc, "\n");
sde_encoder_phys_vid_init_ops(&phys_enc->ops);
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 00b6c85..01d0d20 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -134,217 +134,217 @@ struct sde_media_color_map {
static const struct sde_format sde_format_map[] = {
INTERLEAVED_RGB_FMT(ARGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGB888,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 3, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR888,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 3, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGB565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ARGB1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ARGB4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA1010102,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 4, SDE_FORMAT_FLAG_DX,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA1010102,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 4, SDE_FORMAT_FLAG_DX,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, SDE_FORMAT_FLAG_DX,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ARGB2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 4, SDE_FORMAT_FLAG_DX,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 4, SDE_FORMAT_FLAG_DX,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX1010102,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 4, SDE_FORMAT_FLAG_DX,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, SDE_FORMAT_FLAG_DX,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX1010102,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 4, SDE_FORMAT_FLAG_DX,
SDE_FETCH_LINEAR, 1),
@@ -492,31 +492,31 @@ static const struct sde_format sde_format_map_tp10_tile[] = {
* the data will be passed by user-space.
*/
static const struct sde_format sde_format_map_ubwc[] = {
- INTERLEAVED_RGB_FMT(RGB565,
+ INTERLEAVED_RGB_FMT(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, SDE_FORMAT_FLAG_COMPRESSED,
SDE_FETCH_UBWC, 2),
- INTERLEAVED_RGB_FMT(RGBA8888,
+ INTERLEAVED_RGB_FMT(ABGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, SDE_FORMAT_FLAG_COMPRESSED,
SDE_FETCH_UBWC, 2),
- INTERLEAVED_RGB_FMT(RGBX8888,
+ INTERLEAVED_RGB_FMT(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, SDE_FORMAT_FLAG_COMPRESSED,
SDE_FETCH_UBWC, 2),
- INTERLEAVED_RGB_FMT(RGBA1010102,
+ INTERLEAVED_RGB_FMT(ABGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED,
SDE_FETCH_UBWC, 2),
- INTERLEAVED_RGB_FMT(RGBX1010102,
+ INTERLEAVED_RGB_FMT(XBGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED,
@@ -590,11 +590,11 @@ static void _sde_get_v_h_subsample_rate(
static int _sde_format_get_media_color_ubwc(const struct sde_format *fmt)
{
static const struct sde_media_color_map sde_media_ubwc_map[] = {
- {DRM_FORMAT_RGBA8888, COLOR_FMT_RGBA8888_UBWC},
- {DRM_FORMAT_RGBX8888, COLOR_FMT_RGBA8888_UBWC},
- {DRM_FORMAT_RGBA1010102, COLOR_FMT_RGBA1010102_UBWC},
- {DRM_FORMAT_RGBX1010102, COLOR_FMT_RGBA1010102_UBWC},
- {DRM_FORMAT_RGB565, COLOR_FMT_RGB565_UBWC},
+ {DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
};
int color_fmt = -1;
int i;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
index 354b892..cdb3450 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
@@ -16,17 +16,17 @@ static const struct sde_format_extended plane_formats[] = {
{DRM_FORMAT_ARGB8888, 0},
{DRM_FORMAT_ABGR8888, 0},
{DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_BGRA8888, 0},
{DRM_FORMAT_XRGB8888, 0},
{DRM_FORMAT_RGBX8888, 0},
{DRM_FORMAT_BGRX8888, 0},
{DRM_FORMAT_XBGR8888, 0},
- {DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_RGB888, 0},
{DRM_FORMAT_BGR888, 0},
{DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_BGR565, 0},
{DRM_FORMAT_ARGB1555, 0},
{DRM_FORMAT_ABGR1555, 0},
@@ -52,16 +52,16 @@ static const struct sde_format_extended plane_formats_yuv[] = {
{DRM_FORMAT_ABGR8888, 0},
{DRM_FORMAT_RGBA8888, 0},
{DRM_FORMAT_BGRX8888, 0},
- {DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_BGRA8888, 0},
{DRM_FORMAT_XRGB8888, 0},
{DRM_FORMAT_XBGR8888, 0},
{DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_RGB888, 0},
{DRM_FORMAT_BGR888, 0},
{DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_BGR565, 0},
{DRM_FORMAT_ARGB1555, 0},
{DRM_FORMAT_ABGR1555, 0},
@@ -113,14 +113,14 @@ static const struct sde_format_extended cursor_formats[] = {
static const struct sde_format_extended wb2_formats[] = {
{DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_RGB888, 0},
{DRM_FORMAT_ARGB8888, 0},
{DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_XRGB8888, 0},
{DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_ARGB1555, 0},
{DRM_FORMAT_RGBA5551, 0},
{DRM_FORMAT_XRGB1555, 0},
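
A note on the fourcc swaps in the two format tables above: DRM format names
list components from the most to the least significant bits of a little-endian
word, so DRM_FORMAT_ABGR8888 lays bytes out in memory as R, G, B, A. Pairing
DRM_FORMAT_MOD_QCOM_COMPRESSED with the *BGR-named fourccs therefore
presumably matches the C2_R_Cr/C0_G_Y/C1_B_Cb/C3_ALPHA element order those
UBWC entries keep. The snippet below only prints the fourcc tags to make the
renames easier to eyeball; the include path may be <libdrm/drm_fourcc.h>
depending on how the uapi headers are installed.

#include <drm/drm_fourcc.h>
#include <stdio.h>

static void show(const char *name, unsigned int f)
{
	printf("%-22s '%c%c%c%c'\n", name,
	       f & 0xff, (f >> 8) & 0xff, (f >> 16) & 0xff, (f >> 24) & 0xff);
}

int main(void)
{
	show("DRM_FORMAT_RGBA8888", DRM_FORMAT_RGBA8888);	/* 'RA24' */
	show("DRM_FORMAT_ABGR8888", DRM_FORMAT_ABGR8888);	/* 'AB24' */
	show("DRM_FORMAT_RGB565", DRM_FORMAT_RGB565);		/* 'RG16' */
	show("DRM_FORMAT_BGR565", DRM_FORMAT_BGR565);		/* 'BG16' */
	return 0;
}
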
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
index d96e49a..1f17378 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -67,12 +67,6 @@
#define INTF_MISR_CTRL 0x180
#define INTF_MISR_SIGNATURE 0x184
-#define MISR_FRAME_COUNT_MASK 0xFF
-#define MISR_CTRL_ENABLE BIT(8)
-#define MISR_CTRL_STATUS BIT(9)
-#define MISR_CTRL_STATUS_CLEAR BIT(10)
-#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)
-
static struct sde_intf_cfg *_intf_offset(enum sde_intf intf,
struct sde_mdss_cfg *m,
void __iomem *addr,
@@ -270,48 +264,28 @@ static void sde_hw_intf_get_status(
}
}
-static void sde_hw_intf_set_misr(struct sde_hw_intf *intf,
- struct sde_misr_params *misr_map)
+static void sde_hw_intf_setup_misr(struct sde_hw_intf *intf,
+ bool enable, u32 frame_count)
{
struct sde_hw_blk_reg_map *c = &intf->hw;
u32 config = 0;
- if (!misr_map)
- return;
-
SDE_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
- /* Clear data */
+ /* clear misr data */
wmb();
- if (misr_map->enable) {
- config = (MISR_FRAME_COUNT_MASK & 1) |
- (MISR_CTRL_ENABLE);
+ if (enable)
+ config = (frame_count & MISR_FRAME_COUNT_MASK) |
+ MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
- SDE_REG_WRITE(c, INTF_MISR_CTRL, config);
- } else {
- SDE_REG_WRITE(c, INTF_MISR_CTRL, 0);
- }
+ SDE_REG_WRITE(c, INTF_MISR_CTRL, config);
}
-static void sde_hw_intf_collect_misr(struct sde_hw_intf *intf,
- struct sde_misr_params *misr_map)
+static u32 sde_hw_intf_collect_misr(struct sde_hw_intf *intf)
{
struct sde_hw_blk_reg_map *c = &intf->hw;
- if (!misr_map)
- return;
-
- if (misr_map->enable) {
- if (misr_map->last_idx < misr_map->frame_count &&
- misr_map->last_idx < SDE_CRC_BATCH_SIZE)
- misr_map->crc_value[misr_map->last_idx] =
- SDE_REG_READ(c, INTF_MISR_SIGNATURE);
- }
-
- misr_map->enable =
- misr_map->enable & (misr_map->last_idx <= SDE_CRC_BATCH_SIZE);
-
- misr_map->last_idx++;
+ return SDE_REG_READ(c, INTF_MISR_SIGNATURE);
}
static void _setup_intf_ops(struct sde_hw_intf_ops *ops,
@@ -321,7 +295,7 @@ static void _setup_intf_ops(struct sde_hw_intf_ops *ops,
ops->setup_prg_fetch = sde_hw_intf_setup_prg_fetch;
ops->get_status = sde_hw_intf_get_status;
ops->enable_timing = sde_hw_intf_enable_timing_engine;
- ops->setup_misr = sde_hw_intf_set_misr;
+ ops->setup_misr = sde_hw_intf_setup_misr;
ops->collect_misr = sde_hw_intf_collect_misr;
if (cap & BIT(SDE_INTF_ROT_START))
ops->setup_rot_start = sde_hw_intf_setup_rot_start;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
index c6428ca..d24e83a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
@@ -19,24 +19,6 @@
struct sde_hw_intf;
-/* Batch size of frames for collecting MISR data */
-#define SDE_CRC_BATCH_SIZE 16
-
-/**
- * struct sde_misr_params : Interface for getting and setting MISR data
- * Assumption is these functions will be called after clocks are enabled
- * @ enable : enables/disables MISR
- * @ frame_count : represents number of frames for which MISR is enabled
- * @ last_idx: number of frames for which MISR data is collected
- * @ crc_value: stores the collected MISR data
- */
-struct sde_misr_params {
- bool enable;
- u32 frame_count;
- u32 last_idx;
- u32 crc_value[SDE_CRC_BATCH_SIZE];
-};
-
/* intf timing settings */
struct intf_timing_params {
u32 width; /* active width */
@@ -98,10 +80,9 @@ struct sde_hw_intf_ops {
struct intf_status *status);
void (*setup_misr)(struct sde_hw_intf *intf,
- struct sde_misr_params *misr_map);
+ bool enable, u32 frame_count);
- void (*collect_misr)(struct sde_hw_intf *intf,
- struct sde_misr_params *misr_map);
+ u32 (*collect_misr)(struct sde_hw_intf *intf);
};
struct sde_hw_intf {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
index 520c7b1..7780c5b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -33,6 +33,9 @@
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
+#define LM_MISR_CTRL 0x310
+#define LM_MISR_SIGNATURE 0x314
+
static struct sde_lm_cfg *_lm_offset(enum sde_lm mixer,
struct sde_mdss_cfg *m,
void __iomem *addr,
@@ -224,6 +227,30 @@ static void sde_hw_lm_setup_dim_layer(struct sde_hw_mixer *ctx,
SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
}
+static void sde_hw_lm_setup_misr(struct sde_hw_mixer *ctx,
+ bool enable, u32 frame_count)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 config = 0;
+
+ SDE_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+ /* clear misr data */
+ wmb();
+
+ if (enable)
+ config = (frame_count & MISR_FRAME_COUNT_MASK) |
+ MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+ SDE_REG_WRITE(c, LM_MISR_CTRL, config);
+}
+
+static u32 sde_hw_lm_collect_misr(struct sde_hw_mixer *ctx)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+ return SDE_REG_READ(c, LM_MISR_SIGNATURE);
+}
+
static void _setup_mixer_ops(struct sde_mdss_cfg *m,
struct sde_hw_lm_ops *ops,
unsigned long features)
@@ -236,6 +263,8 @@ static void _setup_mixer_ops(struct sde_mdss_cfg *m,
ops->setup_alpha_out = sde_hw_lm_setup_color3;
ops->setup_border_color = sde_hw_lm_setup_border_color;
ops->setup_gc = sde_hw_lm_gc;
+ ops->setup_misr = sde_hw_lm_setup_misr;
+ ops->collect_misr = sde_hw_lm_collect_misr;
if (test_bit(SDE_DIM_LAYER, &features)) {
ops->setup_dim_layer = sde_hw_lm_setup_dim_layer;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.h b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
index 1ef36ac..45c0fc9 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
@@ -79,6 +79,13 @@ struct sde_hw_lm_ops {
* @ctx: Pointer to layer mixer context
*/
void (*clear_dim_layer)(struct sde_hw_mixer *ctx);
+
+ /* setup_misr: enables/disables MISR in HW register */
+ void (*setup_misr)(struct sde_hw_mixer *ctx,
+ bool enable, u32 frame_count);
+
+ /* collect_misr: reads and stores MISR data from HW register */
+ u32 (*collect_misr)(struct sde_hw_mixer *ctx);
};
struct sde_hw_mixer {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.h b/drivers/gpu/drm/msm/sde/sde_hw_util.h
index 008b657..c1bfb79 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_util.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_util.h
@@ -47,6 +47,12 @@ int sde_reg_read(struct sde_hw_blk_reg_map *c, u32 reg_off);
#define SDE_REG_WRITE(c, off, val) sde_reg_write(c, off, val, #off)
#define SDE_REG_READ(c, off) sde_reg_read(c, off)
+#define MISR_FRAME_COUNT_MASK 0xFF
+#define MISR_CTRL_ENABLE BIT(8)
+#define MISR_CTRL_STATUS BIT(9)
+#define MISR_CTRL_STATUS_CLEAR BIT(10)
+#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)
+
void *sde_hw_util_get_dir(void);
void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c,
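
With the MISR control bits now shared from sde_hw_util.h, both
sde_hw_lm_setup_misr() and sde_hw_intf_setup_misr() program the same control
word: clear the status, then write (frame_count & MISR_FRAME_COUNT_MASK) |
MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK when enabling, or zero when
disabling. The standalone sketch below recomputes that word outside the
kernel; BIT() is open-coded so it builds on its own, and no register access
helpers are involved.

#include <stdio.h>

#define BIT(n)				(1u << (n))
#define MISR_FRAME_COUNT_MASK		0xFF
#define MISR_CTRL_ENABLE		BIT(8)
#define MISR_CTRL_STATUS_CLEAR		BIT(10)
#define INTF_MISR_CTRL_FREE_RUN_MASK	BIT(31)

/* mirrors the enable/disable paths of the setup_misr() helpers above */
static unsigned int misr_ctrl_word(int enable, unsigned int frame_count)
{
	if (!enable)
		return 0;
	return (frame_count & MISR_FRAME_COUNT_MASK) |
	       MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
}

int main(void)
{
	/* e.g. "1 1" written to a misr_data debugfs node */
	printf("clear:  0x%08x\n", MISR_CTRL_STATUS_CLEAR);
	printf("enable: 0x%08x\n", misr_ctrl_word(1, 1));
	printf("off:    0x%08x\n", misr_ctrl_word(0, 0));
	return 0;
}
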
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index ef2c80e..b6a9f42 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -81,7 +81,8 @@ MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
static int sde_kms_hw_init(struct msm_kms *kms);
static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
-
+static int _sde_kms_register_events(struct msm_kms *kms,
+ struct drm_mode_object *obj, u32 event, bool en);
bool sde_is_custom_client(void)
{
return sdecustom;
@@ -1231,6 +1232,7 @@ static const struct msm_kms_funcs kms_funcs = {
.get_format = sde_get_msm_format,
.round_pixclk = sde_kms_round_pixclk,
.destroy = sde_kms_destroy,
+ .register_events = _sde_kms_register_events,
};
/* the caller api needs to turn on clock before calling it */
@@ -1596,3 +1598,32 @@ struct msm_kms *sde_kms_init(struct drm_device *dev)
return &sde_kms->base;
}
+
+static int _sde_kms_register_events(struct msm_kms *kms,
+ struct drm_mode_object *obj, u32 event, bool en)
+{
+ int ret = 0;
+ struct drm_crtc *crtc = NULL;
+ struct drm_connector *conn = NULL;
+ struct sde_kms *sde_kms = NULL;
+
+ if (!kms || !obj) {
+ SDE_ERROR("invalid argument kms %pK obj %pK\n", kms, obj);
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(kms);
+ switch (obj->type) {
+ case DRM_MODE_OBJECT_CRTC:
+ crtc = obj_to_crtc(obj);
+ ret = sde_crtc_register_custom_event(sde_kms, crtc, event, en);
+ break;
+ case DRM_MODE_OBJECT_CONNECTOR:
+ conn = obj_to_connector(obj);
+ ret = sde_connector_register_custom_event(sde_kms, conn, event,
+ en);
+ break;
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index fc68820..0be17e4 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -769,8 +769,6 @@ static void _sde_plane_setup_scaler3(struct sde_plane *psde,
psde->pipe_cfg.horz_decimation);
scale_cfg->src_height[i] = DECIMATED_DIMENSION(src_h,
psde->pipe_cfg.vert_decimation);
- if (SDE_FORMAT_IS_YUV(fmt))
- scale_cfg->src_width[i] &= ~0x1;
if (i == SDE_SSPP_COMP_1_2 || i == SDE_SSPP_COMP_2) {
scale_cfg->src_width[i] /= chroma_subsmpl_h;
scale_cfg->src_height[i] /= chroma_subsmpl_v;
@@ -1201,6 +1199,7 @@ static int _sde_plane_color_fill(struct sde_plane *psde,
psde->pipe_cfg.src_rect.y = 0;
psde->pipe_cfg.src_rect.w = psde->pipe_cfg.dst_rect.w;
psde->pipe_cfg.src_rect.h = psde->pipe_cfg.dst_rect.h;
+ _sde_plane_setup_scaler(psde, fmt, 0);
if (psde->pipe_hw->ops.setup_format)
psde->pipe_hw->ops.setup_format(psde->pipe_hw,
@@ -1212,7 +1211,6 @@ static int _sde_plane_color_fill(struct sde_plane *psde,
&psde->pipe_cfg,
pstate->multirect_index);
- _sde_plane_setup_scaler(psde, fmt, 0);
if (psde->pipe_hw->ops.setup_pe)
psde->pipe_hw->ops.setup_pe(psde->pipe_hw,
&psde->pixel_ext);
@@ -2296,6 +2294,8 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
psde->pipe_cfg.src_rect = src;
psde->pipe_cfg.dst_rect = dst;
+ _sde_plane_setup_scaler(psde, fmt, pstate);
+
/* check for color fill */
psde->color_fill = (uint32_t)sde_plane_get_property(pstate,
PLANE_PROP_COLOR_FILL);
@@ -2308,7 +2308,6 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
pstate->multirect_index);
}
- _sde_plane_setup_scaler(psde, fmt, pstate);
if (psde->pipe_hw->ops.setup_pe)
psde->pipe_hw->ops.setup_pe(psde->pipe_hw,
&psde->pixel_ext);
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 7cdd2b2..2709aca 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -621,6 +621,48 @@
#define A6XX_VBIF_PERF_PWR_CNT_HIGH1 0x3119
#define A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x311a
+/* CX_DBGC_CFG registers */
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x18400
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x18401
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x18402
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x18403
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x18404
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT 0x0
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT 0xC
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT 0x1C
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x18405
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT 0x18
+#define A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x18408
+#define A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x18409
+#define A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x1840A
+#define A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x1840B
+#define A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x1840C
+#define A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x1840D
+#define A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x1840E
+#define A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x1840F
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x18410
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x18411
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT 0x0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT 0x4
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT 0x8
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT 0xC
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT 0x10
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT 0x14
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT 0x18
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT 0x1C
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT 0x0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT 0x4
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT 0x8
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT 0xC
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT 0x10
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT 0x14
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT 0x18
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT 0x1C
+#define A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x1842F
+#define A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x18430
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT 0x0
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT 0x8
+
/* GMU control registers */
#define A6XX_GMU_GX_SPTPRAC_POWER_CONTROL 0x1A881
#define A6XX_GMU_CM3_ITCM_START 0x1B400
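These CX_DBGC offsets are dword indices; the snapshot code below maps the block with ioremap() and turns a dword offset into a byte offset by subtracting SEL_A and shifting left by two. A small standalone sketch of that arithmetic:

#include <stdio.h>

#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A      0x18400
#define A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x18430

int main(void)
{
	unsigned int first = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A;
	unsigned int last  = A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2;

	/* Registers are indexed in 32-bit words: byte offset = word delta << 2 */
	printf("ioremap size   : %u bytes\n", (last - first + 1) << 2);
	printf("TRACE_BUF2 off : 0x%x bytes\n", (last - first) << 2);
	return 0;
}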
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index f0d8746..1c37978 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1854,6 +1854,30 @@ static int adreno_getproperty(struct kgsl_device *device,
status = 0;
}
break;
+ case KGSL_PROP_DEVICE_QTIMER:
+ {
+ struct kgsl_qtimer_prop qtimerprop = {0};
+ struct kgsl_memdesc *qtimer_desc =
+ kgsl_mmu_get_qtimer_global_entry(device);
+
+ if (sizebytes != sizeof(qtimerprop)) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (qtimer_desc) {
+ qtimerprop.gpuaddr = qtimer_desc->gpuaddr;
+ qtimerprop.size = qtimer_desc->size;
+ }
+
+ if (copy_to_user(value, &qtimerprop,
+ sizeof(qtimerprop))) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
case KGSL_PROP_MMU_ENABLE:
{
/* Report MMU only if we can handle paged memory */
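For reference, a hedged userspace sketch of querying the new KGSL_PROP_DEVICE_QTIMER case above. It assumes the usual msm_kgsl.h UAPI (IOCTL_KGSL_DEVICE_GETPROPERTY and struct kgsl_device_getproperty), which is not part of this diff.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_kgsl.h>     /* assumed UAPI header location */

int main(void)
{
	struct kgsl_qtimer_prop prop;
	struct kgsl_device_getproperty req;
	int fd = open("/dev/kgsl-3d0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&prop, 0, sizeof(prop));
	memset(&req, 0, sizeof(req));
	req.type = KGSL_PROP_DEVICE_QTIMER;
	req.value = &prop;
	req.sizebytes = sizeof(prop);

	/* gpuaddr/size stay zero when no qtimer global entry was mapped */
	if (!ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &req))
		printf("qtimer gpuaddr=0x%llx size=%llu\n",
		       (unsigned long long)prop.gpuaddr,
		       (unsigned long long)prop.size);
	return 0;
}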
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 7d87096..f2a7963 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -342,6 +342,13 @@ static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
{ A6XX_DBGBUS_TPL1_3, 0x100, },
};
+static void __iomem *a6xx_cx_dbgc;
+static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
+ { A6XX_DBGBUS_VBIF, 0x100, },
+ { A6XX_DBGBUS_GMU, 0x100, },
+ { A6XX_DBGBUS_CX, 0x100, },
+};
+
#define A6XX_NUM_SHADER_BANKS 3
#define A6XX_SHADER_STATETYPE_SHIFT 8
@@ -904,6 +911,100 @@ static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
return size;
}
+static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
+{
+ void __iomem *reg;
+
+ if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
+ (offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
+ "Read beyond CX_DBGC block: 0x%x\n", offsetwords))
+ return;
+
+ reg = a6xx_cx_dbgc +
+ ((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
+ *value = __raw_readl(reg);
+
+ /*
+ * ensure this read finishes before the next one.
+ * i.e. act like normal readl()
+ */
+ rmb();
+}
+
+static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
+{
+ void __iomem *reg;
+
+ if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
+ (offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
+ "Write beyond CX_DBGC block: 0x%x\n", offsetwords))
+ return;
+
+ reg = a6xx_cx_dbgc +
+ ((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
+
+ /*
+ * ensure previous writes post before this one,
+ * i.e. act like normal writel()
+ */
+ wmb();
+ __raw_writel(value, reg);
+}
+
+/* a6xx_cx_debug_bus_read() - Read data from the CX debug trace bus */
+static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
+ unsigned int block_id, unsigned int index, unsigned int *val)
+{
+ unsigned int reg;
+
+ reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
+ (index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
+
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+ _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
+ val++;
+ _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
+}
+
+/*
+ * a6xx_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
+ * block from the CX DBGC block
+ */
+static size_t a6xx_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
+ u8 *buf, size_t remain, void *priv)
+{
+ struct kgsl_snapshot_debugbus *header =
+ (struct kgsl_snapshot_debugbus *)buf;
+ struct adreno_debugbus_block *block = priv;
+ int i;
+ unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+ unsigned int dwords;
+ size_t size;
+
+ dwords = block->dwords;
+
+ /* For a6xx each debug bus data unit is 2 DWORDS */
+ size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
+
+ if (remain < size) {
+ SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
+ return 0;
+ }
+
+ header->id = block->block_id;
+ header->count = dwords * 2;
+
+ for (i = 0; i < dwords; i++)
+ a6xx_cx_debug_bus_read(device, block->block_id, i,
+ &data[i*2]);
+
+ return size;
+}
+
/* a6xx_snapshot_debugbus() - Capture debug bus data */
static void a6xx_snapshot_debugbus(struct kgsl_device *device,
struct kgsl_snapshot *snapshot)
@@ -947,12 +1048,67 @@ static void a6xx_snapshot_debugbus(struct kgsl_device *device,
kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+ a6xx_cx_dbgc = ioremap(device->reg_phys +
+ (A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
+ (A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
+ A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);
+
+ if (a6xx_cx_dbgc) {
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
+ (0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
+ (0x4 << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
+ (0x20 << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
+
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
+ 0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);
+
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
+ (0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
+ (1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
+ (2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
+ (3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
+ (4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
+ (5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
+ (6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
+ (7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
+ (8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
+ (9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
+ (10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
+ (11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
+ (12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
+ (13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
+ (14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
+ (15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
+
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+ } else
+ KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");
+
for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
kgsl_snapshot_add_section(device,
KGSL_SNAPSHOT_SECTION_DEBUGBUS,
snapshot, a6xx_snapshot_dbgc_debugbus_block,
(void *) &a6xx_dbgc_debugbus_blocks[i]);
}
+
+ if (a6xx_cx_dbgc) {
+ for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
+ kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUGBUS,
+ snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
+ (void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
+ }
+ iounmap(a6xx_cx_dbgc);
+ }
}
static size_t a6xx_snapshot_dump_gmu_registers(struct kgsl_device *device,
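Each CX debug-bus index yields two dwords (TRACE_BUF2 then TRACE_BUF1), so the snapshot section is sized as dwords * 2 * sizeof(u32) plus the header, and sample i lands at data[i*2]. A tiny sketch of that sizing; the header struct here is only a stand-in, not the real kgsl_snapshot_debugbus layout.

#include <stdio.h>

struct fake_debugbus_header { unsigned int id, count; };  /* stand-in only */

int main(void)
{
	unsigned int dwords = 0x100;   /* block->dwords for the CX blocks above */
	size_t size = dwords * sizeof(unsigned int) * 2 +
		      sizeof(struct fake_debugbus_header);

	printf("count field : %u\n", dwords * 2);
	printf("section size: %zu bytes\n", size);
	return 0;
}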
diff --git a/drivers/gpu/msm/adreno_compat.c b/drivers/gpu/msm/adreno_compat.c
index d86a0c6..5a8d587 100644
--- a/drivers/gpu/msm/adreno_compat.c
+++ b/drivers/gpu/msm/adreno_compat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -113,6 +113,30 @@ int adreno_getproperty_compat(struct kgsl_device *device,
status = 0;
}
break;
+ case KGSL_PROP_DEVICE_QTIMER:
+ {
+ struct kgsl_qtimer_prop qtimerprop = {0};
+ struct kgsl_memdesc *qtimer_desc =
+ kgsl_mmu_get_qtimer_global_entry(device);
+
+ if (sizebytes != sizeof(qtimerprop)) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (qtimer_desc) {
+ qtimerprop.gpuaddr = qtimer_desc->gpuaddr;
+ qtimerprop.size = qtimer_desc->size;
+ }
+
+ if (copy_to_user(value, &qtimerprop,
+ sizeof(qtimerprop))) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
default:
/*
* Call the adreno_getproperty to check if the property type
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index cfd5cd1..0325db8 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -113,6 +113,7 @@ static struct kgsl_memdesc *kgsl_global_secure_pt_entry;
static int global_pt_count;
uint64_t global_pt_alloc;
static struct kgsl_memdesc gpu_qdss_desc;
+static struct kgsl_memdesc gpu_qtimer_desc;
void kgsl_print_global_pt_entries(struct seq_file *s)
{
@@ -272,6 +273,50 @@ static inline void kgsl_cleanup_qdss_desc(struct kgsl_mmu *mmu)
kgsl_sharedmem_free(&gpu_qdss_desc);
}
+struct kgsl_memdesc *kgsl_iommu_get_qtimer_global_entry(void)
+{
+ return &gpu_qtimer_desc;
+}
+
+static void kgsl_setup_qtimer_desc(struct kgsl_device *device)
+{
+ int result = 0;
+ uint32_t gpu_qtimer_entry[2];
+
+ if (!of_find_property(device->pdev->dev.of_node,
+ "qcom,gpu-qtimer", NULL))
+ return;
+
+ if (of_property_read_u32_array(device->pdev->dev.of_node,
+ "qcom,gpu-qtimer", gpu_qtimer_entry, 2)) {
+ KGSL_CORE_ERR("Failed to read gpu qtimer dts entry\n");
+ return;
+ }
+
+ gpu_qtimer_desc.flags = 0;
+ gpu_qtimer_desc.priv = 0;
+ gpu_qtimer_desc.physaddr = gpu_qtimer_entry[0];
+ gpu_qtimer_desc.size = gpu_qtimer_entry[1];
+ gpu_qtimer_desc.pagetable = NULL;
+ gpu_qtimer_desc.ops = NULL;
+ gpu_qtimer_desc.dev = device->dev->parent;
+ gpu_qtimer_desc.hostptr = NULL;
+
+ result = memdesc_sg_dma(&gpu_qtimer_desc, gpu_qtimer_desc.physaddr,
+ gpu_qtimer_desc.size);
+ if (result) {
+ KGSL_CORE_ERR("memdesc_sg_dma failed: %d\n", result);
+ return;
+ }
+
+ kgsl_mmu_add_global(device, &gpu_qtimer_desc, "gpu-qtimer");
+}
+
+static inline void kgsl_cleanup_qtimer_desc(struct kgsl_mmu *mmu)
+{
+ kgsl_iommu_remove_global(mmu, &gpu_qtimer_desc);
+ kgsl_sharedmem_free(&gpu_qtimer_desc);
+}
static inline void _iommu_sync_mmu_pc(bool lock)
{
@@ -1452,6 +1497,7 @@ static void kgsl_iommu_close(struct kgsl_mmu *mmu)
kgsl_iommu_remove_global(mmu, &iommu->setstate);
kgsl_sharedmem_free(&iommu->setstate);
kgsl_cleanup_qdss_desc(mmu);
+ kgsl_cleanup_qtimer_desc(mmu);
}
static int _setstate_alloc(struct kgsl_device *device,
@@ -1523,6 +1569,7 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
kgsl_iommu_add_global(mmu, &iommu->setstate, "setstate");
kgsl_setup_qdss_desc(device);
+ kgsl_setup_qtimer_desc(device);
done:
if (status)
@@ -2671,6 +2718,7 @@ struct kgsl_mmu_ops kgsl_iommu_ops = {
.mmu_remove_global = kgsl_iommu_remove_global,
.mmu_getpagetable = kgsl_iommu_getpagetable,
.mmu_get_qdss_global_entry = kgsl_iommu_get_qdss_global_entry,
+ .mmu_get_qtimer_global_entry = kgsl_iommu_get_qtimer_global_entry,
.probe = kgsl_iommu_probe,
};
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 9e516e1..8ea4492 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -617,6 +617,18 @@ struct kgsl_memdesc *kgsl_mmu_get_qdss_global_entry(struct kgsl_device *device)
}
EXPORT_SYMBOL(kgsl_mmu_get_qdss_global_entry);
+struct kgsl_memdesc *kgsl_mmu_get_qtimer_global_entry(
+ struct kgsl_device *device)
+{
+ struct kgsl_mmu *mmu = &device->mmu;
+
+ if (MMU_OP_VALID(mmu, mmu_get_qtimer_global_entry))
+ return mmu->mmu_ops->mmu_get_qtimer_global_entry();
+
+ return NULL;
+}
+EXPORT_SYMBOL(kgsl_mmu_get_qtimer_global_entry);
+
/*
* NOMMU definitions - NOMMU really just means that the MMU is kept in pass
* through and the GPU directly accesses physical memory. Used in debug mode
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 0f9f486..56bb317 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -83,6 +83,7 @@ struct kgsl_mmu_ops {
struct kgsl_pagetable * (*mmu_getpagetable)(struct kgsl_mmu *mmu,
unsigned long name);
struct kgsl_memdesc* (*mmu_get_qdss_global_entry)(void);
+ struct kgsl_memdesc* (*mmu_get_qtimer_global_entry)(void);
};
struct kgsl_mmu_pt_ops {
@@ -233,6 +234,9 @@ int kgsl_mmu_unmap_offset(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *kgsl_mmu_get_qdss_global_entry(struct kgsl_device *device);
+struct kgsl_memdesc *kgsl_mmu_get_qtimer_global_entry(
+ struct kgsl_device *device);
+
int kgsl_mmu_sparse_dummy_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc, uint64_t offset, uint64_t size);
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index 302cf14..e412230 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -163,12 +163,16 @@
#define FG_ADC_RR_DIE_TEMP_SLOPE 2
#define FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC 25000
-#define FAB_ID_GF 0x30
-#define FAB_ID_SMIC 0x11
#define FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV 1303168
#define FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C 3784
#define FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV 1338433
#define FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C 3655
+#define FG_ADC_RR_CHG_TEMP_660_GF_OFFSET_UV 1309001
+#define FG_RR_CHG_TEMP_660_GF_SLOPE_UV_PER_C 3403
+#define FG_ADC_RR_CHG_TEMP_660_SMIC_OFFSET_UV 1295898
+#define FG_RR_CHG_TEMP_660_SMIC_SLOPE_UV_PER_C 3596
+#define FG_ADC_RR_CHG_TEMP_660_MGNA_OFFSET_UV 1314779
+#define FG_RR_CHG_TEMP_660_MGNA_SLOPE_UV_PER_C 3496
#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC 25000
#define FG_ADC_RR_CHG_THRESHOLD_SCALE 4
@@ -388,23 +392,70 @@ static int rradc_post_process_die_temp(struct rradc_chip *chip,
return 0;
}
+static int rradc_get_660_fab_coeff(struct rradc_chip *chip,
+ int64_t *offset, int64_t *slope)
+{
+ switch (chip->pmic_fab_id->fab_id) {
+ case PM660_FAB_ID_GF:
+ *offset = FG_ADC_RR_CHG_TEMP_660_GF_OFFSET_UV;
+ *slope = FG_RR_CHG_TEMP_660_GF_SLOPE_UV_PER_C;
+ break;
+ case PM660_FAB_ID_TSMC:
+ *offset = FG_ADC_RR_CHG_TEMP_660_SMIC_OFFSET_UV;
+ *slope = FG_RR_CHG_TEMP_660_SMIC_SLOPE_UV_PER_C;
+ break;
+ default:
+ *offset = FG_ADC_RR_CHG_TEMP_660_MGNA_OFFSET_UV;
+ *slope = FG_RR_CHG_TEMP_660_MGNA_SLOPE_UV_PER_C;
+ }
+
+ return 0;
+}
+
+static int rradc_get_8998_fab_coeff(struct rradc_chip *chip,
+ int64_t *offset, int64_t *slope)
+{
+ switch (chip->pmic_fab_id->fab_id) {
+ case PMI8998_FAB_ID_GF:
+ *offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
+ *slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ break;
+ case PMI8998_FAB_ID_SMIC:
+ *offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
+ *slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rradc_post_process_chg_temp_hot(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
int *result_millidegc)
{
int64_t uv = 0, offset = 0, slope = 0;
+ int rc = 0;
if (chip->revid_dev_node) {
- switch (chip->pmic_fab_id->fab_id) {
- case FAB_ID_GF:
- offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
- slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ switch (chip->pmic_fab_id->pmic_subtype) {
+ case PM660_SUBTYPE:
+ rc = rradc_get_660_fab_coeff(chip, &offset, &slope);
+ if (rc < 0) {
+ pr_err("Unable to get fab id coefficients\n");
+ return -EINVAL;
+ }
break;
- case FAB_ID_SMIC:
- offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
- slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ case PMI8998_SUBTYPE:
+ rc = rradc_get_8998_fab_coeff(chip, &offset, &slope);
+ if (rc < 0) {
+ pr_err("Unable to get fab id coefficients\n");
+ return -EINVAL;
+ }
break;
default:
+ pr_err("No PMIC subtype found\n");
return -EINVAL;
}
} else {
@@ -444,18 +495,26 @@ static int rradc_post_process_chg_temp(struct rradc_chip *chip,
int *result_millidegc)
{
int64_t uv = 0, offset = 0, slope = 0;
+ int rc = 0;
if (chip->revid_dev_node) {
- switch (chip->pmic_fab_id->fab_id) {
- case FAB_ID_GF:
- offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
- slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ switch (chip->pmic_fab_id->pmic_subtype) {
+ case PM660_SUBTYPE:
+ rc = rradc_get_660_fab_coeff(chip, &offset, &slope);
+ if (rc < 0) {
+ pr_err("Unable to get fab id coefficients\n");
+ return -EINVAL;
+ }
break;
- case FAB_ID_SMIC:
- offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
- slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ case PMI8998_SUBTYPE:
+ rc = rradc_get_8998_fab_coeff(chip, &offset, &slope);
+ if (rc < 0) {
+ pr_err("Unable to get fab id coefficients\n");
+ return -EINVAL;
+ }
break;
default:
+ pr_err("No PMIC subtype found\n");
return -EINVAL;
}
} else {
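The per-fab constants pair a 25 degC reference voltage (the *_OFFSET_UV values) with a microvolt-per-degC slope. The conversion itself is outside this hunk, so the following standalone sketch of a linear mapping is only an illustration of what such coefficients imply; the sign convention is an assumption.

#include <stdio.h>
#include <stdint.h>

#define FG_ADC_RR_CHG_TEMP_660_GF_OFFSET_UV   1309001
#define FG_RR_CHG_TEMP_660_GF_SLOPE_UV_PER_C  3403
#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC  25000

int main(void)
{
	int64_t uv     = 1275000;   /* hypothetical charger-temp reading, in uV */
	int64_t offset = FG_ADC_RR_CHG_TEMP_660_GF_OFFSET_UV;
	int64_t slope  = FG_RR_CHG_TEMP_660_GF_SLOPE_UV_PER_C;

	/* offset corresponds to 25 degC; slope is uV per degC (direction assumed) */
	int64_t mdegc = FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC +
			((offset - uv) * 1000) / slope;

	printf("~%lld millidegC\n", (long long)mdegc);
	return 0;
}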
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 0850563..34df44c 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -3946,16 +3946,36 @@ IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
+#define TCU_HW_VERSION_HLOS1 (0x18)
+
#define DEBUG_SID_HALT_REG 0x0
#define DEBUG_SID_HALT_VAL (0x1 << 16)
+#define DEBUG_SID_HALT_SID_MASK 0x3ff
+
+#define DEBUG_VA_ADDR_REG 0x8
+
+#define DEBUG_TXN_TRIGG_REG 0x18
+#define DEBUG_TXN_AXPROT_SHIFT 6
+#define DEBUG_TXN_AXCACHE_SHIFT 2
+#define DEBUG_TRX_WRITE (0x1 << 1)
+#define DEBUG_TXN_READ (0x0 << 1)
+#define DEBUG_TXN_TRIGGER 0x1
#define DEBUG_SR_HALT_ACK_REG 0x20
#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
+#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
+
+#define DEBUG_PAR_REG 0x28
+#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
+#define DEBUG_PAR_PA_SHIFT 12
+#define DEBUG_PAR_FAULT_VAL 0x1
#define TBU_DBG_TIMEOUT_US 30000
struct qsmmuv500_archdata {
struct list_head tbus;
+ void __iomem *tcu_base;
+ u32 version;
};
struct qsmmuv500_tbu_device {
@@ -3966,6 +3986,8 @@ struct qsmmuv500_tbu_device {
void __iomem *status_reg;
struct arm_smmu_power_resources *pwr;
+ u32 sid_start;
+ u32 num_sids;
/* Protects halt count */
spinlock_t halt_lock;
@@ -4091,6 +4113,20 @@ static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
}
}
+static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
+ struct arm_smmu_device *smmu, u32 sid)
+{
+ struct qsmmuv500_tbu_device *tbu = NULL;
+ struct qsmmuv500_archdata *data = smmu->archdata;
+
+ list_for_each_entry(tbu, &data->tbus, list) {
+ if (tbu->sid_start <= sid &&
+ sid < tbu->sid_start + tbu->num_sids)
+ break;
+ }
+ return tbu;
+}
+
static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
{
int i, ret;
@@ -4109,6 +4145,187 @@ static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
qsmmuv500_tbu_power_off_all(smmu);
}
+static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
+ struct qsmmuv500_tbu_device *tbu,
+ unsigned long *flags)
+{
+ struct arm_smmu_device *smmu = tbu->smmu;
+ struct qsmmuv500_archdata *data = smmu->archdata;
+ u32 val;
+
+ spin_lock_irqsave(&smmu->atos_lock, *flags);
+ /* The status register is not accessible on version 1.0 */
+ if (data->version == 0x01000000)
+ return 0;
+
+ if (readl_poll_timeout_atomic(tbu->status_reg,
+ val, (val == 0x1), 0,
+ TBU_DBG_TIMEOUT_US)) {
+ dev_err(tbu->dev, "ECATS hw busy!\n");
+ spin_unlock_irqrestore(&smmu->atos_lock, *flags);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
+ struct qsmmuv500_tbu_device *tbu,
+ unsigned long *flags)
+{
+ struct arm_smmu_device *smmu = tbu->smmu;
+ struct qsmmuv500_archdata *data = smmu->archdata;
+
+ /* The status register is not accessible on version 1.0 */
+ if (data->version != 0x01000000)
+ writel_relaxed(0, tbu->status_reg);
+ spin_unlock_irqrestore(&smmu->atos_lock, *flags);
+}
+
+/*
+ * Zero means failure.
+ */
+static phys_addr_t qsmmuv500_iova_to_phys(
+ struct iommu_domain *domain, dma_addr_t iova, u32 sid)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct qsmmuv500_tbu_device *tbu;
+ int ret;
+ phys_addr_t phys = 0;
+ u64 val, fsr;
+ unsigned long flags;
+ void __iomem *cb_base;
+ u32 sctlr_orig, sctlr;
+ int needs_redo = 0;
+
+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ tbu = qsmmuv500_find_tbu(smmu, sid);
+ if (!tbu)
+ return 0;
+
+ ret = arm_smmu_power_on(tbu->pwr);
+ if (ret)
+ return 0;
+
+ /*
+ * Disable client transactions & wait for existing operations to
+ * complete.
+ */
+ ret = qsmmuv500_tbu_halt(tbu);
+ if (ret)
+ goto out_power_off;
+
+ /* Only one concurrent atos operation */
+ ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
+ if (ret)
+ goto out_resume;
+
+ /*
+ * We can be called from an interrupt handler with FSR already set
+ * so terminate the faulting transaction prior to starting ecats.
+ * No new racing faults can occur since we are in the halted state.
+ * ECATS can trigger the fault interrupt, so disable it temporarily
+ * and check for an interrupt manually.
+ */
+ fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+ if (fsr & FSR_FAULT) {
+ writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+ writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
+ }
+ sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+ sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
+ writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
+
+redo:
+ /* Set address and stream-id */
+ val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+ val |= sid & DEBUG_SID_HALT_SID_MASK;
+ writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
+ writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
+
+ /*
+ * Write-back Read and Write-Allocate
+ * Privileged, non-secure, data transaction
+ * Read operation.
+ */
+ val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
+ val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
+ val |= DEBUG_TXN_TRIGGER;
+ writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
+
+ ret = 0;
+ if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG,
+ val, !(val & DEBUG_SR_ECATS_RUNNING_VAL),
+ 0, TBU_DBG_TIMEOUT_US)) {
+ dev_err(tbu->dev, "ECATS translation timed out!\n");
+ }
+
+ fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+ if (fsr & FSR_FAULT) {
+ dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
+ val);
+ ret = -EINVAL;
+
+ writel_relaxed(val, cb_base + ARM_SMMU_CB_FSR);
+ /*
+ * Clear pending interrupts
+ * Barrier required to ensure that the FSR is cleared
+ * before resuming SMMU operation
+ */
+ wmb();
+ writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
+ }
+
+ val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
+ if (val & DEBUG_PAR_FAULT_VAL) {
+ dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
+ val);
+ ret = -EINVAL;
+ }
+
+ phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
+ if (ret < 0)
+ phys = 0;
+
+ /* Reset hardware */
+ writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
+ writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
+
+ /*
+ * After a failed translation, the hardware may incorrectly report the
+ * next successful translation as a failure, so retry up to two times.
+ */
+ if (!phys && needs_redo++ < 2)
+ goto redo;
+
+ writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
+ qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
+
+out_resume:
+ qsmmuv500_tbu_resume(tbu);
+
+out_power_off:
+ arm_smmu_power_off(tbu->pwr);
+
+ return phys;
+}
+
+static phys_addr_t qsmmuv500_iova_to_phys_hard(
+ struct iommu_domain *domain, dma_addr_t iova)
+{
+ u16 sid;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct iommu_fwspec *fwspec;
+
+ /* Select a sid */
+ fwspec = smmu_domain->dev->iommu_fwspec;
+ sid = (u16)fwspec->ids[0];
+
+ return qsmmuv500_iova_to_phys(domain, iova, sid);
+}
+
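The ECATS sequence above halts the TBU, programs the SID and VA, triggers a debug read transaction, then decodes DEBUG_PAR. A standalone sketch of the PAR decode; what callers do with the extracted field (for example, re-adding the page offset) is outside this hunk.

#include <stdio.h>
#include <stdint.h>

#define DEBUG_PAR_PA_MASK   ((0x1ULL << 36) - 1)
#define DEBUG_PAR_PA_SHIFT  12
#define DEBUG_PAR_FAULT_VAL 0x1

int main(void)
{
	uint64_t par = 0x812345000ULL;   /* hypothetical DEBUG_PAR value */

	if (par & DEBUG_PAR_FAULT_VAL) {
		printf("ECATS translation faulted\n");
		return 1;
	}

	/* 36-bit PA field starting at bit 12, extracted as the driver does */
	printf("PA field: 0x%llx\n",
	       (unsigned long long)((par >> DEBUG_PAR_PA_SHIFT) &
				    DEBUG_PAR_PA_MASK));
	return 0;
}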
static int qsmmuv500_tbu_register(struct device *dev, void *data)
{
struct arm_smmu_device *smmu = data;
@@ -4130,8 +4347,10 @@ static int qsmmuv500_tbu_register(struct device *dev, void *data)
static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
{
+ struct resource *res;
struct device *dev = smmu->dev;
struct qsmmuv500_archdata *data;
+ struct platform_device *pdev;
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
@@ -4139,6 +4358,14 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
return -ENOMEM;
INIT_LIST_HEAD(&data->tbus);
+
+ pdev = container_of(dev, struct platform_device, dev);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
+ data->tcu_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->tcu_base))
+ return PTR_ERR(data->tcu_base);
+
+ data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
smmu->archdata = data;
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
@@ -4156,6 +4383,7 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
.init = qsmmuv500_arch_init,
.device_reset = qsmmuv500_device_reset,
+ .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
};
static const struct of_device_id qsmmuv500_tbu_of_match[] = {
@@ -4168,6 +4396,8 @@ static int qsmmuv500_tbu_probe(struct platform_device *pdev)
struct resource *res;
struct device *dev = &pdev->dev;
struct qsmmuv500_tbu_device *tbu;
+ const __be32 *cell;
+ int len;
tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
if (!tbu)
@@ -4187,6 +4417,13 @@ static int qsmmuv500_tbu_probe(struct platform_device *pdev)
if (IS_ERR(tbu->status_reg))
return PTR_ERR(tbu->status_reg);
+ cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
+ if (!cell || len < 8)
+ return -EINVAL;
+
+ tbu->sid_start = of_read_number(cell, 1);
+ tbu->num_sids = of_read_number(cell + 1, 1);
+
tbu->pwr = arm_smmu_init_power_resources(pdev);
if (IS_ERR(tbu->pwr))
return PTR_ERR(tbu->pwr);
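Each TBU advertises a two-cell qcom,stream-id-range of the first stream-id and the set size, and qsmmuv500_find_tbu() picks the TBU whose half-open range contains the SID being translated. A small standalone sketch of that containment test; the range values here are made up.

#include <stdio.h>
#include <stdint.h>

struct tbu_range { uint32_t sid_start, num_sids; };

/* True when sid falls in [sid_start, sid_start + num_sids) */
static int tbu_owns_sid(const struct tbu_range *t, uint32_t sid)
{
	return t->sid_start <= sid && sid < t->sid_start + t->num_sids;
}

int main(void)
{
	struct tbu_range t = { 0x1000, 0x200 };   /* hypothetical range */

	printf("0x0fff:%d 0x1000:%d 0x11ff:%d 0x1200:%d\n",
	       tbu_owns_sid(&t, 0x0fff), tbu_owns_sid(&t, 0x1000),
	       tbu_owns_sid(&t, 0x11ff), tbu_owns_sid(&t, 0x1200));
	return 0;
}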
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 01e553c..b045e3b 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -1077,6 +1077,8 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
pr_err("trigger lmh mitigation failed, rc=%d\n", rc);
return rc;
}
+ /* Wait for LMH mitigation to take effect */
+ udelay(500);
}
if (led->trigger_chgr) {
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index c31d2e1..3060cfa 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -33,6 +33,7 @@
/* ctrl registers */
#define QPNP_WLED_FAULT_STATUS(b) (b + 0x08)
+#define QPNP_WLED_INT_RT_STS(b) (b + 0x10)
#define QPNP_WLED_EN_REG(b) (b + 0x46)
#define QPNP_WLED_FDBK_OP_REG(b) (b + 0x48)
#define QPNP_WLED_VREF_REG(b) (b + 0x49)
@@ -44,6 +45,7 @@
#define QPNP_WLED_SOFTSTART_RAMP_DLY(b) (b + 0x53)
#define QPNP_WLED_VLOOP_COMP_RES_REG(b) (b + 0x55)
#define QPNP_WLED_VLOOP_COMP_GM_REG(b) (b + 0x56)
+#define QPNP_WLED_EN_PSM_REG(b) (b + 0x5A)
#define QPNP_WLED_PSM_CTRL_REG(b) (b + 0x5B)
#define QPNP_WLED_LCD_AUTO_PFM_REG(b) (b + 0x5C)
#define QPNP_WLED_SC_PRO_REG(b) (b + 0x5E)
@@ -82,12 +84,13 @@
#define QPNP_WLED_VREF_PSM_MIN_MV 400
#define QPNP_WLED_VREF_PSM_MAX_MV 750
#define QPNP_WLED_VREF_PSM_DFLT_AMOLED_MV 450
-#define QPNP_WLED_PSM_CTRL_OVERWRITE 0x80
+#define QPNP_WLED_PSM_OVERWRITE_BIT BIT(7)
#define QPNP_WLED_LCD_AUTO_PFM_DFLT_THRESH 1
#define QPNP_WLED_LCD_AUTO_PFM_THRESH_MAX 0xF
#define QPNP_WLED_LCD_AUTO_PFM_EN_SHIFT 7
#define QPNP_WLED_LCD_AUTO_PFM_EN_BIT BIT(7)
#define QPNP_WLED_LCD_AUTO_PFM_THRESH_MASK GENMASK(3, 0)
+#define QPNP_WLED_EN_PSM_BIT BIT(7)
#define QPNP_WLED_ILIM_MASK GENMASK(2, 0)
#define QPNP_WLED_ILIM_OVERWRITE BIT(7)
@@ -117,6 +120,9 @@
QPNP_WLED_TEST4_EN_CLAMP_BIT | \
QPNP_WLED_TEST4_EN_SOFT_START_BIT)
#define QPNP_WLED_TEST4_EN_IIND_UP 0x1
+#define QPNP_WLED_ILIM_FAULT_BIT BIT(0)
+#define QPNP_WLED_OVP_FAULT_BIT BIT(1)
+#define QPNP_WLED_SC_FAULT_BIT BIT(2)
/* sink registers */
#define QPNP_WLED_CURR_SINK_REG(b) (b + 0x46)
@@ -335,6 +341,7 @@ static struct wled_vref_setting vref_setting_pmi8998 = {
* @ lcd_auto_pfm_thresh - the threshold for lcd auto pfm mode
* @ loop_auto_gm_en - select if auto gm is enabled
* @ lcd_auto_pfm_en - select if auto pfm is enabled in lcd mode
+ * @ lcd_psm_ctrl - select if psm needs to be controlled in lcd mode
* @ avdd_mode_spmi - enable avdd programming via spmi
* @ en_9b_dim_res - enable or disable 9bit dimming
* @ en_phase_stag - enable or disable phase staggering
@@ -380,6 +387,7 @@ struct qpnp_wled {
u8 lcd_auto_pfm_thresh;
bool loop_auto_gm_en;
bool lcd_auto_pfm_en;
+ bool lcd_psm_ctrl;
bool avdd_mode_spmi;
bool en_9b_dim_res;
bool en_phase_stag;
@@ -549,6 +557,30 @@ static int qpnp_wled_set_level(struct qpnp_wled *wled, int level)
return 0;
}
+static int qpnp_wled_psm_config(struct qpnp_wled *wled, bool enable)
+{
+ int rc;
+
+ if (!wled->lcd_psm_ctrl)
+ return 0;
+
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_EN_PSM_REG(wled->ctrl_base),
+ QPNP_WLED_EN_PSM_BIT,
+ enable ? QPNP_WLED_EN_PSM_BIT : 0);
+ if (rc < 0)
+ return rc;
+
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_PSM_CTRL_REG(wled->ctrl_base),
+ QPNP_WLED_PSM_OVERWRITE_BIT,
+ enable ? QPNP_WLED_PSM_OVERWRITE_BIT : 0);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
static int qpnp_wled_module_en(struct qpnp_wled *wled,
u16 base_addr, bool state)
{
@@ -561,21 +593,31 @@ static int qpnp_wled_module_en(struct qpnp_wled *wled,
if (rc < 0)
return rc;
- if (wled->ovp_irq > 0) {
- if (state && wled->ovp_irq_disabled) {
- /*
- * Wait for at least 10ms before enabling OVP fault
- * interrupt after enabling the module so that soft
- * start is completed. Keep OVP interrupt disabled
- * when the module is disabled.
- */
- usleep_range(10000, 11000);
+ /*
+ * Wait for at least 10ms before enabling OVP fault interrupt after
+ * enabling the module so that soft start is completed. Also, this
+ * delay can be used to control PSM during enable when required. Keep
+ * OVP interrupt disabled when the module is disabled.
+ */
+ if (state) {
+ usleep_range(10000, 11000);
+ rc = qpnp_wled_psm_config(wled, false);
+ if (rc < 0)
+ return rc;
+
+ if (wled->ovp_irq > 0 && wled->ovp_irq_disabled) {
enable_irq(wled->ovp_irq);
wled->ovp_irq_disabled = false;
- } else if (!state && !wled->ovp_irq_disabled) {
+ }
+ } else {
+ if (wled->ovp_irq > 0 && !wled->ovp_irq_disabled) {
disable_irq(wled->ovp_irq);
wled->ovp_irq_disabled = true;
}
+
+ rc = qpnp_wled_psm_config(wled, true);
+ if (rc < 0)
+ return rc;
}
return 0;
@@ -990,7 +1032,7 @@ static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr)
reg &= QPNP_WLED_VREF_PSM_MASK;
reg |= ((wled->vref_psm_mv - QPNP_WLED_VREF_PSM_MIN_MV)/
QPNP_WLED_VREF_PSM_STEP_MV);
- reg |= QPNP_WLED_PSM_CTRL_OVERWRITE;
+ reg |= QPNP_WLED_PSM_OVERWRITE_BIT;
rc = qpnp_wled_write_reg(wled,
QPNP_WLED_PSM_CTRL_REG(wled->ctrl_base), reg);
if (rc)
@@ -1053,16 +1095,25 @@ static irqreturn_t qpnp_wled_ovp_irq_handler(int irq, void *_wled)
{
struct qpnp_wled *wled = _wled;
int rc;
- u8 val;
+ u8 fault_sts, int_sts;
rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_FAULT_STATUS(wled->ctrl_base), &val);
+ QPNP_WLED_INT_RT_STS(wled->ctrl_base), &int_sts);
+ if (rc < 0) {
+ pr_err("Error in reading WLED_INT_RT_STS rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ rc = qpnp_wled_read_reg(wled,
+ QPNP_WLED_FAULT_STATUS(wled->ctrl_base), &fault_sts);
if (rc < 0) {
pr_err("Error in reading WLED_FAULT_STATUS rc=%d\n", rc);
return IRQ_HANDLED;
}
- pr_err("WLED OVP fault detected, fault_status= %x\n", val);
+ if (fault_sts & (QPNP_WLED_OVP_FAULT_BIT | QPNP_WLED_ILIM_FAULT_BIT))
+ pr_err("WLED OVP fault detected, int_sts=%x fault_sts= %x\n",
+ int_sts, fault_sts);
return IRQ_HANDLED;
}
@@ -1677,6 +1728,8 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
wled->ovp_irq, rc);
return rc;
}
+ disable_irq(wled->ovp_irq);
+ wled->ovp_irq_disabled = true;
}
if (wled->sc_irq >= 0) {
@@ -2063,6 +2116,8 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled)
wled->en_ext_pfet_sc_pro = of_property_read_bool(pdev->dev.of_node,
"qcom,en-ext-pfet-sc-pro");
+ wled->lcd_psm_ctrl = of_property_read_bool(pdev->dev.of_node,
+ "qcom,lcd-psm-ctrl");
return 0;
}
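qpnp_wled_psm_config() only flips single bits in EN_PSM and PSM_CTRL through the driver's masked-write helper: PSM is forced on when the module is disabled and released again after the ~10 ms soft-start delay on enable. A standalone sketch of the read-modify-write those two calls boil down to:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)                      (1u << (n))
#define QPNP_WLED_PSM_OVERWRITE_BIT BIT(7)

/* Only the bits selected by mask are changed; everything else is preserved */
static uint8_t masked_write(uint8_t reg, uint8_t mask, uint8_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint8_t psm_ctrl = 0x23;   /* hypothetical PSM_CTRL contents */

	printf("psm on : 0x%02x\n",
	       masked_write(psm_ctrl, QPNP_WLED_PSM_OVERWRITE_BIT,
			    QPNP_WLED_PSM_OVERWRITE_BIT));
	printf("psm off: 0x%02x\n",
	       masked_write(psm_ctrl, QPNP_WLED_PSM_OVERWRITE_BIT, 0));
	return 0;
}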
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index 85a6be8..817dfa3 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -897,9 +897,10 @@ static int qpnp_mpp_set(struct qpnp_led_data *led)
}
}
- if (led->mpp_cfg->pwm_mode != MANUAL_MODE)
+ if (led->mpp_cfg->pwm_mode != MANUAL_MODE) {
pwm_enable(led->mpp_cfg->pwm_cfg->pwm_dev);
- else {
+ led->mpp_cfg->pwm_cfg->pwm_enabled = 1;
+ } else {
if (led->cdev.brightness < LED_MPP_CURRENT_MIN)
led->cdev.brightness = LED_MPP_CURRENT_MIN;
else {
@@ -950,6 +951,7 @@ static int qpnp_mpp_set(struct qpnp_led_data *led)
led->mpp_cfg->pwm_mode =
led->mpp_cfg->pwm_cfg->default_mode;
pwm_disable(led->mpp_cfg->pwm_cfg->pwm_dev);
+ led->mpp_cfg->pwm_cfg->pwm_enabled = 0;
}
rc = qpnp_led_masked_write(led,
LED_MPP_MODE_CTRL(led->base),
@@ -1606,7 +1608,7 @@ static int qpnp_kpdbl_set(struct qpnp_led_data *led)
dev_err(&led->pdev->dev, "pwm enable failed\n");
return rc;
}
-
+ led->kpdbl_cfg->pwm_cfg->pwm_enabled = 1;
set_bit(led->kpdbl_cfg->row_id, kpdbl_leds_in_use);
/* is_kpdbl_master_turn_on will be set to true when GPLED1
@@ -1642,6 +1644,7 @@ static int qpnp_kpdbl_set(struct qpnp_led_data *led)
"pwm enable failed\n");
return rc;
}
+ led->kpdbl_cfg->pwm_cfg->pwm_enabled = 1;
} else {
if (kpdbl_master) {
pwm_disable(kpdbl_master);
@@ -1660,6 +1663,7 @@ static int qpnp_kpdbl_set(struct qpnp_led_data *led)
is_kpdbl_master_turn_on = false;
} else {
pwm_disable(led->kpdbl_cfg->pwm_cfg->pwm_dev);
+ led->kpdbl_cfg->pwm_cfg->pwm_enabled = 0;
clear_bit(led->kpdbl_cfg->row_id, kpdbl_leds_in_use);
if (bitmap_weight(kpdbl_leds_in_use,
NUM_KPDBL_LEDS) == 1 && kpdbl_master &&
@@ -1727,20 +1731,17 @@ static int qpnp_rgb_set(struct qpnp_led_data *led)
"Failed to write led enable reg\n");
return rc;
}
-
+ if (!led->rgb_cfg->pwm_cfg->pwm_enabled) {
+ pwm_enable(led->rgb_cfg->pwm_cfg->pwm_dev);
+ led->rgb_cfg->pwm_cfg->pwm_enabled = 1;
+ }
+ } else {
+ led->rgb_cfg->pwm_cfg->mode =
+ led->rgb_cfg->pwm_cfg->default_mode;
if (led->rgb_cfg->pwm_cfg->pwm_enabled) {
pwm_disable(led->rgb_cfg->pwm_cfg->pwm_dev);
led->rgb_cfg->pwm_cfg->pwm_enabled = 0;
}
-
- rc = pwm_enable(led->rgb_cfg->pwm_cfg->pwm_dev);
- if (!rc)
- led->rgb_cfg->pwm_cfg->pwm_enabled = 1;
- } else {
- led->rgb_cfg->pwm_cfg->mode =
- led->rgb_cfg->pwm_cfg->default_mode;
- pwm_disable(led->rgb_cfg->pwm_cfg->pwm_dev);
- led->rgb_cfg->pwm_cfg->pwm_enabled = 0;
rc = qpnp_led_masked_write(led,
RGB_LED_EN_CTL(led->base),
led->rgb_cfg->enable, RGB_LED_DISABLE);
@@ -2183,11 +2184,17 @@ static ssize_t pwm_us_store(struct device *dev,
previous_pwm_us = pwm_cfg->pwm_period_us;
pwm_cfg->pwm_period_us = pwm_us;
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
if (ret) {
pwm_cfg->pwm_period_us = previous_pwm_us;
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
qpnp_led_set(&led->cdev, led->cdev.brightness);
dev_err(&led->pdev->dev,
@@ -2237,12 +2244,18 @@ static ssize_t pause_lo_store(struct device *dev,
previous_pause_lo = pwm_cfg->lut_params.lut_pause_lo;
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
pwm_cfg->lut_params.lut_pause_lo = pause_lo;
ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
if (ret) {
pwm_cfg->lut_params.lut_pause_lo = previous_pause_lo;
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
qpnp_led_set(&led->cdev, led->cdev.brightness);
dev_err(&led->pdev->dev,
@@ -2292,12 +2305,18 @@ static ssize_t pause_hi_store(struct device *dev,
previous_pause_hi = pwm_cfg->lut_params.lut_pause_hi;
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
pwm_cfg->lut_params.lut_pause_hi = pause_hi;
ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
if (ret) {
pwm_cfg->lut_params.lut_pause_hi = previous_pause_hi;
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
qpnp_led_set(&led->cdev, led->cdev.brightness);
dev_err(&led->pdev->dev,
@@ -2348,12 +2367,18 @@ static ssize_t start_idx_store(struct device *dev,
previous_start_idx = pwm_cfg->duty_cycles->start_idx;
pwm_cfg->duty_cycles->start_idx = start_idx;
pwm_cfg->lut_params.start_idx = pwm_cfg->duty_cycles->start_idx;
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
if (ret) {
pwm_cfg->duty_cycles->start_idx = previous_start_idx;
pwm_cfg->lut_params.start_idx = pwm_cfg->duty_cycles->start_idx;
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
qpnp_led_set(&led->cdev, led->cdev.brightness);
dev_err(&led->pdev->dev,
@@ -2403,12 +2428,18 @@ static ssize_t ramp_step_ms_store(struct device *dev,
previous_ramp_step_ms = pwm_cfg->lut_params.ramp_step_ms;
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
pwm_cfg->lut_params.ramp_step_ms = ramp_step_ms;
ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
if (ret) {
pwm_cfg->lut_params.ramp_step_ms = previous_ramp_step_ms;
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
qpnp_led_set(&led->cdev, led->cdev.brightness);
dev_err(&led->pdev->dev,
@@ -2458,12 +2489,18 @@ static ssize_t lut_flags_store(struct device *dev,
previous_lut_flags = pwm_cfg->lut_params.flags;
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
pwm_cfg->lut_params.flags = lut_flags;
ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
if (ret) {
pwm_cfg->lut_params.flags = previous_lut_flags;
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
qpnp_led_set(&led->cdev, led->cdev.brightness);
dev_err(&led->pdev->dev,
@@ -2543,7 +2580,11 @@ static ssize_t duty_pcts_store(struct device *dev,
pwm_cfg->old_duty_pcts = previous_duty_pcts;
pwm_cfg->lut_params.idx_len = pwm_cfg->duty_cycles->num_duty_pcts;
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
+
ret = qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
if (ret)
goto restore;
@@ -2558,7 +2599,10 @@ static ssize_t duty_pcts_store(struct device *dev,
pwm_cfg->old_duty_pcts = pwm_cfg->duty_cycles->duty_pcts;
pwm_cfg->duty_cycles->duty_pcts = previous_duty_pcts;
pwm_cfg->lut_params.idx_len = pwm_cfg->duty_cycles->num_duty_pcts;
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
qpnp_led_set(&led->cdev, led->cdev.brightness);
return ret;
@@ -2588,7 +2632,10 @@ static void led_blink(struct qpnp_led_data *led,
led->kpdbl_cfg->pwm_mode =
pwm_cfg->default_mode;
}
- pwm_free(pwm_cfg->pwm_dev);
+ if (pwm_cfg->pwm_enabled) {
+ pwm_disable(pwm_cfg->pwm_dev);
+ pwm_cfg->pwm_enabled = 0;
+ }
qpnp_pwm_init(pwm_cfg, led->pdev, led->cdev.name);
if (led->id == QPNP_ID_RGB_RED || led->id == QPNP_ID_RGB_GREEN
|| led->id == QPNP_ID_RGB_BLUE) {
@@ -3541,8 +3588,11 @@ static int qpnp_get_config_kpdbl(struct qpnp_led_data *led,
}
rc = qpnp_get_config_pwm(led->kpdbl_cfg->pwm_cfg, led->pdev, node);
- if (rc < 0)
+ if (rc < 0) {
+ if (led->kpdbl_cfg->pwm_cfg->pwm_dev)
+ pwm_put(led->kpdbl_cfg->pwm_cfg->pwm_dev);
return rc;
+ }
rc = of_property_read_u32(node, "qcom,row-id", &val);
if (!rc)
@@ -3605,8 +3655,11 @@ static int qpnp_get_config_rgb(struct qpnp_led_data *led,
}
rc = qpnp_get_config_pwm(led->rgb_cfg->pwm_cfg, led->pdev, node);
- if (rc < 0)
+ if (rc < 0) {
+ if (led->rgb_cfg->pwm_cfg->pwm_dev)
+ pwm_put(led->rgb_cfg->pwm_cfg->pwm_dev);
return rc;
+ }
return 0;
}
@@ -3729,8 +3782,11 @@ static int qpnp_get_config_mpp(struct qpnp_led_data *led,
}
rc = qpnp_get_config_pwm(led->mpp_cfg->pwm_cfg, led->pdev, node);
- if (rc < 0)
+ if (rc < 0) {
+ if (led->mpp_cfg->pwm_cfg && led->mpp_cfg->pwm_cfg->pwm_dev)
+ pwm_put(led->mpp_cfg->pwm_cfg->pwm_dev);
goto err_config_mpp;
+ }
return 0;
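Throughout this file the pattern is the same: instead of pwm_free() on every sysfs reconfiguration, the PWM is only disabled when pwm_enabled says it is running, and the flag is kept in step with each pwm_enable()/pwm_disable() pair. A small standalone sketch of that guard (toy types only, not the driver's structures):

#include <stdio.h>

struct pwm_cfg_toy { int pwm_enabled; int period_us; };

static void reconfigure(struct pwm_cfg_toy *cfg, int new_period_us)
{
	/* Disable only if currently enabled; the PWM device itself is kept */
	if (cfg->pwm_enabled) {
		cfg->pwm_enabled = 0;          /* stands in for pwm_disable() */
		printf("pwm disabled for reconfig\n");
	}
	cfg->period_us = new_period_us;        /* stands in for qpnp_pwm_init() */
}

int main(void)
{
	struct pwm_cfg_toy cfg = { 1, 1000 };

	reconfigure(&cfg, 2000);
	reconfigure(&cfg, 500);   /* already disabled: no second disable */
	return 0;
}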
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index 31119ea..1c73c5a2 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -334,6 +334,7 @@ static irqreturn_t tcs_irq_handler(int irq, void *p)
u32 irq_status, sts;
struct tcs_mbox *tcs;
struct tcs_response *resp;
+ struct tcs_cmd *cmd;
u32 irq_clear = 0;
u32 data;
@@ -353,28 +354,20 @@ static irqreturn_t tcs_irq_handler(int irq, void *p)
cancel_delayed_work(&resp->dwork);
- /* Clear the AMC mode for non-ACTIVE TCSes */
tcs = get_tcs_from_index(drv, m);
if (!tcs) {
pr_err("TCS-%d doesn't exist in DRV\n", m);
continue;
}
- if (tcs->type != ACTIVE_TCS) {
- data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
- data &= ~TCS_AMC_MODE_ENABLE;
- write_tcs_reg(base, TCS_DRV_CONTROL, m, 0, data);
- } else {
- /* Clear the enable bit for the commands */
- write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
- }
/* Check if all commands were completed */
resp->err = 0;
for (i = 0; i < resp->msg->num_payload; i++) {
+ cmd = &resp->msg->payload[i];
sts = read_tcs_reg(base, TCS_DRV_CMD_STATUS, m, i);
- if (!(sts & CMD_STATUS_ISSUED) ||
- (resp->msg->is_complete &&
- !(sts & CMD_STATUS_COMPL)))
+ if ((!(sts & CMD_STATUS_ISSUED)) ||
+ ((resp->msg->is_complete || cmd->complete) &&
+ (!(sts & CMD_STATUS_COMPL))))
resp->err = -EIO;
}
@@ -389,6 +382,16 @@ static irqreturn_t tcs_irq_handler(int irq, void *p)
trace_rpmh_notify_irq(drv->name, m, resp->msg->payload[0].addr,
resp->err);
+ /* Clear the AMC mode for non-ACTIVE TCSes */
+ if (tcs->type != ACTIVE_TCS) {
+ data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
+ data &= ~TCS_AMC_MODE_ENABLE;
+ write_tcs_reg(base, TCS_DRV_CONTROL, m, 0, data);
+ } else {
+ /* Clear the enable bit for the commands */
+ write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
+ }
+
/* Notify the client that this request is completed. */
send_tcs_response(resp);
irq_clear |= BIT(m);
@@ -474,7 +477,7 @@ static void tcs_notify_timeout(struct work_struct *work)
static void __tcs_buffer_write(struct tcs_drv *drv, int d, int m, int n,
struct tcs_mbox_msg *msg, bool trigger)
{
- u32 cmd_msgid = 0;
+ u32 msgid, cmd_msgid = 0;
u32 cmd_enable = 0;
u32 cmd_complete;
u32 enable = TCS_AMC_MODE_ENABLE;
@@ -494,10 +497,12 @@ static void __tcs_buffer_write(struct tcs_drv *drv, int d, int m, int n,
cmd = &msg->payload[i];
cmd_enable |= BIT(n + i);
cmd_complete |= cmd->complete << (n + i);
- write_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n + i, cmd_msgid);
+ msgid = cmd_msgid;
+ msgid |= (cmd->complete) ? CMD_MSGID_RESP_REQ : 0;
+ write_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n + i, msgid);
write_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n + i, cmd->addr);
write_tcs_reg(base, TCS_DRV_CMD_DATA, m, n + i, cmd->data);
- trace_rpmh_send_msg(drv->name, m, n + i, cmd_msgid, cmd->addr,
+ trace_rpmh_send_msg(drv->name, m, n + i, msgid, cmd->addr,
cmd->data, cmd->complete, trigger);
}
@@ -732,6 +737,41 @@ static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
return 0;
}
+static void __tcs_buffer_invalidate(void __iomem *base, int m)
+{
+ write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
+}
+
+static int tcs_mbox_invalidate(struct mbox_chan *chan)
+{
+ struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
+ struct tcs_mbox *tcs;
+ int m, i;
+ int inv_types[] = { WAKE_TCS, SLEEP_TCS };
+ int type = 0;
+
+ do {
+ tcs = get_tcs_of_type(drv, inv_types[type]);
+ if (IS_ERR(tcs))
+ return PTR_ERR(tcs);
+
+ spin_lock(&tcs->tcs_lock);
+ for (i = 0; i < tcs->num_tcs; i++) {
+ m = i + tcs->tcs_offset;
+ spin_lock(&tcs->tcs_m_lock[i]);
+ while (!tcs_is_free(drv->reg_base, m))
+ cpu_relax();
+ __tcs_buffer_invalidate(drv->reg_base, m);
+ spin_unlock(&tcs->tcs_m_lock[i]);
+ }
+ /* Mark the TCS as free */
+ bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
+ spin_unlock(&tcs->tcs_lock);
+ } while (++type < ARRAY_SIZE(inv_types));
+
+ return 0;
+}
+
/**
* chan_tcs_write: Validate the incoming message and write to the
* appropriate TCS block.
@@ -776,6 +816,13 @@ static int chan_tcs_write(struct mbox_chan *chan, void *data)
goto tx_fail;
}
+ /*
+ * Since we are re-purposing the wake TCS, invalidate previous
+ * contents to avoid confusion.
+ */
+ if (msg->state == RPMH_AWAKE_STATE)
+ tcs_mbox_invalidate(chan);
+
/* Post the message to the TCS and trigger */
ret = tcs_mbox_write(chan, msg, true);
@@ -796,41 +843,6 @@ static int chan_tcs_write(struct mbox_chan *chan, void *data)
return 0;
}
-static void __tcs_buffer_invalidate(void __iomem *base, int m)
-{
- write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
-}
-
-static int tcs_mbox_invalidate(struct mbox_chan *chan)
-{
- struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
- struct tcs_mbox *tcs;
- int m, i;
- int inv_types[] = { WAKE_TCS, SLEEP_TCS };
- int type = 0;
-
- do {
- tcs = get_tcs_of_type(drv, inv_types[type]);
- if (IS_ERR(tcs))
- return PTR_ERR(tcs);
-
- spin_lock(&tcs->tcs_lock);
- for (i = 0; i < tcs->num_tcs; i++) {
- m = i + tcs->tcs_offset;
- spin_lock(&tcs->tcs_m_lock[i]);
- while (!tcs_is_free(drv->reg_base, m))
- cpu_relax();
- __tcs_buffer_invalidate(drv->reg_base, m);
- spin_unlock(&tcs->tcs_m_lock[i]);
- }
- /* Mark the TCS as free */
- bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
- spin_unlock(&tcs->tcs_lock);
- } while (++type < ARRAY_SIZE(inv_types));
-
- return 0;
-}
-
static void __tcs_write_hidden(struct tcs_drv *drv, int d,
struct tcs_mbox_msg *msg)
{
@@ -955,6 +967,7 @@ static int tcs_drv_probe(struct platform_device *pdev)
u32 config, max_tcs, ncpt;
int tcs_type_count[TCS_TYPE_NR] = { 0 };
struct resource *res;
+ u32 irq_mask;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
@@ -1098,9 +1111,14 @@ static int tcs_drv_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Enable interrupts for AMC TCS */
- write_tcs_reg(drv->reg_base, TCS_DRV_IRQ_ENABLE, 0, 0,
- drv->tcs[ACTIVE_TCS].tcs_mask);
+ /*
+ * Enable interrupts for the AMC TCS; if there are no AMC TCSes,
+ * use the wake TCS instead.
+ */
+ irq_mask = (drv->tcs[ACTIVE_TCS].num_tcs) ?
+ drv->tcs[ACTIVE_TCS].tcs_mask :
+ drv->tcs[WAKE_TCS].tcs_mask;
+ write_tcs_reg(drv->reg_base, TCS_DRV_IRQ_ENABLE, 0, 0, irq_mask);
ret = mbox_controller_register(&drv->mbox);
if (ret)
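The IRQ-handler change above makes the error check per command: a slot fails if it was never issued, or if either the message or that individual command asked for a completion and CMD_STATUS_COMPL never showed up. A standalone sketch of that predicate; the status bit values here are placeholders, not the driver's real definitions.

#include <stdio.h>

/* placeholder bit positions; the real values live in the driver's header */
#define CMD_STATUS_ISSUED (1u << 1)
#define CMD_STATUS_COMPL  (1u << 0)

static int cmd_failed(unsigned int sts, int msg_complete, int cmd_complete)
{
	/* never issued, or a requested completion was never observed */
	return !(sts & CMD_STATUS_ISSUED) ||
	       ((msg_complete || cmd_complete) && !(sts & CMD_STATUS_COMPL));
}

int main(void)
{
	printf("%d %d %d\n",
	       cmd_failed(0, 0, 0),                                    /* 1 */
	       cmd_failed(CMD_STATUS_ISSUED, 0, 1),                    /* 1 */
	       cmd_failed(CMD_STATUS_ISSUED | CMD_STATUS_COMPL, 1, 1)); /* 0 */
	return 0;
}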
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index db01353..c897669 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -1,5 +1,4 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-
obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_utils/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_core/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync/
diff --git a/drivers/media/platform/msm/camera/cam_sync/Makefile b/drivers/media/platform/msm/camera/cam_sync/Makefile
new file mode 100644
index 0000000..e3012cb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync.o cam_sync_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
new file mode 100644
index 0000000..a736148
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -0,0 +1,1024 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-SYNC %s:%d " fmt, __func__, __LINE__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/irqflags.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include "cam_sync_util.h"
+
+struct sync_device *sync_dev;
+
+int cam_sync_create(int32_t *sync_obj, const char *name)
+{
+ int rc;
+ long idx;
+
+ do {
+ idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+ if (idx >= CAM_SYNC_MAX_OBJS)
+ return -ENOMEM;
+ } while (!spin_trylock_bh(&sync_dev->row_spinlocks[idx]));
+
+ rc = cam_sync_init_object(sync_dev->sync_table, idx, name);
+ if (rc) {
+ pr_err("Error: Unable to init row at idx = %ld\n", idx);
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+ return -EINVAL;
+ }
+
+ set_bit(idx, sync_dev->bitmap);
+ *sync_obj = idx;
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+
+ return rc;
+}
+
+int cam_sync_register_callback(sync_callback cb_func,
+ void *userdata, int32_t sync_obj)
+{
+ struct sync_callback_info *sync_cb;
+ struct sync_callback_info *cb_info;
+ struct sync_callback_info *temp_cb;
+ struct sync_table_row *row = NULL;
+
+ if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
+ return -EINVAL;
+
+ spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ row = sync_dev->sync_table + sync_obj;
+
+ if (row->state == CAM_SYNC_STATE_INVALID) {
+ pr_err("Error: accessing an uninitialized sync obj %d\n",
+ sync_obj);
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ return -EINVAL;
+ }
+
+ sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
+ if (!sync_cb) {
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ return -ENOMEM;
+ }
+
+ /* Trigger callback if sync object is already in SIGNALED state */
+ if (row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
+ row->state == CAM_SYNC_STATE_SIGNALED_ERROR) {
+ sync_cb->callback_func = cb_func;
+ sync_cb->cb_data = userdata;
+ sync_cb->sync_obj = sync_obj;
+ INIT_WORK(&sync_cb->cb_dispatch_work,
+ cam_sync_util_cb_dispatch);
+
+ sync_cb->status = row->state;
+ queue_work(sync_dev->work_queue,
+ &sync_cb->cb_dispatch_work);
+
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ return 0;
+ }
+
+ /* Don't register if callback was registered earlier */
+ list_for_each_entry_safe(cb_info, temp_cb, &row->callback_list, list) {
+ if (cb_info->callback_func == cb_func &&
+ cb_info->cb_data == userdata) {
+ kfree(sync_cb);
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ return -EALREADY;
+ }
+ }
+
+ sync_cb->callback_func = cb_func;
+ sync_cb->cb_data = userdata;
+ sync_cb->sync_obj = sync_obj;
+ INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
+ list_add_tail(&sync_cb->list, &row->callback_list);
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+
+ return 0;
+}
+
+int cam_sync_deregister_callback(sync_callback cb_func,
+ void *userdata, int32_t sync_obj)
+{
+ struct sync_table_row *row = NULL;
+ struct sync_callback_info *sync_cb, *temp;
+
+ if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+ return -EINVAL;
+
+ spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ row = sync_dev->sync_table + sync_obj;
+
+ if (row->state == CAM_SYNC_STATE_INVALID) {
+ pr_err("Error: accessing an uninitialized sync obj = %d\n",
+ sync_obj);
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ return -EINVAL;
+ }
+
+ list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
+ if (sync_cb->callback_func == cb_func &&
+ sync_cb->cb_data == userdata) {
+ list_del_init(&sync_cb->list);
+ kfree(sync_cb);
+ }
+ }
+
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ return 0;
+}
+
+int cam_sync_signal(int32_t sync_obj, uint32_t status)
+{
+ int rc;
+ struct sync_table_row *row = NULL;
+ struct sync_table_row *parent_row = NULL;
+ struct sync_callback_info *sync_cb;
+ struct sync_user_payload *payload_info;
+ struct sync_parent_info *parent_info;
+ struct list_head sync_list;
+ struct cam_signalable_info *list_info = NULL;
+ struct cam_signalable_info *temp_list_info = NULL;
+
+ /* Objects to be signaled will be added into this list */
+ INIT_LIST_HEAD(&sync_list);
+
+ if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+ return -EINVAL;
+
+ row = sync_dev->sync_table + sync_obj;
+ if (row->state == CAM_SYNC_STATE_INVALID) {
+ pr_err("Error: accessing an uninitialized sync obj = %d\n",
+ sync_obj);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ if (row->type == CAM_SYNC_TYPE_GROUP) {
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ pr_err("Error: Signaling a GROUP sync object = %d\n",
+ sync_obj);
+ return -EINVAL;
+ }
+
+ if (row->state != CAM_SYNC_STATE_ACTIVE) {
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ pr_err("Error: Sync object already signaled sync_obj = %d",
+ sync_obj);
+ return -EALREADY;
+ }
+
+ if (status != CAM_SYNC_STATE_SIGNALED_SUCCESS &&
+ status != CAM_SYNC_STATE_SIGNALED_ERROR) {
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ pr_err("Error: signaling with undefined status = %d\n",
+ status);
+ return -EINVAL;
+ }
+
+ row->state = status;
+ rc = cam_sync_util_add_to_signalable_list(sync_obj, status, &sync_list);
+ if (rc < 0) {
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ return rc;
+ }
+
+ /*
+ * Now iterate over all parents of this object and if they too need to
+ * be signaled add them to the list
+ */
+ list_for_each_entry(parent_info,
+ &row->parents_list,
+ list) {
+ parent_row = sync_dev->sync_table + parent_info->sync_id;
+ spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+ parent_row->remaining--;
+
+ parent_row->state = cam_sync_util_get_state(
+ parent_row->state,
+ status);
+
+ if (!parent_row->remaining) {
+ rc = cam_sync_util_add_to_signalable_list
+ (parent_info->sync_id,
+ parent_row->state,
+ &sync_list);
+ if (rc < 0) {
+ spin_unlock_bh(
+ &sync_dev->row_spinlocks[
+ parent_info->sync_id]);
+ return rc;
+ }
+ }
+ spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+ }
+
+ /*
+ * Now dispatch the various sync objects collected so far, in our
+ * list
+ */
+ list_for_each_entry_safe(list_info,
+ temp_list_info,
+ &sync_list,
+ list) {
+ struct sync_table_row *signalable_row = NULL;
+ struct sync_callback_info *temp_sync_cb;
+ struct sync_user_payload *temp_payload_info;
+
+ signalable_row = sync_dev->sync_table + list_info->sync_obj;
+ /* Dispatch kernel callbacks if any were registered earlier */
+ list_for_each_entry_safe(sync_cb,
+ temp_sync_cb, &signalable_row->callback_list, list) {
+ sync_cb->status = list_info->status;
+ queue_work(sync_dev->work_queue,
+ &sync_cb->cb_dispatch_work);
+ list_del_init(&sync_cb->list);
+ }
+
+ /* Dispatch user payloads if any were registered earlier */
+ list_for_each_entry_safe(payload_info, temp_payload_info,
+ &signalable_row->user_payload_list, list) {
+ spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
+ if (!sync_dev->cam_sync_eventq) {
+ spin_unlock_bh(
+ &sync_dev->cam_sync_eventq_lock);
+ break;
+ }
+ spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
+ cam_sync_util_send_v4l2_event(
+ CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
+ list_info->sync_obj,
+ list_info->status,
+ payload_info->payload_data,
+ CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));
+
+ list_del_init(&payload_info->list);
+ /*
+ * We can free the list node here because
+ * sending V4L event will make a deep copy
+ * anyway
+ */
+ kfree(payload_info);
+ }
+
+ /*
+ * This needs to be done because we want to unblock anyone
+ * who might be blocked and waiting on this sync object
+ */
+ complete_all(&signalable_row->signaled);
+
+ list_del_init(&list_info->list);
+ kfree(list_info);
+ }
+
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+
+ return rc;
+}
+
+int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
+{
+ int rc;
+ long idx = 0;
+
+ rc = cam_sync_util_validate_merge(sync_obj,
+ num_objs);
+ if (rc < 0) {
+ pr_err("Validation failed, Merge not allowed");
+ return -EINVAL;
+ }
+
+ rc = cam_sync_util_find_and_set_empty_row(sync_dev, &idx);
+ if (rc < 0) {
+ pr_err("Error: Unable to find empty row, table full");
+ return -EINVAL;
+ }
+
+ if (idx <= 0 || idx >= CAM_SYNC_MAX_OBJS) {
+ pr_err("Error: Invalid empty row index returned = %ld", idx);
+ return -EINVAL;
+ }
+
+ rc = cam_sync_init_group_object(sync_dev->sync_table,
+ idx, sync_obj,
+ num_objs);
+
+ if (rc < 0) {
+ pr_err("Error: Unable to init row at idx = %ld\n", idx);
+ return -EINVAL;
+ }
+
+ *merged_obj = idx;
+
+ return 0;
+}
+
+int cam_sync_destroy(int32_t sync_obj)
+{
+ struct sync_table_row *row = NULL;
+
+ if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+ return -EINVAL;
+
+ row = sync_dev->sync_table + sync_obj;
+ if (row->state == CAM_SYNC_STATE_INVALID) {
+ pr_err("Error: accessing an uninitialized sync obj: idx = %d\n",
+ sync_obj);
+ return -EINVAL;
+ }
+
+ cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
+ return 0;
+}
+
+int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
+{
+ unsigned long timeleft;
+ int rc = -EINVAL;
+ struct sync_table_row *row = NULL;
+
+ if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+ return -EINVAL;
+
+ row = sync_dev->sync_table + sync_obj;
+
+ if (row->state == CAM_SYNC_STATE_INVALID) {
+ pr_err("Error: accessing an uninitialized sync obj = %d\n",
+ sync_obj);
+ return -EINVAL;
+ }
+
+ timeleft = wait_for_completion_timeout(&row->signaled,
+ msecs_to_jiffies(timeout_ms));
+
+ if (!timeleft) {
+ pr_err("Error: cam_sync_wait() timed out for sync obj = %d\n",
+ sync_obj);
+ rc = -ETIMEDOUT;
+ } else {
+ switch (row->state) {
+ case CAM_SYNC_STATE_INVALID:
+ case CAM_SYNC_STATE_ACTIVE:
+ case CAM_SYNC_STATE_SIGNALED_ERROR:
+ pr_err("Error: Wait on invalid state = %d, obj = %d\n",
+ row->state, sync_obj);
+ rc = -EINVAL;
+ break;
+ case CAM_SYNC_STATE_SIGNALED_SUCCESS:
+ rc = 0;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
+{
+ struct cam_sync_info sync_create;
+ int result;
+
+ if (k_ioctl->size != sizeof(struct cam_sync_info))
+ return -EINVAL;
+
+ if (!k_ioctl->ioctl_ptr)
+ return -EINVAL;
+
+ if (copy_from_user(&sync_create,
+ (void *)k_ioctl->ioctl_ptr,
+ k_ioctl->size))
+ return -EFAULT;
+
+ result = cam_sync_create(&sync_create.sync_obj,
+ sync_create.name);
+
+ if (!result)
+ if (copy_to_user((void *)k_ioctl->ioctl_ptr,
+ &sync_create,
+ k_ioctl->size))
+ return -EFAULT;
+
+ return result;
+}
+
+static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
+{
+ struct cam_sync_signal sync_signal;
+
+ if (k_ioctl->size != sizeof(struct cam_sync_signal))
+ return -EINVAL;
+
+ if (!k_ioctl->ioctl_ptr)
+ return -EINVAL;
+
+ if (copy_from_user(&sync_signal,
+ (void *)k_ioctl->ioctl_ptr,
+ k_ioctl->size))
+ return -EFAULT;
+
+ return cam_sync_signal(sync_signal.sync_obj,
+ sync_signal.sync_state);
+}
+
+static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
+{
+ struct cam_sync_merge sync_merge;
+ uint32_t *sync_objs;
+ uint32_t num_objs;
+ uint32_t size;
+ int result;
+
+ if (k_ioctl->size != sizeof(struct cam_sync_merge))
+ return -EINVAL;
+
+ if (!k_ioctl->ioctl_ptr)
+ return -EINVAL;
+
+ if (copy_from_user(&sync_merge,
+ (void *)k_ioctl->ioctl_ptr,
+ k_ioctl->size))
+ return -EFAULT;
+
+ if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
+ return -EINVAL;
+
+ size = sizeof(uint32_t) * sync_merge.num_objs;
+ sync_objs = kzalloc(size, GFP_ATOMIC);
+
+ if (!sync_objs)
+ return -ENOMEM;
+
+ if (copy_from_user(sync_objs,
+ (void *)sync_merge.sync_objs,
+ sizeof(uint32_t) * sync_merge.num_objs)) {
+ kfree(sync_objs);
+ return -EFAULT;
+ }
+
+ num_objs = sync_merge.num_objs;
+
+ result = cam_sync_merge(sync_objs,
+ num_objs,
+ &sync_merge.merged);
+
+ if (!result)
+ if (copy_to_user((void *)k_ioctl->ioctl_ptr,
+ &sync_merge,
+ k_ioctl->size)) {
+ kfree(sync_objs);
+ return -EFAULT;
+ }
+
+ kfree(sync_objs);
+
+ return result;
+}
+
+static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
+{
+ struct cam_sync_wait sync_wait;
+
+ if (k_ioctl->size != sizeof(struct cam_sync_wait))
+ return -EINVAL;
+
+ if (!k_ioctl->ioctl_ptr)
+ return -EINVAL;
+
+ if (copy_from_user(&sync_wait,
+ (void *)k_ioctl->ioctl_ptr,
+ k_ioctl->size))
+ return -EFAULT;
+
+ k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
+ sync_wait.timeout_ms);
+
+ return 0;
+}
+
+static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
+{
+ struct cam_sync_info sync_create;
+
+ if (k_ioctl->size != sizeof(struct cam_sync_info))
+ return -EINVAL;
+
+ if (!k_ioctl->ioctl_ptr)
+ return -EINVAL;
+
+ if (copy_from_user(&sync_create,
+ (void *)k_ioctl->ioctl_ptr,
+ k_ioctl->size))
+ return -EFAULT;
+
+ return cam_sync_destroy(sync_create.sync_obj);
+}
+
+static int cam_sync_handle_register_user_payload(
+ struct cam_private_ioctl_arg *k_ioctl)
+{
+ struct cam_sync_userpayload_info userpayload_info;
+ struct sync_user_payload *user_payload_kernel;
+ struct sync_user_payload *user_payload_iter;
+ struct sync_user_payload *temp_upayload_kernel;
+ uint32_t sync_obj;
+ struct sync_table_row *row = NULL;
+
+ if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
+ return -EINVAL;
+
+ if (!k_ioctl->ioctl_ptr)
+ return -EINVAL;
+
+ if (copy_from_user(&userpayload_info,
+ (void *)k_ioctl->ioctl_ptr,
+ k_ioctl->size))
+ return -EFAULT;
+
+ sync_obj = userpayload_info.sync_obj;
+ if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+ return -EINVAL;
+
+ user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
+ if (!user_payload_kernel)
+ return -ENOMEM;
+
+ memcpy(user_payload_kernel->payload_data,
+ userpayload_info.payload,
+ CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));
+
+ spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ row = sync_dev->sync_table + sync_obj;
+
+ if (row->state == CAM_SYNC_STATE_INVALID) {
+ pr_err("Error: accessing an uninitialized sync obj = %d\n",
+ sync_obj);
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ kfree(user_payload_kernel);
+ return -EINVAL;
+ }
+
+ if (row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
+ row->state == CAM_SYNC_STATE_SIGNALED_ERROR) {
+
+ cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
+ sync_obj,
+ row->state,
+ user_payload_kernel->payload_data,
+ CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64));
+
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ kfree(user_payload_kernel);
+ return 0;
+ }
+
+ list_for_each_entry_safe(user_payload_iter,
+ temp_upayload_kernel,
+ &row->user_payload_list,
+ list) {
+ if (user_payload_iter->payload_data[0] ==
+ user_payload_kernel->payload_data[0] &&
+ user_payload_iter->payload_data[1] ==
+ user_payload_kernel->payload_data[1]) {
+
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ kfree(user_payload_kernel);
+ return -EALREADY;
+ }
+ }
+
+ list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ return 0;
+}
+
+static int cam_sync_handle_deregister_user_payload(
+ struct cam_private_ioctl_arg *k_ioctl)
+{
+ struct cam_sync_userpayload_info userpayload_info;
+ struct sync_user_payload *user_payload_kernel, *temp;
+ uint32_t sync_obj;
+ struct sync_table_row *row = NULL;
+
+ if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
+ CDBG("Incorrect ioctl size\n");
+ return -EINVAL;
+ }
+
+ if (!k_ioctl->ioctl_ptr) {
+ CDBG("Invalid embedded ioctl ptr\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&userpayload_info,
+ (void *)k_ioctl->ioctl_ptr,
+ k_ioctl->size))
+ return -EFAULT;
+
+ sync_obj = userpayload_info.sync_obj;
+ if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+ return -EINVAL;
+
+ spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ row = sync_dev->sync_table + sync_obj;
+
+ if (row->state == CAM_SYNC_STATE_INVALID) {
+ pr_err("Error: accessing an uninitialized sync obj = %d\n",
+ sync_obj);
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ return -EINVAL;
+ }
+
+ list_for_each_entry_safe(user_payload_kernel, temp,
+ &row->user_payload_list, list) {
+ if (user_payload_kernel->payload_data[0] ==
+ userpayload_info.payload[0] &&
+ user_payload_kernel->payload_data[1] ==
+ userpayload_info.payload[1]) {
+ list_del_init(&user_payload_kernel->list);
+ kfree(user_payload_kernel);
+ }
+ }
+
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ return 0;
+}
+
+static long cam_sync_dev_ioctl(struct file *filep, void *fh,
+ bool valid_prio, unsigned int cmd, void *arg)
+{
+ int32_t rc;
+ struct sync_device *sync_dev = video_drvdata(filep);
+ struct cam_private_ioctl_arg k_ioctl;
+
+ if (!sync_dev) {
+ pr_err("%s sync_dev NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!arg)
+ return -EINVAL;
+
+ if (cmd != CAM_PRIVATE_IOCTL_CMD)
+ return -ENOIOCTLCMD;
+
+ k_ioctl = *(struct cam_private_ioctl_arg *)arg;
+
+ switch (k_ioctl.id) {
+ case CAM_SYNC_CREATE:
+ rc = cam_sync_handle_create(&k_ioctl);
+ break;
+ case CAM_SYNC_DESTROY:
+ rc = cam_sync_handle_destroy(&k_ioctl);
+ break;
+ case CAM_SYNC_REGISTER_PAYLOAD:
+ rc = cam_sync_handle_register_user_payload(
+ &k_ioctl);
+ break;
+ case CAM_SYNC_DEREGISTER_PAYLOAD:
+ rc = cam_sync_handle_deregister_user_payload(
+ &k_ioctl);
+ break;
+ case CAM_SYNC_SIGNAL:
+ rc = cam_sync_handle_signal(&k_ioctl);
+ break;
+ case CAM_SYNC_MERGE:
+ rc = cam_sync_handle_merge(&k_ioctl);
+ break;
+ case CAM_SYNC_WAIT:
+ rc = cam_sync_handle_wait(&k_ioctl);
+ ((struct cam_private_ioctl_arg *)arg)->result =
+ k_ioctl.result;
+ break;
+ default:
+ rc = -ENOIOCTLCMD;
+ }
+
+ return rc;
+}
+
+static unsigned int cam_sync_poll(struct file *f,
+ struct poll_table_struct *pll_table)
+{
+ int rc = 0;
+ struct v4l2_fh *eventq = f->private_data;
+
+ if (!eventq)
+ return -EINVAL;
+
+ poll_wait(f, &eventq->wait, pll_table);
+
+ if (v4l2_event_pending(eventq))
+ rc = POLLPRI;
+
+ return rc;
+}
+
+static int cam_sync_open(struct file *filep)
+{
+ int rc;
+ struct sync_device *sync_dev = video_drvdata(filep);
+
+ if (!sync_dev) {
+ pr_err("%s Sync device NULL\n", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&sync_dev->table_lock);
+ if (sync_dev->open_cnt >= 1) {
+ mutex_unlock(&sync_dev->table_lock);
+ return -EALREADY;
+ }
+
+ rc = v4l2_fh_open(filep);
+ if (!rc) {
+ sync_dev->open_cnt++;
+ spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
+ sync_dev->cam_sync_eventq = filep->private_data;
+ spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
+ } else {
+ pr_err("v4l2_fh_open failed : %d\n", rc);
+ }
+ mutex_unlock(&sync_dev->table_lock);
+
+ return rc;
+}
+
+static int cam_sync_close(struct file *filep)
+{
+ int rc = 0;
+ int i;
+ struct sync_device *sync_dev = video_drvdata(filep);
+
+ if (!sync_dev) {
+ pr_err("%s Sync device NULL\n", __func__);
+ rc = -ENODEV;
+ return rc;
+ }
+ mutex_lock(&sync_dev->table_lock);
+ sync_dev->open_cnt--;
+ if (!sync_dev->open_cnt) {
+ for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
+ struct sync_table_row *row =
+ sync_dev->sync_table + i;
+ if (row->state == CAM_SYNC_STATE_INVALID)
+ continue;
+
+			/* Signal all remaining objects as ERR, but we don't care
+ * about the return status here apart from logging it
+ */
+ rc = cam_sync_signal(i, CAM_SYNC_STATE_SIGNALED_ERROR);
+ if (rc < 0)
+ pr_err("Cleanup signal failed: idx = %d\n", i);
+
+ rc = cam_sync_destroy(i);
+ if (rc < 0)
+ pr_err("Cleanup destroy failed: idx = %d\n", i);
+ }
+ }
+ mutex_unlock(&sync_dev->table_lock);
+ spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
+ sync_dev->cam_sync_eventq = NULL;
+ spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
+ v4l2_fh_release(filep);
+
+ return rc;
+}
+
+int cam_sync_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS, NULL);
+}
+
+int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
+ .vidioc_subscribe_event = cam_sync_subscribe_event,
+ .vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
+ .vidioc_default = cam_sync_dev_ioctl,
+};
+
+static struct v4l2_file_operations cam_sync_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .open = cam_sync_open,
+ .release = cam_sync_close,
+ .poll = cam_sync_poll,
+ .unlocked_ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = video_ioctl2,
+#endif
+};
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+static int cam_sync_media_controller_init(struct sync_device *sync_dev,
+ struct platform_device *pdev)
+{
+ int rc;
+
+ sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
+ GFP_KERNEL);
+ if (!sync_dev->v4l2_dev.mdev)
+ return -ENOMEM;
+
+ media_device_init(sync_dev->v4l2_dev.mdev);
+ strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
+ sizeof(sync_dev->v4l2_dev.mdev->model));
+ sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);
+
+ rc = media_device_register(sync_dev->v4l2_dev.mdev);
+ if (rc < 0)
+ goto register_fail;
+
+ rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
+ if (rc < 0)
+ goto entity_fail;
+
+ return 0;
+
+entity_fail:
+ media_device_unregister(sync_dev->v4l2_dev.mdev);
+register_fail:
+ media_device_cleanup(sync_dev->v4l2_dev.mdev);
+ return rc;
+}
+
+static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
+{
+ media_entity_cleanup(&sync_dev->vdev->entity);
+ media_device_unregister(sync_dev->v4l2_dev.mdev);
+ media_device_cleanup(sync_dev->v4l2_dev.mdev);
+ kfree(sync_dev->v4l2_dev.mdev);
+}
+
+static void cam_sync_init_entity(struct sync_device *sync_dev)
+{
+ sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
+ sync_dev->vdev->entity.name =
+ video_device_node_name(sync_dev->vdev);
+}
+#else
+static int cam_sync_media_controller_init(struct sync_device *sync_dev,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+
+static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
+{
+}
+
+static void cam_sync_init_entity(struct sync_device *sync_dev)
+{
+}
+#endif
+
+static int cam_sync_probe(struct platform_device *pdev)
+{
+ int rc;
+ int idx;
+
+ sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
+ if (!sync_dev)
+ return -ENOMEM;
+
+ mutex_init(&sync_dev->table_lock);
+ spin_lock_init(&sync_dev->cam_sync_eventq_lock);
+
+ for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
+ spin_lock_init(&sync_dev->row_spinlocks[idx]);
+
+ sync_dev->vdev = video_device_alloc();
+ if (!sync_dev->vdev) {
+ rc = -ENOMEM;
+ goto vdev_fail;
+ }
+
+ rc = cam_sync_media_controller_init(sync_dev, pdev);
+ if (rc < 0)
+ goto mcinit_fail;
+
+ sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;
+
+ rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
+ if (rc < 0)
+ goto register_fail;
+
+ strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
+ sizeof(sync_dev->vdev->name));
+ sync_dev->vdev->release = video_device_release;
+ sync_dev->vdev->fops = &cam_sync_v4l2_fops;
+ sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
+ sync_dev->vdev->minor = -1;
+ sync_dev->vdev->vfl_type = VFL_TYPE_GRABBER;
+ rc = video_register_device(sync_dev->vdev,
+ VFL_TYPE_GRABBER, -1);
+ if (rc < 0)
+ goto v4l2_fail;
+
+ cam_sync_init_entity(sync_dev);
+ video_set_drvdata(sync_dev->vdev, sync_dev);
+ memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
+ memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
+ bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+
+ /*
+ * We treat zero as invalid handle, so we will keep the 0th bit set
+ * always
+ */
+ set_bit(0, sync_dev->bitmap);
+
+ sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
+ WQ_HIGHPRI | WQ_UNBOUND, 0);
+
+ if (!sync_dev->work_queue) {
+ pr_err("Error: high priority work queue creation failed!\n");
+ rc = -ENOMEM;
+ goto v4l2_fail;
+ }
+
+ return rc;
+
+v4l2_fail:
+ v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
+register_fail:
+ cam_sync_media_controller_cleanup(sync_dev);
+mcinit_fail:
+ video_device_release(sync_dev->vdev);
+vdev_fail:
+ mutex_destroy(&sync_dev->table_lock);
+ kfree(sync_dev);
+ return rc;
+}
+
+static int cam_sync_remove(struct platform_device *pdev)
+{
+ v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
+ cam_sync_media_controller_cleanup(sync_dev);
+ video_device_release(sync_dev->vdev);
+ kfree(sync_dev);
+ sync_dev = NULL;
+
+ return 0;
+}
+
+static struct platform_device cam_sync_device = {
+ .name = "cam_sync",
+ .id = -1,
+};
+
+static struct platform_driver cam_sync_driver = {
+ .probe = cam_sync_probe,
+ .remove = cam_sync_remove,
+ .driver = {
+ .name = "cam_sync",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init cam_sync_init(void)
+{
+ int rc;
+
+ rc = platform_device_register(&cam_sync_device);
+ if (rc)
+ return -ENODEV;
+
+ return platform_driver_register(&cam_sync_driver);
+}
+
+static void __exit cam_sync_exit(void)
+{
+ platform_driver_unregister(&cam_sync_driver);
+ platform_device_unregister(&cam_sync_device);
+ kfree(sync_dev);
+}
+
+module_init(cam_sync_init);
+module_exit(cam_sync_exit);
+MODULE_DESCRIPTION("Camera sync driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_api.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_api.h
new file mode 100644
index 0000000..9646887
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_api.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_SYNC_API_H__
+#define __CAM_SYNC_API_H__
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/videodev2.h>
+#include <uapi/media/cam_sync.h>
+
+#define SYNC_DEBUG_NAME_LEN 63
+typedef void (*sync_callback)(int32_t sync_obj, int status, void *data);
+
+/* Kernel APIs */
+
+/**
+ * @brief: Creates a sync object
+ *
+ * The id of the newly created sync object is assigned to *sync_obj.
+ *
+ * @param sync_obj : Pointer to int referencing the sync object.
+ * @param name : Optional parameter associating a name with the sync object for
+ * debug purposes. Only the first SYNC_DEBUG_NAME_LEN bytes are used;
+ * the rest are ignored.
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if sync_obj is an invalid pointer.
+ * -ENOMEM will be returned if the kernel can't allocate space for
+ * sync object.
+ */
+int cam_sync_create(int32_t *sync_obj, const char *name);
+
+/**
+ * @brief: Registers a callback with a sync object
+ *
+ * @param cb_func: Pointer to callback to be registered
+ * @param userdata: Opaque pointer which will be passed back with callback.
+ * @param sync_obj: int referencing the sync object.
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if cb_func is NULL or sync_obj is invalid.
+ * -ENOMEM will be returned if memory for the callback node cannot be
+ * allocated.
+ * -EALREADY will be returned if the same cb_func and userdata pair is
+ * already registered for this sync object.
+ *
+ */
+int cam_sync_register_callback(sync_callback cb_func,
+ void *userdata, int32_t sync_obj);
+
+/**
+ * @brief: De-registers a callback with a sync object
+ *
+ * @param cb_func: Pointer to callback to be de-registered
+ * @param userdata: Opaque pointer which will be passed back with callback.
+ * @param sync_obj: int referencing the sync object.
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if sync_obj is invalid or refers to an
+ * uninitialized sync object.
+ */
+int cam_sync_deregister_callback(sync_callback cb_func,
+ void *userdata, int32_t sync_obj);
+
+/**
+ * @brief: Signals a sync object with the status argument.
+ *
+ * This function will signal the sync object referenced by the sync_obj
+ * parameter and when doing so, will trigger callbacks in both user space and
+ * kernel. Callbacks will be triggered asynchronously and their order of
+ * execution is not guaranteed. The status parameter indicates whether the
+ * entity performing the signaling wants to convey an error or a success case.
+ *
+ * @param sync_obj: int referencing the sync object.
+ * @param status: Status of the signaling. Must be either
+ * CAM_SYNC_STATE_SIGNALED_SUCCESS or CAM_SYNC_STATE_SIGNALED_ERROR.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_signal(int32_t sync_obj, uint32_t status);
+
+/**
+ * @brief: Merges multiple sync objects
+ *
+ * This function will merge multiple sync objects into a sync group.
+ *
+ * @param sync_obj: Pointer to an array of sync object ids to be merged
+ * @param num_objs: Number of sync object ids in the array
+ * @param merged_obj: Pointer to int that receives the id of the merged
+ * (group) sync object
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj);
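+
+/*
+ * Illustrative sketch only (not part of the API above): merging two sync
+ * objects previously created with cam_sync_create(). The names fence_a and
+ * fence_b and the 100 ms timeout are hypothetical values for the example.
+ *
+ *	int32_t objs[2] = { fence_a, fence_b };
+ *	int32_t merged;
+ *
+ *	if (!cam_sync_merge(objs, 2, &merged)) {
+ *		cam_sync_wait(merged, 100);
+ *		cam_sync_destroy(merged);
+ *	}
+ */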
+
+/**
+ * @brief: Destroys a sync object
+ *
+ * @param sync_obj: int referencing the sync object to be destroyed
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_destroy(int32_t sync_obj);
+
+/**
+ * @brief: Waits for a sync object synchronously
+ *
+ * Does a wait on the sync object identified by sync_obj for a maximum
+ * of timeout_ms milliseconds. Must not be called from interrupt context as
+ * this API can sleep. Should be called from process context only.
+ *
+ * @param sync_obj: int referencing the sync object to be waited upon
+ * @param timeout_ms: Timeout in milliseconds.
+ *
+ * @return 0 upon success, -EINVAL if sync object is in bad state or arguments
+ * are invalid, -ETIMEDOUT if wait times out.
+ */
+int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms);
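+
+/*
+ * End-to-end usage sketch (illustrative only; callback name, object name and
+ * timeout are hypothetical): a consumer creates a sync object, registers a
+ * callback and/or blocks on it, while the producer signals it on completion.
+ *
+ *	static void my_sync_cb(int32_t sync_obj, int status, void *data)
+ *	{
+ *		pr_debug("sync obj %d signaled with status %d\n",
+ *			sync_obj, status);
+ *	}
+ *
+ *	int32_t fence;
+ *
+ *	if (!cam_sync_create(&fence, "capture-done")) {
+ *		cam_sync_register_callback(my_sync_cb, NULL, fence);
+ *		// producer side, once the work is done:
+ *		// cam_sync_signal(fence, CAM_SYNC_STATE_SIGNALED_SUCCESS);
+ *		cam_sync_wait(fence, 100);
+ *		cam_sync_destroy(fence);
+ *	}
+ */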
+
+
+#endif /* __CAM_SYNC_API_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
new file mode 100644
index 0000000..ba9bef4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
@@ -0,0 +1,186 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_SYNC_PRIVATE_H__
+#define __CAM_SYNC_PRIVATE_H__
+
+#include <linux/bitmap.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#ifdef CONFIG_CAM_SYNC_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define CAM_SYNC_OBJ_NAME_LEN 64
+#define CAM_SYNC_MAX_OBJS 1024
+#define CAM_SYNC_MAX_V4L2_EVENTS 50
+#define CAM_SYNC_DEBUG_FILENAME "cam_debug"
+#define CAM_SYNC_DEBUG_BASEDIR "cam"
+#define CAM_SYNC_DEBUG_BUF_SIZE 32
+#define CAM_SYNC_PAYLOAD_WORDS 2
+#define CAM_SYNC_NAME "cam_sync"
+#define CAM_SYNC_WORKQUEUE_NAME "HIPRIO_SYNC_WORK_QUEUE"
+
+#define CAM_SYNC_TYPE_INDV 0
+#define CAM_SYNC_TYPE_GROUP 1
+
+/**
+ * enum sync_type - Enum to indicate the type of sync object,
+ * i.e. individual or group.
+ *
+ * @SYNC_TYPE_INDV : Object is an individual sync object
+ * @SYNC_TYPE_GROUP : Object is a group sync object
+ */
+enum sync_type {
+ SYNC_TYPE_INDV,
+ SYNC_TYPE_GROUP
+};
+
+/**
+ * struct sync_parent_info - Single node of information about a parent
+ * of a sync object, usually part of the parents linked list
+ *
+ * @sync_id : Sync object id of parent
+ * @list : List member used to append this node to a linked list
+ */
+struct sync_parent_info {
+ int32_t sync_id;
+ struct list_head list;
+};
+
+/**
+ * struct sync_child_info - Single node of information about a child
+ * of a sync object, usually part of the children linked list
+ *
+ * @sync_id : Sync object id of child
+ * @list : List member used to append this node to a linked list
+ */
+struct sync_child_info {
+ int32_t sync_id;
+ struct list_head list;
+};
+
+
+/**
+ * struct sync_callback_info - Single node of information about a kernel
+ * callback registered on a sync object
+ *
+ * @callback_func : Callback function, registered by client driver
+ * @cb_data : Callback data, registered by client driver
+ * @status : Status with which the callback will be invoked in the client
+ * @sync_obj : Sync id of the object for which callback is registered
+ * @cb_dispatch_work : Work representing the call dispatch
+ * @list : List member used to append this node to a linked list
+ */
+struct sync_callback_info {
+ sync_callback callback_func;
+ void *cb_data;
+ int status;
+ int32_t sync_obj;
+ struct work_struct cb_dispatch_work;
+ struct list_head list;
+};
+
+/**
+ * struct sync_user_payload - Single node of information about a user space
+ * payload registered from user space
+ *
+ * @payload_data : Payload data, opaque to kernel
+ * @list : List member used to append this node to a linked list
+ */
+struct sync_user_payload {
+ uint64_t payload_data[CAM_SYNC_PAYLOAD_WORDS];
+ struct list_head list;
+};
+
+/**
+ * struct sync_table_row - Single row of information about a sync object, used
+ * for internal book keeping in the sync driver
+ *
+ * @name : Optional string representation of the sync object
+ * @type : Type of the sync object (individual or group)
+ * @sync_id : Integer id representing this sync object
+ * @parents_list : Linked list of parents of this sync object
+ * @children_list : Linked list of children of this sync object
+ * @state : State (INVALID, ACTIVE, SIGNALED_SUCCESS or
+ * SIGNALED_ERROR)
+ * @remaining : Count of remaining children that have not been signaled
+ * @signaled : Completion variable on which block calls will wait
+ * @callback_list : Linked list of kernel callbacks registered
+ * @user_payload_list : Linked list of user space payloads registered
+ */
+struct sync_table_row {
+ char name[CAM_SYNC_OBJ_NAME_LEN];
+ enum sync_type type;
+ int32_t sync_id;
+ /* List of parents, which are merged objects */
+ struct list_head parents_list;
+ /* List of children, which constitute the merged object */
+ struct list_head children_list;
+ uint32_t state;
+ uint32_t remaining;
+ struct completion signaled;
+ struct list_head callback_list;
+ struct list_head user_payload_list;
+};
+
+/**
+ * struct cam_signalable_info - Information for a single sync object that is
+ * ready to be signaled
+ *
+ * @sync_obj : Sync object id of signalable object
+ * @status : Status with which to signal
+ * @list : List member used to append this node to a linked list
+ */
+struct cam_signalable_info {
+ int32_t sync_obj;
+ uint32_t status;
+ struct list_head list;
+};
+
+/**
+ * struct sync_device - Internal struct to book keep sync driver details
+ *
+ * @vdev : Video device
+ * @v4l2_dev : V4L2 device
+ * @sync_table : Table of all sync objects
+ * @row_spinlocks : Spinlock array, one for each row in the table
+ * @table_lock : Mutex used to lock the table
+ * @open_cnt : Count of file open calls made on the sync driver
+ * @work_queue : Work queue used for dispatching kernel callbacks
+ * @cam_sync_eventq : Event queue used to dispatch user payloads to user space
+ * @bitmap : Bitmap representation of all sync objects
+ */
+struct sync_device {
+ struct video_device *vdev;
+ struct v4l2_device v4l2_dev;
+ struct sync_table_row sync_table[CAM_SYNC_MAX_OBJS];
+ spinlock_t row_spinlocks[CAM_SYNC_MAX_OBJS];
+ struct mutex table_lock;
+ int open_cnt;
+ struct workqueue_struct *work_queue;
+ struct v4l2_fh *cam_sync_eventq;
+ spinlock_t cam_sync_eventq_lock;
+ DECLARE_BITMAP(bitmap, CAM_SYNC_MAX_OBJS);
+};
+
+
+#endif /* __CAM_SYNC_PRIVATE_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
new file mode 100644
index 0000000..4f5bf87
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -0,0 +1,296 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-SYNC-UTIL %s:%d " fmt, __func__, __LINE__
+
+#include "cam_sync_util.h"
+
+int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
+ long *idx)
+{
+ int rc = 0;
+
+ mutex_lock(&sync_dev->table_lock);
+
+ *idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+
+ if (*idx < CAM_SYNC_MAX_OBJS)
+ set_bit(*idx, sync_dev->bitmap);
+ else
+ rc = -1;
+
+ mutex_unlock(&sync_dev->table_lock);
+
+ return rc;
+}
+
+int cam_sync_init_object(struct sync_table_row *table,
+ uint32_t idx,
+ const char *name)
+{
+ struct sync_table_row *row = table + idx;
+
+ if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
+ return -EINVAL;
+
+ if (name)
+ strlcpy(row->name, name, SYNC_DEBUG_NAME_LEN);
+ INIT_LIST_HEAD(&row->parents_list);
+ INIT_LIST_HEAD(&row->children_list);
+ row->type = CAM_SYNC_TYPE_INDV;
+ row->sync_id = idx;
+ row->state = CAM_SYNC_STATE_ACTIVE;
+ row->remaining = 0;
+ init_completion(&row->signaled);
+ INIT_LIST_HEAD(&row->callback_list);
+ INIT_LIST_HEAD(&row->user_payload_list);
+
+ return 0;
+}
+
+int cam_sync_init_group_object(struct sync_table_row *table,
+ uint32_t idx,
+ uint32_t *sync_objs,
+ uint32_t num_objs)
+{
+ int i;
+ struct sync_child_info *child_info;
+ struct sync_parent_info *parent_info;
+ struct sync_table_row *row = table + idx;
+ struct sync_table_row *child_row = NULL;
+
+ spin_lock_bh(&sync_dev->row_spinlocks[idx]);
+ INIT_LIST_HEAD(&row->parents_list);
+
+ INIT_LIST_HEAD(&row->children_list);
+
+ /*
+ * While traversing parents and children, we allocate in a loop and in
+ * case allocation fails, we call the clean up function which frees up
+ * all memory allocation thus far
+ */
+ for (i = 0; i < num_objs; i++) {
+ child_info = kzalloc(sizeof(*child_info), GFP_ATOMIC);
+
+ if (!child_info) {
+ cam_sync_util_cleanup_children_list(
+ &row->children_list);
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+ return -ENOMEM;
+ }
+
+ child_info->sync_id = sync_objs[i];
+ list_add_tail(&child_info->list, &row->children_list);
+ }
+
+ for (i = 0; i < num_objs; i++) {
+ /* This gets us the row corresponding to the sync object */
+ child_row = table + sync_objs[i];
+ spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+ parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
+ if (!parent_info) {
+ cam_sync_util_cleanup_parents_list(
+ &child_row->parents_list);
+ cam_sync_util_cleanup_children_list(
+ &row->children_list);
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+ return -ENOMEM;
+ }
+ parent_info->sync_id = idx;
+ list_add_tail(&parent_info->list, &child_row->parents_list);
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+ }
+
+ row->type = CAM_SYNC_TYPE_GROUP;
+ row->sync_id = idx;
+ row->state = CAM_SYNC_STATE_ACTIVE;
+ row->remaining = num_objs;
+ init_completion(&row->signaled);
+ INIT_LIST_HEAD(&row->callback_list);
+ INIT_LIST_HEAD(&row->user_payload_list);
+
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+ return 0;
+}
+
+int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
+{
+ struct sync_table_row *row = table + idx;
+ struct sync_child_info *child_info, *temp_child;
+ struct sync_callback_info *sync_cb, *temp_cb;
+ struct sync_parent_info *parent_info, *temp_parent;
+ struct sync_user_payload *upayload_info, *temp_upayload;
+
+ if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
+ return -EINVAL;
+
+ spin_lock_bh(&sync_dev->row_spinlocks[idx]);
+ clear_bit(idx, sync_dev->bitmap);
+ list_for_each_entry_safe(child_info, temp_child,
+ &row->children_list, list) {
+ list_del_init(&child_info->list);
+ kfree(child_info);
+ }
+
+ list_for_each_entry_safe(parent_info, temp_parent,
+ &row->parents_list, list) {
+ list_del_init(&parent_info->list);
+ kfree(parent_info);
+ }
+
+ list_for_each_entry_safe(upayload_info, temp_upayload,
+ &row->user_payload_list, list) {
+ list_del_init(&upayload_info->list);
+ kfree(upayload_info);
+ }
+
+ list_for_each_entry_safe(sync_cb, temp_cb,
+ &row->callback_list, list) {
+ list_del_init(&sync_cb->list);
+ kfree(sync_cb);
+ }
+
+ row->state = CAM_SYNC_STATE_INVALID;
+ memset(row, 0, sizeof(*row));
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+
+ return 0;
+}
+
+void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
+{
+ struct sync_callback_info *cb_info = container_of(cb_dispatch_work,
+ struct sync_callback_info,
+ cb_dispatch_work);
+
+ cb_info->callback_func(cb_info->sync_obj,
+ cb_info->status,
+ cb_info->cb_data);
+
+ kfree(cb_info);
+}
+
+void cam_sync_util_send_v4l2_event(uint32_t id,
+ uint32_t sync_obj,
+ int status,
+ void *payload,
+ int len)
+{
+ struct v4l2_event event;
+ __u64 *payload_data = NULL;
+ struct cam_sync_ev_header *ev_header = NULL;
+
+ event.id = id;
+ event.type = CAM_SYNC_V4L_EVENT;
+
+ ev_header = CAM_SYNC_GET_HEADER_PTR(event);
+ ev_header->sync_obj = sync_obj;
+ ev_header->status = status;
+
+ payload_data = CAM_SYNC_GET_PAYLOAD_PTR(event, __u64);
+ memcpy(payload_data, payload, len);
+
+ v4l2_event_queue(sync_dev->vdev, &event);
+}
+
+int cam_sync_util_validate_merge(uint32_t *sync_obj, uint32_t num_objs)
+{
+ int i;
+ struct sync_table_row *row = NULL;
+
+ for (i = 0; i < num_objs; i++) {
+ row = sync_dev->sync_table + sync_obj[i];
+ spin_lock_bh(&sync_dev->row_spinlocks[sync_obj[i]]);
+ if (row->type == CAM_SYNC_TYPE_GROUP ||
+ row->state == CAM_SYNC_STATE_INVALID) {
+ pr_err("Group obj %d can't be merged or obj UNINIT\n",
+ sync_obj[i]);
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj[i]]);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj[i]]);
+ }
+ return 0;
+}
+
+int cam_sync_util_add_to_signalable_list(int32_t sync_obj,
+ uint32_t status,
+ struct list_head *sync_list)
+{
+ struct cam_signalable_info *signalable_info = NULL;
+
+ signalable_info = kzalloc(sizeof(*signalable_info), GFP_ATOMIC);
+ if (!signalable_info)
+ return -ENOMEM;
+
+ signalable_info->sync_obj = sync_obj;
+ signalable_info->status = status;
+
+ list_add_tail(&signalable_info->list, sync_list);
+
+ return 0;
+}
+
+int cam_sync_util_get_state(int current_state,
+ int new_state)
+{
+ int result = CAM_SYNC_STATE_SIGNALED_ERROR;
+
+ if (new_state != CAM_SYNC_STATE_SIGNALED_SUCCESS &&
+ new_state != CAM_SYNC_STATE_SIGNALED_ERROR)
+ return CAM_SYNC_STATE_SIGNALED_ERROR;
+
+ switch (current_state) {
+ case CAM_SYNC_STATE_INVALID:
+ result = CAM_SYNC_STATE_SIGNALED_ERROR;
+ break;
+
+ case CAM_SYNC_STATE_ACTIVE:
+ case CAM_SYNC_STATE_SIGNALED_SUCCESS:
+ if (new_state == CAM_SYNC_STATE_SIGNALED_ERROR)
+ result = CAM_SYNC_STATE_SIGNALED_ERROR;
+ else if (new_state == CAM_SYNC_STATE_SIGNALED_SUCCESS)
+ result = CAM_SYNC_STATE_SIGNALED_SUCCESS;
+ break;
+
+ case CAM_SYNC_STATE_SIGNALED_ERROR:
+ result = CAM_SYNC_STATE_SIGNALED_ERROR;
+ break;
+ }
+
+ return result;
+}
+
+void cam_sync_util_cleanup_children_list(struct list_head *list_to_clean)
+{
+ struct sync_child_info *child_info = NULL;
+ struct sync_child_info *temp_child_info = NULL;
+
+ list_for_each_entry_safe(child_info,
+ temp_child_info, list_to_clean, list) {
+ list_del_init(&child_info->list);
+ kfree(child_info);
+ }
+}
+
+void cam_sync_util_cleanup_parents_list(struct list_head *list_to_clean)
+{
+ struct sync_parent_info *parent_info = NULL;
+ struct sync_parent_info *temp_parent_info = NULL;
+
+ list_for_each_entry_safe(parent_info,
+ temp_parent_info, list_to_clean, list) {
+ list_del_init(&parent_info->list);
+ kfree(parent_info);
+ }
+}
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
new file mode 100644
index 0000000..9dedd14
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
@@ -0,0 +1,156 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_SYNC_UTIL_H__
+#define __CAM_SYNC_UTIL_H__
+
+
+#include <cam_sync_api.h>
+#include "cam_sync_private.h"
+
+extern struct sync_device *sync_dev;
+
+/**
+ * @brief: Finds an empty row in the sync table and sets its corresponding bit
+ * in the bit array
+ *
+ * @param sync_dev : Pointer to the sync device instance
+ * @param idx : Pointer to a long that receives the index found in the
+ * bit array
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
+ long *idx);
+
+/**
+ * @brief: Function to initialize an empty row in the sync table. This should be
+ * called only for individual sync objects.
+ *
+ * @param table : Pointer to the sync objects table
+ * @param idx : Index of row to initialize
+ * @param name : Optional string representation of the sync object. Should be
+ * 63 characters or less
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_init_object(struct sync_table_row *table,
+ uint32_t idx,
+ const char *name);
+
+/**
+ * @brief: Function to uninitialize a row in the sync table
+ *
+ * @param table : Pointer to the sync objects table
+ * @param idx : Index of row to uninitialize
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx);
+
+/**
+ * @brief: Function to initialize a row in the sync table when the object is a
+ * group object, also known as a merged sync object
+ *
+ * @param table : Pointer to the sync objects table
+ * @param idx : Index of row to initialize
+ * @param sync_objs : Array of sync objects which will be merged
+ * or grouped together
+ * @param num_objs : Number of sync objects in the array
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_init_group_object(struct sync_table_row *table,
+ uint32_t idx,
+ uint32_t *sync_objs,
+ uint32_t num_objs);
+
+/**
+ * @brief: Function to dispatch a kernel callback for a sync callback
+ *
+ * @param cb_dispatch_work : Pointer to the work_struct that needs to be
+ * dispatched
+ *
+ * @return None
+ */
+void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work);
+
+/**
+ * @brief: Function to send V4L event to user space
+ * @param id : V4L event id to send
+ * @param sync_obj : Sync obj for which event needs to be sent
+ * @param status : Status of the event
+ * @param payload : Payload that needs to be sent to user space
+ * @param len : Length of the payload
+ *
+ * @return None
+ */
+void cam_sync_util_send_v4l2_event(uint32_t id,
+ uint32_t sync_obj,
+ int status,
+ void *payload,
+ int len);
+
+/**
+ * @brief: Function to validate sync merge arguments
+ *
+ * @param sync_obj : Array of sync objects to merge
+ * @param num_objs : Number of sync objects in the array
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_util_validate_merge(uint32_t *sync_obj, uint32_t num_objs);
+
+/**
+ * @brief: Function which adds sync object information to the signalable list
+ *
+ * @param sync_obj : Sync object to add
+ * @param status : Status of above sync object
+ * @param list : Linked list where the information should be added to
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_util_add_to_signalable_list(int32_t sync_obj,
+ uint32_t status,
+ struct list_head *sync_list);
+
+/**
+ * @brief: Function which gets the next state of the sync object based on the
+ * current state and the new state
+ *
+ * @param current_state : Current state of the sync object
+ * @param new_state : New state of the sync object
+ *
+ * @return Next state of the sync object
+ */
+int cam_sync_util_get_state(int current_state,
+ int new_state);
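+
+/*
+ * For reference, cam_sync_util_get_state() in cam_sync_util.c resolves
+ * (current_state, new_state) pairs as follows; any new_state other than the
+ * two signaled states also resolves to CAM_SYNC_STATE_SIGNALED_ERROR:
+ *
+ *	current state		new: SIGNALED_SUCCESS	new: SIGNALED_ERROR
+ *	INVALID			SIGNALED_ERROR		SIGNALED_ERROR
+ *	ACTIVE			SIGNALED_SUCCESS	SIGNALED_ERROR
+ *	SIGNALED_SUCCESS	SIGNALED_SUCCESS	SIGNALED_ERROR
+ *	SIGNALED_ERROR		SIGNALED_ERROR		SIGNALED_ERROR
+ */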
+
+/**
+ * @brief: Function to clean up the children of a sync object
+ * @param list_to_clean : List to clean up
+ *
+ * @return None
+ */
+void cam_sync_util_cleanup_children_list(struct list_head *list_to_clean);
+
+/**
+ * @brief: Function to clean up the parents of a sync object
+ * @param list_to_clean : List to clean up
+ *
+ * @return None
+ */
+void cam_sync_util_cleanup_parents_list(struct list_head *list_to_clean);
+
+#endif /* __CAM_SYNC_UTIL_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 27156fc..9a28700 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -2007,7 +2007,7 @@ static void sde_rotator_cancel_request(struct sde_rot_mgr *mgr,
devm_kfree(&mgr->pdev->dev, req);
}
-static void sde_rotator_cancel_all_requests(struct sde_rot_mgr *mgr,
+void sde_rotator_cancel_all_requests(struct sde_rot_mgr *mgr,
struct sde_rot_file_private *private)
{
struct sde_rot_entry_container *req, *req_next;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index 0818917..980e4af 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -686,6 +686,14 @@ int sde_rotator_validate_request(struct sde_rot_mgr *rot_dev,
int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable);
/*
+ * sde_rotator_cancel_all_requests - cancel all outstanding requests
+ * @mgr: Pointer to rotator manager
+ * @private: Pointer to rotator manager per file context
+ */
+void sde_rotator_cancel_all_requests(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private);
+
+/*
* sde_rot_mgr_lock - serialization lock prior to rotator manager calls
* @mgr: Pointer to rotator manager
*/
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index 86e04d6..e56c70a 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -996,11 +996,14 @@ static int sde_rotator_debug_base_release(struct inode *inode,
{
struct sde_rotator_debug_base *dbg = file->private_data;
- if (dbg && dbg->buf) {
+ if (dbg) {
+ mutex_lock(&dbg->buflock);
kfree(dbg->buf);
dbg->buf_len = 0;
dbg->buf = NULL;
+ mutex_unlock(&dbg->buflock);
}
+
return 0;
}
@@ -1032,8 +1035,10 @@ static ssize_t sde_rotator_debug_base_offset_write(struct file *file,
if (cnt > (dbg->max_offset - off))
cnt = dbg->max_offset - off;
+ mutex_lock(&dbg->buflock);
dbg->off = off;
dbg->cnt = cnt;
+ mutex_unlock(&dbg->buflock);
SDEROT_DBG("offset=%x cnt=%x\n", off, cnt);
@@ -1053,7 +1058,10 @@ static ssize_t sde_rotator_debug_base_offset_read(struct file *file,
if (*ppos)
return 0; /* the end */
+ mutex_lock(&dbg->buflock);
len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
+ mutex_unlock(&dbg->buflock);
+
if (len < 0 || len >= sizeof(buf))
return 0;
@@ -1092,6 +1100,8 @@ static ssize_t sde_rotator_debug_base_reg_write(struct file *file,
if (off >= dbg->max_offset)
return -EFAULT;
+ mutex_lock(&dbg->buflock);
+
/* Enable Clock for register access */
sde_rotator_clk_ctrl(dbg->mgr, true);
@@ -1100,6 +1110,8 @@ static ssize_t sde_rotator_debug_base_reg_write(struct file *file,
/* Disable Clock after register access */
sde_rotator_clk_ctrl(dbg->mgr, false);
+ mutex_unlock(&dbg->buflock);
+
SDEROT_DBG("addr=%zx data=%x\n", off, data);
return count;
@@ -1110,12 +1122,14 @@ static ssize_t sde_rotator_debug_base_reg_read(struct file *file,
{
struct sde_rotator_debug_base *dbg = file->private_data;
size_t len;
+ int rc = 0;
if (!dbg) {
SDEROT_ERR("invalid handle\n");
return -ENODEV;
}
+ mutex_lock(&dbg->buflock);
if (!dbg->buf) {
char dump_buf[64];
char *ptr;
@@ -1127,7 +1141,8 @@ static ssize_t sde_rotator_debug_base_reg_read(struct file *file,
if (!dbg->buf) {
SDEROT_ERR("not enough memory to hold reg dump\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto debug_read_error;
}
ptr = dbg->base + dbg->off;
@@ -1157,18 +1172,26 @@ static ssize_t sde_rotator_debug_base_reg_read(struct file *file,
dbg->buf_len = tot;
}
- if (*ppos >= dbg->buf_len)
- return 0; /* done reading */
+ if (*ppos >= dbg->buf_len) {
+ rc = 0; /* done reading */
+ goto debug_read_error;
+ }
len = min(count, dbg->buf_len - (size_t) *ppos);
if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
SDEROT_ERR("failed to copy to user\n");
- return -EFAULT;
+ rc = -EFAULT;
+ goto debug_read_error;
}
*ppos += len; /* increase offset */
+ mutex_unlock(&dbg->buflock);
return len;
+
+debug_read_error:
+ mutex_unlock(&dbg->buflock);
+ return rc;
}
static const struct file_operations sde_rotator_off_fops = {
@@ -1202,6 +1225,9 @@ int sde_rotator_debug_register_base(struct sde_rotator_device *rot_dev,
if (!dbg)
return -ENOMEM;
+ mutex_init(&dbg->buflock);
+ mutex_lock(&dbg->buflock);
+
if (name)
strlcpy(dbg->name, name, sizeof(dbg->name));
dbg->base = io_data->base;
@@ -1223,6 +1249,7 @@ int sde_rotator_debug_register_base(struct sde_rotator_device *rot_dev,
dbg->base += rot_dev->mdata->regdump ?
rot_dev->mdata->regdump[0].offset : 0;
}
+ mutex_unlock(&dbg->buflock);
strlcpy(dbgname + prefix_len, "off", sizeof(dbgname) - prefix_len);
ent_off = debugfs_create_file(dbgname, 0644, debugfs_root, dbg,
@@ -1240,7 +1267,9 @@ int sde_rotator_debug_register_base(struct sde_rotator_device *rot_dev,
goto reg_fail;
}
+ mutex_lock(&dbg->buflock);
dbg->mgr = rot_dev->mgr;
+ mutex_unlock(&dbg->buflock);
return 0;
reg_fail:
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
index c2c6f97..c6d0151 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -53,6 +53,7 @@ struct sde_rotator_debug_base {
char *buf;
size_t buf_len;
struct sde_rot_mgr *mgr;
+ struct mutex buflock;
};
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 47f4cb0..c061446 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -450,11 +450,15 @@ static void sde_rotator_stop_streaming(struct vb2_queue *q)
list_empty(&ctx->pending_list),
msecs_to_jiffies(rot_dev->streamoff_timeout));
mutex_lock(q->lock);
- if (!ret)
+ if (!ret) {
SDEDEV_ERR(rot_dev->dev,
"timeout to stream off s:%d t:%d p:%d\n",
ctx->session_id, q->type,
!list_empty(&ctx->pending_list));
+ sde_rot_mgr_lock(rot_dev->mgr);
+ sde_rotator_cancel_all_requests(rot_dev->mgr, ctx->private);
+ sde_rot_mgr_unlock(rot_dev->mgr);
+ }
sde_rotator_return_all_buffers(q, VB2_BUF_STATE_ERROR);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 2c9c75e..9071361 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -1860,6 +1860,7 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
u32 danger_lut = 0; /* applicable for realtime client only */
u32 safe_lut = 0; /* applicable for realtime client only */
u32 flags = 0;
+ u32 rststs = 0;
struct sde_rotation_item *item;
int ret;
@@ -1931,10 +1932,46 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
SDEROT_EVTLOG(ctx->start_ctrl, ctx->sys_cache_mode, ctx->op_mode);
+ /*
+ * if Rotator HW is reset, but missing PM event notification, we
+ * need to init the SW timestamp automatically.
+ */
+ rststs = SDE_ROTREG_READ(rot->mdss_base, REGDMA_RESET_STATUS_REG);
+ if (!rot->reset_hw_ts && rststs) {
+ u32 l_ts, h_ts, swts;
+
+ swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
+ h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
+ l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
+ SDEROT_EVTLOG(0xbad0, rststs, swts, h_ts, l_ts);
+
+ if (ctx->q_id == ROT_QUEUE_HIGH_PRIORITY)
+ h_ts = (h_ts - 1) & SDE_REGDMA_SWTS_MASK;
+ else
+ l_ts = (l_ts - 1) & SDE_REGDMA_SWTS_MASK;
+
+		/* construct the combined timestamp */
+ swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
+ ((l_ts & SDE_REGDMA_SWTS_MASK) <<
+ SDE_REGDMA_SWTS_SHIFT);
+
+		SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts:0x%x\n",
+ swts, h_ts, l_ts);
+ SDEROT_EVTLOG(0x900d, swts, h_ts, l_ts);
+ rot->last_hw_ts = swts;
+
+ SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
+ rot->last_hw_ts);
+ SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
+ /* ensure write is issued to the rotator HW */
+ wmb();
+ }
+
if (rot->reset_hw_ts) {
SDEROT_EVTLOG(rot->last_hw_ts);
SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
rot->last_hw_ts);
+ SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
/* ensure write is issued to the rotator HW */
wmb();
rot->reset_hw_ts = false;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
index dc97bdf..aa762dd 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
@@ -291,5 +291,6 @@
#define REGDMA_INT_LOW_MASK 0x00000700
#define REGDMA_INT_ERR_MASK 0x000F0000
#define REGDMA_TIMESTAMP_REG ROT_SSPP_TPG_PATTERN_GEN_INIT_VAL
+#define REGDMA_RESET_STATUS_REG ROT_SSPP_TPG_RGB_MAPPING
#endif /*_SDE_ROTATOR_R3_HWIO_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index 4f6386b..e209192 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -471,11 +471,18 @@ static int sde_smmu_fault_handler(struct iommu_domain *domain,
sde_smmu = (struct sde_smmu_client *)token;
- /* trigger rotator panic and dump */
- SDEROT_ERR("trigger rotator panic and dump, iova=0x%08lx\n", iova);
+ /* trigger rotator dump */
+ SDEROT_ERR("trigger rotator dump, iova=0x%08lx, flags=0x%x\n",
+ iova, flags);
+ SDEROT_ERR("SMMU device:%s", sde_smmu->dev->kobj.name);
- sde_rot_dump_panic();
+ /* generate dump, but no panic */
+ sde_rot_evtlog_tout_handler(false, __func__, "rot", "vbif_dbg_bus");
+ /*
+ * return -ENOSYS to allow smmu driver to dump out useful
+ * debug info.
+ */
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 16c2aae..87a4ac8 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -33,7 +33,6 @@ static int profile_table[] = {
[ilog2(HAL_H264_PROFILE_CONSTRAINED_HIGH)] =
HFI_H264_PROFILE_CONSTRAINED_HIGH,
[ilog2(HAL_VPX_PROFILE_VERSION_1)] = HFI_VPX_PROFILE_VERSION_1,
- [ilog2(HAL_MVC_PROFILE_STEREO_HIGH)] = HFI_H264_PROFILE_STEREO_HIGH,
};
static int entropy_mode[] = {
@@ -68,13 +67,10 @@ static int color_format[] = {
[ilog2(HAL_COLOR_FORMAT_BGR565)] = HFI_COLOR_FORMAT_BGR565,
[ilog2(HAL_COLOR_FORMAT_RGB888)] = HFI_COLOR_FORMAT_RGB888,
[ilog2(HAL_COLOR_FORMAT_BGR888)] = HFI_COLOR_FORMAT_BGR888,
- [ilog2(HAL_COLOR_FORMAT_RGBA8888)] = HFI_COLOR_FORMAT_RGBA8888,
/* UBWC Color formats*/
[ilog2(HAL_COLOR_FORMAT_NV12_UBWC)] = HFI_COLOR_FORMAT_NV12_UBWC,
[ilog2(HAL_COLOR_FORMAT_NV12_TP10_UBWC)] =
HFI_COLOR_FORMAT_YUV420_TP10_UBWC,
- [ilog2(HAL_COLOR_FORMAT_RGBA8888_UBWC)] =
- HFI_COLOR_FORMAT_RGBA8888_UBWC,
};
static int nal_type[] = {
@@ -126,26 +122,6 @@ static inline int hal_to_hfi_type(int property, int hal_type)
}
}
-u32 get_hfi_layout(enum hal_buffer_layout_type hal_buf_layout)
-{
- u32 hfi_layout;
-
- switch (hal_buf_layout) {
- case HAL_BUFFER_LAYOUT_TOP_BOTTOM:
- hfi_layout = HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM;
- break;
- case HAL_BUFFER_LAYOUT_SEQ:
- hfi_layout = HFI_MVC_BUFFER_LAYOUT_SEQ;
- break;
- default:
- dprintk(VIDC_ERR, "Invalid buffer layout: %#x\n",
- hal_buf_layout);
- hfi_layout = HFI_MVC_BUFFER_LAYOUT_SEQ;
- break;
- }
- return hfi_layout;
-}
-
enum hal_domain vidc_get_hal_domain(u32 hfi_domain)
{
enum hal_domain hal_domain = 0;
@@ -192,9 +168,6 @@ enum hal_video_codec vidc_get_hal_codec(u32 hfi_codec)
case HFI_VIDEO_CODEC_VP9:
hal_codec = HAL_VIDEO_CODEC_VP9;
break;
- case HFI_VIDEO_CODEC_HEVC_HYBRID:
- hal_codec = HAL_VIDEO_CODEC_HEVC_HYBRID;
- break;
default:
dprintk(VIDC_INFO, "%s: invalid codec 0x%x\n",
__func__, hfi_codec);
@@ -233,7 +206,6 @@ u32 vidc_get_hfi_codec(enum hal_video_codec hal_codec)
u32 hfi_codec = 0;
switch (hal_codec) {
- case HAL_VIDEO_CODEC_MVC:
case HAL_VIDEO_CODEC_H264:
hfi_codec = HFI_VIDEO_CODEC_H264;
break;
@@ -252,9 +224,6 @@ u32 vidc_get_hfi_codec(enum hal_video_codec hal_codec)
case HAL_VIDEO_CODEC_VP9:
hfi_codec = HFI_VIDEO_CODEC_VP9;
break;
- case HAL_VIDEO_CODEC_HEVC_HYBRID:
- hfi_codec = HFI_VIDEO_CODEC_HEVC_HYBRID;
- break;
default:
dprintk(VIDC_INFO, "%s: invalid codec 0x%x\n",
__func__, hal_codec);
@@ -555,12 +524,6 @@ static int get_hfi_extradata_index(enum hal_extradata_id index)
case HAL_EXTRADATA_INTERLACE_VIDEO:
ret = HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA;
break;
- case HAL_EXTRADATA_VC1_FRAMEDISP:
- ret = HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA;
- break;
- case HAL_EXTRADATA_VC1_SEQDISP:
- ret = HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA;
- break;
case HAL_EXTRADATA_TIMESTAMP:
ret = HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA;
break;
@@ -673,9 +636,6 @@ static u32 get_hfi_ltr_mode(enum ltr_mode ltr_mode_type)
case HAL_LTR_MODE_MANUAL:
ltrmode = HFI_LTR_MODE_MANUAL;
break;
- case HAL_LTR_MODE_PERIODIC:
- ltrmode = HFI_LTR_MODE_PERIODIC;
- break;
default:
dprintk(VIDC_ERR, "Invalid ltr mode: %#x\n",
ltr_mode_type);
@@ -939,31 +899,10 @@ int create_pkt_cmd_session_get_property(
struct hfi_cmd_session_get_property_packet *pkt,
struct hal_session *session, enum hal_property ptype)
{
- int rc = 0;
-
- if (!pkt || !session) {
- dprintk(VIDC_ERR, "%s Invalid parameters\n", __func__);
- return -EINVAL;
- }
- pkt->size = sizeof(struct hfi_cmd_session_get_property_packet);
- pkt->packet_type = HFI_CMD_SESSION_GET_PROPERTY;
- pkt->session_id = hash32_ptr(session);
- pkt->num_properties = 1;
- switch (ptype) {
- case HAL_CONFIG_VDEC_ENTROPY:
- pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VDEC_ENTROPY;
- break;
- case HAL_PARAM_PROFILE_LEVEL_CURRENT:
- pkt->rg_property_data[0] =
- HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
- break;
- default:
- dprintk(VIDC_ERR, "%s cmd:%#x not supported\n", __func__,
+ /* Currently no get property is supported */
+ dprintk(VIDC_ERR, "%s cmd:%#x not supported\n", __func__,
ptype);
- rc = -EINVAL;
- break;
- }
- return rc;
+ return -EINVAL;
}
int create_pkt_cmd_session_set_property(
@@ -1028,8 +967,6 @@ int create_pkt_cmd_session_set_property(
break;
case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO:
break;
- case HAL_PARAM_EXTRA_DATA_HEADER_CONFIG:
- break;
case HAL_PARAM_FRAME_SIZE:
{
struct hfi_frame_size *hfi;
@@ -1142,14 +1079,6 @@ int create_pkt_cmd_session_set_property(
pkt->size += sizeof(u32) * 2;
break;
}
- case HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
- {
- create_pkt_enable(pkt->rg_property_data,
- HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
- ((struct hal_enable *)pdata)->enable);
- pkt->size += sizeof(u32) * 2;
- break;
- }
case HAL_PARAM_VDEC_MULTI_STREAM:
{
struct hfi_multi_stream *hfi;
@@ -1199,10 +1128,6 @@ int create_pkt_cmd_session_set_property(
HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME;
pkt->size += sizeof(u32);
break;
- case HAL_PARAM_VENC_MPEG4_SHORT_HEADER:
- break;
- case HAL_PARAM_VENC_MPEG4_AC_PREDICTION:
- break;
case HAL_CONFIG_VENC_TARGET_BITRATE:
{
struct hfi_bitrate *hfi;
@@ -1590,14 +1515,6 @@ int create_pkt_cmd_session_set_property(
pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
break;
}
- case HAL_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC:
- {
- create_pkt_enable(pkt->rg_property_data,
- HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC,
- ((struct hal_enable *)pdata)->enable);
- pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
- break;
- }
case HAL_PARAM_VENC_PRESERVE_TEXT_QUALITY:
{
create_pkt_enable(pkt->rg_property_data,
@@ -1606,21 +1523,6 @@ int create_pkt_cmd_session_set_property(
pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
break;
}
- case HAL_PARAM_MVC_BUFFER_LAYOUT:
- {
- struct hfi_mvc_buffer_layout_descp_type *hfi;
- struct hal_mvc_buffer_layout *layout_info = pdata;
-
- pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT;
- hfi = (struct hfi_mvc_buffer_layout_descp_type *)
- &pkt->rg_property_data[1];
- hfi->layout_type = get_hfi_layout(layout_info->layout_type);
- hfi->bright_view_first = layout_info->bright_view_first;
- hfi->ngap = layout_info->ngap;
- pkt->size += sizeof(u32) +
- sizeof(struct hfi_mvc_buffer_layout_descp_type);
- break;
- }
case HAL_PARAM_VENC_LTRMODE:
{
struct hfi_ltr_mode *hfi;
@@ -1731,14 +1633,6 @@ int create_pkt_cmd_session_set_property(
pkt->size += sizeof(u32) * 2;
break;
}
- case HAL_PARAM_VENC_HIER_B_MAX_ENH_LAYERS:
- {
- pkt->rg_property_data[0] =
- HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER;
- pkt->rg_property_data[1] = *(u32 *)pdata;
- pkt->size += sizeof(u32) * 2;
- break;
- }
case HAL_PARAM_VENC_HIER_P_HYBRID_MODE:
{
pkt->rg_property_data[0] =
@@ -1937,7 +1831,6 @@ int create_pkt_cmd_session_set_property(
case HAL_PARAM_VDEC_MB_QUANTIZATION:
case HAL_PARAM_VDEC_NUM_CONCEALED_MB:
case HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING:
- case HAL_PARAM_VENC_MPEG4_DATA_PARTITIONING:
case HAL_CONFIG_BUFFER_COUNT_ACTUAL:
case HAL_CONFIG_VDEC_MULTI_STREAM:
case HAL_PARAM_VENC_MULTI_SLICE_INFO:
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index abc6cc8..7c99e90 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -26,13 +26,6 @@
#define MAX_OPERATING_FRAME_RATE (300 << 16)
#define OPERATING_FRAME_RATE_STEP (1 << 16)
-static const char *const mpeg_video_vidc_divx_format[] = {
- "DIVX Format 3",
- "DIVX Format 4",
- "DIVX Format 5",
- "DIVX Format 6",
- NULL
-};
static const char *const mpeg_video_stream_format[] = {
"NAL Format Start Codes",
"NAL Format One NAL Per Buffer",
@@ -57,29 +50,6 @@ static const char *const perf_level[] = {
"Turbo"
};
-static const char *const h263_level[] = {
- "1.0",
- "2.0",
- "3.0",
- "4.0",
- "4.5",
- "5.0",
- "6.0",
- "7.0",
-};
-
-static const char *const h263_profile[] = {
- "Baseline",
- "H320 Coding",
- "Backward Compatible",
- "ISWV2",
- "ISWV3",
- "High Compression",
- "Internet",
- "Interlace",
- "High Latency",
-};
-
static const char *const vp8_profile_level[] = {
"Unused",
"0.0",
@@ -108,11 +78,6 @@ static const char *const mpeg_vidc_video_entropy_mode[] = {
"CABAC Entropy Mode",
};
-static const char *const mpeg_vidc_video_h264_mvc_layout[] = {
- "Frame packing arrangement sequential",
- "Frame packing arrangement top-bottom",
-};
-
static const char *const mpeg_vidc_video_dpb_color_format[] = {
"DPB Color Format None",
"DPB Color Format UBWC",
@@ -462,37 +427,6 @@ static u32 get_frame_size(struct msm_vidc_inst *inst,
return frame_size;
}
-static int is_ctrl_valid_for_codec(struct msm_vidc_inst *inst,
- struct v4l2_ctrl *ctrl)
-{
- int rc = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
- if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264_MVC &&
- ctrl->val != V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) {
- dprintk(VIDC_ERR,
- "Profile %#x not supported for MVC\n",
- ctrl->val);
- rc = -ENOTSUPP;
- break;
- }
- break;
- case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
- if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264_MVC &&
- ctrl->val >= V4L2_MPEG_VIDEO_H264_LEVEL_5_2) {
- dprintk(VIDC_ERR, "Level %#x not supported for MVC\n",
- ctrl->val);
- rc = -ENOTSUPP;
- break;
- }
- break;
- default:
- break;
- }
- return rc;
-}
-
struct msm_vidc_format vdec_formats[] = {
{
.name = "YCbCr Semiplanar 4:2:0",
@@ -516,14 +450,6 @@ struct msm_vidc_format vdec_formats[] = {
.type = CAPTURE_PORT,
},
{
- .name = "Mpeg4",
- .description = "Mpeg4 compressed format",
- .fourcc = V4L2_PIX_FMT_MPEG4,
- .get_frame_size = get_frame_size_compressed,
- .type = OUTPUT_PORT,
- .defer_outputs = false,
- },
- {
.name = "Mpeg2",
.description = "Mpeg2 compressed format",
.fourcc = V4L2_PIX_FMT_MPEG2,
@@ -532,30 +458,6 @@ struct msm_vidc_format vdec_formats[] = {
.defer_outputs = false,
},
{
- .name = "H263",
- .description = "H263 compressed format",
- .fourcc = V4L2_PIX_FMT_H263,
- .get_frame_size = get_frame_size_compressed,
- .type = OUTPUT_PORT,
- .defer_outputs = false,
- },
- {
- .name = "VC1",
- .description = "VC-1 compressed format",
- .fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
- .get_frame_size = get_frame_size_compressed,
- .type = OUTPUT_PORT,
- .defer_outputs = false,
- },
- {
- .name = "VC1 SP",
- .description = "VC-1 compressed format G",
- .fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
- .get_frame_size = get_frame_size_compressed,
- .type = OUTPUT_PORT,
- .defer_outputs = false,
- },
- {
.name = "H264",
.description = "H264 compressed format",
.fourcc = V4L2_PIX_FMT_H264,
@@ -564,14 +466,6 @@ struct msm_vidc_format vdec_formats[] = {
.defer_outputs = false,
},
{
- .name = "H264_MVC",
- .description = "H264_MVC compressed format",
- .fourcc = V4L2_PIX_FMT_H264_MVC,
- .get_frame_size = get_frame_size_compressed,
- .type = OUTPUT_PORT,
- .defer_outputs = false,
- },
- {
.name = "HEVC",
.description = "HEVC compressed format",
.fourcc = V4L2_PIX_FMT_HEVC,
@@ -826,10 +720,6 @@ int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
}
hdev = inst->core->device;
- rc = is_ctrl_valid_for_codec(inst, ctrl);
- if (rc)
- return rc;
-
/* Small helper macro for quickly getting a control and err checking */
#define TRY_GET_CTRL(__ctrl_id) ({ \
struct v4l2_ctrl *__temp; \
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 0a6a1ce..ff28dd0 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -37,11 +37,6 @@
#define MAX_HYBRID_HIER_P_LAYERS 6
#define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
-#define CODING V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY
-#define BITSTREAM_RESTRICT_ENABLED \
- V4L2_MPEG_VIDC_VIDEO_H264_VUI_BITSTREAM_RESTRICT_ENABLED
-#define BITSTREAM_RESTRICT_DISABLED \
- V4L2_MPEG_VIDC_VIDEO_H264_VUI_BITSTREAM_RESTRICT_DISABLED
#define MIN_TIME_RESOLUTION 1
#define MAX_TIME_RESOLUTION 0xFFFFFF
#define DEFAULT_TIME_RESOLUTION 0x7530
@@ -774,72 +769,6 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
.qmenu = NULL,
},
{
- .id = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_X_RANGE,
- .name = "I-Frame X coordinate search range",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .minimum = 4,
- .maximum = 128,
- .default_value = 4,
- .step = 1,
- .menu_skip_mask = 0,
- .qmenu = NULL,
- },
- {
- .id = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_Y_RANGE,
- .name = "I-Frame Y coordinate search range",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .minimum = 4,
- .maximum = 128,
- .default_value = 4,
- .step = 1,
- .menu_skip_mask = 0,
- .qmenu = NULL,
- },
- {
- .id = V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_X_RANGE,
- .name = "P-Frame X coordinate search range",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .minimum = 4,
- .maximum = 128,
- .default_value = 4,
- .step = 1,
- .menu_skip_mask = 0,
- .qmenu = NULL,
- },
- {
- .id = V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_Y_RANGE,
- .name = "P-Frame Y coordinate search range",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .minimum = 4,
- .maximum = 128,
- .default_value = 4,
- .step = 1,
- .menu_skip_mask = 0,
- .qmenu = NULL,
- },
- {
- .id = V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_X_RANGE,
- .name = "B-Frame X coordinate search range",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .minimum = 4,
- .maximum = 128,
- .default_value = 4,
- .step = 1,
- .menu_skip_mask = 0,
- .qmenu = NULL,
- },
- {
- .id = V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_Y_RANGE,
- .name = "B-Frame Y coordinate search range",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .minimum = 4,
- .maximum = 128,
- .default_value = 4,
- .step = 1,
- .menu_skip_mask = 0,
- .qmenu = NULL,
- },
- {
.id = V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS,
.name = "Set Hier B num layers",
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -1258,7 +1187,7 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
struct hal_ltr_use use_ltr;
struct hal_ltr_mark mark_ltr;
struct hal_hybrid_hierp hyb_hierp;
- u32 hier_p_layers = 0, hier_b_layers = 0;
+ u32 hier_p_layers = 0;
int max_hierp_layers;
int baselayerid = 0;
struct hal_video_signal_info signal_info = {0};
@@ -1475,28 +1404,6 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
temp_ctrl->val);
pdata = &h264_entropy_control;
break;
- case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
- temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL);
-
- property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
- profile_level.profile = msm_comm_v4l2_to_hal(ctrl->id,
- ctrl->val);
- profile_level.level = msm_comm_v4l2_to_hal(
- V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
- temp_ctrl->val);
- pdata = &profile_level;
- break;
- case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
- temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE);
-
- property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
- profile_level.level = msm_comm_v4l2_to_hal(ctrl->id,
- ctrl->val);
- profile_level.profile = msm_comm_v4l2_to_hal(
- V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
- temp_ctrl->val);
- pdata = &profile_level;
- break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
@@ -1836,16 +1743,6 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
enable.enable = ctrl->val;
pdata = &enable;
break;
- case V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS:
- if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC) {
- dprintk(VIDC_ERR, "Hier B supported for HEVC only\n");
- rc = -ENOTSUPP;
- break;
- }
- property_id = HAL_PARAM_VENC_HIER_B_MAX_ENH_LAYERS;
- hier_b_layers = ctrl->val;
- pdata = &hier_b_layers;
- break;
case V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE:
property_id = HAL_PARAM_VENC_HIER_P_HYBRID_MODE;
hyb_hierp.layers = ctrl->val;
@@ -2082,7 +1979,6 @@ int msm_venc_s_ext_ctrl(struct msm_vidc_inst *inst,
struct v4l2_ext_control *control;
struct hfi_device *hdev;
struct hal_ltr_mode ltr_mode;
- struct hal_vc1e_perf_cfg_type search_range = { {0} };
u32 property_id = 0, layer_id = MSM_VIDC_ALL_LAYER_ID;
void *pdata = NULL;
struct msm_vidc_capability *cap = NULL;
@@ -2137,36 +2033,6 @@ int msm_venc_s_ext_ctrl(struct msm_vidc_inst *inst,
property_id = HAL_PARAM_VENC_LTRMODE;
pdata = &ltr_mode;
break;
- case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_X_RANGE:
- search_range.i_frame.x_subsampled = control[i].value;
- property_id = HAL_PARAM_VENC_SEARCH_RANGE;
- pdata = &search_range;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_Y_RANGE:
- search_range.i_frame.y_subsampled = control[i].value;
- property_id = HAL_PARAM_VENC_SEARCH_RANGE;
- pdata = &search_range;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_X_RANGE:
- search_range.p_frame.x_subsampled = control[i].value;
- property_id = HAL_PARAM_VENC_SEARCH_RANGE;
- pdata = &search_range;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_Y_RANGE:
- search_range.p_frame.y_subsampled = control[i].value;
- property_id = HAL_PARAM_VENC_SEARCH_RANGE;
- pdata = &search_range;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_X_RANGE:
- search_range.b_frame.x_subsampled = control[i].value;
- property_id = HAL_PARAM_VENC_SEARCH_RANGE;
- pdata = &search_range;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_Y_RANGE:
- search_range.b_frame.y_subsampled = control[i].value;
- property_id = HAL_PARAM_VENC_SEARCH_RANGE;
- pdata = &search_range;
- break;
case V4L2_CID_MPEG_VIDC_VENC_PARAM_SAR_WIDTH:
sar.aspect_width = control[i].value;
property_id = HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 270fc31..114a702 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1820,14 +1820,12 @@ static int try_get_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
- case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE:
ctrl->val = inst->profile;
break;
- case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL:
ctrl->val = inst->level;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index bc86ef4..5e49f42 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -52,8 +52,6 @@ const char *const mpeg_video_vidc_extradata[] = {
"Extradata none",
"Extradata MB Quantization",
"Extradata Interlace Video",
- "Extradata VC1 Framedisp",
- "Extradata VC1 Seqdisp",
"Extradata timestamp",
"Extradata S3D Frame Packing",
"Extradata Frame Rate",
@@ -715,22 +713,13 @@ enum hal_video_codec get_hal_codec(int fourcc)
case V4L2_PIX_FMT_H264_MVC:
codec = HAL_VIDEO_CODEC_MVC;
break;
- case V4L2_PIX_FMT_H263:
- codec = HAL_VIDEO_CODEC_H263;
- break;
+
case V4L2_PIX_FMT_MPEG1:
codec = HAL_VIDEO_CODEC_MPEG1;
break;
case V4L2_PIX_FMT_MPEG2:
codec = HAL_VIDEO_CODEC_MPEG2;
break;
- case V4L2_PIX_FMT_MPEG4:
- codec = HAL_VIDEO_CODEC_MPEG4;
- break;
- case V4L2_PIX_FMT_VC1_ANNEX_G:
- case V4L2_PIX_FMT_VC1_ANNEX_L:
- codec = HAL_VIDEO_CODEC_VC1;
- break;
case V4L2_PIX_FMT_VP8:
codec = HAL_VIDEO_CODEC_VP8;
break;
@@ -766,9 +755,6 @@ static enum hal_uncompressed_format get_hal_uncompressed(int fourcc)
case V4L2_PIX_FMT_NV12_TP10_UBWC:
format = HAL_COLOR_FORMAT_NV12_TP10_UBWC;
break;
- case V4L2_PIX_FMT_RGB32:
- format = HAL_COLOR_FORMAT_RGBA8888;
- break;
default:
format = HAL_UNUSED_COLOR;
break;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 58954f6..eb36b33 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -539,7 +539,8 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
*pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 1 : 0;
- if (msm_vidc_debug & VIDC_PKT) {
+ if ((msm_vidc_debug & VIDC_PKT) &&
+ !(queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q)) {
dprintk(VIDC_PKT, "%s: %pK\n", __func__, qinfo);
__dump_packet(packet, VIDC_PKT);
}
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 7caff53..2a833dc 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -73,8 +73,6 @@
#define HFI_EXTRADATA_NONE 0x00000000
#define HFI_EXTRADATA_MB_QUANTIZATION 0x00000001
#define HFI_EXTRADATA_INTERLACE_VIDEO 0x00000002
-#define HFI_EXTRADATA_VC1_FRAMEDISP 0x00000003
-#define HFI_EXTRADATA_VC1_SEQDISP 0x00000004
#define HFI_EXTRADATA_TIMESTAMP 0x00000005
#define HFI_EXTRADATA_S3D_FRAME_PACKING 0x00000006
#define HFI_EXTRADATA_FRAME_RATE 0x00000007
@@ -132,8 +130,6 @@ struct hfi_extradata_header {
(HFI_PROPERTY_PARAM_OX_START + 0x001)
#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO \
(HFI_PROPERTY_PARAM_OX_START + 0x002)
-#define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG \
- (HFI_PROPERTY_PARAM_OX_START + 0x005)
#define HFI_PROPERTY_PARAM_INDEX_EXTRADATA \
(HFI_PROPERTY_PARAM_OX_START + 0x006)
#define HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA \
@@ -175,10 +171,6 @@ struct hfi_extradata_header {
(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00C)
#define HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE \
(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00D)
-#define HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA \
- (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x011)
-#define HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA \
- (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x012)
#define HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA \
(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x013)
#define HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA \
@@ -206,8 +198,6 @@ struct hfi_extradata_header {
#define HFI_PROPERTY_CONFIG_VDEC_OX_START \
(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x4000)
-#define HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER \
- (HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x001)
#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING \
(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x002)
#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP \
@@ -279,14 +269,6 @@ struct hfi_enable_picture {
u32 picture_type;
};
-struct hfi_extra_data_header_config {
- u32 type;
- u32 buffer_type;
- u32 version;
- u32 port_index;
- u32 client_extra_data_id;
-};
-
struct hfi_mb_error_map {
u32 error_map_size;
u8 rg_error_map[1];
@@ -720,35 +702,6 @@ struct hfi_extradata_mb_quantization_payload {
u8 rg_mb_qp[1];
};
-struct hfi_extradata_vc1_pswnd {
- u32 ps_wnd_h_offset;
- u32 ps_wnd_v_offset;
- u32 ps_wnd_width;
- u32 ps_wnd_height;
-};
-
-struct hfi_extradata_vc1_framedisp_payload {
- u32 res_pic;
- u32 ref;
- u32 range_map_present;
- u32 range_map_y;
- u32 range_map_uv;
- u32 num_pan_scan_wnds;
- struct hfi_extradata_vc1_pswnd rg_ps_wnd[1];
-};
-
-struct hfi_extradata_vc1_seqdisp_payload {
- u32 prog_seg_frm;
- u32 uv_sampling_fmt;
- u32 color_fmt_flag;
- u32 color_primaries;
- u32 transfer_char;
- u32 mat_coeff;
- u32 aspect_ratio;
- u32 aspect_horiz;
- u32 aspect_vert;
-};
-
struct hfi_extradata_timestamp_payload {
u32 time_stamp_low;
u32 time_stamp_high;
@@ -836,10 +789,6 @@ struct hfi_index_extradata_aspect_ratio_payload {
u32 aspect_width;
u32 aspect_height;
};
-struct hfi_extradata_panscan_wndw_payload {
- u32 num_window;
- struct hfi_extradata_vc1_pswnd wnd[1];
-};
struct hfi_extradata_frame_type_payload {
u32 frame_rate;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 75d7aea..8aa0bbb 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -98,8 +98,6 @@ enum hal_extradata_id {
HAL_EXTRADATA_NONE,
HAL_EXTRADATA_MB_QUANTIZATION,
HAL_EXTRADATA_INTERLACE_VIDEO,
- HAL_EXTRADATA_VC1_FRAMEDISP,
- HAL_EXTRADATA_VC1_SEQDISP,
HAL_EXTRADATA_TIMESTAMP,
HAL_EXTRADATA_S3D_FRAME_PACKING,
HAL_EXTRADATA_FRAME_RATE,
@@ -134,7 +132,6 @@ enum hal_property {
HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT,
HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
- HAL_PARAM_EXTRA_DATA_HEADER_CONFIG,
HAL_PARAM_INDEX_EXTRADATA,
HAL_PARAM_FRAME_SIZE,
HAL_CONFIG_REALTIME,
@@ -144,22 +141,16 @@ enum hal_property {
HAL_PARAM_VDEC_OUTPUT_ORDER,
HAL_PARAM_VDEC_PICTURE_TYPE_DECODE,
HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO,
- HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
HAL_PARAM_VDEC_MULTI_STREAM,
HAL_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT,
- HAL_PARAM_DIVX_FORMAT,
HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING,
HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER,
HAL_CONFIG_VDEC_MB_ERROR_MAP,
HAL_CONFIG_VENC_REQUEST_IFRAME,
- HAL_PARAM_VENC_MPEG4_SHORT_HEADER,
- HAL_PARAM_VENC_MPEG4_AC_PREDICTION,
HAL_CONFIG_VENC_TARGET_BITRATE,
HAL_PARAM_PROFILE_LEVEL_CURRENT,
HAL_PARAM_VENC_H264_ENTROPY_CONTROL,
HAL_PARAM_VENC_RATE_CONTROL,
- HAL_PARAM_VENC_MPEG4_TIME_RESOLUTION,
- HAL_PARAM_VENC_MPEG4_HEADER_EXTENSION,
HAL_PARAM_VENC_H264_DEBLOCK_CONTROL,
HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF,
HAL_PARAM_VENC_SESSION_QP_RANGE,
@@ -190,7 +181,6 @@ enum hal_property {
HAL_PARAM_VDEC_NUM_CONCEALED_MB,
HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING,
HAL_PARAM_VENC_SLICE_DELIVERY_MODE,
- HAL_PARAM_VENC_MPEG4_DATA_PARTITIONING,
HAL_CONFIG_BUFFER_COUNT_ACTUAL,
HAL_CONFIG_VDEC_MULTI_STREAM,
HAL_PARAM_VENC_MULTI_SLICE_INFO,
@@ -204,12 +194,10 @@ enum hal_property {
HAL_PARAM_VENC_MAX_NUM_B_FRAMES,
HAL_PARAM_BUFFER_ALLOC_MODE,
HAL_PARAM_VDEC_FRAME_ASSEMBLY,
- HAL_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC,
HAL_PARAM_VENC_PRESERVE_TEXT_QUALITY,
HAL_PARAM_VDEC_CONCEAL_COLOR,
HAL_PARAM_VDEC_SCS_THRESHOLD,
HAL_PARAM_GET_BUFFER_REQUIREMENTS,
- HAL_PARAM_MVC_BUFFER_LAYOUT,
HAL_PARAM_VENC_LTRMODE,
HAL_CONFIG_VENC_MARKLTRFRAME,
HAL_CONFIG_VENC_USELTRFRAME,
@@ -221,7 +209,6 @@ enum hal_property {
HAL_PARAM_VPE_COLOR_SPACE_CONVERSION,
HAL_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE,
HAL_CONFIG_VENC_PERF_MODE,
- HAL_PARAM_VENC_HIER_B_MAX_ENH_LAYERS,
HAL_PARAM_VDEC_NON_SECURE_OUTPUT2,
HAL_PARAM_VENC_HIER_P_HYBRID_MODE,
HAL_PARAM_VENC_MBI_STATISTICS_MODE,
@@ -232,7 +219,6 @@ enum hal_property {
HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO,
HAL_CONFIG_VDEC_ENTROPY,
HAL_PARAM_VENC_BITRATE_TYPE,
- HAL_PARAM_VENC_H264_PIC_ORDER_CNT,
HAL_PARAM_VENC_LOW_LATENCY,
HAL_CONFIG_VENC_BLUR_RESOLUTION,
HAL_PARAM_VENC_H264_TRANSFORM_8x8,
@@ -289,31 +275,6 @@ enum hal_video_codec {
HAL_UNUSED_CODEC = 0x10000000,
};
-enum hal_h263_profile {
- HAL_H263_PROFILE_BASELINE = 0x00000001,
- HAL_H263_PROFILE_H320CODING = 0x00000002,
- HAL_H263_PROFILE_BACKWARDCOMPATIBLE = 0x00000004,
- HAL_H263_PROFILE_ISWV2 = 0x00000008,
- HAL_H263_PROFILE_ISWV3 = 0x00000010,
- HAL_H263_PROFILE_HIGHCOMPRESSION = 0x00000020,
- HAL_H263_PROFILE_INTERNET = 0x00000040,
- HAL_H263_PROFILE_INTERLACE = 0x00000080,
- HAL_H263_PROFILE_HIGHLATENCY = 0x00000100,
- HAL_UNUSED_H263_PROFILE = 0x10000000,
-};
-
-enum hal_h263_level {
- HAL_H263_LEVEL_10 = 0x00000001,
- HAL_H263_LEVEL_20 = 0x00000002,
- HAL_H263_LEVEL_30 = 0x00000004,
- HAL_H263_LEVEL_40 = 0x00000008,
- HAL_H263_LEVEL_45 = 0x00000010,
- HAL_H263_LEVEL_50 = 0x00000020,
- HAL_H263_LEVEL_60 = 0x00000040,
- HAL_H263_LEVEL_70 = 0x00000080,
- HAL_UNUSED_H263_LEVEL = 0x10000000,
-};
-
enum hal_mpeg2_profile {
HAL_MPEG2_PROFILE_SIMPLE = 0x00000001,
HAL_MPEG2_PROFILE_MAIN = 0x00000002,
@@ -332,44 +293,6 @@ enum hal_mpeg2_level {
HAL_UNUSED_MEPG2_LEVEL = 0x10000000,
};
-enum hal_mpeg4_profile {
- HAL_MPEG4_PROFILE_SIMPLE = 0x00000001,
- HAL_MPEG4_PROFILE_ADVANCEDSIMPLE = 0x00000002,
- HAL_MPEG4_PROFILE_CORE = 0x00000004,
- HAL_MPEG4_PROFILE_MAIN = 0x00000008,
- HAL_MPEG4_PROFILE_NBIT = 0x00000010,
- HAL_MPEG4_PROFILE_SCALABLETEXTURE = 0x00000020,
- HAL_MPEG4_PROFILE_SIMPLEFACE = 0x00000040,
- HAL_MPEG4_PROFILE_SIMPLEFBA = 0x00000080,
- HAL_MPEG4_PROFILE_BASICANIMATED = 0x00000100,
- HAL_MPEG4_PROFILE_HYBRID = 0x00000200,
- HAL_MPEG4_PROFILE_ADVANCEDREALTIME = 0x00000400,
- HAL_MPEG4_PROFILE_CORESCALABLE = 0x00000800,
- HAL_MPEG4_PROFILE_ADVANCEDCODING = 0x00001000,
- HAL_MPEG4_PROFILE_ADVANCEDCORE = 0x00002000,
- HAL_MPEG4_PROFILE_ADVANCEDSCALABLE = 0x00004000,
- HAL_MPEG4_PROFILE_SIMPLESCALABLE = 0x00008000,
- HAL_UNUSED_MPEG4_PROFILE = 0x10000000,
-};
-
-enum hal_mpeg4_level {
- HAL_MPEG4_LEVEL_0 = 0x00000001,
- HAL_MPEG4_LEVEL_0b = 0x00000002,
- HAL_MPEG4_LEVEL_1 = 0x00000004,
- HAL_MPEG4_LEVEL_2 = 0x00000008,
- HAL_MPEG4_LEVEL_3 = 0x00000010,
- HAL_MPEG4_LEVEL_4 = 0x00000020,
- HAL_MPEG4_LEVEL_4a = 0x00000040,
- HAL_MPEG4_LEVEL_5 = 0x00000080,
- HAL_MPEG4_LEVEL_VENDOR_START_UNUSED = 0x7F000000,
- HAL_MPEG4_LEVEL_6 = 0x7F000001,
- HAL_MPEG4_LEVEL_7 = 0x7F000002,
- HAL_MPEG4_LEVEL_8 = 0x7F000003,
- HAL_MPEG4_LEVEL_9 = 0x7F000004,
- HAL_MPEG4_LEVEL_3b = 0x7F000005,
- HAL_UNUSED_MPEG4_LEVEL = 0x10000000,
-};
-
enum hal_h264_profile {
HAL_H264_PROFILE_BASELINE = 0x00000001,
HAL_H264_PROFILE_MAIN = 0x00000002,
@@ -457,66 +380,6 @@ enum hal_vpx_profile {
HAL_VPX_PROFILE_UNUSED = 0x10000000,
};
-enum hal_vc1_profile {
- HAL_VC1_PROFILE_SIMPLE = 0x00000001,
- HAL_VC1_PROFILE_MAIN = 0x00000002,
- HAL_VC1_PROFILE_ADVANCED = 0x00000004,
- HAL_UNUSED_VC1_PROFILE = 0x10000000,
-};
-
-enum hal_vc1_level {
- HAL_VC1_LEVEL_LOW = 0x00000001,
- HAL_VC1_LEVEL_MEDIUM = 0x00000002,
- HAL_VC1_LEVEL_HIGH = 0x00000004,
- HAL_VC1_LEVEL_0 = 0x00000008,
- HAL_VC1_LEVEL_1 = 0x00000010,
- HAL_VC1_LEVEL_2 = 0x00000020,
- HAL_VC1_LEVEL_3 = 0x00000040,
- HAL_VC1_LEVEL_4 = 0x00000080,
- HAL_UNUSED_VC1_LEVEL = 0x10000000,
-};
-
-enum hal_divx_format {
- HAL_DIVX_FORMAT_4,
- HAL_DIVX_FORMAT_5,
- HAL_DIVX_FORMAT_6,
- HAL_UNUSED_DIVX_FORMAT = 0x10000000,
-};
-
-enum hal_divx_profile {
- HAL_DIVX_PROFILE_QMOBILE = 0x00000001,
- HAL_DIVX_PROFILE_MOBILE = 0x00000002,
- HAL_DIVX_PROFILE_MT = 0x00000004,
- HAL_DIVX_PROFILE_HT = 0x00000008,
- HAL_DIVX_PROFILE_HD = 0x00000010,
- HAL_UNUSED_DIVX_PROFILE = 0x10000000,
-};
-
-enum hal_mvc_profile {
- HAL_MVC_PROFILE_STEREO_HIGH = 0x00001000,
- HAL_UNUSED_MVC_PROFILE = 0x10000000,
-};
-
-enum hal_mvc_level {
- HAL_MVC_LEVEL_1 = 0x00000001,
- HAL_MVC_LEVEL_1b = 0x00000002,
- HAL_MVC_LEVEL_11 = 0x00000004,
- HAL_MVC_LEVEL_12 = 0x00000008,
- HAL_MVC_LEVEL_13 = 0x00000010,
- HAL_MVC_LEVEL_2 = 0x00000020,
- HAL_MVC_LEVEL_21 = 0x00000040,
- HAL_MVC_LEVEL_22 = 0x00000080,
- HAL_MVC_LEVEL_3 = 0x00000100,
- HAL_MVC_LEVEL_31 = 0x00000200,
- HAL_MVC_LEVEL_32 = 0x00000400,
- HAL_MVC_LEVEL_4 = 0x00000800,
- HAL_MVC_LEVEL_41 = 0x00001000,
- HAL_MVC_LEVEL_42 = 0x00002000,
- HAL_MVC_LEVEL_5 = 0x00004000,
- HAL_MVC_LEVEL_51 = 0x00008000,
- HAL_UNUSED_MVC_LEVEL = 0x10000000,
-};
-
struct hal_frame_rate {
enum hal_buffer buffer_type;
u32 frame_rate;
@@ -585,14 +448,6 @@ struct hal_uncompressed_plane_actual_constraints_info {
struct hal_uncompressed_plane_constraints rg_plane_format[1];
};
-struct hal_extra_data_header_config {
- u32 type;
- enum hal_buffer buffer_type;
- u32 version;
- u32 port_index;
- u32 client_extradata_id;
-};
-
struct hal_frame_size {
enum hal_buffer buffer_type;
u32 width;
@@ -718,14 +573,6 @@ enum hal_rate_control {
HAL_UNUSED_RC = 0x10000000,
};
-struct hal_mpeg4_time_resolution {
- u32 time_increment_resolution;
-};
-
-struct hal_mpeg4_header_extension {
- u32 header_extension;
-};
-
enum hal_h264_db_mode {
HAL_H264_DB_MODE_DISABLE,
HAL_H264_DB_MODE_SKIP_SLICE_BOUNDARY,
@@ -946,12 +793,6 @@ enum hal_buffer_layout_type {
HAL_UNUSED_BUFFER_LAYOUT = 0x10000000,
};
-struct hal_mvc_buffer_layout {
- enum hal_buffer_layout_type layout_type;
- u32 bright_view_first;
- u32 ngap;
-};
-
struct hal_aspect_ratio {
u32 aspect_width;
u32 aspect_height;
@@ -985,13 +826,6 @@ struct hal_preserve_text_quality {
u32 enable;
};
-struct hal_vc1e_perf_cfg_type {
- struct {
- u32 x_subsampled;
- u32 y_subsampled;
- } i_frame, p_frame, b_frame;
-};
-
struct hal_vpe_color_space_conversion {
u32 csc_matrix[HAL_MAX_MATRIX_COEFFS];
u32 csc_bias[HAL_MAX_BIAS_COEFFS];
@@ -1105,7 +939,6 @@ struct hal_buffer_alloc_mode {
enum ltr_mode {
HAL_LTR_MODE_DISABLE,
HAL_LTR_MODE_MANUAL,
- HAL_LTR_MODE_PERIODIC,
};
struct hal_ltr_mode {
@@ -1149,7 +982,6 @@ union hal_get_property {
struct hal_uncompressed_plane_constraints plane_constraints;
struct hal_uncompressed_plane_actual_constraints_info
plane_constraints_info;
- struct hal_extra_data_header_config extra_data_header_config;
struct hal_frame_size frame_size;
struct hal_enable enable;
struct hal_buffer_count_actual buffer_count_actual;
@@ -1162,8 +994,6 @@ union hal_get_property {
struct hal_bitrate bitrate;
struct hal_profile_level profile_level;
struct hal_profile_level_supported profile_level_supported;
- struct hal_mpeg4_time_resolution mpeg4_time_resolution;
- struct hal_mpeg4_header_extension mpeg4_header_extension;
struct hal_h264_db_control h264_db_control;
struct hal_temporal_spatial_tradeoff temporal_spatial_tradeoff;
struct hal_quantization quantization;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index dc64ad2..0d73410 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -81,7 +81,6 @@
#define HFI_VIDEO_CODEC_VP8 0x00001000
#define HFI_VIDEO_CODEC_HEVC 0x00002000
#define HFI_VIDEO_CODEC_VP9 0x00004000
-#define HFI_VIDEO_CODEC_HEVC_HYBRID 0x80000000
#define HFI_PROFILE_UNKNOWN 0x00000000
#define HFI_H264_PROFILE_BASELINE 0x00000001
@@ -214,8 +213,6 @@ struct hfi_buffer_info {
(HFI_PROPERTY_PARAM_COMMON_START + 0x00C)
#define HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED \
(HFI_PROPERTY_PARAM_COMMON_START + 0x00E)
-#define HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT \
- (HFI_PROPERTY_PARAM_COMMON_START + 0x00F)
#define HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED \
(HFI_PROPERTY_PARAM_COMMON_START + 0x010)
@@ -281,8 +278,6 @@ struct hfi_buffer_info {
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01E)
#define HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x020)
-#define HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC \
- (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x021)
#define HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x022)
#define HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY \
@@ -549,7 +544,6 @@ struct hfi_quantization_range {
#define HFI_LTR_MODE_DISABLE 0x0
#define HFI_LTR_MODE_MANUAL 0x1
-#define HFI_LTR_MODE_PERIODIC 0x2
struct hfi_ltr_mode {
u32 ltr_mode;
@@ -744,7 +738,6 @@ struct hfi_venc_config_advanced {
u32 close_gop;
u32 h264_constrain_intra_pred;
u32 h264_transform_8x8_flag;
- u32 mpeg4_qpel_enable;
u32 multi_refp_en;
u32 qmatrix_en;
u8 vpp_info_packet_mode;
@@ -786,15 +779,6 @@ struct hfi_iframe_size {
u32 type;
};
-#define HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM (0)
-#define HFI_MVC_BUFFER_LAYOUT_SIDEBYSIDE (1)
-#define HFI_MVC_BUFFER_LAYOUT_SEQ (2)
-struct hfi_mvc_buffer_layout_descp_type {
- u32 layout_type;
- u32 bright_view_first;
- u32 ngap;
-};
-
#define HFI_CMD_SYS_COMMON_START \
(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + HFI_CMD_START_OFFSET \
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index b37b572..ad3e1e7 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -27,6 +27,9 @@
#include <asm/cputype.h>
#include <asm/irq_regs.h>
+#define USE_CPUHP_STATE CPUHP_AP_PERF_ARM_STARTING
+#define USE_CPUHP_STR "AP_PERF_ARM_STARTING"
+
static int
armpmu_map_cache_event(const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
@@ -366,6 +369,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
return err;
}
+ armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
+
return 0;
}
@@ -568,6 +573,7 @@ static void armpmu_init(struct arm_pmu *armpmu)
.read = armpmu_read,
.filter_match = armpmu_filter_match,
.attr_groups = armpmu->attr_groups,
+ .events_across_hotplug = 1,
};
armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
&armpmu_common_attr_group;
@@ -620,6 +626,8 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
struct platform_device *pmu_device = cpu_pmu->plat_device;
struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+ cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
+
irqs = min(pmu_device->num_resources, num_possible_cpus());
irq = platform_get_irq(pmu_device, 0);
@@ -627,6 +635,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
on_each_cpu_mask(&cpu_pmu->supported_cpus,
cpu_pmu_disable_percpu_irq, &irq, 1);
free_percpu_irq(irq, &hw_events->percpu_pmu);
+ cpu_pmu->percpu_irq = -1;
} else {
for (i = 0; i < irqs; ++i) {
int cpu = i;
@@ -641,6 +650,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}
}
+ cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
}
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
@@ -670,6 +680,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
on_each_cpu_mask(&cpu_pmu->supported_cpus,
cpu_pmu_enable_percpu_irq, &irq, 1);
+ cpu_pmu->percpu_irq = irq;
} else {
for (i = 0; i < irqs; ++i) {
int cpu = i;
@@ -709,22 +720,12 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return 0;
}
-/*
- * PMU hardware loses all context when a CPU goes offline.
- * When a CPU is hotplugged back in, since some hardware registers are
- * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
- * junk values out of them.
- */
-static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
-{
- struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
-
- if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
- return 0;
- if (pmu->reset)
- pmu->reset(pmu);
- return 0;
-}
+struct cpu_pm_pmu_args {
+ struct arm_pmu *armpmu;
+ unsigned long cmd;
+ int cpu;
+ int ret;
+};
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
@@ -772,15 +773,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
}
}
-static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
- void *v)
+static void cpu_pm_pmu_common(void *info)
{
- struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
+ struct cpu_pm_pmu_args *data = info;
+ struct arm_pmu *armpmu = data->armpmu;
+ unsigned long cmd = data->cmd;
+ int cpu = data->cpu;
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
- if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
- return NOTIFY_DONE;
+ if (!cpumask_test_cpu(cpu, &armpmu->supported_cpus)) {
+ data->ret = NOTIFY_DONE;
+ return;
+ }
/*
* Always reset the PMU registers on power-up even if
@@ -789,8 +794,12 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
if (cmd == CPU_PM_EXIT && armpmu->reset)
armpmu->reset(armpmu);
- if (!enabled)
- return NOTIFY_OK;
+ if (!enabled) {
+ data->ret = NOTIFY_OK;
+ return;
+ }
+
+ data->ret = NOTIFY_OK;
switch (cmd) {
case CPU_PM_ENTER:
@@ -798,15 +807,29 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
cpu_pm_pmu_setup(armpmu, cmd);
break;
case CPU_PM_EXIT:
- cpu_pm_pmu_setup(armpmu, cmd);
case CPU_PM_ENTER_FAILED:
+ cpu_pm_pmu_setup(armpmu, cmd);
armpmu->start(armpmu);
break;
default:
- return NOTIFY_DONE;
+ data->ret = NOTIFY_DONE;
+ break;
}
- return NOTIFY_OK;
+ return;
+}
+
+static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+ void *v)
+{
+ struct cpu_pm_pmu_args data = {
+ .armpmu = container_of(b, struct arm_pmu, cpu_pm_nb),
+ .cmd = cmd,
+ .cpu = smp_processor_id(),
+ };
+
+ cpu_pm_pmu_common(&data);
+ return data.ret;
}
static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
@@ -819,11 +842,75 @@ static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
+
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
+static void cpu_pm_pmu_common(void *info) { }
#endif
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+
+ struct cpu_pm_pmu_args data = {
+ .armpmu = pmu,
+ .cpu = (int)cpu,
+ };
+
+ if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
+ return 0;
+
+ data.cmd = CPU_PM_EXIT;
+ cpu_pm_pmu_common(&data);
+ if (data.ret == NOTIFY_DONE)
+ return 0;
+
+ if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF &&
+ data.armpmu->plat_device) {
+ int irq = data.armpmu->percpu_irq;
+
+ if (irq > 0 && irq_is_percpu(irq))
+ cpu_pmu_enable_percpu_irq(&irq);
+
+ }
+
+ return 0;
+}
+
+static int arm_perf_stopping_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+
+ struct cpu_pm_pmu_args data = {
+ .armpmu = pmu,
+ .cpu = (int)cpu,
+ };
+
+ if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
+ return 0;
+
+ data.cmd = CPU_PM_ENTER;
+ cpu_pm_pmu_common(&data);
+ /* Disarm the PMU IRQ before disappearing. */
+ if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING &&
+ data.armpmu->plat_device) {
+ int irq = data.armpmu->percpu_irq;
+
+ if (irq > 0 && irq_is_percpu(irq))
+ cpu_pmu_disable_percpu_irq(&irq);
+
+ }
+
+ return 0;
+}
+
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
int err;
@@ -834,14 +921,14 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
if (!cpu_hw_events)
return -ENOMEM;
- err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+ err = cpuhp_state_add_instance_nocalls(USE_CPUHP_STATE,
&cpu_pmu->node);
if (err)
goto out_free;
err = cpu_pm_pmu_register(cpu_pmu);
if (err)
- goto out_unregister;
+ goto out_unreg_perf_starting;
for_each_possible_cpu(cpu) {
struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
@@ -872,8 +959,8 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
return 0;
-out_unregister:
- cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+out_unreg_perf_starting:
+ cpuhp_state_remove_instance_nocalls(USE_CPUHP_STATE,
&cpu_pmu->node);
out_free:
free_percpu(cpu_hw_events);
@@ -883,7 +970,7 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
cpu_pm_pmu_unregister(cpu_pmu);
- cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+ cpuhp_state_remove_instance_nocalls(USE_CPUHP_STATE,
&cpu_pmu->node);
free_percpu(cpu_pmu->hw_events);
}
@@ -1064,6 +1151,9 @@ int arm_pmu_device_probe(struct platform_device *pdev,
if (!__oprofile_cpu_pmu)
__oprofile_cpu_pmu = pmu;
+ pmu->pmu_state = ARM_PMU_STATE_OFF;
+ pmu->percpu_irq = -1;
+
pr_info("enabled with %s PMU driver, %d counters available\n",
pmu->name, pmu->num_events);
@@ -1083,11 +1173,12 @@ static int arm_pmu_hp_init(void)
{
int ret;
- ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
- "AP_PERF_ARM_STARTING",
- arm_perf_starting_cpu, NULL);
+ ret = cpuhp_setup_state_multi(USE_CPUHP_STATE,
+ USE_CPUHP_STR,
+ arm_perf_starting_cpu,
+ arm_perf_stopping_cpu);
if (ret)
- pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
+ pr_err("CPU hotplug STARTING/STOPPING callback registration for ARM PMU failed: %d\n",
ret);
return ret;
}
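
The arm_pmu.c refactor above routes both the CPU PM notifier and the new hotplug STARTING/STOPPING callbacks through one shared worker, cpu_pm_pmu_common(), passing a small argument block whose ret field carries the notifier result back out. A stand-alone sketch of that pattern; the names and return codes are illustrative, not the kernel's.

#include <stdio.h>

/* Illustrative stand-ins for NOTIFY_DONE / NOTIFY_OK. */
#define EX_NOTIFY_DONE 0
#define EX_NOTIFY_OK   1

struct pmu_args {
	int cpu;
	unsigned long cmd;
	int ret;		/* out-parameter, like data->ret in the patch */
};

/* one worker shared by both entry points */
static void pmu_common(struct pmu_args *args)
{
	if (args->cpu != 1) {		/* pretend only CPU 1 is supported */
		args->ret = EX_NOTIFY_DONE;
		return;
	}
	args->ret = EX_NOTIFY_OK;
}

/* PM-notifier path: runs on the current CPU */
static int from_pm_notifier(unsigned long cmd)
{
	struct pmu_args args = { .cpu = 1, .cmd = cmd };

	pmu_common(&args);
	return args.ret;
}

/* hotplug path: the CPU number is handed in by the hotplug core */
static int from_hotplug(unsigned int cpu, unsigned long cmd)
{
	struct pmu_args args = { .cpu = (int)cpu, .cmd = cmd };

	pmu_common(&args);
	return args.ret;
}

int main(void)
{
	printf("notifier: %d, hotplug(cpu=2): %d\n",
	       from_pm_notifier(1), from_hotplug(2, 1));
	return 0;
}
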
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 92fd916..f6b99d0 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -55,6 +55,7 @@
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-14nm.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-v3.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qrbtc-sdm845.o
+obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-v3-660.o
obj-$(CONFIG_PHY_TUSB1210) += phy-tusb1210.o
obj-$(CONFIG_PHY_BRCM_SATA) += phy-brcm-sata.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
index 35179c8..b92bc89 100644
--- a/drivers/phy/phy-qcom-ufs-i.h
+++ b/drivers/phy/phy-qcom-ufs-i.h
@@ -97,6 +97,10 @@ struct ufs_qcom_phy {
struct ufs_qcom_phy_vreg vdda_pll;
struct ufs_qcom_phy_vreg vdda_phy;
struct ufs_qcom_phy_vreg vddp_ref_clk;
+
+ /* Number of lanes available (1 or 2) for Rx/Tx */
+ u32 lanes_per_direction;
+
unsigned int quirks;
/*
@@ -152,6 +156,7 @@ struct ufs_qcom_phy {
* and writes to QSERDES_RX_SIGDET_CNTRL attribute
* @configure_lpm: pointer to a function that configures the phy
* for low power mode.
+ * @dbg_register_dump: pointer to a function that dumps phy registers for debug.
*/
struct ufs_qcom_phy_specific_ops {
int (*calibrate_phy)(struct ufs_qcom_phy *phy, bool is_rate_B);
@@ -161,6 +166,7 @@ struct ufs_qcom_phy_specific_ops {
void (*ctrl_rx_linecfg)(struct ufs_qcom_phy *phy, bool ctrl);
void (*power_control)(struct ufs_qcom_phy *phy, bool val);
int (*configure_lpm)(struct ufs_qcom_phy *phy, bool enable);
+ void (*dbg_register_dump)(struct ufs_qcom_phy *phy);
};
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
@@ -184,5 +190,6 @@ int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
void ufs_qcom_phy_write_tbl(struct ufs_qcom_phy *ufs_qcom_phy,
struct ufs_qcom_phy_calibration *tbl,
int tbl_size);
-
+void ufs_qcom_phy_dump_regs(struct ufs_qcom_phy *phy,
+ int offset, int len, char *prefix);
#endif
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3-660.c b/drivers/phy/phy-qcom-ufs-qmp-v3-660.c
new file mode 100644
index 0000000..9450e18
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3-660.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qmp-v3-660.h"
+
+#define UFS_PHY_NAME "ufs_phy_qmp_v3_660"
+
+static
+int ufs_qcom_phy_qmp_v3_660_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+ bool is_rate_B)
+{
+ int err;
+ int tbl_size_A, tbl_size_B;
+ struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+ u8 major = ufs_qcom_phy->host_ctrl_rev_major;
+ u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
+ u16 step = ufs_qcom_phy->host_ctrl_rev_step;
+
+ tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+ tbl_B = phy_cal_table_rate_B;
+
+ if ((major == 0x3) && (minor == 0x001) && (step == 0x001)) {
+ tbl_A = phy_cal_table_rate_A_3_1_1;
+ tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_3_1_1);
+ } else {
+ dev_err(ufs_qcom_phy->dev,
+ "%s: Unknown UFS-PHY version (major 0x%x minor 0x%x step 0x%x), no calibration values\n",
+ __func__, major, minor, step);
+ err = -ENODEV;
+ goto out;
+ }
+
+ err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
+ tbl_A, tbl_size_A,
+ tbl_B, tbl_size_B,
+ is_rate_B);
+
+ if (err)
+ dev_err(ufs_qcom_phy->dev,
+ "%s: ufs_qcom_phy_calibrate() failed %d\n",
+ __func__, err);
+
+out:
+ return err;
+}
+
+static int ufs_qcom_phy_qmp_v3_660_init(struct phy *generic_phy)
+{
+ struct ufs_qcom_phy_qmp_v3_660 *phy = phy_get_drvdata(generic_phy);
+ struct ufs_qcom_phy *phy_common = &phy->common_cfg;
+ int err;
+
+ err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
+ if (err) {
+ dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
+ if (err) {
+ dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+ __func__, err);
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+static
+void ufs_qcom_phy_qmp_v3_660_power_control(struct ufs_qcom_phy *phy,
+ bool power_ctrl)
+{
+ if (!power_ctrl) {
+ /* apply analog power collapse */
+ writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+ /*
+ * Make sure that PHY knows its analog rail is going to be
+ * powered OFF.
+ */
+ mb();
+ } else {
+ /* bring PHY out of analog power collapse */
+ writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+
+ /*
+ * Before any transactions involving PHY, ensure PHY knows
+ * that it's analog rail is powered ON.
+ * that its analog rail is powered ON.
+ mb();
+ }
+}
+
+static inline
+void ufs_qcom_phy_qmp_v3_660_set_tx_lane_enable(struct ufs_qcom_phy *phy,
+ u32 val)
+{
+ /*
+ * The v3 PHY does not have a TX_LANE_ENABLE register.
+ * Implement this function so as not to propagate an error to the caller.
+ */
+}
+
+static
+void ufs_qcom_phy_qmp_v3_660_ctrl_rx_linecfg(struct ufs_qcom_phy *phy,
+ bool ctrl)
+{
+ u32 temp;
+
+ temp = readl_relaxed(phy->mmio + UFS_PHY_LINECFG_DISABLE);
+
+ if (ctrl) /* enable RX LineCfg */
+ temp &= ~UFS_PHY_RX_LINECFG_DISABLE_BIT;
+ else /* disable RX LineCfg */
+ temp |= UFS_PHY_RX_LINECFG_DISABLE_BIT;
+
+ writel_relaxed(temp, phy->mmio + UFS_PHY_LINECFG_DISABLE);
+ /* Make sure that the RX LineCfg configuration is applied before we return */
+ mb();
+}
+
+static inline void ufs_qcom_phy_qmp_v3_660_start_serdes(
+ struct ufs_qcom_phy *phy)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
+ tmp &= ~MASK_SERDES_START;
+ tmp |= (1 << OFFSET_SERDES_START);
+ writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
+ /* Ensure register value is committed */
+ mb();
+}
+
+static int ufs_qcom_phy_qmp_v3_660_is_pcs_ready(
+ struct ufs_qcom_phy *phy_common)
+{
+ int err = 0;
+ u32 val;
+
+ err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
+ val, (val & MASK_PCS_READY), 10, 1000000);
+ if (err)
+ dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+ __func__, err);
+ return err;
+}
+
+static void ufs_qcom_phy_qmp_v3_660_dbg_register_dump(
+ struct ufs_qcom_phy *phy)
+{
+ ufs_qcom_phy_dump_regs(phy, COM_BASE, COM_SIZE,
+ "PHY QSERDES COM Registers ");
+ ufs_qcom_phy_dump_regs(phy, PHY_BASE, PHY_SIZE,
+ "PHY Registers ");
+ ufs_qcom_phy_dump_regs(phy, RX_BASE, RX_SIZE,
+ "PHY RX0 Registers ");
+ ufs_qcom_phy_dump_regs(phy, TX_BASE, TX_SIZE,
+ "PHY TX0 Registers ");
+}
+
+struct phy_ops ufs_qcom_phy_qmp_v3_660_phy_ops = {
+ .init = ufs_qcom_phy_qmp_v3_660_init,
+ .exit = ufs_qcom_phy_exit,
+ .power_on = ufs_qcom_phy_power_on,
+ .power_off = ufs_qcom_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+struct ufs_qcom_phy_specific_ops phy_v3_660_ops = {
+ .calibrate_phy = ufs_qcom_phy_qmp_v3_660_phy_calibrate,
+ .start_serdes = ufs_qcom_phy_qmp_v3_660_start_serdes,
+ .is_physical_coding_sublayer_ready =
+ ufs_qcom_phy_qmp_v3_660_is_pcs_ready,
+ .set_tx_lane_enable = ufs_qcom_phy_qmp_v3_660_set_tx_lane_enable,
+ .ctrl_rx_linecfg = ufs_qcom_phy_qmp_v3_660_ctrl_rx_linecfg,
+ .power_control = ufs_qcom_phy_qmp_v3_660_power_control,
+ .dbg_register_dump = ufs_qcom_phy_qmp_v3_660_dbg_register_dump,
+};
+
+static int ufs_qcom_phy_qmp_v3_660_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy;
+ struct ufs_qcom_phy_qmp_v3_660 *phy;
+ int err = 0;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+ &ufs_qcom_phy_qmp_v3_660_phy_ops,
+ &phy_v3_660_ops);
+
+ if (!generic_phy) {
+ dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+ __func__);
+ err = -EIO;
+ goto out;
+ }
+
+ phy_set_drvdata(generic_phy, phy);
+
+ strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+ sizeof(phy->common_cfg.name));
+
+out:
+ return err;
+}
+
+static int ufs_qcom_phy_qmp_v3_660_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy = to_phy(dev);
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+ int err = 0;
+
+ err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
+ if (err)
+ dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
+ __func__, err);
+
+ return err;
+}
+
+static const struct of_device_id ufs_qcom_phy_qmp_v3_660_of_match[] = {
+ {.compatible = "qcom,ufs-phy-qmp-v3-660"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_v3_660_of_match);
+
+static struct platform_driver ufs_qcom_phy_qmp_v3_660_driver = {
+ .probe = ufs_qcom_phy_qmp_v3_660_probe,
+ .remove = ufs_qcom_phy_qmp_v3_660_remove,
+ .driver = {
+ .of_match_table = ufs_qcom_phy_qmp_v3_660_of_match,
+ .name = "ufs_qcom_phy_qmp_v3_660",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(ufs_qcom_phy_qmp_v3_660_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP v3 660");
+MODULE_LICENSE("GPL v2");
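
ufs_qcom_phy_qmp_v3_660_is_pcs_ready() above waits for the PCS-ready bit with readl_poll_timeout(), polling every 10us for up to one second. A minimal sketch of that polling idiom, using an assumed register offset and mask rather than the driver's real ones:

#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

/* Assumed offset/mask for illustration; the real values come from the PHY header. */
#define EXAMPLE_STATUS_REG	0x168
#define EXAMPLE_READY_MASK	0x1

static int example_wait_for_ready(void __iomem *mmio)
{
	u32 val;

	/* read the status register every 10us until the ready bit is set, or 1s elapses */
	return readl_poll_timeout(mmio + EXAMPLE_STATUS_REG, val,
				  (val & EXAMPLE_READY_MASK), 10, 1000000);
}
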
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3-660.h b/drivers/phy/phy-qcom-ufs-qmp-v3-660.h
new file mode 100644
index 0000000..89fa5d3
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3-660.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_QMP_V3_660_H_
+#define UFS_QCOM_PHY_QMP_V3_660_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+#define COM_BASE 0x000
+#define COM_OFF(x) (COM_BASE + x)
+#define COM_SIZE 0x1C0
+
+#define TX_BASE 0x400
+#define TX_OFF(x) (TX_BASE + x)
+#define TX_SIZE 0x128
+
+#define RX_BASE 0x600
+#define RX_OFF(x) (RX_BASE + x)
+#define RX_SIZE 0x1FC
+
+#define PHY_BASE 0xC00
+#define PHY_OFF(x) (PHY_BASE + x)
+#define PHY_SIZE 0x1B4
+
+/* UFS PHY QSERDES COM registers */
+#define QSERDES_COM_ATB_SEL1 COM_OFF(0x00)
+#define QSERDES_COM_ATB_SEL2 COM_OFF(0x04)
+#define QSERDES_COM_FREQ_UPDATE COM_OFF(0x08)
+#define QSERDES_COM_BG_TIMER COM_OFF(0x0C)
+#define QSERDES_COM_SSC_EN_CENTER COM_OFF(0x10)
+#define QSERDES_COM_SSC_ADJ_PER1 COM_OFF(0x14)
+#define QSERDES_COM_SSC_ADJ_PER2 COM_OFF(0x18)
+#define QSERDES_COM_SSC_PER1 COM_OFF(0x1C)
+#define QSERDES_COM_SSC_PER2 COM_OFF(0x20)
+#define QSERDES_COM_SSC_STEP_SIZE1 COM_OFF(0x24)
+#define QSERDES_COM_SSC_STEP_SIZE2 COM_OFF(0x28)
+#define QSERDES_COM_POST_DIV COM_OFF(0x2C)
+#define QSERDES_COM_POST_DIV_MUX COM_OFF(0x30)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN COM_OFF(0x34)
+#define QSERDES_COM_CLK_ENABLE1 COM_OFF(0x38)
+#define QSERDES_COM_SYS_CLK_CTRL COM_OFF(0x3C)
+#define QSERDES_COM_SYSCLK_BUF_ENABLE COM_OFF(0x40)
+#define QSERDES_COM_PLL_EN COM_OFF(0x44)
+#define QSERDES_COM_PLL_IVCO COM_OFF(0x48)
+#define QSERDES_COM_LOCK_CMP1_MODE0 COM_OFF(0X4C)
+#define QSERDES_COM_LOCK_CMP2_MODE0 COM_OFF(0X50)
+#define QSERDES_COM_LOCK_CMP3_MODE0 COM_OFF(0X54)
+#define QSERDES_COM_LOCK_CMP1_MODE1 COM_OFF(0X58)
+#define QSERDES_COM_LOCK_CMP2_MODE1 COM_OFF(0X5C)
+#define QSERDES_COM_LOCK_CMP3_MODE1 COM_OFF(0X60)
+#define QSERDES_COM_CMD_RSVD0 COM_OFF(0x64)
+#define QSERDES_COM_EP_CLOCK_DETECT_CTRL COM_OFF(0x68)
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS COM_OFF(0x6C)
+#define QSERDES_COM_BG_TRIM COM_OFF(0x70)
+#define QSERDES_COM_CLK_EP_DIV COM_OFF(0x74)
+#define QSERDES_COM_CP_CTRL_MODE0 COM_OFF(0x78)
+#define QSERDES_COM_CP_CTRL_MODE1 COM_OFF(0x7C)
+#define QSERDES_COM_CMN_RSVD1 COM_OFF(0x80)
+#define QSERDES_COM_PLL_RCTRL_MODE0 COM_OFF(0x84)
+#define QSERDES_COM_PLL_RCTRL_MODE1 COM_OFF(0x88)
+#define QSERDES_COM_CMN_RSVD2 COM_OFF(0x8C)
+#define QSERDES_COM_PLL_CCTRL_MODE0 COM_OFF(0x90)
+#define QSERDES_COM_PLL_CCTRL_MODE1 COM_OFF(0x94)
+#define QSERDES_COM_CMN_RSVD3 COM_OFF(0x98)
+#define QSERDES_COM_PLL_CNTRL COM_OFF(0x9C)
+#define QSERDES_COM_PHASE_SEL_CTRL COM_OFF(0xA0)
+#define QSERDES_COM_PHASE_SEL_DC COM_OFF(0xA4)
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM COM_OFF(0xA8)
+#define QSERDES_COM_SYSCLK_EN_SEL COM_OFF(0xAC)
+#define QSERDES_COM_CML_SYSCLK_SEL COM_OFF(0xB0)
+#define QSERDES_COM_RESETSM_CNTRL COM_OFF(0xB4)
+#define QSERDES_COM_RESETSM_CNTRL2 COM_OFF(0xB8)
+#define QSERDES_COM_RESTRIM_CTRL COM_OFF(0xBC)
+#define QSERDES_COM_RESTRIM_CTRL2 COM_OFF(0xC0)
+#define QSERDES_COM_LOCK_CMP_EN COM_OFF(0xC8)
+#define QSERDES_COM_LOCK_CMP_CFG COM_OFF(0xCC)
+#define QSERDES_COM_DEC_START_MODE0 COM_OFF(0xD0)
+#define QSERDES_COM_DEC_START_MODE1 COM_OFF(0xD4)
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL COM_OFF(0xD8)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0 COM_OFF(0xDC)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0 COM_OFF(0xE0)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0 COM_OFF(0xE4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE1 COM_OFF(0xE8)
+#define QSERDES_COM_DIV_FRAC_START2_MODE1 COM_OFF(0xEC)
+#define QSERDES_COM_DIV_FRAC_START3_MODE1 COM_OFF(0xF0)
+#define QSERDES_COM_VCO_TUNE_MINVAL1 COM_OFF(0xF4)
+#define QSERDES_COM_VCO_TUNE_MINVAL2 COM_OFF(0xF8)
+#define QSERDES_COM_CMN_RSVD4 COM_OFF(0xFC)
+#define QSERDES_COM_INTEGLOOP_INITVAL COM_OFF(0x100)
+#define QSERDES_COM_INTEGLOOP_EN COM_OFF(0x104)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 COM_OFF(0x108)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 COM_OFF(0x10C)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 COM_OFF(0x110)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 COM_OFF(0x114)
+#define QSERDES_COM_VCO_TUNE_MAXVAL1 COM_OFF(0x118)
+#define QSERDES_COM_VCO_TUNE_MAXVAL2 COM_OFF(0x11C)
+#define QSERDES_COM_RES_TRIM_CONTROL2 COM_OFF(0x120)
+#define QSERDES_COM_VCO_TUNE_CTRL COM_OFF(0x124)
+#define QSERDES_COM_VCO_TUNE_MAP COM_OFF(0x128)
+#define QSERDES_COM_VCO_TUNE1_MODE0 COM_OFF(0x12C)
+#define QSERDES_COM_VCO_TUNE2_MODE0 COM_OFF(0x130)
+#define QSERDES_COM_VCO_TUNE1_MODE1 COM_OFF(0x134)
+#define QSERDES_COM_VCO_TUNE2_MODE1 COM_OFF(0x138)
+#define QSERDES_COM_VCO_TUNE_INITVAL1 COM_OFF(0x13C)
+#define QSERDES_COM_VCO_TUNE_INITVAL2 COM_OFF(0x140)
+#define QSERDES_COM_VCO_TUNE_TIMER1 COM_OFF(0x144)
+#define QSERDES_COM_VCO_TUNE_TIMER2 COM_OFF(0x148)
+#define QSERDES_COM_SAR COM_OFF(0x14C)
+#define QSERDES_COM_SAR_CLK COM_OFF(0x150)
+#define QSERDES_COM_SAR_CODE_OUT_STATUS COM_OFF(0x154)
+#define QSERDES_COM_SAR_CODE_READY_STATUS COM_OFF(0x158)
+#define QSERDES_COM_CMN_STATUS COM_OFF(0x15C)
+#define QSERDES_COM_RESET_SM_STATUS COM_OFF(0x160)
+#define QSERDES_COM_RESTRIM_CODE_STATUS COM_OFF(0x164)
+#define QSERDES_COM_PLLCAL_CODE1_STATUS COM_OFF(0x168)
+#define QSERDES_COM_PLLCAL_CODE2_STATUS COM_OFF(0x16C)
+#define QSERDES_COM_BG_CTRL COM_OFF(0x170)
+#define QSERDES_COM_CLK_SELECT COM_OFF(0x174)
+#define QSERDES_COM_HSCLK_SEL COM_OFF(0x178)
+#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS COM_OFF(0x17C)
+#define QSERDES_COM_PLL_ANALOG COM_OFF(0x180)
+#define QSERDES_COM_CORECLK_DIV COM_OFF(0x184)
+#define QSERDES_COM_SW_RESET COM_OFF(0x188)
+#define QSERDES_COM_CORE_CLK_EN COM_OFF(0x18C)
+#define QSERDES_COM_C_READY_STATUS COM_OFF(0x190)
+#define QSERDES_COM_CMN_CONFIG COM_OFF(0x194)
+#define QSERDES_COM_CMN_RATE_OVERRIDE COM_OFF(0x198)
+#define QSERDES_COM_SVS_MODE_CLK_SEL COM_OFF(0x19C)
+#define QSERDES_COM_DEBUG_BUS0 COM_OFF(0x1A0)
+#define QSERDES_COM_DEBUG_BUS1 COM_OFF(0x1A4)
+#define QSERDES_COM_DEBUG_BUS2 COM_OFF(0x1A8)
+#define QSERDES_COM_DEBUG_BUS3 COM_OFF(0x1AC)
+#define QSERDES_COM_DEBUG_BUS_SEL COM_OFF(0x1B0)
+#define QSERDES_COM_CMN_MISC1 COM_OFF(0x1B4)
+#define QSERDES_COM_CORECLK_DIV_MODE1 COM_OFF(0x1BC)
+#define QSERDES_COM_CMN_RSVD5 COM_OFF(0x1C0)
+
+/* UFS PHY registers */
+#define UFS_PHY_PHY_START PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL PHY_OFF(0x04)
+#define UFS_PHY_TX_LARGE_AMP_DRV_LVL PHY_OFF(0x34)
+#define UFS_PHY_TX_SMALL_AMP_DRV_LVL PHY_OFF(0x3C)
+#define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP PHY_OFF(0xCC)
+#define UFS_PHY_LINECFG_DISABLE PHY_OFF(0x138)
+#define UFS_PHY_RX_SYM_RESYNC_CTRL PHY_OFF(0x13C)
+#define UFS_PHY_RX_SIGDET_CTRL2 PHY_OFF(0x148)
+#define UFS_PHY_RX_PWM_GEAR_BAND PHY_OFF(0x154)
+#define UFS_PHY_PCS_READY_STATUS PHY_OFF(0x168)
+
+/* UFS PHY TX registers */
+#define QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN TX_OFF(0x68)
+#define QSERDES_TX_LANE_MODE TX_OFF(0x94)
+
+/* UFS PHY RX registers */
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_HALF RX_OFF(0x30)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER RX_OFF(0x34)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_EIGHTH RX_OFF(0x38)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN RX_OFF(0x3C)
+#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN RX_OFF(0x40)
+#define QSERDES_RX_UCDR_SO_SATURATION_ENABLE RX_OFF(0x48)
+#define QSERDES_RX_RX_TERM_BW RX_OFF(0x90)
+#define QSERDES_RX_RX_EQ_GAIN1_LSB RX_OFF(0xC4)
+#define QSERDES_RX_RX_EQ_GAIN1_MSB RX_OFF(0xC8)
+#define QSERDES_RX_RX_EQ_GAIN2_LSB RX_OFF(0xCC)
+#define QSERDES_RX_RX_EQ_GAIN2_MSB RX_OFF(0xD0)
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 RX_OFF(0xD8)
+#define QSERDES_RX_SIGDET_CNTRL RX_OFF(0x114)
+#define QSERDES_RX_SIGDET_LVL RX_OFF(0x118)
+#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL RX_OFF(0x11C)
+#define QSERDES_RX_RX_INTERFACE_MODE RX_OFF(0x12C)
+
+
+#define UFS_PHY_RX_LINECFG_DISABLE_BIT BIT(1)
+
+/*
+ * This structure represents the v3 660-specific PHY.
+ * common_cfg MUST remain the first field in this structure
+ * in case extra fields are added. This way, when calling
+ * get_ufs_qcom_phy() on the generic phy, we can extract the
+ * common phy structure (struct ufs_qcom_phy) from it,
+ * regardless of which specific phy it is.
+ */
+struct ufs_qcom_phy_qmp_v3_660 {
+ struct ufs_qcom_phy common_cfg;
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_3_1_1[] = {
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x06),
+
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x0F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D),
+
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TRIM, 0x0F),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_PWM_GEAR_BAND, 0x15),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SO_SATURATION_ENABLE, 0x4B),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6c),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x44),
+};
+
+#endif
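
The calibration tables above are plain arrays of register-offset/value pairs built with UFS_QCOM_PHY_CAL_ENTRY(); at PHY init the driver walks a table and writes every entry into the PHY MMIO region. A minimal sketch of that apply loop is shown below; it assumes the reg_offset/cfg_value field names behind UFS_QCOM_PHY_CAL_ENTRY() and a plain writel_relaxed() per entry, while the driver's real helper is ufs_qcom_phy_write_tbl(), which this patch calls but does not show.

/*
 * Illustrative sketch only: apply a calibration table entry by entry.
 * Field names are assumed from the UFS_QCOM_PHY_CAL_ENTRY() helper.
 */
static void example_apply_cal_tbl(struct ufs_qcom_phy *phy,
				  const struct ufs_qcom_phy_calibration *tbl,
				  int tbl_size)
{
	int i;

	for (i = 0; i < tbl_size; i++)
		writel_relaxed(tbl[i].cfg_value,
			       phy->mmio + tbl[i].reg_offset);
	/* callers issue mb() after the last table write */
}

Callers such as ufs_qcom_phy_qmp_v3_phy_calibrate() below apply the Rate-A table first, then the optional second-lane table, then the small Rate-B override table.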
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3.c b/drivers/phy/phy-qcom-ufs-qmp-v3.c
index 6b8dbc2..0bfde0c7 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-v3.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,26 +20,24 @@ static
int ufs_qcom_phy_qmp_v3_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
bool is_rate_B)
{
- int err;
- int tbl_size_A, tbl_size_B;
- struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+ /*
+ * Writing PHY calibration in this order:
+ * 1. Write Rate-A calibration first (1-lane mode).
+ * 2. Write 2nd lane configuration if needed.
+ * 3. Write Rate-B calibration overrides
+ */
+ ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_A,
+ ARRAY_SIZE(phy_cal_table_rate_A));
+ if (ufs_qcom_phy->lanes_per_direction == 2)
+ ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_2nd_lane,
+ ARRAY_SIZE(phy_cal_table_2nd_lane));
+ if (is_rate_B)
+ ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_B,
+ ARRAY_SIZE(phy_cal_table_rate_B));
+ /* flush buffered writes */
+ mb();
- tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
- tbl_B = phy_cal_table_rate_B;
-
- tbl_A = phy_cal_table_rate_A;
- tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A);
-
- err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
- tbl_A, tbl_size_A,
- tbl_B, tbl_size_B,
- is_rate_B);
-
- if (err)
- dev_err(ufs_qcom_phy->dev,
- "%s: ufs_qcom_phy_calibrate() failed %d\n",
- __func__, err);
- return err;
+ return 0;
}
static int ufs_qcom_phy_qmp_v3_init(struct phy *generic_phy)
@@ -145,37 +143,20 @@ static int ufs_qcom_phy_qmp_v3_is_pcs_ready(struct ufs_qcom_phy *phy_common)
return err;
}
-static
-int ufs_qcom_phy_qmp_v3_configure_lpm(struct ufs_qcom_phy *ufs_qcom_phy,
- bool enable)
+static void ufs_qcom_phy_qmp_v3_dbg_register_dump(struct ufs_qcom_phy *phy)
{
- int err = 0;
- int tbl_size;
- struct ufs_qcom_phy_calibration *tbl = NULL;
-
- /* The default low power mode configuration is SVS2 */
- if (enable) {
- tbl_size = ARRAY_SIZE(phy_cal_table_svs2_enable);
- tbl = phy_cal_table_svs2_enable;
- } else {
- tbl_size = ARRAY_SIZE(phy_cal_table_svs2_disable);
- tbl = phy_cal_table_svs2_disable;
- }
-
- if (!tbl) {
- dev_err(ufs_qcom_phy->dev, "%s: tbl for SVS2 %s is NULL",
- __func__, enable ? "enable" : "disable");
- err = -EINVAL;
- goto out;
- }
-
- ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl, tbl_size);
-
- /* flush buffered writes */
- mb();
-
-out:
- return err;
+ ufs_qcom_phy_dump_regs(phy, COM_BASE, COM_SIZE,
+ "PHY QSERDES COM Registers ");
+ ufs_qcom_phy_dump_regs(phy, PHY_BASE, PHY_SIZE,
+ "PHY Registers ");
+ ufs_qcom_phy_dump_regs(phy, RX_BASE(0), RX_SIZE,
+ "PHY RX0 Registers ");
+ ufs_qcom_phy_dump_regs(phy, TX_BASE(0), TX_SIZE,
+ "PHY TX0 Registers ");
+ ufs_qcom_phy_dump_regs(phy, RX_BASE(1), RX_SIZE,
+ "PHY RX1 Registers ");
+ ufs_qcom_phy_dump_regs(phy, TX_BASE(1), TX_SIZE,
+ "PHY TX1 Registers ");
}
struct phy_ops ufs_qcom_phy_qmp_v3_phy_ops = {
@@ -193,7 +174,7 @@ struct ufs_qcom_phy_specific_ops phy_v3_ops = {
.set_tx_lane_enable = ufs_qcom_phy_qmp_v3_set_tx_lane_enable,
.ctrl_rx_linecfg = ufs_qcom_phy_qmp_v3_ctrl_rx_linecfg,
.power_control = ufs_qcom_phy_qmp_v3_power_control,
- .configure_lpm = ufs_qcom_phy_qmp_v3_configure_lpm,
+ .dbg_register_dump = ufs_qcom_phy_qmp_v3_dbg_register_dump,
};
static int ufs_qcom_phy_qmp_v3_probe(struct platform_device *pdev)
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3.h b/drivers/phy/phy-qcom-ufs-qmp-v3.h
index e9ac76b..4851aac 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-v3.h
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,10 +18,18 @@
#include "phy-qcom-ufs-i.h"
/* QCOM UFS PHY control registers */
-#define COM_OFF(x) (0x000 + x)
-#define PHY_OFF(x) (0xC00 + x)
-#define TX_OFF(n, x) (0x400 + (0x400 * n) + x)
-#define RX_OFF(n, x) (0x600 + (0x400 * n) + x)
+#define COM_BASE 0x000
+#define COM_SIZE 0x18C
+#define PHY_BASE 0xC00
+#define PHY_SIZE 0x1DC
+#define TX_BASE(n) (0x400 + (0x400 * n))
+#define TX_SIZE 0x128
+#define RX_BASE(n) (0x600 + (0x400 * n))
+#define RX_SIZE 0x1FC
+#define COM_OFF(x) (COM_BASE + x)
+#define PHY_OFF(x) (PHY_BASE + x)
+#define TX_OFF(n, x) (TX_BASE(n) + x)
+#define RX_OFF(n, x) (RX_BASE(n) + x)
/* UFS PHY QSERDES COM registers */
#define QSERDES_COM_ATB_SEL1 COM_OFF(0x00)
@@ -133,9 +141,13 @@
#define UFS_PHY_TX_SMALL_AMP_DRV_LVL PHY_OFF(0x34)
#define UFS_PHY_LINECFG_DISABLE PHY_OFF(0x130)
#define UFS_PHY_RX_SYM_RESYNC_CTRL PHY_OFF(0x134)
+#define UFS_PHY_RX_MIN_HIBERN8_TIME PHY_OFF(0x138)
+#define UFS_PHY_RX_SIGDET_CTRL1 PHY_OFF(0x13C)
#define UFS_PHY_RX_SIGDET_CTRL2 PHY_OFF(0x140)
#define UFS_PHY_RX_PWM_GEAR_BAND PHY_OFF(0x14C)
#define UFS_PHY_PCS_READY_STATUS PHY_OFF(0x160)
+#define UFS_PHY_TX_MID_TERM_CTRL1 PHY_OFF(0x1BC)
+#define UFS_PHY_MULTI_LANE_CTRL1 PHY_OFF(0x1C4)
/* UFS PHY TX registers */
#define QSERDES_TX0_TRANSCEIVER_BIAS_EN TX_OFF(0, 0x5C)
@@ -143,6 +155,9 @@
#define QSERDES_TX0_LANE_MODE_2 TX_OFF(0, 0x90)
#define QSERDES_TX0_LANE_MODE_3 TX_OFF(0, 0x94)
+#define QSERDES_TX1_LANE_MODE_1 TX_OFF(1, 0x8C)
+
+
/* UFS PHY RX registers */
#define QSERDES_RX0_UCDR_SVS_SO_GAIN_HALF RX_OFF(0, 0x24)
#define QSERDES_RX0_UCDR_SVS_SO_GAIN_QUARTER RX_OFF(0, 0x28)
@@ -163,6 +178,22 @@
#define QSERDES_RX0_SIGDET_DEGLITCH_CNTRL RX_OFF(0, 0x10C)
#define QSERDES_RX0_RX_INTERFACE_MODE RX_OFF(0, 0x11C)
+#define QSERDES_RX1_UCDR_SVS_SO_GAIN_HALF RX_OFF(1, 0x24)
+#define QSERDES_RX1_UCDR_SVS_SO_GAIN_QUARTER RX_OFF(1, 0x28)
+#define QSERDES_RX1_UCDR_SVS_SO_GAIN RX_OFF(1, 0x2C)
+#define QSERDES_RX1_UCDR_FASTLOCK_FO_GAIN RX_OFF(1, 0x30)
+#define QSERDES_RX1_UCDR_SO_SATURATION_AND_ENABLE RX_OFF(1, 0x34)
+#define QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW RX_OFF(1, 0x3C)
+#define QSERDES_RX1_UCDR_PI_CONTROLS RX_OFF(1, 0x44)
+#define QSERDES_RX1_RX_TERM_BW RX_OFF(1, 0x7C)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL2 RX_OFF(1, 0xD4)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL3 RX_OFF(1, 0xD8)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL4 RX_OFF(1, 0xDC)
+#define QSERDES_RX1_SIGDET_CNTRL RX_OFF(1, 0x104)
+#define QSERDES_RX1_SIGDET_LVL RX_OFF(1, 0x108)
+#define QSERDES_RX1_SIGDET_DEGLITCH_CNTRL RX_OFF(1, 0x10C)
+#define QSERDES_RX1_RX_INTERFACE_MODE RX_OFF(1, 0x11C)
+
#define UFS_PHY_RX_LINECFG_DISABLE_BIT BIT(1)
/*
@@ -181,6 +212,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x06),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xD5),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
@@ -195,22 +227,22 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x08),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x06),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x34),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x36),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3F),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0xCB),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0xDA),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x01),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xFF),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0C),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x08),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x06),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x34),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x36),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x3F),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xB2),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xC1),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0F),
@@ -234,42 +266,33 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_MID_TERM_CTRL1, 0x43),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL1, 0x0F),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_HIBERN8_TIME, 0x9A), /* 8 us */
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane[] = {
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_LANE_MODE_1, 0x06),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_LVL, 0x24),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_CNTRL, 0x0F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_DEGLITCH_CNTRL, 0x1E),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_INTERFACE_MODE, 0x40),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_TERM_BW, 0x5B),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL4, 0x1D),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN_HALF, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SO_SATURATION_AND_ENABLE, 0x4B),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_PI_CONTROLS, 0xF1),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_MULTI_LANE_CTRL1, 0x02),
};
static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x44),
};
-static struct ufs_qcom_phy_calibration phy_cal_table_svs2_enable[] = {
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE0, 0x14),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x14),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x0a),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x7e),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0x7f),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x06),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x7e),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x99),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x07),
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_MSB, 0x0b),
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_LSB, 0x66),
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_svs2_disable[] = {
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE0, 0x0a),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_MSB, 0x16),
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_LSB, 0xcc),
-};
-
#endif
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index b8b9080..d18929f 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -15,13 +15,15 @@
#include "phy-qcom-ufs-i.h"
#define MAX_PROP_NAME 32
-#define VDDA_PHY_MIN_UV 1000000
-#define VDDA_PHY_MAX_UV 1000000
+#define VDDA_PHY_MIN_UV 800000
+#define VDDA_PHY_MAX_UV 925000
#define VDDA_PLL_MIN_UV 1200000
#define VDDA_PLL_MAX_UV 1800000
#define VDDP_REF_CLK_MIN_UV 1200000
#define VDDP_REF_CLK_MAX_UV 1200000
+#define UFS_PHY_DEFAULT_LANES_PER_DIRECTION 1
+
static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
const char *, bool);
static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
@@ -113,6 +115,19 @@ struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
goto out;
}
+ if (of_property_read_u32(dev->of_node, "lanes-per-direction",
+ &common_cfg->lanes_per_direction))
+ common_cfg->lanes_per_direction =
+ UFS_PHY_DEFAULT_LANES_PER_DIRECTION;
+
+ /*
+ * UFS PHY power management is managed by its parent (UFS host
+ * controller) hence set the no the no runtime PM callbacks flag
+ * on UFS PHY device to avoid any accidental attempt to call the
+ * PM callbacks for PHY device.
+ */
+ pm_runtime_no_callbacks(&generic_phy->dev);
+
common_cfg->phy_spec_ops = phy_spec_ops;
common_cfg->dev = dev;
@@ -191,27 +206,20 @@ ufs_qcom_phy_init_clks(struct phy *generic_phy,
struct ufs_qcom_phy *phy_common)
{
int err;
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
- err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
- &phy_common->tx_iface_clk);
/*
* tx_iface_clk does not exist in newer version of ufs-phy HW,
* so don't return error if it is not found
*/
- if (err)
- dev_dbg(phy->dev, "%s: failed to get tx_iface_clk\n",
- __func__);
+ __ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
+ &phy_common->tx_iface_clk, false);
- err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
- &phy_common->rx_iface_clk);
/*
* rx_iface_clk does not exist in newer version of ufs-phy HW,
* so don't return error if it is not found
*/
- if (err)
- dev_dbg(phy->dev, "%s: failed to get rx_iface_clk\n",
- __func__);
+ __ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
+ &phy_common->rx_iface_clk, false);
err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
&phy_common->ref_clk_src);
@@ -246,7 +254,6 @@ ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
struct ufs_qcom_phy *phy_common)
{
int err;
- int vdda_phy_uV;
err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
"vdda-pll");
@@ -258,10 +265,6 @@ ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
if (err)
goto out;
- vdda_phy_uV = regulator_get_voltage(phy_common->vdda_phy.reg);
- phy_common->vdda_phy.max_uV = vdda_phy_uV;
- phy_common->vdda_phy.min_uV = vdda_phy_uV;
-
/* vddp-ref-clk-* properties are optional */
__ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
"vddp-ref-clk", true);
@@ -279,6 +282,14 @@ static int __ufs_qcom_phy_init_vreg(struct phy *phy,
char prop_name[MAX_PROP_NAME];
+ if (dev->of_node) {
+ snprintf(prop_name, MAX_PROP_NAME, "%s-supply", name);
+ if (!of_parse_phandle(dev->of_node, prop_name, 0)) {
+ dev_dbg(dev, "No vreg data found for %s\n", prop_name);
+ return optional ? err : -ENODATA;
+ }
+ }
+
vreg->name = kstrdup(name, GFP_KERNEL);
if (!vreg->name) {
err = -ENOMEM;
@@ -786,3 +797,21 @@ int ufs_qcom_phy_configure_lpm(struct phy *generic_phy, bool enable)
return ret;
}
EXPORT_SYMBOL(ufs_qcom_phy_configure_lpm);
+
+void ufs_qcom_phy_dump_regs(struct ufs_qcom_phy *phy, int offset,
+ int len, char *prefix)
+{
+ print_hex_dump(KERN_ERR, prefix,
+ len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
+ 16, 4, phy->mmio + offset, len, false);
+}
+EXPORT_SYMBOL(ufs_qcom_phy_dump_regs);
+
+void ufs_qcom_phy_dbg_register_dump(struct phy *generic_phy)
+{
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+
+ if (ufs_qcom_phy->phy_spec_ops->dbg_register_dump)
+ ufs_qcom_phy->phy_spec_ops->dbg_register_dump(ufs_qcom_phy);
+}
+EXPORT_SYMBOL(ufs_qcom_phy_dbg_register_dump);
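
The two exports just added give the UFS host controller a debug hook: ufs_qcom_phy_dbg_register_dump() forwards to the PHY-specific dbg_register_dump op (wired up for QMP v3 above), which in turn uses ufs_qcom_phy_dump_regs() to print_hex_dump() each register block. A hypothetical caller is sketched below; the error-handler name is made up for illustration and only the exported function comes from this patch.

/*
 * Hypothetical caller sketch: the host controller dumps the PHY
 * register space when a link error is detected. Only
 * ufs_qcom_phy_dbg_register_dump() is provided by this patch.
 */
static void example_host_dump_phy(struct phy *generic_phy)
{
	/* dumps QSERDES COM, PHY (PCS), and per-lane TX/RX blocks */
	ufs_qcom_phy_dbg_register_dump(generic_phy);
}

Routing the dump through phy_spec_ops keeps the generic phy-qcom-ufs core free of version-specific register maps; each QMP variant describes its own block bases and sizes (COM_SIZE, PHY_SIZE, TX_SIZE, RX_SIZE).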
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index 4b576cc..67adf58 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -727,6 +727,15 @@ enum sdm845_functions {
msm_mux_reserved79,
msm_mux_reserved80,
msm_mux_qup15,
+ msm_mux_reserved81,
+ msm_mux_reserved82,
+ msm_mux_reserved83,
+ msm_mux_reserved84,
+ msm_mux_pcie1_pwrfault,
+ msm_mux_qup5,
+ msm_mux_reserved85,
+ msm_mux_pcie1_mrl,
+ msm_mux_reserved86,
msm_mux_reserved87,
msm_mux_reserved88,
msm_mux_tsif1_clk,
@@ -751,15 +760,6 @@ enum sdm845_functions {
msm_mux_vfr_1,
msm_mux_tgu_ch2,
msm_mux_reserved92,
- msm_mux_reserved81,
- msm_mux_reserved82,
- msm_mux_reserved83,
- msm_mux_reserved84,
- msm_mux_pcie1_pwrfault,
- msm_mux_qup5,
- msm_mux_reserved85,
- msm_mux_pcie1_mrl,
- msm_mux_reserved86,
msm_mux_tsif2_clk,
msm_mux_sdc4_clk,
msm_mux_qup7,
@@ -1681,6 +1681,33 @@ static const char * const reserved80_groups[] = {
static const char * const qup15_groups[] = {
"gpio81", "gpio82", "gpio83", "gpio84",
};
+static const char * const reserved81_groups[] = {
+ "gpio81",
+};
+static const char * const reserved82_groups[] = {
+ "gpio82",
+};
+static const char * const reserved83_groups[] = {
+ "gpio83",
+};
+static const char * const reserved84_groups[] = {
+ "gpio84",
+};
+static const char * const pcie1_pwrfault_groups[] = {
+ "gpio85",
+};
+static const char * const qup5_groups[] = {
+ "gpio85", "gpio86", "gpio87", "gpio88",
+};
+static const char * const reserved85_groups[] = {
+ "gpio85",
+};
+static const char * const pcie1_mrl_groups[] = {
+ "gpio86",
+};
+static const char * const reserved86_groups[] = {
+ "gpio86",
+};
static const char * const reserved87_groups[] = {
"gpio87",
};
@@ -1753,33 +1780,6 @@ static const char * const tgu_ch2_groups[] = {
static const char * const reserved92_groups[] = {
"gpio92",
};
-static const char * const reserved81_groups[] = {
- "gpio81",
-};
-static const char * const reserved82_groups[] = {
- "gpio82",
-};
-static const char * const reserved83_groups[] = {
- "gpio83",
-};
-static const char * const reserved84_groups[] = {
- "gpio84",
-};
-static const char * const pcie1_pwrfault_groups[] = {
- "gpio85",
-};
-static const char * const qup5_groups[] = {
- "gpio85", "gpio86", "gpio87", "gpio88",
-};
-static const char * const reserved85_groups[] = {
- "gpio85",
-};
-static const char * const pcie1_mrl_groups[] = {
- "gpio86",
-};
-static const char * const reserved86_groups[] = {
- "gpio86",
-};
static const char * const tsif2_clk_groups[] = {
"gpio93",
};
@@ -2113,6 +2113,15 @@ static const struct msm_function sdm845_functions[] = {
FUNCTION(reserved79),
FUNCTION(reserved80),
FUNCTION(qup15),
+ FUNCTION(reserved81),
+ FUNCTION(reserved82),
+ FUNCTION(reserved83),
+ FUNCTION(reserved84),
+ FUNCTION(pcie1_pwrfault),
+ FUNCTION(qup5),
+ FUNCTION(reserved85),
+ FUNCTION(pcie1_mrl),
+ FUNCTION(reserved86),
FUNCTION(reserved87),
FUNCTION(reserved88),
FUNCTION(tsif1_clk),
@@ -2137,15 +2146,6 @@ static const struct msm_function sdm845_functions[] = {
FUNCTION(vfr_1),
FUNCTION(tgu_ch2),
FUNCTION(reserved92),
- FUNCTION(reserved81),
- FUNCTION(reserved82),
- FUNCTION(reserved83),
- FUNCTION(reserved84),
- FUNCTION(pcie1_pwrfault),
- FUNCTION(qup5),
- FUNCTION(reserved85),
- FUNCTION(pcie1_mrl),
- FUNCTION(reserved86),
FUNCTION(tsif2_clk),
FUNCTION(sdc4_clk),
FUNCTION(qup7),
@@ -2418,10 +2418,10 @@ static const struct msm_pingroup sdm845_groups[] = {
PINGROUP(147, NORTH, NA, NA, reserved147, NA, NA, NA, NA, NA, NA),
PINGROUP(148, NORTH, NA, reserved148, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(149, NORTH, NA, reserved149, NA, NA, NA, NA, NA, NA, NA),
- SDC_QDSD_PINGROUP(sdc2_clk, 0x59a000, 14, 6),
- SDC_QDSD_PINGROUP(sdc2_cmd, 0x59a000, 11, 3),
- SDC_QDSD_PINGROUP(sdc2_data, 0x59a000, 9, 0),
- UFS_RESET(ufs_reset, 0x59f000),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x99a000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x99a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x99a000, 9, 0),
+ UFS_RESET(ufs_reset, 0x99f000),
};
static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 77e9dd7..f06fb1f 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -138,6 +138,7 @@ enum pmic_gpio_func_index {
* struct pmic_gpio_pad - keep current GPIO settings
* @base: Address base in SPMI device.
* @irq: IRQ number which this GPIO generate.
+ * @gpio_idx: The index in GPIO's hardware number space (1-based)
* @is_enabled: Set to false when GPIO should be put in high Z state.
* @out_value: Cached pin output value
* @have_buffer: Set to true if GPIO output could be configured in push-pull,
@@ -158,6 +159,7 @@ enum pmic_gpio_func_index {
struct pmic_gpio_pad {
u16 base;
int irq;
+ int gpio_idx;
bool is_enabled;
bool out_value;
bool have_buffer;
@@ -179,6 +181,7 @@ struct pmic_gpio_state {
struct regmap *map;
struct pinctrl_dev *ctrl;
struct gpio_chip chip;
+ const char **gpio_groups;
};
static const struct pinconf_generic_params pmic_gpio_bindings[] = {
@@ -297,7 +300,9 @@ static int pmic_gpio_get_function_groups(struct pinctrl_dev *pctldev,
const char *const **groups,
unsigned *const num_qgroups)
{
- *groups = pmic_gpio_groups;
+ struct pmic_gpio_state *state = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = state->gpio_groups;
*num_qgroups = pctldev->desc->npins;
return 0;
}
@@ -637,7 +642,7 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
pad = pctldev->desc->pins[pin].drv_data;
- seq_printf(s, " gpio%-2d:", pin + PMIC_GPIO_PHYSICAL_OFFSET);
+ seq_printf(s, " gpio%-2d:", pad->gpio_idx);
val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_EN_CTL);
@@ -742,13 +747,29 @@ static int pmic_gpio_of_xlate(struct gpio_chip *chip,
const struct of_phandle_args *gpio_desc,
u32 *flags)
{
+ int i;
+ struct pmic_gpio_state *state = gpiochip_get_data(chip);
+ struct pinctrl_desc *desc = state->ctrl->desc;
+ struct pmic_gpio_pad *pad;
+
if (chip->of_gpio_n_cells < 2)
return -EINVAL;
if (flags)
*flags = gpio_desc->args[1];
- return gpio_desc->args[0] - PMIC_GPIO_PHYSICAL_OFFSET;
+ for (i = 0; i < chip->ngpio; i++) {
+ pad = desc->pins[i].drv_data;
+ if (pad->gpio_idx == gpio_desc->args[0]) {
+ dev_dbg(state->dev, "gpio%-2d xlate to pin%-2d\n",
+ gpio_desc->args[0], i);
+ return i;
+ }
+ }
+
+ dev_err(state->dev, "Couldn't find pin for gpio %d\n",
+ gpio_desc->args[0]);
+ return -ENODEV;
}
static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
@@ -934,43 +955,124 @@ static int pmic_gpio_probe(struct platform_device *pdev)
struct pinctrl_desc *pctrldesc;
struct pmic_gpio_pad *pad, *pads;
struct pmic_gpio_state *state;
- int ret, npins, i;
- u32 reg;
+ int ret, npins, ngpios, i, j, pin_idx;
+ int disallowed_count = 0;
+ u32 reg[2], start, size;
+ u32 *disallowed = NULL;
- ret = of_property_read_u32(dev->of_node, "reg", ®);
+ ret = of_property_read_u32_array(dev->of_node, "reg", reg, 2);
if (ret < 0) {
- dev_err(dev, "missing base address");
+ dev_err(dev, "reg property reading failed\n");
return ret;
}
+ start = reg[0];
+ size = reg[1];
- npins = platform_irq_count(pdev);
- if (!npins)
+ ngpios = size / PMIC_GPIO_ADDRESS_RANGE;
+ if (ngpios == 0) {
+ dev_err(dev, "no gpios assigned\n");
+ return -ENODEV;
+ }
+
+ if (ngpios > ARRAY_SIZE(pmic_gpio_groups)) {
+ dev_err(dev, "reg property defines %d gpios, but only %d are allowed\n",
+ ngpios, (int)ARRAY_SIZE(pmic_gpio_groups));
return -EINVAL;
- if (npins < 0)
- return npins;
+ }
- BUG_ON(npins > ARRAY_SIZE(pmic_gpio_groups));
+ if (of_find_property(dev->of_node, "qcom,gpios-disallowed",
+ &disallowed_count)) {
+ disallowed_count /= sizeof(u32);
+ if (disallowed_count == 0) {
+ dev_err(dev, "No data in gpios-disallowed\n");
+ return -EINVAL;
+ }
+
+ disallowed = kcalloc(disallowed_count, sizeof(u32), GFP_KERNEL);
+ if (disallowed == NULL)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(dev->of_node,
+ "qcom,gpios-disallowed",
+ disallowed, disallowed_count);
+ if (ret < 0) {
+ dev_err(dev, "qcom,gpios-disallowed property reading failed, ret=%d\n",
+ ret);
+ goto err_free;
+ }
+
+ for (i = 0; i < disallowed_count; i++) {
+ if (disallowed[i] >= ngpios + PMIC_GPIO_PHYSICAL_OFFSET
+ || disallowed[i] < PMIC_GPIO_PHYSICAL_OFFSET) {
+ dev_err(dev, "invalid gpio = %d specified in qcom,gpios-disallowed, supported values: %d to %d\n",
+ disallowed[i],
+ PMIC_GPIO_PHYSICAL_OFFSET,
+ ngpios - 1 + PMIC_GPIO_PHYSICAL_OFFSET);
+ ret = -EINVAL;
+ goto err_free;
+ }
+ for (j = 0; j < i; j++) {
+ if (disallowed[i] == disallowed[j]) {
+ dev_err(dev, "duplicate gpio = %d listed in qcom,gpios-disallowed\n",
+ disallowed[i]);
+ ret = -EINVAL;
+ goto err_free;
+ }
+ }
+ dev_dbg(dev, "gpio %d NOT supported\n", disallowed[i]);
+ }
+ } else {
+ disallowed_count = 0;
+ }
+
+ npins = ngpios - disallowed_count;
+ if (npins <= 0) {
+ dev_err(dev, "No pins assigned\n");
+ ret = -ENODEV;
+ goto err_free;
+ }
+ if (platform_irq_count(pdev) != npins) {
+ dev_err(dev, "%d IRQs defined but %d expected\n",
+ platform_irq_count(pdev), npins);
+ ret = -EINVAL;
+ goto err_free;
+ }
state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
- if (!state)
- return -ENOMEM;
+ if (!state) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
platform_set_drvdata(pdev, state);
state->dev = &pdev->dev;
state->map = dev_get_regmap(dev->parent, NULL);
+ state->gpio_groups = devm_kcalloc(dev, sizeof(*state->gpio_groups),
+ npins, GFP_KERNEL);
+ if (!state->gpio_groups) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
- if (!pindesc)
- return -ENOMEM;
+ if (!pindesc) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
pads = devm_kcalloc(dev, npins, sizeof(*pads), GFP_KERNEL);
- if (!pads)
- return -ENOMEM;
+ if (!pads) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
pctrldesc = devm_kzalloc(dev, sizeof(*pctrldesc), GFP_KERNEL);
- if (!pctrldesc)
- return -ENOMEM;
+ if (!pctrldesc) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
pctrldesc->pctlops = &pmic_gpio_pinctrl_ops;
pctrldesc->pmxops = &pmic_gpio_pinmux_ops;
@@ -984,22 +1086,42 @@ static int pmic_gpio_probe(struct platform_device *pdev)
#ifdef CONFIG_DEBUG_FS
pctrldesc->custom_conf_items = pmic_conf_items;
#endif
+ for (pin_idx = 0, i = 0; i < ngpios; i++) {
+ for (j = 0; j < disallowed_count; j++) {
+ if (i + PMIC_GPIO_PHYSICAL_OFFSET == disallowed[j])
+ break;
+ }
+ if (j != disallowed_count)
+ continue;
- for (i = 0; i < npins; i++, pindesc++) {
- pad = &pads[i];
+ pad = &pads[pin_idx];
pindesc->drv_data = pad;
- pindesc->number = i;
+ pindesc->number = pin_idx;
pindesc->name = pmic_gpio_groups[i];
- pad->irq = platform_get_irq(pdev, i);
- if (pad->irq < 0)
- return pad->irq;
+ pad->gpio_idx = i + PMIC_GPIO_PHYSICAL_OFFSET;
+ pad->irq = platform_get_irq(pdev, pin_idx);
+ if (pad->irq < 0) {
+ dev_err(state->dev,
+ "failed to get irq for gpio %d (pin %d), ret=%d\n",
+ pad->gpio_idx, pin_idx, pad->irq);
+ ret = pad->irq;
+ goto err_free;
+ }
+ /* Every pin is a group */
+ state->gpio_groups[pin_idx] = pmic_gpio_groups[i];
- pad->base = reg + i * PMIC_GPIO_ADDRESS_RANGE;
+ pad->base = start + i * PMIC_GPIO_ADDRESS_RANGE;
ret = pmic_gpio_populate(state, pad);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ dev_err(state->dev,
+ "failed to populate gpio %d, ret=%d\n",
+ i, ret);
+ goto err_free;
+ }
+ pindesc++;
+ pin_idx++;
}
state->chip = pmic_gpio_gpio_template;
@@ -1011,25 +1133,29 @@ static int pmic_gpio_probe(struct platform_device *pdev)
state->chip.can_sleep = false;
state->ctrl = devm_pinctrl_register(dev, pctrldesc, state);
- if (IS_ERR(state->ctrl))
- return PTR_ERR(state->ctrl);
+ if (IS_ERR(state->ctrl)) {
+ ret = PTR_ERR(state->ctrl);
+ dev_err(state->dev, "failed to register pinctrl device, ret=%d\n",
+ ret);
+ goto err_free;
+ }
ret = gpiochip_add_data(&state->chip, state);
if (ret) {
- dev_err(state->dev, "can't add gpio chip\n");
- return ret;
+ dev_err(state->dev, "can't add gpio chip, ret=%d\n", ret);
+ goto err_free;
}
ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
if (ret) {
- dev_err(dev, "failed to add pin range\n");
- goto err_range;
+ dev_err(dev, "failed to add pin range\n, ret=%d\n", ret);
+ gpiochip_remove(&state->chip);
+ goto err_free;
}
- return 0;
+err_free:
+ kfree(disallowed);
-err_range:
- gpiochip_remove(&state->chip);
return ret;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 20b73d8..7759c98 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2845,6 +2845,13 @@ static int ipa3_setup_apps_pipes(void)
}
}
+ /* allocate the common PROD event ring */
+ if (ipa3_alloc_common_event_ring()) {
+ IPAERR("ipa3_alloc_common_event_ring failed.\n");
+ result = -EPERM;
+ goto fail_ch20_wa;
+ }
+
/* CMD OUT (AP->IPA) */
memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
@@ -4239,6 +4246,52 @@ static int ipa3_tz_unlock_reg(struct ipa3_context *ipa3_ctx)
return 0;
}
+static int ipa3_alloc_pkt_init(void)
+{
+ struct ipa_mem_buffer mem;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ struct ipahal_imm_cmd_ip_packet_init cmd = {0};
+ int i;
+
+ cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
+ &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct IMM cmd\n");
+ return -ENOMEM;
+ }
+
+ mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes;
+ mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
+ &mem.phys_base, GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
+ ipahal_destroy_imm_cmd(cmd_pyld);
+ return -ENOMEM;
+ }
+ ipahal_destroy_imm_cmd(cmd_pyld);
+
+ memset(mem.base, 0, mem.size);
+ for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+ cmd.destination_pipe_index = i;
+ cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
+ &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct IMM cmd\n");
+ dma_free_coherent(ipa3_ctx->pdev,
+ mem.size,
+ mem.base,
+ mem.phys_base);
+ return -ENOMEM;
+ }
+ memcpy(mem.base + i * cmd_pyld->len, cmd_pyld->data,
+ cmd_pyld->len);
+ ipa3_ctx->pkt_init_imm[i] = mem.phys_base + i * cmd_pyld->len;
+ ipahal_destroy_imm_cmd(cmd_pyld);
+ }
+
+ return 0;
+}
+
/**
* ipa3_pre_init() - Initialize the IPA Driver.
* This part contains all initialization which doesn't require IPA HW, such
@@ -4648,6 +4701,13 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
goto fail_create_apps_resource;
}
+ result = ipa3_alloc_pkt_init();
+ if (result) {
+ IPAERR("Failed to alloc pkt_init payload\n");
+ result = -ENODEV;
+ goto fail_create_apps_resource;
+ }
+
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
ipa3_enable_dcd();
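
ipa3_alloc_pkt_init() above pre-builds one IP_PACKET_INIT immediate command per pipe in a single coherent DMA buffer and records each command's bus address in ipa3_ctx->pkt_init_imm[]. The data path (ipa_dp.c, next file) can then point a descriptor directly at the cached command instead of constructing and freeing an immediate command for every packet. A reduced sketch of that consumer pattern, with the surrounding descriptors omitted, is:

/*
 * Consumer-side sketch (condensed from ipa3_tx_dp() in ipa_dp.c):
 * reference the pre-built IP_PACKET_INIT command by its cached DMA
 * address rather than constructing a fresh one per packet.
 */
static void example_fill_pkt_init_desc(struct ipa3_desc *desc, int dst_ep_idx)
{
	desc->opcode =
		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
	desc->type = IPA_IMM_CMD_DESC;
	desc->dma_address_valid = true;
	desc->dma_address = ipa3_ctx->pkt_init_imm[dst_ep_idx];
	desc->callback = NULL;	/* nothing to destroy per packet */
}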
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 9bb3e0e..3e4bd79 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -69,6 +69,9 @@
#define IPA_GSI_CH_20_WA_VIRT_CHAN 29
#define IPA_DEFAULT_SYS_YELLOW_WM 32
+#define IPA_REPL_XFER_THRESH 10
+
+#define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000)
/*
* The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but
@@ -183,106 +186,64 @@ static void ipa3_wq_write_done(struct work_struct *work)
{
struct ipa3_tx_pkt_wrapper *tx_pkt;
struct ipa3_sys_context *sys;
+ struct ipa3_tx_pkt_wrapper *this_pkt;
tx_pkt = container_of(work, struct ipa3_tx_pkt_wrapper, work);
sys = tx_pkt->sys;
-
+ spin_lock_bh(&sys->spinlock);
+ this_pkt = list_first_entry(&sys->head_desc_list,
+ struct ipa3_tx_pkt_wrapper, link);
+ while (tx_pkt != this_pkt) {
+ spin_unlock_bh(&sys->spinlock);
+ ipa3_wq_write_done_common(sys, this_pkt);
+ spin_lock_bh(&sys->spinlock);
+ this_pkt = list_first_entry(&sys->head_desc_list,
+ struct ipa3_tx_pkt_wrapper, link);
+ }
+ spin_unlock_bh(&sys->spinlock);
ipa3_wq_write_done_common(sys, tx_pkt);
}
-/**
- * ipa3_send_one() - Send a single descriptor
- * @sys: system pipe context
- * @desc: descriptor to send
- * @in_atomic: whether caller is in atomic context
- *
- * - Allocate tx_packet wrapper
- * - transfer data to the IPA
- * - after the transfer was done the user will be notified via provided
- * callback
- *
- * Return codes: 0: success, -EFAULT: failure
- */
-int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
- bool in_atomic)
+
+static void ipa3_send_nop_desc(struct work_struct *work)
{
+ struct ipa3_sys_context *sys = container_of(work,
+ struct ipa3_sys_context, work);
+ struct gsi_xfer_elem nop_xfer;
struct ipa3_tx_pkt_wrapper *tx_pkt;
- struct gsi_xfer_elem gsi_xfer;
- int result;
- dma_addr_t dma_address;
- u32 mem_flag = GFP_ATOMIC;
- if (unlikely(!in_atomic))
- mem_flag = GFP_KERNEL;
-
- tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, mem_flag);
+ IPADBG_LOW("gsi send NOP for ch: %lu\n", sys->ep->gsi_chan_hdl);
+ tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
if (!tx_pkt) {
IPAERR("failed to alloc tx wrapper\n");
- goto fail_mem_alloc;
- }
-
- if (!desc->dma_address_valid) {
- dma_address = dma_map_single(ipa3_ctx->pdev, desc->pyld,
- desc->len, DMA_TO_DEVICE);
- } else {
- dma_address = desc->dma_address;
- tx_pkt->no_unmap_dma = true;
- }
- if (!dma_address) {
- IPAERR("failed to DMA wrap\n");
- goto fail_dma_map;
+ queue_work(sys->wq, &sys->work);
+ return;
}
INIT_LIST_HEAD(&tx_pkt->link);
- tx_pkt->type = desc->type;
- tx_pkt->cnt = 1; /* only 1 desc in this "set" */
-
- tx_pkt->mem.phys_base = dma_address;
- tx_pkt->mem.base = desc->pyld;
- tx_pkt->mem.size = desc->len;
- tx_pkt->sys = sys;
- tx_pkt->callback = desc->callback;
- tx_pkt->user1 = desc->user1;
- tx_pkt->user2 = desc->user2;
-
- memset(&gsi_xfer, 0, sizeof(gsi_xfer));
- gsi_xfer.addr = dma_address;
- gsi_xfer.flags |= GSI_XFER_FLAG_EOT;
- gsi_xfer.xfer_user_data = tx_pkt;
- if (desc->type == IPA_IMM_CMD_DESC) {
- gsi_xfer.len = desc->opcode;
- gsi_xfer.type = GSI_XFER_ELEM_IMME_CMD;
- } else {
- gsi_xfer.len = desc->len;
- gsi_xfer.type = GSI_XFER_ELEM_DATA;
- }
-
+ tx_pkt->cnt = 1;
INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
-
+ tx_pkt->no_unmap_dma = true;
+ tx_pkt->sys = sys;
spin_lock_bh(&sys->spinlock);
list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+ spin_unlock_bh(&sys->spinlock);
- result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
- &gsi_xfer, true);
- if (result != GSI_STATUS_SUCCESS) {
- IPAERR("GSI xfer failed.\n");
- goto fail_transport_send;
+ memset(&nop_xfer, 0, sizeof(nop_xfer));
+ nop_xfer.type = GSI_XFER_ELEM_NOP;
+ nop_xfer.flags = GSI_XFER_FLAG_EOT;
+ nop_xfer.xfer_user_data = tx_pkt;
+ if (gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, &nop_xfer, true)) {
+ IPAERR("gsi_queue_xfer for ch:%lu failed\n",
+ sys->ep->gsi_chan_hdl);
+ queue_work(sys->wq, &sys->work);
+ return;
}
+ sys->len_pending_xfer = 0;
- spin_unlock_bh(&sys->spinlock);
-
- return 0;
-
-fail_transport_send:
- list_del(&tx_pkt->link);
- spin_unlock_bh(&sys->spinlock);
- dma_unmap_single(ipa3_ctx->pdev, dma_address, desc->len, DMA_TO_DEVICE);
-fail_dma_map:
- kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
-fail_mem_alloc:
- return -EFAULT;
}
+
/**
* ipa3_send() - Send multiple descriptors in one HW transaction
* @sys: system pipe context
@@ -437,19 +398,21 @@ int ipa3_send(struct ipa3_sys_context *sys,
}
if (i == (num_desc - 1)) {
- gsi_xfer_elem_array[i].flags |=
- GSI_XFER_FLAG_EOT;
- if (sys->ep->client == IPA_CLIENT_APPS_WAN_PROD
- && sys->policy == IPA_POLICY_INTR_MODE)
+ if (!sys->use_comm_evt_ring) {
+ gsi_xfer_elem_array[i].flags |=
+ GSI_XFER_FLAG_EOT;
gsi_xfer_elem_array[i].flags |=
GSI_XFER_FLAG_BEI;
+ }
gsi_xfer_elem_array[i].xfer_user_data =
tx_pkt_first;
- } else
- gsi_xfer_elem_array[i].flags |=
- GSI_XFER_FLAG_CHAIN;
+ } else {
+ gsi_xfer_elem_array[i].flags |=
+ GSI_XFER_FLAG_CHAIN;
+ }
}
+ IPADBG_LOW("ch:%lu queue xfer\n", sys->ep->gsi_chan_hdl);
result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
gsi_xfer_elem_array, true);
if (result != GSI_STATUS_SUCCESS) {
@@ -458,7 +421,18 @@ int ipa3_send(struct ipa3_sys_context *sys,
}
kfree(gsi_xfer_elem_array);
spin_unlock_bh(&sys->spinlock);
+
+ /* set the timer for sending the NOP descriptor */
+ if (sys->use_comm_evt_ring && !hrtimer_active(&sys->db_timer)) {
+ ktime_t time = ktime_set(0, IPA_TX_SEND_COMPL_NOP_DELAY_NS);
+
+ IPADBG_LOW("scheduling timer for ch %lu\n",
+ sys->ep->gsi_chan_hdl);
+ hrtimer_start(&sys->db_timer, time, HRTIMER_MODE_REL);
+ }
+
return 0;
failure:
@@ -491,6 +465,25 @@ int ipa3_send(struct ipa3_sys_context *sys,
}
/**
+ * ipa3_send_one() - Send a single descriptor
+ * @sys: system pipe context
+ * @desc: descriptor to send
+ * @in_atomic: whether caller is in atomic context
+ *
+ * - Allocate tx_packet wrapper
+ * - transfer data to the IPA
+ * - after the transfer was done the SPS will
+ * notify the sending user via ipa_sps_irq_comp_tx()
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
+ bool in_atomic)
+{
+ return ipa3_send(sys, 1, desc, in_atomic);
+}
+
+/**
* ipa3_transport_irq_cmd_ack - callback function which will be called by
* the transport driver after an immediate command is complete.
* @user1: pointer to the descriptor of the transfer
@@ -771,15 +764,14 @@ static void ipa3_handle_rx(struct ipa3_sys_context *sys)
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
do {
cnt = ipa3_handle_rx_core(sys, true, true);
- if (cnt == 0) {
+ if (cnt == 0)
inactive_cycles++;
- trace_idle_sleep_enter3(sys->ep->client);
- usleep_range(POLLING_MIN_SLEEP_RX,
- POLLING_MAX_SLEEP_RX);
- trace_idle_sleep_exit3(sys->ep->client);
- } else {
+ else
inactive_cycles = 0;
- }
+
+ trace_idle_sleep_enter3(sys->ep->client);
+ usleep_range(POLLING_MIN_SLEEP_RX, POLLING_MAX_SLEEP_RX);
+ trace_idle_sleep_exit3(sys->ep->client);
} while (inactive_cycles <= POLLING_INACTIVITY_RX);
trace_poll_to_intr3(sys->ep->client);
@@ -808,6 +800,15 @@ static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
ipa3_handle_rx(sys);
}
+enum hrtimer_restart ipa3_ring_doorbell_timer_fn(struct hrtimer *param)
+{
+ struct ipa3_sys_context *sys = container_of(param,
+ struct ipa3_sys_context, db_timer);
+
+ queue_work(sys->wq, &sys->work);
+ return HRTIMER_NORESTART;
+}
+
/**
* ipa3_setup_sys_pipe() - Setup an IPA GPI pipe and perform
* IPA EP configuration
@@ -889,6 +890,9 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
INIT_LIST_HEAD(&ep->sys->head_desc_list);
INIT_LIST_HEAD(&ep->sys->rcycl_list);
spin_lock_init(&ep->sys->spinlock);
+ hrtimer_init(&ep->sys->db_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn;
} else {
memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
}
@@ -1071,7 +1075,10 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
}
/* free event ring only when it is present */
- if (ep->gsi_evt_ring_hdl != ~0) {
+ if (ep->sys->use_comm_evt_ring) {
+ ipa3_ctx->gsi_evt_comm_ring_rem +=
+ ep->gsi_mem_info.chan_ring_len;
+ } else if (ep->gsi_evt_ring_hdl != ~0) {
result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("Failed to reset evt ring: %d.\n",
@@ -1145,7 +1152,7 @@ static void ipa3_tx_comp_usr_notify_release(void *user1, int user2)
dev_kfree_skb_any(skb);
}
-static void ipa3_tx_cmd_comp(void *user1, int user2)
+void ipa3_tx_cmd_comp(void *user1, int user2)
{
ipahal_destroy_imm_cmd(user1);
}
@@ -1180,7 +1187,6 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
struct ipa3_desc *desc;
struct ipa3_desc _desc[3];
int dst_ep_idx;
- struct ipahal_imm_cmd_ip_packet_init cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
struct ipa3_sys_context *sys;
int src_ep_idx;
@@ -1267,54 +1273,58 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
if (dst_ep_idx != -1) {
/* SW data path */
- cmd.destination_pipe_index = dst_ep_idx;
- cmd_pyld = ipahal_construct_imm_cmd(
- IPA_IMM_CMD_IP_PACKET_INIT, &cmd, true);
- if (unlikely(!cmd_pyld)) {
- IPAERR("failed to construct ip_packet_init imm cmd\n");
- goto fail_mem;
+ data_idx = 0;
+ if (sys->policy == IPA_POLICY_NOINTR_MODE) {
+ /*
+ * For a non-interrupt mode channel (where there is
+ * no event ring), a TAG STATUS command is used for
+ * completion notification: IPA generates a status
+ * packet with tag info as a result of that command.
+ */
+ desc[data_idx].opcode =
+ ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+ desc[data_idx].type = IPA_IMM_CMD_DESC;
+ desc[data_idx].callback = ipa3_tag_destroy_imm;
+ data_idx++;
}
-
- /* the tag field will be populated in ipa3_send() function */
- desc[0].opcode = ipahal_imm_cmd_get_opcode(
- IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
- desc[0].type = IPA_IMM_CMD_DESC;
- desc[0].callback = ipa3_tag_destroy_imm;
- desc[1].opcode =
+ desc[data_idx].opcode =
ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
- desc[1].pyld = cmd_pyld->data;
- desc[1].len = cmd_pyld->len;
- desc[1].type = IPA_IMM_CMD_DESC;
- desc[1].callback = ipa3_tx_cmd_comp;
- desc[1].user1 = cmd_pyld;
- desc[2].pyld = skb->data;
- desc[2].len = skb_headlen(skb);
- desc[2].type = IPA_DATA_DESC_SKB;
- desc[2].callback = ipa3_tx_comp_usr_notify_release;
- desc[2].user1 = skb;
- desc[2].user2 = (meta && meta->pkt_init_dst_ep_valid &&
+ desc[data_idx].dma_address_valid = true;
+ desc[data_idx].dma_address = ipa3_ctx->pkt_init_imm[dst_ep_idx];
+ desc[data_idx].type = IPA_IMM_CMD_DESC;
+ desc[data_idx].callback = NULL;
+ data_idx++;
+ desc[data_idx].pyld = skb->data;
+ desc[data_idx].len = skb_headlen(skb);
+ desc[data_idx].type = IPA_DATA_DESC_SKB;
+ desc[data_idx].callback = ipa3_tx_comp_usr_notify_release;
+ desc[data_idx].user1 = skb;
+ desc[data_idx].user2 = (meta && meta->pkt_init_dst_ep_valid &&
meta->pkt_init_dst_ep_remote) ?
src_ep_idx :
dst_ep_idx;
if (meta && meta->dma_address_valid) {
- desc[2].dma_address_valid = true;
- desc[2].dma_address = meta->dma_address;
+ desc[data_idx].dma_address_valid = true;
+ desc[data_idx].dma_address = meta->dma_address;
}
+ data_idx++;
for (f = 0; f < num_frags; f++) {
- desc[3+f].frag = &skb_shinfo(skb)->frags[f];
- desc[3+f].type = IPA_DATA_DESC_SKB_PAGED;
- desc[3+f].len = skb_frag_size(desc[3+f].frag);
+ desc[data_idx + f].frag = &skb_shinfo(skb)->frags[f];
+ desc[data_idx + f].type = IPA_DATA_DESC_SKB_PAGED;
+ desc[data_idx + f].len =
+ skb_frag_size(desc[data_idx + f].frag);
}
/* don't free skb till frag mappings are released */
if (num_frags) {
- desc[3+f-1].callback = desc[2].callback;
- desc[3+f-1].user1 = desc[2].user1;
- desc[3+f-1].user2 = desc[2].user2;
- desc[2].callback = NULL;
+ desc[data_idx + f - 1].callback = desc[data_idx - 1].callback;
+ desc[data_idx + f - 1].user1 = desc[data_idx - 1].user1;
+ desc[data_idx + f - 1].user2 = desc[data_idx - 1].user2;
+ desc[data_idx - 1].callback = NULL;
}
- if (ipa3_send(sys, num_frags + 3, desc, true)) {
+ if (ipa3_send(sys, num_frags + data_idx, desc, true)) {
IPAERR("fail to send skb %p num_frags %u SWP\n",
skb, num_frags);
goto fail_send;
@@ -1699,12 +1709,21 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
gsi_xfer_elem_one.xfer_user_data = rx_pkt;
ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
- 1, &gsi_xfer_elem_one, true);
+ 1, &gsi_xfer_elem_one, false);
if (ret != GSI_STATUS_SUCCESS) {
IPAERR("failed to provide buffer: %d\n",
ret);
goto fail_provide_rx_buffer;
}
+
+ /*
+ * As ringing the doorbell is a costly operation, notify
+ * GSI of new buffers only once the threshold is exceeded
+ */
+ if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
+ sys->len_pending_xfer = 0;
+ gsi_start_xfer(sys->ep->gsi_chan_hdl);
+ }
}
return;
@@ -1719,7 +1738,7 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
fail_skb_alloc:
kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
fail_kmem_cache_alloc:
- if (rx_len_cached == 0)
+ if (rx_len_cached - sys->len_pending_xfer == 0)
queue_delayed_work(sys->wq, &sys->replenish_rx_work,
msecs_to_jiffies(1));
}
@@ -1794,12 +1813,21 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
gsi_xfer_elem_one.xfer_user_data = rx_pkt;
ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
- 1, &gsi_xfer_elem_one, true);
+ 1, &gsi_xfer_elem_one, false);
if (ret != GSI_STATUS_SUCCESS) {
IPAERR("failed to provide buffer: %d\n",
ret);
goto fail_provide_rx_buffer;
}
+
+ /*
+ * As ringing the doorbell is a costly operation, notify
+ * GSI of new buffers only once the threshold is exceeded
+ */
+ if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
+ sys->len_pending_xfer = 0;
+ gsi_start_xfer(sys->ep->gsi_chan_hdl);
+ }
}
return;
@@ -1815,7 +1843,7 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
INIT_LIST_HEAD(&rx_pkt->link);
spin_unlock_bh(&sys->spinlock);
fail_kmem_cache_alloc:
- if (rx_len_cached == 0)
+ if (rx_len_cached - sys->len_pending_xfer == 0)
queue_delayed_work(sys->wq, &sys->replenish_rx_work,
msecs_to_jiffies(1));
}
@@ -1848,12 +1876,22 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
gsi_xfer_elem_one.xfer_user_data = rx_pkt;
ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
- &gsi_xfer_elem_one, true);
+ &gsi_xfer_elem_one, false);
if (ret != GSI_STATUS_SUCCESS) {
IPAERR("failed to provide buffer: %d\n",
ret);
break;
}
+
+ /*
+ * As ringing the doorbell is a costly operation, notify
+ * GSI of new buffers only once the threshold is exceeded
+ */
+ if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
+ sys->len_pending_xfer = 0;
+ gsi_start_xfer(sys->ep->gsi_chan_hdl);
+ }
+
rx_len_cached = ++sys->len;
curr = (curr + 1) % sys->repl.capacity;
/* ensure write is done before setting head index */
@@ -1863,7 +1901,8 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
queue_work(sys->repl_wq, &sys->repl_work);
- if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
+ if (rx_len_cached - sys->len_pending_xfer
+ <= IPA_DEFAULT_SYS_YELLOW_WM) {
if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
@@ -2641,6 +2680,7 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
if (in->client == IPA_CLIENT_APPS_CMD_PROD ||
in->client == IPA_CLIENT_APPS_WAN_PROD) {
sys->policy = IPA_POLICY_INTR_MODE;
+ sys->use_comm_evt_ring = false;
return 0;
}
@@ -2652,12 +2692,12 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
if (IPA_CLIENT_IS_PROD(in->client)) {
if (sys->ep->skip_ep_cfg) {
sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->use_comm_evt_ring = true;
atomic_set(&sys->curr_polling_state, 0);
} else {
- sys->policy = IPA_POLICY_NOINTR_MODE;
- sys->ep->status.status_en = true;
- sys->ep->status.status_ep = ipa3_get_ep_mapping(
- IPA_CLIENT_APPS_LAN_CONS);
+ sys->policy = IPA_POLICY_INTR_MODE;
+ sys->use_comm_evt_ring = true;
+ INIT_WORK(&sys->work, ipa3_send_nop_desc);
}
} else {
if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
@@ -3325,6 +3365,46 @@ static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
}
}
+int ipa3_alloc_common_event_ring(void)
+{
+ struct gsi_evt_ring_props gsi_evt_ring_props;
+ dma_addr_t evt_dma_addr;
+ int result;
+
+ memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
+ gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
+ gsi_evt_ring_props.intr = GSI_INTR_IRQ;
+ gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+
+ gsi_evt_ring_props.ring_len = IPA_COMMON_EVENT_RING_SIZE;
+
+ gsi_evt_ring_props.ring_base_vaddr =
+ dma_alloc_coherent(ipa3_ctx->pdev,
+ gsi_evt_ring_props.ring_len, &evt_dma_addr, GFP_KERNEL);
+ if (!gsi_evt_ring_props.ring_base_vaddr) {
+ IPAERR("fail to dma alloc %u bytes\n",
+ gsi_evt_ring_props.ring_len);
+ return -ENOMEM;
+ }
+ gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
+ gsi_evt_ring_props.int_modt = 0;
+ gsi_evt_ring_props.int_modc = 1; /* moderation comes from channel */
+ gsi_evt_ring_props.rp_update_addr = 0;
+ gsi_evt_ring_props.exclusive = false;
+ gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
+ gsi_evt_ring_props.user_data = NULL;
+
+ result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
+ ipa3_ctx->gsi_dev_hdl, &ipa3_ctx->gsi_evt_comm_hdl);
+ if (result) {
+ IPAERR("gsi_alloc_evt_ring failed %d\n", result);
+ return result;
+ }
+ ipa3_ctx->gsi_evt_comm_ring_rem = IPA_COMMON_EVENT_RING_SIZE;
+
+ return 0;
+}
+
static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
struct ipa3_ep_context *ep)
{
@@ -3344,11 +3424,18 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
evt_dma_addr = 0;
ep->gsi_evt_ring_hdl = ~0;
memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
- /*
- * allocate event ring for all interrupt-policy
- * pipes and IPA consumers pipes
- */
- if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
+ if (ep->sys->use_comm_evt_ring) {
+ if (ipa3_ctx->gsi_evt_comm_ring_rem < 2 * in->desc_fifo_sz) {
+ IPAERR("not enough space in common event ring\n");
+ IPAERR("available: %d needed: %d\n",
+ ipa3_ctx->gsi_evt_comm_ring_rem,
+ 2 * in->desc_fifo_sz);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ ipa3_ctx->gsi_evt_comm_ring_rem -= (2 * in->desc_fifo_sz);
+ ep->gsi_evt_ring_hdl = ipa3_ctx->gsi_evt_comm_hdl;
+ } else if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
IPA_CLIENT_IS_CONS(ep->client)) {
gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
gsi_evt_ring_props.intr = GSI_INTR_IRQ;
@@ -3375,10 +3462,7 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
gsi_evt_ring_props.ring_base_vaddr;
gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
- if (ep->client == IPA_CLIENT_APPS_WAN_PROD)
- gsi_evt_ring_props.int_modc = 248;
- else
- gsi_evt_ring_props.int_modc = 1;
+ gsi_evt_ring_props.int_modc = 1;
IPADBG("client=%d moderation threshold cycles=%u cnt=%u\n",
ep->client,
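
The replenish path above queues each GSI transfer element with the doorbell deferred (the ring-doorbell flag to gsi_queue_xfer() is now false) and only rings it through gsi_start_xfer() once IPA_REPL_XFER_THRESH descriptors have accumulated, which is why the watermark checks now subtract len_pending_xfer from the cached length. A minimal userspace sketch of that batching idea follows; the helper names and the threshold value are illustrative, not the driver's API.

#include <stdio.h>

#define XFER_THRESH 10                 /* stand-in for IPA_REPL_XFER_THRESH */

static unsigned int pending_xfers;     /* mirrors sys->len_pending_xfer */

/* stand-ins for gsi_queue_xfer(..., ring_db = false) and gsi_start_xfer() */
static void queue_xfer_no_doorbell(int buf) { printf("queued buffer %d\n", buf); }
static void ring_doorbell(void)             { printf("doorbell rung\n"); }

static void replenish_one(int buf)
{
	queue_xfer_no_doorbell(buf);

	/* ringing the doorbell is costly, so notify the hardware in batches */
	if (++pending_xfers >= XFER_THRESH) {
		pending_xfers = 0;
		ring_doorbell();
	}
}

int main(void)
{
	for (int i = 0; i < 25; i++)
		replenish_one(i);
	/* 25 buffers queued, but the doorbell is rung only twice */
	return 0;
}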
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 1a0d3ad..7419a64 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -45,6 +45,7 @@
#define IPA3_MAX_NUM_PIPES 31
#define IPA_SYS_DESC_FIFO_SZ 0x800
#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
+#define IPA_COMMON_EVENT_RING_SIZE 0x7C00
#define IPA_LAN_RX_HEADER_LENGTH (2)
#define IPA_QMAP_HEADER_LENGTH (4)
#define IPA_DL_CHECKSUM_LENGTH (8)
@@ -591,9 +592,11 @@ struct ipa3_repl_ctx {
*/
struct ipa3_sys_context {
u32 len;
+ u32 len_pending_xfer;
atomic_t curr_polling_state;
struct delayed_work switch_to_intr_work;
enum ipa3_sys_pipe_policy policy;
+ bool use_comm_evt_ring;
int (*pyld_hdlr)(struct sk_buff *skb, struct ipa3_sys_context *sys);
struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags);
void (*free_skb)(struct sk_buff *skb);
@@ -616,6 +619,7 @@ struct ipa3_sys_context {
struct list_head head_desc_list;
struct list_head rcycl_list;
spinlock_t spinlock;
+ struct hrtimer db_timer;
struct workqueue_struct *wq;
struct workqueue_struct *repl_wq;
struct ipa3_status_stats *status_stat;
@@ -702,6 +706,7 @@ struct ipa3_dma_xfer_wrapper {
* @user1: cookie1 for above callback
* @user2: cookie2 for above callback
* @xfer_done: completion object for sync completion
+ * @skip_db_ring: specifies whether the GSI doorbell should not be rung
*/
struct ipa3_desc {
enum ipa3_desc_type type;
@@ -715,6 +720,7 @@ struct ipa3_desc {
void *user1;
int user2;
struct completion xfer_done;
+ bool skip_db_ring;
};
/**
@@ -1133,6 +1139,8 @@ struct ipa3_context {
struct workqueue_struct *transport_power_mgmt_wq;
bool tag_process_before_gating;
struct ipa3_transport_pm transport_pm;
+ unsigned long gsi_evt_comm_hdl;
+ u32 gsi_evt_comm_ring_rem;
u32 clnt_hdl_cmd;
u32 clnt_hdl_data_in;
u32 clnt_hdl_data_out;
@@ -1165,6 +1173,7 @@ struct ipa3_context {
u32 curr_ipa_clk_rate;
bool q6_proxy_clk_vote_valid;
u32 ipa_num_pipes;
+ dma_addr_t pkt_init_imm[IPA3_MAX_NUM_PIPES];
struct ipa3_wlan_comm_memb wc_memb;
@@ -1971,4 +1980,5 @@ bool ipa3_is_msm_device(void);
struct device *ipa3_get_pdev(void);
void ipa3_enable_dcd(void);
void ipa3_disable_prefetch(enum ipa_client_type client);
+int ipa3_alloc_common_event_ring(void);
#endif /* _IPA3_I_H_ */
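
ipa_gsi_setup_channel() above lets pipes that set use_comm_evt_ring share one pre-allocated event ring of IPA_COMMON_EVENT_RING_SIZE bytes: each pipe reserves twice its descriptor FIFO size from gsi_evt_comm_ring_rem, and setup fails loudly if the remaining space cannot cover it. A short sketch of that capacity bookkeeping, with illustrative pipe sizes:

#include <stdio.h>

#define COMMON_EVT_RING_SIZE 0x7C00u    /* like IPA_COMMON_EVENT_RING_SIZE */

static unsigned int ring_remaining = COMMON_EVT_RING_SIZE;

/* reserve space on the shared event ring for one pipe; 0 on success */
static int reserve_evt_space(unsigned int desc_fifo_sz)
{
	unsigned int needed = 2 * desc_fifo_sz;

	if (ring_remaining < needed) {
		fprintf(stderr, "need %u, only %u left\n", needed, ring_remaining);
		return -1;
	}
	ring_remaining -= needed;
	return 0;
}

int main(void)
{
	/* e.g. a handful of pipes with a 0x800-byte descriptor FIFO each */
	for (int i = 0; i < 6; i++)
		printf("pipe %d -> %s, remaining 0x%X\n", i,
		       reserve_evt_space(0x800) ? "rejected" : "reserved",
		       ring_remaining);
	return 0;
}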
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index c69a3d0..799246b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1207,6 +1207,8 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
IPADBG("Skipping endpoint configuration.\n");
}
+ ipa3_enable_data_path(ipa_ep_idx);
+
out->clnt_hdl = ipa_ep_idx;
if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c
index cf8f000..dbe2a08 100644
--- a/drivers/regulator/qpnp-labibb-regulator.c
+++ b/drivers/regulator/qpnp-labibb-regulator.c
@@ -19,16 +19,19 @@
#include <linux/kernel.h>
#include <linux/regmap.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/spmi.h>
#include <linux/platform_device.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/qpnp/qpnp-revid.h>
+#include <linux/regulator/qpnp-labibb-regulator.h>
#define QPNP_LABIBB_REGULATOR_DRIVER_NAME "qcom,qpnp-labibb-regulator"
@@ -594,6 +597,7 @@ struct qpnp_labibb {
const struct lab_ver_ops *lab_ver_ops;
struct mutex bus_mutex;
enum qpnp_labibb_mode mode;
+ struct work_struct lab_vreg_ok_work;
bool standalone;
bool ttw_en;
bool in_ttw_mode;
@@ -603,10 +607,13 @@ struct qpnp_labibb {
bool ttw_force_lab_on;
bool skip_2nd_swire_cmd;
bool pfm_enable;
+ bool notify_lab_vreg_ok_sts;
u32 swire_2nd_cmd_delay;
u32 swire_ibb_ps_enable_delay;
};
+static RAW_NOTIFIER_HEAD(labibb_notifier);
+
struct ibb_ver_ops {
int (*set_default_voltage)(struct qpnp_labibb *labibb,
bool use_default);
@@ -2124,6 +2131,36 @@ static int qpnp_labibb_regulator_ttw_mode_exit(struct qpnp_labibb *labibb)
return rc;
}
+static void qpnp_lab_vreg_notifier_work(struct work_struct *work)
+{
+ int rc = 0;
+ int retries = 1000, dly = 5000;
+ u8 val;
+ struct qpnp_labibb *labibb = container_of(work, struct qpnp_labibb,
+ lab_vreg_ok_work);
+
+ while (retries--) {
+ rc = qpnp_labibb_read(labibb, labibb->lab_base +
+ REG_LAB_STATUS1, &val, 1);
+ if (rc < 0) {
+ pr_err("read register %x failed rc = %d\n",
+ REG_LAB_STATUS1, rc);
+ return;
+ }
+
+ if (val & LAB_STATUS1_VREG_OK) {
+ raw_notifier_call_chain(&labibb_notifier,
+ LAB_VREG_OK, NULL);
+ break;
+ }
+
+ usleep_range(dly, dly + 100);
+ }
+
+ if (retries < 0)
+ pr_err("LAB_VREG_OK not set, failed to notify\n");
+}
+
static int qpnp_labibb_regulator_enable(struct qpnp_labibb *labibb)
{
int rc;
@@ -2326,6 +2363,9 @@ static int qpnp_lab_regulator_enable(struct regulator_dev *rdev)
labibb->lab_vreg.vreg_enabled = 1;
}
+ if (labibb->notify_lab_vreg_ok_sts)
+ schedule_work(&labibb->lab_vreg_ok_work);
+
return 0;
}
@@ -2578,6 +2618,9 @@ static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb,
return rc;
}
+ labibb->notify_lab_vreg_ok_sts = of_property_read_bool(of_node,
+ "qcom,notify-lab-vreg-ok-sts");
+
rc = of_property_read_u32(of_node, "qcom,qpnp-lab-soft-start",
&(labibb->lab_vreg.soft_start));
if (!rc) {
@@ -3817,6 +3860,8 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
goto fail_registration;
}
}
+
+ INIT_WORK(&labibb->lab_vreg_ok_work, qpnp_lab_vreg_notifier_work);
dev_set_drvdata(&pdev->dev, labibb);
pr_info("LAB/IBB registered successfully, lab_vreg enable=%d ibb_vreg enable=%d swire_control=%d\n",
labibb->lab_vreg.vreg_enabled,
@@ -3834,6 +3879,18 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
return rc;
}
+int qpnp_labibb_notifier_register(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&labibb_notifier, nb);
+}
+EXPORT_SYMBOL(qpnp_labibb_notifier_register);
+
+int qpnp_labibb_notifier_unregister(struct notifier_block *nb)
+{
+ return raw_notifier_chain_unregister(&labibb_notifier, nb);
+}
+EXPORT_SYMBOL(qpnp_labibb_notifier_unregister);
+
static int qpnp_labibb_regulator_remove(struct platform_device *pdev)
{
struct qpnp_labibb *labibb = dev_get_drvdata(&pdev->dev);
@@ -3843,6 +3900,8 @@ static int qpnp_labibb_regulator_remove(struct platform_device *pdev)
regulator_unregister(labibb->lab_vreg.rdev);
if (labibb->ibb_vreg.rdev)
regulator_unregister(labibb->ibb_vreg.rdev);
+
+ cancel_work_sync(&labibb->lab_vreg_ok_work);
}
return 0;
}
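
The LAB changes above add a worker that polls LAB_STATUS1 until LAB_VREG_OK is set and then fires a raw notifier chain, which other drivers can join through qpnp_labibb_notifier_register(). The sketch below shows the publish/subscribe shape of that mechanism in plain userspace C; the linked-list notifier is a simplified stand-in for the kernel's raw_notifier API and the names are illustrative.

#include <stdio.h>

#define LAB_VREG_OK 1                       /* event id, as used by the chain */

struct notifier {
	int (*cb)(unsigned long event, void *data);
	struct notifier *next;
};

static struct notifier *chain;              /* stands in for labibb_notifier */

static void notifier_register(struct notifier *nb)
{
	nb->next = chain;
	chain = nb;
}

static void notifier_call_chain(unsigned long event, void *data)
{
	for (struct notifier *nb = chain; nb; nb = nb->next)
		nb->cb(event, data);
}

/* consumer side: roughly what an interested regulator driver would do */
static int consumer_cb(unsigned long event, void *data)
{
	if (event == LAB_VREG_OK)
		printf("LAB_VREG_OK received, reconfigure dependent hardware\n");
	return 0;
}

int main(void)
{
	struct notifier nb = { .cb = consumer_cb };

	notifier_register(&nb);
	/* producer side: the polling worker fires this once VREG_OK reads back */
	notifier_call_chain(LAB_VREG_OK, NULL);
	return 0;
}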
diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c
index a08ade6..aef28db 100644
--- a/drivers/regulator/qpnp-lcdb-regulator.c
+++ b/drivers/regulator/qpnp-lcdb-regulator.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
@@ -31,6 +32,13 @@
#define INT_RT_STATUS_REG 0x10
#define VREG_OK_RT_STS_BIT BIT(0)
+#define SC_ERROR_RT_STS_BIT BIT(1)
+
+#define LCDB_STS3_REG 0x0A
+#define LDO_VREG_OK_BIT BIT(7)
+
+#define LCDB_STS4_REG 0x0B
+#define NCP_VREG_OK_BIT BIT(7)
#define LCDB_AUTO_TOUCH_WAKE_CTL_REG 0x40
#define EN_AUTO_TOUCH_WAKE_BIT BIT(7)
@@ -185,14 +193,21 @@ struct qpnp_lcdb {
struct platform_device *pdev;
struct regmap *regmap;
u32 base;
+ int sc_irq;
/* TTW params */
bool ttw_enable;
bool ttw_mode_sw;
+ /* top level DT params */
+ bool force_module_reenable;
+
/* status parameters */
bool lcdb_enabled;
bool settings_saved;
+ bool lcdb_sc_disable;
+ int sc_count;
+ ktime_t sc_module_enable_time;
struct mutex lcdb_mutex;
struct mutex read_write_mutex;
@@ -569,8 +584,11 @@ static int qpnp_lcdb_enable(struct qpnp_lcdb *lcdb)
int rc = 0, timeout, delay;
u8 val = 0;
- if (lcdb->lcdb_enabled)
+ if (lcdb->lcdb_enabled || lcdb->lcdb_sc_disable) {
+ pr_debug("lcdb_enabled=%d lcdb_sc_disable=%d\n",
+ lcdb->lcdb_enabled, lcdb->lcdb_sc_disable);
return 0;
+ }
if (lcdb->ttw_enable) {
rc = qpnp_lcdb_ttw_exit(lcdb);
@@ -588,6 +606,23 @@ static int qpnp_lcdb_enable(struct qpnp_lcdb *lcdb)
goto fail_enable;
}
+ if (lcdb->force_module_reenable) {
+ val = 0;
+ rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+ &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to enable lcdb rc= %d\n", rc);
+ goto fail_enable;
+ }
+ val = MODULE_EN_BIT;
+ rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+ &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to disable lcdb rc= %d\n", rc);
+ goto fail_enable;
+ }
+ }
+
/* poll for vreg_ok */
timeout = 10;
delay = lcdb->bst.soft_start_us + lcdb->ldo.soft_start_us +
@@ -656,6 +691,111 @@ static int qpnp_lcdb_disable(struct qpnp_lcdb *lcdb)
return rc;
}
+#define LCDB_SC_RESET_CNT_DLY_US 1000000
+#define LCDB_SC_CNT_MAX 10
+static int qpnp_lcdb_handle_sc_event(struct qpnp_lcdb *lcdb)
+{
+ int rc = 0;
+ s64 elapsed_time_us;
+
+ mutex_lock(&lcdb->lcdb_mutex);
+ rc = qpnp_lcdb_disable(lcdb);
+ if (rc < 0) {
+ pr_err("Failed to disable lcdb rc=%d\n", rc);
+ goto unlock_mutex;
+ }
+
+ /* Check if the SC re-occurred immediately */
+ elapsed_time_us = ktime_us_delta(ktime_get(),
+ lcdb->sc_module_enable_time);
+ if (elapsed_time_us > LCDB_SC_RESET_CNT_DLY_US) {
+ lcdb->sc_count = 0;
+ } else if (lcdb->sc_count > LCDB_SC_CNT_MAX) {
+ pr_err("SC trigged %d times, disabling LCDB forever!\n",
+ lcdb->sc_count);
+ lcdb->lcdb_sc_disable = true;
+ goto unlock_mutex;
+ }
+ lcdb->sc_count++;
+ lcdb->sc_module_enable_time = ktime_get();
+
+ /* delay for SC to clear */
+ usleep_range(10000, 10100);
+
+ rc = qpnp_lcdb_enable(lcdb);
+ if (rc < 0)
+ pr_err("Failed to enable lcdb rc=%d\n", rc);
+
+unlock_mutex:
+ mutex_unlock(&lcdb->lcdb_mutex);
+ return rc;
+}
+
+static irqreturn_t qpnp_lcdb_sc_irq_handler(int irq, void *data)
+{
+ struct qpnp_lcdb *lcdb = data;
+ int rc;
+ u8 val, val2[2] = {0};
+
+ rc = qpnp_lcdb_read(lcdb, lcdb->base + INT_RT_STATUS_REG, &val, 1);
+ if (rc < 0)
+ goto irq_handled;
+
+ if (val & SC_ERROR_RT_STS_BIT) {
+ rc = qpnp_lcdb_read(lcdb,
+ lcdb->base + LCDB_MISC_CTL_REG, &val, 1);
+ if (rc < 0)
+ goto irq_handled;
+
+ if (val & EN_TOUCH_WAKE_BIT) {
+ /* blanking time */
+ usleep_range(300, 310);
+ /*
+ * The status registers need to be written with any value
+ * before reading
+ */
+ rc = qpnp_lcdb_write(lcdb,
+ lcdb->base + LCDB_STS3_REG, val2, 2);
+ if (rc < 0)
+ goto irq_handled;
+
+ rc = qpnp_lcdb_read(lcdb,
+ lcdb->base + LCDB_STS3_REG, val2, 2);
+ if (rc < 0)
+ goto irq_handled;
+
+ if (!(val2[0] & LDO_VREG_OK_BIT) ||
+ !(val2[1] & NCP_VREG_OK_BIT)) {
+ rc = qpnp_lcdb_handle_sc_event(lcdb);
+ if (rc < 0) {
+ pr_err("Failed to handle SC rc=%d\n",
+ rc);
+ goto irq_handled;
+ }
+ }
+ } else {
+ /* blanking time */
+ usleep_range(2000, 2100);
+ /* Read the SC status again to confirm true SC */
+ rc = qpnp_lcdb_read(lcdb,
+ lcdb->base + INT_RT_STATUS_REG, &val, 1);
+ if (rc < 0)
+ goto irq_handled;
+
+ if (val & SC_ERROR_RT_STS_BIT) {
+ rc = qpnp_lcdb_handle_sc_event(lcdb);
+ if (rc < 0) {
+ pr_err("Failed to handle SC rc=%d\n",
+ rc);
+ goto irq_handled;
+ }
+ }
+ }
+ }
+irq_handled:
+ return IRQ_HANDLED;
+}
+
#define MIN_BST_VOLTAGE_MV 4700
#define MAX_BST_VOLTAGE_MV 6250
#define MIN_VOLTAGE_MV 4000
@@ -1534,6 +1674,18 @@ static int qpnp_lcdb_hw_init(struct qpnp_lcdb *lcdb)
return rc;
}
+ if (lcdb->sc_irq >= 0) {
+ lcdb->sc_count = 0;
+ rc = devm_request_threaded_irq(lcdb->dev, lcdb->sc_irq,
+ NULL, qpnp_lcdb_sc_irq_handler, IRQF_ONESHOT,
+ "qpnp_lcdb_sc_irq", lcdb);
+ if (rc < 0) {
+ pr_err("Unable to request sc(%d) irq rc=%d\n",
+ lcdb->sc_irq, rc);
+ return rc;
+ }
+ }
+
if (!is_lcdb_enabled(lcdb)) {
rc = qpnp_lcdb_read(lcdb, lcdb->base +
LCDB_MODULE_RDY_REG, &val, 1);
@@ -1590,6 +1742,9 @@ static int qpnp_lcdb_parse_dt(struct qpnp_lcdb *lcdb)
}
}
+ lcdb->force_module_reenable = of_property_read_bool(node,
+ "qcom,force-module-reenable");
+
if (of_property_read_bool(node, "qcom,ttw-enable")) {
rc = qpnp_lcdb_parse_ttw(lcdb);
if (rc < 0) {
@@ -1599,6 +1754,10 @@ static int qpnp_lcdb_parse_dt(struct qpnp_lcdb *lcdb)
lcdb->ttw_enable = true;
}
+ lcdb->sc_irq = platform_get_irq_byname(lcdb->pdev, "sc-irq");
+ if (lcdb->sc_irq < 0)
+ pr_debug("sc irq is not defined\n");
+
return rc;
}
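
qpnp_lcdb_handle_sc_event() above applies a window-based retry policy: every short-circuit event disables and re-enables the module, the retrigger counter is cleared when more than LCDB_SC_RESET_CNT_DLY_US has passed since the last recovery, and the module is latched off once LCDB_SC_CNT_MAX rapid retriggers pile up. A self-contained sketch of that policy, with a monotonic clock standing in for ktime and the thresholds reused only as illustrative values:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define RESET_CNT_DLY_US 1000000LL   /* quiet window, like LCDB_SC_RESET_CNT_DLY_US */
#define SC_CNT_MAX       10          /* give-up limit, like LCDB_SC_CNT_MAX */

static int sc_count;
static long long last_enable_us;
static bool latched_off;

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

/* returns true if the module may be re-enabled after this SC event */
static bool handle_sc_event(void)
{
	long long elapsed = now_us() - last_enable_us;

	if (elapsed > RESET_CNT_DLY_US)
		sc_count = 0;            /* quiet for a while: forget old events */
	else if (sc_count > SC_CNT_MAX) {
		latched_off = true;      /* too many rapid retriggers: stay off */
		return false;
	}

	sc_count++;
	last_enable_us = now_us();
	return true;
}

int main(void)
{
	for (int i = 0; i < 15 && !latched_off; i++)
		printf("SC event %d -> re-enable: %s\n", i,
		       handle_sc_event() ? "yes" : "no");
	return 0;
}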
diff --git a/drivers/regulator/qpnp-oledb-regulator.c b/drivers/regulator/qpnp-oledb-regulator.c
index 8d017fb..c012f37 100644
--- a/drivers/regulator/qpnp-oledb-regulator.c
+++ b/drivers/regulator/qpnp-oledb-regulator.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/spmi.h>
@@ -24,6 +25,8 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/qpnp-labibb-regulator.h>
+#include <linux/qpnp/qpnp-pbs.h>
#define QPNP_OLEDB_REGULATOR_DRIVER_NAME "qcom,qpnp-oledb-regulator"
#define OLEDB_VOUT_STEP_MV 100
@@ -91,6 +94,12 @@
#define OLEDB_ENABLE_NLIMIT_BIT_SHIFT 7
#define OLEDB_NLIMIT_PGM_MASK GENMASK(1, 0)
+#define OLEDB_SPARE_CTL 0xE9
+#define OLEDB_FORCE_PD_CTL_SPARE_BIT BIT(7)
+
+#define OLEDB_PD_PBS_TRIGGER_BIT BIT(0)
+
+#define OLEDB_SEC_UNLOCK_CODE 0xA5
#define OLEDB_PSM_HYS_CTRL_MIN 13
#define OLEDB_PSM_HYS_CTRL_MAX 26
@@ -150,6 +159,9 @@ struct qpnp_oledb {
struct qpnp_oledb_psm_ctl psm_ctl;
struct qpnp_oledb_pfm_ctl pfm_ctl;
struct qpnp_oledb_fast_precharge_ctl fast_prechg_ctl;
+ struct notifier_block oledb_nb;
+ struct mutex bus_lock;
+ struct device_node *pbs_dev_node;
u32 base;
u8 mod_enable;
@@ -168,6 +180,7 @@ struct qpnp_oledb {
bool ext_pin_control;
bool dynamic_ext_pinctl_config;
bool pbs_control;
+ bool force_pd_control;
};
static const u16 oledb_warmup_dly_ns[] = {6700, 13300, 26700, 53400};
@@ -184,11 +197,13 @@ static int qpnp_oledb_read(struct qpnp_oledb *oledb, u32 address,
int rc = 0;
struct platform_device *pdev = oledb->pdev;
+ mutex_lock(&oledb->bus_lock);
rc = regmap_bulk_read(oledb->regmap, address, val, count);
if (rc)
pr_err("Failed to read address=0x%02x sid=0x%02x rc=%d\n",
address, to_spmi_device(pdev->dev.parent)->usid, rc);
+ mutex_unlock(&oledb->bus_lock);
return rc;
}
@@ -197,6 +212,7 @@ static int qpnp_oledb_masked_write(struct qpnp_oledb *oledb,
{
int rc;
+ mutex_lock(&oledb->bus_lock);
rc = regmap_update_bits(oledb->regmap, address, mask, val);
if (rc < 0)
pr_err("Failed to write address 0x%04X, rc = %d\n",
@@ -205,6 +221,31 @@ static int qpnp_oledb_masked_write(struct qpnp_oledb *oledb,
pr_debug("Wrote 0x%02X to addr 0x%04X\n",
val, address);
+ mutex_unlock(&oledb->bus_lock);
+ return rc;
+}
+
+#define OLEDB_SEC_ACCESS 0xD0
+static int qpnp_oledb_sec_masked_write(struct qpnp_oledb *oledb, u16 address,
+ u8 mask, u8 val)
+{
+ int rc = 0;
+ u8 sec_val = OLEDB_SEC_UNLOCK_CODE;
+ u16 sec_reg_addr = (address & 0xFF00) | OLEDB_SEC_ACCESS;
+
+ mutex_lock(&oledb->bus_lock);
+ rc = regmap_write(oledb->regmap, sec_reg_addr, sec_val);
+ if (rc < 0) {
+ pr_err("register %x failed rc = %d\n", sec_reg_addr, rc);
+ goto error;
+ }
+
+ rc = regmap_update_bits(oledb->regmap, address, mask, val);
+ if (rc < 0)
+ pr_err("spmi write failed: addr=%03X, rc=%d\n", address, rc);
+
+error:
+ mutex_unlock(&oledb->bus_lock);
return rc;
}
@@ -214,6 +255,7 @@ static int qpnp_oledb_write(struct qpnp_oledb *oledb, u16 address, u8 *val,
int rc = 0;
struct platform_device *pdev = oledb->pdev;
+ mutex_lock(&oledb->bus_lock);
rc = regmap_bulk_write(oledb->regmap, address, val, count);
if (rc)
pr_err("Failed to write address=0x%02x sid=0x%02x rc=%d\n",
@@ -222,7 +264,8 @@ static int qpnp_oledb_write(struct qpnp_oledb *oledb, u16 address, u8 *val,
pr_debug("Wrote 0x%02X to addr 0x%04X\n",
*val, address);
- return 0;
+ mutex_unlock(&oledb->bus_lock);
+ return rc;
}
static int qpnp_oledb_regulator_enable(struct regulator_dev *rdev)
@@ -285,6 +328,8 @@ static int qpnp_oledb_regulator_enable(struct regulator_dev *rdev)
static int qpnp_oledb_regulator_disable(struct regulator_dev *rdev)
{
int rc = 0;
+ u8 trigger_bitmap = OLEDB_PD_PBS_TRIGGER_BIT;
+ u8 val;
struct qpnp_oledb *oledb = rdev_get_drvdata(rdev);
@@ -314,6 +359,27 @@ static int qpnp_oledb_regulator_disable(struct regulator_dev *rdev)
pr_debug("Register-control mode, module disabled\n");
}
+ if (oledb->force_pd_control) {
+ rc = qpnp_oledb_read(oledb, oledb->base + OLEDB_SPARE_CTL,
+ &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to read OLEDB_SPARE_CTL rc=%d\n", rc);
+ return rc;
+ }
+
+ if (val & OLEDB_FORCE_PD_CTL_SPARE_BIT) {
+ rc = qpnp_pbs_trigger_event(oledb->pbs_dev_node,
+ trigger_bitmap);
+ if (rc < 0) {
+ pr_err("Failed to trigger the PBS sequence\n");
+ return rc;
+ }
+ pr_debug("PBS event triggered\n");
+ } else {
+ pr_debug("OLEDB_SPARE_CTL register bit not set\n");
+ }
+ }
+
oledb->mod_enable = false;
return rc;
@@ -1034,6 +1100,18 @@ static int qpnp_oledb_parse_dt(struct qpnp_oledb *oledb)
oledb->pbs_control =
of_property_read_bool(of_node, "qcom,pbs-control");
+ oledb->force_pd_control =
+ of_property_read_bool(of_node, "qcom,force-pd-control");
+
+ if (oledb->force_pd_control) {
+ oledb->pbs_dev_node = of_parse_phandle(of_node,
+ "qcom,pbs-client", 0);
+ if (!oledb->pbs_dev_node) {
+ pr_err("Missing qcom,pbs-client property\n");
+ return -EINVAL;
+ }
+ }
+
oledb->current_voltage = -EINVAL;
rc = of_property_read_u32(of_node, "qcom,oledb-init-voltage-mv",
&oledb->current_voltage);
@@ -1116,6 +1194,52 @@ static int qpnp_oledb_parse_dt(struct qpnp_oledb *oledb)
return rc;
}
+static int qpnp_oledb_force_pulldown_config(struct qpnp_oledb *oledb)
+{
+ int rc = 0;
+ u8 val;
+
+ rc = qpnp_oledb_sec_masked_write(oledb, oledb->base +
+ OLEDB_SPARE_CTL, OLEDB_FORCE_PD_CTL_SPARE_BIT, 0);
+ if (rc < 0) {
+ pr_err("Failed to write SPARE_CTL rc=%d\n", rc);
+ return rc;
+ }
+
+ val = 1;
+ rc = qpnp_oledb_write(oledb, oledb->base + OLEDB_PD_CTL,
+ &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to write PD_CTL rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_oledb_masked_write(oledb, oledb->base +
+ OLEDB_SWIRE_CONTROL, OLEDB_EN_SWIRE_PD_UPD_BIT, 0);
+ if (rc < 0)
+ pr_err("Failed to write SWIRE_CTL for pbs mode rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+static int qpnp_labibb_notifier_cb(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int rc = 0;
+ struct qpnp_oledb *oledb = container_of(nb, struct qpnp_oledb,
+ oledb_nb);
+
+ if (action == LAB_VREG_OK) {
+ /* Disable SWIRE pull-down control and enable it via SPMI mode */
+ rc = qpnp_oledb_force_pulldown_config(oledb);
+ if (rc < 0)
+ return NOTIFY_STOP;
+ }
+
+ return NOTIFY_OK;
+}
+
static int qpnp_oledb_regulator_probe(struct platform_device *pdev)
{
int rc = 0;
@@ -1143,6 +1267,7 @@ static int qpnp_oledb_regulator_probe(struct platform_device *pdev)
return rc;
}
+ mutex_init(&(oledb->bus_lock));
oledb->base = val;
rc = qpnp_oledb_parse_dt(oledb);
if (rc < 0) {
@@ -1156,18 +1281,47 @@ static int qpnp_oledb_regulator_probe(struct platform_device *pdev)
return rc;
}
+ if (oledb->force_pd_control) {
+ oledb->oledb_nb.notifier_call = qpnp_labibb_notifier_cb;
+ rc = qpnp_labibb_notifier_register(&oledb->oledb_nb);
+ if (rc < 0) {
+ pr_err("Failed to register qpnp_labibb_notifier_cb\n");
+ return rc;
+ }
+ }
+
rc = qpnp_oledb_register_regulator(oledb);
- if (!rc)
- pr_info("OLEDB registered successfully, ext_pin_en=%d mod_en=%d cuurent_voltage=%d mV\n",
+ if (rc < 0) {
+ pr_err("Failed to register regulator rc=%d\n", rc);
+ goto out;
+ }
+ pr_info("OLEDB registered successfully, ext_pin_en=%d mod_en=%d current_voltage=%d mV\n",
oledb->ext_pin_control, oledb->mod_enable,
oledb->current_voltage);
+ return 0;
+
+out:
+ if (oledb->force_pd_control) {
+ rc = qpnp_labibb_notifier_unregister(&oledb->oledb_nb);
+ if (rc < 0)
+ pr_err("Failed to unregister lab_vreg_ok notifier\n");
+ }
return rc;
}
static int qpnp_oledb_regulator_remove(struct platform_device *pdev)
{
- return 0;
+ int rc = 0;
+ struct qpnp_oledb *oledb = platform_get_drvdata(pdev);
+
+ if (oledb->force_pd_control) {
+ rc = qpnp_labibb_notifier_unregister(&oledb->oledb_nb);
+ if (rc < 0)
+ pr_err("Failed to unregister lab_vreg_ok notifier\n");
+ }
+
+ return rc;
}
const struct of_device_id qpnp_oledb_regulator_match_table[] = {
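
qpnp_oledb_sec_masked_write() above captures the secured-register pattern: under the new bus lock, write the unlock code 0xA5 to the peripheral's SEC_ACCESS offset (0xD0 within the same 256-byte block), then immediately perform the protected update so no other bus access can slip in between. A small sketch of that two-step sequence against a fake register map; the base address and helpers are hypothetical.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define SEC_ACCESS_OFFSET 0xD0     /* like OLEDB_SEC_ACCESS */
#define SEC_UNLOCK_CODE   0xA5     /* like OLEDB_SEC_UNLOCK_CODE */

static uint8_t regs[0x10000];      /* fake peripheral register space */
static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;

static void reg_write(uint16_t addr, uint8_t val) { regs[addr] = val; }

/* unlock then write, atomic with respect to any other bus user */
static void sec_masked_write(uint16_t addr, uint8_t mask, uint8_t val)
{
	uint16_t sec_reg = (addr & 0xFF00) | SEC_ACCESS_OFFSET;

	pthread_mutex_lock(&bus_lock);
	reg_write(sec_reg, SEC_UNLOCK_CODE);                  /* arm secure access */
	reg_write(addr, (regs[addr] & ~mask) | (val & mask)); /* protected update */
	pthread_mutex_unlock(&bus_lock);
}

int main(void)
{
	regs[0xA0E9] = 0xFF;                   /* hypothetical base 0xA000 + 0xE9 */
	sec_masked_write(0xA0E9, 0x80, 0x00);  /* clear a spare-control style bit */
	printf("reg 0xA0E9 = 0x%02X\n", regs[0xA0E9]);
	return 0;
}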
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index 6607fd4..bc2d2d4 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1412,8 +1412,10 @@ static ssize_t ufsdbg_reset_controller_write(struct file *filp,
struct ufs_hba *hba = filp->f_mapping->host->i_private;
unsigned long flags;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+ spin_lock_irqsave(hba->host->host_lock, flags);
/*
* simulating a dummy error in order to "convince"
* eh_work to actually reset the controller
@@ -1421,9 +1423,13 @@ static ssize_t ufsdbg_reset_controller_write(struct file *filp,
hba->saved_err |= INT_FATAL_ERRORS;
hba->silence_err_logs = true;
schedule_work(&hba->eh_work);
-
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->eh_work);
+
+ ufshcd_release(hba, false);
+ pm_runtime_put_sync(hba->dev);
+
return cnt;
}
@@ -1471,8 +1477,8 @@ DEFINE_SIMPLE_ATTRIBUTE(ufsdbg_err_state,
void ufsdbg_add_debugfs(struct ufs_hba *hba)
{
if (!hba) {
- dev_err(hba->dev, "%s: NULL hba, exiting", __func__);
- goto err_no_root;
+ pr_err("%s: NULL hba, exiting", __func__);
+ return;
}
hba->debugfs_files.debugfs_root = debugfs_create_dir(dev_name(hba->dev),
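
The debugfs reset hook above now brackets the injected fatal error with pm_runtime_get_sync() plus ufshcd_hold() and waits for eh_work with flush_work() before releasing both, so the controller stays powered and clocked for the whole recovery. The shape of that bracket, sketched with placeholder resource helpers rather than the UFS API:

#include <stdio.h>

/* placeholder helpers, not the real pm_runtime/ufshcd calls */
static void power_get(void)      { printf("runtime PM vote taken\n"); }
static void power_put(void)      { printf("runtime PM vote dropped\n"); }
static void clocks_hold(void)    { printf("clocks held\n"); }
static void clocks_release(void) { printf("clocks released\n"); }
static void schedule_recovery(void) { printf("error handler scheduled\n"); }
static void wait_recovery(void)     { printf("error handler finished\n"); }

/* keep the device powered and clocked across the whole injected recovery */
static void inject_controller_reset(void)
{
	power_get();
	clocks_hold();

	schedule_recovery();   /* like schedule_work(&hba->eh_work) */
	wait_recovery();       /* like flush_work(&hba->eh_work) */

	clocks_release();
	power_put();
}

int main(void)
{
	inject_controller_reset();
	return 0;
}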
diff --git a/drivers/scsi/ufs/ufs-qcom-debugfs.c b/drivers/scsi/ufs/ufs-qcom-debugfs.c
index 8532439..4547a6d 100644
--- a/drivers/scsi/ufs/ufs-qcom-debugfs.c
+++ b/drivers/scsi/ufs/ufs-qcom-debugfs.c
@@ -67,6 +67,7 @@ static int ufs_qcom_dbg_testbus_en_read(void *data, u64 *attr_val)
static int ufs_qcom_dbg_testbus_en_set(void *data, u64 attr_id)
{
struct ufs_qcom_host *host = data;
+ int ret = 0;
if (!host)
return -EINVAL;
@@ -76,7 +77,13 @@ static int ufs_qcom_dbg_testbus_en_set(void *data, u64 attr_id)
else
host->dbg_print_en &= ~UFS_QCOM_DBG_PRINT_TEST_BUS_EN;
- return ufs_qcom_testbus_config(host);
+ pm_runtime_get_sync(host->hba->dev);
+ ufshcd_hold(host->hba, false);
+ ret = ufs_qcom_testbus_config(host);
+ ufshcd_release(host->hba, false);
+ pm_runtime_put_sync(host->hba->dev);
+
+ return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(ufs_qcom_dbg_testbus_en_ops,
@@ -142,7 +149,11 @@ static ssize_t ufs_qcom_dbg_testbus_cfg_write(struct file *file,
* Sanity check of the {major, minor} tuple is done in the
* config function
*/
+ pm_runtime_get_sync(host->hba->dev);
+ ufshcd_hold(host->hba, false);
ret = ufs_qcom_testbus_config(host);
+ ufshcd_release(host->hba, false);
+ pm_runtime_put_sync(host->hba->dev);
if (!ret)
dev_dbg(host->hba->dev,
"%s: New configuration: major=%d, minor=%d\n",
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index 1ba4f2b..814d1dc 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/blkdev.h>
+#include <linux/spinlock.h>
#include <crypto/ice.h>
#include "ufs-qcom-ice.h"
@@ -168,13 +169,23 @@ int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
static void ufs_qcom_ice_cfg_work(struct work_struct *work)
{
+ unsigned long flags;
struct ice_data_setting ice_set;
struct ufs_qcom_host *qcom_host =
container_of(work, struct ufs_qcom_host, ice_cfg_work);
+ struct request *req_pending = NULL;
- if (!qcom_host->ice.vops->config_start || !qcom_host->req_pending)
+ if (!qcom_host->ice.vops->config_start)
return;
+ spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
+ req_pending = qcom_host->req_pending;
+ if (!req_pending) {
+ spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
+
/*
* config_start is called again as previous attempt returned -EAGAIN,
* this call shall now take care of the necessary key setup.
@@ -185,12 +196,17 @@ static void ufs_qcom_ice_cfg_work(struct work_struct *work)
qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
qcom_host->req_pending, &ice_set, false);
+ spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
+ qcom_host->req_pending = NULL;
+ spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
+
/*
* Resume with requests processing. We assume config_start has been
* successful, but even if it wasn't we still must resume in order to
* allow for the request to be retried.
*/
ufshcd_scsi_unblock_requests(qcom_host->hba);
+
}
/**
@@ -246,6 +262,7 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
struct ice_data_setting ice_set;
char cmd_op = cmd->cmnd[0];
int err;
+ unsigned long flags;
if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
dev_dbg(qcom_host->hba->dev, "%s: ice device is not enabled\n",
@@ -255,6 +272,10 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
if (qcom_host->ice.vops->config_start) {
memset(&ice_set, 0, sizeof(ice_set));
+
+ spin_lock_irqsave(
+ &qcom_host->ice_work_lock, flags);
+
err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
cmd->request, &ice_set, true);
if (err) {
@@ -272,19 +293,41 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
dev_dbg(qcom_host->hba->dev,
"%s: scheduling task for ice setup\n",
__func__);
- qcom_host->req_pending = cmd->request;
- if (schedule_work(&qcom_host->ice_cfg_work))
+
+ if (!qcom_host->req_pending) {
ufshcd_scsi_block_requests(
qcom_host->hba);
+ qcom_host->req_pending = cmd->request;
+
+ if (!schedule_work(
+ &qcom_host->ice_cfg_work)) {
+ qcom_host->req_pending = NULL;
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock,
+ flags);
+
+ ufshcd_scsi_unblock_requests(
+ qcom_host->hba);
+ return err;
+ }
+ }
+
} else {
- dev_err(qcom_host->hba->dev,
- "%s: error in ice_vops->config %d\n",
- __func__, err);
+ if (err != -EBUSY)
+ dev_err(qcom_host->hba->dev,
+ "%s: error in ice_vops->config %d\n",
+ __func__, err);
}
+ spin_unlock_irqrestore(&qcom_host->ice_work_lock,
+ flags);
+
return err;
}
+ spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
+
if (ufs_qcom_is_data_cmd(cmd_op, true))
*enable = !ice_set.encr_bypass;
else if (ufs_qcom_is_data_cmd(cmd_op, false))
@@ -320,6 +363,7 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
unsigned int bypass = 0;
struct request *req;
char cmd_op;
+ unsigned long flags;
if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
@@ -339,7 +383,8 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
req = cmd->request;
if (req->bio)
- lba = req->bio->bi_iter.bi_sector;
+ lba = (req->bio->bi_iter.bi_sector) >>
+ UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
slot = req->tag;
if (slot < 0 || slot > qcom_host->hba->nutrs) {
@@ -348,8 +393,13 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
return -EINVAL;
}
- memset(&ice_set, 0, sizeof(ice_set));
+
if (qcom_host->ice.vops->config_start) {
+ memset(&ice_set, 0, sizeof(ice_set));
+
+ spin_lock_irqsave(
+ &qcom_host->ice_work_lock, flags);
+
err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
req, &ice_set, true);
if (err) {
@@ -364,13 +414,44 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
* request processing.
*/
if (err == -EAGAIN) {
- qcom_host->req_pending = req;
- if (schedule_work(&qcom_host->ice_cfg_work))
+
+ dev_dbg(qcom_host->hba->dev,
+ "%s: scheduling task for ice setup\n",
+ __func__);
+
+ if (!qcom_host->req_pending) {
ufshcd_scsi_block_requests(
+ qcom_host->hba);
+ qcom_host->req_pending = cmd->request;
+ if (!schedule_work(
+ &qcom_host->ice_cfg_work)) {
+ qcom_host->req_pending = NULL;
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock,
+ flags);
+
+ ufshcd_scsi_unblock_requests(
qcom_host->hba);
+ return err;
+ }
+ }
+
+ } else {
+ if (err != -EBUSY)
+ dev_err(qcom_host->hba->dev,
+ "%s: error in ice_vops->config %d\n",
+ __func__, err);
}
- goto out;
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock, flags);
+
+ return err;
}
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock, flags);
}
cmd_op = cmd->cmnd[0];
@@ -390,6 +471,7 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
bypass = ice_set.decr_bypass ? UFS_QCOM_ICE_ENABLE_BYPASS :
UFS_QCOM_ICE_DISABLE_BYPASS;
+
/* Configure ICE index */
ctrl_info_val =
(ice_set.crypto_data.key_index &
@@ -398,8 +480,7 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
/* Configure data unit size of transfer request */
ctrl_info_val |=
- (UFS_QCOM_ICE_TR_DATA_UNIT_4_KB &
- MASK_UFS_QCOM_ICE_CTRL_INFO_CDU)
+ UFS_QCOM_ICE_TR_DATA_UNIT_4_KB
<< OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU;
/* Configure ICE bypass mode */
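
The ICE changes above wrap req_pending in a spinlock so that only one request at a time can block the SCSI queue and schedule the key-setup work; if schedule_work() reports the work was already queued, the claim is rolled back and the queue is unblocked before returning. A simplified sketch of that claim-or-back-off pattern, with a mutex standing in for the spinlock and made-up helper names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static void *req_pending;            /* mirrors qcom_host->req_pending */

static bool schedule_cfg_work(void) { return true; }  /* like schedule_work() */
static void block_requests(void)    { printf("request queue blocked\n"); }
static void unblock_requests(void)  { printf("request queue unblocked\n"); }

/* called when key configuration returned "try again later" for this request */
static void defer_key_setup(void *req)
{
	pthread_mutex_lock(&work_lock);
	if (!req_pending) {              /* only the first request claims the slot */
		block_requests();
		req_pending = req;
		if (!schedule_cfg_work()) {
			req_pending = NULL;  /* work already queued: undo the claim */
			pthread_mutex_unlock(&work_lock);
			unblock_requests();
			return;
		}
	}
	pthread_mutex_unlock(&work_lock);
}

int main(void)
{
	int r1, r2;

	defer_key_setup(&r1);   /* claims the slot and blocks the queue once */
	defer_key_setup(&r2);   /* slot already claimed: nothing to do */
	return 0;
}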
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 9706273..d326b80 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,6 +33,9 @@
#include "ufs-qcom-ice.h"
#include "ufs-qcom-debugfs.h"
+#define MAX_PROP_SIZE 32
+#define VDDP_REF_CLK_MIN_UV 1200000
+#define VDDP_REF_CLK_MAX_UV 1200000
/* TODO: further tuning for this parameter may be required */
#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US (10000) /* microseconds */
@@ -97,13 +100,10 @@ static int ufs_qcom_host_clk_get(struct device *dev,
int err = 0;
clk = devm_clk_get(dev, name);
- if (IS_ERR(clk)) {
+ if (IS_ERR(clk))
err = PTR_ERR(clk);
- dev_err(dev, "%s: failed to get %s err %d",
- __func__, name, err);
- } else {
+ else
*clk_out = clk;
- }
return err;
}
@@ -182,20 +182,29 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
err = ufs_qcom_host_clk_get(dev,
"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: failed to get rx_lane0_sync_clk, err %d",
+ __func__, err);
goto out;
+ }
err = ufs_qcom_host_clk_get(dev,
"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: failed to get tx_lane0_sync_clk, err %d",
+ __func__, err);
goto out;
+ }
/* In case of single lane per direction, don't read lane1 clocks */
if (host->hba->lanes_per_direction > 1) {
err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
&host->rx_l1_sync_clk);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: failed to get rx_lane1_sync_clk, err %d",
+ __func__, err);
goto out;
+ }
/* The tx lane1 clk could be muxed, hence keep this optional */
ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
@@ -387,8 +396,9 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
/**
* Returns zero for success and non-zero in case of a failure
*/
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
- u32 hs, u32 rate, bool update_link_startup_timer)
+static int __ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
+ u32 hs, u32 rate, bool update_link_startup_timer,
+ bool is_pre_scale_up)
{
int ret = 0;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -435,8 +445,12 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
}
list_for_each_entry(clki, &hba->clk_list_head, list) {
- if (!strcmp(clki->name, "core_clk"))
- core_clk_rate = clk_get_rate(clki->clk);
+ if (!strcmp(clki->name, "core_clk")) {
+ if (is_pre_scale_up)
+ core_clk_rate = clki->max_freq;
+ else
+ core_clk_rate = clk_get_rate(clki->clk);
+ }
}
/* If frequency is smaller than 1MHz, set to 1MHz */
@@ -533,6 +547,13 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
return ret;
}
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
+ u32 hs, u32 rate, bool update_link_startup_timer)
+{
+ return __ufs_qcom_cfg_timers(hba, gear, hs, rate,
+ update_link_startup_timer, false);
+}
+
static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -666,40 +687,105 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
return err;
}
+
+static int ufs_qcom_config_vreg(struct device *dev,
+ struct ufs_vreg *vreg, bool on)
+{
+ int ret = 0;
+ struct regulator *reg;
+ int min_uV, uA_load;
+
+ if (!vreg) {
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ reg = vreg->reg;
+ if (regulator_count_voltages(reg) > 0) {
+ min_uV = on ? vreg->min_uV : 0;
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+ if (ret) {
+ dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+ __func__, vreg->name, ret);
+ goto out;
+ }
+
+ uA_load = on ? vreg->max_uA : 0;
+ ret = regulator_set_load(vreg->reg, uA_load);
+ if (ret)
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ufs_qcom_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (vreg->enabled)
+ return ret;
+
+ ret = ufs_qcom_config_vreg(dev, vreg, true);
+ if (ret)
+ goto out;
+
+ ret = regulator_enable(vreg->reg);
+ if (ret)
+ goto out;
+
+ vreg->enabled = true;
+out:
+ return ret;
+}
+
+static int ufs_qcom_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg->enabled)
+ return ret;
+
+ ret = regulator_disable(vreg->reg);
+ if (ret)
+ goto out;
+
+ ret = ufs_qcom_config_vreg(dev, vreg, false);
+ if (ret)
+ goto out;
+
+ vreg->enabled = false;
+out:
+ return ret;
+}
+
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
int ret = 0;
- if (ufs_qcom_is_link_off(hba)) {
- /*
- * Disable the tx/rx lane symbol clocks before PHY is
- * powered down as the PLL source should be disabled
- * after downstream clocks are disabled.
- */
- ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
- ret = ufs_qcom_ice_suspend(host);
- if (ret)
- dev_err(hba->dev, "%s: failed ufs_qcom_ice_suspend %d\n",
- __func__, ret);
-
- /* Assert PHY soft reset */
- ufs_qcom_assert_reset(hba);
- goto out;
- }
-
/*
- * If UniPro link is not active, PHY ref_clk, main PHY analog power
- * rail and low noise analog power rail for PLL can be switched off.
+ * If UniPro link is not active or OFF, PHY ref_clk, main PHY analog
+ * power rail and low noise analog power rail for PLL can be
+ * switched off.
*/
if (!ufs_qcom_is_link_active(hba)) {
ufs_qcom_disable_lane_clks(host);
phy_power_off(phy);
- ufs_qcom_ice_suspend(host);
- }
+ if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba))
+ ret = ufs_qcom_disable_vreg(hba->dev,
+ host->vddp_ref_clk);
+ ufs_qcom_ice_suspend(host);
+
+ if (ufs_qcom_is_link_off(hba)) {
+ /* Assert PHY soft reset */
+ ufs_qcom_assert_reset(hba);
+ goto out;
+ }
+ }
/* Unvote PM QoS */
ufs_qcom_pm_qos_suspend(host);
@@ -720,6 +806,11 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto out;
}
+ if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 ||
+ hba->spm_lvl > UFS_PM_LVL_3))
+ ufs_qcom_enable_vreg(hba->dev,
+ host->vddp_ref_clk);
+
err = ufs_qcom_enable_lane_clks(host);
if (err)
goto out;
@@ -739,7 +830,35 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
static int ufs_qcom_full_reset(struct ufs_hba *hba)
{
- return -ENOTSUPP;
+ int ret = -ENOTSUPP;
+
+ if (!hba->core_reset) {
+ dev_err(hba->dev, "%s: failed, err = %d\n", __func__,
+ ret);
+ goto out;
+ }
+
+ ret = reset_control_assert(hba->core_reset);
+ if (ret) {
+ dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ /*
+ * The hardware requirement for delay between assert/deassert
+ * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
+ * ~125us (4/32768). To be on the safe side add 200us delay.
+ */
+ usleep_range(200, 210);
+
+ ret = reset_control_deassert(hba->core_reset);
+ if (ret)
+ dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
+ __func__, ret);
+
+out:
+ return ret;
}
#ifdef CONFIG_SCSI_UFS_QCOM_ICE
@@ -757,7 +876,8 @@ static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
/* Use request LBA as the DUN value */
if (req->bio)
- *dun = req->bio->bi_iter.bi_sector;
+ *dun = (req->bio->bi_iter.bi_sector) >>
+ UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
@@ -978,7 +1098,7 @@ static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
}
}
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
int err = 0;
@@ -1009,7 +1129,7 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
vote = ufs_qcom_get_bus_vote(host, mode);
if (vote >= 0)
- err = ufs_qcom_set_bus_vote(host, vote);
+ err = __ufs_qcom_set_bus_vote(host, vote);
else
err = vote;
@@ -1020,6 +1140,35 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
return err;
}
+static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int vote, err;
+
+ /*
+ * In case ufs_qcom_init() is not yet done, simply ignore.
+ * This ufs_qcom_set_bus_vote() shall be called from
+ * ufs_qcom_init() after init is done.
+ */
+ if (!host)
+ return 0;
+
+ if (on) {
+ vote = host->bus_vote.saved_vote;
+ if (vote == host->bus_vote.min_bw_vote)
+ ufs_qcom_update_bus_bw_vote(host);
+ } else {
+ vote = host->bus_vote.min_bw_vote;
+ }
+
+ err = __ufs_qcom_set_bus_vote(host, vote);
+ if (err)
+ dev_err(hba->dev, "%s: set bus vote failed %d\n",
+ __func__, err);
+
+ return err;
+}
+
static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1096,7 +1245,7 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
return 0;
}
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
{
return 0;
}
@@ -1373,7 +1522,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
- int vote = 0;
/*
* In case ufs_qcom_init() is not yet done, simply ignore.
@@ -1398,9 +1546,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
/* enable the device ref clock for HS mode*/
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
- vote = host->bus_vote.saved_vote;
- if (vote == host->bus_vote.min_bw_vote)
- ufs_qcom_update_bus_bw_vote(host);
err = ufs_qcom_ice_resume(host);
if (err)
@@ -1412,21 +1557,19 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
/* M-PHY RMMI interface clocks can be turned off */
ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- if (!ufs_qcom_is_link_active(hba)) {
- if (!is_gating_context)
- /* turn off UFS local PHY ref_clk */
- ufs_qcom_phy_disable_ref_clk(host->generic_phy);
+ /*
+ * If auto hibern8 is supported then the link will already
+ * be in hibern8 state and the ref clock can be gated.
+ */
+ if (ufshcd_is_auto_hibern8_supported(hba) ||
+ !ufs_qcom_is_link_active(hba)) {
+ /* turn off UFS local PHY ref_clk */
+ ufs_qcom_phy_disable_ref_clk(host->generic_phy);
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
}
- vote = host->bus_vote.min_bw_vote;
}
- err = ufs_qcom_set_bus_vote(host, vote);
- if (err)
- dev_err(hba->dev, "%s: set bus vote failed %d\n",
- __func__, err);
-
out:
return err;
}
@@ -1850,6 +1993,57 @@ static void ufs_qcom_save_host_ptr(struct ufs_hba *hba)
dev_err(hba->dev, "invalid host index %d\n", id);
}
+static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
+ struct ufs_vreg **out_vreg)
+{
+ int ret = 0;
+ char prop_name[MAX_PROP_SIZE];
+ struct ufs_vreg *vreg = NULL;
+ struct device *dev = host->hba->dev;
+ struct device_node *np = dev->of_node;
+
+ if (!np) {
+ dev_err(dev, "%s: non DT initialization\n", __func__);
+ goto out;
+ }
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
+ if (!of_parse_phandle(np, prop_name, 0)) {
+ dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
+ __func__, prop_name);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg)
+ return -ENOMEM;
+
+ vreg->name = name;
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
+ ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
+ if (ret) {
+ dev_err(dev, "%s: unable to find %s err %d\n",
+ __func__, prop_name, ret);
+ goto out;
+ }
+
+ vreg->reg = devm_regulator_get(dev, vreg->name);
+ if (IS_ERR(vreg->reg)) {
+ ret = PTR_ERR(vreg->reg);
+ dev_err(dev, "%s: %s get failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+ vreg->min_uV = VDDP_REF_CLK_MIN_UV;
+ vreg->max_uV = VDDP_REF_CLK_MAX_UV;
+
+out:
+ if (!ret)
+ *out_vreg = vreg;
+ return ret;
+}
+
/**
* ufs_qcom_init - bind phy with controller
* @hba: host controller instance
@@ -1877,14 +2071,9 @@ static int ufs_qcom_init(struct ufs_hba *hba)
/* Make a two way bind between the qcom host and the hba */
host->hba = hba;
- ufshcd_set_variant(hba, host);
+ spin_lock_init(&host->ice_work_lock);
- /*
- * voting/devoting device ref_clk source is time consuming hence
- * skip devoting it during aggressive clock gating. This clock
- * will still be gated off during runtime suspend.
- */
- hba->no_ref_clk_gating = true;
+ ufshcd_set_variant(hba, host);
err = ufs_qcom_ice_get_dev(host);
if (err == -EPROBE_DEFER) {
@@ -1969,14 +2158,24 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_phy_save_controller_version(host->generic_phy,
host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
+ err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk",
+ &host->vddp_ref_clk);
phy_init(host->generic_phy);
err = phy_power_on(host->generic_phy);
if (err)
goto out_unregister_bus;
+ if (host->vddp_ref_clk) {
+ err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk);
+ if (err) {
+ dev_err(dev, "%s: failed enabling ref clk supply: %d\n",
+ __func__, err);
+ goto out_disable_phy;
+ }
+ }
err = ufs_qcom_init_lane_clks(host);
if (err)
- goto out_disable_phy;
+ goto out_disable_vddp;
ufs_qcom_parse_lpm(host);
if (host->disable_lpm)
@@ -1984,6 +2183,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
+ ufs_qcom_set_bus_vote(hba, true);
ufs_qcom_setup_clocks(hba, true, false);
host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
@@ -1999,6 +2199,9 @@ static int ufs_qcom_init(struct ufs_hba *hba)
goto out;
+out_disable_vddp:
+ if (host->vddp_ref_clk)
+ ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);
out_disable_phy:
phy_power_off(host->generic_phy);
out_unregister_bus:
@@ -2049,79 +2252,21 @@ static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
return err;
}
-static inline int ufs_qcom_configure_lpm(struct ufs_hba *hba, bool enable)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
- int err = 0;
-
- /* The default low power mode configuration is SVS2 */
- if (!ufs_qcom_cap_svs2(host))
- goto out;
-
- /*
- * The link should be put in hibern8 state before
- * configuring the PHY to enter/exit SVS2 mode.
- */
- err = ufshcd_uic_hibern8_enter(hba);
- if (err)
- goto out;
-
- err = ufs_qcom_phy_configure_lpm(phy, enable);
- if (err)
- goto out;
-
- err = ufshcd_uic_hibern8_exit(hba);
-out:
- return err;
-}
-
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
-
- return ufs_qcom_configure_lpm(hba, false);
-}
-
-static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
-
- /* set unipro core clock cycles to 150 and clear clock divider */
- return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
-}
-
-static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- u32 core_clk_ctrl_reg;
+ struct ufs_pa_layer_attr *attr = &host->dev_req_params;
int err = 0;
if (!ufs_qcom_cap_qunipro(host))
goto out;
- err = ufs_qcom_configure_lpm(hba, true);
- if (err)
- goto out;
+ if (attr)
+ __ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
+ attr->hs_rate, false, true);
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- &core_clk_ctrl_reg);
-
- /* make sure CORE_CLK_DIV_EN is cleared */
- if (!err &&
- (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
- core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- core_clk_ctrl_reg);
- }
+ /* set unipro core clock cycles to 150 and clear clock divider */
+ err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
out:
return err;
}
@@ -2129,11 +2274,16 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_pa_layer_attr *attr = &host->dev_req_params;
int err = 0;
if (!ufs_qcom_cap_qunipro(host))
return 0;
+ if (attr)
+ ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
+ attr->hs_rate, false);
+
if (ufs_qcom_cap_svs2(host))
/*
* For SVS2 set unipro core clock cycles to 37 and
@@ -2154,30 +2304,17 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
bool scale_up, enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
int err = 0;
switch (status) {
case PRE_CHANGE:
if (scale_up)
err = ufs_qcom_clk_scale_up_pre_change(hba);
- else
- err = ufs_qcom_clk_scale_down_pre_change(hba);
break;
case POST_CHANGE:
- if (scale_up)
- err = ufs_qcom_clk_scale_up_post_change(hba);
- else
+ if (!scale_up)
err = ufs_qcom_clk_scale_down_post_change(hba);
- if (err || !dev_req_params)
- goto out;
-
- ufs_qcom_cfg_timers(hba,
- dev_req_params->gear_rx,
- dev_req_params->pwr_rx,
- dev_req_params->hs_rate,
- false);
ufs_qcom_update_bus_bw_vote(host);
break;
default:
@@ -2186,7 +2323,6 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
break;
}
-out:
return err;
}
@@ -2277,17 +2413,21 @@ void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
- if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+ if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
+ ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
+ UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
- else
+ } else {
+ ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
+ }
}
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
/* provide a legal default configuration */
- host->testbus.select_major = TSTBUS_UAWM;
- host->testbus.select_minor = 1;
+ host->testbus.select_major = TSTBUS_UNIPRO;
+ host->testbus.select_minor = 37;
}
static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
@@ -2304,7 +2444,7 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
* mappings of select_minor, since there is no harm in
* configuring a non-existent select_minor
*/
- if (host->testbus.select_minor > 0x1F) {
+ if (host->testbus.select_minor > 0xFF) {
dev_err(host->hba->dev,
"%s: 0x%05X is not a legal testbus option\n",
__func__, host->testbus.select_minor);
@@ -2314,6 +2454,11 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
return true;
}
+/*
+ * The caller of this function must make sure that the controller
+ * is out of runtime suspend and appropriate clocks are enabled
+ * before accessing the test bus registers.
+ */
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
int reg;
@@ -2373,7 +2518,8 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
break;
case TSTBUS_UNIPRO:
reg = UFS_UNIPRO_CFG;
- offset = 1;
+ offset = 20;
+ mask = 0xFFF;
break;
/*
* No need for a default case, since
@@ -2383,8 +2529,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
}
mask <<= offset;
- pm_runtime_get_sync(host->hba->dev);
- ufshcd_hold(host->hba, false);
ufshcd_rmwl(host->hba, TEST_BUS_SEL,
(u32)host->testbus.select_major << 19,
REG_UFS_CFG1);
@@ -2392,8 +2536,11 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
(u32)host->testbus.select_minor << offset,
reg);
ufs_qcom_enable_test_bus(host);
- ufshcd_release(host->hba, false);
- pm_runtime_put_sync(host->hba->dev);
+ /*
+ * Make sure the test bus configuration is
+ * committed before returning.
+ */
+ mb();
return 0;
}
@@ -2403,15 +2550,47 @@ static void ufs_qcom_testbus_read(struct ufs_hba *hba)
ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
}
-static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
+static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ u32 *testbus = NULL;
+ int i, nminor = 256, testbus_len = nminor * sizeof(u32);
+
+ testbus = kmalloc(testbus_len, GFP_KERNEL);
+ if (!testbus)
+ return;
+
+ host->testbus.select_major = TSTBUS_UNIPRO;
+ for (i = 0; i < nminor; i++) {
+ host->testbus.select_minor = i;
+ ufs_qcom_testbus_config(host);
+ testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+ }
+ print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
+ 16, 4, testbus, testbus_len, false);
+ kfree(testbus);
+}
+
+static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy = host->generic_phy;
ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
"HCI Vendor Specific Registers ");
-
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
+
+ if (no_sleep)
+ return;
+
+ /* sleep a bit intermittently as we are dumping too much data */
+ usleep_range(1000, 1100);
ufs_qcom_testbus_read(hba);
+ usleep_range(1000, 1100);
+ ufs_qcom_print_unipro_testbus(hba);
+ usleep_range(1000, 1100);
+ ufs_qcom_phy_dbg_register_dump(phy);
+ usleep_range(1000, 1100);
ufs_qcom_ice_print_regs(host);
}
@@ -2436,6 +2615,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.full_reset = ufs_qcom_full_reset,
.update_sec_cfg = ufs_qcom_update_sec_cfg,
.get_scale_down_gear = ufs_qcom_get_scale_down_gear,
+ .set_bus_vote = ufs_qcom_set_bus_vote,
.dbg_register_dump = ufs_qcom_dump_dbg_regs,
#ifdef CONFIG_DEBUG_FS
.add_debugfs = ufs_qcom_dbg_add_debugfs,
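
ufs_qcom_full_reset() above replaces the old -ENOTSUPP stub with an assert/delay/deassert sequence on the core_reset line, where the delay is derived from the 32.768 kHz sleep clock (4 cycles is roughly 122 us, padded to 200 us for margin). A userspace sketch of that sequence with stand-ins for the reset-controller calls; the helper names are hypothetical.

#include <stdio.h>
#include <unistd.h>

#define SLEEP_CLK_HZ     32768
#define MIN_RESET_CYCLES 4

/* stand-ins for reset_control_assert()/reset_control_deassert() */
static int core_reset_assert(void)   { printf("core reset asserted\n");   return 0; }
static int core_reset_deassert(void) { printf("core reset deasserted\n"); return 0; }

static int full_reset(void)
{
	/* 4 sleep-clock cycles = 4 / 32768 s ~= 122 us; pad to 200 us for margin */
	unsigned int min_us = (MIN_RESET_CYCLES * 1000000u) / SLEEP_CLK_HZ;
	unsigned int delay_us = min_us < 200 ? 200 : min_us;
	int ret;

	ret = core_reset_assert();
	if (ret)
		return ret;

	usleep(delay_us);

	return core_reset_deassert();
}

int main(void)
{
	return full_reset();
}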
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 42e7aad8..792ae42 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -100,6 +100,7 @@ enum {
#define QUNIPRO_SEL UFS_BIT(0)
#define TEST_BUS_EN BIT(18)
#define TEST_BUS_SEL GENMASK(22, 19)
+#define UFS_REG_TEST_BUS_EN BIT(30)
/* bit definitions for REG_UFS_CFG2 register */
#define UAWM_HW_CGC_EN (1 << 0)
@@ -369,8 +370,10 @@ struct ufs_qcom_host {
u32 dbg_print_en;
struct ufs_qcom_testbus testbus;
+ spinlock_t ice_work_lock;
struct work_struct ice_cfg_work;
struct request *req_pending;
+ struct ufs_vreg *vddp_ref_clk;
};
static inline u32
diff --git a/drivers/scsi/ufs/ufs_test.c b/drivers/scsi/ufs/ufs_test.c
index 8953722e8..d41871a 100644
--- a/drivers/scsi/ufs/ufs_test.c
+++ b/drivers/scsi/ufs/ufs_test.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -689,13 +689,13 @@ static void scenario_free_end_io_fn(struct request *rq, int err)
__blk_put_request(test_iosched->req_q, test_rq->rq);
spin_unlock_irqrestore(&test_iosched->lock, flags);
- test_iosched_free_test_req_data_buffer(test_rq);
- kfree(test_rq);
-
if (err)
pr_err("%s: request %d completed, err=%d", __func__,
test_rq->req_id, err);
+ test_iosched_free_test_req_data_buffer(test_rq);
+ kfree(test_rq);
+
check_test_completion(test_iosched);
}
@@ -984,14 +984,14 @@ static void long_test_free_end_io_fn(struct request *rq, int err)
return;
}
- test_iosched_free_test_req_data_buffer(test_rq);
- kfree(test_rq);
- utd->completed_req_count++;
-
if (err)
pr_err("%s: request %d completed, err=%d", __func__,
test_rq->req_id, err);
+ test_iosched_free_test_req_data_buffer(test_rq);
+ kfree(test_rq);
+ utd->completed_req_count++;
+
check_test_completion(test_iosched);
}
@@ -1007,7 +1007,7 @@ static void long_test_free_end_io_fn(struct request *rq, int err)
static int run_long_test(struct test_iosched *test_iosched)
{
int ret = 0;
- int direction, num_bios_per_request;
+ int direction, num_bios_per_request = 1;
static unsigned int inserted_requests;
u32 sector, seed, num_bios, seq_sector_delta;
struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
@@ -1028,14 +1028,12 @@ static int run_long_test(struct test_iosched *test_iosched)
/* Set test parameters */
switch (test_iosched->test_info.testcase) {
case UFS_TEST_LONG_RANDOM_READ:
- num_bios_per_request = 1;
utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
(LONG_RAND_TEST_REQ_RATIO * TEST_BIO_SIZE *
num_bios_per_request);
direction = READ;
break;
case UFS_TEST_LONG_RANDOM_WRITE:
- num_bios_per_request = 1;
utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
(LONG_RAND_TEST_REQ_RATIO * TEST_BIO_SIZE *
num_bios_per_request);
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 9c8473e..de6ecbd 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -40,6 +40,22 @@
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
+static int ufshcd_parse_reset_info(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ hba->core_reset = devm_reset_control_get(hba->dev,
+ "core_reset");
+ if (IS_ERR(hba->core_reset)) {
+ ret = PTR_ERR(hba->core_reset);
+ dev_err(hba->dev, "core_reset unavailable,err = %d\n",
+ ret);
+ hba->core_reset = NULL;
+ }
+
+ return ret;
+}
+
static int ufshcd_parse_clock_info(struct ufs_hba *hba)
{
int ret = 0;
@@ -297,6 +313,20 @@ static void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba)
hba->dev_ref_clk_freq = REF_CLK_FREQ_26_MHZ;
}
+static int ufshcd_parse_pinctrl_info(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ /* Try to obtain pinctrl handle */
+ hba->pctrl = devm_pinctrl_get(hba->dev);
+ if (IS_ERR(hba->pctrl)) {
+ ret = PTR_ERR(hba->pctrl);
+ hba->pctrl = NULL;
+ }
+
+ return ret;
+}
+
#ifdef CONFIG_SMP
/**
* ufshcd_pltfrm_suspend - suspend power management function
@@ -401,6 +431,20 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
goto dealloc_host;
}
+ err = ufshcd_parse_reset_info(hba);
+ if (err) {
+ dev_err(&pdev->dev, "%s: reset parse failed %d\n",
+ __func__, err);
+ goto dealloc_host;
+ }
+
+ err = ufshcd_parse_pinctrl_info(hba);
+ if (err) {
+ dev_dbg(&pdev->dev, "%s: unable to parse pinctrl data %d\n",
+ __func__, err);
+ /* let's not fail the probe */
+ }
+
ufshcd_parse_dev_ref_clk_freq(hba);
ufshcd_parse_pm_levels(hba);
ufshcd_parse_gear_limits(hba);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 7552357..8772bcb 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -47,6 +47,7 @@
#include "ufshci.h"
#include "ufs_quirks.h"
#include "ufs-debugfs.h"
+#include "ufs-qcom.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -367,7 +368,7 @@ static inline bool ufshcd_is_valid_pm_lvl(int lvl)
}
static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
@@ -389,6 +390,28 @@ static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void ufshcd_release_all(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
+static int ufshcd_devfreq_target(struct device *dev,
+ unsigned long *freq, u32 flags);
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat);
+
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
+static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
+ .upthreshold = 35,
+ .downdifferential = 30,
+ .simple_scaling = 1,
+};
+
+static void *gov_data = &ufshcd_ondemand_data;
+#else
+static void *gov_data;
+#endif
+
+static struct devfreq_dev_profile ufs_devfreq_profile = {
+ .polling_ms = 40,
+ .target = ufshcd_devfreq_target,
+ .get_dev_status = ufshcd_devfreq_get_dev_status,
+};
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
@@ -441,6 +464,63 @@ void ufshcd_scsi_block_requests(struct ufs_hba *hba)
}
EXPORT_SYMBOL(ufshcd_scsi_block_requests);
+static int ufshcd_device_reset_ctrl(struct ufs_hba *hba, bool ctrl)
+{
+ int ret = 0;
+
+ if (!hba->pctrl)
+ return 0;
+
+ /* Assert reset if ctrl == true */
+ if (ctrl)
+ ret = pinctrl_select_state(hba->pctrl,
+ pinctrl_lookup_state(hba->pctrl, "dev-reset-assert"));
+ else
+ ret = pinctrl_select_state(hba->pctrl,
+ pinctrl_lookup_state(hba->pctrl, "dev-reset-deassert"));
+
+ if (ret < 0)
+ dev_err(hba->dev, "%s: %s failed with err %d\n",
+ __func__, ctrl ? "Assert" : "Deassert", ret);
+
+ return ret;
+}
+
+static inline int ufshcd_assert_device_reset(struct ufs_hba *hba)
+{
+ return ufshcd_device_reset_ctrl(hba, true);
+}
+
+static inline int ufshcd_deassert_device_reset(struct ufs_hba *hba)
+{
+ return ufshcd_device_reset_ctrl(hba, false);
+}
+
+static int ufshcd_reset_device(struct ufs_hba *hba)
+{
+ int ret;
+
+ /* reset the connected UFS device */
+ ret = ufshcd_assert_device_reset(hba);
+ if (ret)
+ goto out;
+ /*
+ * The reset signal is active low.
+ * The UFS device shall detect a positive or negative RST_n pulse
+ * width of 1 us or more.
+ * To be on the safe side, keep the reset low for at least 10 us.
+ */
+ usleep_range(10, 15);
+
+ ret = ufshcd_deassert_device_reset(hba);
+ if (ret)
+ goto out;
+ /* same as assert, wait for at least 10 us after deassert */
+ usleep_range(10, 15);
+out:
+ return ret;
+}
+
/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
@@ -534,7 +614,7 @@ static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
}
}
-static void ufshcd_print_host_regs(struct ufs_hba *hba)
+static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep)
{
if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
return;
@@ -567,7 +647,12 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba)
ufshcd_print_clk_freqs(hba);
- ufshcd_vops_dbg_register_dump(hba);
+ ufshcd_vops_dbg_register_dump(hba, no_sleep);
+}
+
+static void ufshcd_print_host_regs(struct ufs_hba *hba)
+{
+ __ufshcd_print_host_regs(hba, false);
}
static
@@ -1172,6 +1257,12 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
return ret;
}
+static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
+{
+ hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
+ cancel_work_sync(&hba->clk_gating.gate_work);
+}
+
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
@@ -1179,7 +1270,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.ungate_work);
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+ ufshcd_cancel_gate_work(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_gating.state == CLKS_ON) {
@@ -1250,14 +1341,18 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
}
break;
case REQ_CLKS_OFF:
- if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ /*
+ * If the timer was active but the callback was not running
+ * we have nothing to do, just change state and return.
+ */
+ if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
break;
}
/*
- * If we here, it means gating work is either done or
+ * If we are here, it means gating work is either done or
* currently running. Hence, fall through to cancel gating
* work and to enable clocks.
*/
@@ -1266,7 +1361,8 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- schedule_work(&hba->clk_gating.ungate_work);
+ queue_work(hba->clk_gating.ungating_workq,
+ &hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
* work to be done or not.
@@ -1297,11 +1393,18 @@ EXPORT_SYMBOL_GPL(ufshcd_hold);
static void ufshcd_gate_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
- clk_gating.gate_work.work);
+ clk_gating.gate_work);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.is_suspended) {
+ /*
+ * If an ungate request raced with this work, the gating state
+ * will already be marked REQ_CLKS_ON. In that case save time by
+ * skipping the gating work and exit after changing the clock
+ * state to CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ (hba->clk_gating.state == REQ_CLKS_ON)) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
@@ -1335,7 +1438,12 @@ static void ufshcd_gate_work(struct work_struct *work)
ufshcd_set_link_hibern8(hba);
}
- if (!ufshcd_is_link_active(hba) && !hba->no_ref_clk_gating)
+ /*
+ * If auto hibern8 is supported then the link will already
+ * be in hibern8 state and the ref clock can be gated.
+ */
+ if ((ufshcd_is_auto_hibern8_supported(hba) ||
+ !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating)
ufshcd_disable_clocks(hba, true);
else
/* If link is active, device ref_clk can't be switched off */
@@ -1383,8 +1491,9 @@ static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
- schedule_delayed_work(&hba->clk_gating.gate_work,
- msecs_to_jiffies(hba->clk_gating.delay_ms));
+ hrtimer_start(&hba->clk_gating.gate_hrtimer,
+ ms_to_ktime(hba->clk_gating.delay_ms),
+ HRTIMER_MODE_REL);
}
void ufshcd_release(struct ufs_hba *hba, bool no_sched)
@@ -1512,36 +1621,57 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
return count;
}
+static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
+ struct hrtimer *timer)
+{
+ struct ufs_hba *hba = container_of(timer, struct ufs_hba,
+ clk_gating.gate_hrtimer);
+
+ schedule_work(&hba->clk_gating.gate_work);
+
+ return HRTIMER_NORESTART;
+}
+
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
struct ufs_clk_gating *gating = &hba->clk_gating;
+ char wq_name[sizeof("ufs_clk_ungating_00")];
hba->clk_gating.state = CLKS_ON;
if (!ufshcd_is_clkgating_allowed(hba))
return;
- INIT_DELAYED_WORK(&gating->gate_work, ufshcd_gate_work);
+ /*
+ * Disable hibern8 during clk gating if
+ * auto hibern8 is supported
+ */
+ if (ufshcd_is_auto_hibern8_supported(hba))
+ hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+
+ INIT_WORK(&gating->gate_work, ufshcd_gate_work);
INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
+ /*
+ * Clock gating work must be executed only after auto hibern8
+ * timeout has expired in the hardware or after aggressive
+ * hibern8 on idle software timeout. Jiffy based low resolution
+ * delayed work cannot reliably guarantee this, hence use a high
+ * resolution timer to make sure the gate work is scheduled only
+ * after the hibern8 timeout has elapsed.
+ *
+ * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
+ */
+ hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
+
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_ungating_%d",
+ hba->host->host_no);
+ hba->clk_gating.ungating_workq = create_singlethread_workqueue(wq_name);
gating->is_enabled = true;
- /*
- * Scheduling the delayed work after 1 jiffies will make the work to
- * get schedule any time from 0ms to 1000/HZ ms which is not desirable
- * for hibern8 enter work as it may impact the performance if it gets
- * scheduled almost immediately. Hence make sure that hibern8 enter
- * work gets scheduled atleast after 2 jiffies (any time between
- * 1000/HZ ms to 2000/HZ ms).
- */
- gating->delay_ms_pwr_save = jiffies_to_msecs(
- max_t(unsigned long,
- msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE),
- 2));
- gating->delay_ms_perf = jiffies_to_msecs(
- max_t(unsigned long,
- msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PERF),
- 2));
+ gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
+ gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;
/* start with performance mode */
gating->delay_ms = gating->delay_ms_perf;
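
The comment above motivates replacing jiffy-based delayed work with an hrtimer that merely kicks a worker. Below is a minimal, self-contained sketch of that hrtimer-plus-worker pattern for illustration only; all names are hypothetical and this is not the driver's actual gating code.

#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>

struct example_gate {
	struct hrtimer timer;
	struct work_struct work;
	unsigned long delay_ms;
};

/* Timer callback runs in hard-irq context: only queue the work here. */
static enum hrtimer_restart example_gate_timer_fn(struct hrtimer *t)
{
	struct example_gate *g = container_of(t, struct example_gate, timer);

	schedule_work(&g->work);
	return HRTIMER_NORESTART;
}

static void example_gate_work_fn(struct work_struct *w)
{
	/* ... gate the clocks here; this context may sleep ... */
}

static void example_gate_init(struct example_gate *g, unsigned long delay_ms)
{
	g->delay_ms = delay_ms;
	INIT_WORK(&g->work, example_gate_work_fn);
	hrtimer_init(&g->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	g->timer.function = example_gate_timer_fn;
}

/* Re-arm with high resolution instead of schedule_delayed_work(). */
static void example_gate_arm(struct example_gate *g)
{
	hrtimer_start(&g->timer, ms_to_ktime(g->delay_ms), HRTIMER_MODE_REL);
}
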
@@ -1598,8 +1728,9 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
}
device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
+ ufshcd_cancel_gate_work(hba);
cancel_work_sync(&hba->clk_gating.ungate_work);
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+ destroy_workqueue(hba->clk_gating.ungating_workq);
}
static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
@@ -1910,6 +2041,7 @@ static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
return;
if (ufshcd_is_auto_hibern8_supported(hba)) {
+ hba->hibern8_on_idle.delay_ms = 1;
hba->hibern8_on_idle.state = AUTO_HIBERN8;
/*
* Disable SW hibern8 enter on idle in case
@@ -1917,13 +2049,13 @@ static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
*/
hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
} else {
+ hba->hibern8_on_idle.delay_ms = 10;
INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
ufshcd_hibern8_enter_work);
INIT_WORK(&hba->hibern8_on_idle.exit_work,
ufshcd_hibern8_exit_work);
}
- hba->hibern8_on_idle.delay_ms = 10;
hba->hibern8_on_idle.is_enabled = true;
hba->hibern8_on_idle.delay_attr.show =
@@ -2360,9 +2492,6 @@ static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
goto out;
req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
- if (lrbp->cmd->request && lrbp->cmd->request->bio)
- dun = lrbp->cmd->request->bio->bi_iter.bi_sector;
-
req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
out:
@@ -2587,6 +2716,61 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
}
/**
+ * ufshcd_get_write_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * Lock is predominantly held by shutdown context thus, ensuring
+ * that no requests from any other context may sneak through.
+ */
+static inline void ufshcd_get_write_lock(struct ufs_hba *hba)
+{
+ down_write(&hba->lock);
+}
+
+/**
+ * ufshcd_get_read_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * Returns 1 if acquired, < 0 on contention
+ *
+ * Once shutdown has been initiated, allow only requests directed to the
+ * well known device lun to go through. The existing synchronization
+ * between scaling and request issue is preserved, and this restructuring
+ * additionally synchronizes shutdown with both.
+ */
+static int ufshcd_get_read_lock(struct ufs_hba *hba, u64 lun)
+{
+ int err = 0;
+
+ err = down_read_trylock(&hba->lock);
+ if (err > 0)
+ goto out;
+ /* let requests for the well known device lun go through */
+ if (ufshcd_scsi_to_upiu_lun(lun) == UFS_UPIU_UFS_DEVICE_WLUN)
+ return 0;
+ else if (!ufshcd_is_shutdown_ongoing(hba))
+ return -EAGAIN;
+ else
+ return -EPERM;
+
+out:
+ return err;
+}
+
+/**
+ * ufshcd_put_read_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * Returns none
+ */
+static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
+{
+ up_read(&hba->lock);
+}
+
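
The trylock-based gatekeeping above boils down to a standard rw_semaphore pattern: issue paths take the lock for reading and back off politely on contention, while shutdown and clock scaling take it for writing. A small illustrative sketch of that pattern follows; all names are hypothetical and it is not the driver's code.

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_lock);

/* Fast path: returns 1 if the read lock was taken, 0 if it must back off. */
static int example_try_start_request(void)
{
	return down_read_trylock(&example_lock);
}

static void example_finish_request(void)
{
	up_read(&example_lock);
}

/* Slow path (shutdown or scaling): blocks until all readers drain. */
static void example_quiesce(void)
{
	down_write(&example_lock);
	/* ... no new requests can start; wait for the hardware to go idle ... */
}

static void example_unquiesce(void)
{
	up_write(&example_lock);
}
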
+/**
* ufshcd_queuecommand - main entry point for SCSI requests
* @cmd: command from SCSI Midlayer
* @done: call back function
@@ -2600,9 +2784,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
unsigned long flags;
int tag;
int err = 0;
+ bool has_read_lock = false;
hba = shost_priv(host);
+ if (!cmd || !cmd->request || !hba)
+ return -EINVAL;
+
tag = cmd->request->tag;
if (!ufshcd_valid_tag(hba, tag)) {
dev_err(hba->dev,
@@ -2611,10 +2799,27 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
BUG();
}
- if (!down_read_trylock(&hba->clk_scaling_lock))
- return SCSI_MLQUEUE_HOST_BUSY;
+ err = ufshcd_get_read_lock(hba, cmd->device->lun);
+ if (unlikely(err < 0)) {
+ if (err == -EPERM) {
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+ if (err == -EAGAIN)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ } else if (err == 1) {
+ has_read_lock = true;
+ }
spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /* if error handling is in progress, return host busy */
+ if (ufshcd_eh_in_progress(hba)) {
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out_unlock;
+ }
+
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
@@ -2632,13 +2837,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
cmd->scsi_done(cmd);
goto out_unlock;
}
-
- /* if error handling is in progress, don't issue commands */
- if (ufshcd_eh_in_progress(hba)) {
- set_host_byte(cmd, DID_ERROR);
- cmd->scsi_done(cmd);
- goto out_unlock;
- }
spin_unlock_irqrestore(hba->host->host_lock, flags);
hba->req_abort_count = 0;
@@ -2679,13 +2877,12 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_vops_pm_qos_req_start(hba, cmd->request);
/* IO svc time latency histogram */
- if (hba != NULL && cmd->request != NULL) {
- if (hba->latency_hist_enabled &&
- (cmd->request->cmd_type == REQ_TYPE_FS)) {
- cmd->request->lat_hist_io_start = ktime_get();
- cmd->request->lat_hist_enabled = 1;
- } else
- cmd->request->lat_hist_enabled = 0;
+ if (hba->latency_hist_enabled &&
+ (cmd->request->cmd_type == REQ_TYPE_FS)) {
+ cmd->request->lat_hist_io_start = ktime_get();
+ cmd->request->lat_hist_enabled = 1;
+ } else {
+ cmd->request->lat_hist_enabled = 0;
}
WARN_ON(hba->clk_gating.state != CLKS_ON);
@@ -2764,7 +2961,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
- up_read(&hba->clk_scaling_lock);
+ if (has_read_lock)
+ ufshcd_put_read_lock(hba);
return err;
}
@@ -2956,7 +3154,12 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
struct completion wait;
unsigned long flags;
- down_read(&hba->clk_scaling_lock);
+ /*
+ * May get invoked from shutdown and IOCTL contexts.
+ * In the shutdown context, it comes in with the lock already acquired.
+ */
+ if (!ufshcd_is_shutdown_ongoing(hba))
+ down_read(&hba->lock);
/*
* Get free slot, sleep if slots are unavailable.
@@ -2989,7 +3192,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
out_put_tag:
ufshcd_put_dev_cmd_tag(hba, tag);
wake_up(&hba->dev_cmd.tag_wq);
- up_read(&hba->clk_scaling_lock);
+ if (!ufshcd_is_shutdown_ongoing(hba))
+ up_read(&hba->lock);
return err;
}
@@ -3929,8 +4133,12 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
ret = (status != PWR_OK) ? status : -1;
}
out:
- if (ret)
+ if (ret) {
ufsdbg_set_err_state(hba);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ }
ufshcd_save_tstamp_of_last_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -3954,17 +4162,17 @@ int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
ufshcd_hold_all(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
- ret = -EBUSY;
- goto out;
- }
-
/*
* Wait for all the outstanding tasks/transfer requests.
* Verify by checking the doorbell registers are clear.
*/
start = ktime_get();
do {
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (!tm_doorbell && !tr_doorbell) {
@@ -4036,32 +4244,50 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
static int ufshcd_link_recovery(struct ufs_hba *hba)
{
- int ret;
+ int ret = 0;
unsigned long flags;
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->ufshcd_state = UFSHCD_STATE_RESET;
- ufshcd_set_eh_in_progress(hba);
+ /*
+ * Check if there is any race with fatal error handling.
+ * If so, wait for it to complete. Even though fatal error
+ * handling does reset and restore in some cases, do not rely
+ * on that; we are only avoiding a race here.
+ */
+ do {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!(work_pending(&hba->eh_work) ||
+ hba->ufshcd_state == UFSHCD_STATE_RESET))
+ break;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+ flush_work(&hba->eh_work);
+ } while (1);
+
+
+ /*
+ * We don't know whether the previous reset really reset the host
+ * controller, so force a reset here to be sure.
+ */
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ hba->force_host_reset = true;
+ schedule_work(&hba->eh_work);
+
+ /* wait for the reset work to finish */
+ do {
+ if (!(work_pending(&hba->eh_work) ||
+ hba->ufshcd_state == UFSHCD_STATE_RESET))
+ break;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+ flush_work(&hba->eh_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ } while (1);
+
+ if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
+ ufshcd_is_link_active(hba)))
+ ret = -ENOLINK;
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ret = ufshcd_vops_full_reset(hba);
- if (ret)
- dev_warn(hba->dev,
- "full reset returned %d, trying to recover the link\n",
- ret);
-
- ret = ufshcd_host_reset_and_restore(hba);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (ret)
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- ufshcd_clear_eh_in_progress(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (ret)
- dev_err(hba->dev, "%s: link recovery failed, err %d",
- __func__, ret);
-
return ret;
}
@@ -4076,7 +4302,13 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
- if (ret) {
+ /*
+ * Do full reinit if enter failed or if LINERESET was detected during
+ * Hibern8 operation. After LINERESET, link moves to default PWM-G1
+ * mode hence full reinit is required to move link to HS speeds.
+ */
+ if (ret || hba->full_init_linereset) {
+ hba->full_init_linereset = false;
ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
__func__, ret);
@@ -4084,8 +4316,7 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
* If link recovery fails then return error so that caller
* don't retry the hibern8 enter again.
*/
- if (ufshcd_link_recovery(hba))
- ret = -ENOLINK;
+ ret = ufshcd_link_recovery(hba);
} else {
dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
ktime_to_us(ktime_get()));
@@ -4121,6 +4352,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+ /* Do full reinit if exit failed */
if (ret) {
ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
@@ -4640,8 +4872,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
ret = ufshcd_make_hba_operational(hba);
out:
- if (ret)
+ if (ret) {
dev_err(hba->dev, "link startup failed %d\n", ret);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ }
return ret;
}
@@ -5023,7 +5259,12 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
dev_err(hba->dev,
"OCS error from controller = %x for tag %d\n",
ocs, lrbp->task_tag);
- ufshcd_print_host_regs(hba);
+ /*
+ * This is called in interrupt context, hence avoid sleep
+ * while printing debug registers. Also print only the minimum
+ * debug registers needed to debug OCS failure.
+ */
+ __ufshcd_print_host_regs(hba, true);
ufshcd_print_host_state(hba);
break;
} /* end of switch */
@@ -5045,19 +5286,48 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
* @intr_status: interrupt status generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
hba->active_uic_cmd->argument3 =
ufshcd_get_dme_attr_val(hba);
complete(&hba->active_uic_cmd->done);
+ retval = IRQ_HANDLED;
}
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
- complete(hba->uic_async_done);
+ if (intr_status & UFSHCD_UIC_PWR_MASK) {
+ if (hba->uic_async_done) {
+ complete(hba->uic_async_done);
+ retval = IRQ_HANDLED;
+ } else if (ufshcd_is_auto_hibern8_supported(hba)) {
+ /*
+ * If uic_async_done flag is not set then this
+ * is an Auto hibern8 err interrupt.
+ * Perform a host reset followed by a full
+ * link recovery.
+ */
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ hba->force_host_reset = true;
+ dev_err(hba->dev, "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
+ __func__, (intr_status & UIC_HIBERNATE_ENTER) ?
+ "Enter" : "Exit",
+ intr_status, ufshcd_get_upmcrs(hba));
+ __ufshcd_print_host_regs(hba, true);
+ ufshcd_print_host_state(hba);
+ schedule_work(&hba->eh_work);
+ retval = IRQ_HANDLED;
+ }
+ }
+ return retval;
}
/**
@@ -5201,8 +5471,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
unsigned long completed_reqs;
u32 tr_doorbell;
@@ -5220,7 +5494,12 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
- __ufshcd_transfer_req_compl(hba, completed_reqs);
+ if (completed_reqs) {
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
@@ -5637,17 +5916,32 @@ static void ufshcd_err_handler(struct work_struct *work)
int err = 0;
int tag;
bool needs_reset = false;
+ bool clks_enabled = false;
hba = container_of(work, struct ufs_hba, eh_work);
- ufsdbg_set_err_state(hba);
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold_all(hba);
-
spin_lock_irqsave(hba->host->host_lock, flags);
+ ufsdbg_set_err_state(hba);
+
if (hba->ufshcd_state == UFSHCD_STATE_RESET)
goto out;
+ /*
+ * Make sure the clocks are ON before proceeding with error
+ * handling. In the majority of cases the error handler runs
+ * with clocks ON. However, if it was scheduled due to an auto
+ * hibern8 error interrupt, the clocks could be gated or in the
+ * process of being gated when the error handler runs.
+ */
+ if (unlikely((hba->clk_gating.state != CLKS_ON) &&
+ ufshcd_is_auto_hibern8_supported(hba))) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_hold(hba, false);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ clks_enabled = true;
+ }
+
hba->ufshcd_state = UFSHCD_STATE_RESET;
ufshcd_set_eh_in_progress(hba);
@@ -5674,14 +5968,18 @@ static void ufshcd_err_handler(struct work_struct *work)
dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
__func__, hba->saved_err, hba->saved_uic_err);
if (!hba->silence_err_logs) {
+ /* release lock as printing host regs may sleep */
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_print_host_regs(hba);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+ spin_lock_irqsave(hba->host->host_lock, flags);
}
}
- if ((hba->saved_err & INT_FATAL_ERRORS) || hba->saved_ce_err ||
+ if ((hba->saved_err & INT_FATAL_ERRORS)
+ || hba->saved_ce_err || hba->force_host_reset ||
((hba->saved_err & UIC_ERROR) &&
(hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
@@ -5769,6 +6067,7 @@ static void ufshcd_err_handler(struct work_struct *work)
hba->saved_err = 0;
hba->saved_uic_err = 0;
hba->saved_ce_err = 0;
+ hba->force_host_reset = false;
}
skip_err_handling:
@@ -5780,12 +6079,12 @@ static void ufshcd_err_handler(struct work_struct *work)
}
hba->silence_err_logs = false;
- ufshcd_clear_eh_in_progress(hba);
+
+ if (clks_enabled)
+ __ufshcd_release(hba, false);
out:
+ ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_scsi_unblock_requests(hba);
- ufshcd_release_all(hba);
- pm_runtime_put_sync(hba->dev);
}
static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
@@ -5799,16 +6098,20 @@ static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
/**
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
+ irqreturn_t retval = IRQ_NONE;
/* PHY layer lane error */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
- /* Ignore LINERESET indication, as this is not an error */
if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
- (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
+ (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
/*
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
@@ -5816,61 +6119,95 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
__func__, reg);
ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
+
+ /*
+ * Don't ignore LINERESET indication during hibern8
+ * enter operation.
+ */
+ if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
+ struct uic_command *cmd = hba->active_uic_cmd;
+
+ if (cmd) {
+ if (cmd->command == UIC_CMD_DME_HIBER_ENTER) {
+ dev_err(hba->dev, "%s: LINERESET during hibern8 enter, reg 0x%x\n",
+ __func__, reg);
+ hba->full_init_linereset = true;
+ }
+ }
+ }
+ retval |= IRQ_HANDLED;
}
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
- if (reg)
+ if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
+ (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
- if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
- hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
- } else if (hba->dev_info.quirks &
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
- if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
- hba->uic_error |=
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
- else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
- hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+ } else if (hba->dev_info.quirks &
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ else if (reg &
+ UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ }
+ retval |= IRQ_HANDLED;
}
/* UIC NL/TL/DME errors needs software retry */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
- if (reg) {
+ if ((reg & UIC_NETWORK_LAYER_ERROR) &&
+ (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+ retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
- if (reg) {
+ if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
+ (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+ retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
- if (reg) {
+ if ((reg & UIC_DME_ERROR) &&
+ (reg & UIC_DME_ERROR_CODE_MASK)) {
ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+ retval |= IRQ_HANDLED;
}
dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
__func__, hba->uic_error);
+ return retval;
}
/**
* ufshcd_check_errors - Check for errors that need s/w attention
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
{
bool queue_eh_work = false;
+ irqreturn_t retval = IRQ_NONE;
if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
queue_eh_work = true;
if (hba->errors & UIC_ERROR) {
hba->uic_error = 0;
- ufshcd_update_uic_error(hba);
+ retval = ufshcd_update_uic_error(hba);
if (hba->uic_error)
queue_eh_work = true;
}
@@ -5886,12 +6223,16 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
/* handle fatal errors only when link is functional */
if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
- /* block commands from scsi mid-layer */
- __ufshcd_scsi_block_requests(hba);
+ /*
+ * Set error handling in progress flag early so that we
+ * don't issue new requests any more.
+ */
+ ufshcd_set_eh_in_progress(hba);
hba->ufshcd_state = UFSHCD_STATE_ERROR;
schedule_work(&hba->eh_work);
}
+ retval |= IRQ_HANDLED;
}
/*
* if (!queue_eh_work) -
@@ -5899,28 +6240,44 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
* itself without s/w intervention or errors that will be
* handled by the SCSI core layer.
*/
+ return retval;
}
/**
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
u32 tm_doorbell;
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
- wake_up(&hba->tm_wq);
+ if (hba->tm_condition) {
+ wake_up(&hba->tm_wq);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
ufsdbg_error_inject_dispatcher(hba,
ERR_INJECT_INTR, intr_status, &intr_status);
@@ -5928,16 +6285,18 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
hba->errors = UFSHCD_ERROR_MASK & intr_status;
if (hba->errors || hba->ce_error)
- ufshcd_check_errors(hba);
+ retval |= ufshcd_check_errors(hba);
if (intr_status & UFSHCD_UIC_MASK)
- ufshcd_uic_cmd_compl(hba, intr_status);
+ retval |= ufshcd_uic_cmd_compl(hba, intr_status);
if (intr_status & UTP_TASK_REQ_COMPL)
- ufshcd_tmc_handler(hba);
+ retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- ufshcd_transfer_req_compl(hba);
+ retval |= ufshcd_transfer_req_compl(hba);
+
+ return retval;
}
/**
@@ -5945,27 +6304,44 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* @irq: irq number
* @__hba: pointer to adapter instance
*
- * Returns IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
u32 intr_status, enabled_intr_status;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
+ int retries = hba->nutrs;
spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- enabled_intr_status =
- intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (intr_status)
- ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ /*
+ * There can be at most hba->nutrs requests in flight. In the worst
+ * case they complete one by one right after the interrupt status is
+ * read, so keep re-reading the interrupt status in a loop and handle
+ * it until all of them have been processed before returning.
+ */
+ do {
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ if (intr_status)
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ if (enabled_intr_status)
+ retval |= ufshcd_sl_intr(hba, enabled_intr_status);
- if (enabled_intr_status) {
- ufshcd_sl_intr(hba, enabled_intr_status);
- retval = IRQ_HANDLED;
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ } while (intr_status && --retries);
+
+ if (retval == IRQ_NONE) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
+ __func__, intr_status);
+ ufshcd_hex_dump(hba, "host regs: ", hba->mmio_base,
+ UFSHCI_REG_SPACE_SIZE);
}
+
spin_unlock(hba->host->host_lock);
return retval;
}
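
The loop added above follows a common MMIO interrupt-handler idiom: re-read the status register after servicing it, bound the loop with a retry count, and report IRQ_NONE only if nothing was ever handled. The stripped-down illustration below shows the idiom in isolation; the register offsets, names and retry count are hypothetical, not this controller's.

#include <linux/interrupt.h>
#include <linux/io.h>

#define EXAMPLE_REG_IRQ_STATUS	0x20	/* hypothetical offsets */
#define EXAMPLE_REG_IRQ_ENABLE	0x24

static irqreturn_t example_isr(int irq, void *dev_id)
{
	void __iomem *base = (void __iomem *)dev_id; /* assume MMIO base */
	irqreturn_t ret = IRQ_NONE;
	int retries = 32;		/* bound the loop */
	u32 status, enabled;

	status = readl(base + EXAMPLE_REG_IRQ_STATUS);
	do {
		enabled = status & readl(base + EXAMPLE_REG_IRQ_ENABLE);
		if (status)
			writel(status, base + EXAMPLE_REG_IRQ_STATUS); /* W1C ack */
		if (enabled)
			ret |= IRQ_HANDLED;	/* service 'enabled' sources here */

		/* events may have arrived while servicing: check again */
		status = readl(base + EXAMPLE_REG_IRQ_STATUS);
	} while (status && --retries);

	return ret;
}
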
@@ -6391,6 +6767,16 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
int retries = MAX_HOST_RESET_RETRIES;
do {
+ err = ufshcd_vops_full_reset(hba);
+ if (err)
+ dev_warn(hba->dev, "%s: full reset returned %d\n",
+ __func__, err);
+
+ err = ufshcd_reset_device(hba);
+ if (err)
+ dev_warn(hba->dev, "%s: device reset failed. err %d\n",
+ __func__, err);
+
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
@@ -6420,13 +6806,12 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
*/
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
- int err;
+ int err = SUCCESS;
unsigned long flags;
struct ufs_hba *hba;
hba = shost_priv(cmd->device->host);
- ufshcd_hold_all(hba);
/*
* Check if there is any race with fatal error handling.
* If so, wait for it to complete. Even though fatal error
@@ -6439,29 +6824,37 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
hba->ufshcd_state == UFSHCD_STATE_RESET))
break;
spin_unlock_irqrestore(hba->host->host_lock, flags);
- dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+ dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
flush_work(&hba->eh_work);
} while (1);
- hba->ufshcd_state = UFSHCD_STATE_RESET;
- ufshcd_set_eh_in_progress(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /*
+ * We don't know whether the previous reset really reset the host
+ * controller, so force a reset here to be sure.
+ */
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ hba->force_host_reset = true;
+ schedule_work(&hba->eh_work);
- ufshcd_update_error_stats(hba, UFS_ERR_EH);
- err = ufshcd_reset_and_restore(hba);
+ /* wait for the reset work to finish */
+ do {
+ if (!(work_pending(&hba->eh_work) ||
+ hba->ufshcd_state == UFSHCD_STATE_RESET))
+ break;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ dev_err(hba->dev, "%s: reset in progress - 2\n", __func__);
+ flush_work(&hba->eh_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ } while (1);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!err) {
- err = SUCCESS;
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- } else {
+ if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
+ ufshcd_is_link_active(hba))) {
err = FAILED;
hba->ufshcd_state = UFSHCD_STATE_ERROR;
}
- ufshcd_clear_eh_in_progress(hba);
+
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_release_all(hba);
return err;
}
@@ -6978,11 +7371,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ret)
goto out;
- /* Enable auto hibern8 if supported */
- if (ufshcd_is_auto_hibern8_supported(hba))
- ufshcd_set_auto_hibern8_timer(hba,
- hba->hibern8_on_idle.delay_ms);
-
/* Debug counters initialization */
ufshcd_clear_dbg_ufs_stats(hba);
/* set the default level for urgent bkops */
@@ -7064,20 +7452,38 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ufshcd_scsi_add_wlus(hba))
goto out;
+ /* Initialize devfreq after UFS device is detected */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
+ hba->clk_scaling.is_scaled_up = true;
+ if (!hba->devfreq) {
+ hba->devfreq = devfreq_add_device(hba->dev,
+ &ufs_devfreq_profile,
+ "simple_ondemand",
+ gov_data);
+ if (IS_ERR(hba->devfreq)) {
+ ret = PTR_ERR(hba->devfreq);
+ dev_err(hba->dev, "Unable to register with devfreq %d\n",
+ ret);
+ goto out;
+ }
+ }
+ hba->clk_scaling.is_allowed = true;
+ }
+
scsi_scan_host(hba->host);
pm_runtime_put_sync(hba->dev);
}
- /* Resume devfreq after UFS device is detected */
- if (ufshcd_is_clkscaling_supported(hba)) {
- memcpy(&hba->clk_scaling.saved_pwr_info.info, &hba->pwr_info,
- sizeof(struct ufs_pa_layer_attr));
- hba->clk_scaling.saved_pwr_info.is_valid = true;
- hba->clk_scaling.is_scaled_up = true;
- ufshcd_resume_clkscaling(hba);
- hba->clk_scaling.is_allowed = true;
- }
-
+ /*
+ * Enable auto hibern8 if supported, after full host and
+ * device initialization.
+ */
+ if (ufshcd_is_auto_hibern8_supported(hba))
+ ufshcd_set_auto_hibern8_timer(hba,
+ hba->hibern8_on_idle.delay_ms);
out:
/*
* If we failed to initialize the device or the device is not
@@ -7670,6 +8076,13 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
if (!head || list_empty(head))
goto out;
+ /* call vendor specific bus vote before enabling the clocks */
+ if (on) {
+ ret = ufshcd_vops_set_bus_vote(hba, on);
+ if (ret)
+ return ret;
+ }
+
/*
* vendor specific setup_clocks ops may depend on clocks managed by
* this standard driver hence call the vendor specific setup_clocks
@@ -7708,11 +8121,24 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
* this standard driver hence call the vendor specific setup_clocks
* after enabling the clocks managed here.
*/
- if (on)
+ if (on) {
ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
+ if (ret)
+ goto out;
+ }
+
+ /*
+ * call vendor specific bus vote to remove the vote after
+ * disabling the clocks.
+ */
+ if (!on)
+ ret = ufshcd_vops_set_bus_vote(hba, on);
out:
if (ret) {
+ if (on)
+ /* Can't do much if this fails */
+ (void) ufshcd_vops_set_bus_vote(hba, false);
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
clk_disable_unprepare(clki->clk);
@@ -7884,7 +8310,8 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
if (ufshcd_is_clkscaling_supported(hba)) {
- ufshcd_suspend_clkscaling(hba);
+ if (hba->devfreq)
+ ufshcd_suspend_clkscaling(hba);
destroy_workqueue(hba->clk_scaling.workq);
}
ufshcd_disable_clocks(hba, false);
@@ -8335,9 +8762,13 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto vendor_suspend;
}
} else if (ufshcd_is_link_off(hba)) {
- ret = ufshcd_host_reset_and_restore(hba);
/*
- * ufshcd_host_reset_and_restore() should have already
+ * A full initialization of the host and the device is required
+ * since the link was put to off during suspend.
+ */
+ ret = ufshcd_reset_and_restore(hba);
+ /*
+ * ufshcd_reset_and_restore() should have already
* set the link state as active
*/
if (ret || !ufshcd_is_link_active(hba))
@@ -8676,6 +9107,35 @@ static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
ufshcd_add_spm_lvl_sysfs_nodes(hba);
}
+static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
+{
+ bool suspend = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_scaling.is_allowed) {
+ hba->clk_scaling.is_allowed = false;
+ suspend = true;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /*
+ * Scaling work may already have been scheduled; make sure it
+ * does not race with shutdown.
+ */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
+ if (suspend)
+ ufshcd_suspend_clkscaling(hba);
+ }
+
+ /* Unregister so that devfreq_monitor can't race with shutdown */
+ if (hba->devfreq)
+ devfreq_remove_device(hba->devfreq);
+}
+
/**
* ufshcd_shutdown - shutdown routine
* @hba: per adapter instance
@@ -8686,11 +9146,35 @@ static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
*/
int ufshcd_shutdown(struct ufs_hba *hba)
{
- /*
- * TODO: This function should send the power down notification to
- * UFS device and then power off the UFS link. But we need to be sure
- * that there will not be any new UFS requests issued after this.
+ int ret = 0;
+
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
+ goto out;
+
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold_all(hba);
+ ufshcd_mark_shutdown_ongoing(hba);
+ ufshcd_shutdown_clkscaling(hba);
+ /*
+ * (1) Acquire the lock to stop any more requests
+ * (2) Wait for all issued requests to complete
*/
+ ufshcd_get_write_lock(hba);
+ ufshcd_scsi_block_requests(hba);
+ ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+ if (ret)
+ dev_err(hba->dev, "%s: waiting for DB clear: failed: %d\n",
+ __func__, ret);
+ /* Requests may have errored out above; let them be handled */
+ flush_work(&hba->eh_work);
+ /* Requests issued from contexts other than shutdown will fail from now on */
+ ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release_all(hba);
+ ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+out:
+ if (ret)
+ dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
+ /* allow force shutdown even in case of errors */
return 0;
}
EXPORT_SYMBOL(ufshcd_shutdown);
@@ -8928,10 +9412,10 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
* clock scaling is in progress
*/
ufshcd_scsi_block_requests(hba);
- down_write(&hba->clk_scaling_lock);
+ down_write(&hba->lock);
if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
ret = -EBUSY;
- up_write(&hba->clk_scaling_lock);
+ up_write(&hba->lock);
ufshcd_scsi_unblock_requests(hba);
}
@@ -8940,7 +9424,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
- up_write(&hba->clk_scaling_lock);
+ up_write(&hba->lock);
ufshcd_scsi_unblock_requests(hba);
}
@@ -9220,23 +9704,6 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
return 0;
}
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
-static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
- .upthreshold = 35,
- .downdifferential = 30,
- .simple_scaling = 1,
-};
-
-static void *gov_data = &ufshcd_ondemand_data;
-#else
-static void *gov_data;
-#endif
-
-static struct devfreq_dev_profile ufs_devfreq_profile = {
- .polling_ms = 40,
- .target = ufshcd_devfreq_target,
- .get_dev_status = ufshcd_devfreq_get_dev_status,
-};
static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
{
hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
@@ -9352,7 +9819,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Initialize mutex for device management commands */
mutex_init(&hba->dev_cmd.lock);
- init_rwsem(&hba->clk_scaling_lock);
+ init_rwsem(&hba->lock);
/* Initialize device management tag acquire wait queue */
init_waitqueue_head(&hba->dev_cmd.tag_wq);
@@ -9389,6 +9856,15 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto exit_gating;
}
+ /* Reset controller to power on reset (POR) state */
+ ufshcd_vops_full_reset(hba);
+
+ /* reset connected UFS device */
+ err = ufshcd_reset_device(hba);
+ if (err)
+ dev_warn(hba->dev, "%s: device reset failed. err %d\n",
+ __func__, err);
+
/* Host controller enable */
err = ufshcd_hba_enable(hba);
if (err) {
@@ -9401,16 +9877,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
if (ufshcd_is_clkscaling_supported(hba)) {
char wq_name[sizeof("ufs_clkscaling_00")];
- hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
- "simple_ondemand", gov_data);
- if (IS_ERR(hba->devfreq)) {
- dev_err(hba->dev, "Unable to register with devfreq %ld\n",
- PTR_ERR(hba->devfreq));
- err = PTR_ERR(hba->devfreq);
- goto out_remove_scsi_host;
- }
- hba->clk_scaling.is_suspended = false;
-
INIT_WORK(&hba->clk_scaling.suspend_work,
ufshcd_clk_scaling_suspend_work);
INIT_WORK(&hba->clk_scaling.resume_work,
@@ -9420,8 +9886,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
host->host_no);
hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
- /* Suspend devfreq until the UFS device is detected */
- ufshcd_suspend_clkscaling(hba);
ufshcd_clkscaling_init_sysfs(hba);
}
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 709801f..b70606b 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -3,7 +3,7 @@
*
* This code is based on drivers/scsi/ufs/ufshcd.h
* Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -39,6 +39,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -55,6 +56,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include "unipro.h"
#include <asm/irq.h>
@@ -309,6 +311,7 @@ struct ufs_pwr_mode_info {
* @update_sec_cfg: called to restore host controller secure configuration
* @get_scale_down_gear: called to get the minimum supported gear to
* scale down
+ * @set_bus_vote: called to vote for the required bus bandwidth
* @add_debugfs: used to add debugfs entries
* @remove_debugfs: used to remove debugfs entries
*/
@@ -332,9 +335,10 @@ struct ufs_hba_variant_ops {
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
int (*full_reset)(struct ufs_hba *);
- void (*dbg_register_dump)(struct ufs_hba *hba);
+ void (*dbg_register_dump)(struct ufs_hba *hba, bool no_sleep);
int (*update_sec_cfg)(struct ufs_hba *hba, bool restore_sec_cfg);
u32 (*get_scale_down_gear)(struct ufs_hba *);
+ int (*set_bus_vote)(struct ufs_hba *, bool);
#ifdef CONFIG_DEBUG_FS
void (*add_debugfs)(struct ufs_hba *hba, struct dentry *root);
void (*remove_debugfs)(struct ufs_hba *hba);
@@ -393,8 +397,9 @@ enum clk_gating_state {
/**
* struct ufs_clk_gating - UFS clock gating related info
- * @gate_work: worker to turn off clocks after some delay as specified in
- * delay_ms
+ * @gate_hrtimer: hrtimer to invoke @gate_work after some delay as
+ * specified in @delay_ms
+ * @gate_work: worker to turn off clocks
* @ungate_work: worker to turn on clocks that will be used in case of
* interrupt context
* @state: the current clocks state
@@ -412,7 +417,8 @@ enum clk_gating_state {
* completion before gating clocks.
*/
struct ufs_clk_gating {
- struct delayed_work gate_work;
+ struct hrtimer gate_hrtimer;
+ struct work_struct gate_work;
struct work_struct ungate_work;
enum clk_gating_state state;
unsigned long delay_ms;
@@ -425,6 +431,7 @@ struct ufs_clk_gating {
struct device_attribute enable_attr;
bool is_enabled;
int active_reqs;
+ struct workqueue_struct *ungating_workq;
};
/* Hibern8 state */
@@ -801,6 +808,7 @@ struct ufs_hba {
u32 saved_uic_err;
u32 saved_ce_err;
bool silence_err_logs;
+ bool force_host_reset;
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
@@ -882,17 +890,33 @@ struct ufs_hba {
enum bkops_status urgent_bkops_lvl;
bool is_urgent_bkops_lvl_checked;
- struct rw_semaphore clk_scaling_lock;
+ /* Synchronize between shutdown, scaling and request arrival contexts */
+ struct rw_semaphore lock;
+ unsigned long shutdown_in_prog;
+ struct reset_control *core_reset;
/* If set, don't gate device ref_clk during clock gating */
bool no_ref_clk_gating;
int scsi_block_reqs_cnt;
+ bool full_init_linereset;
+ struct pinctrl *pctrl;
+
int latency_hist_enabled;
struct io_latency_state io_lat_s;
};
+static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
+{
+ set_bit(0, &hba->shutdown_in_prog);
+}
+
+static inline bool ufshcd_is_shutdown_ongoing(struct ufs_hba *hba)
+{
+ return !!(test_bit(0, &hba->shutdown_in_prog));
+}
+
/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{
@@ -1233,10 +1257,11 @@ static inline int ufshcd_vops_full_reset(struct ufs_hba *hba)
}
-static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
+static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba,
+ bool no_sleep)
{
if (hba->var && hba->var->vops && hba->var->vops->dbg_register_dump)
- hba->var->vops->dbg_register_dump(hba);
+ hba->var->vops->dbg_register_dump(hba, no_sleep);
}
static inline int ufshcd_vops_update_sec_cfg(struct ufs_hba *hba,
@@ -1255,6 +1280,13 @@ static inline u32 ufshcd_vops_get_scale_down_gear(struct ufs_hba *hba)
return UFS_HS_G1;
}
+static inline int ufshcd_vops_set_bus_vote(struct ufs_hba *hba, bool on)
+{
+ if (hba->var && hba->var->vops && hba->var->vops->set_bus_vote)
+ return hba->var->vops->set_bus_vote(hba, on);
+ return 0;
+}
+
#ifdef CONFIG_DEBUG_FS
static inline void ufshcd_vops_add_debugfs(struct ufs_hba *hba,
struct dentry *root)
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index d65dad0..c0e4650 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -190,6 +190,7 @@ enum {
/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
+#define UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR UFS_BIT(4)
#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
diff --git a/drivers/soc/qcom/msm-core.c b/drivers/soc/qcom/msm-core.c
index 5a0b261..fa5c6b2 100644
--- a/drivers/soc/qcom/msm-core.c
+++ b/drivers/soc/qcom/msm-core.c
@@ -402,9 +402,9 @@ static long msm_core_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
get_user(cluster, &argp->cluster);
- mpidr = (argp->cluster << (MAX_CORES_PER_CLUSTER *
+ mpidr = (cluster << (MAX_CORES_PER_CLUSTER *
MAX_NUM_OF_CLUSTERS));
- cpumask = argp->cpumask;
+ get_user(cpumask, &argp->cpumask);
switch (cmd) {
case EA_LEAKAGE:
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index 1f5cfc0..51e03c3 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -983,7 +983,6 @@ static int msm_bus_rsc_init(struct platform_device *pdev,
if (IS_ERR_OR_NULL(rscdev->mbox)) {
MSM_BUS_ERR("%s: Failed to get mbox:%s", __func__,
node_dev->node_info->name);
- return PTR_ERR(rscdev->mbox);
}
// Add way to count # of VCDs, initialize LL
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
index 7179fe0..40aac6a 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
@@ -217,6 +217,7 @@ static void apr_tal_notify_remote_rx_intent(void *handle, const void *priv,
*/
pr_debug("%s: remote queued an intent\n", __func__);
apr_ch->if_remote_intent_ready = true;
+ wake_up(&apr_ch->wait);
}
void apr_tal_notify_state(void *handle, const void *priv, unsigned int event)
diff --git a/drivers/soc/qcom/qdsp6v2/audio_notifier.c b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
index a2b0f0e..414c123 100644
--- a/drivers/soc/qcom/qdsp6v2/audio_notifier.c
+++ b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
@@ -394,8 +394,8 @@ static void audio_notifer_reg_all_clients(void)
int ret;
list_for_each_safe(ptr, next, &client_list) {
- client_data = list_entry(ptr,
- struct client_data, list);
+ client_data = list_entry(ptr, struct client_data, list);
+
ret = audio_notifer_reg_client(client_data);
if (ret < 0)
pr_err("%s: audio_notifer_reg_client failed for client %s, ret %d\n",
@@ -518,9 +518,8 @@ int audio_notifier_deregister(char *client_name)
goto done;
}
mutex_lock(¬ifier_mutex);
- list_for_each_safe(ptr, next, &client_data->list) {
- client_data = list_entry(ptr, struct client_data,
- list);
+ list_for_each_safe(ptr, next, &client_list) {
+ client_data = list_entry(ptr, struct client_data, list);
if (!strcmp(client_name, client_data->client_name)) {
ret2 = audio_notifer_dereg_client(client_data);
if (ret2 < 0) {
diff --git a/drivers/soc/qcom/qdsp6v2/voice_svc.c b/drivers/soc/qcom/qdsp6v2/voice_svc.c
index 07e8991..f3b1b83 100644
--- a/drivers/soc/qcom/qdsp6v2/voice_svc.c
+++ b/drivers/soc/qcom/qdsp6v2/voice_svc.c
@@ -42,6 +42,12 @@ struct voice_svc_prvt {
struct list_head response_queue;
wait_queue_head_t response_wait;
spinlock_t response_lock;
+ /*
+ * This mutex ensures responses are processed in sequential order and
+ * that no two threads access and free the same response at the same
+ * time.
+ */
+ struct mutex response_mutex_lock;
};
struct apr_data {
@@ -361,6 +367,9 @@ static ssize_t voice_svc_write(struct file *file, const char __user *buf,
struct voice_svc_prvt *prtd;
struct voice_svc_write_msg *data = NULL;
uint32_t cmd;
+ struct voice_svc_register *register_data = NULL;
+ struct voice_svc_cmd_request *request_data = NULL;
+ uint32_t request_payload_size;
pr_debug("%s\n", __func__);
@@ -409,12 +418,19 @@ static ssize_t voice_svc_write(struct file *file, const char __user *buf,
*/
if (count == (sizeof(struct voice_svc_write_msg) +
sizeof(struct voice_svc_register))) {
- ret = process_reg_cmd(
- (struct voice_svc_register *)data->payload, prtd);
+ register_data =
+ (struct voice_svc_register *)data->payload;
+ if (register_data == NULL) {
+ pr_err("%s: register data is NULL", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ ret = process_reg_cmd(register_data, prtd);
if (!ret)
ret = count;
} else {
- pr_err("%s: invalid payload size\n", __func__);
+ pr_err("%s: invalid data payload size for register command\n",
+ __func__);
ret = -EINVAL;
goto done;
}
@@ -423,19 +439,40 @@ static ssize_t voice_svc_write(struct file *file, const char __user *buf,
/*
* Check that count reflects the expected size to ensure
* sufficient memory was allocated. Since voice_svc_cmd_request
- * has a variable size, check the minimum value count must be.
+ * has a variable size, first check the minimum count needed to
+ * parse the request itself, then check that count also covers the
+ * request's declared payload size.
*/
if (count >= (sizeof(struct voice_svc_write_msg) +
sizeof(struct voice_svc_cmd_request))) {
- ret = voice_svc_send_req(
- (struct voice_svc_cmd_request *)data->payload, prtd);
- if (!ret)
- ret = count;
- } else {
- pr_err("%s: invalid payload size\n", __func__);
- ret = -EINVAL;
- goto done;
- }
+ request_data =
+ (struct voice_svc_cmd_request *)data->payload;
+ if (request_data == NULL) {
+ pr_err("%s: request data is NULL", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ request_payload_size = request_data->payload_size;
+
+ if (count >= (sizeof(struct voice_svc_write_msg) +
+ sizeof(struct voice_svc_cmd_request) +
+ request_payload_size)) {
+ ret = voice_svc_send_req(request_data, prtd);
+ if (!ret)
+ ret = count;
+ } else {
+ pr_err("%s: invalid request payload size\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ } else {
+ pr_err("%s: invalid data payload size for request command\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
break;
default:
pr_debug("%s: Invalid command: %u\n", __func__, cmd);
@@ -466,6 +503,7 @@ static ssize_t voice_svc_read(struct file *file, char __user *arg,
goto done;
}
+ mutex_lock(&prtd->response_mutex_lock);
spin_lock_irqsave(&prtd->response_lock, spin_flags);
if (list_empty(&prtd->response_queue)) {
@@ -479,7 +517,7 @@ static ssize_t voice_svc_read(struct file *file, char __user *arg,
pr_debug("%s: Read timeout\n", __func__);
ret = -ETIMEDOUT;
- goto done;
+ goto unlock;
} else if (ret > 0 && !list_empty(&prtd->response_queue)) {
pr_debug("%s: Interrupt received for response\n",
__func__);
@@ -487,7 +525,7 @@ static ssize_t voice_svc_read(struct file *file, char __user *arg,
pr_debug("%s: Interrupted by SIGNAL %d\n",
__func__, ret);
- goto done;
+ goto unlock;
}
spin_lock_irqsave(&prtd->response_lock, spin_flags);
@@ -506,7 +544,7 @@ static ssize_t voice_svc_read(struct file *file, char __user *arg,
__func__, count, size);
ret = -ENOMEM;
- goto done;
+ goto unlock;
}
if (!access_ok(VERIFY_WRITE, arg, size)) {
@@ -514,7 +552,7 @@ static ssize_t voice_svc_read(struct file *file, char __user *arg,
__func__);
ret = -EPERM;
- goto done;
+ goto unlock;
}
ret = copy_to_user(arg, &resp->resp,
@@ -524,7 +562,7 @@ static ssize_t voice_svc_read(struct file *file, char __user *arg,
pr_err("%s: copy_to_user failed %d\n", __func__, ret);
ret = -EPERM;
- goto done;
+ goto unlock;
}
spin_lock_irqsave(&prtd->response_lock, spin_flags);
@@ -538,6 +576,8 @@ static ssize_t voice_svc_read(struct file *file, char __user *arg,
ret = count;
+unlock:
+ mutex_unlock(&prtd->response_mutex_lock);
done:
return ret;
}
@@ -591,6 +631,7 @@ static int voice_svc_open(struct inode *inode, struct file *file)
INIT_LIST_HEAD(&prtd->response_queue);
init_waitqueue_head(&prtd->response_wait);
spin_lock_init(&prtd->response_lock);
+ mutex_init(&prtd->response_mutex_lock);
file->private_data = (void *)prtd;
/* Current APR implementation doesn't support session based
@@ -641,6 +682,7 @@ static int voice_svc_release(struct inode *inode, struct file *file)
pr_err("%s: Failed to dereg MVM %d\n", __func__, ret);
}
+ mutex_lock(&prtd->response_mutex_lock);
spin_lock_irqsave(&prtd->response_lock, spin_flags);
while (!list_empty(&prtd->response_queue)) {
@@ -654,6 +696,9 @@ static int voice_svc_release(struct inode *inode, struct file *file)
}
spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
+ mutex_unlock(&prtd->response_mutex_lock);
+
+ mutex_destroy(&prtd->response_mutex_lock);
kfree(file->private_data);
file->private_data = NULL;
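The voice_svc_write() change above is a two-stage length check for the variable-size request command: count must first cover the write-message header plus the fixed request header, and only then can the request's self-declared payload_size be trusted and checked as well. A minimal stand-alone sketch of that check, using hypothetical stand-in structs rather than the driver's definitions:

/* Hedged sketch of the two-stage length check; struct names are placeholders. */
#include <stddef.h>
#include <stdint.h>

struct msg_hdr { uint32_t cmd; };                             /* stands in for voice_svc_write_msg */
struct cmd_req { uint32_t payload_size; uint8_t payload[]; }; /* stands in for voice_svc_cmd_request */

static int request_fits(const void *buf, size_t count)
{
	const struct cmd_req *req;

	/* stage 1: enough bytes to read the fixed request header at all? */
	if (count < sizeof(struct msg_hdr) + sizeof(struct cmd_req))
		return 0;

	req = (const struct cmd_req *)((const uint8_t *)buf + sizeof(struct msg_hdr));

	/* stage 2: enough bytes for the payload the request claims to carry? */
	if (count < sizeof(struct msg_hdr) + sizeof(struct cmd_req) + req->payload_size)
		return 0;

	return 1;
}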
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 996ce64..aeecf29 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -454,7 +454,7 @@ EXPORT_SYMBOL(rpmh_write);
* @n: The array of count of elements in each batch, 0 terminated.
*
* Write a request to the mailbox controller without caching. If the request
- * state is ACTIVE_ONLY, then the requests are treated as completion requests
+ * state is ACTIVE or AWAKE, then the requests are treated as completion requests
* and sent to the controller immediately. The function waits until all the
* commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
* request is sent as fire-n-forget and no ack is expected.
@@ -468,7 +468,8 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
atomic_t wait_count = ATOMIC_INIT(0); /* overwritten */
int count = 0;
- int ret, i = 0;
+ int ret, i, j, k;
+ bool complete_set;
if (rpmh_standalone)
return 0;
@@ -479,6 +480,27 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
if (count >= RPMH_MAX_REQ_IN_BATCH)
return -EINVAL;
+ if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
+ /*
+ * Ensure the 'complete' bit is set for at least one command in
+ * each set for active/awake requests.
+ */
+ for (i = 0, k = 0; i < count; k += n[i], i++) {
+ complete_set = false;
+ for (j = 0; j < n[i]; j++) {
+ if (cmd[k + j].complete) {
+ complete_set = true;
+ break;
+ }
+ }
+ if (!complete_set) {
+ dev_err(rc->dev, "No completion set for batch\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Create async request batches */
for (i = 0; i < count; i++) {
rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i], false);
if (IS_ERR_OR_NULL(rpm_msg[i]))
@@ -488,11 +510,11 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
cmd += n[i];
}
- if (state == RPMH_ACTIVE_ONLY_STATE) {
+ /* Send if Active or Awake and wait for the whole set to complete */
+ if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
might_sleep();
atomic_set(&wait_count, count);
for (i = 0; i < count; i++) {
- rpm_msg[i]->msg.is_complete = true;
/* Bypass caching and write to mailbox directly */
ret = mbox_send_message(rc->chan, &rpm_msg[i]->msg);
if (ret < 0)
@@ -501,6 +523,7 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
return wait_event_interruptible(waitq,
atomic_read(&wait_count) == 0);
} else {
+ /* Send Sleep requests to the controller, expect no response */
for (i = 0; i < count; i++) {
ret = mbox_send_controller_data(rc->chan,
&rpm_msg[i]->msg);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 2a4000d..2db473a 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -118,6 +118,16 @@
help
Enable this to let the user space manage the platform thermals.
+config THERMAL_GOV_LOW_LIMITS
+ bool "Low limits mitigation governor"
+ help
+ Enable this to manage platform limits using the low limits
+ governor.
+
+ This governor monitors readings that fall below a trip
+ threshold and triggers floor mitigation until the reading
+ recovers past the trip hysteresis.
+
config THERMAL_GOV_POWER_ALLOCATOR
bool "Power allocator thermal governor"
help
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index e12c199..2faed7f 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -14,6 +14,7 @@
thermal_sys-$(CONFIG_THERMAL_GOV_BANG_BANG) += gov_bang_bang.o
thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
+thermal_sys-$(CONFIG_THERMAL_GOV_LOW_LIMITS) += gov_low_limits.o
thermal_sys-$(CONFIG_THERMAL_GOV_POWER_ALLOCATOR) += power_allocator.o
# cpufreq cooling
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 5556b5b..a6245d5 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -73,6 +73,10 @@ struct power_table {
* cooling devices.
* @clipped_freq: integer value representing the absolute value of the clipped
* frequency.
+ * @cpufreq_floor_state: integer value representing the frequency floor state
+ * of cpufreq cooling devices.
+ * @floor_freq: integer value representing the absolute value of the floor
+ * frequency.
* @max_level: maximum cooling level. [0..max_level-1: <freq>
* max_level: Core unavailable]
* @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
@@ -95,6 +99,8 @@ struct cpufreq_cooling_device {
struct thermal_cooling_device *cool_dev;
unsigned int cpufreq_state;
unsigned int clipped_freq;
+ unsigned int cpufreq_floor_state;
+ unsigned int floor_freq;
unsigned int max_level;
unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
@@ -222,7 +228,7 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
- unsigned long clipped_freq;
+ unsigned long clipped_freq, floor_freq;
struct cpufreq_cooling_device *cpufreq_dev;
if (event != CPUFREQ_ADJUST)
@@ -243,11 +249,16 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
*
* But, if clipped_freq is greater than policy->max, we don't
* need to do anything.
+ *
+ * Similarly, if policy minimum set by the user is less than
+ * the floor_frequency, then adjust the policy->min.
*/
clipped_freq = cpufreq_dev->clipped_freq;
+ floor_freq = cpufreq_dev->floor_freq;
- if (policy->max > clipped_freq)
- cpufreq_verify_within_limits(policy, 0, clipped_freq);
+ if (policy->max > clipped_freq || policy->min < floor_freq)
+ cpufreq_verify_within_limits(policy, floor_freq,
+ clipped_freq);
break;
}
mutex_unlock(&cooling_list_lock);
@@ -495,6 +506,58 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
}
/**
+ * cpufreq_get_min_state - callback function to get the device floor state.
+ * @cdev: thermal cooling device pointer.
+ * @state: fill this variable with the cooling device floor.
+ *
+ * Callback for the thermal cooling device to return the cpufreq
+ * floor state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int cpufreq_get_min_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+
+ *state = cpufreq_device->cpufreq_floor_state;
+
+ return 0;
+}
+
+/**
+ * cpufreq_set_min_state - callback function to set the device floor state.
+ * @cdev: thermal cooling device pointer.
+ * @state: the floor state to be set for the cooling device.
+ *
+ * Callback for the thermal cooling device to change the cpufreq
+ * floor state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int cpufreq_set_min_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+ unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
+ unsigned int floor_freq;
+
+ if (state > cpufreq_device->max_level)
+ state = cpufreq_device->max_level;
+
+ if (cpufreq_device->cpufreq_floor_state == state)
+ return 0;
+
+ floor_freq = cpufreq_device->freq_table[state];
+ cpufreq_device->cpufreq_floor_state = state;
+ cpufreq_device->floor_freq = floor_freq;
+
+ cpufreq_update_policy(cpu);
+
+ return 0;
+}
+
+/**
* cpufreq_get_cur_state - callback function to get the current cooling state.
* @cdev: thermal cooling device pointer.
* @state: fill this variable with the current cooling state.
@@ -765,6 +828,8 @@ static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
.get_max_state = cpufreq_get_max_state,
.get_cur_state = cpufreq_get_cur_state,
.set_cur_state = cpufreq_set_cur_state,
+ .set_min_state = cpufreq_set_min_state,
+ .get_min_state = cpufreq_get_min_state,
};
static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
@@ -929,6 +994,9 @@ __cpufreq_cooling_register(struct device_node *np,
goto remove_idr;
cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
+ cpufreq_dev->floor_freq =
+ cpufreq_dev->freq_table[cpufreq_dev->max_level];
+ cpufreq_dev->cpufreq_floor_state = cpufreq_dev->max_level;
cpufreq_dev->cool_dev = cool_dev;
mutex_lock(&cooling_cpufreq_lock);
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 81631b1..b2990c1 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -51,6 +51,7 @@ struct devfreq_cooling_device {
struct thermal_cooling_device *cdev;
struct devfreq *devfreq;
unsigned long cooling_state;
+ unsigned long cooling_min_state;
u32 *power_table;
u32 *freq_table;
size_t freq_table_size;
@@ -96,13 +97,15 @@ static void release_idr(struct idr *idr, int id)
/**
* partition_enable_opps() - disable all opps above a given state
* @dfc: Pointer to devfreq we are operating on
- * @cdev_state: cooling device state we're setting
+ * @cdev_max_state: Max cooling device state we're setting
+ * @cdev_min_state: Min cooling device state we're setting
*
* Go through the OPPs of the device, enabling all OPPs until
* @cdev_state and disabling those frequencies above it.
*/
static int partition_enable_opps(struct devfreq_cooling_device *dfc,
- unsigned long cdev_state)
+ unsigned long cdev_max_state,
+ unsigned long cdev_min_state)
{
int i;
struct device *dev = dfc->devfreq->dev.parent;
@@ -111,7 +114,8 @@ static int partition_enable_opps(struct devfreq_cooling_device *dfc,
struct dev_pm_opp *opp;
int ret = 0;
unsigned int freq = dfc->freq_table[i];
- bool want_enable = i >= cdev_state ? true : false;
+ bool want_enable = (i >= cdev_max_state) &&
+ (i <= cdev_min_state);
rcu_read_lock();
opp = dev_pm_opp_find_freq_exact(dev, freq, !want_enable);
@@ -144,6 +148,41 @@ static int devfreq_cooling_get_max_state(struct thermal_cooling_device *cdev,
return 0;
}
+static int devfreq_cooling_get_min_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct devfreq_cooling_device *dfc = cdev->devdata;
+
+ *state = dfc->cooling_min_state;
+
+ return 0;
+}
+
+static int devfreq_cooling_set_min_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct devfreq_cooling_device *dfc = cdev->devdata;
+ struct devfreq *df = dfc->devfreq;
+ struct device *dev = df->dev.parent;
+ int ret;
+
+ if (state == dfc->cooling_min_state)
+ return 0;
+
+ dev_dbg(dev, "Setting cooling min state %lu\n", state);
+
+ if (state >= dfc->freq_table_size)
+ state = dfc->freq_table_size - 1;
+
+ ret = partition_enable_opps(dfc, dfc->cooling_state, state);
+ if (ret)
+ return ret;
+
+ dfc->cooling_min_state = state;
+
+ return 0;
+}
+
static int devfreq_cooling_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
@@ -170,7 +209,7 @@ static int devfreq_cooling_set_cur_state(struct thermal_cooling_device *cdev,
if (state >= dfc->freq_table_size)
return -EINVAL;
- ret = partition_enable_opps(dfc, state);
+ ret = partition_enable_opps(dfc, state, dfc->cooling_min_state);
if (ret)
return ret;
@@ -361,6 +400,8 @@ static struct thermal_cooling_device_ops devfreq_cooling_ops = {
.get_max_state = devfreq_cooling_get_max_state,
.get_cur_state = devfreq_cooling_get_cur_state,
.set_cur_state = devfreq_cooling_set_cur_state,
+ .get_min_state = devfreq_cooling_get_min_state,
+ .set_min_state = devfreq_cooling_set_min_state,
};
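Because freq_table here is also kept in descending order, the two states form an index window: the cap (cdev_max_state) disables everything faster than it and the floor (cdev_min_state) disables everything slower than it, so an OPP stays enabled only while cdev_max_state <= i <= cdev_min_state. A minimal sketch of just that window test:

/* Hedged sketch: which indices of a descending OPP table stay enabled. */
static bool opp_stays_enabled(unsigned long i,
			      unsigned long cdev_max_state,
			      unsigned long cdev_min_state)
{
	/* i == 0 is the fastest OPP; larger i means a lower frequency. */
	return i >= cdev_max_state && i <= cdev_min_state;
}

partition_enable_opps() above applies exactly this test to each OPP before enabling or disabling it.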
/**
@@ -499,6 +540,7 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
if (err)
goto free_tables;
+ dfc->cooling_min_state = dfc->freq_table_size - 1;
snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d", dfc->id);
cdev = thermal_of_cooling_device_register(np, dev_name, dfc,
diff --git a/drivers/thermal/gov_low_limits.c b/drivers/thermal/gov_low_limits.c
new file mode 100644
index 0000000..cf2dbc4
--- /dev/null
+++ b/drivers/thermal/gov_low_limits.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2012 Intel Corp
+ * Copyright (C) 2012 Durgadoss R <durgadoss.r@intel.com>
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/thermal.h>
+#include <trace/events/thermal.h>
+
+#include "thermal_core.h"
+
+static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
+{
+ int trip_temp, trip_hyst;
+ enum thermal_trip_type trip_type;
+ struct thermal_instance *instance;
+ bool throttle;
+ int old_target;
+
+ tz->ops->get_trip_temp(tz, trip, &trip_temp);
+ tz->ops->get_trip_type(tz, trip, &trip_type);
+ if (tz->ops->get_trip_hyst) {
+ tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
+ trip_hyst = trip_temp + trip_hyst;
+ } else {
+ trip_hyst = trip_temp;
+ }
+
+ mutex_lock(&tz->lock);
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (instance->trip != trip)
+ continue;
+
+ if ((tz->temperature <= trip_temp) ||
+ (instance->target != THERMAL_NO_TARGET
+ && tz->temperature <= trip_hyst))
+ throttle = true;
+ else
+ throttle = false;
+
+ dev_dbg(&tz->device,
+ "Trip%d[type=%d,temp=%d,hyst=%d],throttle=%d\n",
+ trip, trip_type, trip_temp, trip_hyst, throttle);
+
+ old_target = instance->target;
+ instance->target = (throttle) ? instance->upper
+ : THERMAL_NO_TARGET;
+ dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
+ old_target, (int)instance->target);
+
+ if (old_target == instance->target)
+ continue;
+
+ if (old_target == THERMAL_NO_TARGET &&
+ instance->target != THERMAL_NO_TARGET) {
+ trace_thermal_zone_trip(tz, trip, trip_type);
+ tz->passive += 1;
+ } else if (old_target != THERMAL_NO_TARGET &&
+ instance->target == THERMAL_NO_TARGET) {
+ tz->passive -= 1;
+ }
+
+ instance->cdev->updated = false; /* cdev needs update */
+ }
+
+ mutex_unlock(&tz->lock);
+}
+
+/**
+ * low_limits_throttle - throttles devices associated with the given zone
+ * @tz - thermal_zone_device
+ * @trip - the trip point
+ *
+ * Throttling Logic: If the sensor reading goes below a trip point, the
+ * pre-defined mitigation will be applied for the cooling device.
+ * If the sensor reading goes above the trip hysteresis, the
+ * mitigation will be removed.
+ */
+static int low_limits_throttle(struct thermal_zone_device *tz, int trip)
+{
+ struct thermal_instance *instance;
+
+ thermal_zone_trip_update(tz, trip);
+
+ mutex_lock(&tz->lock);
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node)
+ thermal_cdev_update(instance->cdev);
+
+ mutex_unlock(&tz->lock);
+
+ return 0;
+}
+
+static struct thermal_governor thermal_gov_low_limits_floor = {
+ .name = "low_limits_floor",
+ .throttle = low_limits_throttle,
+ .min_state_throttle = 1,
+};
+
+static struct thermal_governor thermal_gov_low_limits_cap = {
+ .name = "low_limits_cap",
+ .throttle = low_limits_throttle,
+};
+
+int thermal_gov_low_limits_register(void)
+{
+ thermal_register_governor(&thermal_gov_low_limits_cap);
+ return thermal_register_governor(&thermal_gov_low_limits_floor);
+}
+
+void thermal_gov_low_limits_unregister(void)
+{
+ thermal_unregister_governor(&thermal_gov_low_limits_cap);
+ thermal_unregister_governor(&thermal_gov_low_limits_floor);
+}
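The governor is essentially gov_bang_bang with the comparison inverted: mitigation (the instance's upper state) is applied while the zone temperature sits at or below the trip, and held until the reading climbs past trip + hysteresis. A condensed sketch of only that decision, under the same variable meanings as thermal_zone_trip_update() above:

/* Hedged sketch of the low-limit throttle decision. */
static bool low_limit_should_throttle(int temperature, int trip_temp,
				      int trip_hyst, bool currently_throttled)
{
	if (temperature <= trip_temp)
		return true;		/* at or below the floor trip: mitigate */
	if (currently_throttled && temperature <= trip_temp + trip_hyst)
		return true;		/* still inside the hysteresis band: hold */
	return false;			/* warm enough again: release */
}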
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 5b627ea..7b45b9a 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -2348,6 +2348,10 @@ static int __init thermal_register_governors(void)
if (result)
return result;
+ result = thermal_gov_low_limits_register();
+ if (result)
+ return result;
+
return thermal_gov_power_allocator_register();
}
@@ -2357,6 +2361,7 @@ static void thermal_unregister_governors(void)
thermal_gov_fair_share_unregister();
thermal_gov_bang_bang_unregister();
thermal_gov_user_space_unregister();
+ thermal_gov_low_limits_unregister();
thermal_gov_power_allocator_unregister();
}
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 9408f3f..eca8c3c 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -98,6 +98,14 @@ static inline int thermal_gov_power_allocator_register(void) { return 0; }
static inline void thermal_gov_power_allocator_unregister(void) {}
#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */
+#ifdef CONFIG_THERMAL_GOV_LOW_LIMITS
+int thermal_gov_low_limits_register(void);
+void thermal_gov_low_limits_unregister(void);
+#else
+static inline int thermal_gov_low_limits_register(void) { return 0; }
+static inline void thermal_gov_low_limits_unregister(void) {}
+#endif /* CONFIG_THERMAL_GOV_LOW_LIMITS */
+
/* device tree support */
#ifdef CONFIG_THERMAL_OF
int of_parse_thermal_zones(void);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 9fcb063..233a11a 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -2098,8 +2098,10 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
dwc3_msm_config_gdsc(mdwc, 0);
clk_disable_unprepare(mdwc->sleep_clk);
- if (mdwc->iommu_map)
+ if (mdwc->iommu_map) {
arm_iommu_detach_device(mdwc->dev);
+ dev_dbg(mdwc->dev, "IOMMU detached\n");
+ }
}
/* Remove bus voting */
@@ -2233,6 +2235,16 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
u32 tmp;
+ if (mdwc->iommu_map) {
+ ret = arm_iommu_attach_device(mdwc->dev,
+ mdwc->iommu_map);
+ if (ret)
+ dev_err(mdwc->dev, "IOMMU attach failed (%d)\n",
+ ret);
+ else
+ dev_dbg(mdwc->dev, "attached to IOMMU\n");
+ }
+
dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
dwc3_msm_power_collapse_por(mdwc);
@@ -2245,16 +2257,6 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
-
- if (mdwc->iommu_map) {
- ret = arm_iommu_attach_device(mdwc->dev,
- mdwc->iommu_map);
- if (ret)
- dev_err(mdwc->dev, "IOMMU attach failed (%d)\n",
- ret);
- else
- dev_dbg(mdwc->dev, "attached to IOMMU\n");
- }
}
atomic_set(&dwc->in_lpm, 0);
@@ -2812,12 +2814,22 @@ static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
if (ret) {
dev_err(mdwc->dev, "IOMMU set atomic attribute failed (%d)\n",
ret);
- arm_iommu_release_mapping(mdwc->iommu_map);
- mdwc->iommu_map = NULL;
- return ret;
+ goto release_mapping;
}
+ ret = arm_iommu_attach_device(mdwc->dev, mdwc->iommu_map);
+ if (ret) {
+ dev_err(mdwc->dev, "IOMMU attach failed (%d)\n", ret);
+ goto release_mapping;
+ }
+ dev_dbg(mdwc->dev, "attached to IOMMU\n");
+
return 0;
+
+release_mapping:
+ arm_iommu_release_mapping(mdwc->iommu_map);
+ mdwc->iommu_map = NULL;
+ return ret;
}
static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
@@ -3187,6 +3199,10 @@ static int dwc3_msm_probe(struct platform_device *pdev)
ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
&mdwc->num_gsi_event_buffers);
+ /* IOMMU will be reattached upon each resume/connect */
+ if (mdwc->iommu_map)
+ arm_iommu_detach_device(mdwc->dev);
+
/*
* Clocks and regulators will not be turned on until the first time
* runtime PM resume is called. This is to allow for booting up with
@@ -3253,8 +3269,10 @@ static int dwc3_msm_probe(struct platform_device *pdev)
if (mdwc->bus_perf_client)
msm_bus_scale_unregister_client(mdwc->bus_perf_client);
uninit_iommu:
- if (mdwc->iommu_map)
+ if (mdwc->iommu_map) {
+ arm_iommu_detach_device(mdwc->dev);
arm_iommu_release_mapping(mdwc->iommu_map);
+ }
err:
return ret;
}
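The net effect of the dwc3-msm changes is a fixed ordering: probe attaches the mapping once to validate it and immediately detaches, suspend detaches after the controller has entered power collapse, and resume re-attaches before the power-collapse POR sequence touches the core. A reduced sketch of that ordering (helper names are illustrative, error handling trimmed):

/* Hedged sketch of the suspend/resume IOMMU ordering used above. */
static void usb_lpm_enter(struct dwc3_msm *mdwc)
{
	/* ... power collapse: GDSC off, sleep clock off ... */
	if (mdwc->iommu_map)
		arm_iommu_detach_device(mdwc->dev);	/* mapping idle while collapsed */
}

static void usb_lpm_exit(struct dwc3_msm *mdwc)
{
	/* ... GDSC and clocks restored ... */
	if (mdwc->iommu_map &&
	    arm_iommu_attach_device(mdwc->dev, mdwc->iommu_map))
		dev_err(mdwc->dev, "IOMMU attach failed\n");

	/* attach must precede the power-collapse POR register restore */
	dwc3_msm_power_collapse_por(mdwc);
}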
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index 900f268..8135da9 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -42,6 +42,7 @@
#define MSM_BUS_FAB_DC_NOC 6150
#define MSM_BUS_FAB_MC_VIRT 6151
#define MSM_BUS_FAB_MEM_NOC 6152
+#define MSM_BUS_FAB_IPA_VIRT 6153
#define MSM_BUS_FAB_MC_VIRT_DISPLAY 26000
#define MSM_BUS_FAB_MEM_NOC_DISPLAY 26001
@@ -86,6 +87,7 @@
#define MSM_BUS_BCM_CN0 7036
#define MSM_BUS_BCM_ACV 7037
#define MSM_BUS_BCM_ALC 7038
+#define MSM_BUS_BCM_QUP0 7039
#define MSM_BUS_RSC_APPS 8000
#define MSM_BUS_RSC_DISP 8001
@@ -241,7 +243,8 @@
#define MSM_BUS_MASTER_ANOC_PCIE_SNOC 140
#define MSM_BUS_MASTER_PIMEM 141
#define MSM_BUS_MASTER_MEM_NOC_SNOC 142
-#define MSM_BUS_MASTER_MASTER_LAST 143
+#define MSM_BUS_MASTER_IPA_CORE 143
+#define MSM_BUS_MASTER_MASTER_LAST 144
#define MSM_BUS_MASTER_LLCC_DISPLAY 20000
#define MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001
@@ -579,7 +582,8 @@
#define MSM_BUS_SLAVE_SNOC_MEM_NOC_GC 774
#define MSM_BUS_SLAVE_SNOC_MEM_NOC_SF 775
#define MSM_BUS_SLAVE_MEM_NOC_SNOC 776
-#define MSM_BUS_SLAVE_LAST 777
+#define MSM_BUS_SLAVE_IPA 777
+#define MSM_BUS_SLAVE_LAST 778
#define MSM_BUS_SLAVE_EBI_CH0_DISPLAY 20512
#define MSM_BUS_SLAVE_LLCC_DISPLAY 20513
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 292d6a1..6f3da08 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -121,4 +121,10 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
}
#endif /* CONFIG_GENERIC_BUG */
+
+#ifdef CONFIG_PANIC_ON_DATA_CORRUPTION
+#define PANIC_CORRUPTION 1
+#else
+#define PANIC_CORRUPTION 0
+#endif /* CONFIG_PANIC_ON_DATA_CORRUPTION */
#endif /* _LINUX_BUG_H */
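PANIC_CORRUPTION collapses the config choice into a constant so callers can decide between panicking and warning without an #ifdef at every site. A hedged example of the intended call-site pattern (the check and message are illustrative, not from this patch):

/* Hedged sketch: react to detected corruption per CONFIG_PANIC_ON_DATA_CORRUPTION. */
static inline void report_corruption(bool corrupted, const char *what)
{
	if (!corrupted)
		return;

	if (PANIC_CORRUPTION)
		panic("data corruption detected in %s\n", what);
	else
		WARN(1, "data corruption detected in %s\n", what);
}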
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 31a7f91..8fd5fba 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -36,6 +36,8 @@
#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */
/* parents need enable during gate/ungate, set rate and re-parent */
#define CLK_OPS_PARENT_ENABLE BIT(12)
+ /* BIT(13) is unused */
+#define CLK_IS_MEASURE BIT(14) /* measure clock */
struct clk;
struct clk_hw;
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 16d3d26..0668534 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -98,7 +98,7 @@ enum ipa_dp_evt_type {
};
/**
- * enum hdr_total_len_or_pad_type - type vof value held by TOTAL_LEN_OR_PAD
+ * enum hdr_total_len_or_pad_type - type of value held by TOTAL_LEN_OR_PAD
* field in header configuration register.
* @IPA_HDR_PAD: field is used as padding length
* @IPA_HDR_TOTAL_LEN: field is used as total length
@@ -433,6 +433,55 @@ typedef void (*ipa_notify_cb)(void *priv, enum ipa_dp_evt_type evt,
unsigned long data);
/**
+ * enum ipa_wdi_meter_evt_type - type of event the client callback is
+ * invoked with for AP+STA mode metering
+ * @IPA_GET_WDI_SAP_STATS: get IPA stats between SAP and STA -
+ * use ipa_get_wdi_sap_stats structure
+ * @IPA_SET_WIFI_QUOTA: set quota limit on STA -
+ * use ipa_set_wifi_quota structure
+ */
+enum ipa_wdi_meter_evt_type {
+ IPA_GET_WDI_SAP_STATS,
+ IPA_SET_WIFI_QUOTA,
+};
+
+struct ipa_get_wdi_sap_stats {
+ /* indicate to reset stats after query */
+ uint8_t reset_stats;
+ /* indicate valid stats from wlan-fw */
+ uint8_t stats_valid;
+ /* Tx: SAP->STA */
+ uint64_t ipv4_tx_packets;
+ uint64_t ipv4_tx_bytes;
+ /* Rx: STA->SAP */
+ uint64_t ipv4_rx_packets;
+ uint64_t ipv4_rx_bytes;
+ uint64_t ipv6_tx_packets;
+ uint64_t ipv6_tx_bytes;
+ uint64_t ipv6_rx_packets;
+ uint64_t ipv6_rx_bytes;
+};
+
+/**
+ * struct ipa_set_wifi_quota - structure used for
+ * IPA_SET_WIFI_QUOTA.
+ *
+ * @quota_bytes: Quota (in bytes) for the STA interface.
+ * @set_quota: Indicate whether to set the quota (use 1) or
+ * unset the quota.
+ *
+ */
+struct ipa_set_wifi_quota {
+ uint64_t quota_bytes;
+ uint8_t set_quota;
+ /* indicate valid quota set from wlan-fw */
+ uint8_t set_valid;
+};
+
+typedef void (*ipa_wdi_meter_notifier_cb)(enum ipa_wdi_meter_evt_type evt,
+ void *data);
+
+/**
* struct ipa_connect_params - low-level client connect input parameters. Either
* client allocates the data and desc FIFO and specifies that in data+desc OR
* specifies sizes and pipe_mem pref and IPA does the allocation.
@@ -1003,6 +1052,7 @@ struct ipa_wdi_dl_params_smmu {
* @ul_smmu: WDI_RX configuration info when WLAN uses SMMU
* @dl_smmu: WDI_TX configuration info when WLAN uses SMMU
* @smmu_enabled: true if WLAN uses SMMU
+ * @wdi_notify: callback to get WDI stats and quota info
*/
struct ipa_wdi_in_params {
struct ipa_sys_connect_params sys;
@@ -1013,6 +1063,9 @@ struct ipa_wdi_in_params {
struct ipa_wdi_dl_params_smmu dl_smmu;
} u;
bool smmu_enabled;
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+ ipa_wdi_meter_notifier_cb wdi_notify;
+#endif
};
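The metering callback is registered through the wdi_notify member above and is invoked by IPA with one of the two event types: for IPA_GET_WDI_SAP_STATS the WLAN driver fills the counters and sets stats_valid, for IPA_SET_WIFI_QUOTA it programs (or clears) the byte quota and reports back via set_valid. A hedged sketch of a WLAN-side handler (the firmware interactions are placeholders):

/* Hedged sketch: a client implementation of ipa_wdi_meter_notifier_cb. */
static void wlan_wdi_meter_cb(enum ipa_wdi_meter_evt_type evt, void *data)
{
	switch (evt) {
	case IPA_GET_WDI_SAP_STATS: {
		struct ipa_get_wdi_sap_stats *stats = data;

		/* placeholder: read SAP<->STA counters from wlan firmware */
		stats->ipv4_tx_bytes = 0;
		stats->ipv6_tx_bytes = 0;
		stats->stats_valid = 1;
		break;
	}
	case IPA_SET_WIFI_QUOTA: {
		struct ipa_set_wifi_quota *quota = data;

		/* placeholder: apply quota->quota_bytes when set_quota is 1,
		 * remove the limit otherwise, then acknowledge */
		quota->set_valid = 1;
		break;
	}
	}
}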
enum ipa_upstream_type {
@@ -1273,6 +1326,9 @@ int ipa_resume_wdi_pipe(u32 clnt_hdl);
int ipa_suspend_wdi_pipe(u32 clnt_hdl);
int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
u16 ipa_get_smem_restr_bytes(void);
+int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
+ uint64_t num_bytes);
+
/*
* To retrieve doorbell physical address of
* wlan pipes
@@ -1853,6 +1909,12 @@ static inline int ipa_suspend_wdi_pipe(u32 clnt_hdl)
return -EPERM;
}
+static inline int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
+ uint64_t num_bytes)
+{
+ return -EPERM;
+}
+
static inline int ipa_uc_wdi_get_dbpa(
struct ipa_wdi_db_params *out)
{
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 8462da2..2251428 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -84,6 +84,12 @@ enum armpmu_attr_groups {
ARMPMU_NR_ATTR_GROUPS
};
+enum armpmu_pmu_states {
+ ARM_PMU_STATE_OFF,
+ ARM_PMU_STATE_RUNNING,
+ ARM_PMU_STATE_GOING_DOWN,
+};
+
struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
@@ -108,6 +114,8 @@ struct arm_pmu {
void (*free_irq)(struct arm_pmu *);
int (*map_event)(struct perf_event *event);
int num_events;
+ int pmu_state;
+ int percpu_irq;
atomic_t active_events;
struct mutex reserve_mutex;
u64 max_period;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 531b8b1..3c80583 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -270,6 +270,8 @@ struct pmu {
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
+ u32 events_across_hotplug:1,
+ reserved:31;
/* number of address filters this PMU can do */
unsigned int nr_addr_filters;
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
index 7945fea..25e7a5f 100644
--- a/include/linux/phy/phy-qcom-ufs.h
+++ b/include/linux/phy/phy-qcom-ufs.h
@@ -58,5 +58,6 @@ void ufs_qcom_phy_save_controller_version(struct phy *phy,
u8 major, u16 minor, u16 step);
const char *ufs_qcom_phy_name(struct phy *phy);
int ufs_qcom_phy_configure_lpm(struct phy *generic_phy, bool enable);
+void ufs_qcom_phy_dbg_register_dump(struct phy *generic_phy);
#endif /* PHY_QCOM_UFS_H_ */
diff --git a/include/linux/qdsp6v2/rtac.h b/include/linux/qdsp6v2/rtac.h
index 3e5433b..eeea0eb 100644
--- a/include/linux/qdsp6v2/rtac.h
+++ b/include/linux/qdsp6v2/rtac.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2011, 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011, 2013-2015, 2017, The Linux Foundation. All rights
+ * reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -95,4 +96,5 @@ int rtac_clear_mapping(uint32_t cal_type);
bool rtac_make_afe_callback(uint32_t *payload, u32 payload_size);
void rtac_set_afe_handle(void *handle);
void get_rtac_adm_data(struct rtac_adm *adm_data);
+void rtac_update_afe_topology(u32 port_id);
#endif
diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h
index 4023e3a..a0e2283 100644
--- a/include/linux/qpnp/qpnp-revid.h
+++ b/include/linux/qpnp/qpnp-revid.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -181,6 +181,7 @@
#define PM660L_SUBTYPE 0x1A
#define PM660_SUBTYPE 0x1B
+/* PMI8998 REV_ID */
#define PMI8998_V1P0_REV1 0x00
#define PMI8998_V1P0_REV2 0x00
#define PMI8998_V1P0_REV3 0x00
@@ -196,6 +197,26 @@
#define PMI8998_V2P0_REV3 0x00
#define PMI8998_V2P0_REV4 0x02
+/* PM660 REV_ID */
+#define PM660_V1P0_REV1 0x00
+#define PM660_V1P0_REV2 0x00
+#define PM660_V1P0_REV3 0x00
+#define PM660_V1P0_REV4 0x01
+
+#define PM660_V1P1_REV1 0x00
+#define PM660_V1P1_REV2 0x00
+#define PM660_V1P1_REV3 0x01
+#define PM660_V1P1_REV4 0x01
+
+/* PMI8998 FAB_ID */
+#define PMI8998_FAB_ID_SMIC 0x11
+#define PMI8998_FAB_ID_GF 0x30
+
+/* PM660 FAB_ID */
+#define PM660_FAB_ID_GF 0x0
+#define PM660_FAB_ID_TSMC 0x2
+#define PM660_FAB_ID_MX 0x3
+
/* PM8005 */
#define PM8005_SUBTYPE 0x18
diff --git a/include/linux/regulator/qpnp-labibb-regulator.h b/include/linux/regulator/qpnp-labibb-regulator.h
new file mode 100644
index 0000000..2470695
--- /dev/null
+++ b/include/linux/regulator/qpnp-labibb-regulator.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QPNP_LABIBB_REGULATOR_H
+#define _QPNP_LABIBB_REGULATOR_H
+
+enum labibb_notify_event {
+ LAB_VREG_OK = 1,
+};
+
+int qpnp_labibb_notifier_register(struct notifier_block *nb);
+int qpnp_labibb_notifier_unregister(struct notifier_block *nb);
+
+#endif
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 065d441..7a09cb1 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -1815,11 +1815,14 @@ struct afe_port_data_cmd_rt_proxy_port_read_v2 {
#define AFE_PORT_SAMPLE_RATE_16K 16000
#define AFE_PORT_SAMPLE_RATE_48K 48000
#define AFE_PORT_SAMPLE_RATE_96K 96000
+#define AFE_PORT_SAMPLE_RATE_176P4K 176400
#define AFE_PORT_SAMPLE_RATE_192K 192000
+#define AFE_PORT_SAMPLE_RATE_352P8K 352800
#define AFE_LINEAR_PCM_DATA 0x0
#define AFE_NON_LINEAR_DATA 0x1
#define AFE_LINEAR_PCM_DATA_PACKED_60958 0x2
#define AFE_NON_LINEAR_DATA_PACKED_60958 0x3
+#define AFE_GENERIC_COMPRESSED 0x8
/* This param id is used to configure I2S interface */
#define AFE_PARAM_ID_I2S_CONFIG 0x0001020D
@@ -2461,6 +2464,13 @@ struct afe_param_id_slimbus_cfg {
*/
#define AFE_PARAM_ID_USB_AUDIO_DEV_PARAMS 0x000102A5
+
+/* ID of the parameter used to set the endianness value for the
+ * USB audio device. It should be used with
+ * AFE_MODULE_AUDIO_DEV_INTERFACE
+ */
+#define AFE_PARAM_ID_USB_AUDIO_DEV_LPCM_FMT 0x000102AA
+
/* Minor version used for tracking USB audio configuration */
#define AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG 0x1
@@ -2476,6 +2486,15 @@ struct afe_param_id_usb_audio_dev_params {
u32 dev_token;
} __packed;
+struct afe_param_id_usb_audio_dev_lpcm_fmt {
+/* Minor version used for tracking USB audio device parameter.
+ * Supported values: AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG
+ */
+ u32 cfg_minor_version;
+/* Endianness of actual end USB audio device */
+ u32 endian;
+} __packed;
+
/* ID of the parameter used by AFE_PARAM_ID_USB_AUDIO_CONFIG to configure
* USB audio interface. It should be used with AFE_MODULE_AUDIO_DEV_INTERFACE
*/
@@ -2520,13 +2539,18 @@ struct afe_param_id_usb_audio_cfg {
u16 reserved;
/* device token of actual end USB aduio device */
u32 dev_token;
+/* endianness of this interface */
+ u32 endian;
} __packed;
struct afe_usb_audio_dev_param_command {
struct apr_hdr hdr;
struct afe_port_cmd_set_param_v2 param;
struct afe_port_param_data_v2 pdata;
- struct afe_param_id_usb_audio_dev_params usb_dev;
+ union {
+ struct afe_param_id_usb_audio_dev_params usb_dev;
+ struct afe_param_id_usb_audio_dev_lpcm_fmt lpcm_fmt;
+ };
} __packed;
/* This param id is used to configure Real Time Proxy interface. */
@@ -2724,7 +2748,9 @@ struct afe_param_id_tdm_cfg {
* - #AFE_PORT_SAMPLE_RATE_16K
* - #AFE_PORT_SAMPLE_RATE_24K
* - #AFE_PORT_SAMPLE_RATE_32K
- * - #AFE_PORT_SAMPLE_RATE_48K @tablebulletend
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ * - #AFE_PORT_SAMPLE_RATE_176P4K
+ * - #AFE_PORT_SAMPLE_RATE_352P8K @tablebulletend
*/
u32 bit_width;
@@ -2733,10 +2759,11 @@ struct afe_param_id_tdm_cfg {
*/
u16 data_format;
- /* < Data format: linear and compressed
+ /* < Data format: linear, compressed, generic compressed
* @values
* - #AFE_LINEAR_PCM_DATA
- * - #AFE_NON_LINEAR_DATA @tablebulletend
+ * - #AFE_NON_LINEAR_DATA
+ * - #AFE_GENERIC_COMPRESSED @tablebulletend
*/
u16 sync_mode;
@@ -3615,7 +3642,7 @@ struct afe_lpass_core_shared_clk_config_command {
#define DEFAULT_COPP_TOPOLOGY 0x00010314
#define DEFAULT_POPP_TOPOLOGY 0x00010BE4
#define COMPRESSED_PASSTHROUGH_DEFAULT_TOPOLOGY 0x0001076B
-#define COMPRESS_PASSTHROUGH_NONE_TOPOLOGY 0x00010774
+#define COMPRESSED_PASSTHROUGH_NONE_TOPOLOGY 0x00010774
#define VPM_TX_SM_ECNS_COPP_TOPOLOGY 0x00010F71
#define VPM_TX_DM_FLUENCE_COPP_TOPOLOGY 0x00010F72
#define VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY 0x00010F75
@@ -3921,6 +3948,8 @@ struct asm_softvolume_params {
#define ASM_MEDIA_FMT_EVRCWB_FS 0x00010BF0
+#define ASM_MEDIA_FMT_GENERIC_COMPRESSED 0x00013212
+
#define ASM_MAX_EQ_BANDS 12
#define ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2 0x00010D98
@@ -3930,6 +3959,40 @@ u32 fmt_blk_size;
/* Media format block size in bytes.*/
} __packed;
+struct asm_generic_compressed_fmt_blk_t {
+ struct apr_hdr hdr;
+ struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+
+ /*
+ * Channel mapping array of bitstream output.
+ * Channel[i] mapping describes channel i inside the buffer, where
+ * i < num_channels. All valid used channels must be
+ * present at the beginning of the array.
+ */
+ uint8_t channel_mapping[8];
+
+ /*
+ * Number of channels of the incoming bitstream.
+ * Supported values: 1,2,3,4,5,6,7,8
+ */
+ uint16_t num_channels;
+
+ /*
+ * Nominal bits per sample value of the incoming bitstream.
+ * Supported values: 16, 32
+ */
+ uint16_t bits_per_sample;
+
+ /*
+ * Nominal sampling rate of the incoming bitstream.
+ * Supported values: 8000, 11025, 16000, 22050, 24000, 32000,
+ * 44100, 48000, 88200, 96000, 176400, 192000,
+ * 352800, 384000
+ */
+ uint32_t sampling_rate;
+
+} __packed;
+
struct asm_multi_channel_pcm_fmt_blk_v2 {
struct apr_hdr hdr;
struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
@@ -9984,6 +10047,108 @@ struct afe_port_group_create {
union afe_port_group_config data;
} __packed;
+/* ID of the parameter used by #AFE_MODULE_AUDIO_DEV_INTERFACE to specify
+ * the timing statistics of the corresponding device interface.
+ * Client can periodically query for the device time statistics to help adjust
+ * the PLL based on the drift value. The get param command must be sent
+ * to the AFE port ID corresponding to the device interface.
+
+ * This parameter ID supports the following get param commands:
+ * #AFE_PORT_CMD_GET_PARAM_V2 and
+ * #AFE_PORT_CMD_GET_PARAM_V3.
+ */
+#define AFE_PARAM_ID_DEV_TIMING_STATS 0x000102AD
+
+/* Version information used to handle future additions to AFE device
+ * interface timing statistics (for backward compatibility).
+ */
+#define AFE_API_VERSION_DEV_TIMING_STATS 0x1
+
+/* Enumeration for specifying a sink(Rx) device */
+#define AFE_SINK_DEVICE 0x0
+
+/* Enumeration for specifying a source(Tx) device */
+#define AFE_SOURCE_DEVICE 0x1
+
+/* Enumeration for specifying the drift reference is of type AV Timer */
+#define AFE_REF_TIMER_TYPE_AVTIMER 0x0
+
+/* Message payload structure for the
+ * AFE_PARAM_ID_DEV_TIMING_STATS parameter.
+ */
+struct afe_param_id_dev_timing_stats {
+ /* Minor version used to track the version of device interface timing
+ * statistics. Currently, the supported version is 1.
+ * @values #AFE_API_VERSION_DEV_TIMING_STATS
+ */
+ u32 minor_version;
+
+ /* Indicates the device interface direction as either
+ * source (Tx) or sink (Rx).
+ * @values
+ * #AFE_SINK_DEVICE
+ * #AFE_SOURCE_DEVICE
+ */
+ u16 device_direction;
+
+ /* Reference timer for drift accumulation and time stamp information.
+ * @values
+ * #AFE_REF_TIMER_TYPE_AVTIMER @tablebulletend
+ */
+ u16 reference_timer;
+
+ /*
+ * Flag to indicate if resync is required on the client side for
+ * drift correction. Flag is set to TRUE for the first get_param
+ * response after device interface starts. This flag value can be
+ * used by client to identify if device interface restart has
+ * happened and if any re-sync is required at their end for drift
+ * correction.
+ * @values
+ * 0: FALSE (Resync not required)
+ * 1: TRUE (Resync required) @tablebulletend
+ */
+ u32 resync_flag;
+
+ /* Accumulated drift value in microseconds. This value is updated
+ * every 100 ms.
+ * Positive drift value indicates AV timer is running faster than device
+ * Negative drift value indicates AV timer is running slower than device
+ * @values Any valid int32 number
+ */
+ s32 acc_drift_value;
+
+ /* Lower 32 bits of the 64-bit absolute timestamp of reference
+ * timer in microseconds.
+
+ * This timestamp corresponds to the time when the drift values
+ * are accumulated every 100 ms.
+ * @values Any valid uint32 number
+ */
+ u32 ref_timer_abs_ts_lsw;
+
+ /* Upper 32 bits of the 64-bit absolute timestamp of reference
+ * timer in microseconds.
+ * This timestamp corresponds to the time when the drift values
+ * are accumulated every 100 ms.
+ * @values Any valid uint32 number
+ */
+ u32 ref_timer_abs_ts_msw;
+} __packed;
+
+struct afe_av_dev_drift_get_param {
+ struct apr_hdr hdr;
+ struct afe_port_cmd_get_param_v2 get_param;
+ struct afe_port_param_data_v2 pdata;
+ struct afe_param_id_dev_timing_stats timing_stats;
+} __packed;
+
+struct afe_av_dev_drift_get_param_resp {
+ uint32_t status;
+ struct afe_port_param_data_v2 pdata;
+ struct afe_param_id_dev_timing_stats timing_stats;
+} __packed;
+
/* Command for Matrix or Stream Router */
#define ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2 0x00010DCE
/* Module for AVSYNC */
@@ -10069,12 +10234,108 @@ struct asm_session_cmd_set_mtmx_strstr_params_v2 {
*/
};
+/* Parameter used by #ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC which allows the
+ * audio client choose the rendering decision that the audio DSP should use.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_RENDER_MODE_CMD 0x00012F0D
+
+/* Indicates that rendering decision will be based on default rate
+ * (session clock based rendering, device driven).
+ * 1. The default session clock based rendering is inherently driven
+ * by the timing of the device.
+ * 2. After the initial decision is made (first buffer after a run
+ * command), subsequent data rendering decisions are made with
+ * respect to the rate at which the device is rendering, thus deriving
+ * its timing from the device.
+ * 3. While this decision making is simple, it has some inherent limitations
+ * (mentioned in the next section).
+ * 4. If this API is not set, the session clock based rendering will be assumed
+ * and this will ensure that the DSP is backward compatible.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_RENDER_DEFAULT 0
+
+/* Indicates that rendering decision will be based on local clock rate.
+ * 1. In the DSP loopback/client loopback use cases (frame based
+ * inputs), the incoming data into audio DSP is time-stamped at the
+ * local clock rate (STC).
+ * 2. This TS rate may match the incoming data rate or maybe different
+ * from the incoming data rate.
+ * 3. Regardless, the data will be time-stamped with local STC and
+ * therefore, the client is recommended to set this mode for these
+ * use cases. This method is inherently more robust to sequencing
+ * (AFE Start/Stop) and device switches, among other benefits.
+ * 4. This API will inform the DSP to compare every incoming buffer TS
+ * against local STC.
+ * 5. DSP will continue to honor render windows APIs, as before.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_RENDER_LOCAL_STC 1
+
+/* Structure for rendering decision parameter */
+struct asm_session_mtmx_strtr_param_render_mode_t {
+ /* Specifies the type of rendering decision the audio DSP should use.
+ *
+ * @values
+ * - #ASM_SESSION_MTMX_STRTR_PARAM_RENDER_DEFAULT
+ * - #ASM_SESSION_MTMX_STRTR_PARAM_RENDER_LOCAL_STC
+ */
+ u32 flags;
+} __packed;
+
+/* Parameter used by #ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC which allows the
+ * audio client to specify the clock recovery mechanism that the audio DSP
+ * should use.
+ */
+
+#define ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_CMD 0x00012F0E
+
+/* Indicates that default clock recovery will be used (no clock recovery).
+ * If the client wishes that no clock recovery be done, the client can
+ * choose this. This means that no attempt will made by the DSP to try and
+ * match the rates of the input and output audio.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_NONE 0
+
+/* Indicates that independent clock recovery needs to be used.
+ * 1. In the DSP loopback/client loopback use cases (frame based inputs),
+ * the client should choose the independent clock recovery option.
+ * 2. This basically de-couples the audio and video from knowing each others
+ * clock sources and lets the audio DSP independently rate match the input
+ * and output rates.
+ * 3. After drift detection, the drift correction is achieved by either pulling
+ * the PLLs (if applicable) or by stream to device rate matching
+ * (for PCM use cases) by comparing drift with respect to STC.
+ * 4. For passthrough use cases, since the PLL pulling is the only option,
+ * a best effort will be made.
+ * If PLL pulling is not possible / available, the rendering will be
+ * done without rate matching.
+ */
+#define ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_AUTO 1
+
+/* Payload of the #ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_CMD parameter.
+ */
+struct asm_session_mtmx_strtr_param_clk_rec_t {
+ /* Specifies the type of clock recovery that the audio DSP should
+ * use for rate matching.
+ */
+
+ /* @values
+ * #ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_NONE
+ * #ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_AUTO
+ */
+ u32 flags;
+} __packed;
+
+union asm_session_mtmx_strtr_param_config {
+ struct asm_session_mtmx_strtr_param_window_v2_t window_param;
+ struct asm_session_mtmx_strtr_param_render_mode_t render_param;
+ struct asm_session_mtmx_strtr_param_clk_rec_t clk_rec_param;
+} __packed;
+
struct asm_mtmx_strtr_params {
struct apr_hdr hdr;
struct asm_session_cmd_set_mtmx_strstr_params_v2 param;
struct asm_stream_param_data_v2 data;
- u32 window_lsw;
- u32 window_msw;
+ union asm_session_mtmx_strtr_param_config config;
} __packed;
#define ASM_SESSION_CMD_GET_MTMX_STRTR_PARAMS_V2 0x00010DCF
@@ -10195,6 +10456,7 @@ enum {
COMPRESSED_PASSTHROUGH_CONVERT,
COMPRESSED_PASSTHROUGH_DSD,
LISTEN,
+ COMPRESSED_PASSTHROUGH_GEN,
};
#define AUDPROC_MODULE_ID_COMPRESSED_MUTE 0x00010770
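A client is expected to poll AFE_PARAM_ID_DEV_TIMING_STATS and feed the result into its clock correction: acc_drift_value is the signed accumulated drift in microseconds (positive means the AV timer runs fast relative to the device), the two ref_timer words form the 64-bit timestamp of the latest accumulation, and resync_flag tells the client to re-anchor after an interface restart. A short hedged sketch of unpacking the response:

/* Hedged sketch: interpreting afe_param_id_dev_timing_stats. */
static u64 unpack_dev_timing_stats(const struct afe_param_id_dev_timing_stats *ts,
				   s32 *drift_us, bool *need_resync)
{
	*need_resync = !!ts->resync_flag;	/* interface restarted: re-anchor */
	*drift_us = ts->acc_drift_value;	/* positive: AV timer ahead of device */

	/* absolute reference-timer time (us) of the latest accumulation */
	return ((u64)ts->ref_timer_abs_ts_msw << 32) | ts->ref_timer_abs_ts_lsw;
}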
diff --git a/include/sound/q6adm-v2.h b/include/sound/q6adm-v2.h
index 2537631..42d048f 100644
--- a/include/sound/q6adm-v2.h
+++ b/include/sound/q6adm-v2.h
@@ -58,9 +58,9 @@ enum {
struct route_payload {
unsigned int copp_idx[MAX_COPPS_PER_PORT];
unsigned int port_id[MAX_COPPS_PER_PORT];
- int app_type;
- int acdb_dev_id;
- int sample_rate;
+ int app_type[MAX_COPPS_PER_PORT];
+ int acdb_dev_id[MAX_COPPS_PER_PORT];
+ int sample_rate[MAX_COPPS_PER_PORT];
unsigned short num_copps;
unsigned int session_id;
};
diff --git a/include/sound/q6afe-v2.h b/include/sound/q6afe-v2.h
index afd68e7..8361175 100644
--- a/include/sound/q6afe-v2.h
+++ b/include/sound/q6afe-v2.h
@@ -42,6 +42,8 @@
#define AFE_CLK_VERSION_V1 1
#define AFE_CLK_VERSION_V2 2
+typedef int (*routing_cb)(int port);
+
enum {
/* IDX 0->4 */
IDX_PRIMARY_I2S_RX,
@@ -362,5 +364,8 @@ int afe_send_custom_tdm_header_cfg(
struct afe_param_id_custom_tdm_header_cfg *custom_tdm_header_cfg,
u16 port_id);
int afe_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port,
- u32 rate);
+ u32 rate, u16 num_groups);
+void afe_set_routing_callback(routing_cb cb);
+int afe_get_av_dev_drift(struct afe_param_id_dev_timing_stats *timing_stats,
+ u16 port);
#endif /* __Q6AFE_V2_H__ */
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index cb23898..6bc93f5 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -54,6 +54,7 @@
#define FORMAT_DTS 0x001c
#define FORMAT_DSD 0x001d
#define FORMAT_APTX 0x001e
+#define FORMAT_GEN_COMPR 0x001f
#define ENCDEC_SBCBITRATE 0x0001
#define ENCDEC_IMMEDIATE_DECODE 0x0002
@@ -500,6 +501,11 @@ int q6asm_media_format_block_multi_ch_pcm_v2(
uint32_t rate, uint32_t channels,
bool use_default_chmap, char *channel_map,
uint16_t bits_per_sample);
+int q6asm_media_format_block_gen_compr(
+ struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ bool use_default_chmap, char *channel_map,
+ uint16_t bits_per_sample);
int q6asm_media_format_block_multi_ch_pcm_v3(struct audio_client *ac,
uint32_t rate, uint32_t channels,
@@ -635,6 +641,14 @@ int q6asm_send_mtmx_strtr_window(struct audio_client *ac,
struct asm_session_mtmx_strtr_param_window_v2_t *window_param,
uint32_t param_id);
+/* Configure DSP render mode */
+int q6asm_send_mtmx_strtr_render_mode(struct audio_client *ac,
+ uint32_t render_mode);
+
+/* Configure DSP clock recovery mode */
+int q6asm_send_mtmx_strtr_clk_rec_mode(struct audio_client *ac,
+ uint32_t clk_rec_mode);
+
/* Retrieve the current DSP path delay */
int q6asm_get_path_delay(struct audio_client *ac);
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index fb882f5..fda50e9 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -268,6 +268,39 @@ struct drm_panel_hdr_properties {
__u32 blackness_level;
};
+/**
+ * struct drm_msm_event_req - Payload to event enable/disable ioctls.
+ * @object_id: DRM object id. e.g.: for crtc pass crtc id.
+ * @object_type: DRM object type. e.g.: for crtc set it to DRM_MODE_OBJECT_CRTC.
+ * @event: Event for which notification is being enabled/disabled.
+ * e.g.: for Histogram set - DRM_EVENT_HISTOGRAM.
+ * @client_context: Opaque pointer that will be returned during event response
+ * notification.
+ * @index: Object index (e.g. crtc index), optional for user-space to set.
+ * Driver will override value based on object_id and object_type.
+ */
+struct drm_msm_event_req {
+ __u32 object_id;
+ __u32 object_type;
+ __u32 event;
+ __u64 client_context;
+ __u32 index;
+};
+
+/**
+ * struct drm_msm_event_resp - payload returned when read is called for
+ * custom notifications.
+ * @base: Event type and length of complete notification payload.
+ * @info: Information about the DRM object that raised this event.
+ * @data: Custom payload that the driver returns for the event type.
+ * size of data = base.length - (sizeof(base) + sizeof(info))
+ */
+struct drm_msm_event_resp {
+ struct drm_event base;
+ struct drm_msm_event_req info;
+ __u8 data[];
+};
+
#define DRM_MSM_GET_PARAM 0x00
/* placeholder:
#define DRM_MSM_SET_PARAM 0x01
@@ -284,6 +317,10 @@ struct drm_panel_hdr_properties {
#define DRM_MSM_REGISTER_EVENT 0x41
#define DRM_MSM_DEREGISTER_EVENT 0x42
+/* sde custom events */
+#define DRM_EVENT_HISTOGRAM 0x80000000
+#define DRM_EVENT_AD_BACKLIGHT 0x80000001
+
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
#define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info)
@@ -294,6 +331,10 @@ struct drm_panel_hdr_properties {
#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
#define DRM_IOCTL_SDE_WB_CONFIG \
DRM_IOW((DRM_COMMAND_BASE + DRM_SDE_WB_CONFIG), struct sde_drm_wb_cfg)
+#define DRM_IOCTL_MSM_REGISTER_EVENT DRM_IOW((DRM_COMMAND_BASE + \
+ DRM_MSM_REGISTER_EVENT), struct drm_msm_event_req)
+#define DRM_IOCTL_MSM_DEREGISTER_EVENT DRM_IOW((DRM_COMMAND_BASE + \
+ DRM_MSM_DEREGISTER_EVENT), struct drm_msm_event_req)
#if defined(__cplusplus)
}
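Userspace enables one of the SDE notifications by filling drm_msm_event_req for the object it cares about and issuing DRM_IOCTL_MSM_REGISTER_EVENT; events then arrive on the DRM fd as drm_msm_event_resp records whose base.length spans the whole payload. A hedged userspace sketch (the CRTC id, buffer size and header path are placeholders):

/* Hedged sketch: subscribe to and read an SDE histogram event. */
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

static int watch_histogram(int drm_fd, __u32 crtc_id)
{
	struct drm_msm_event_req req;
	char buf[1024];
	ssize_t len;

	memset(&req, 0, sizeof(req));
	req.object_id = crtc_id;		/* placeholder CRTC object id */
	req.object_type = DRM_MODE_OBJECT_CRTC;
	req.event = DRM_EVENT_HISTOGRAM;

	if (drmIoctl(drm_fd, DRM_IOCTL_MSM_REGISTER_EVENT, &req))
		return -1;

	len = read(drm_fd, buf, sizeof(buf));	/* events arrive on the DRM fd */
	if (len >= (ssize_t)sizeof(struct drm_msm_event_resp)) {
		struct drm_msm_event_resp *resp = (struct drm_msm_event_resp *)buf;
		/* resp->data carries base.length - sizeof(base) - sizeof(info)
		 * bytes of driver-specific histogram payload */
		(void)resp;
	}

	drmIoctl(drm_fd, DRM_IOCTL_MSM_DEREGISTER_EVENT, &req);
	return 0;
}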
diff --git a/include/uapi/linux/esoc_ctrl.h b/include/uapi/linux/esoc_ctrl.h
index 1b17e1c..4201c95 100644
--- a/include/uapi/linux/esoc_ctrl.h
+++ b/include/uapi/linux/esoc_ctrl.h
@@ -5,11 +5,11 @@
#define ESOC_CODE 0xCC
-#define ESOC_CMD_EXE _IOW(ESOC_CODE, 1, __u32)
-#define ESOC_WAIT_FOR_REQ _IOR(ESOC_CODE, 2, __u32)
-#define ESOC_NOTIFY _IOW(ESOC_CODE, 3, __u32)
-#define ESOC_GET_STATUS _IOR(ESOC_CODE, 4, __u32)
-#define ESOC_WAIT_FOR_CRASH _IOR(ESOC_CODE, 6, __u32)
+#define ESOC_CMD_EXE _IOW(ESOC_CODE, 1, unsigned int)
+#define ESOC_WAIT_FOR_REQ _IOR(ESOC_CODE, 2, unsigned int)
+#define ESOC_NOTIFY _IOW(ESOC_CODE, 3, unsigned int)
+#define ESOC_GET_STATUS _IOR(ESOC_CODE, 4, unsigned int)
+#define ESOC_WAIT_FOR_CRASH _IOR(ESOC_CODE, 6, unsigned int)
#define ESOC_REG_REQ_ENG _IO(ESOC_CODE, 7)
#define ESOC_REG_CMD_ENG _IO(ESOC_CODE, 8)
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index 941a816..c190446 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -327,6 +327,7 @@ enum kgsl_timestamp_type {
#define KGSL_PROP_DEVICE_QDSS_STM 0x19
#define KGSL_PROP_MIN_ACCESS_LENGTH 0x1A
#define KGSL_PROP_UBWC_MODE 0x1B
+#define KGSL_PROP_DEVICE_QTIMER 0x20
struct kgsl_shadowprop {
unsigned long gpuaddr;
@@ -339,6 +340,11 @@ struct kgsl_qdss_stm_prop {
uint64_t size;
};
+struct kgsl_qtimer_prop {
+ uint64_t gpuaddr;
+ uint64_t size;
+};
+
struct kgsl_version {
unsigned int drv_major;
unsigned int drv_minor;
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index d138beb..5f375c4 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -1,8 +1,8 @@
-header-y += cam_req_mgr.h
header-y += cam_defs.h
header-y += cam_isp.h
header-y += cam_isp_vfe.h
header-y += cam_isp_ife.h
+header-y += cam_req_mgr.h
header-y += cam_sensor.h
header-y += cam_sync.h
header-y += msm_media_info.h
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index 18bd04a..3e2b24c 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -9,16 +9,17 @@
#define CAM_REQ_MGR_VNODE_NAME "cam-req-mgr-devnode"
-#define CAM_DEVICE_TYPE_BASE (MEDIA_ENT_F_OLD_BASE)
-#define CAM_VNODE_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE)
-#define CAM_SENSOR_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 1)
-#define CAM_IFE_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 2)
-#define CAM_ICP_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 3)
-#define CAM_LRME_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 4)
-#define CAM_JPEG_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 5)
-#define CAM_FD_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 6)
-#define CAM_CPAS_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 7)
-#define CAM_CSIPHY_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 8)
+#define CAM_DEVICE_TYPE_BASE (MEDIA_ENT_F_OLD_BASE)
+#define CAM_VNODE_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE)
+#define CAM_SENSOR_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 1)
+#define CAM_IFE_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 2)
+#define CAM_ICP_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 3)
+#define CAM_LRME_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 4)
+#define CAM_JPEG_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 5)
+#define CAM_FD_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 6)
+#define CAM_CPAS_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 7)
+#define CAM_CSIPHY_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 8)
+#define CAM_ACTUATOR_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 9)
/* cam_req_mgr hdl info */
#define CAM_REQ_MGR_HDL_IDX_POS 8
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index e04ccf0..3048105 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -70,7 +70,7 @@ struct snd_compr_tstamp {
__u32 pcm_frames;
__u32 pcm_io_frames;
__u32 sampling_rate;
- uint64_t timestamp;
+ __u64 timestamp;
} __attribute__((packed, aligned(4)));
/**
@@ -128,24 +128,46 @@ struct snd_compr_codec_caps {
* @reserved: reserved for furture use
*/
struct snd_compr_audio_info {
- uint32_t frame_size;
- uint32_t reserved[15];
+ __u32 frame_size;
+ __u32 reserved[15];
} __attribute__((packed, aligned(4)));
+#define SNDRV_COMPRESS_RENDER_MODE_AUDIO_MASTER 0
+#define SNDRV_COMPRESS_RENDER_MODE_STC_MASTER 1
+
+#define SNDRV_COMPRESS_CLK_REC_MODE_NONE 0
+#define SNDRV_COMPRESS_CLK_REC_MODE_AUTO 1
+
/**
* enum sndrv_compress_encoder
* @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the
* end of the track
* @SNDRV_COMPRESS_ENCODER_DELAY: no of samples inserted by the encoder at the
* beginning of the track
+ * @SNDRV_COMPRESS_PATH_DELAY: dsp path delay in microseconds
+ * @SNDRV_COMPRESS_RENDER_MODE: dsp render mode (audio master or stc)
+ * @SNDRV_COMPRESS_CLK_REC_MODE: clock recovery mode (none or auto)
+ * @SNDRV_COMPRESS_RENDER_WINDOW: render window
+ * @SNDRV_COMPRESS_START_DELAY: start delay
*/
enum sndrv_compress_encoder {
SNDRV_COMPRESS_ENCODER_PADDING = 1,
SNDRV_COMPRESS_ENCODER_DELAY = 2,
SNDRV_COMPRESS_MIN_BLK_SIZE = 3,
SNDRV_COMPRESS_MAX_BLK_SIZE = 4,
+ SNDRV_COMPRESS_PATH_DELAY = 5,
+ SNDRV_COMPRESS_RENDER_MODE = 6,
+ SNDRV_COMPRESS_CLK_REC_MODE = 7,
+ SNDRV_COMPRESS_RENDER_WINDOW = 8,
+ SNDRV_COMPRESS_START_DELAY = 9,
};
+#define SNDRV_COMPRESS_PATH_DELAY SNDRV_COMPRESS_PATH_DELAY
+#define SNDRV_COMPRESS_RENDER_MODE SNDRV_COMPRESS_RENDER_MODE
+#define SNDRV_COMPRESS_CLK_REC_MODE SNDRV_COMPRESS_CLK_REC_MODE
+#define SNDRV_COMPRESS_RENDER_WINDOW SNDRV_COMPRESS_RENDER_WINDOW
+#define SNDRV_COMPRESS_START_DELAY SNDRV_COMPRESS_START_DELAY
+
/**
* struct snd_compr_metadata - compressed stream metadata
* @key: key id
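Side note on the self-referential #defines added above: mirroring each new enum value with a same-named #define lets userspace probe for the new metadata keys at compile time. A minimal userspace sketch, assuming installed UAPI headers (the program itself is illustrative and not part of this patch):

#include <stdio.h>
#include <sound/compress_offload.h>

int main(void)
{
#ifdef SNDRV_COMPRESS_PATH_DELAY
	/* headers carry the new key; the enum value can be used directly */
	printf("DSP path delay key available (id %d)\n",
	       SNDRV_COMPRESS_PATH_DELAY);
#else
	printf("older headers: DSP path delay key not available\n");
#endif
	return 0;
}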
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 24f0d77..87b9cd9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7586,6 +7586,7 @@ static struct pmu perf_swevent = {
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
+ .events_across_hotplug = 1,
};
#ifdef CONFIG_EVENT_TRACING
@@ -7730,6 +7731,7 @@ static struct pmu perf_tracepoint = {
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
+ .events_across_hotplug = 1,
};
static inline void perf_tp_register(void)
@@ -8460,6 +8462,7 @@ static struct pmu perf_cpu_clock = {
.start = cpu_clock_event_start,
.stop = cpu_clock_event_stop,
.read = cpu_clock_event_read,
+ .events_across_hotplug = 1,
};
/*
@@ -8541,6 +8544,7 @@ static struct pmu perf_task_clock = {
.start = task_clock_event_start,
.stop = task_clock_event_stop,
.read = task_clock_event_read,
+ .events_across_hotplug = 1,
};
static void perf_pmu_nop_void(struct pmu *pmu)
@@ -10715,6 +10719,76 @@ int perf_event_init_cpu(unsigned int cpu)
}
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
+static void
+check_hotplug_start_event(struct perf_event *event)
+{
+ if (event->attr.type == PERF_TYPE_SOFTWARE) {
+ switch (event->attr.config) {
+ case PERF_COUNT_SW_CPU_CLOCK:
+ cpu_clock_event_start(event, 0);
+ break;
+ case PERF_COUNT_SW_TASK_CLOCK:
+ break;
+ default:
+ if (event->pmu->start)
+ event->pmu->start(event, 0);
+ break;
+ }
+ }
+}
+
+static int perf_event_start_swevents(unsigned int cpu)
+{
+ struct perf_event_context *ctx;
+ struct pmu *pmu;
+ struct perf_event *event;
+ int idx;
+
+ idx = srcu_read_lock(&pmus_srcu);
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+ mutex_lock(&ctx->mutex);
+ raw_spin_lock(&ctx->lock);
+ list_for_each_entry(event, &ctx->event_list, event_entry)
+ check_hotplug_start_event(event);
+ raw_spin_unlock(&ctx->lock);
+ mutex_unlock(&ctx->mutex);
+ }
+ srcu_read_unlock(&pmus_srcu, idx);
+ return 0;
+}
+
+/*
+ * If keeping events across hotplug is supported, do not
+ * remove them from the event list, so the event lives beyond
+ * CPU hotplug. The context is exited via the fd close path
+ * once userspace is done and the target CPU is online. If a
+ * software clock event is active, stop its hrtimer and
+ * restart the timer when the CPU comes back online.
+ */
+static void
+check_hotplug_remove_from_context(struct perf_event *event,
+ struct perf_cpu_context *cpuctx,
+ struct perf_event_context *ctx)
+{
+ if (!event->pmu->events_across_hotplug) {
+ __perf_remove_from_context(event, cpuctx,
+ ctx, (void *)DETACH_GROUP);
+ } else if (event->attr.type == PERF_TYPE_SOFTWARE) {
+ switch (event->attr.config) {
+ case PERF_COUNT_SW_CPU_CLOCK:
+ cpu_clock_event_stop(event, 0);
+ break;
+ case PERF_COUNT_SW_TASK_CLOCK:
+ break;
+ default:
+ if (event->pmu->stop)
+ event->pmu->stop(event, 0);
+ break;
+ }
+ }
+}
+
static void __perf_event_exit_context(void *__info)
{
struct perf_event_context *ctx = __info;
@@ -10723,7 +10797,7 @@ static void __perf_event_exit_context(void *__info)
raw_spin_lock(&ctx->lock);
list_for_each_entry(event, &ctx->event_list, event_entry)
- __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
+ check_hotplug_remove_from_context(event, cpuctx, ctx);
raw_spin_unlock(&ctx->lock);
}
@@ -10842,6 +10916,21 @@ static int __init perf_event_sysfs_init(void)
}
device_initcall(perf_event_sysfs_init);
+static int perf_cpu_hp_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ONLINE,
+ "PERF/CORE/AP_PERF_ONLINE",
+ perf_event_start_swevents,
+ perf_event_exit_cpu);
+ if (ret)
+ pr_err("CPU hotplug notifier for perf core could not be registered: %d\n",
+ ret);
+ return ret;
+}
+subsys_initcall(perf_cpu_hp_init);
+
#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
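For context, the events_across_hotplug flag added to the software PMUs above is a per-driver opt-in. A minimal sketch of a PMU declaration using it (the callback names are hypothetical; only the flag itself comes from this patch):

#include <linux/perf_event.h>

/* hypothetical callbacks, implemented elsewhere in the driver */
extern int  my_sw_event_init(struct perf_event *event);
extern int  my_sw_event_add(struct perf_event *event, int flags);
extern void my_sw_event_del(struct perf_event *event, int flags);
extern void my_sw_event_start(struct perf_event *event, int flags);
extern void my_sw_event_stop(struct perf_event *event, int flags);
extern void my_sw_event_read(struct perf_event *event);

static struct pmu my_sw_pmu = {
	.task_ctx_nr		= perf_sw_context,
	.event_init		= my_sw_event_init,
	.add			= my_sw_event_add,
	.del			= my_sw_event_del,
	.start			= my_sw_event_start,
	.stop			= my_sw_event_stop,
	.read			= my_sw_event_read,
	/* keep this PMU's events installed across CPU offline/online */
	.events_across_hotplug	= 1,
};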
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 22d67f0..0854263 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -703,12 +703,22 @@ void pm_qos_remove_request(struct pm_qos_request *req)
/* silent return to keep pcm code cleaner */
if (!pm_qos_request_active(req)) {
- WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
+ WARN(1, "pm_qos_remove_request() called for unknown object\n");
return;
}
cancel_delayed_work_sync(&req->work);
+#ifdef CONFIG_SMP
+ if (req->type == PM_QOS_REQ_AFFINE_IRQ) {
+ int ret = 0;
+ /* Passing NULL unregisters the affinity notifier for this IRQ */
+ ret = irq_set_affinity_notifier(req->irq, NULL);
+ if (ret)
+ WARN(1, "IRQ affinity notify set failed\n");
+ }
+#endif
+
trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
req, PM_QOS_REMOVE_REQ,
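The PM_QOS_REQ_AFFINE_IRQ path cleaned up above is used by drivers that tie a latency request to the CPUs servicing a particular IRQ. A rough sketch of such a caller, assuming the msm PM QoS request extensions (function name and latency value are illustrative):

#include <linux/pm_qos.h>

static struct pm_qos_request my_qos_req;

/* hypothetical helper called from a driver's probe/enable path */
static void my_driver_add_irq_qos(int irq)
{
	my_qos_req.type = PM_QOS_REQ_AFFINE_IRQ;
	my_qos_req.irq = irq;
	/* 10 us CPU DMA latency bound, applied to the CPUs handling irq */
	pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 10);
}

On teardown the driver calls pm_qos_remove_request(&my_qos_req), which is where the notifier cleanup shown above runs.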
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 84c5076..7ae9b24 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2116,6 +2116,7 @@ __acquires(&pool->lock)
current->comm, preempt_count(), task_pid_nr(current),
worker->current_func);
debug_show_held_locks(current);
+ BUG_ON(PANIC_CORRUPTION);
dump_stack();
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index fa9c7cd..64ec3fd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -257,6 +257,17 @@
If unsure, say N.
+config PAGE_OWNER_ENABLE_DEFAULT
+ bool "Enable Track page owner by default"
+ depends on PAGE_OWNER
+ ---help---
+ This keeps track of the call chain that allocated each page and
+ may help to find bare alloc_page(s) leaks. If this feature is
+ included in your build, it is enabled by default; pass
+ "page_owner=off" on the kernel command line to disable it. It
+ eats a fair amount of memory when enabled. See
+ tools/vm/page_owner_sort.c for a user-space helper.
+
config DEBUG_FS
bool "Debug Filesystem"
select SRCU
@@ -2029,6 +2040,19 @@
memtest=17, mean do 17 test patterns.
If you are unsure how to answer this question, answer N.
+config MEMTEST_ENABLE_DEFAULT
+ int "Enable Memtest pattern test by default? (0-17)"
+ range 0 17
+ default "0"
+ depends on MEMTEST
+ help
+ This option selects how many memtest patterns are run by
+ default via the kernel defconfig. Alternatively, the test can
+ be enabled using the memtest=<patterns> kernel command line option.
+
+ The default value of "0" keeps the test disabled. To enable it,
+ select any value in the 1-17 range.
+
config TEST_STATIC_KEYS
tristate "Test static keys"
default n
@@ -2038,6 +2062,13 @@
If unsure, say N.
+config PANIC_ON_DATA_CORRUPTION
+ bool "Cause a Kernel Panic When Data Corruption is detected"
+ help
+ Select this option to upgrade warnings for potentially
+ recoverable data corruption scenarios to system-halting panics,
+ for easier detection and debug.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 3859bf6..7a5c1c0 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -11,6 +11,7 @@
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
+#include <linux/bug.h>
/*
* Insert a new entry between two known consecutive entries.
@@ -34,6 +35,10 @@ void __list_add(struct list_head *new,
WARN(new == prev || new == next,
"list_add double add: new=%p, prev=%p, next=%p.\n",
new, prev, next);
+
+ BUG_ON((prev->next != next || next->prev != prev ||
+ new == prev || new == next) && PANIC_CORRUPTION);
+
next->prev = new;
new->next = next;
new->prev = prev;
@@ -58,9 +63,11 @@ void __list_del_entry(struct list_head *entry)
"list_del corruption. prev->next should be %p, "
"but was %p\n", entry, prev->next) ||
WARN(next->prev != entry,
- "list_del corruption. next->prev should be %p, "
- "but was %p\n", entry, next->prev))
+ "list_del corruption. next->prev should be %p, but was %p\n",
+ entry, next->prev)) {
+ BUG_ON(PANIC_CORRUPTION);
return;
+ }
__list_del(prev, next);
}
diff --git a/mm/memtest.c b/mm/memtest.c
index 8eaa4c3..15a423e 100644
--- a/mm/memtest.c
+++ b/mm/memtest.c
@@ -80,8 +80,8 @@ static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end)
}
/* default is disabled */
-static unsigned int memtest_pattern __initdata;
-
+static unsigned int memtest_pattern __initdata =
+ CONFIG_MEMTEST_ENABLE_DEFAULT;
static int __init parse_memtest(char *arg)
{
int ret = 0;
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 60634dc..d2db436 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -25,7 +25,8 @@ struct page_owner {
depot_stack_handle_t handle;
};
-static bool page_owner_disabled = true;
+static bool page_owner_disabled =
+ !IS_ENABLED(CONFIG_PAGE_OWNER_ENABLE_DEFAULT);
DEFINE_STATIC_KEY_FALSE(page_owner_inited);
static depot_stack_handle_t dummy_handle;
@@ -41,6 +42,9 @@ static int early_page_owner_param(char *buf)
if (strcmp(buf, "on") == 0)
page_owner_disabled = false;
+ if (strcmp(buf, "off") == 0)
+ page_owner_disabled = true;
+
return 0;
}
early_param("page_owner", early_page_owner_param);
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 2e647c6..0abd75e 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -106,7 +106,8 @@ static bool single_bit_flip(unsigned char a, unsigned char b)
return error && !(error & (error - 1));
}
-static void check_poison_mem(unsigned char *mem, size_t bytes)
+static void check_poison_mem(struct page *page,
+ unsigned char *mem, size_t bytes)
{
static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
unsigned char *start;
@@ -127,12 +128,15 @@ static void check_poison_mem(unsigned char *mem, size_t bytes)
if (!__ratelimit(&ratelimit))
return;
else if (start == end && single_bit_flip(*start, PAGE_POISON))
- pr_err("pagealloc: single bit error\n");
+ pr_err("pagealloc: single bit error on page with phys start 0x%lx\n",
+ (unsigned long)page_to_phys(page));
else
- pr_err("pagealloc: memory corruption\n");
+ pr_err("pagealloc: memory corruption on page with phys start 0x%lx\n",
+ (unsigned long)page_to_phys(page));
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
end - start + 1, 1);
+ BUG_ON(PANIC_CORRUPTION);
dump_stack();
}
@@ -144,7 +148,7 @@ static void unpoison_page(struct page *page)
return;
addr = kmap_atomic(page);
- check_poison_mem(addr, PAGE_SIZE);
+ check_poison_mem(page, addr, PAGE_SIZE);
clear_page_poison(page);
kunmap_atomic(addr);
}
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index e0198d2..fe135b4 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -1159,7 +1159,7 @@
config SND_SOC_MSM_HDMI_CODEC_RX
bool "HDMI Audio Playback"
- depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998)
+ depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998 || SND_SOC_SDM660_COMMON)
help
HDMI audio drivers should be built only if the platform
supports hdmi panel.
diff --git a/sound/soc/codecs/msm_hdmi_codec_rx.c b/sound/soc/codecs/msm_hdmi_codec_rx.c
index abc100f..46cfe7d 100644
--- a/sound/soc/codecs/msm_hdmi_codec_rx.c
+++ b/sound/soc/codecs/msm_hdmi_codec_rx.c
@@ -20,10 +20,17 @@
#include <linux/msm_ext_display.h>
#define MSM_EXT_DISP_PCM_RATES SNDRV_PCM_RATE_48000
+#define AUD_EXT_DISP_ACK_DISCONNECT (AUDIO_ACK_CONNECT ^ AUDIO_ACK_CONNECT)
+#define AUD_EXT_DISP_ACK_CONNECT (AUDIO_ACK_CONNECT)
+#define AUD_EXT_DISP_ACK_ENABLE (AUDIO_ACK_SET_ENABLE | AUDIO_ACK_ENABLE)
static const char *const ext_disp_audio_type_text[] = {"None", "HDMI", "DP"};
+static const char *const ext_disp_audio_ack_text[] = {"Disconnect", "Connect",
+ "Ack_Enable"};
static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_audio_type, ext_disp_audio_type_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_audio_ack_state,
+ ext_disp_audio_ack_text);
struct msm_ext_disp_audio_codec_rx_data {
struct platform_device *ext_disp_core_pdev;
@@ -175,6 +182,55 @@ static int msm_ext_disp_audio_type_get(struct snd_kcontrol *kcontrol,
return rc;
}
+static int msm_ext_disp_audio_ack_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct msm_ext_disp_audio_codec_rx_data *codec_data;
+ u32 ack_state = 0;
+ int rc;
+
+ codec_data = snd_soc_codec_get_drvdata(codec);
+ if (!codec_data ||
+ !codec_data->ext_disp_ops.acknowledge) {
+ dev_err(codec->dev,
+ "%s: codec_data or ops acknowledge() is NULL\n",
+ __func__);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ switch (ucontrol->value.enumerated.item[0]) {
+ case 0:
+ ack_state = AUD_EXT_DISP_ACK_DISCONNECT;
+ break;
+ case 1:
+ ack_state = AUD_EXT_DISP_ACK_CONNECT;
+ break;
+ case 2:
+ ack_state = AUD_EXT_DISP_ACK_ENABLE;
+ break;
+ default:
+ rc = -EINVAL;
+ dev_err(codec->dev,
+ "%s: invalid value %d for mixer ctl\n",
+ __func__, ucontrol->value.enumerated.item[0]);
+ goto done;
+ }
+ dev_dbg(codec->dev, "%s: control %d, ack set value 0x%x\n",
+ __func__, ucontrol->value.enumerated.item[0], ack_state);
+
+ rc = codec_data->ext_disp_ops.acknowledge(
+ codec_data->ext_disp_core_pdev, ack_state);
+ if (rc < 0) {
+ dev_err(codec->dev, "%s: error from acknowledge(), err:%d\n",
+ __func__, rc);
+ }
+
+done:
+ return rc;
+}
+
static const struct snd_kcontrol_new msm_ext_disp_codec_rx_controls[] = {
{
.access = SNDRV_CTL_ELEM_ACCESS_READ |
@@ -194,6 +250,8 @@ static const struct snd_kcontrol_new msm_ext_disp_codec_rx_controls[] = {
},
SOC_ENUM_EXT("External Display Type", ext_disp_audio_type,
msm_ext_disp_audio_type_get, NULL),
+ SOC_ENUM_EXT("External Display Audio Ack", ext_disp_audio_ack_state,
+ NULL, msm_ext_disp_audio_ack_set),
};
static int msm_ext_disp_audio_codec_rx_dai_startup(
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
index a118ecc..502aa4f 100644
--- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
@@ -48,10 +48,14 @@
#define MSM_SDW_VERSION_1_0 0x0001
#define MSM_SDW_VERSION_ENTRY_SIZE 32
+/*
+ * 200 milliseconds is sufficient for DSP bring-up on the ADSP
+ * after Sub System Restart
+ */
+#define ADSP_STATE_READY_TIMEOUT_MS 200
+
static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
static struct snd_soc_dai_driver msm_sdw_dai[];
-static bool initial_boot = true;
-static bool is_ssr_en;
static bool skip_irq = true;
static int msm_sdw_config_ear_spkr_gain(struct snd_soc_codec *codec,
@@ -467,10 +471,9 @@ static int msm_sdw_codec_enable_vi_feedback(struct snd_soc_dapm_widget *w,
MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x20,
0x20);
snd_soc_update_bits(codec,
- MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x0F, 0x00);
+ MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x0F, 0x04);
snd_soc_update_bits(codec,
- MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x0F,
- 0x00);
+ MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x0F, 0x04);
snd_soc_update_bits(codec,
MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x10, 0x10);
snd_soc_update_bits(codec,
@@ -493,10 +496,10 @@ static int msm_sdw_codec_enable_vi_feedback(struct snd_soc_dapm_widget *w,
0x20);
snd_soc_update_bits(codec,
MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x0F,
- 0x00);
+ 0x04);
snd_soc_update_bits(codec,
MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x0F,
- 0x00);
+ 0x04);
snd_soc_update_bits(codec,
MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x10,
0x10);
@@ -1036,6 +1039,13 @@ static int msm_sdw_swrm_read(void *handle, int reg)
__func__, reg);
sdw_rd_addr_base = MSM_SDW_AHB_BRIDGE_RD_ADDR_0;
sdw_rd_data_base = MSM_SDW_AHB_BRIDGE_RD_DATA_0;
+
+ /*
+ * Add a sleep, as an SWR slave register read takes time;
+ * allow RD_DONE to complete for the previous register, if any.
+ */
+ usleep_range(50, 55);
+
/* read_lock */
mutex_lock(&msm_sdw->sdw_read_lock);
ret = regmap_bulk_write(msm_sdw->regmap, sdw_rd_addr_base,
@@ -1225,7 +1235,7 @@ static int msm_sdw_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- u8 rx_clk_fs_rate, rx_fs_rate;
+ u8 clk_fs_rate, fs_rate;
dev_dbg(dai->codec->dev,
"%s: dai_name = %s DAI-ID %x rate %d num_ch %d format %d\n",
@@ -1234,28 +1244,28 @@ static int msm_sdw_hw_params(struct snd_pcm_substream *substream,
switch (params_rate(params)) {
case 8000:
- rx_clk_fs_rate = 0x00;
- rx_fs_rate = 0x00;
+ clk_fs_rate = 0x00;
+ fs_rate = 0x00;
break;
case 16000:
- rx_clk_fs_rate = 0x01;
- rx_fs_rate = 0x01;
+ clk_fs_rate = 0x01;
+ fs_rate = 0x01;
break;
case 32000:
- rx_clk_fs_rate = 0x02;
- rx_fs_rate = 0x03;
+ clk_fs_rate = 0x02;
+ fs_rate = 0x03;
break;
case 48000:
- rx_clk_fs_rate = 0x03;
- rx_fs_rate = 0x04;
+ clk_fs_rate = 0x03;
+ fs_rate = 0x04;
break;
case 96000:
- rx_clk_fs_rate = 0x04;
- rx_fs_rate = 0x05;
+ clk_fs_rate = 0x04;
+ fs_rate = 0x05;
break;
case 192000:
- rx_clk_fs_rate = 0x05;
- rx_fs_rate = 0x06;
+ clk_fs_rate = 0x05;
+ fs_rate = 0x06;
break;
default:
dev_err(dai->codec->dev,
@@ -1264,30 +1274,45 @@ static int msm_sdw_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- snd_soc_update_bits(dai->codec,
- MSM_SDW_TOP_RX_I2S_CTL, 0x1C, (rx_clk_fs_rate << 2));
- snd_soc_update_bits(dai->codec,
- MSM_SDW_RX7_RX_PATH_CTL, 0x0F, rx_fs_rate);
- snd_soc_update_bits(dai->codec,
- MSM_SDW_RX8_RX_PATH_CTL, 0x0F, rx_fs_rate);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_TX_I2S_CTL, 0x1C,
+ (clk_fs_rate << 2));
+ } else {
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_RX_I2S_CTL, 0x1C,
+ (clk_fs_rate << 2));
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_RX7_RX_PATH_CTL, 0x0F,
+ fs_rate);
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_RX8_RX_PATH_CTL, 0x0F,
+ fs_rate);
+ }
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
- snd_soc_update_bits(dai->codec,
- MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x20);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_TX_I2S_CTL, 0x20, 0x20);
+ else
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x20);
break;
case SNDRV_PCM_FORMAT_S24_LE:
case SNDRV_PCM_FORMAT_S24_3LE:
- snd_soc_update_bits(dai->codec,
- MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x00);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_TX_I2S_CTL, 0x20, 0x00);
+ else
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x00);
break;
default:
dev_err(dai->codec->dev, "%s: wrong format selected\n",
__func__);
return -EINVAL;
}
- snd_soc_update_bits(dai->codec,
- MSM_SDW_TOP_TX_I2S_CTL, 0x20, 0x20);
return 0;
}
@@ -1403,7 +1428,7 @@ static struct snd_soc_dai_driver msm_sdw_dai[] = {
.rate_max = 192000,
.rate_min = 8000,
.channels_min = 1,
- .channels_max = 2,
+ .channels_max = 4,
},
.ops = &msm_sdw_dai_ops,
},
@@ -1412,9 +1437,9 @@ static struct snd_soc_dai_driver msm_sdw_dai[] = {
.id = AIF1_SDW_VIFEED,
.capture = {
.stream_name = "VIfeed_SDW",
- .rates = SNDRV_PCM_RATE_8000,
+ .rates = MSM_SDW_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rate_max = 8000,
+ .rate_max = 48000,
.rate_min = 8000,
.channels_min = 2,
.channels_max = 4,
@@ -1629,6 +1654,8 @@ static int msm_sdw_notifier_service_cb(struct notifier_block *nb,
struct msm_sdw_priv *msm_sdw = container_of(nb,
struct msm_sdw_priv,
service_nb);
+ bool adsp_ready = false;
+ unsigned long timeout;
pr_debug("%s: Service opcode 0x%lx\n", __func__, opcode);
@@ -1641,15 +1668,34 @@ static int msm_sdw_notifier_service_cb(struct notifier_block *nb,
SWR_DEVICE_DOWN, NULL);
break;
case AUDIO_NOTIFIER_SERVICE_UP:
- if (initial_boot) {
- initial_boot = false;
- break;
+ if (!q6core_is_adsp_ready()) {
+ dev_dbg(msm_sdw->dev, "ADSP isn't ready\n");
+ timeout = jiffies +
+ msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
+ while (!time_after(jiffies, timeout)) {
+ if (!q6core_is_adsp_ready()) {
+ dev_dbg(msm_sdw->dev,
+ "ADSP isn't ready\n");
+ } else {
+ dev_dbg(msm_sdw->dev,
+ "ADSP is ready\n");
+ adsp_ready = true;
+ goto powerup;
+ }
+ }
+ } else {
+ adsp_ready = true;
+ dev_dbg(msm_sdw->dev, "%s: DSP is ready\n", __func__);
}
- msm_sdw->dev_up = true;
- msm_sdw_init_reg(msm_sdw->codec);
- regcache_mark_dirty(msm_sdw->regmap);
- regcache_sync(msm_sdw->regmap);
- msm_sdw_set_spkr_mode(msm_sdw->codec, msm_sdw->spkr_mode);
+powerup:
+ if (adsp_ready) {
+ msm_sdw->dev_up = true;
+ msm_sdw_init_reg(msm_sdw->codec);
+ regcache_mark_dirty(msm_sdw->regmap);
+ regcache_sync(msm_sdw->regmap);
+ msm_sdw_set_spkr_mode(msm_sdw->codec,
+ msm_sdw->spkr_mode);
+ }
break;
default:
break;
@@ -1676,17 +1722,14 @@ static int msm_sdw_codec_probe(struct snd_soc_codec *codec)
msm_sdw_init_reg(codec);
msm_sdw->version = MSM_SDW_VERSION_1_0;
- if (is_ssr_en) {
- msm_sdw->service_nb.notifier_call = msm_sdw_notifier_service_cb;
- ret = audio_notifier_register("msm_sdw",
- AUDIO_NOTIFIER_ADSP_DOMAIN,
- &msm_sdw->service_nb);
- if (ret < 0)
- dev_err(msm_sdw->dev,
- "%s: Audio notifier register failed ret = %d\n",
- __func__, ret);
- }
-
+ msm_sdw->service_nb.notifier_call = msm_sdw_notifier_service_cb;
+ ret = audio_notifier_register("msm_sdw",
+ AUDIO_NOTIFIER_ADSP_DOMAIN,
+ &msm_sdw->service_nb);
+ if (ret < 0)
+ dev_err(msm_sdw->dev,
+ "%s: Audio notifier register failed ret = %d\n",
+ __func__, ret);
return 0;
}
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index 3da57df..52e6815 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -48,18 +48,11 @@
#define BUS_DOWN 1
/*
- *50 Milliseconds sufficient for DSP bring up in the modem
+ * 50 milliseconds is sufficient for DSP bring-up on the LPASS
* after Sub System Restart
*/
#define ADSP_STATE_READY_TIMEOUT_MS 50
-enum {
- BOOST_SWITCH = 0,
- BOOST_ALWAYS,
- BYPASS_ALWAYS,
- BOOST_ON_FOREVER,
-};
-
#define EAR_PMD 0
#define EAR_PMU 1
#define SPK_PMD 2
@@ -81,20 +74,16 @@ enum {
((value - min_value)/step_size)
enum {
- RX_MIX1_INP_SEL_ZERO = 0,
- RX_MIX1_INP_SEL_IIR1,
- RX_MIX1_INP_SEL_IIR2,
- RX_MIX1_INP_SEL_RX1,
- RX_MIX1_INP_SEL_RX2,
- RX_MIX1_INP_SEL_RX3,
+ BOOST_SWITCH = 0,
+ BOOST_ALWAYS,
+ BYPASS_ALWAYS,
+ BOOST_ON_FOREVER,
};
static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
static struct snd_soc_dai_driver msm_anlg_cdc_i2s_dai[];
/* By default enable the internal speaker boost */
static bool spkr_boost_en = true;
-static bool initial_boot = true;
-static bool is_ssr_en;
static char on_demand_supply_name[][MAX_ON_DEMAND_SUPPLY_NAME_LENGTH] = {
"cdc-vdd-mic-bias",
@@ -876,11 +865,12 @@ static int msm_anlg_cdc_dig_register_notifier(void *handle,
struct notifier_block *nblock,
bool enable)
{
- struct sdm660_cdc *handle_cdc = handle;
+ struct sdm660_cdc_priv *handle_cdc = handle;
if (enable)
return blocking_notifier_chain_register(&handle_cdc->notifier,
nblock);
+
return blocking_notifier_chain_unregister(&handle_cdc->notifier,
nblock);
}
@@ -895,10 +885,10 @@ static int msm_anlg_cdc_mbhc_register_notifier(struct wcd_mbhc *wcd_mbhc,
if (enable)
return blocking_notifier_chain_register(
- &sdm660_cdc->notifier,
+ &sdm660_cdc->notifier_mbhc,
nblock);
- return blocking_notifier_chain_unregister(&sdm660_cdc->notifier,
+ return blocking_notifier_chain_unregister(&sdm660_cdc->notifier_mbhc,
nblock);
}
@@ -946,7 +936,7 @@ static const uint32_t wcd_imped_val[] = {4, 8, 12, 13, 16,
static void msm_anlg_cdc_dig_notifier_call(struct snd_soc_codec *codec,
const enum dig_cdc_notify_event event)
{
- struct sdm660_cdc *sdm660_cdc = codec->control_data;
+ struct sdm660_cdc_priv *sdm660_cdc = snd_soc_codec_get_drvdata(codec);
pr_debug("%s: notifier call event %d\n", __func__, event);
blocking_notifier_call_chain(&sdm660_cdc->notifier,
@@ -960,7 +950,7 @@ static void msm_anlg_cdc_notifier_call(struct snd_soc_codec *codec,
snd_soc_codec_get_drvdata(codec);
dev_dbg(codec->dev, "%s: notifier call event %d\n", __func__, event);
- blocking_notifier_call_chain(&sdm660_cdc->notifier, event,
+ blocking_notifier_call_chain(&sdm660_cdc->notifier_mbhc, event,
&sdm660_cdc->mbhc);
}
@@ -1453,7 +1443,6 @@ static int msm_anlg_cdc_codec_enable_clock_block(struct snd_soc_codec *codec,
} else {
snd_soc_update_bits(codec,
MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x00);
- msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_CLK_OFF);
}
return 0;
}
@@ -2048,12 +2037,6 @@ static const char * const wsa_spk_text[] = {
"ZERO", "WSA"
};
-
-
-static const char * const iir_inp1_text[] = {
- "ZERO", "DEC1", "DEC2", "RX1", "RX2", "RX3"
-};
-
static const struct soc_enum adc2_enum =
SOC_ENUM_SINGLE(SND_SOC_NOPM, 0,
ARRAY_SIZE(adc2_mux_text), adc2_mux_text);
@@ -2601,7 +2584,7 @@ static int msm_anlg_cdc_codec_enable_micbias(struct snd_soc_dapm_widget *w,
static void update_clkdiv(void *handle, int val)
{
- struct sdm660_cdc *handle_cdc = handle;
+ struct sdm660_cdc_priv *handle_cdc = handle;
struct snd_soc_codec *codec = handle_cdc->codec;
snd_soc_update_bits(codec,
@@ -2611,10 +2594,7 @@ static void update_clkdiv(void *handle, int val)
static int get_cdc_version(void *handle)
{
- struct sdm660_cdc *handle_cdc = handle;
- struct snd_soc_codec *codec = handle_cdc->codec;
- struct sdm660_cdc_priv *sdm660_cdc =
- snd_soc_codec_get_drvdata(codec);
+ struct sdm660_cdc_priv *sdm660_cdc = handle;
return get_codec_version(sdm660_cdc);
}
@@ -3500,18 +3480,24 @@ static const struct snd_soc_dapm_widget msm_anlg_cdc_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("AMIC1"),
SND_SOC_DAPM_INPUT("AMIC2"),
SND_SOC_DAPM_INPUT("AMIC3"),
- SND_SOC_DAPM_INPUT("PDM_IN_RX1"),
- SND_SOC_DAPM_INPUT("PDM_IN_RX2"),
- SND_SOC_DAPM_INPUT("PDM_IN_RX3"),
+ SND_SOC_DAPM_AIF_IN("PDM_IN_RX1", "PDM Playback",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PDM_IN_RX2", "PDM Playback",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PDM_IN_RX3", "PDM Playback",
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_OUTPUT("EAR"),
SND_SOC_DAPM_OUTPUT("WSA_SPK OUT"),
SND_SOC_DAPM_OUTPUT("HEADPHONE"),
SND_SOC_DAPM_OUTPUT("SPK_OUT"),
SND_SOC_DAPM_OUTPUT("LINEOUT"),
- SND_SOC_DAPM_OUTPUT("ADC1_OUT"),
- SND_SOC_DAPM_OUTPUT("ADC2_OUT"),
- SND_SOC_DAPM_OUTPUT("ADC3_OUT"),
+ SND_SOC_DAPM_AIF_OUT("ADC1_OUT", "PDM Capture",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("ADC2_OUT", "PDM Capture",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("ADC3_OUT", "PDM Capture",
+ 0, SND_SOC_NOPM, 0, 0),
};
static const struct sdm660_cdc_reg_mask_val msm_anlg_cdc_reg_defaults[] = {
@@ -3677,11 +3663,12 @@ static int msm_anlg_cdc_bringup(struct snd_soc_codec *codec)
MSM89XX_PMIC_ANALOG_SEC_ACCESS,
0xA5);
snd_soc_write(codec, MSM89XX_PMIC_ANALOG_PERPH_RESET_CTL4, 0x00);
+
return 0;
}
static struct regulator *msm_anlg_cdc_find_regulator(
- const struct sdm660_cdc *sdm660_cdc,
+ const struct sdm660_cdc_priv *sdm660_cdc,
const char *name)
{
int i;
@@ -3772,11 +3759,11 @@ static int msm_anlg_cdc_device_down(struct snd_soc_codec *codec)
snd_soc_write(codec,
MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x93);
- msm_anlg_cdc_bringup(codec);
atomic_set(&pdata->int_mclk0_enabled, false);
msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_SSR_DOWN);
set_bit(BUS_DOWN, &sdm660_cdc_priv->status_mask);
snd_soc_card_change_online_state(codec->component.card, 0);
+
return 0;
}
@@ -3794,14 +3781,6 @@ static int msm_anlg_cdc_device_up(struct snd_soc_codec *codec)
/* delay is required to make sure sound card state updated */
usleep_range(5000, 5100);
- msm_anlg_cdc_codec_init_reg(codec);
- msm_anlg_cdc_update_reg_defaults(codec);
-
- regcache_mark_dirty(codec->component.regmap);
- regcache_sync_region(codec->component.regmap,
- MSM89XX_PMIC_DIGITAL_REVISION1,
- MSM89XX_PMIC_CDC_MAX_REGISTER);
-
snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_INT_EN_SET,
MSM89XX_PMIC_DIGITAL_INT_EN_SET__POR);
snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_INT_EN_CLR,
@@ -3850,10 +3829,6 @@ static int sdm660_cdc_notifier_service_cb(struct notifier_block *nb,
msm_anlg_cdc_device_down(codec);
break;
case AUDIO_NOTIFIER_SERVICE_UP:
- if (initial_boot) {
- initial_boot = false;
- break;
- }
dev_dbg(codec->dev,
"ADSP is about to power up. bring up codec\n");
@@ -3916,7 +3891,7 @@ EXPORT_SYMBOL(msm_anlg_cdc_update_int_spk_boost);
static void msm_anlg_cdc_set_micb_v(struct snd_soc_codec *codec)
{
- struct sdm660_cdc *sdm660_cdc = codec->control_data;
+ struct sdm660_cdc_priv *sdm660_cdc = snd_soc_codec_get_drvdata(codec);
struct sdm660_cdc_pdata *pdata = sdm660_cdc->dev->platform_data;
u8 reg_val;
@@ -4053,17 +4028,16 @@ int msm_anlg_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
return -ENOMEM;
}
sdm660_cdc_priv->version_entry = version_entry;
- if (is_ssr_en) {
- sdm660_cdc_priv->audio_ssr_nb.notifier_call =
- sdm660_cdc_notifier_service_cb;
- ret = audio_notifier_register("pmic_analog_cdc",
- AUDIO_NOTIFIER_ADSP_DOMAIN,
- &sdm660_cdc_priv->audio_ssr_nb);
- if (ret < 0) {
- pr_err("%s: Audio notifier register failed ret = %d\n",
- __func__, ret);
- return ret;
- }
+
+ sdm660_cdc_priv->audio_ssr_nb.notifier_call =
+ sdm660_cdc_notifier_service_cb;
+ ret = audio_notifier_register("pmic_analog_cdc",
+ AUDIO_NOTIFIER_ADSP_DOMAIN,
+ &sdm660_cdc_priv->audio_ssr_nb);
+ if (ret < 0) {
+ pr_err("%s: Audio notifier register failed ret = %d\n",
+ __func__, ret);
+ return ret;
}
return 0;
}
@@ -4071,63 +4045,54 @@ EXPORT_SYMBOL(msm_anlg_codec_info_create_codec_entry);
static int msm_anlg_cdc_soc_probe(struct snd_soc_codec *codec)
{
- struct sdm660_cdc_priv *sdm660_cdc_priv;
- struct sdm660_cdc *handle_cdc;
+ struct sdm660_cdc_priv *sdm660_cdc;
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
int ret;
- sdm660_cdc_priv = devm_kzalloc(codec->dev,
- sizeof(struct sdm660_cdc_priv),
- GFP_KERNEL);
- if (!sdm660_cdc_priv)
- return -ENOMEM;
-
- codec->control_data = dev_get_drvdata(codec->dev);
- snd_soc_codec_set_drvdata(codec, sdm660_cdc_priv);
- sdm660_cdc_priv->codec = codec;
- handle_cdc = codec->control_data;
- handle_cdc->codec = codec;
+ sdm660_cdc = dev_get_drvdata(codec->dev);
+ sdm660_cdc->codec = codec;
/* codec resmgr module init */
- sdm660_cdc_priv->spkdrv_reg =
- msm_anlg_cdc_find_regulator(codec->control_data,
+ sdm660_cdc->spkdrv_reg =
+ msm_anlg_cdc_find_regulator(sdm660_cdc,
MSM89XX_VDD_SPKDRV_NAME);
- sdm660_cdc_priv->pmic_rev =
+ sdm660_cdc->pmic_rev =
snd_soc_read(codec,
MSM89XX_PMIC_DIGITAL_REVISION1);
- sdm660_cdc_priv->codec_version =
+ sdm660_cdc->codec_version =
snd_soc_read(codec,
MSM89XX_PMIC_DIGITAL_PERPH_SUBTYPE);
- sdm660_cdc_priv->analog_major_rev =
+ sdm660_cdc->analog_major_rev =
snd_soc_read(codec,
MSM89XX_PMIC_ANALOG_REVISION4);
- if (sdm660_cdc_priv->codec_version == CONGA) {
+ if (sdm660_cdc->codec_version == CONGA) {
dev_dbg(codec->dev, "%s :Conga REV: %d\n", __func__,
- sdm660_cdc_priv->codec_version);
- sdm660_cdc_priv->ext_spk_boost_set = true;
+ sdm660_cdc->codec_version);
+ sdm660_cdc->ext_spk_boost_set = true;
} else {
dev_dbg(codec->dev, "%s :PMIC REV: %d\n", __func__,
- sdm660_cdc_priv->pmic_rev);
- if (sdm660_cdc_priv->pmic_rev == TOMBAK_1_0 &&
- sdm660_cdc_priv->codec_version == CAJON_2_0) {
- if (sdm660_cdc_priv->analog_major_rev == 0x02) {
- sdm660_cdc_priv->codec_version = DRAX_CDC;
+ sdm660_cdc->pmic_rev);
+ if (sdm660_cdc->pmic_rev == TOMBAK_1_0 &&
+ sdm660_cdc->codec_version == CAJON_2_0) {
+ if (sdm660_cdc->analog_major_rev == 0x02) {
+ sdm660_cdc->codec_version = DRAX_CDC;
dev_dbg(codec->dev,
"%s : Drax codec detected\n", __func__);
} else {
- sdm660_cdc_priv->codec_version = DIANGU;
+ sdm660_cdc->codec_version = DIANGU;
dev_dbg(codec->dev, "%s : Diangu detected\n",
__func__);
}
- } else if (sdm660_cdc_priv->pmic_rev == TOMBAK_1_0 &&
+ } else if (sdm660_cdc->pmic_rev == TOMBAK_1_0 &&
(snd_soc_read(codec, MSM89XX_PMIC_ANALOG_NCP_FBCTRL)
& 0x80)) {
- sdm660_cdc_priv->codec_version = CAJON;
+ sdm660_cdc->codec_version = CAJON;
dev_dbg(codec->dev, "%s : Cajon detected\n", __func__);
- } else if (sdm660_cdc_priv->pmic_rev == TOMBAK_2_0 &&
+ } else if (sdm660_cdc->pmic_rev == TOMBAK_2_0 &&
(snd_soc_read(codec, MSM89XX_PMIC_ANALOG_NCP_FBCTRL)
& 0x80)) {
- sdm660_cdc_priv->codec_version = CAJON_2_0;
+ sdm660_cdc->codec_version = CAJON_2_0;
dev_dbg(codec->dev, "%s : Cajon 2.0 detected\n",
__func__);
}
@@ -4136,8 +4101,8 @@ static int msm_anlg_cdc_soc_probe(struct snd_soc_codec *codec)
* set to default boost option BOOST_SWITCH, user mixer path can change
* it to BOOST_ALWAYS or BOOST_BYPASS based on solution chosen.
*/
- sdm660_cdc_priv->boost_option = BOOST_SWITCH;
- sdm660_cdc_priv->hph_mode = NORMAL_MODE;
+ sdm660_cdc->boost_option = BOOST_SWITCH;
+ sdm660_cdc->hph_mode = NORMAL_MODE;
msm_anlg_cdc_dt_parse_boost_info(codec);
msm_anlg_cdc_set_boost_v(codec);
@@ -4154,50 +4119,52 @@ static int msm_anlg_cdc_soc_probe(struct snd_soc_codec *codec)
wcd9xxx_spmi_set_codec(codec);
- sdm660_cdc_priv->on_demand_list[ON_DEMAND_MICBIAS].supply =
+ sdm660_cdc->on_demand_list[ON_DEMAND_MICBIAS].supply =
msm_anlg_cdc_find_regulator(
- codec->control_data,
+ sdm660_cdc,
on_demand_supply_name[ON_DEMAND_MICBIAS]);
- atomic_set(&sdm660_cdc_priv->on_demand_list[ON_DEMAND_MICBIAS].ref,
+ atomic_set(&sdm660_cdc->on_demand_list[ON_DEMAND_MICBIAS].ref,
0);
- BLOCKING_INIT_NOTIFIER_HEAD(&sdm660_cdc_priv->notifier);
-
- sdm660_cdc_priv->fw_data = devm_kzalloc(codec->dev,
- sizeof(*(sdm660_cdc_priv->fw_data)),
+ sdm660_cdc->fw_data = devm_kzalloc(codec->dev,
+ sizeof(*(sdm660_cdc->fw_data)),
GFP_KERNEL);
- if (!sdm660_cdc_priv->fw_data)
+ if (!sdm660_cdc->fw_data)
return -ENOMEM;
- set_bit(WCD9XXX_MBHC_CAL, sdm660_cdc_priv->fw_data->cal_bit);
- ret = wcd_cal_create_hwdep(sdm660_cdc_priv->fw_data,
+ set_bit(WCD9XXX_MBHC_CAL, sdm660_cdc->fw_data->cal_bit);
+ ret = wcd_cal_create_hwdep(sdm660_cdc->fw_data,
WCD9XXX_CODEC_HWDEP_NODE, codec);
if (ret < 0) {
dev_err(codec->dev, "%s hwdep failed %d\n", __func__, ret);
return ret;
}
- wcd_mbhc_init(&sdm660_cdc_priv->mbhc, codec, &mbhc_cb, &intr_ids,
+ wcd_mbhc_init(&sdm660_cdc->mbhc, codec, &mbhc_cb, &intr_ids,
wcd_mbhc_registers, true);
- sdm660_cdc_priv->int_mclk0_enabled = false;
+ sdm660_cdc->int_mclk0_enabled = false;
/*Update speaker boost configuration*/
- sdm660_cdc_priv->spk_boost_set = spkr_boost_en;
+ sdm660_cdc->spk_boost_set = spkr_boost_en;
pr_debug("%s: speaker boost configured = %d\n",
- __func__, sdm660_cdc_priv->spk_boost_set);
+ __func__, sdm660_cdc->spk_boost_set);
/* Set initial MICBIAS voltage level */
msm_anlg_cdc_set_micb_v(codec);
/* Set initial cap mode */
msm_anlg_cdc_configure_cap(codec, false, false);
+
+ snd_soc_dapm_ignore_suspend(dapm, "PDM Playback");
+ snd_soc_dapm_ignore_suspend(dapm, "PDM Capture");
+
return 0;
}
static int msm_anlg_cdc_soc_remove(struct snd_soc_codec *codec)
{
struct sdm660_cdc_priv *sdm660_cdc_priv =
- snd_soc_codec_get_drvdata(codec);
+ dev_get_drvdata(codec->dev);
sdm660_cdc_priv->spkdrv_reg = NULL;
sdm660_cdc_priv->on_demand_list[ON_DEMAND_MICBIAS].supply = NULL;
@@ -4209,7 +4176,7 @@ static int msm_anlg_cdc_soc_remove(struct snd_soc_codec *codec)
}
static int msm_anlg_cdc_enable_static_supplies_to_optimum(
- struct sdm660_cdc *sdm660_cdc,
+ struct sdm660_cdc_priv *sdm660_cdc,
struct sdm660_cdc_pdata *pdata)
{
int i;
@@ -4242,7 +4209,7 @@ static int msm_anlg_cdc_enable_static_supplies_to_optimum(
}
static int msm_anlg_cdc_disable_static_supplies_to_optimum(
- struct sdm660_cdc *sdm660_cdc,
+ struct sdm660_cdc_priv *sdm660_cdc,
struct sdm660_cdc_pdata *pdata)
{
int i;
@@ -4266,24 +4233,10 @@ static int msm_anlg_cdc_disable_static_supplies_to_optimum(
static int msm_anlg_cdc_suspend(struct snd_soc_codec *codec)
{
- struct msm_asoc_mach_data *pdata = NULL;
- struct sdm660_cdc *sdm660_cdc = codec->control_data;
+ struct sdm660_cdc_priv *sdm660_cdc = snd_soc_codec_get_drvdata(codec);
struct sdm660_cdc_pdata *sdm660_cdc_pdata =
sdm660_cdc->dev->platform_data;
- pdata = snd_soc_card_get_drvdata(codec->component.card);
- pr_debug("%s: mclk cnt = %d, mclk_enabled = %d\n",
- __func__, atomic_read(&pdata->int_mclk0_rsc_ref),
- atomic_read(&pdata->int_mclk0_enabled));
- if (atomic_read(&pdata->int_mclk0_enabled) == true) {
- cancel_delayed_work_sync(&pdata->disable_int_mclk0_work);
- mutex_lock(&pdata->cdc_int_mclk0_mutex);
- pdata->digital_cdc_core_clk.enable = 0;
- afe_set_lpass_clock_v2(AFE_PORT_ID_INT0_MI2S_RX,
- &pdata->digital_cdc_core_clk);
- atomic_set(&pdata->int_mclk0_enabled, false);
- mutex_unlock(&pdata->cdc_int_mclk0_mutex);
- }
msm_anlg_cdc_disable_static_supplies_to_optimum(sdm660_cdc,
sdm660_cdc_pdata);
return 0;
@@ -4292,7 +4245,7 @@ static int msm_anlg_cdc_suspend(struct snd_soc_codec *codec)
static int msm_anlg_cdc_resume(struct snd_soc_codec *codec)
{
struct msm_asoc_mach_data *pdata = NULL;
- struct sdm660_cdc *sdm660_cdc = codec->control_data;
+ struct sdm660_cdc_priv *sdm660_cdc = snd_soc_codec_get_drvdata(codec);
struct sdm660_cdc_pdata *sdm660_cdc_pdata =
sdm660_cdc->dev->platform_data;
@@ -4322,7 +4275,7 @@ static struct snd_soc_codec_driver soc_codec_dev_sdm660_cdc = {
.get_regmap = msm_anlg_get_regmap,
};
-static int msm_anlg_cdc_init_supplies(struct sdm660_cdc *sdm660_cdc,
+static int msm_anlg_cdc_init_supplies(struct sdm660_cdc_priv *sdm660_cdc,
struct sdm660_cdc_pdata *pdata)
{
int ret;
@@ -4397,7 +4350,7 @@ static int msm_anlg_cdc_init_supplies(struct sdm660_cdc *sdm660_cdc,
}
static int msm_anlg_cdc_enable_static_supplies(
- struct sdm660_cdc *sdm660_cdc,
+ struct sdm660_cdc_priv *sdm660_cdc,
struct sdm660_cdc_pdata *pdata)
{
int i;
@@ -4422,7 +4375,7 @@ static int msm_anlg_cdc_enable_static_supplies(
return ret;
}
-static void msm_anlg_cdc_disable_supplies(struct sdm660_cdc *sdm660_cdc,
+static void msm_anlg_cdc_disable_supplies(struct sdm660_cdc_priv *sdm660_cdc,
struct sdm660_cdc_pdata *pdata)
{
int i;
@@ -4449,7 +4402,7 @@ static const struct of_device_id sdm660_codec_of_match[] = {
static void msm_anlg_add_child_devices(struct work_struct *work)
{
- struct sdm660_cdc *pdata;
+ struct sdm660_cdc_priv *pdata;
struct platform_device *pdev;
struct device_node *node;
struct msm_dig_ctrl_data *dig_ctrl_data = NULL, *temp;
@@ -4457,7 +4410,7 @@ static void msm_anlg_add_child_devices(struct work_struct *work)
struct msm_dig_ctrl_platform_data *platdata;
char plat_dev_name[MSM_DIG_CDC_STRING_LEN];
- pdata = container_of(work, struct sdm660_cdc,
+ pdata = container_of(work, struct sdm660_cdc_priv,
msm_anlg_add_child_devices_work);
if (!pdata) {
pr_err("%s: Memory for pdata does not exist\n",
@@ -4538,7 +4491,7 @@ static void msm_anlg_add_child_devices(struct work_struct *work)
static int msm_anlg_cdc_probe(struct platform_device *pdev)
{
int ret = 0;
- struct sdm660_cdc *sdm660_cdc = NULL;
+ struct sdm660_cdc_priv *sdm660_cdc = NULL;
struct sdm660_cdc_pdata *pdata;
int adsp_state;
@@ -4565,7 +4518,7 @@ static int msm_anlg_cdc_probe(struct platform_device *pdev)
__func__);
goto rtn;
}
- sdm660_cdc = devm_kzalloc(&pdev->dev, sizeof(struct sdm660_cdc),
+ sdm660_cdc = devm_kzalloc(&pdev->dev, sizeof(struct sdm660_cdc_priv),
GFP_KERNEL);
if (sdm660_cdc == NULL) {
ret = -ENOMEM;
@@ -4589,7 +4542,6 @@ static int msm_anlg_cdc_probe(struct platform_device *pdev)
/* Allow supplies to be ready */
usleep_range(5, 6);
- dev_set_drvdata(&pdev->dev, sdm660_cdc);
wcd9xxx_spmi_set_dev(pdev, 0);
wcd9xxx_spmi_set_dev(pdev, 1);
if (wcd9xxx_spmi_irq_init()) {
@@ -4599,6 +4551,7 @@ static int msm_anlg_cdc_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev,
"%s: irq initialization passed\n", __func__);
}
+ dev_set_drvdata(&pdev->dev, sdm660_cdc);
ret = snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_sdm660_cdc,
@@ -4610,6 +4563,9 @@ static int msm_anlg_cdc_probe(struct platform_device *pdev)
__func__, ret);
goto err_supplies;
}
+ BLOCKING_INIT_NOTIFIER_HEAD(&sdm660_cdc->notifier);
+ BLOCKING_INIT_NOTIFIER_HEAD(&sdm660_cdc->notifier_mbhc);
+
sdm660_cdc->dig_plat_data.handle = (void *) sdm660_cdc;
sdm660_cdc->dig_plat_data.update_clkdiv = update_clkdiv;
sdm660_cdc->dig_plat_data.get_cdc_version = get_cdc_version;
@@ -4628,7 +4584,7 @@ static int msm_anlg_cdc_probe(struct platform_device *pdev)
static int msm_anlg_cdc_remove(struct platform_device *pdev)
{
- struct sdm660_cdc *sdm660_cdc = dev_get_drvdata(&pdev->dev);
+ struct sdm660_cdc_priv *sdm660_cdc = dev_get_drvdata(&pdev->dev);
struct sdm660_cdc_pdata *pdata = sdm660_cdc->dev->platform_data;
snd_soc_unregister_codec(&pdev->dev);
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h
index bb3af57..0c9e9a6 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h
@@ -172,7 +172,7 @@ struct msm_dig_ctrl_platform_data {
bool enable);
};
-struct sdm660_cdc {
+struct sdm660_cdc_priv {
struct device *dev;
u32 num_of_supplies;
struct regulator_bulk_data *supplies;
@@ -182,15 +182,6 @@ struct sdm660_cdc {
/* digital codec data structure */
struct msm_dig_ctrl_data *dig_ctrl_data;
struct blocking_notifier_head notifier;
-};
-
-struct sdm660_cdc_pdata {
- struct wcd_micbias_setting micbias;
- struct sdm660_cdc_regulator regulator[MAX_REGULATOR];
-};
-
-struct sdm660_cdc_priv {
- struct snd_soc_codec *codec;
u16 pmic_rev;
u16 codec_version;
u16 analog_major_rev;
@@ -207,7 +198,7 @@ struct sdm660_cdc_priv {
bool ext_spk_boost_set;
struct on_demand_supply on_demand_list[ON_DEMAND_SUPPLIES_MAX];
struct regulator *spkdrv_reg;
- struct blocking_notifier_head notifier;
+ struct blocking_notifier_head notifier_mbhc;
/* mbhc module */
struct wcd_mbhc mbhc;
/* cal info for codec */
@@ -222,6 +213,12 @@ struct sdm660_cdc_priv {
struct snd_info_entry *version_entry;
};
+struct sdm660_cdc_pdata {
+ struct wcd_micbias_setting micbias;
+ struct sdm660_cdc_regulator regulator[MAX_REGULATOR];
+};
+
+
extern int msm_anlg_cdc_mclk_enable(struct snd_soc_codec *codec,
int mclk_enable, bool dapm);
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
index 08f5556..f140b19 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
@@ -74,15 +74,17 @@ static int msm_digcdc_clock_control(bool flag)
pdata = snd_soc_card_get_drvdata(registered_digcodec->component.card);
+ mutex_lock(&pdata->cdc_int_mclk0_mutex);
if (flag) {
if (atomic_read(&pdata->int_mclk0_enabled) == false) {
pdata->digital_cdc_core_clk.enable = 1;
ret = afe_set_lpass_clock_v2(
- AFE_PORT_ID_PRIMARY_MI2S_RX,
+ AFE_PORT_ID_INT0_MI2S_RX,
&pdata->digital_cdc_core_clk);
if (ret < 0) {
pr_err("%s:failed to enable the MCLK\n",
__func__);
+ mutex_unlock(&pdata->cdc_int_mclk0_mutex);
return ret;
}
pr_debug("enabled digital codec core clk\n");
@@ -94,6 +96,7 @@ static int msm_digcdc_clock_control(bool flag)
dev_dbg(registered_digcodec->dev,
"disable MCLK, workq to disable set already\n");
}
+ mutex_unlock(&pdata->cdc_int_mclk0_mutex);
return 0;
}
@@ -282,7 +285,7 @@ static int msm_dig_cdc_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
int event)
{
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct msm_dig *msm_dig_cdc = dev_get_drvdata(codec->dev);
+ struct msm_dig_priv *msm_dig_cdc = snd_soc_codec_get_drvdata(codec);
dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
@@ -539,14 +542,14 @@ static void tx_hpf_corner_freq_callback(struct work_struct *work)
struct delayed_work *hpf_delayed_work;
struct hpf_work *hpf_work;
struct snd_soc_codec *codec;
- struct msm_dig *msm_dig_cdc;
+ struct msm_dig_priv *msm_dig_cdc;
u16 tx_mux_ctl_reg;
u8 hpf_cut_of_freq;
hpf_delayed_work = to_delayed_work(work);
hpf_work = container_of(hpf_delayed_work, struct hpf_work, dwork);
codec = hpf_work->dig_cdc->codec;
- msm_dig_cdc = codec->control_data;
+ msm_dig_cdc = hpf_work->dig_cdc;
hpf_cut_of_freq = hpf_work->tx_hpf_cut_of_freq;
tx_mux_ctl_reg = MSM89XX_CDC_CORE_TX1_MUX_CTL +
@@ -823,8 +826,7 @@ static int msm_dig_cdc_codec_enable_dec(struct snd_soc_dapm_widget *w,
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct msm_asoc_mach_data *pdata = NULL;
unsigned int decimator;
- struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec);
- struct msm_dig *msm_dig_cdc = codec->control_data;
+ struct msm_dig_priv *msm_dig_cdc = snd_soc_codec_get_drvdata(codec);
char *dec_name = NULL;
char *widget_name = NULL;
char *temp;
@@ -894,7 +896,7 @@ static int msm_dig_cdc_codec_enable_dec(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
for (i = 0; i < NUM_DECIMATORS; i++) {
if (decimator == i + 1)
- dig_cdc->dec_active[i] = true;
+ msm_dig_cdc->dec_active[i] = true;
}
dec_hpf_cut_of_freq = snd_soc_read(codec, tx_mux_ctl_reg);
@@ -954,7 +956,7 @@ static int msm_dig_cdc_codec_enable_dec(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
for (i = 0; i < NUM_DECIMATORS; i++) {
if (decimator == i + 1)
- dig_cdc->dec_active[i] = false;
+ msm_dig_cdc->dec_active[i] = false;
}
break;
}
@@ -969,7 +971,7 @@ static int msm_dig_cdc_event_notify(struct notifier_block *block,
{
enum dig_cdc_notify_event event = (enum dig_cdc_notify_event)val;
struct snd_soc_codec *codec = registered_digcodec;
- struct msm_dig *msm_dig_cdc = codec->control_data;
+ struct msm_dig_priv *msm_dig_cdc = snd_soc_codec_get_drvdata(codec);
struct msm_asoc_mach_data *pdata = NULL;
pdata = snd_soc_card_get_drvdata(codec->component.card);
@@ -1152,36 +1154,35 @@ int msm_dig_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
return -ENOMEM;
}
msm_dig->version_entry = version_entry;
+ if (msm_dig->get_cdc_version)
+ msm_dig->version = msm_dig->get_cdc_version(msm_dig->handle);
+ else
+ msm_dig->version = DRAX_CDC;
+
return 0;
}
EXPORT_SYMBOL(msm_dig_codec_info_create_codec_entry);
static int msm_dig_cdc_soc_probe(struct snd_soc_codec *codec)
{
- struct msm_dig_priv *dig_cdc = NULL;
- struct msm_dig *msm_dig_cdc = dev_get_drvdata(codec->dev);
+ struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(codec->dev);
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
int i, ret;
- dig_cdc = devm_kzalloc(codec->dev, sizeof(struct msm_dig_priv),
- GFP_KERNEL);
- if (!dig_cdc)
- return -ENOMEM;
- snd_soc_codec_set_drvdata(codec, dig_cdc);
- dig_cdc->codec = codec;
- codec->control_data = msm_dig_cdc;
+ msm_dig_cdc->codec = codec;
snd_soc_add_codec_controls(codec, compander_kcontrols,
ARRAY_SIZE(compander_kcontrols));
for (i = 0; i < NUM_DECIMATORS; i++) {
- tx_hpf_work[i].dig_cdc = dig_cdc;
+ tx_hpf_work[i].dig_cdc = msm_dig_cdc;
tx_hpf_work[i].decimator = i + 1;
INIT_DELAYED_WORK(&tx_hpf_work[i].dwork,
tx_hpf_corner_freq_callback);
}
for (i = 0; i < MSM89XX_RX_MAX; i++)
- dig_cdc->comp_enabled[i] = COMPANDER_NONE;
+ msm_dig_cdc->comp_enabled[i] = COMPANDER_NONE;
/* Register event notifier */
msm_dig_cdc->nblock.notifier_call = msm_dig_cdc_event_notify;
@@ -1195,15 +1196,23 @@ static int msm_dig_cdc_soc_probe(struct snd_soc_codec *codec)
return ret;
}
}
- /* Assign to DRAX_CDC for initial version */
- dig_cdc->version = DRAX_CDC;
registered_digcodec = codec;
+
+ snd_soc_dapm_ignore_suspend(dapm, "AIF1 Playback");
+ snd_soc_dapm_ignore_suspend(dapm, "AIF1 Capture");
+ snd_soc_dapm_ignore_suspend(dapm, "ADC1_IN");
+ snd_soc_dapm_ignore_suspend(dapm, "ADC2_IN");
+ snd_soc_dapm_ignore_suspend(dapm, "ADC3_IN");
+ snd_soc_dapm_ignore_suspend(dapm, "PDM_OUT_RX1");
+ snd_soc_dapm_ignore_suspend(dapm, "PDM_OUT_RX2");
+ snd_soc_dapm_ignore_suspend(dapm, "PDM_OUT_RX3");
+
return 0;
}
static int msm_dig_cdc_soc_remove(struct snd_soc_codec *codec)
{
- struct msm_dig *msm_dig_cdc = dev_get_drvdata(codec->dev);
+ struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(codec->dev);
if (msm_dig_cdc->register_notifier)
msm_dig_cdc->register_notifier(msm_dig_cdc->handle,
@@ -1965,14 +1974,32 @@ static struct snd_soc_dai_driver msm_codec_dais[] = {
static struct regmap *msm_digital_get_regmap(struct device *dev)
{
- struct msm_dig *msm_dig_cdc = dev_get_drvdata(dev);
+ struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(dev);
return msm_dig_cdc->regmap;
}
+static int msm_dig_cdc_suspend(struct snd_soc_codec *codec)
+{
+ struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(codec->dev);
+
+ msm_dig_cdc->dapm_bias_off = 1;
+ return 0;
+}
+
+static int msm_dig_cdc_resume(struct snd_soc_codec *codec)
+{
+ struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(codec->dev);
+
+ msm_dig_cdc->dapm_bias_off = 0;
+ return 0;
+}
+
static struct snd_soc_codec_driver soc_msm_dig_codec = {
.probe = msm_dig_cdc_soc_probe,
.remove = msm_dig_cdc_soc_remove,
+ .suspend = msm_dig_cdc_suspend,
+ .resume = msm_dig_cdc_resume,
.controls = msm_dig_snd_controls,
.num_controls = ARRAY_SIZE(msm_dig_snd_controls),
.dapm_widgets = msm_dig_dapm_widgets,
@@ -2002,10 +2029,10 @@ static int msm_dig_cdc_probe(struct platform_device *pdev)
{
int ret;
u32 dig_cdc_addr;
- struct msm_dig *msm_dig_cdc;
+ struct msm_dig_priv *msm_dig_cdc;
struct dig_ctrl_platform_data *pdata;
- msm_dig_cdc = devm_kzalloc(&pdev->dev, sizeof(struct msm_dig),
+ msm_dig_cdc = devm_kzalloc(&pdev->dev, sizeof(struct msm_dig_priv),
GFP_KERNEL);
if (!msm_dig_cdc)
return -ENOMEM;
@@ -2016,7 +2043,6 @@ static int msm_dig_cdc_probe(struct platform_device *pdev)
ret = -EINVAL;
goto rtn;
}
- dev_set_drvdata(&pdev->dev, msm_dig_cdc);
ret = of_property_read_u32(pdev->dev.of_node, "reg",
&dig_cdc_addr);
@@ -2041,6 +2067,7 @@ static int msm_dig_cdc_probe(struct platform_device *pdev)
msm_dig_cdc->handle = pdata->handle;
msm_dig_cdc->register_notifier = pdata->register_notifier;
+ dev_set_drvdata(&pdev->dev, msm_dig_cdc);
snd_soc_register_codec(&pdev->dev, &soc_msm_dig_codec,
msm_codec_dais, ARRAY_SIZE(msm_codec_dais));
dev_dbg(&pdev->dev, "%s: registered DIG CODEC 0x%x\n",
@@ -2055,6 +2082,44 @@ static int msm_dig_cdc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int msm_dig_suspend(struct device *dev)
+{
+ struct msm_asoc_mach_data *pdata =
+ snd_soc_card_get_drvdata(registered_digcodec->component.card);
+ struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(dev);
+
+ if (msm_dig_cdc->dapm_bias_off) {
+ pr_debug("%s: mclk cnt = %d, mclk_enabled = %d\n",
+ __func__, atomic_read(&pdata->int_mclk0_rsc_ref),
+ atomic_read(&pdata->int_mclk0_enabled));
+
+ if (atomic_read(&pdata->int_mclk0_enabled) == true) {
+ cancel_delayed_work_sync(
+ &pdata->disable_int_mclk0_work);
+ mutex_lock(&pdata->cdc_int_mclk0_mutex);
+ pdata->digital_cdc_core_clk.enable = 0;
+ afe_set_lpass_clock_v2(AFE_PORT_ID_INT0_MI2S_RX,
+ &pdata->digital_cdc_core_clk);
+ atomic_set(&pdata->int_mclk0_enabled, false);
+ mutex_unlock(&pdata->cdc_int_mclk0_mutex);
+ }
+ }
+
+ return 0;
+}
+
+static int msm_dig_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops msm_dig_pm_ops = {
+ .suspend = msm_dig_suspend,
+ .resume = msm_dig_resume,
+};
+#endif
+
static const struct of_device_id msm_dig_cdc_of_match[] = {
{.compatible = "qcom,msm-digital-codec"},
{},
@@ -2065,6 +2130,9 @@ static struct platform_driver msm_digcodec_driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = msm_dig_cdc_of_match,
+#ifdef CONFIG_PM
+ .pm = &msm_dig_pm_ops,
+#endif
},
.probe = msm_dig_cdc_probe,
.remove = msm_dig_cdc_remove,
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h
index 6d83290..f0e7a9c 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h
@@ -43,13 +43,11 @@ struct msm_dig_priv {
/* Entry for version info */
struct snd_info_entry *entry;
struct snd_info_entry *version_entry;
-};
-
-struct msm_dig {
char __iomem *dig_base;
struct regmap *regmap;
struct notifier_block nblock;
u32 mute_mask;
+ int dapm_bias_off;
void *handle;
void (*update_clkdiv)(void *handle, int val);
int (*get_cdc_version)(void *handle);
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsd.c b/sound/soc/codecs/wcd934x/wcd934x-dsd.c
index 580591a..3e23e37 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsd.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -97,6 +97,11 @@ static const struct snd_soc_dapm_route tavil_dsd_audio_map[] = {
{"DSD_FILTER_1", NULL, "DSD_R IF MUX"},
{"DSD_FILTER_1", NULL, "RX INT2 NATIVE SUPPLY"},
{"RX INT2 MIX3", "DSD HPHR Switch", "DSD_FILTER_1"},
+
+ {"DSD_FILTER_0", NULL, "RX INT3 NATIVE SUPPLY"},
+ {"RX INT3 MIX3", "DSD LO1 Switch", "DSD_FILTER_0"},
+ {"DSD_FILTER_1", NULL, "RX INT4 NATIVE SUPPLY"},
+ {"RX INT4 MIX3", "DSD LO2 Switch", "DSD_FILTER_1"},
};
static bool is_valid_dsd_interpolator(int interp_num)
diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
index d001fa2..d40546a 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
@@ -35,6 +35,7 @@ struct wcd934x_mbhc {
bool is_hph_recover;
};
+#ifdef CONFIG_SND_SOC_WCD934X_MBHC
extern int tavil_mbhc_init(struct wcd934x_mbhc **mbhc,
struct snd_soc_codec *codec,
struct fw_info *fw_data);
@@ -46,4 +47,38 @@ extern int tavil_mbhc_post_ssr_init(struct wcd934x_mbhc *mbhc,
struct snd_soc_codec *codec);
extern int tavil_mbhc_get_impedance(struct wcd934x_mbhc *wcd934x_mbhc,
uint32_t *zl, uint32_t *zr);
+#else
+static inline int tavil_mbhc_init(struct wcd934x_mbhc **mbhc,
+ struct snd_soc_codec *codec,
+ struct fw_info *fw_data)
+{
+ return 0;
+}
+static inline void tavil_mbhc_hs_detect_exit(struct snd_soc_codec *codec)
+{
+}
+static inline int tavil_mbhc_hs_detect(struct snd_soc_codec *codec,
+ struct wcd_mbhc_config *mbhc_cfg)
+{
+ return 0;
+}
+static inline void tavil_mbhc_deinit(struct snd_soc_codec *codec)
+{
+}
+static inline int tavil_mbhc_post_ssr_init(struct wcd934x_mbhc *mbhc,
+ struct snd_soc_codec *codec)
+{
+ return 0;
+}
+static inline int tavil_mbhc_get_impedance(struct wcd934x_mbhc *wcd934x_mbhc,
+ uint32_t *zl, uint32_t *zr)
+{
+ if (zl)
+ *zl = 0;
+ if (zr)
+ *zr = 0;
+ return -EINVAL;
+}
+#endif
+
#endif /* __WCD934X_MBHC_H__ */
diff --git a/sound/soc/codecs/wcd934x/wcd934x-routing.h b/sound/soc/codecs/wcd934x/wcd934x-routing.h
index cd165af..afd93b2 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-routing.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-routing.h
@@ -872,7 +872,8 @@ const struct snd_soc_dapm_route tavil_audio_map[] = {
{"RX INT3 SEC MIX", NULL, "RX INT3_1 INTERP"},
{"RX INT3 MIX2", NULL, "RX INT3 SEC MIX"},
{"RX INT3 MIX2", NULL, "RX INT3 MIX2 INP"},
- {"RX INT3 DAC", NULL, "RX INT3 MIX2"},
+ {"RX INT3 MIX3", NULL, "RX INT3 MIX2"},
+ {"RX INT3 DAC", NULL, "RX INT3 MIX3"},
{"RX INT3 DAC", NULL, "RX_BIAS"},
{"LINEOUT1 PA", NULL, "RX INT3 DAC"},
{"LINEOUT1", NULL, "LINEOUT1 PA"},
@@ -882,7 +883,8 @@ const struct snd_soc_dapm_route tavil_audio_map[] = {
{"RX INT4 SEC MIX", NULL, "RX INT4_1 MIX1"},
{"RX INT4 MIX2", NULL, "RX INT4 SEC MIX"},
{"RX INT4 MIX2", NULL, "RX INT4 MIX2 INP"},
- {"RX INT4 DAC", NULL, "RX INT4 MIX2"},
+ {"RX INT4 MIX3", NULL, "RX INT4 MIX2"},
+ {"RX INT4 DAC", NULL, "RX INT4 MIX3"},
{"RX INT4 DAC", NULL, "RX_BIAS"},
{"LINEOUT2 PA", NULL, "RX INT4 DAC"},
{"LINEOUT2", NULL, "LINEOUT2 PA"},
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index eb856c2..4b6fcb0b 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -2292,6 +2292,9 @@ static int tavil_codec_enable_lineout_pa(struct snd_soc_dapm_widget *w,
{
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
u16 lineout_vol_reg = 0, lineout_mix_vol_reg = 0;
+ u16 dsd_mute_reg = 0, dsd_clk_reg = 0;
+ struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+ struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
@@ -2299,9 +2302,13 @@ static int tavil_codec_enable_lineout_pa(struct snd_soc_dapm_widget *w,
if (w->shift == 7) {
lineout_vol_reg = WCD934X_CDC_RX3_RX_PATH_CTL;
lineout_mix_vol_reg = WCD934X_CDC_RX3_RX_PATH_MIX_CTL;
+ dsd_mute_reg = WCD934X_CDC_DSD0_CFG2;
+ dsd_clk_reg = WCD934X_CDC_DSD0_PATH_CTL;
} else if (w->shift == 6) {
lineout_vol_reg = WCD934X_CDC_RX4_RX_PATH_CTL;
lineout_mix_vol_reg = WCD934X_CDC_RX4_RX_PATH_MIX_CTL;
+ dsd_mute_reg = WCD934X_CDC_DSD1_CFG2;
+ dsd_clk_reg = WCD934X_CDC_DSD1_PATH_CTL;
}
} else {
dev_err(codec->dev, "%s: Error enabling lineout PA\n",
@@ -2326,6 +2333,12 @@ static int tavil_codec_enable_lineout_pa(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec,
lineout_mix_vol_reg,
0x10, 0x00);
+ if (dsd_conf && (snd_soc_read(codec, dsd_clk_reg) & 0x01))
+ snd_soc_update_bits(codec, dsd_mute_reg, 0x04, 0x00);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ if (dsd_conf && (snd_soc_read(codec, dsd_clk_reg) & 0x01))
+ snd_soc_update_bits(codec, dsd_mute_reg, 0x04, 0x04);
break;
case SND_SOC_DAPM_POST_PMD:
/*
@@ -6818,6 +6831,16 @@ static const struct snd_kcontrol_new hphr_mixer[] = {
tavil_dsd_mixer_get, tavil_dsd_mixer_put),
};
+static const struct snd_kcontrol_new lo1_mixer[] = {
+ SOC_SINGLE_EXT("DSD LO1 Switch", SND_SOC_NOPM, INTERP_LO1, 1, 0,
+ tavil_dsd_mixer_get, tavil_dsd_mixer_put),
+};
+
+static const struct snd_kcontrol_new lo2_mixer[] = {
+ SOC_SINGLE_EXT("DSD LO2 Switch", SND_SOC_NOPM, INTERP_LO2, 1, 0,
+ tavil_dsd_mixer_get, tavil_dsd_mixer_put),
+};
+
static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
SND_SOC_DAPM_AIF_IN_E("AIF1 PB", "AIF1 Playback", 0, SND_SOC_NOPM,
AIF1_PB, 0, tavil_codec_enable_slimrx,
@@ -6950,7 +6973,11 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
SND_SOC_DAPM_MIXER("RX INT2 MIX3", SND_SOC_NOPM, 0, 0, hphr_mixer,
ARRAY_SIZE(hphr_mixer)),
SND_SOC_DAPM_MIXER("RX INT3 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT3 MIX3", SND_SOC_NOPM, 0, 0, lo1_mixer,
+ ARRAY_SIZE(lo1_mixer)),
SND_SOC_DAPM_MIXER("RX INT4 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT4 MIX3", SND_SOC_NOPM, 0, 0, lo2_mixer,
+ ARRAY_SIZE(lo2_mixer)),
SND_SOC_DAPM_MIXER("RX INT7 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MIXER_E("RX INT7 CHAIN", SND_SOC_NOPM, 0, 0,
NULL, 0, tavil_codec_spk_boost_event,
@@ -7339,11 +7366,11 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
SND_SOC_DAPM_PGA_E("LINEOUT1 PA", WCD934X_ANA_LO_1_2, 7, 0, NULL, 0,
tavil_codec_enable_lineout_pa,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_PGA_E("LINEOUT2 PA", WCD934X_ANA_LO_1_2, 6, 0, NULL, 0,
tavil_codec_enable_lineout_pa,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_PGA_E("ANC EAR PA", WCD934X_ANA_EAR, 7, 0, NULL, 0,
tavil_codec_enable_ear_pa, SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
@@ -8068,13 +8095,8 @@ static struct snd_soc_dai_driver tavil_dai[] = {
static void tavil_codec_power_gate_digital_core(struct tavil_priv *tavil)
{
- struct snd_soc_codec *codec = tavil->codec;
-
- if (!codec)
- return;
-
mutex_lock(&tavil->power_lock);
- dev_dbg(codec->dev, "%s: Entering power gating function, %d\n",
+ dev_dbg(tavil->dev, "%s: Entering power gating function, %d\n",
__func__, tavil->power_active_ref);
if (tavil->power_active_ref > 0)
@@ -8083,16 +8105,16 @@ static void tavil_codec_power_gate_digital_core(struct tavil_priv *tavil)
wcd9xxx_set_power_state(tavil->wcd9xxx,
WCD_REGION_POWER_COLLAPSE_BEGIN,
WCD9XXX_DIG_CORE_REGION_1);
- snd_soc_update_bits(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
- 0x04, 0x04);
- snd_soc_update_bits(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
- 0x01, 0x00);
- snd_soc_update_bits(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
- 0x02, 0x00);
+ regmap_update_bits(tavil->wcd9xxx->regmap,
+ WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x04, 0x04);
+ regmap_update_bits(tavil->wcd9xxx->regmap,
+ WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x01, 0x00);
+ regmap_update_bits(tavil->wcd9xxx->regmap,
+ WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x02, 0x00);
wcd9xxx_set_power_state(tavil->wcd9xxx, WCD_REGION_POWER_DOWN,
WCD9XXX_DIG_CORE_REGION_1);
exit:
- dev_dbg(codec->dev, "%s: Exiting power gating function, %d\n",
+ dev_dbg(tavil->dev, "%s: Exiting power gating function, %d\n",
__func__, tavil->power_active_ref);
mutex_unlock(&tavil->power_lock);
}
@@ -8101,34 +8123,32 @@ static void tavil_codec_power_gate_work(struct work_struct *work)
{
struct tavil_priv *tavil;
struct delayed_work *dwork;
- struct snd_soc_codec *codec;
dwork = to_delayed_work(work);
tavil = container_of(dwork, struct tavil_priv, power_gate_work);
- codec = tavil->codec;
-
- if (!codec)
- return;
tavil_codec_power_gate_digital_core(tavil);
}
/* called under power_lock acquisition */
-static int tavil_dig_core_remove_power_collapse(struct snd_soc_codec *codec)
+static int tavil_dig_core_remove_power_collapse(struct tavil_priv *tavil)
{
- struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
-
- snd_soc_write(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
- snd_soc_write(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
- snd_soc_update_bits(codec, WCD934X_CODEC_RPM_RST_CTL, 0x02, 0x00);
- snd_soc_update_bits(codec, WCD934X_CODEC_RPM_RST_CTL, 0x02, 0x02);
- snd_soc_write(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
+ regmap_write(tavil->wcd9xxx->regmap,
+ WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x05);
+ regmap_write(tavil->wcd9xxx->regmap,
+ WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x07);
+ regmap_update_bits(tavil->wcd9xxx->regmap,
+ WCD934X_CODEC_RPM_RST_CTL, 0x02, 0x00);
+ regmap_update_bits(tavil->wcd9xxx->regmap,
+ WCD934X_CODEC_RPM_RST_CTL, 0x02, 0x02);
+ regmap_write(tavil->wcd9xxx->regmap,
+ WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x03);
wcd9xxx_set_power_state(tavil->wcd9xxx,
WCD_REGION_POWER_COLLAPSE_REMOVE,
WCD9XXX_DIG_CORE_REGION_1);
- regcache_mark_dirty(codec->component.regmap);
- regcache_sync_region(codec->component.regmap,
+ regcache_mark_dirty(tavil->wcd9xxx->regmap);
+ regcache_sync_region(tavil->wcd9xxx->regmap,
WCD934X_DIG_CORE_REG_MIN,
WCD934X_DIG_CORE_REG_MAX);
@@ -8138,7 +8158,6 @@ static int tavil_dig_core_remove_power_collapse(struct snd_soc_codec *codec)
static int tavil_dig_core_power_collapse(struct tavil_priv *tavil,
int req_state)
{
- struct snd_soc_codec *codec;
int cur_state;
/* Exit if feature is disabled */
@@ -8159,10 +8178,6 @@ static int tavil_dig_core_power_collapse(struct tavil_priv *tavil,
goto unlock_mutex;
}
- codec = tavil->codec;
- if (!codec)
- goto unlock_mutex;
-
if (req_state == POWER_COLLAPSE) {
if (tavil->power_active_ref == 0) {
schedule_delayed_work(&tavil->power_gate_work,
@@ -8180,7 +8195,7 @@ static int tavil_dig_core_power_collapse(struct tavil_priv *tavil,
tavil->wcd9xxx,
WCD9XXX_DIG_CORE_REGION_1);
if (cur_state == WCD_REGION_POWER_DOWN) {
- tavil_dig_core_remove_power_collapse(codec);
+ tavil_dig_core_remove_power_collapse(tavil);
} else {
mutex_unlock(&tavil->power_lock);
cancel_delayed_work_sync(
@@ -9299,6 +9314,7 @@ static int tavil_soc_codec_probe(struct snd_soc_codec *codec)
snd_soc_dapm_ignore_suspend(dapm, "AIF3 Playback");
snd_soc_dapm_ignore_suspend(dapm, "AIF3 Capture");
snd_soc_dapm_ignore_suspend(dapm, "AIF4 Playback");
+ snd_soc_dapm_ignore_suspend(dapm, "AIF4 MAD TX");
snd_soc_dapm_ignore_suspend(dapm, "VIfeed");
snd_soc_dapm_sync(dapm);
diff --git a/sound/soc/codecs/wsa881x-temp-sensor.c b/sound/soc/codecs/wsa881x-temp-sensor.c
index 0079d0f..5ab0ecf 100644
--- a/sound/soc/codecs/wsa881x-temp-sensor.c
+++ b/sound/soc/codecs/wsa881x-temp-sensor.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,7 +23,7 @@
#define LOW_TEMP_THRESHOLD 5
#define HIGH_TEMP_THRESHOLD 45
#define TEMP_INVALID 0xFFFF
-
+#define WSA881X_TEMP_RETRY 3
/*
* wsa881x_get_temp - get wsa temperature
* @thermal: thermal zone device
@@ -44,6 +44,7 @@ int wsa881x_get_temp(struct thermal_zone_device *thermal,
int temp_val;
int t1 = T1_TEMP;
int t2 = T2_TEMP;
+ u8 retry = WSA881X_TEMP_RETRY;
if (!thermal)
return -EINVAL;
@@ -60,6 +61,7 @@ int wsa881x_get_temp(struct thermal_zone_device *thermal,
pr_err("%s: pdata is NULL\n", __func__);
return -EINVAL;
}
+temp_retry:
if (pdata->wsa_temp_reg_read) {
ret = pdata->wsa_temp_reg_read(codec, &reg);
if (ret) {
@@ -101,6 +103,10 @@ int wsa881x_get_temp(struct thermal_zone_device *thermal,
printk_ratelimited("%s: T0: %d is out of range[%d, %d]\n",
__func__, temp_val, LOW_TEMP_THRESHOLD,
HIGH_TEMP_THRESHOLD);
+ if (retry--) {
+ msleep(20);
+ goto temp_retry;
+ }
}
if (temp)
*temp = temp_val;
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index fe15a57..062bae2 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -986,6 +986,7 @@ static int32_t wsa881x_temp_reg_read(struct snd_soc_codec *codec,
{
struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
struct swr_device *dev;
+ u8 retry = WSA881X_NUM_RETRY;
u8 devnum = 0;
if (!wsa881x) {
@@ -994,7 +995,12 @@ static int32_t wsa881x_temp_reg_read(struct snd_soc_codec *codec,
}
dev = wsa881x->swr_slave;
if (dev && (wsa881x->state == WSA881X_DEV_DOWN)) {
- if (swr_get_logical_dev_num(dev, dev->addr, &devnum)) {
+ while (swr_get_logical_dev_num(dev, dev->addr, &devnum) &&
+ retry--) {
+ /* Retry after 1 msec delay */
+ usleep_range(1000, 1100);
+ }
+ if (retry == 0) {
dev_err(codec->dev,
"%s get devnum %d for dev addr %lx failed\n",
__func__, devnum, dev->addr);
@@ -1106,8 +1112,9 @@ static int wsa881x_swr_startup(struct swr_device *swr_dev)
usleep_range(5000, 5010);
ret = swr_get_logical_dev_num(swr_dev, swr_dev->addr, &devnum);
if (ret) {
- dev_dbg(&swr_dev->dev, "%s failed to get devnum, err:%d\n",
- __func__, ret);
+ dev_dbg(&swr_dev->dev,
+ "%s get devnum %d for dev addr %lx failed\n",
+ __func__, devnum, swr_dev->addr);
goto err;
}
swr_dev->dev_num = devnum;
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 8f8ab44..18585749 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -30,16 +30,6 @@
is inducing kernel panic upon encountering critical
errors from DSP audio modules
-config DOLBY_DAP
- bool "Enable Dolby DAP"
- depends on SND_SOC_MSM_QDSP6V2_INTF
- help
- To add support for dolby DAP post processing.
- This support is to configure the post processing parameters
- to DSP. The configuration includes sending the end point
- device, end point dependent post processing parameters and
- the various posrt processing parameters
-
config DOLBY_DS2
bool "Enable Dolby DS2"
depends on SND_SOC_MSM_QDSP6V2_INTF
@@ -122,9 +112,10 @@
select MSM_CDC_PINCTRL
select SND_SOC_MSM_SDW
select SND_SOC_SDM660_CDC
+ select SND_SOC_MSM_HDMI_CODEC_RX
select QTI_PP
select DTS_SRS_TM
- select DOLBY_DS2
+ select DOLBY_LICENSE
select SND_HWDEP
select MSM_ULTRASOUND
select DTS_EAGLE
@@ -153,10 +144,11 @@
select SND_SOC_WCD9335
select SND_SOC_WCD934X
select SND_SOC_WSA881X
+ select SND_SOC_MSM_HDMI_CODEC_RX
select MFD_CORE
select QTI_PP
select DTS_SRS_TM
- select DOLBY_DS2
+ select DOLBY_LICENSE
select SND_SOC_CPE
select SND_SOC_WCD_CPE
select SND_HWDEP
diff --git a/sound/soc/msm/msm-cpe-lsm.c b/sound/soc/msm/msm-cpe-lsm.c
index 4f83e79..7b65dda 100644
--- a/sound/soc/msm/msm-cpe-lsm.c
+++ b/sound/soc/msm/msm-cpe-lsm.c
@@ -2148,7 +2148,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "LSM_REG_SND_MODEL_V2");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&snd_model, (void *)arg,
@@ -2278,7 +2279,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "SNDRV_LSM_SET_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&det_params, (void *) arg,
@@ -2305,14 +2307,16 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if not using topology\n",
__func__, "SET_MODULE_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (!arg) {
dev_err(rtd->dev,
"%s: %s: No Param data to set\n",
__func__, "SET_MODULE_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&p_data, arg,
@@ -2320,7 +2324,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: copy_from_user failed, size = %zd\n",
__func__, "p_data", sizeof(p_data));
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
if (p_data.num_params > LSM_PARAMS_MAX) {
@@ -2328,7 +2333,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: Invalid num_params %d\n",
__func__, "SET_MODULE_PARAMS",
p_data.num_params);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
p_size = p_data.num_params *
@@ -2339,12 +2345,15 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: Invalid size %zd\n",
__func__, "SET_MODULE_PARAMS", p_size);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
params = kzalloc(p_size, GFP_KERNEL);
- if (!params)
- return -ENOMEM;
+ if (!params) {
+ err = -ENOMEM;
+ goto done;
+ }
if (copy_from_user(params, p_data.params,
p_data.data_size)) {
@@ -2352,7 +2361,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: copy_from_user failed, size = %d\n",
__func__, "params", p_data.data_size);
kfree(params);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
err = msm_cpe_lsm_process_params(substream, &p_data, params);
@@ -2463,7 +2473,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "LSM_REG_SND_MODEL_V2_32");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
dev_dbg(rtd->dev,
@@ -2683,7 +2694,9 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "SNDRV_LSM_SET_PARAMS32");
- return -EINVAL;
+
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&det_params32, arg,
@@ -2727,7 +2740,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if not using topology\n",
__func__, "SET_MODULE_PARAMS_32");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&p_data_32, arg,
@@ -2736,7 +2750,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: copy_from_user failed, size = %zd\n",
__func__, "SET_MODULE_PARAMS_32",
sizeof(p_data_32));
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
p_data.params = compat_ptr(p_data_32.params);
@@ -2748,7 +2763,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: Invalid num_params %d\n",
__func__, "SET_MODULE_PARAMS_32",
p_data.num_params);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (p_data.data_size !=
@@ -2757,21 +2773,25 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: Invalid size %d\n",
__func__, "SET_MODULE_PARAMS_32",
p_data.data_size);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
p_size = sizeof(struct lsm_params_info_32) *
p_data.num_params;
params32 = kzalloc(p_size, GFP_KERNEL);
- if (!params32)
- return -ENOMEM;
+ if (!params32) {
+ err = -ENOMEM;
+ goto done;
+ }
p_size = sizeof(struct lsm_params_info) * p_data.num_params;
params = kzalloc(p_size, GFP_KERNEL);
if (!params) {
kfree(params32);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto done;
}
if (copy_from_user(params32, p_data.params,
@@ -2781,7 +2801,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
__func__, "params32", p_data.data_size);
kfree(params32);
kfree(params);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
p_info_32 = (struct lsm_params_info_32 *) params32;
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 782fa9a..755b62a 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -964,7 +964,7 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
- .channels_max = 2,
+ .channels_max = 4,
.rate_min = 8000,
.rate_max = 192000,
},
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index ac31ca3..51c27b7 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -159,6 +159,21 @@ struct msm_wsa881x_dev_info {
u32 index;
};
+enum pinctrl_pin_state {
+ STATE_DISABLE = 0, /* All pins are in sleep state */
+ STATE_MI2S_ACTIVE, /* I2S = active, TDM = sleep */
+ STATE_TDM_ACTIVE, /* I2S = sleep, TDM = active */
+};
+
+struct msm_pinctrl_info {
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *mi2s_disable;
+ struct pinctrl_state *tdm_disable;
+ struct pinctrl_state *mi2s_active;
+ struct pinctrl_state *tdm_active;
+ enum pinctrl_pin_state curr_state;
+};
+
struct msm_asoc_mach_data {
u32 mclk_freq;
int us_euro_gpio; /* used by gpio driver API */
@@ -166,6 +181,7 @@ struct msm_asoc_mach_data {
struct device_node *hph_en1_gpio_p; /* used by pinctrl API */
struct device_node *hph_en0_gpio_p; /* used by pinctrl API */
struct snd_info_entry *codec_root;
+ struct msm_pinctrl_info pinctrl_info;
};
struct msm_asoc_wcd93xx_codec {
@@ -174,6 +190,9 @@ struct msm_asoc_wcd93xx_codec {
void (*mbhc_hs_detect_exit)(struct snd_soc_codec *codec);
};
+static const char *const pin_states[] = {"sleep", "i2s-active",
+ "tdm-active"};
+
enum {
TDM_0 = 0,
TDM_1,
@@ -402,7 +421,8 @@ static char const *usb_sample_rate_text[] = {"KHZ_8", "KHZ_11P025",
"KHZ_88P2", "KHZ_96", "KHZ_176P4",
"KHZ_192", "KHZ_352P8", "KHZ_384"};
static char const *ext_disp_sample_rate_text[] = {"KHZ_48", "KHZ_96",
- "KHZ_192"};
+ "KHZ_192", "KHZ_32", "KHZ_44P1",
+ "KHZ_88P2", "KHZ_176P4"};
static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four",
"Five", "Six", "Seven", "Eight"};
static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE", "S32_LE"};
@@ -514,6 +534,9 @@ static struct wcd_mbhc_config wcd_mbhc_cfg = {
.key_code[7] = 0,
.linein_th = 5000,
.moisture_en = true,
+ .mbhc_micbias = MIC_BIAS_2,
+ .anc_micbias = MIC_BIAS_2,
+ .enable_anc_mic_detect = false,
};
static struct snd_soc_dapm_route wcd_audio_paths_tasha[] = {
@@ -1479,6 +1502,22 @@ static int ext_disp_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
return idx;
switch (ext_disp_rx_cfg[idx].sample_rate) {
+ case SAMPLING_RATE_176P4KHZ:
+ sample_rate_val = 6;
+ break;
+
+ case SAMPLING_RATE_88P2KHZ:
+ sample_rate_val = 5;
+ break;
+
+ case SAMPLING_RATE_44P1KHZ:
+ sample_rate_val = 4;
+ break;
+
+ case SAMPLING_RATE_32KHZ:
+ sample_rate_val = 3;
+ break;
+
case SAMPLING_RATE_192KHZ:
sample_rate_val = 2;
break;
@@ -1509,6 +1548,18 @@ static int ext_disp_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
return idx;
switch (ucontrol->value.integer.value[0]) {
+ case 6:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_176P4KHZ;
+ break;
+ case 5:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_88P2KHZ;
+ break;
+ case 4:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_44P1KHZ;
+ break;
+ case 3:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_32KHZ;
+ break;
case 2:
ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_192KHZ;
break;
@@ -3997,6 +4048,275 @@ static int msm_mi2s_set_sclk(struct snd_pcm_substream *substream, bool enable)
return ret;
}
+static int msm_set_pinctrl(struct msm_pinctrl_info *pinctrl_info,
+ enum pinctrl_pin_state new_state)
+{
+ int ret = 0;
+ int curr_state = 0;
+
+ if (pinctrl_info == NULL) {
+ pr_err("%s: pinctrl_info is NULL\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+ curr_state = pinctrl_info->curr_state;
+ pinctrl_info->curr_state = new_state;
+ pr_debug("%s: curr_state = %s new_state = %s\n", __func__,
+ pin_states[curr_state], pin_states[pinctrl_info->curr_state]);
+
+ if (curr_state == pinctrl_info->curr_state) {
+ pr_debug("%s: Already in same state\n", __func__);
+ goto err;
+ }
+
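+ /*
+ * Switching directly between two active pin states is not
+ * allowed; the pins must first be returned to sleep.
+ */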
+ if (curr_state != STATE_DISABLE &&
+ pinctrl_info->curr_state != STATE_DISABLE) {
+ pr_debug("%s: state already active cannot switch\n", __func__);
+ ret = -EIO;
+ goto err;
+ }
+
+ switch (pinctrl_info->curr_state) {
+ case STATE_MI2S_ACTIVE:
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->mi2s_active);
+ if (ret) {
+ pr_err("%s: MI2S state select failed with %d\n",
+ __func__, ret);
+ ret = -EIO;
+ goto err;
+ }
+ break;
+ case STATE_TDM_ACTIVE:
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->tdm_active);
+ if (ret) {
+ pr_err("%s: TDM state select failed with %d\n",
+ __func__, ret);
+ ret = -EIO;
+ goto err;
+ }
+ break;
+ case STATE_DISABLE:
+ if (curr_state == STATE_MI2S_ACTIVE) {
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->mi2s_disable);
+ } else {
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->tdm_disable);
+ }
+ if (ret) {
+ pr_err("%s: state disable failed with %d\n",
+ __func__, ret);
+ ret = -EIO;
+ goto err;
+ }
+ break;
+ default:
+ pr_err("%s: TLMM pin state is invalid\n", __func__);
+ return -EINVAL;
+ }
+
+err:
+ return ret;
+}
+
+static void msm_release_pinctrl(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+ if (pinctrl_info->pinctrl) {
+ devm_pinctrl_put(pinctrl_info->pinctrl);
+ pinctrl_info->pinctrl = NULL;
+ }
+}
+
+static int msm_get_pinctrl(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_pinctrl_info *pinctrl_info = NULL;
+ struct pinctrl *pinctrl;
+ int ret;
+
+ pinctrl_info = &pdata->pinctrl_info;
+
+ if (pinctrl_info == NULL) {
+ pr_err("%s: pinctrl_info is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR_OR_NULL(pinctrl)) {
+ pr_err("%s: Unable to get pinctrl handle\n", __func__);
+ return -EINVAL;
+ }
+ pinctrl_info->pinctrl = pinctrl;
+
+ /* get all the states handles from Device Tree */
+ pinctrl_info->mi2s_disable = pinctrl_lookup_state(pinctrl,
+ "quat-mi2s-sleep");
+ if (IS_ERR(pinctrl_info->mi2s_disable)) {
+ pr_err("%s: could not get mi2s_disable pinstate\n", __func__);
+ goto err;
+ }
+ pinctrl_info->mi2s_active = pinctrl_lookup_state(pinctrl,
+ "quat-mi2s-active");
+ if (IS_ERR(pinctrl_info->mi2s_active)) {
+ pr_err("%s: could not get mi2s_active pinstate\n", __func__);
+ goto err;
+ }
+ pinctrl_info->tdm_disable = pinctrl_lookup_state(pinctrl,
+ "quat-tdm-sleep");
+ if (IS_ERR(pinctrl_info->tdm_disable)) {
+ pr_err("%s: could not get tdm_disable pinstate\n", __func__);
+ goto err;
+ }
+ pinctrl_info->tdm_active = pinctrl_lookup_state(pinctrl,
+ "quat-tdm-active");
+ if (IS_ERR(pinctrl_info->tdm_active)) {
+ pr_err("%s: could not get tdm_active pinstate\n",
+ __func__);
+ goto err;
+ }
+ /* Reset the TLMM pins to a default state */
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->mi2s_disable);
+ if (ret != 0) {
+ pr_err("%s: Disable TLMM pins failed with %d\n",
+ __func__, ret);
+ ret = -EIO;
+ goto err;
+ }
+ pinctrl_info->curr_state = STATE_DISABLE;
+
+ return 0;
+
+err:
+ devm_pinctrl_put(pinctrl);
+ pinctrl_info->pinctrl = NULL;
+ return -EINVAL;
+}
+
+static int msm_tdm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
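+ /* Fix up the backend to the configured TDM channel count, bit format and sample rate */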
+ if (cpu_dai->id == AFE_PORT_ID_QUATERNARY_TDM_RX) {
+ channels->min = channels->max =
+ tdm_rx_cfg[TDM_QUAT][TDM_0].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ tdm_rx_cfg[TDM_QUAT][TDM_0].bit_format);
+ rate->min = rate->max =
+ tdm_rx_cfg[TDM_QUAT][TDM_0].sample_rate;
+ } else if (cpu_dai->id == AFE_PORT_ID_SECONDARY_TDM_RX) {
+ channels->min = channels->max =
+ tdm_rx_cfg[TDM_SEC][TDM_0].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ tdm_rx_cfg[TDM_SEC][TDM_0].bit_format);
+ rate->min = rate->max = tdm_rx_cfg[TDM_SEC][TDM_0].sample_rate;
+ } else {
+ pr_err("%s: dai id 0x%x not supported\n",
+ __func__, cpu_dai->id);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: dai id = 0x%x channels = %d rate = %d format = 0x%x\n",
+ __func__, cpu_dai->id, channels->max, rate->max,
+ params_format(params));
+
+ return 0;
+}
+
+static int msm8998_tdm_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int ret = 0;
+ int channels, slot_width, slots;
+ unsigned int slot_mask;
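+ /* Per-channel data offsets within the TDM frame, one 32-bit slot (4 bytes) apart */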
+ unsigned int slot_offset[8] = {0, 4, 8, 12, 16, 20, 24, 28};
+
+ pr_debug("%s: dai id = 0x%x\n", __func__, cpu_dai->id);
+
+ slots = tdm_rx_cfg[TDM_QUAT][TDM_0].channels;
+ /* Set the lowest 'slots' bits of the 16-bit mask (e.g. 2 slots -> bits 0 and 1) */
+ slot_mask = 0x0000FFFF >> (16-slots);
+ slot_width = 32;
+ channels = slots;
+
+ pr_debug("%s: slot_width %d slots %d\n", __func__, slot_width, slots);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ pr_debug("%s: slot_width %d\n", __func__, slot_width);
+ ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0, slot_mask,
+ slots, slot_width);
+ if (ret < 0) {
+ pr_err("%s: failed to set tdm slot, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+
+ ret = snd_soc_dai_set_channel_map(cpu_dai,
+ 0, NULL, channels, slot_offset);
+ if (ret < 0) {
+ pr_err("%s: failed to set channel map, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+ } else {
+ pr_err("%s: invalid use case, err:%d\n",
+ __func__, ret);
+ }
+
+end:
+ return ret;
+}
+
+static int msm8998_tdm_snd_startup(struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+ ret = msm_set_pinctrl(pinctrl_info, STATE_TDM_ACTIVE);
+ if (ret)
+ pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static void msm8998_tdm_snd_shutdown(struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+ ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+ if (ret)
+ pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+ __func__, ret);
+
+}
+
+static struct snd_soc_ops msm8998_tdm_be_ops = {
+ .hw_params = msm8998_tdm_snd_hw_params,
+ .startup = msm8998_tdm_snd_startup,
+ .shutdown = msm8998_tdm_snd_shutdown
+};
+
static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
{
int ret = 0;
@@ -4004,6 +4324,9 @@ static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int index = cpu_dai->id;
unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS;
+ struct snd_soc_card *card = rtd->card;
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
dev_dbg(rtd->card->dev,
"%s: substream = %s stream = %d, dai name %s, dai ID %d\n",
@@ -4017,6 +4340,15 @@ static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
__func__, cpu_dai->id);
goto done;
}
+ if (index == QUAT_MI2S) {
+ ret = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
+ if (ret) {
+ pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+ __func__, ret);
+ goto done;
+ }
+ }
+
/*
* Mutex protection in case the same MI2S
* interface is used for both TX and RX, so
@@ -4069,6 +4401,9 @@ static void msm_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
int ret;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
int index = rtd->cpu_dai->id;
+ struct snd_soc_card *card = rtd->card;
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
pr_debug("%s(): substream = %s stream = %d\n", __func__,
substream->name, substream->stream);
@@ -4087,6 +4422,13 @@ static void msm_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
}
}
mutex_unlock(&mi2s_intf_conf[index].lock);
+
+ if (index == QUAT_MI2S) {
+ ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+ if (ret)
+ pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+ __func__, ret);
+ }
}
static struct snd_soc_ops msm_mi2s_be_ops = {
@@ -5214,8 +5556,8 @@ static struct snd_soc_dai_link msm_common_be_dai_links[] = {
.no_pcm = 1,
.dpcm_playback = 1,
.id = MSM_BACKEND_DAI_QUAT_TDM_RX_0,
- .be_hw_params_fixup = msm_be_hw_params_fixup,
- .ops = &msm_tdm_be_ops,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8998_tdm_be_ops,
.ignore_suspend = 1,
},
{
@@ -5389,6 +5731,22 @@ static struct snd_soc_dai_link msm_tasha_be_dai_links[] = {
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
},
+ /* Slimbus VI Recording */
+ {
+ .name = LPASS_BE_SLIMBUS_TX_VI,
+ .stream_name = "Slimbus4 Capture",
+ .cpu_dai_name = "msm-dai-q6-dev.16393",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "tasha_codec",
+ .codec_dai_name = "tasha_vifeedback",
+ .be_id = MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ops = &msm_be_ops,
+ .ignore_suspend = 1,
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_pmdown_time = 1,
+ },
};
static struct snd_soc_dai_link msm_tavil_be_dai_links[] = {
@@ -5561,6 +5919,22 @@ static struct snd_soc_dai_link msm_tavil_be_dai_links[] = {
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
},
+ /* Slimbus VI Recording */
+ {
+ .name = LPASS_BE_SLIMBUS_TX_VI,
+ .stream_name = "Slimbus4 Capture",
+ .cpu_dai_name = "msm-dai-q6-dev.16393",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "tavil_codec",
+ .codec_dai_name = "tavil_vifeedback",
+ .be_id = MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ops = &msm_be_ops,
+ .ignore_suspend = 1,
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_pmdown_time = 1,
+ },
};
static struct snd_soc_dai_link msm_wcn_be_dai_links[] = {
@@ -6816,14 +7190,19 @@ static int msm_asoc_machine_probe(struct platform_device *pdev)
pdev->dev.of_node->full_name);
dev_dbg(&pdev->dev, "Jack type properties set to default");
} else {
- if (!strcmp(mbhc_audio_jack_type, "4-pole-jack"))
+ if (!strcmp(mbhc_audio_jack_type, "4-pole-jack")) {
+ wcd_mbhc_cfg.enable_anc_mic_detect = false;
dev_dbg(&pdev->dev, "This hardware has 4 pole jack");
- else if (!strcmp(mbhc_audio_jack_type, "5-pole-jack"))
+ } else if (!strcmp(mbhc_audio_jack_type, "5-pole-jack")) {
+ wcd_mbhc_cfg.enable_anc_mic_detect = true;
dev_dbg(&pdev->dev, "This hardware has 5 pole jack");
- else if (!strcmp(mbhc_audio_jack_type, "6-pole-jack"))
+ } else if (!strcmp(mbhc_audio_jack_type, "6-pole-jack")) {
+ wcd_mbhc_cfg.enable_anc_mic_detect = true;
dev_dbg(&pdev->dev, "This hardware has 6 pole jack");
- else
+ } else {
+ wcd_mbhc_cfg.enable_anc_mic_detect = false;
dev_dbg(&pdev->dev, "Unknown value, set to default");
+ }
}
/*
* Parse US-Euro gpio info from DT. Report no error if us-euro
@@ -6849,6 +7228,17 @@ static int msm_asoc_machine_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "msm_prepare_us_euro failed (%d)\n",
ret);
+ /* Parse pinctrl info from devicetree */
+ ret = msm_get_pinctrl(pdev);
+ if (!ret) {
+ pr_debug("%s: pinctrl parsing successful\n", __func__);
+ } else {
+ dev_dbg(&pdev->dev,
+ "%s: Parsing pinctrl failed with %d. Cannot use Ports\n",
+ __func__, ret);
+ ret = 0;
+ }
+
i2s_auxpcm_init(pdev);
is_initial_boot = true;
@@ -6866,6 +7256,7 @@ static int msm_asoc_machine_probe(struct platform_device *pdev)
gpio_free(pdata->us_euro_gpio);
pdata->us_euro_gpio = 0;
}
+ msm_release_pinctrl(pdev);
devm_kfree(&pdev->dev, pdata);
return ret;
}
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index 469ab1a..d4db55f 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -10,7 +10,6 @@
msm-dai-stub-v2.o
obj-$(CONFIG_SND_HWDEP) += msm-pcm-routing-devdep.o
obj-$(CONFIG_DTS_EAGLE) += msm-dts-eagle.o
-obj-$(CONFIG_DOLBY_DAP) += msm-dolby-dap-config.o
obj-$(CONFIG_DOLBY_DS2) += msm-ds2-dap-config.o
obj-$(CONFIG_DOLBY_LICENSE) += msm-ds2-dap-config.o
obj-$(CONFIG_DTS_SRS_TM) += msm-dts-srs-tm-config.o
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index 9cd233c..e8e4e04 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -56,8 +56,8 @@
#define FLAC_BLK_SIZE_LIMIT 65535
/* Timestamp mode payload offsets */
-#define TS_LSW_OFFSET 6
-#define TS_MSW_OFFSET 7
+#define CAPTURE_META_DATA_TS_OFFSET_LSW 6
+#define CAPTURE_META_DATA_TS_OFFSET_MSW 7
/* decoder parameter length */
#define DDP_DEC_MAX_NUM_PARAM 18
@@ -100,7 +100,7 @@ struct msm_compr_gapless_state {
static unsigned int supported_sample_rates[] = {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 64000,
- 88200, 96000, 176400, 192000, 352800, 384000, 2822400, 5644800
+ 88200, 96000, 128000, 176400, 192000, 352800, 384000, 2822400, 5644800
};
struct msm_compr_pdata {
@@ -160,6 +160,10 @@ struct msm_compr_audio {
uint32_t stream_available;
uint32_t next_stream;
+ uint32_t run_mode;
+ uint32_t start_delay_lsw;
+ uint32_t start_delay_msw;
+
uint64_t marker_timestamp;
struct msm_compr_gapless_state gapless_state;
@@ -215,6 +219,99 @@ static int msm_compr_send_dec_params(struct snd_compr_stream *cstream,
struct msm_compr_dec_params *dec_params,
int stream_id);
+static int msm_compr_set_render_mode(struct msm_compr_audio *prtd,
+ uint32_t render_mode)
+{
+ int ret = -EINVAL;
+ struct audio_client *ac = prtd->audio_client;
+
+ pr_debug("%s, got render mode %u\n", __func__, render_mode);
+
+ if (render_mode == SNDRV_COMPRESS_RENDER_MODE_AUDIO_MASTER) {
+ render_mode = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_DEFAULT;
+ } else if (render_mode == SNDRV_COMPRESS_RENDER_MODE_STC_MASTER) {
+ render_mode = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_LOCAL_STC;
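+ /* Run with start delay so any SNDRV_COMPRESS_START_DELAY value takes effect */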
+ prtd->run_mode = ASM_SESSION_CMD_RUN_STARTIME_RUN_WITH_DELAY;
+ } else {
+ pr_err("%s, Invalid render mode %u\n", __func__,
+ render_mode);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = q6asm_send_mtmx_strtr_render_mode(ac, render_mode);
+ if (ret) {
+ pr_err("%s, Render mode can't be set error %d\n", __func__,
+ ret);
+ }
+exit:
+ return ret;
+}
+
+static int msm_compr_set_clk_rec_mode(struct audio_client *ac,
+ uint32_t clk_rec_mode)
+{
+ int ret = -EINVAL;
+
+ pr_debug("%s, got clk rec mode %u\n", __func__, clk_rec_mode);
+
+ if (clk_rec_mode == SNDRV_COMPRESS_CLK_REC_MODE_NONE) {
+ clk_rec_mode = ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_NONE;
+ } else if (clk_rec_mode == SNDRV_COMPRESS_CLK_REC_MODE_AUTO) {
+ clk_rec_mode = ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_AUTO;
+ } else {
+ pr_err("%s, Invalid clk rec_mode mode %u\n", __func__,
+ clk_rec_mode);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = q6asm_send_mtmx_strtr_clk_rec_mode(ac, clk_rec_mode);
+ if (ret) {
+ pr_err("%s, clk rec mode can't be set, error %d\n", __func__,
+ ret);
+ }
+
+exit:
+ return ret;
+}
+
+static int msm_compr_set_render_window(struct audio_client *ac,
+ uint32_t ws_lsw, uint32_t ws_msw,
+ uint32_t we_lsw, uint32_t we_msw)
+{
+ int ret = -EINVAL;
+ struct asm_session_mtmx_strtr_param_window_v2_t asm_mtmx_strtr_window;
+ uint32_t param_id;
+
+ pr_debug("%s, ws_lsw 0x%x ws_msw 0x%x we_lsw 0x%x we_ms 0x%x\n",
+ __func__, ws_lsw, ws_msw, we_lsw, we_msw);
+
+ memset(&asm_mtmx_strtr_window, 0,
+ sizeof(struct asm_session_mtmx_strtr_param_window_v2_t));
+ asm_mtmx_strtr_window.window_lsw = ws_lsw;
+ asm_mtmx_strtr_window.window_msw = ws_msw;
+ param_id = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_WINDOW_START_V2;
+ ret = q6asm_send_mtmx_strtr_window(ac, &asm_mtmx_strtr_window,
+ param_id);
+ if (ret) {
+ pr_err("%s, start window can't be set error %d\n", __func__,
+ ret);
+ goto exit;
+ }
+
+ asm_mtmx_strtr_window.window_lsw = we_lsw;
+ asm_mtmx_strtr_window.window_msw = we_msw;
+ param_id = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_WINDOW_END_V2;
+ ret = q6asm_send_mtmx_strtr_window(ac, &asm_mtmx_strtr_window,
+ param_id);
+ if (ret) {
+ pr_err("%s, end window can't be set error %d\n", __func__,
+ ret);
+ }
+
+exit:
+ return ret;
+}
+
static int msm_compr_set_volume(struct snd_compr_stream *cstream,
uint32_t volume_l, uint32_t volume_r)
{
@@ -314,6 +411,7 @@ static int msm_compr_send_buffer(struct msm_compr_audio *prtd)
int buffer_length;
uint64_t bytes_available;
struct audio_aio_write_param param;
+ struct snd_codec_metadata *buff_addr;
if (!atomic_read(&prtd->start)) {
pr_err("%s: stream is not in started state\n", __func__);
@@ -347,23 +445,34 @@ static int msm_compr_send_buffer(struct msm_compr_audio *prtd)
}
if (buffer_length) {
- param.paddr = prtd->buffer_paddr + prtd->byte_offset;
+ param.paddr = prtd->buffer_paddr + prtd->byte_offset;
WARN(prtd->byte_offset % 32 != 0, "offset %x not multiple of 32\n",
prtd->byte_offset);
} else {
- param.paddr = prtd->buffer_paddr;
+ param.paddr = prtd->buffer_paddr;
}
-
param.len = buffer_length;
- param.msw_ts = 0;
- param.lsw_ts = 0;
- param.flags = NO_TIMESTAMP;
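+ /*
+ * In timestamp mode each fragment starts with a snd_codec_metadata
+ * header carrying the actual data length and a 64-bit timestamp.
+ */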
+ if (prtd->ts_header_offset) {
+ buff_addr = (struct snd_codec_metadata *)
+ (prtd->buffer + prtd->byte_offset);
+ param.len = buff_addr->length;
+ param.msw_ts = (uint32_t)
+ ((buff_addr->timestamp & 0xFFFFFFFF00000000LL) >> 32);
+ param.lsw_ts = (uint32_t) (buff_addr->timestamp & 0xFFFFFFFFLL);
+ param.paddr += prtd->ts_header_offset;
+ param.flags = SET_TIMESTAMP;
+ param.metadata_len = prtd->ts_header_offset;
+ } else {
+ param.msw_ts = 0;
+ param.lsw_ts = 0;
+ param.flags = NO_TIMESTAMP;
+ param.metadata_len = 0;
+ }
param.uid = buffer_length;
- param.metadata_len = 0;
param.last_buffer = prtd->last_buffer;
pr_debug("%s: sending %d bytes to DSP byte_offset = %d\n",
- __func__, buffer_length, prtd->byte_offset);
+ __func__, param.len, prtd->byte_offset);
if (q6asm_async_write(prtd->audio_client, &param) < 0) {
pr_err("%s:q6asm_async_write failed\n", __func__);
} else {
@@ -482,9 +591,21 @@ static void compr_event_handler(uint32_t opcode,
* written to ADSP in the last write, update offset and
* total copied data accordingly.
*/
-
- prtd->byte_offset += token;
- prtd->copied_total += token;
+ if (prtd->ts_header_offset) {
+ /* Always assume that the data will be sent to DSP on
+ * frame boundary.
+ * i.e., one frame written from userspace results in one
+ * kernel write to the DSP. This is needed as the
+ * timestamp is sent per frame.
+ */
+ prtd->byte_offset +=
+ prtd->codec_param.buffer.fragment_size;
+ prtd->copied_total +=
+ prtd->codec_param.buffer.fragment_size;
+ } else {
+ prtd->byte_offset += token;
+ prtd->copied_total += token;
+ }
if (prtd->byte_offset >= prtd->buffer_size)
prtd->byte_offset -= prtd->buffer_size;
@@ -539,10 +660,10 @@ static void compr_event_handler(uint32_t opcode,
*buff_addr = prtd->ts_header_offset;
buff_addr++;
/* Write the TS LSW */
- *buff_addr = payload[TS_LSW_OFFSET];
+ *buff_addr = payload[CAPTURE_META_DATA_TS_OFFSET_LSW];
buff_addr++;
/* Write the TS MSW */
- *buff_addr = payload[TS_MSW_OFFSET];
+ *buff_addr = payload[CAPTURE_META_DATA_TS_OFFSET_MSW];
}
/* Always assume read_size is same as fragment_size */
read_size = prtd->codec_param.buffer.fragment_size;
@@ -1229,6 +1350,12 @@ static int msm_compr_configure_dsp_for_playback
prtd->buffer_paddr = ac->port[dir].buf[0].phys;
prtd->buffer_size = runtime->fragments * runtime->fragment_size;
+ /* Bit-0 of flags represent timestamp mode */
+ if (prtd->codec_param.codec.flags & COMPRESSED_TIMESTAMP_FLAG)
+ prtd->ts_header_offset = sizeof(struct snd_codec_metadata);
+ else
+ prtd->ts_header_offset = 0;
+
ret = msm_compr_send_media_format_block(cstream, ac->stream_id, false);
if (ret < 0)
pr_err("%s, failed to send media format block\n", __func__);
@@ -1591,6 +1718,7 @@ static int msm_compr_playback_free(struct snd_compr_stream *cstream)
kfree(pdata->dec_params[soc_prtd->dai_link->id]);
pdata->dec_params[soc_prtd->dai_link->id] = NULL;
kfree(prtd);
+ runtime->private_data = NULL;
return 0;
}
@@ -1650,6 +1778,7 @@ static int msm_compr_capture_free(struct snd_compr_stream *cstream)
q6asm_audio_client_free(ac);
kfree(prtd);
+ runtime->private_data = NULL;
return 0;
}
@@ -1969,7 +2098,8 @@ static int msm_compr_trigger(struct snd_compr_stream *cstream, int cmd)
msm_compr_read_buffer(prtd);
}
/* issue RUN command for the stream */
- q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+ q6asm_run_nowait(prtd->audio_client, prtd->run_mode,
+ prtd->start_delay_msw, prtd->start_delay_lsw);
break;
case SNDRV_PCM_TRIGGER_STOP:
spin_lock_irqsave(&prtd->lock, flags);
@@ -2053,7 +2183,8 @@ static int msm_compr_trigger(struct snd_compr_stream *cstream, int cmd)
prtd->gapless_state.gapless_transition);
if (!prtd->gapless_state.gapless_transition) {
atomic_set(&prtd->start, 1);
- q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+ q6asm_run_nowait(prtd->audio_client, prtd->run_mode,
+ 0, 0);
}
break;
case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
@@ -2725,11 +2856,14 @@ static int msm_compr_set_metadata(struct snd_compr_stream *cstream,
return -EINVAL;
}
- if (prtd->compr_passthr != LEGACY_PCM) {
+ if (((metadata->key == SNDRV_COMPRESS_ENCODER_PADDING) ||
+ (metadata->key == SNDRV_COMPRESS_ENCODER_DELAY)) &&
+ (prtd->compr_passthr != LEGACY_PCM)) {
pr_debug("%s: No trailing silence for compress_type[%d]\n",
__func__, prtd->compr_passthr);
return 0;
}
+
ac = prtd->audio_client;
if (metadata->key == SNDRV_COMPRESS_ENCODER_PADDING) {
pr_debug("%s, got encoder padding %u",
@@ -2739,11 +2873,63 @@ static int msm_compr_set_metadata(struct snd_compr_stream *cstream,
pr_debug("%s, got encoder delay %u",
__func__, metadata->value[0]);
prtd->gapless_state.initial_samples_drop = metadata->value[0];
+ } else if (metadata->key == SNDRV_COMPRESS_RENDER_MODE) {
+ return msm_compr_set_render_mode(prtd, metadata->value[0]);
+ } else if (metadata->key == SNDRV_COMPRESS_CLK_REC_MODE) {
+ return msm_compr_set_clk_rec_mode(ac, metadata->value[0]);
+ } else if (metadata->key == SNDRV_COMPRESS_RENDER_WINDOW) {
+ return msm_compr_set_render_window(
+ ac,
+ metadata->value[0],
+ metadata->value[1],
+ metadata->value[2],
+ metadata->value[3]);
+ } else if (metadata->key == SNDRV_COMPRESS_START_DELAY) {
+ prtd->start_delay_lsw = metadata->value[0];
+ prtd->start_delay_msw = metadata->value[1];
}
return 0;
}
+static int msm_compr_get_metadata(struct snd_compr_stream *cstream,
+ struct snd_compr_metadata *metadata)
+{
+ struct msm_compr_audio *prtd;
+ struct audio_client *ac;
+ int ret = -EINVAL;
+
+ pr_debug("%s\n", __func__);
+
+ if (!metadata || !cstream || !cstream->runtime)
+ return ret;
+
+ if (metadata->key != SNDRV_COMPRESS_PATH_DELAY) {
+ pr_err("%s, unsupported key %d\n", __func__, metadata->key);
+ return ret;
+ }
+
+ prtd = cstream->runtime->private_data;
+ if (!prtd || !prtd->audio_client) {
+ pr_err("%s: prtd or audio client is NULL\n", __func__);
+ return ret;
+ }
+
+ ac = prtd->audio_client;
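+ /* q6asm_get_path_delay() populates ac->path_delay in microseconds */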
+ ret = q6asm_get_path_delay(prtd->audio_client);
+ if (ret) {
+ pr_err("%s: get_path_delay failed, ret=%d\n", __func__, ret);
+ return ret;
+ }
+
+ pr_debug("%s, path delay(in us) %u\n", __func__, ac->path_delay);
+
+ metadata->value[0] = ac->path_delay;
+
+ return ret;
+}
+
+
static int msm_compr_set_next_track_param(struct snd_compr_stream *cstream,
union snd_codec_options *codec_options)
{
@@ -3231,48 +3417,45 @@ static int msm_compr_playback_app_type_cfg_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_RX;
+ int be_id = ucontrol->value.integer.value[3];
+ int ret = 0;
int app_type;
int acdb_dev_id;
int sample_rate = 48000;
- pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
- if (fe_id >= MSM_FRONTEND_DAI_MAX) {
- pr_err("%s Received out of bounds fe_id %llu\n",
- __func__, fe_id);
- return -EINVAL;
- }
-
app_type = ucontrol->value.integer.value[0];
acdb_dev_id = ucontrol->value.integer.value[1];
if (ucontrol->value.integer.value[2] != 0)
sample_rate = ucontrol->value.integer.value[2];
- pr_debug("%s: app_type- %d acdb_dev_id- %d sample_rate- %d session_type- %d\n",
- __func__, app_type, acdb_dev_id, sample_rate, SESSION_TYPE_RX);
- msm_pcm_routing_reg_stream_app_type_cfg(fe_id, app_type,
- acdb_dev_id, sample_rate, SESSION_TYPE_RX);
+ pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+ __func__, fe_id, session_type, be_id,
+ app_type, acdb_dev_id, sample_rate);
+ ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+ be_id, app_type,
+ acdb_dev_id, sample_rate);
+ if (ret < 0)
+ pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+ __func__, ret);
- return 0;
+ return ret;
}
static int msm_compr_playback_app_type_cfg_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_RX;
+ int be_id = ucontrol->value.integer.value[3];
int ret = 0;
int app_type;
int acdb_dev_id;
int sample_rate;
- pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
- if (fe_id >= MSM_FRONTEND_DAI_MAX) {
- pr_err("%s Received out of bounds fe_id %llu\n",
- __func__, fe_id);
- ret = -EINVAL;
- goto done;
- }
-
- ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, SESSION_TYPE_RX,
- &app_type, &acdb_dev_id, &sample_rate);
+ ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+ be_id, &app_type,
+ &acdb_dev_id,
+ &sample_rate);
if (ret < 0) {
pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
__func__, ret);
@@ -3282,8 +3465,8 @@ static int msm_compr_playback_app_type_cfg_get(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[0] = app_type;
ucontrol->value.integer.value[1] = acdb_dev_id;
ucontrol->value.integer.value[2] = sample_rate;
- pr_debug("%s: fedai_id %llu, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
- __func__, fe_id, SESSION_TYPE_RX,
+ pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+ __func__, fe_id, session_type, be_id,
app_type, acdb_dev_id, sample_rate);
done:
return ret;
@@ -3293,48 +3476,45 @@ static int msm_compr_capture_app_type_cfg_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_TX;
+ int be_id = ucontrol->value.integer.value[3];
+ int ret = 0;
int app_type;
int acdb_dev_id;
int sample_rate = 48000;
- pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
- if (fe_id >= MSM_FRONTEND_DAI_MAX) {
- pr_err("%s Received out of bounds fe_id %llu\n",
- __func__, fe_id);
- return -EINVAL;
- }
-
app_type = ucontrol->value.integer.value[0];
acdb_dev_id = ucontrol->value.integer.value[1];
if (ucontrol->value.integer.value[2] != 0)
sample_rate = ucontrol->value.integer.value[2];
- pr_debug("%s: app_type- %d acdb_dev_id- %d sample_rate- %d session_type- %d\n",
- __func__, app_type, acdb_dev_id, sample_rate, SESSION_TYPE_TX);
- msm_pcm_routing_reg_stream_app_type_cfg(fe_id, app_type,
- acdb_dev_id, sample_rate, SESSION_TYPE_TX);
+ pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+ __func__, fe_id, session_type, be_id,
+ app_type, acdb_dev_id, sample_rate);
+ ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+ be_id, app_type,
+ acdb_dev_id, sample_rate);
+ if (ret < 0)
+ pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+ __func__, ret);
- return 0;
+ return ret;
}
static int msm_compr_capture_app_type_cfg_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_TX;
+ int be_id = ucontrol->value.integer.value[3];
int ret = 0;
int app_type;
int acdb_dev_id;
int sample_rate;
- pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
- if (fe_id >= MSM_FRONTEND_DAI_MAX) {
- pr_err("%s Received out of bounds fe_id %llu\n",
- __func__, fe_id);
- ret = -EINVAL;
- goto done;
- }
-
- ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, SESSION_TYPE_TX,
- &app_type, &acdb_dev_id, &sample_rate);
+ ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+ be_id, &app_type,
+ &acdb_dev_id,
+ &sample_rate);
if (ret < 0) {
pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
__func__, ret);
@@ -3344,8 +3524,8 @@ static int msm_compr_capture_app_type_cfg_get(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[0] = app_type;
ucontrol->value.integer.value[1] = acdb_dev_id;
ucontrol->value.integer.value[2] = sample_rate;
- pr_debug("%s: fedai_id %llu, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
- __func__, fe_id, SESSION_TYPE_TX,
+ pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+ __func__, fe_id, session_type, be_id,
app_type, acdb_dev_id, sample_rate);
done:
return ret;
@@ -3897,6 +4077,7 @@ static struct snd_compr_ops msm_compr_ops = {
.pointer = msm_compr_pointer,
.set_params = msm_compr_set_params,
.set_metadata = msm_compr_set_metadata,
+ .get_metadata = msm_compr_get_metadata,
.set_next_track_param = msm_compr_set_next_track_param,
.ack = msm_compr_ack,
.copy = msm_compr_copy,
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
index dffac45..9b072ea 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
@@ -124,6 +124,45 @@ static const struct soc_enum hdmi_config_enum[] = {
SOC_ENUM_SINGLE_EXT(2, hdmi_format),
};
+static int msm_dai_q6_ext_disp_drift_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = sizeof(struct afe_param_id_dev_timing_stats);
+
+ return 0;
+}
+
+static int msm_dai_q6_ext_disp_drift_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret = -EINVAL;
+ struct afe_param_id_dev_timing_stats timing_stats;
+ struct snd_soc_dai *dai = kcontrol->private_data;
+ struct msm_dai_q6_hdmi_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+ if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+ pr_err("%s: afe port not started. status_mask = %ld\n",
+ __func__, *dai_data->status_mask);
+ goto done;
+ }
+
+ memset(&timing_stats, 0, sizeof(struct afe_param_id_dev_timing_stats));
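+ /* Query AV drift/timing statistics for this AFE port and return them as bytes */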
+ ret = afe_get_av_dev_drift(&timing_stats, dai->id);
+ if (ret) {
+ pr_err("%s: Error getting AFE Drift for port %d, err=%d\n",
+ __func__, dai->id, ret);
+
+ ret = -EINVAL;
+ goto done;
+ }
+
+ memcpy(ucontrol->value.bytes.data, (void *)&timing_stats,
+ sizeof(struct afe_param_id_dev_timing_stats));
+done:
+ return ret;
+}
+
static const struct snd_kcontrol_new hdmi_config_controls[] = {
SOC_ENUM_EXT("HDMI RX Format", hdmi_config_enum[0],
msm_dai_q6_ext_disp_format_get,
@@ -132,6 +171,13 @@ static const struct snd_kcontrol_new hdmi_config_controls[] = {
HDMI_RX_CA_MAX, 0, 1,
msm_dai_q6_ext_disp_ca_get,
msm_dai_q6_ext_disp_ca_put),
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = "HDMI RX Drift",
+ .info = msm_dai_q6_ext_disp_drift_info,
+ .get = msm_dai_q6_ext_disp_drift_get,
+ },
};
static const struct snd_kcontrol_new display_port_config_controls[] = {
@@ -142,6 +188,13 @@ static const struct snd_kcontrol_new display_port_config_controls[] = {
HDMI_RX_CA_MAX, 0, 1,
msm_dai_q6_ext_disp_ca_get,
msm_dai_q6_ext_disp_ca_put),
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = "DISPLAY Port RX Drift",
+ .info = msm_dai_q6_ext_disp_drift_info,
+ .get = msm_dai_q6_ext_disp_drift_get,
+ },
};
/* Current implementation assumes hw_param is called once
@@ -297,6 +350,10 @@ static int msm_dai_q6_hdmi_dai_probe(struct snd_soc_dai *dai)
kcontrol = &hdmi_config_controls[1];
rc = snd_ctl_add(dai->component->card->snd_card,
snd_ctl_new1(kcontrol, dai_data));
+
+ kcontrol = &hdmi_config_controls[2];
+ rc = snd_ctl_add(dai->component->card->snd_card,
+ snd_ctl_new1(kcontrol, dai));
} else if (dai->driver->id == DISPLAY_PORT_RX) {
kcontrol = &display_port_config_controls[0];
rc = snd_ctl_add(dai->component->card->snd_card,
@@ -305,6 +362,10 @@ static int msm_dai_q6_hdmi_dai_probe(struct snd_soc_dai *dai)
kcontrol = &display_port_config_controls[1];
rc = snd_ctl_add(dai->component->card->snd_card,
snd_ctl_new1(kcontrol, dai_data));
+
+ kcontrol = &display_port_config_controls[2];
+ rc = snd_ctl_add(dai->component->card->snd_card,
+ snd_ctl_new1(kcontrol, dai));
} else {
dev_err(dai->dev, "%s: Invalid id:%d\n",
__func__, dai->driver->id);
@@ -370,8 +431,10 @@ static struct snd_soc_dai_driver msm_dai_q6_hdmi_hdmi_rx_dai = {
.playback = {
.stream_name = "HDMI Playback",
.aif_name = "HDMI",
- .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
- SNDRV_PCM_RATE_192000,
+ .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+ SNDRV_PCM_RATE_192000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
.channels_min = 2,
.channels_max = 8,
@@ -389,7 +452,9 @@ static struct snd_soc_dai_driver msm_dai_q6_display_port_rx_dai[] = {
.playback = {
.stream_name = "Display Port Playback",
.aif_name = "DISPLAY_PORT",
- .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
SNDRV_PCM_RATE_192000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE,
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 9abe04a..0c46763 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -218,6 +218,7 @@ struct msm_dai_q6_tdm_dai_data {
u32 rate;
u32 channels;
u32 bitwidth;
+ u32 num_group_ports;
struct afe_clk_set clk_set; /* hold LPASS clock config. */
union afe_port_group_config group_cfg; /* hold tdm group config */
struct afe_tdm_port_config port_cfg; /* hold tdm config */
@@ -259,6 +260,7 @@ static const struct soc_enum sb_config_enum[] = {
static const char *const tdm_data_format[] = {
"LPCM",
"Compr",
+ "Gen Compr"
};
static const char *const tdm_header_type[] = {
@@ -268,8 +270,8 @@ static const char *const tdm_header_type[] = {
};
static const struct soc_enum tdm_config_enum[] = {
- SOC_ENUM_SINGLE_EXT(2, tdm_data_format),
- SOC_ENUM_SINGLE_EXT(3, tdm_header_type),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tdm_data_format), tdm_data_format),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tdm_header_type), tdm_header_type),
};
static DEFINE_MUTEX(tdm_mutex);
@@ -297,6 +299,8 @@ static struct afe_param_id_group_device_tdm_cfg tdm_group_cfg = {
0xFF,
};
+static u32 num_tdm_group_ports;
+
static struct afe_clk_set tdm_clk_set = {
AFE_API_VERSION_CLOCK_SET,
Q6AFE_LPASS_CLK_ID_QUAD_TDM_EBIT,
@@ -2068,6 +2072,42 @@ static int msm_dai_q6_usb_audio_cfg_get(struct snd_kcontrol *kcontrol,
return 0;
}
+static int msm_dai_q6_usb_audio_endian_cfg_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+ u32 val = ucontrol->value.integer.value[0];
+
+ if (dai_data) {
+ dai_data->port_config.usb_audio.endian = val;
+ pr_debug("%s: endian = 0x%x\n", __func__,
+ dai_data->port_config.usb_audio.endian);
+ } else {
+ pr_err("%s: dai_data is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int msm_dai_q6_usb_audio_endian_cfg_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+ if (dai_data) {
+ ucontrol->value.integer.value[0] =
+ dai_data->port_config.usb_audio.endian;
+ pr_debug("%s: endian = 0x%x\n", __func__,
+ dai_data->port_config.usb_audio.endian);
+ } else {
+ pr_err("%s: dai_data is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int msm_dai_q6_afe_enc_cfg_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
@@ -2315,9 +2355,15 @@ static const struct snd_kcontrol_new usb_audio_cfg_controls[] = {
SOC_SINGLE_EXT("USB_AUDIO_RX dev_token", 0, 0, UINT_MAX, 0,
msm_dai_q6_usb_audio_cfg_get,
msm_dai_q6_usb_audio_cfg_put),
+ SOC_SINGLE_EXT("USB_AUDIO_RX endian", 0, 0, 1, 0,
+ msm_dai_q6_usb_audio_endian_cfg_get,
+ msm_dai_q6_usb_audio_endian_cfg_put),
SOC_SINGLE_EXT("USB_AUDIO_TX dev_token", 0, 0, UINT_MAX, 0,
msm_dai_q6_usb_audio_cfg_get,
msm_dai_q6_usb_audio_cfg_put),
+ SOC_SINGLE_EXT("USB_AUDIO_TX endian", 0, 0, 1, 0,
+ msm_dai_q6_usb_audio_endian_cfg_get,
+ msm_dai_q6_usb_audio_endian_cfg_put),
};
static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
@@ -2382,10 +2428,16 @@ static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
rc = snd_ctl_add(dai->component->card->snd_card,
snd_ctl_new1(&usb_audio_cfg_controls[0],
dai_data));
+ rc = snd_ctl_add(dai->component->card->snd_card,
+ snd_ctl_new1(&usb_audio_cfg_controls[1],
+ dai_data));
break;
case AFE_PORT_ID_USB_TX:
rc = snd_ctl_add(dai->component->card->snd_card,
- snd_ctl_new1(&usb_audio_cfg_controls[1],
+ snd_ctl_new1(&usb_audio_cfg_controls[2],
+ dai_data));
+ rc = snd_ctl_add(dai->component->card->snd_card,
+ snd_ctl_new1(&usb_audio_cfg_controls[3],
dai_data));
break;
}
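
The probe path registers the RX and TX controls by fixed indices into usb_audio_cfg_controls[] (0/1 for USB RX, 2/3 for USB TX), and the return value of the first snd_ctl_add() in each case is overwritten by the second. A hypothetical helper, not part of this patch, that registers a contiguous span and keeps the first failure:

	/* Hypothetical: add ctls[first .. first+count-1] for one DAI. */
	static int msm_dai_q6_add_ctl_span(struct snd_soc_dai *dai, void *data,
					   const struct snd_kcontrol_new *ctls,
					   int first, int count)
	{
		int i, rc = 0;

		for (i = first; i < first + count && !rc; i++)
			rc = snd_ctl_add(dai->component->card->snd_card,
					 snd_ctl_new1(&ctls[i], data));
		return rc;
	}
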
@@ -4885,7 +4937,6 @@ static struct platform_driver msm_dai_q6_spdif_driver = {
static int msm_dai_tdm_q6_probe(struct platform_device *pdev)
{
int rc = 0;
- u32 num_ports = 0;
const uint32_t *port_id_array = NULL;
uint32_t array_length = 0;
int i = 0;
@@ -4908,18 +4959,19 @@ static int msm_dai_tdm_q6_probe(struct platform_device *pdev)
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,msm-cpudai-tdm-group-num-ports",
- &num_ports);
+ &num_tdm_group_ports);
if (rc) {
dev_err(&pdev->dev, "%s: Group Num Ports from DT file %s\n",
__func__, "qcom,msm-cpudai-tdm-group-num-ports");
goto rtn;
}
dev_dbg(&pdev->dev, "%s: Group Num Ports from DT file 0x%x\n",
- __func__, num_ports);
+ __func__, num_tdm_group_ports);
- if (num_ports > AFE_GROUP_DEVICE_NUM_PORTS) {
+ if (num_tdm_group_ports > AFE_GROUP_DEVICE_NUM_PORTS) {
dev_err(&pdev->dev, "%s Group Num Ports %d greater than Max %d\n",
- __func__, num_ports, AFE_GROUP_DEVICE_NUM_PORTS);
+ __func__, num_tdm_group_ports,
+ AFE_GROUP_DEVICE_NUM_PORTS);
rc = -EINVAL;
goto rtn;
}
@@ -4933,18 +4985,19 @@ static int msm_dai_tdm_q6_probe(struct platform_device *pdev)
rc = -EINVAL;
goto rtn;
}
- if (array_length != sizeof(uint32_t) * num_ports) {
+ if (array_length != sizeof(uint32_t) * num_tdm_group_ports) {
dev_err(&pdev->dev, "%s array_length is %d, expected is %zd\n",
- __func__, array_length, sizeof(uint32_t) * num_ports);
+ __func__, array_length,
+ sizeof(uint32_t) * num_tdm_group_ports);
rc = -EINVAL;
goto rtn;
}
- for (i = 0; i < num_ports; i++)
+ for (i = 0; i < num_tdm_group_ports; i++)
tdm_group_cfg.port_id[i] =
(u16)be32_to_cpu(port_id_array[i]);
/* Unused index should be filled with 0 or AFE_PORT_INVALID */
- for (i = num_ports; i < AFE_GROUP_DEVICE_NUM_PORTS; i++)
+ for (i = num_tdm_group_ports; i < AFE_GROUP_DEVICE_NUM_PORTS; i++)
tdm_group_cfg.port_id[i] =
AFE_PORT_INVALID;
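
Replacing the local num_ports with the file-scope num_tdm_group_ports lets later code (the per-DAI probe further down) pick up the same value. A worked example, assuming a hypothetical 8-port group:

	/*
	 * With qcom,msm-cpudai-tdm-group-num-ports = <8>, the accompanying
	 * group port-id array must be 8 * sizeof(uint32_t) = 32 bytes long;
	 * tdm_group_cfg.port_id[8 .. AFE_GROUP_DEVICE_NUM_PORTS - 1] are then
	 * filled with AFE_PORT_INVALID.
	 */
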
@@ -5011,7 +5064,20 @@ static int msm_dai_q6_tdm_data_format_put(struct snd_kcontrol *kcontrol,
struct msm_dai_q6_tdm_dai_data *dai_data = kcontrol->private_data;
int value = ucontrol->value.integer.value[0];
- dai_data->port_cfg.tdm.data_format = value;
+ switch (value) {
+ case 0:
+ dai_data->port_cfg.tdm.data_format = AFE_LINEAR_PCM_DATA;
+ break;
+ case 1:
+ dai_data->port_cfg.tdm.data_format = AFE_NON_LINEAR_DATA;
+ break;
+ case 2:
+ dai_data->port_cfg.tdm.data_format = AFE_GENERIC_COMPRESSED;
+ break;
+ default:
+ pr_err("%s: data_format invalid\n", __func__);
+ break;
+ }
pr_debug("%s: data_format = %d\n",
__func__, dai_data->port_cfg.tdm.data_format);
return 0;
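
The control index is now mapped to the AFE data-format constants instead of being written to the port config verbatim. An equivalent table-driven sketch, shown only to make the index-to-constant mapping explicit (index order matches tdm_data_format[] above):

	static const int tdm_afe_data_format[] = {
		AFE_LINEAR_PCM_DATA,	/* "LPCM"      */
		AFE_NON_LINEAR_DATA,	/* "Compr"     */
		AFE_GENERIC_COMPRESSED,	/* "Gen Compr" */
	};

	if (value >= 0 && value < (int)ARRAY_SIZE(tdm_afe_data_format))
		dai_data->port_cfg.tdm.data_format = tdm_afe_data_format[value];
	else
		pr_err("%s: data_format invalid\n", __func__);
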
@@ -5948,6 +6014,9 @@ static int msm_dai_q6_tdm_set_tdm_slot(struct snd_soc_dai *dai,
/* HW only supports 16 and 8 slots configuration */
switch (slots) {
+ case 2:
+ cap_mask = 0x03;
+ break;
case 8:
cap_mask = 0xFF;
break;
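
The added 2-slot case follows the existing convention of enabling every slot in the capability mask (0x3 for 2 slots, 0xFF for 8, and presumably 0xFFFF for the 16-slot case handled below). A compact sketch of the same rule; note the real code intentionally accepts only the discrete slot counts the hardware supports:

	/* Sketch: all-slots-active mask for a given slot count. */
	static u32 tdm_slot_cap_mask(unsigned int slots)
	{
		return (slots >= 2 && slots <= 16) ? (1U << slots) - 1 : 0;
	}
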
@@ -6368,17 +6437,25 @@ static int msm_dai_q6_tdm_prepare(struct snd_pcm_substream *substream,
__func__, dai->id);
goto rtn;
}
- rc = afe_port_group_enable(group_id,
- &dai_data->group_cfg, true);
- if (rc < 0) {
- dev_err(dai->dev, "%s: fail to enable AFE group 0x%x\n",
+
+ /*
+	 * Skip the AFE group enable when only one port is configured:
+	 * a single port has no group to set up.
+ */
+ if (dai_data->num_group_ports > 1) {
+ rc = afe_port_group_enable(group_id,
+ &dai_data->group_cfg, true);
+ if (rc < 0) {
+ dev_err(dai->dev,
+ "%s: fail to enable AFE group 0x%x\n",
__func__, group_id);
- goto rtn;
+ goto rtn;
+ }
}
}
rc = afe_tdm_port_start(dai->id, &dai_data->port_cfg,
- dai_data->rate);
+ dai_data->rate, dai_data->num_group_ports);
if (rc < 0) {
if (atomic_read(group_ref) == 0) {
afe_port_group_enable(group_id,
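
With a single-port configuration there is no AFE group to program, so the group enable is skipped and the port count is now passed through to afe_tdm_port_start(). The same guard could be factored into a helper; a hypothetical sketch using only the calls visible in this hunk:

	/* Hypothetical: touch the AFE group only when it is a real group. */
	static int tdm_group_enable_if_needed(struct msm_dai_q6_tdm_dai_data *dai_data,
					      u16 group_id, u16 enable)
	{
		if (dai_data->num_group_ports <= 1)
			return 0;	/* single port: nothing to group */
		return afe_port_group_enable(group_id, &dai_data->group_cfg, enable);
	}
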
@@ -6472,13 +6549,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM0 Playback",
.aif_name = "PRI_TDM_RX_0",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_RX,
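
This and the following DAI entries raise the advertised limits to 352.8 kHz and add 32-bit samples. As a rough worked example of what the upper end implies, assuming one 32-bit slot per channel and no additional framing bits:

	/*
	 * Worst-case TDM bit clock at the new limits:
	 * 352800 Hz x 8 channels x 32 bits = 90316800 Hz (~90.32 MHz).
	 */
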
@@ -6490,13 +6569,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM1 Playback",
.aif_name = "PRI_TDM_RX_1",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_RX_1,
@@ -6508,13 +6589,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM2 Playback",
.aif_name = "PRI_TDM_RX_2",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_RX_2,
@@ -6526,13 +6609,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM3 Playback",
.aif_name = "PRI_TDM_RX_3",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_RX_3,
@@ -6544,13 +6629,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM4 Playback",
.aif_name = "PRI_TDM_RX_4",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_RX_4,
@@ -6562,13 +6649,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM5 Playback",
.aif_name = "PRI_TDM_RX_5",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_RX_5,
@@ -6580,13 +6669,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM6 Playback",
.aif_name = "PRI_TDM_RX_6",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_RX_6,
@@ -6598,13 +6689,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM7 Playback",
.aif_name = "PRI_TDM_RX_7",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_RX_7,
@@ -6616,13 +6709,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM0 Capture",
.aif_name = "PRI_TDM_TX_0",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_TX,
@@ -6634,13 +6729,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM1 Capture",
.aif_name = "PRI_TDM_TX_1",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_TX_1,
@@ -6652,13 +6749,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM2 Capture",
.aif_name = "PRI_TDM_TX_2",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_TX_2,
@@ -6670,13 +6769,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM3 Capture",
.aif_name = "PRI_TDM_TX_3",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_TX_3,
@@ -6688,13 +6789,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM4 Capture",
.aif_name = "PRI_TDM_TX_4",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_TX_4,
@@ -6706,13 +6809,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM5 Capture",
.aif_name = "PRI_TDM_TX_5",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_TX_5,
@@ -6724,13 +6829,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM6 Capture",
.aif_name = "PRI_TDM_TX_6",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_TX_6,
@@ -6742,13 +6849,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Primary TDM7 Capture",
.aif_name = "PRI_TDM_TX_7",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_PRIMARY_TDM_TX_7,
@@ -6760,13 +6869,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM0 Playback",
.aif_name = "SEC_TDM_RX_0",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_RX,
@@ -6778,13 +6889,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM1 Playback",
.aif_name = "SEC_TDM_RX_1",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_RX_1,
@@ -6796,13 +6909,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM2 Playback",
.aif_name = "SEC_TDM_RX_2",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_RX_2,
@@ -6814,13 +6929,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM3 Playback",
.aif_name = "SEC_TDM_RX_3",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_RX_3,
@@ -6832,13 +6949,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM4 Playback",
.aif_name = "SEC_TDM_RX_4",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_RX_4,
@@ -6850,13 +6969,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM5 Playback",
.aif_name = "SEC_TDM_RX_5",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_RX_5,
@@ -6868,13 +6989,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM6 Playback",
.aif_name = "SEC_TDM_RX_6",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_RX_6,
@@ -6886,13 +7009,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM7 Playback",
.aif_name = "SEC_TDM_RX_7",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_RX_7,
@@ -6904,13 +7029,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM0 Capture",
.aif_name = "SEC_TDM_TX_0",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_TX,
@@ -6922,13 +7049,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM1 Capture",
.aif_name = "SEC_TDM_TX_1",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_TX_1,
@@ -6940,13 +7069,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM2 Capture",
.aif_name = "SEC_TDM_TX_2",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_TX_2,
@@ -6958,13 +7089,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM3 Capture",
.aif_name = "SEC_TDM_TX_3",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_TX_3,
@@ -6976,13 +7109,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM4 Capture",
.aif_name = "SEC_TDM_TX_4",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_TX_4,
@@ -6994,13 +7129,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM5 Capture",
.aif_name = "SEC_TDM_TX_5",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_TX_5,
@@ -7012,13 +7149,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM6 Capture",
.aif_name = "SEC_TDM_TX_6",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_TX_6,
@@ -7030,13 +7169,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Secondary TDM7 Capture",
.aif_name = "SEC_TDM_TX_7",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_SECONDARY_TDM_TX_7,
@@ -7048,13 +7189,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM0 Playback",
.aif_name = "TERT_TDM_RX_0",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_RX,
@@ -7066,13 +7209,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM1 Playback",
.aif_name = "TERT_TDM_RX_1",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_RX_1,
@@ -7084,13 +7229,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM2 Playback",
.aif_name = "TERT_TDM_RX_2",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_RX_2,
@@ -7102,13 +7249,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM3 Playback",
.aif_name = "TERT_TDM_RX_3",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_RX_3,
@@ -7120,13 +7269,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM4 Playback",
.aif_name = "TERT_TDM_RX_4",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_RX_4,
@@ -7138,13 +7289,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM5 Playback",
.aif_name = "TERT_TDM_RX_5",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_RX_5,
@@ -7156,13 +7309,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM6 Playback",
.aif_name = "TERT_TDM_RX_6",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_RX_6,
@@ -7174,13 +7329,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM7 Playback",
.aif_name = "TERT_TDM_RX_7",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_RX_7,
@@ -7192,13 +7349,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM0 Capture",
.aif_name = "TERT_TDM_TX_0",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_TX,
@@ -7210,13 +7369,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM1 Capture",
.aif_name = "TERT_TDM_TX_1",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_TX_1,
@@ -7228,13 +7389,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM2 Capture",
.aif_name = "TERT_TDM_TX_2",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_TX_2,
@@ -7246,13 +7409,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM3 Capture",
.aif_name = "TERT_TDM_TX_3",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_TX_3,
@@ -7264,13 +7429,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM4 Capture",
.aif_name = "TERT_TDM_TX_4",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_TX_4,
@@ -7282,13 +7449,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM5 Capture",
.aif_name = "TERT_TDM_TX_5",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_TX_5,
@@ -7300,13 +7469,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM6 Capture",
.aif_name = "TERT_TDM_TX_6",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_TX_6,
@@ -7318,13 +7489,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Tertiary TDM7 Capture",
.aif_name = "TERT_TDM_TX_7",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_TERTIARY_TDM_TX_7,
@@ -7336,13 +7509,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM0 Playback",
.aif_name = "QUAT_TDM_RX_0",
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
- SNDRV_PCM_RATE_16000,
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_176400 |
+ SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_RX,
@@ -7354,13 +7529,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM1 Playback",
.aif_name = "QUAT_TDM_RX_1",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_RX_1,
@@ -7372,13 +7549,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM2 Playback",
.aif_name = "QUAT_TDM_RX_2",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_RX_2,
@@ -7390,13 +7569,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM3 Playback",
.aif_name = "QUAT_TDM_RX_3",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_RX_3,
@@ -7408,13 +7589,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM4 Playback",
.aif_name = "QUAT_TDM_RX_4",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_RX_4,
@@ -7426,13 +7609,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM5 Playback",
.aif_name = "QUAT_TDM_RX_5",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_RX_5,
@@ -7444,13 +7629,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM6 Playback",
.aif_name = "QUAT_TDM_RX_6",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_RX_6,
@@ -7462,13 +7649,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM7 Playback",
.aif_name = "QUAT_TDM_RX_7",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_RX_7,
@@ -7480,13 +7669,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM0 Capture",
.aif_name = "QUAT_TDM_TX_0",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_TX,
@@ -7498,13 +7689,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM1 Capture",
.aif_name = "QUAT_TDM_TX_1",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_TX_1,
@@ -7516,13 +7709,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM2 Capture",
.aif_name = "QUAT_TDM_TX_2",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_TX_2,
@@ -7534,13 +7729,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM3 Capture",
.aif_name = "QUAT_TDM_TX_3",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_TX_3,
@@ -7552,13 +7749,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM4 Capture",
.aif_name = "QUAT_TDM_TX_4",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_TX_4,
@@ -7570,13 +7769,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM5 Capture",
.aif_name = "QUAT_TDM_TX_5",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_TX_5,
@@ -7588,13 +7789,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM6 Capture",
.aif_name = "QUAT_TDM_TX_6",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_TX_6,
@@ -7606,13 +7809,15 @@ static struct snd_soc_dai_driver msm_dai_q6_tdm_dai[] = {
.stream_name = "Quaternary TDM7 Capture",
.aif_name = "QUAT_TDM_TX_7",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 352800,
},
.ops = &msm_dai_q6_tdm_ops,
.id = AFE_PORT_ID_QUATERNARY_TDM_TX_7,
@@ -7805,6 +8010,9 @@ static int msm_dai_q6_tdm_dev_probe(struct platform_device *pdev)
dai_data->clk_set = tdm_clk_set;
/* copy static group cfg per parent node */
dai_data->group_cfg.tdm_cfg = tdm_group_cfg;
+ /* copy static num group ports per parent node */
+ dai_data->num_group_ports = num_tdm_group_ports;
+
dev_set_drvdata(&pdev->dev, dai_data);
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
deleted file mode 100644
index 41cb983..0000000
--- a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
+++ /dev/null
@@ -1,1096 +0,0 @@
-/* Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <sound/control.h>
-#include <sound/q6adm-v2.h>
-#include <sound/q6core.h>
-
-#include "msm-dolby-dap-config.h"
-
-#ifndef DOLBY_PARAM_VCNB_MAX_LENGTH
-#define DOLBY_PARAM_VCNB_MAX_LENGTH 40
-#endif
-
-/* dolby endp based parameters */
-struct dolby_dap_endp_params_s {
- int device;
- int device_ch_caps;
- int dap_device;
- int params_id[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
- int params_len[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
- int params_offset[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
- int params_val[DOLBY_ENDDEP_PARAM_LENGTH];
-};
-
-const struct dolby_dap_endp_params_s
- dolby_dap_endp_params[NUM_DOLBY_ENDP_DEVICE] = {
- {EARPIECE, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {SPEAKER, 2, DOLBY_ENDP_INT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {WIRED_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {WIRED_HEADPHONE, 2, DOLBY_ENDP_HEADPHONES,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {BLUETOOTH_SCO, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {BLUETOOTH_SCO_HEADSET, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {BLUETOOTH_SCO_CARKIT, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {BLUETOOTH_A2DP, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {BLUETOOTH_A2DP_HEADPHONES, 2, DOLBY_ENDP_HEADPHONES,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {BLUETOOTH_A2DP_SPEAKER, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {AUX_DIGITAL, 2, DOLBY_ENDP_HDMI,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-496, -496, 0}
- },
- {AUX_DIGITAL, 6, DOLBY_ENDP_HDMI,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-496, -496, 0}
- },
- {AUX_DIGITAL, 8, DOLBY_ENDP_HDMI,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-496, -496, 0}
- },
- {ANLG_DOCK_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {DGTL_DOCK_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {USB_ACCESSORY, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {USB_DEVICE, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {REMOTE_SUBMIX, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {PROXY, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {PROXY, 6, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {FM, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {FM_TX, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
-};
-
-/* dolby param ids to/from dsp */
-static uint32_t dolby_dap_params_id[ALL_DOLBY_PARAMS] = {
- DOLBY_PARAM_ID_VDHE, DOLBY_PARAM_ID_VSPE, DOLBY_PARAM_ID_DSSF,
- DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLE,
- DOLBY_PARAM_ID_DVMC, DOLBY_PARAM_ID_DVME, DOLBY_PARAM_ID_IENB,
- DOLBY_PARAM_ID_IEBF, DOLBY_PARAM_ID_IEON, DOLBY_PARAM_ID_DEON,
- DOLBY_PARAM_ID_NGON, DOLBY_PARAM_ID_GEON, DOLBY_PARAM_ID_GENB,
- DOLBY_PARAM_ID_GEBF, DOLBY_PARAM_ID_AONB, DOLBY_PARAM_ID_AOBF,
- DOLBY_PARAM_ID_AOBG, DOLBY_PARAM_ID_AOON, DOLBY_PARAM_ID_ARNB,
- DOLBY_PARAM_ID_ARBF, DOLBY_PARAM_ID_PLB, DOLBY_PARAM_ID_PLMD,
- DOLBY_PARAM_ID_DHSB, DOLBY_PARAM_ID_DHRG, DOLBY_PARAM_ID_DSSB,
- DOLBY_PARAM_ID_DSSA, DOLBY_PARAM_ID_DVLA, DOLBY_PARAM_ID_IEBT,
- DOLBY_PARAM_ID_IEA, DOLBY_PARAM_ID_DEA, DOLBY_PARAM_ID_DED,
- DOLBY_PARAM_ID_GEBG, DOLBY_PARAM_ID_AOCC, DOLBY_PARAM_ID_ARBI,
- DOLBY_PARAM_ID_ARBL, DOLBY_PARAM_ID_ARBH, DOLBY_PARAM_ID_AROD,
- DOLBY_PARAM_ID_ARTP, DOLBY_PARAM_ID_VMON, DOLBY_PARAM_ID_VMB,
- DOLBY_PARAM_ID_VCNB, DOLBY_PARAM_ID_VCBF, DOLBY_PARAM_ID_PREG,
- DOLBY_PARAM_ID_VEN, DOLBY_PARAM_ID_PSTG, DOLBY_COMMIT_ALL_TO_DSP,
- DOLBY_COMMIT_TO_DSP, DOLBY_USE_CACHE, DOLBY_AUTO_ENDP,
- DOLBY_AUTO_ENDDEP_PARAMS
-};
-
-/* modifed state: 0x00000000 - Not updated
- * > 0x00000000 && < 0x00010000
- * Updated and not committed to DSP
- * 0x00010001 - Updated and committed to DSP
- * > 0x00010001 - Modified the committed value
- */
-static int dolby_dap_params_modified[MAX_DOLBY_PARAMS] = { 0 };
-/* param offset */
-static uint32_t dolby_dap_params_offset[MAX_DOLBY_PARAMS] = {
- DOLBY_PARAM_VDHE_OFFSET, DOLBY_PARAM_VSPE_OFFSET,
- DOLBY_PARAM_DSSF_OFFSET, DOLBY_PARAM_DVLI_OFFSET,
- DOLBY_PARAM_DVLO_OFFSET, DOLBY_PARAM_DVLE_OFFSET,
- DOLBY_PARAM_DVMC_OFFSET, DOLBY_PARAM_DVME_OFFSET,
- DOLBY_PARAM_IENB_OFFSET, DOLBY_PARAM_IEBF_OFFSET,
- DOLBY_PARAM_IEON_OFFSET, DOLBY_PARAM_DEON_OFFSET,
- DOLBY_PARAM_NGON_OFFSET, DOLBY_PARAM_GEON_OFFSET,
- DOLBY_PARAM_GENB_OFFSET, DOLBY_PARAM_GEBF_OFFSET,
- DOLBY_PARAM_AONB_OFFSET, DOLBY_PARAM_AOBF_OFFSET,
- DOLBY_PARAM_AOBG_OFFSET, DOLBY_PARAM_AOON_OFFSET,
- DOLBY_PARAM_ARNB_OFFSET, DOLBY_PARAM_ARBF_OFFSET,
- DOLBY_PARAM_PLB_OFFSET, DOLBY_PARAM_PLMD_OFFSET,
- DOLBY_PARAM_DHSB_OFFSET, DOLBY_PARAM_DHRG_OFFSET,
- DOLBY_PARAM_DSSB_OFFSET, DOLBY_PARAM_DSSA_OFFSET,
- DOLBY_PARAM_DVLA_OFFSET, DOLBY_PARAM_IEBT_OFFSET,
- DOLBY_PARAM_IEA_OFFSET, DOLBY_PARAM_DEA_OFFSET,
- DOLBY_PARAM_DED_OFFSET, DOLBY_PARAM_GEBG_OFFSET,
- DOLBY_PARAM_AOCC_OFFSET, DOLBY_PARAM_ARBI_OFFSET,
- DOLBY_PARAM_ARBL_OFFSET, DOLBY_PARAM_ARBH_OFFSET,
- DOLBY_PARAM_AROD_OFFSET, DOLBY_PARAM_ARTP_OFFSET,
- DOLBY_PARAM_VMON_OFFSET, DOLBY_PARAM_VMB_OFFSET,
- DOLBY_PARAM_VCNB_OFFSET, DOLBY_PARAM_VCBF_OFFSET,
- DOLBY_PARAM_PREG_OFFSET, DOLBY_PARAM_VEN_OFFSET,
- DOLBY_PARAM_PSTG_OFFSET
-};
-/* param_length */
-static uint32_t dolby_dap_params_length[MAX_DOLBY_PARAMS] = {
- DOLBY_PARAM_VDHE_LENGTH, DOLBY_PARAM_VSPE_LENGTH,
- DOLBY_PARAM_DSSF_LENGTH, DOLBY_PARAM_DVLI_LENGTH,
- DOLBY_PARAM_DVLO_LENGTH, DOLBY_PARAM_DVLE_LENGTH,
- DOLBY_PARAM_DVMC_LENGTH, DOLBY_PARAM_DVME_LENGTH,
- DOLBY_PARAM_IENB_LENGTH, DOLBY_PARAM_IEBF_LENGTH,
- DOLBY_PARAM_IEON_LENGTH, DOLBY_PARAM_DEON_LENGTH,
- DOLBY_PARAM_NGON_LENGTH, DOLBY_PARAM_GEON_LENGTH,
- DOLBY_PARAM_GENB_LENGTH, DOLBY_PARAM_GEBF_LENGTH,
- DOLBY_PARAM_AONB_LENGTH, DOLBY_PARAM_AOBF_LENGTH,
- DOLBY_PARAM_AOBG_LENGTH, DOLBY_PARAM_AOON_LENGTH,
- DOLBY_PARAM_ARNB_LENGTH, DOLBY_PARAM_ARBF_LENGTH,
- DOLBY_PARAM_PLB_LENGTH, DOLBY_PARAM_PLMD_LENGTH,
- DOLBY_PARAM_DHSB_LENGTH, DOLBY_PARAM_DHRG_LENGTH,
- DOLBY_PARAM_DSSB_LENGTH, DOLBY_PARAM_DSSA_LENGTH,
- DOLBY_PARAM_DVLA_LENGTH, DOLBY_PARAM_IEBT_LENGTH,
- DOLBY_PARAM_IEA_LENGTH, DOLBY_PARAM_DEA_LENGTH,
- DOLBY_PARAM_DED_LENGTH, DOLBY_PARAM_GEBG_LENGTH,
- DOLBY_PARAM_AOCC_LENGTH, DOLBY_PARAM_ARBI_LENGTH,
- DOLBY_PARAM_ARBL_LENGTH, DOLBY_PARAM_ARBH_LENGTH,
- DOLBY_PARAM_AROD_LENGTH, DOLBY_PARAM_ARTP_LENGTH,
- DOLBY_PARAM_VMON_LENGTH, DOLBY_PARAM_VMB_LENGTH,
- DOLBY_PARAM_VCNB_LENGTH, DOLBY_PARAM_VCBF_LENGTH,
- DOLBY_PARAM_PREG_LENGTH, DOLBY_PARAM_VEN_LENGTH,
- DOLBY_PARAM_PSTG_LENGTH
-};
-
-/* param_value */
-static uint32_t dolby_dap_params_value[TOTAL_LENGTH_DOLBY_PARAM] = {0};
-
-struct dolby_dap_params_get_s {
- int32_t port_id;
- uint32_t device_id;
- uint32_t param_id;
- uint32_t offset;
- uint32_t length;
-};
-
-struct dolby_dap_params_states_s {
- bool use_cache;
- bool auto_endp;
- bool enddep_params;
- int port_id[AFE_MAX_PORTS];
- int copp_idx[AFE_MAX_PORTS];
- int port_open_count;
- int port_ids_dolby_can_be_enabled;
- int device;
-};
-
-static struct dolby_dap_params_get_s dolby_dap_params_get = {-1, DEVICE_OUT_ALL,
- 0, 0, 0};
-static struct dolby_dap_params_states_s dolby_dap_params_states = { true, true,
- true, {DOLBY_INVALID_PORT_ID},
- {-1}, 0, DEVICE_OUT_ALL, 0 };
-/*
- * port_ids_dolby_can_be_enabled is set to 0x7FFFFFFF.
- * this needs to be removed after interface validation
- */
-
-static int msm_dolby_dap_map_device_to_dolby_endpoint(int device)
-{
- int i, dolby_dap_device = DOLBY_ENDP_EXT_SPEAKERS;
-
- for (i = 0; i < NUM_DOLBY_ENDP_DEVICE; i++) {
- if (dolby_dap_endp_params[i].device == device) {
- dolby_dap_device = dolby_dap_endp_params[i].dap_device;
- break;
- }
- }
-	/* default the endpoint to speaker if the corresponding device
-	 * entry is not found
-	 */
- if (i >= NUM_DOLBY_ENDP_DEVICE)
- dolby_dap_params_states.device = SPEAKER;
- return dolby_dap_device;
-}
-
-static int msm_dolby_dap_send_end_point(int port_id, int copp_idx)
-{
- int rc = 0;
- char *params_value;
- int *update_params_value;
- uint32_t params_length = (DOLBY_PARAM_INT_ENDP_LENGTH +
- DOLBY_PARAM_PAYLOAD_SIZE) * sizeof(uint32_t);
-
- pr_debug("%s\n", __func__);
- params_value = kzalloc(params_length, GFP_KERNEL);
- if (!params_value) {
- pr_err("%s, params memory alloc failed", __func__);
- return -ENOMEM;
- }
- update_params_value = (int *)params_value;
- *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
- *update_params_value++ = DOLBY_PARAM_ID_INIT_ENDP;
- *update_params_value++ = DOLBY_PARAM_INT_ENDP_LENGTH * sizeof(uint32_t);
- *update_params_value++ =
- msm_dolby_dap_map_device_to_dolby_endpoint(
- dolby_dap_params_states.device);
- rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
- params_length);
- if (rc) {
- pr_err("%s: send dolby params failed\n", __func__);
- rc = -EINVAL;
- }
- kfree(params_value);
- return rc;
-}
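/*
 * Editor's note - illustrative sketch, not part of the original file: the
 * per-parameter layout packed by msm_dolby_dap_send_end_point() above and
 * reused by the other senders in this file. DOLBY_PARAM_PAYLOAD_SIZE is
 * assumed to account for the three header words.
 */
struct dolby_dap_param_blob_example {
	uint32_t module_id;	/* DOLBY_BUNDLE_MODULE_ID */
	uint32_t param_id;	/* e.g. DOLBY_PARAM_ID_INIT_ENDP */
	uint32_t size_bytes;	/* payload length in bytes */
	uint32_t value[];	/* size_bytes / sizeof(uint32_t) words */
};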
-
-static int msm_dolby_dap_send_enddep_params(int port_id, int copp_idx,
- int device_channels)
-{
- int i, j, rc = 0, idx, offset;
- char *params_value;
- int *update_params_value;
- uint32_t params_length = (DOLBY_ENDDEP_PARAM_LENGTH +
- DOLBY_NUM_ENDP_DEPENDENT_PARAMS *
- DOLBY_PARAM_PAYLOAD_SIZE) *
- sizeof(uint32_t);
-
- pr_debug("%s\n", __func__);
- params_value = kzalloc(params_length, GFP_KERNEL);
- if (!params_value) {
- pr_err("%s, params memory alloc failed", __func__);
- return -ENOMEM;
- }
- update_params_value = (int *)params_value;
- for (idx = 0; idx < NUM_DOLBY_ENDP_DEVICE; idx++) {
- if (dolby_dap_endp_params[idx].device ==
- dolby_dap_params_states.device) {
- if (dolby_dap_params_states.device == AUX_DIGITAL ||
- dolby_dap_params_states.device == PROXY) {
- if (dolby_dap_endp_params[idx].device_ch_caps ==
- device_channels)
- break;
- } else {
- break;
- }
- }
- }
- if (idx >= NUM_DOLBY_ENDP_DEVICE) {
- pr_err("%s: device is not set accordingly\n", __func__);
- kfree(params_value);
- return -EINVAL;
- }
- for (i = 0; i < DOLBY_ENDDEP_PARAM_LENGTH; i++) {
- *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
- *update_params_value++ =
- dolby_dap_endp_params[idx].params_id[i];
- *update_params_value++ =
- dolby_dap_endp_params[idx].params_len[i] *
- sizeof(uint32_t);
- offset = dolby_dap_endp_params[idx].params_offset[i];
- for (j = 0; j < dolby_dap_endp_params[idx].params_len[i]; j++)
- *update_params_value++ =
- dolby_dap_endp_params[idx].params_val[offset+j];
- }
- rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
- params_length);
- if (rc) {
- pr_err("%s: send dolby params failed\n", __func__);
- rc = -EINVAL;
- }
- kfree(params_value);
- return rc;
-}
-
-static int msm_dolby_dap_send_cached_params(int port_id, int copp_idx,
- int commit)
-{
- char *params_value;
- int *update_params_value, rc = 0;
- uint32_t index_offset, i, j;
- uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
- MAX_DOLBY_PARAMS * DOLBY_PARAM_PAYLOAD_SIZE) *
- sizeof(uint32_t);
-
- params_value = kzalloc(params_length, GFP_KERNEL);
- if (!params_value)
- return -ENOMEM;
-
- update_params_value = (int *)params_value;
- params_length = 0;
- for (i = 0; i < MAX_DOLBY_PARAMS; i++) {
- if ((dolby_dap_params_modified[i] == 0) ||
- ((commit) &&
- ((dolby_dap_params_modified[i] & 0x00010000) &&
- ((dolby_dap_params_modified[i] & 0x0000FFFF) <= 1))))
- continue;
- *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
- *update_params_value++ = dolby_dap_params_id[i];
- *update_params_value++ = dolby_dap_params_length[i] *
- sizeof(uint32_t);
- index_offset = dolby_dap_params_offset[i];
- for (j = 0; j < dolby_dap_params_length[i]; j++) {
- *update_params_value++ =
- dolby_dap_params_value[index_offset+j];
- }
- params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
- dolby_dap_params_length[i]) * sizeof(uint32_t);
- }
- pr_debug("%s, valid param length: %d", __func__, params_length);
- if (params_length) {
- rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
- params_length);
- if (rc) {
- pr_err("%s: send dolby params failed\n", __func__);
- kfree(params_value);
- return -EINVAL;
- }
- for (i = 0; i < MAX_DOLBY_PARAMS; i++) {
- if ((dolby_dap_params_modified[i] == 0) ||
- ((commit) &&
- ((dolby_dap_params_modified[i] & 0x00010000) &&
- ((dolby_dap_params_modified[i] & 0x0000FFFF) <= 1))
- ))
- continue;
- dolby_dap_params_modified[i] = 0x00010001;
- }
- }
- kfree(params_value);
- return 0;
-}
-
-int msm_dolby_dap_init(int port_id, int copp_idx, int channels,
- bool is_custom_stereo_on)
-{
- int ret = 0;
- int index = adm_validate_and_get_port_index(port_id);
-
- if (index < 0) {
- pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
- port_id);
- return -EINVAL;
- }
- if ((port_id != DOLBY_INVALID_PORT_ID) &&
- (port_id & dolby_dap_params_states.port_ids_dolby_can_be_enabled)) {
- dolby_dap_params_states.port_id[index] = port_id;
- dolby_dap_params_states.copp_idx[index] = copp_idx;
- dolby_dap_params_states.port_open_count++;
- if (dolby_dap_params_states.auto_endp) {
- ret = msm_dolby_dap_send_end_point(port_id, copp_idx);
- if (ret) {
-				pr_err("%s: err sending endpoint\n", __func__);
- return ret;
- }
- }
- if (dolby_dap_params_states.use_cache) {
- ret = msm_dolby_dap_send_cached_params(port_id,
- copp_idx, 0);
- if (ret) {
- pr_err("%s: err sending cached params\n",
- __func__);
- return ret;
- }
- }
- if (dolby_dap_params_states.enddep_params) {
- msm_dolby_dap_send_enddep_params(port_id, copp_idx,
-			ret = msm_dolby_dap_send_enddep_params(port_id,
-						copp_idx, channels);
- pr_err("%s: err sending endp dependent params\n",
- __func__);
- return ret;
- }
- }
- if (is_custom_stereo_on)
- dolby_dap_set_custom_stereo_onoff(port_id, copp_idx,
- is_custom_stereo_on);
- }
- return ret;
-}
-
-void msm_dolby_dap_deinit(int port_id)
-{
- int index = adm_validate_and_get_port_index(port_id);
-
- if (index < 0) {
- pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
- port_id);
- return;
- }
- dolby_dap_params_states.port_open_count--;
- if ((dolby_dap_params_states.port_id[index] == port_id) &&
- (!dolby_dap_params_states.port_open_count)) {
- dolby_dap_params_states.port_id[index] = DOLBY_INVALID_PORT_ID;
- dolby_dap_params_states.copp_idx[index] = -1;
- }
-}
-
-static int msm_dolby_dap_set_vspe_vdhe(int port_id, int copp_idx,
- bool is_custom_stereo_enabled)
-{
- char *params_value;
- int *update_params_value, rc = 0;
- uint32_t index_offset, i, j;
- uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
- 2 * DOLBY_PARAM_PAYLOAD_SIZE) *
- sizeof(uint32_t);
- if (port_id == DOLBY_INVALID_PORT_ID) {
- pr_err("%s: Not a Dolby topology. Do not set custom stereo mixing\n",
- __func__);
- return -EINVAL;
- }
- params_value = kzalloc(params_length, GFP_KERNEL);
- if (!params_value)
- return -ENOMEM;
-
- update_params_value = (int *)params_value;
- params_length = 0;
- /* for VDHE and VSPE DAP params at index 0 and 1 in table */
- for (i = 0; i < 2; i++) {
- *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
- *update_params_value++ = dolby_dap_params_id[i];
- *update_params_value++ = dolby_dap_params_length[i] *
- sizeof(uint32_t);
- index_offset = dolby_dap_params_offset[i];
- for (j = 0; j < dolby_dap_params_length[i]; j++) {
- if (is_custom_stereo_enabled)
- *update_params_value++ = 0;
- else
- *update_params_value++ =
- dolby_dap_params_value[index_offset+j];
- }
- params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
- dolby_dap_params_length[i]) * sizeof(uint32_t);
- }
- pr_debug("%s, valid param length: %d", __func__, params_length);
- if (params_length) {
- rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
- params_length);
- if (rc) {
- pr_err("%s: send vdhe/vspe params failed with rc=%d\n",
- __func__, rc);
- kfree(params_value);
- return -EINVAL;
- }
- }
- kfree(params_value);
- return 0;
-}
-
-int dolby_dap_set_custom_stereo_onoff(int port_id, int copp_idx,
- bool is_custom_stereo_enabled)
-{
- char *params_value;
- int *update_params_value, rc = 0;
- uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
- DOLBY_PARAM_PAYLOAD_SIZE) *
- sizeof(uint32_t);
- if (port_id == DOLBY_INVALID_PORT_ID)
- return -EINVAL;
-
- msm_dolby_dap_set_vspe_vdhe(port_id, copp_idx,
- is_custom_stereo_enabled);
- params_value = kzalloc(params_length, GFP_KERNEL);
- if (!params_value) {
- pr_err("%s, params memory alloc failed\n", __func__);
- return -ENOMEM;
- }
- update_params_value = (int *)params_value;
- params_length = 0;
- *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
- *update_params_value++ = DOLBY_ENABLE_CUSTOM_STEREO;
- *update_params_value++ = sizeof(uint32_t);
- if (is_custom_stereo_enabled)
- *update_params_value++ = 1;
- else
- *update_params_value++ = 0;
- params_length += (DOLBY_PARAM_PAYLOAD_SIZE + 1) * sizeof(uint32_t);
- pr_debug("%s, valid param length: %d", __func__, params_length);
- if (params_length) {
- rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
- params_length);
- if (rc) {
- pr_err("%s: setting ds1 custom stereo param failed with rc=%d\n",
- __func__, rc);
- kfree(params_value);
- return -EINVAL;
- }
- }
- kfree(params_value);
- return 0;
-}
-
-static int msm_dolby_dap_map_device_to_port_id(int device)
-{
- int port_id = SLIMBUS_0_RX;
-
- device = DEVICE_OUT_ALL;
-	/* update the device when single stream to multiple devices is
-	 * handled
-	 */
- if (device == DEVICE_OUT_ALL) {
- port_id = PRIMARY_I2S_RX | SLIMBUS_0_RX | HDMI_RX |
- INT_BT_SCO_RX | INT_FM_RX |
- RT_PROXY_PORT_001_RX |
- AFE_PORT_ID_PRIMARY_PCM_RX |
- MI2S_RX | SECONDARY_I2S_RX |
- SLIMBUS_1_RX | SLIMBUS_4_RX | SLIMBUS_3_RX |
- AFE_PORT_ID_SECONDARY_MI2S_RX;
- } else {
- /* update port_id based on the device */
- }
- return port_id;
-}
-
-int msm_dolby_dap_param_to_set_control_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- /* not used while setting the parameters */
- return 0;
-}
-
-int msm_dolby_dap_param_to_set_control_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- int rc = 0, port_id, copp_idx;
- uint32_t idx, j, current_offset;
- uint32_t device = ucontrol->value.integer.value[0];
- uint32_t param_id = ucontrol->value.integer.value[1];
- uint32_t offset = ucontrol->value.integer.value[2];
- uint32_t length = ucontrol->value.integer.value[3];
-
- dolby_dap_params_states.port_ids_dolby_can_be_enabled =
- msm_dolby_dap_map_device_to_port_id(device);
- for (idx = 0; idx < ALL_DOLBY_PARAMS; idx++) {
-		/* param id received from user space */
- if (param_id == dolby_dap_params_id[idx])
- break;
- }
- if (idx > ALL_DOLBY_PARAMS-1) {
- pr_err("%s: invalid param id 0x%x to set\n", __func__,
- param_id);
- return -EINVAL;
- }
- switch (idx) {
- case DOLBY_COMMIT_ALL_IDX: {
-		/* COMMIT ALL: send all cached parameters to the DSP */
- pr_debug("%s: COMMIT_ALL recvd\n", __func__);
- for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
- port_id = dolby_dap_params_states.port_id[idx];
- copp_idx =
- dolby_dap_params_states.copp_idx[idx];
- if ((copp_idx > 0) &&
- (copp_idx < MAX_COPPS_PER_PORT) &&
- (port_id != DOLBY_INVALID_PORT_ID))
- rc |= msm_dolby_dap_send_cached_params(
- port_id,
- copp_idx,
- 0);
- }
- }
- break;
- case DOLBY_COMMIT_IDX: {
- pr_debug("%s: COMMIT recvd\n", __func__);
- /* COMMIT: Send only modified parameters to DSP */
- for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
- port_id = dolby_dap_params_states.port_id[idx];
- copp_idx =
- dolby_dap_params_states.copp_idx[idx];
- if ((copp_idx > 0) &&
- (copp_idx < MAX_COPPS_PER_PORT) &&
-			    (port_id != DOLBY_INVALID_PORT_ID))
- rc |= msm_dolby_dap_send_cached_params(
- port_id,
- copp_idx,
- 1);
- }
- }
- break;
- case DOLBY_USE_CACHE_IDX: {
- pr_debug("%s: USE CACHE recvd val: %ld\n", __func__,
- ucontrol->value.integer.value[4]);
- dolby_dap_params_states.use_cache =
- ucontrol->value.integer.value[4];
- }
- break;
- case DOLBY_AUTO_ENDP_IDX: {
- pr_debug("%s: AUTO_ENDP recvd val: %ld\n", __func__,
- ucontrol->value.integer.value[4]);
- dolby_dap_params_states.auto_endp =
- ucontrol->value.integer.value[4];
- }
- break;
- case DOLBY_AUTO_ENDDEP_IDX: {
- pr_debug("%s: USE_ENDDEP_PARAMS recvd val: %ld\n",
- __func__, ucontrol->value.integer.value[4]);
- dolby_dap_params_states.enddep_params =
- ucontrol->value.integer.value[4];
- }
- break;
- default: {
- /* cache the parameters */
- dolby_dap_params_modified[idx] += 1;
- current_offset = dolby_dap_params_offset[idx] + offset;
- if (current_offset >= TOTAL_LENGTH_DOLBY_PARAM) {
- pr_err("%s: invalid offset %d at idx %d\n",
- __func__, offset, idx);
- return -EINVAL;
- }
- if ((length == 0) || (current_offset + length - 1
- < current_offset) || (current_offset + length
- > TOTAL_LENGTH_DOLBY_PARAM)) {
- pr_err("%s: invalid length %d at idx %d\n",
- __func__, length, idx);
- return -EINVAL;
- }
- dolby_dap_params_length[idx] = length;
- pr_debug("%s: param recvd deviceId=0x%x paramId=0x%x offset=%d length=%d\n",
- __func__, device, param_id, offset, length);
- for (j = 0; j < length; j++) {
- dolby_dap_params_value[
- dolby_dap_params_offset[idx] +
- offset + j]
- = ucontrol->value.integer.value[4+j];
- pr_debug("value[%d]: %ld\n", j,
- ucontrol->value.integer.value[4+j]);
- }
- }
- }
-
- return rc;
-}
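/*
 * Editor's note - illustrative sketch, not part of the original file: index
 * layout consumed by the "DS1 DAP Set Param" put handler above; parameter
 * words start at index 4. Enum name and labels are hypothetical.
 */
enum dolby_dap_set_param_slot_example {
	DAP_SET_DEVICE		= 0,	/* output device mask */
	DAP_SET_PARAM_ID	= 1,	/* one of dolby_dap_params_id[] */
	DAP_SET_OFFSET		= 2,	/* offset into the cached values */
	DAP_SET_LENGTH		= 3,	/* number of 32-bit words that follow */
	DAP_SET_FIRST_VALUE	= 4,	/* value[4 + j] holds word j */
};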
-
-int msm_dolby_dap_param_to_get_control_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- int rc = 0, i, index;
- char *params_value;
- int *update_params_value;
- uint32_t params_length = DOLBY_MAX_LENGTH_INDIVIDUAL_PARAM *
- sizeof(uint32_t);
- uint32_t param_payload_len =
- DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
- int port_id = dolby_dap_params_get.port_id, copp_idx;
-
- if (port_id == DOLBY_INVALID_PORT_ID) {
- pr_err("%s, port_id not set, do not query ADM\n", __func__);
- return -EINVAL;
- }
- index = adm_validate_and_get_port_index(port_id);
- if (index < 0) {
- pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
- port_id);
- return -EINVAL;
- }
- copp_idx = dolby_dap_params_states.copp_idx[index];
- if ((copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) {
- pr_debug("%s: get params called before copp open.copp_idx:%d\n",
- __func__, copp_idx);
- return -EINVAL;
- }
- if (dolby_dap_params_get.length > 128 - DOLBY_PARAM_PAYLOAD_SIZE) {
- pr_err("%s: Incorrect parameter length", __func__);
- return -EINVAL;
- }
- params_value = kzalloc(params_length + param_payload_len, GFP_KERNEL);
- if (!params_value)
- return -ENOMEM;
-
- if (dolby_dap_params_get.param_id == DOLBY_PARAM_ID_VER) {
- rc = adm_get_params(port_id, copp_idx,
- DOLBY_BUNDLE_MODULE_ID, DOLBY_PARAM_ID_VER,
- params_length + param_payload_len,
- params_value);
- } else {
- for (i = 0; i < MAX_DOLBY_PARAMS; i++)
- if (dolby_dap_params_id[i] ==
- dolby_dap_params_get.param_id)
- break;
- if (i > MAX_DOLBY_PARAMS-1) {
- pr_err("%s: invalid param id to set", __func__);
- rc = -EINVAL;
- } else {
- params_length = dolby_dap_params_length[i] *
- sizeof(uint32_t);
- rc = adm_get_params(port_id, copp_idx,
- DOLBY_BUNDLE_MODULE_ID,
- dolby_dap_params_id[i],
- params_length + param_payload_len,
- params_value);
- }
- }
- if (rc) {
- pr_err("%s: get parameters failed rc:%d\n", __func__, rc);
- kfree(params_value);
- return -EINVAL;
- }
- update_params_value = (int *)params_value;
- ucontrol->value.integer.value[0] = dolby_dap_params_get.device_id;
- ucontrol->value.integer.value[1] = dolby_dap_params_get.param_id;
- ucontrol->value.integer.value[2] = dolby_dap_params_get.offset;
- ucontrol->value.integer.value[3] = dolby_dap_params_get.length;
-
- pr_debug("%s: FROM DSP value[0] 0x%x value[1] %d value[2] 0x%x\n",
- __func__, update_params_value[0],
- update_params_value[1], update_params_value[2]);
- for (i = 0; i < dolby_dap_params_get.length; i++) {
- ucontrol->value.integer.value[DOLBY_PARAM_PAYLOAD_SIZE+i] =
- update_params_value[i];
- pr_debug("value[%d]:%d\n", i, update_params_value[i]);
- }
- pr_debug("%s: Returning param_id=0x%x offset=%d length=%d\n",
- __func__, dolby_dap_params_get.param_id,
- dolby_dap_params_get.offset,
- dolby_dap_params_get.length);
- kfree(params_value);
- return 0;
-}
-
-int msm_dolby_dap_param_to_get_control_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- int port_id, idx, copp_idx;
-
- dolby_dap_params_get.device_id = ucontrol->value.integer.value[0];
- port_id = msm_dolby_dap_map_device_to_port_id(
- dolby_dap_params_get.device_id);
- for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
- port_id = dolby_dap_params_states.port_id[idx];
- copp_idx = dolby_dap_params_states.copp_idx[idx];
- if ((copp_idx < 0) ||
- (copp_idx >= MAX_COPPS_PER_PORT) ||
- (port_id == DOLBY_INVALID_PORT_ID))
- continue;
- else
- break;
- }
- if (idx == AFE_MAX_PORTS)
- port_id = SLIMBUS_0_RX;
- dolby_dap_params_get.port_id = port_id;
- dolby_dap_params_get.param_id = ucontrol->value.integer.value[1];
- dolby_dap_params_get.offset = ucontrol->value.integer.value[2];
- dolby_dap_params_get.length = ucontrol->value.integer.value[3];
- pr_debug("%s: param_id=0x%x offset=%d length=%d\n", __func__,
- dolby_dap_params_get.param_id, dolby_dap_params_get.offset,
- dolby_dap_params_get.length);
- return 0;
-}
-
-int msm_dolby_dap_param_visualizer_control_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- uint32_t length = dolby_dap_params_value[DOLBY_PARAM_VCNB_OFFSET];
- char *visualizer_data;
- int i, rc;
- int *update_visualizer_data;
- uint32_t offset, params_length =
- (2*length + DOLBY_VIS_PARAM_HEADER_SIZE)*sizeof(uint32_t);
- uint32_t param_payload_len =
- DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
- int port_id, copp_idx, idx;
-
- if (length > DOLBY_PARAM_VCNB_MAX_LENGTH || length <= 0) {
- pr_err("%s Incorrect VCNB length", __func__);
- ucontrol->value.integer.value[0] = 0;
- return -EINVAL;
- }
- for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
- port_id = dolby_dap_params_states.port_id[idx];
- copp_idx = dolby_dap_params_states.copp_idx[idx];
- if ((copp_idx < 0) ||
- (copp_idx >= MAX_COPPS_PER_PORT) ||
- (port_id == DOLBY_INVALID_PORT_ID))
- continue;
- else
- break;
- }
- if (idx == AFE_MAX_PORTS) {
- pr_debug("%s, port_id not set, returning error", __func__);
- ucontrol->value.integer.value[0] = 0;
- return -EINVAL;
- }
- visualizer_data = kzalloc(params_length, GFP_KERNEL);
- if (!visualizer_data)
- return -ENOMEM;
-
- offset = 0;
- params_length = length * sizeof(uint32_t);
- rc = adm_get_params(port_id, copp_idx, DOLBY_BUNDLE_MODULE_ID,
- DOLBY_PARAM_ID_VCBG,
- params_length + param_payload_len,
- visualizer_data + offset);
- if (rc) {
- pr_err("%s: get parameters failed\n", __func__);
- kfree(visualizer_data);
- return -EINVAL;
- }
-
- offset = length * sizeof(uint32_t);
- rc = adm_get_params(port_id, copp_idx, DOLBY_BUNDLE_MODULE_ID,
- DOLBY_PARAM_ID_VCBE,
- params_length + param_payload_len,
- visualizer_data + offset);
- if (rc) {
- pr_err("%s: get parameters failed\n", __func__);
- kfree(visualizer_data);
- return -EINVAL;
- }
-
- ucontrol->value.integer.value[0] = 2*length;
- pr_debug("%s: visualizer data length %ld\n", __func__,
- ucontrol->value.integer.value[0]);
- update_visualizer_data = (int *)visualizer_data;
- for (i = 0; i < 2*length; i++) {
- ucontrol->value.integer.value[1+i] = update_visualizer_data[i];
- pr_debug("value[%d] %d\n", i, update_visualizer_data[i]);
- }
- kfree(visualizer_data);
- return 0;
-}
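/*
 * Editor's note (illustrative, based on the handler above): the visualizer
 * control returns value[0] = 2 * VCNB band count, followed by the VCBG data
 * and then the VCBE data, each VCNB words long.
 */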
-
-int msm_dolby_dap_param_visualizer_control_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- /* not used while getting the visualizer data */
- return 0;
-}
-
-int msm_dolby_dap_endpoint_control_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- /* not used while setting the endpoint */
- return 0;
-}
-
-int msm_dolby_dap_endpoint_control_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- int device = ucontrol->value.integer.value[0];
-
- dolby_dap_params_states.device = device;
- return 0;
-}
-
-int msm_dolby_dap_security_control_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
-	/* not used while setting the manufacturer id */
- return 0;
-}
-
-int msm_dolby_dap_security_control_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- int manufacturer_id = ucontrol->value.integer.value[0];
-
- core_set_dolby_manufacturer_id(manufacturer_id);
- return 0;
-}
-
-int msm_dolby_dap_license_control_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- ucontrol->value.integer.value[0] =
- core_get_license_status(DOLBY_DS1_LICENSE_ID);
- return 0;
-}
-
-int msm_dolby_dap_license_control_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- return core_set_license(ucontrol->value.integer.value[0],
- DOLBY_DS1_LICENSE_ID);
-}
-
-static const struct snd_kcontrol_new dolby_license_controls[] = {
- SOC_SINGLE_MULTI_EXT("DS1 License", SND_SOC_NOPM, 0,
- 0xFFFFFFFF, 0, 1, msm_dolby_dap_license_control_get,
- msm_dolby_dap_license_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_security_controls[] = {
- SOC_SINGLE_MULTI_EXT("DS1 Security", SND_SOC_NOPM, 0,
- 0xFFFFFFFF, 0, 1, msm_dolby_dap_security_control_get,
- msm_dolby_dap_security_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_to_set_controls[] = {
- SOC_SINGLE_MULTI_EXT("DS1 DAP Set Param", SND_SOC_NOPM, 0, 0xFFFFFFFF,
- 0, 128, msm_dolby_dap_param_to_set_control_get,
- msm_dolby_dap_param_to_set_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_to_get_controls[] = {
- SOC_SINGLE_MULTI_EXT("DS1 DAP Get Param", SND_SOC_NOPM, 0, 0xFFFFFFFF,
- 0, 128, msm_dolby_dap_param_to_get_control_get,
- msm_dolby_dap_param_to_get_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_visualizer_controls[] = {
- SOC_SINGLE_MULTI_EXT("DS1 DAP Get Visualizer", SND_SOC_NOPM, 0,
- 0xFFFFFFFF, 0, 41, msm_dolby_dap_param_visualizer_control_get,
- msm_dolby_dap_param_visualizer_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_end_point_controls[] = {
- SOC_SINGLE_MULTI_EXT("DS1 DAP Endpoint", SND_SOC_NOPM, 0,
- 0xFFFFFFFF, 0, 1, msm_dolby_dap_endpoint_control_get,
- msm_dolby_dap_endpoint_control_put),
-};
-
-void msm_dolby_dap_add_controls(struct snd_soc_platform *platform)
-{
- snd_soc_add_platform_controls(platform,
- dolby_license_controls,
- ARRAY_SIZE(dolby_license_controls));
-
- snd_soc_add_platform_controls(platform,
- dolby_security_controls,
- ARRAY_SIZE(dolby_security_controls));
-
- snd_soc_add_platform_controls(platform,
- dolby_dap_param_to_set_controls,
- ARRAY_SIZE(dolby_dap_param_to_set_controls));
-
- snd_soc_add_platform_controls(platform,
- dolby_dap_param_to_get_controls,
- ARRAY_SIZE(dolby_dap_param_to_get_controls));
-
- snd_soc_add_platform_controls(platform,
- dolby_dap_param_visualizer_controls,
- ARRAY_SIZE(dolby_dap_param_visualizer_controls));
-
- snd_soc_add_platform_controls(platform,
- dolby_dap_param_end_point_controls,
- ARRAY_SIZE(dolby_dap_param_end_point_controls));
-}
diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
index 695c02d..37dd31f 100644
--- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c
+++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
@@ -86,6 +86,7 @@ struct lsm_priv {
atomic_t buf_count;
atomic_t read_abort;
wait_queue_head_t period_wait;
+ struct mutex lsm_api_lock;
int appl_cnt;
int dma_write;
};
@@ -954,10 +955,18 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
dev_dbg(rtd->dev, "%s: Get event status\n", __func__);
atomic_set(&prtd->event_wait_stop, 0);
+
+ /*
+ * Release the api lock before wait to allow
+	 * Release the API lock before waiting so that
+	 * other IOCTLs can be invoked while we wait
+	 * for the event.
+ mutex_unlock(&prtd->lsm_api_lock);
rc = wait_event_freezable(prtd->event_wait,
(cmpxchg(&prtd->event_avail, 1, 0) ||
(xchg = atomic_cmpxchg(&prtd->event_wait_stop,
1, 0))));
+ mutex_lock(&prtd->lsm_api_lock);
dev_dbg(rtd->dev, "%s: wait_event_freezable %d event_wait_stop %d\n",
__func__, rc, xchg);
if (!rc && !xchg) {
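/*
 * Editor's note - illustrative sketch of the locking pattern added above,
 * under the assumption that lsm_api_lock only guards the ioctl entry points:
 * release the mutex around the blocking wait so other ioctls (including the
 * one that raises event_wait_stop) can run, then retake it before touching
 * shared state again. The helper name is hypothetical.
 */
static int msm_lsm_wait_for_event_unlocked(struct lsm_priv *prtd)
{
	int rc;

	mutex_unlock(&prtd->lsm_api_lock);
	rc = wait_event_freezable(prtd->event_wait,
			cmpxchg(&prtd->event_avail, 1, 0) ||
			atomic_cmpxchg(&prtd->event_wait_stop, 1, 0));
	mutex_lock(&prtd->lsm_api_lock);
	return rc;
}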
@@ -1281,6 +1290,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
rtd = substream->private_data;
prtd = runtime->private_data;
+ mutex_lock(&prtd->lsm_api_lock);
+
switch (cmd) {
case SNDRV_LSM_EVENT_STATUS: {
struct snd_lsm_event_status *user = NULL, userarg32;
@@ -1289,7 +1300,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
if (copy_from_user(&userarg32, arg, sizeof(userarg32))) {
dev_err(rtd->dev, "%s: err copyuser ioctl %s\n",
__func__, "SNDRV_LSM_EVENT_STATUS");
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
if (userarg32.payload_size >
@@ -1297,7 +1309,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
__func__, userarg32.payload_size,
LISTEN_MAX_STATUS_PAYLOAD_SIZE);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
size = sizeof(*user) + userarg32.payload_size;
@@ -1306,7 +1319,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: Allocation failed event status size %d\n",
__func__, size);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
} else {
cmd = SNDRV_LSM_EVENT_STATUS;
user->payload_size = userarg32.payload_size;
@@ -1423,7 +1437,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "REG_SND_MODEL_V2");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&snd_modelv232, arg,
@@ -1464,7 +1479,7 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "SET_PARAMS_32");
- return -EINVAL;
+ err = -EINVAL;
}
if (copy_from_user(&det_params32, arg,
@@ -1507,7 +1522,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if not using topology\n",
__func__, "SET_MODULE_PARAMS_32");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&p_data_32, arg,
@@ -1516,7 +1532,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: copy_from_user failed, size = %zd\n",
__func__, "SET_MODULE_PARAMS_32",
sizeof(p_data_32));
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
p_data.params = compat_ptr(p_data_32.params);
@@ -1528,7 +1545,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: Invalid num_params %d\n",
__func__, "SET_MODULE_PARAMS_32",
p_data.num_params);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (p_data.data_size !=
@@ -1537,15 +1555,18 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: Invalid size %d\n",
__func__, "SET_MODULE_PARAMS_32",
p_data.data_size);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
p_size = sizeof(struct lsm_params_info_32) *
p_data.num_params;
params32 = kzalloc(p_size, GFP_KERNEL);
- if (!params32)
- return -ENOMEM;
+ if (!params32) {
+ err = -ENOMEM;
+ goto done;
+ }
p_size = sizeof(struct lsm_params_info) * p_data.num_params;
params = kzalloc(p_size, GFP_KERNEL);
@@ -1554,7 +1575,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: no memory for params, size = %zd\n",
__func__, p_size);
kfree(params32);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto done;
}
if (copy_from_user(params32, p_data.params,
@@ -1564,7 +1586,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
__func__, "params32", p_data.data_size);
kfree(params32);
kfree(params);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
p_info_32 = (struct lsm_params_info_32 *) params32;
@@ -1607,6 +1630,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
err = msm_lsm_ioctl_shared(substream, cmd, arg);
break;
}
+done:
+ mutex_unlock(&prtd->lsm_api_lock);
return err;
}
#else
@@ -1631,6 +1656,7 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
prtd = runtime->private_data;
rtd = substream->private_data;
+ mutex_lock(&prtd->lsm_api_lock);
switch (cmd) {
case SNDRV_LSM_REG_SND_MODEL_V2: {
struct snd_lsm_sound_model_v2 snd_model_v2;
@@ -1639,7 +1665,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "REG_SND_MODEL_V2");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&snd_model_v2, arg, sizeof(snd_model_v2))) {
@@ -1666,7 +1693,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "SET_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
pr_debug("%s: SNDRV_LSM_SET_PARAMS\n", __func__);
@@ -1687,7 +1715,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: LSM_SET_PARAMS failed, err %d\n",
__func__, err);
- return err;
+
+ goto done;
}
case SNDRV_LSM_SET_MODULE_PARAMS: {
@@ -1699,7 +1728,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if not using topology\n",
__func__, "SET_MODULE_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&p_data, arg,
@@ -1707,7 +1737,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: copy_from_user failed, size = %zd\n",
__func__, "p_data", sizeof(p_data));
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
if (p_data.num_params > LSM_PARAMS_MAX) {
@@ -1715,7 +1746,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: Invalid num_params %d\n",
__func__, "SET_MODULE_PARAMS",
p_data.num_params);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
p_size = p_data.num_params *
@@ -1726,12 +1758,15 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: Invalid size %zd\n",
__func__, "SET_MODULE_PARAMS", p_size);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
params = kzalloc(p_size, GFP_KERNEL);
- if (!params)
- return -ENOMEM;
+ if (!params) {
+ err = -ENOMEM;
+ goto done;
+ }
if (copy_from_user(params, p_data.params,
p_data.data_size)) {
@@ -1739,7 +1774,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: copy_from_user failed, size = %d\n",
__func__, "params", p_data.data_size);
kfree(params);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
err = msm_lsm_process_params(substream, &p_data, params);
@@ -1760,7 +1796,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: err copyuser event_status\n",
__func__);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
if (userarg.payload_size >
@@ -1768,7 +1805,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
__func__, userarg.payload_size,
LISTEN_MAX_STATUS_PAYLOAD_SIZE);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
size = sizeof(struct snd_lsm_event_status) +
@@ -1778,7 +1816,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: Allocation failed event status size %d\n",
__func__, size);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
user->payload_size = userarg.payload_size;
err = msm_lsm_ioctl_shared(substream, cmd, user);
@@ -1801,7 +1840,7 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
if (err)
dev_err(rtd->dev,
"%s: lsmevent failed %d", __func__, err);
- return err;
+ goto done;
}
case SNDRV_LSM_EVENT_STATUS_V3: {
@@ -1868,6 +1907,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
err = msm_lsm_ioctl_shared(substream, cmd, arg);
break;
}
+done:
+ mutex_unlock(&prtd->lsm_api_lock);
return err;
}
@@ -1884,6 +1925,7 @@ static int msm_lsm_open(struct snd_pcm_substream *substream)
__func__);
return -ENOMEM;
}
+ mutex_init(&prtd->lsm_api_lock);
spin_lock_init(&prtd->event_lock);
init_waitqueue_head(&prtd->event_wait);
init_waitqueue_head(&prtd->period_wait);
@@ -2043,6 +2085,7 @@ static int msm_lsm_close(struct snd_pcm_substream *substream)
kfree(prtd->event_status);
prtd->event_status = NULL;
spin_unlock_irqrestore(&prtd->event_lock, flags);
+ mutex_destroy(&prtd->lsm_api_lock);
kfree(prtd);
runtime->private_data = NULL;
@@ -2199,26 +2242,26 @@ static int msm_lsm_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_TX;
+ int be_id = ucontrol->value.integer.value[3];
+ int ret = 0;
int app_type;
int acdb_dev_id;
int sample_rate;
- pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
- if ((fe_id < MSM_FRONTEND_DAI_LSM1) ||
- (fe_id > MSM_FRONTEND_DAI_LSM8)) {
- pr_err("%s: Received out of bounds fe_id %llu\n",
- __func__, fe_id);
- return -EINVAL;
- }
-
app_type = ucontrol->value.integer.value[0];
acdb_dev_id = ucontrol->value.integer.value[1];
sample_rate = ucontrol->value.integer.value[2];
- pr_debug("%s: app_type- %d acdb_dev_id- %d sample_rate- %d session_type- %d\n",
- __func__, app_type, acdb_dev_id, sample_rate, SESSION_TYPE_TX);
- msm_pcm_routing_reg_stream_app_type_cfg(fe_id, app_type,
- acdb_dev_id, sample_rate, SESSION_TYPE_TX);
+ pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+ __func__, fe_id, session_type, be_id,
+ app_type, acdb_dev_id, sample_rate);
+ ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+ be_id, app_type,
+ acdb_dev_id, sample_rate);
+ if (ret < 0)
+ pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+ __func__, ret);
return 0;
}
@@ -2227,21 +2270,17 @@ static int msm_lsm_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_TX;
+ int be_id = ucontrol->value.integer.value[3];
int ret = 0;
int app_type;
int acdb_dev_id;
int sample_rate;
- pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
- if ((fe_id < MSM_FRONTEND_DAI_LSM1) ||
- (fe_id > MSM_FRONTEND_DAI_LSM8)) {
- pr_err("%s: Received out of bounds fe_id %llu\n",
- __func__, fe_id);
- return -EINVAL;
- }
-
- ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, SESSION_TYPE_TX,
- &app_type, &acdb_dev_id, &sample_rate);
+ ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+ be_id, &app_type,
+ &acdb_dev_id,
+ &sample_rate);
if (ret < 0) {
pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
__func__, ret);
@@ -2251,8 +2290,8 @@ static int msm_lsm_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[0] = app_type;
ucontrol->value.integer.value[1] = acdb_dev_id;
ucontrol->value.integer.value[2] = sample_rate;
- pr_debug("%s: fedai_id %llu, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
- __func__, fe_id, SESSION_TYPE_TX,
+ pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+ __func__, fe_id, session_type, be_id,
app_type, acdb_dev_id, sample_rate);
done:
return ret;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
index 8d43186..ab9b310 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
@@ -682,6 +682,7 @@ static int msm_afe_close(struct snd_pcm_substream *substream)
mutex_unlock(&prtd->lock);
prtd->prepared--;
kfree(prtd);
+ runtime->private_data = NULL;
return 0;
}
static int msm_afe_prepare(struct snd_pcm_substream *substream)
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c
index c22f348..f668e95 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c
@@ -557,48 +557,45 @@ static int msm_pcm_playback_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_RX;
+ int be_id = ucontrol->value.integer.value[3];
+ int ret = 0;
int app_type;
int acdb_dev_id;
int sample_rate = 48000;
- pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
- if (fe_id >= MSM_FRONTEND_DAI_MAX) {
- pr_err("%s: Received out of bounds fe_id %llu\n",
- __func__, fe_id);
- return -EINVAL;
- }
-
app_type = ucontrol->value.integer.value[0];
acdb_dev_id = ucontrol->value.integer.value[1];
if (ucontrol->value.integer.value[2] != 0)
sample_rate = ucontrol->value.integer.value[2];
- pr_debug("%s: app_type- %d acdb_dev_id- %d sample_rate- %d session_type- %d\n",
- __func__, app_type, acdb_dev_id, sample_rate, SESSION_TYPE_RX);
- msm_pcm_routing_reg_stream_app_type_cfg(fe_id, app_type,
- acdb_dev_id, sample_rate, SESSION_TYPE_RX);
+ pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+ __func__, fe_id, session_type, be_id,
+ app_type, acdb_dev_id, sample_rate);
+ ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+ be_id, app_type,
+ acdb_dev_id, sample_rate);
+ if (ret < 0)
+ pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+ __func__, ret);
- return 0;
+ return ret;
}
static int msm_pcm_playback_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_RX;
+ int be_id = ucontrol->value.integer.value[3];
int ret = 0;
int app_type;
int acdb_dev_id;
int sample_rate;
- pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
- if (fe_id >= MSM_FRONTEND_DAI_MAX) {
- pr_err("%s: Received out of bounds fe_id %llu\n",
- __func__, fe_id);
- ret = -EINVAL;
- goto done;
- }
-
- ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, SESSION_TYPE_RX,
- &app_type, &acdb_dev_id, &sample_rate);
+ ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+ be_id, &app_type,
+ &acdb_dev_id,
+ &sample_rate);
if (ret < 0) {
pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
__func__, ret);
@@ -608,8 +605,8 @@ static int msm_pcm_playback_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[0] = app_type;
ucontrol->value.integer.value[1] = acdb_dev_id;
ucontrol->value.integer.value[2] = sample_rate;
- pr_debug("%s: fedai_id %llu, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
- __func__, fe_id, SESSION_TYPE_RX,
+ pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+ __func__, fe_id, session_type, be_id,
app_type, acdb_dev_id, sample_rate);
done:
return ret;
@@ -619,48 +616,45 @@ static int msm_pcm_capture_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_TX;
+ int be_id = ucontrol->value.integer.value[3];
+ int ret = 0;
int app_type;
int acdb_dev_id;
int sample_rate = 48000;
- pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
- if (fe_id >= MSM_FRONTEND_DAI_MAX) {
- pr_err("%s: Received out of bounds fe_id %llu\n",
- __func__, fe_id);
- return -EINVAL;
- }
-
app_type = ucontrol->value.integer.value[0];
acdb_dev_id = ucontrol->value.integer.value[1];
if (ucontrol->value.integer.value[2] != 0)
sample_rate = ucontrol->value.integer.value[2];
- pr_debug("%s: app_type- %d acdb_dev_id- %d sample_rate- %d session_type- %d\n",
- __func__, app_type, acdb_dev_id, sample_rate, SESSION_TYPE_TX);
- msm_pcm_routing_reg_stream_app_type_cfg(fe_id, app_type,
- acdb_dev_id, sample_rate, SESSION_TYPE_TX);
+ pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+ __func__, fe_id, session_type, be_id,
+ app_type, acdb_dev_id, sample_rate);
+ ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+ be_id, app_type,
+ acdb_dev_id, sample_rate);
+ if (ret < 0)
+ pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+ __func__, ret);
- return 0;
+ return ret;
}
static int msm_pcm_capture_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_TX;
+ int be_id = ucontrol->value.integer.value[3];
int ret = 0;
int app_type;
int acdb_dev_id;
int sample_rate;
- pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
- if (fe_id >= MSM_FRONTEND_DAI_MAX) {
- pr_err("%s: Received out of bounds fe_id %llu\n",
- __func__, fe_id);
- ret = -EINVAL;
- goto done;
- }
-
- ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, SESSION_TYPE_TX,
- &app_type, &acdb_dev_id, &sample_rate);
+ ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+ be_id, &app_type,
+ &acdb_dev_id,
+ &sample_rate);
if (ret < 0) {
pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
__func__, ret);
@@ -670,8 +664,8 @@ static int msm_pcm_capture_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[0] = app_type;
ucontrol->value.integer.value[1] = acdb_dev_id;
ucontrol->value.integer.value[2] = sample_rate;
- pr_debug("%s: fedai_id %llu, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
- __func__, fe_id, SESSION_TYPE_TX,
+ pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+ __func__, fe_id, session_type, be_id,
app_type, acdb_dev_id, sample_rate);
done:
return ret;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
index ecf194f..9b7c6fb 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
@@ -139,6 +139,17 @@ static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
.mask = 0,
};
+static unsigned long msm_pcm_fe_topology[MSM_FRONTEND_DAI_MAX];
+
+/* default value is DTS (i.e. read from the device tree) */
+static char const *msm_pcm_fe_topology_text[] = {
+ "DTS", "ULL", "ULL_PP", "LL" };
+
+static const struct soc_enum msm_pcm_fe_topology_enum[] = {
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(msm_pcm_fe_topology_text),
+ msm_pcm_fe_topology_text),
+};
+
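/*
 * Editor's note (illustrative, summarising msm_pcm_hw_params() below):
 *   "DTS"    -> keep pdata->perf_mode read from the device tree
 *   "ULL"    -> ULTRA_LOW_LATENCY_PCM_MODE
 *   "ULL_PP" -> ULL_POST_PROCESSING_PCM_MODE
 *   "LL"     -> LOW_LATENCY_PCM_MODE
 */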
static void event_handler(uint32_t opcode,
uint32_t token, uint32_t *payload, void *priv)
{
@@ -258,6 +269,8 @@ static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
uint16_t bits_per_sample;
int ret;
int dir = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? IN : OUT;
+ unsigned long topology;
+ int perf_mode;
pdata = (struct msm_plat_data *)
dev_get_drvdata(soc_prtd->platform->dev);
@@ -268,11 +281,24 @@ static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
return ret;
}
+ topology = msm_pcm_fe_topology[soc_prtd->dai_link->id];
+
+ if (!strcmp(msm_pcm_fe_topology_text[topology], "ULL_PP"))
+ perf_mode = ULL_POST_PROCESSING_PCM_MODE;
+ else if (!strcmp(msm_pcm_fe_topology_text[topology], "ULL"))
+ perf_mode = ULTRA_LOW_LATENCY_PCM_MODE;
+ else if (!strcmp(msm_pcm_fe_topology_text[topology], "LL"))
+ perf_mode = LOW_LATENCY_PCM_MODE;
+ else
+ /* use the default from the device tree */
+ perf_mode = pdata->perf_mode;
+
/* need to set LOW_LATENCY_PCM_MODE for capture since
* push mode does not support ULL
*/
prtd->audio_client->perf_mode = (dir == IN) ?
- pdata->perf_mode :
+ perf_mode :
LOW_LATENCY_PCM_MODE;
/* rate and channels are sent to audio driver */
@@ -544,6 +570,8 @@ static int msm_pcm_close(struct snd_pcm_substream *substream)
SNDRV_PCM_STREAM_PLAYBACK :
SNDRV_PCM_STREAM_CAPTURE);
kfree(prtd);
+ runtime->private_data = NULL;
+
return 0;
}
@@ -721,6 +749,269 @@ static int msm_pcm_add_chmap_control(struct snd_soc_pcm_runtime *rtd)
return 0;
}
+static int msm_pcm_fe_topology_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ const struct soc_enum *e = &msm_pcm_fe_topology_enum[0];
+
+ return snd_ctl_enum_info(uinfo, 1, e->items, e->texts);
+}
+
+static int msm_pcm_fe_topology_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ unsigned long fe_id = kcontrol->private_value;
+
+ if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+ pr_err("%s Received out of bound fe_id %lu\n", __func__, fe_id);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: %lu topology %s\n", __func__, fe_id,
+ msm_pcm_fe_topology_text[msm_pcm_fe_topology[fe_id]]);
+ ucontrol->value.enumerated.item[0] = msm_pcm_fe_topology[fe_id];
+ return 0;
+}
+
+static int msm_pcm_fe_topology_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ unsigned long fe_id = kcontrol->private_value;
+ unsigned int item;
+
+ if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+ pr_err("%s Received out of bound fe_id %lu\n", __func__, fe_id);
+ return -EINVAL;
+ }
+
+ item = ucontrol->value.enumerated.item[0];
+ if (item >= ARRAY_SIZE(msm_pcm_fe_topology_text)) {
+		pr_err("%s: Received out of bound topology %u for fe_id %lu\n",
+			__func__, item, fe_id);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: %lu new topology %s\n", __func__, fe_id,
+ msm_pcm_fe_topology_text[item]);
+ msm_pcm_fe_topology[fe_id] = item;
+ return 0;
+}
+
+static int msm_pcm_add_fe_topology_control(struct snd_soc_pcm_runtime *rtd)
+{
+ const char *mixer_ctl_name = "PCM_Dev";
+ const char *deviceNo = "NN";
+ const char *topo_text = "Topology";
+ char *mixer_str = NULL;
+ int ctl_len;
+ int ret;
+ struct snd_kcontrol_new topology_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .name = "?",
+ .info = msm_pcm_fe_topology_info,
+ .get = msm_pcm_fe_topology_get,
+ .put = msm_pcm_fe_topology_put,
+ .private_value = 0,
+ },
+ };
+
+ ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1 +
+ strlen(topo_text) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+
+ if (!mixer_str)
+ return -ENOMEM;
+
+ snprintf(mixer_str, ctl_len, "%s %d %s", mixer_ctl_name,
+ rtd->pcm->device, topo_text);
+
+ topology_control[0].name = mixer_str;
+ topology_control[0].private_value = rtd->dai_link->id;
+ ret = snd_soc_add_platform_controls(rtd->platform, topology_control,
+ ARRAY_SIZE(topology_control));
+ msm_pcm_fe_topology[rtd->dai_link->id] = 0;
+ kfree(mixer_str);
+ return ret;
+}
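/*
 * Editor's note (illustrative): the control registered above is named
 * "PCM_Dev <pcm device number> Topology" and defaults to index 0 ("DTS"),
 * i.e. the latency level from the device tree; the selected index is only
 * consulted at hw_params time via msm_pcm_fe_topology[].
 */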
+
+static int msm_pcm_playback_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_RX;
+ int be_id = ucontrol->value.integer.value[3];
+ int ret = 0;
+ int app_type;
+ int acdb_dev_id;
+ int sample_rate = 48000;
+
+ app_type = ucontrol->value.integer.value[0];
+ acdb_dev_id = ucontrol->value.integer.value[1];
+ if (ucontrol->value.integer.value[2] != 0)
+ sample_rate = ucontrol->value.integer.value[2];
+
+ ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+ be_id, app_type,
+ acdb_dev_id, sample_rate);
+ if (ret < 0)
+ pr_err("%s: msm_pcm_playback_app_type_cfg_ctl_put failed, err %d\n",
+ __func__, ret);
+
+ pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+ __func__, fe_id, session_type, be_id,
+ app_type, acdb_dev_id, sample_rate);
+ return ret;
+}
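/*
 * Editor's note - illustrative sketch, not part of the original file: value
 * layout shared by the "... App Type Cfg" controls added in this patch; the
 * backend id at index 3 is the new field these handlers consume. Enum name
 * and labels are hypothetical.
 */
enum app_type_cfg_slot_example {
	APP_TYPE_CFG_APP_TYPE		= 0,
	APP_TYPE_CFG_ACDB_DEV_ID	= 1,
	APP_TYPE_CFG_SAMPLE_RATE	= 2,	/* 0 keeps the 48000 default */
	APP_TYPE_CFG_BE_ID		= 3,
};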
+
+static int msm_pcm_playback_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_RX;
+ int be_id = ucontrol->value.integer.value[3];
+ int ret = 0;
+ int app_type;
+ int acdb_dev_id;
+ int sample_rate;
+
+ ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+ be_id, &app_type,
+ &acdb_dev_id,
+ &sample_rate);
+ if (ret < 0) {
+ pr_err("%s: msm_pcm_playback_app_type_cfg_ctl_get failed, err: %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ ucontrol->value.integer.value[0] = app_type;
+ ucontrol->value.integer.value[1] = acdb_dev_id;
+ ucontrol->value.integer.value[2] = sample_rate;
+
+ pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+ __func__, fe_id, session_type, be_id,
+ app_type, acdb_dev_id, sample_rate);
+done:
+ return ret;
+}
+
+static int msm_pcm_capture_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_TX;
+ int be_id = ucontrol->value.integer.value[3];
+ int ret = 0;
+ int app_type;
+ int acdb_dev_id;
+ int sample_rate = 48000;
+
+ app_type = ucontrol->value.integer.value[0];
+ acdb_dev_id = ucontrol->value.integer.value[1];
+ if (ucontrol->value.integer.value[2] != 0)
+ sample_rate = ucontrol->value.integer.value[2];
+
+ ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+ be_id, app_type,
+ acdb_dev_id, sample_rate);
+ if (ret < 0)
+ pr_err("%s: msm_pcm_capture_app_type_cfg_ctl_put failed, err: %d\n",
+ __func__, ret);
+
+ pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+ __func__, fe_id, session_type, be_id,
+ app_type, acdb_dev_id, sample_rate);
+
+ return ret;
+}
+
+static int msm_pcm_capture_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_TX;
+ int be_id = ucontrol->value.integer.value[3];
+ int ret = 0;
+ int app_type;
+ int acdb_dev_id;
+ int sample_rate;
+
+ ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+ be_id, &app_type,
+ &acdb_dev_id,
+ &sample_rate);
+ if (ret < 0) {
+ pr_err("%s: msm_pcm_capture_app_type_cfg_ctl_get failed, err: %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ ucontrol->value.integer.value[0] = app_type;
+ ucontrol->value.integer.value[1] = acdb_dev_id;
+ ucontrol->value.integer.value[2] = sample_rate;
+ pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+ __func__, fe_id, session_type, be_id,
+ app_type, acdb_dev_id, sample_rate);
+done:
+ return ret;
+}
+
+static int msm_pcm_add_app_type_controls(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_pcm *pcm = rtd->pcm;
+ struct snd_pcm_usr *app_type_info;
+ struct snd_kcontrol *kctl;
+ const char *playback_mixer_ctl_name = "Audio Stream";
+ const char *capture_mixer_ctl_name = "Audio Stream Capture";
+ const char *deviceNo = "NN";
+ const char *suffix = "App Type Cfg";
+ int ctl_len, ret = 0;
+
+ if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+ ctl_len = strlen(playback_mixer_ctl_name) + 1 +
+ strlen(deviceNo) + 1 +
+ strlen(suffix) + 1;
+ pr_debug("%s: Playback app type cntrl add\n", __func__);
+ ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ NULL, 1, ctl_len, rtd->dai_link->id,
+ &app_type_info);
+ if (ret < 0) {
+ pr_err("%s: playback app type cntrl add failed, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ kctl = app_type_info->kctl;
+ snprintf(kctl->id.name, ctl_len, "%s %d %s",
+ playback_mixer_ctl_name, rtd->pcm->device, suffix);
+ kctl->put = msm_pcm_playback_app_type_cfg_ctl_put;
+ kctl->get = msm_pcm_playback_app_type_cfg_ctl_get;
+ }
+
+ if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
+ ctl_len = strlen(capture_mixer_ctl_name) + 1 +
+ strlen(deviceNo) + 1 + strlen(suffix) + 1;
+ pr_debug("%s: Capture app type cntrl add\n", __func__);
+ ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_CAPTURE,
+ NULL, 1, ctl_len, rtd->dai_link->id,
+ &app_type_info);
+ if (ret < 0) {
+ pr_err("%s: capture app type cntrl add failed, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ kctl = app_type_info->kctl;
+ snprintf(kctl->id.name, ctl_len, "%s %d %s",
+ capture_mixer_ctl_name, rtd->pcm->device, suffix);
+ kctl->put = msm_pcm_capture_app_type_cfg_ctl_put;
+ kctl->get = msm_pcm_capture_app_type_cfg_ctl_get;
+ }
+
+ return 0;
+}
+
+
static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
@@ -741,6 +1032,19 @@ static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
pr_err("%s: Could not add pcm Volume Control %d\n",
__func__, ret);
}
+
+ ret = msm_pcm_add_fe_topology_control(rtd);
+ if (ret) {
+ pr_err("%s: Could not add pcm topology control %d\n",
+ __func__, ret);
+ }
+
+ ret = msm_pcm_add_app_type_controls(rtd);
+ if (ret) {
+		pr_err("%s: Could not add app type controls, err %d\n",
+ __func__, ret);
+ }
+
pcm->nonatomic = true;
exit:
return ret;
@@ -778,8 +1082,12 @@ static int msm_pcm_probe(struct platform_device *pdev)
rc = of_property_read_string(pdev->dev.of_node,
"qcom,latency-level", &latency_level);
- if (!rc && !strcmp(latency_level, "ultra"))
- perf_mode = ULTRA_LOW_LATENCY_PCM_MODE;
+ if (!rc) {
+ if (!strcmp(latency_level, "ultra"))
+ perf_mode = ULTRA_LOW_LATENCY_PCM_MODE;
+ else if (!strcmp(latency_level, "ull-pp"))
+ perf_mode = ULL_POST_PROCESSING_PCM_MODE;
+ }
}
pdata = devm_kzalloc(&pdev->dev,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index 45a3ce9..1799d0d 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -110,7 +110,7 @@ static struct snd_pcm_hardware msm_pcm_hardware_playback = {
/* Conventional and unconventional sample rate supported */
static unsigned int supported_sample_rates[] = {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
- 88200, 96000, 176400, 192000, 384000
+ 88200, 96000, 176400, 192000, 352800, 384000
};
static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
@@ -286,6 +286,7 @@ static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
struct msm_plat_data *pdata;
struct snd_pcm_hw_params *params;
int ret;
+ uint32_t fmt_type = FORMAT_LINEAR_PCM;
uint16_t bits_per_sample;
uint16_t sample_word_size;
@@ -334,38 +335,67 @@ static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
sample_word_size = 16;
break;
}
+ if (prtd->compress_enable) {
+ fmt_type = FORMAT_GEN_COMPR;
+ pr_debug("%s: Compressed enabled!\n", __func__);
+ ret = q6asm_open_write_compressed(prtd->audio_client, fmt_type,
+ COMPRESSED_PASSTHROUGH_GEN);
+ if (ret < 0) {
+ pr_err("%s: q6asm_open_write_compressed failed (%d)\n",
+ __func__, ret);
+ q6asm_audio_client_free(prtd->audio_client);
+ prtd->audio_client = NULL;
+ return -ENOMEM;
+ }
+ } else {
+ ret = q6asm_open_write_v4(prtd->audio_client,
+ fmt_type, bits_per_sample);
- ret = q6asm_open_write_v4(prtd->audio_client,
- FORMAT_LINEAR_PCM, bits_per_sample);
+ if (ret < 0) {
+ pr_err("%s: q6asm_open_write_v4 failed (%d)\n",
+ __func__, ret);
+ q6asm_audio_client_free(prtd->audio_client);
+ prtd->audio_client = NULL;
+ return -ENOMEM;
+ }
- if (ret < 0) {
- pr_err("%s: q6asm_open_write_v2 failed\n", __func__);
- q6asm_audio_client_free(prtd->audio_client);
- prtd->audio_client = NULL;
- return -ENOMEM;
+ ret = q6asm_send_cal(prtd->audio_client);
+ if (ret < 0)
+ pr_debug("%s : Send cal failed : %d", __func__, ret);
}
-
- ret = q6asm_send_cal(prtd->audio_client);
- if (ret < 0)
- pr_debug("%s : Send cal failed : %d", __func__, ret);
-
pr_debug("%s: session ID %d\n", __func__,
prtd->audio_client->session);
prtd->session_id = prtd->audio_client->session;
- ret = msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->id,
+
+ if (prtd->compress_enable) {
+ ret = msm_pcm_routing_reg_phy_compr_stream(
+ soc_prtd->dai_link->id,
+ prtd->audio_client->perf_mode,
+ prtd->session_id,
+ SNDRV_PCM_STREAM_PLAYBACK,
+ COMPRESSED_PASSTHROUGH_GEN);
+ } else {
+ ret = msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->id,
prtd->audio_client->perf_mode,
prtd->session_id, substream->stream);
+ }
if (ret) {
pr_err("%s: stream reg failed ret:%d\n", __func__, ret);
return ret;
}
-
- ret = q6asm_media_format_block_multi_ch_pcm_v4(
+ if (prtd->compress_enable) {
+ ret = q6asm_media_format_block_gen_compr(
+ prtd->audio_client, runtime->rate,
+ runtime->channels, !prtd->set_channel_map,
+ prtd->channel_map, bits_per_sample);
+ } else {
+ ret = q6asm_media_format_block_multi_ch_pcm_v4(
prtd->audio_client, runtime->rate,
runtime->channels, !prtd->set_channel_map,
prtd->channel_map, bits_per_sample,
sample_word_size, ASM_LITTLE_ENDIAN,
DEFAULT_QF);
+ }
if (ret < 0)
pr_info("%s: CMD Format block failed\n", __func__);
@@ -774,6 +804,8 @@ static int msm_pcm_playback_close(struct snd_pcm_substream *substream)
msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->id,
SNDRV_PCM_STREAM_PLAYBACK);
kfree(prtd);
+ runtime->private_data = NULL;
+
return 0;
}
@@ -879,6 +911,7 @@ static int msm_pcm_capture_close(struct snd_pcm_substream *substream)
msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->id,
SNDRV_PCM_STREAM_CAPTURE);
kfree(prtd);
+ runtime->private_data = NULL;
return 0;
}
@@ -1091,6 +1124,136 @@ static int msm_pcm_add_volume_control(struct snd_soc_pcm_runtime *rtd)
return 0;
}
+static int msm_pcm_compress_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 0x2000;
+ return 0;
+}
+
+static int msm_pcm_compress_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_platform *platform = snd_soc_component_to_platform(comp);
+ struct msm_plat_data *pdata = dev_get_drvdata(platform->dev);
+ struct snd_pcm_substream *substream;
+ struct msm_audio *prtd;
+
+ if (!pdata) {
+ pr_err("%s pdata is NULL\n", __func__);
+ return -ENODEV;
+ }
+ substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ if (!substream) {
+ pr_err("%s substream not found\n", __func__);
+ return -EINVAL;
+ }
+ if (!substream->runtime) {
+ pr_err("%s substream runtime not found\n", __func__);
+ return 0;
+ }
+ prtd = substream->runtime->private_data;
+ if (prtd)
+ ucontrol->value.integer.value[0] = prtd->compress_enable;
+ return 0;
+}
+
+static int msm_pcm_compress_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int rc = 0;
+ struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_platform *platform = snd_soc_component_to_platform(comp);
+ struct msm_plat_data *pdata = dev_get_drvdata(platform->dev);
+ struct snd_pcm_substream *substream;
+ struct msm_audio *prtd;
+ int compress = ucontrol->value.integer.value[0];
+
+ if (!pdata) {
+ pr_err("%s pdata is NULL\n", __func__);
+ return -ENODEV;
+ }
+ substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ pr_debug("%s: compress : 0x%x\n", __func__, compress);
+ if (!substream) {
+ pr_err("%s substream not found\n", __func__);
+ return -EINVAL;
+ }
+ if (!substream->runtime) {
+ pr_err("%s substream runtime not found\n", __func__);
+ return 0;
+ }
+ prtd = substream->runtime->private_data;
+ if (prtd) {
+ pr_debug("%s: setting compress flag to 0x%x\n",
+ __func__, compress);
+ prtd->compress_enable = compress;
+ }
+ return rc;
+}
+
+static int msm_pcm_add_compress_control(struct snd_soc_pcm_runtime *rtd)
+{
+ const char *mixer_ctl_name = "Playback ";
+ const char *mixer_ctl_end_name = " Compress";
+ const char *deviceNo = "NN";
+ char *mixer_str = NULL;
+ int ctl_len;
+ int ret = 0;
+ struct msm_plat_data *pdata;
+ struct snd_kcontrol_new pcm_compress_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "?",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_pcm_compress_ctl_info,
+ .get = msm_pcm_compress_ctl_get,
+ .put = msm_pcm_compress_ctl_put,
+ .private_value = 0,
+ }
+ };
+
+ if (!rtd) {
+ pr_err("%s: NULL rtd\n", __func__);
+ return -EINVAL;
+ }
+
+ ctl_len = strlen(mixer_ctl_name) + strlen(deviceNo) +
+ strlen(mixer_ctl_end_name) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+
+ if (!mixer_str)
+ return -ENOMEM;
+
+ snprintf(mixer_str, ctl_len, "%s%d%s", mixer_ctl_name,
+ rtd->pcm->device, mixer_ctl_end_name);
+
+ pcm_compress_control[0].name = mixer_str;
+ pcm_compress_control[0].private_value = rtd->dai_link->id;
+ pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+ pdata = dev_get_drvdata(rtd->platform->dev);
+ if (pdata) {
+ if (!pdata->pcm) {
+ pdata->pcm = rtd->pcm;
+ snd_soc_add_platform_controls(rtd->platform,
+ pcm_compress_control,
+ ARRAY_SIZE(pcm_compress_control));
+ pr_debug("%s: add control success plt = %pK\n",
+ __func__, rtd->platform);
+ }
+ } else {
+ pr_err("%s: NULL pdata\n", __func__);
+ ret = -EINVAL;
+ }
+ kfree(mixer_str);
+ return ret;
+}
+
static int msm_pcm_chmap_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1182,48 +1345,45 @@ static int msm_pcm_playback_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_RX;
+ int be_id = ucontrol->value.integer.value[3];
+ int ret = 0;
int app_type;
int acdb_dev_id;
int sample_rate = 48000;
- pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
- if (fe_id >= MSM_FRONTEND_DAI_MAX) {
- pr_err("%s Received out of bounds fe_id %llu\n",
- __func__, fe_id);
- return -EINVAL;
- }
-
app_type = ucontrol->value.integer.value[0];
acdb_dev_id = ucontrol->value.integer.value[1];
if (ucontrol->value.integer.value[2] != 0)
sample_rate = ucontrol->value.integer.value[2];
- pr_debug("%s: app_type- %d acdb_dev_id- %d sample_rate- %d session_type- %d\n",
- __func__, app_type, acdb_dev_id, sample_rate, SESSION_TYPE_RX);
- msm_pcm_routing_reg_stream_app_type_cfg(fe_id, app_type,
- acdb_dev_id, sample_rate, SESSION_TYPE_RX);
+ pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+ __func__, fe_id, session_type, be_id,
+ app_type, acdb_dev_id, sample_rate);
+ ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+ be_id, app_type,
+ acdb_dev_id, sample_rate);
+ if (ret < 0)
+ pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+ __func__, ret);
- return 0;
+ return ret;
}
static int msm_pcm_playback_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_RX;
+ int be_id = ucontrol->value.integer.value[3];
int ret = 0;
int app_type;
int acdb_dev_id;
int sample_rate;
- pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
- if (fe_id >= MSM_FRONTEND_DAI_MAX) {
- pr_err("%s Received out of bounds fe_id %llu\n",
- __func__, fe_id);
- ret = -EINVAL;
- goto done;
- }
-
- ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, SESSION_TYPE_RX,
- &app_type, &acdb_dev_id, &sample_rate);
+ ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+ be_id, &app_type,
+ &acdb_dev_id,
+ &sample_rate);
if (ret < 0) {
pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
__func__, ret);
@@ -1233,8 +1393,8 @@ static int msm_pcm_playback_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[0] = app_type;
ucontrol->value.integer.value[1] = acdb_dev_id;
ucontrol->value.integer.value[2] = sample_rate;
- pr_debug("%s: fedai_id %llu, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
- __func__, fe_id, SESSION_TYPE_RX,
+ pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+ __func__, fe_id, session_type, be_id,
app_type, acdb_dev_id, sample_rate);
done:
return ret;
@@ -1244,48 +1404,45 @@ static int msm_pcm_capture_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_TX;
+ int be_id = ucontrol->value.integer.value[3];
+ int ret = 0;
int app_type;
int acdb_dev_id;
int sample_rate = 48000;
- pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
- if (fe_id >= MSM_FRONTEND_DAI_MAX) {
- pr_err("%s: Received out of bounds fe_id %llu\n",
- __func__, fe_id);
- return -EINVAL;
- }
-
app_type = ucontrol->value.integer.value[0];
acdb_dev_id = ucontrol->value.integer.value[1];
if (ucontrol->value.integer.value[2] != 0)
sample_rate = ucontrol->value.integer.value[2];
- pr_debug("%s: app_type- %d acdb_dev_id- %d sample_rate- %d session_type- %d\n",
- __func__, app_type, acdb_dev_id, sample_rate, SESSION_TYPE_TX);
- msm_pcm_routing_reg_stream_app_type_cfg(fe_id, app_type,
- acdb_dev_id, sample_rate, SESSION_TYPE_TX);
+ pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
+ __func__, fe_id, session_type, be_id,
+ app_type, acdb_dev_id, sample_rate);
+ ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+ be_id, app_type,
+ acdb_dev_id, sample_rate);
+ if (ret < 0)
+ pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+ __func__, ret);
- return 0;
+ return ret;
}
static int msm_pcm_capture_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
u64 fe_id = kcontrol->private_value;
+ int session_type = SESSION_TYPE_TX;
+ int be_id = ucontrol->value.integer.value[3];
int ret = 0;
int app_type;
int acdb_dev_id;
int sample_rate;
- pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
- if (fe_id >= MSM_FRONTEND_DAI_MAX) {
- pr_err("%s: Received out of bounds fe_id %llu\n",
- __func__, fe_id);
- ret = -EINVAL;
- goto done;
- }
-
- ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, SESSION_TYPE_TX,
- &app_type, &acdb_dev_id, &sample_rate);
+ ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
+ be_id, &app_type,
+ &acdb_dev_id,
+ &sample_rate);
if (ret < 0) {
pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
__func__, ret);
@@ -1295,8 +1452,8 @@ static int msm_pcm_capture_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[0] = app_type;
ucontrol->value.integer.value[1] = acdb_dev_id;
ucontrol->value.integer.value[2] = sample_rate;
- pr_debug("%s: fedai_id %llu, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
- __func__, fe_id, SESSION_TYPE_TX,
+ pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+ __func__, fe_id, session_type, be_id,
app_type, acdb_dev_id, sample_rate);
done:
return ret;
@@ -1388,6 +1545,11 @@ static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
pr_err("%s: Could not add pcm Volume Control %d\n",
__func__, ret);
+ ret = msm_pcm_add_compress_control(rtd);
+ if (ret)
+ pr_err("%s: Could not add pcm Compress Control %d\n",
+ __func__, ret);
+
return ret;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h
index 5290d34..3b3f048 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h
@@ -109,6 +109,7 @@ struct msm_audio {
int cmd_interrupt;
bool meta_data_mode;
uint32_t volume;
+ bool compress_enable;
/* array of frame info */
struct msm_audio_in_frame_info in_frame_info[CAPTURE_MAX_NUM_PERIODS];
};
@@ -123,6 +124,7 @@ struct output_meta_data_st {
struct msm_plat_data {
int perf_mode;
+ struct snd_pcm *pcm;
};
#endif /*_MSM_PCM_H*/
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index a9e00cd..465634b 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -56,8 +56,6 @@
#define DS2_ADM_COPP_TOPOLOGY_ID 0xFFFFFFFF
#endif
-static int get_cal_path(int path_type);
-
static struct mutex routing_lock;
static struct cal_type_data *cal_data;
@@ -102,12 +100,15 @@ enum {
#define TERT_MI2S_TX_TEXT "TERT_MI2S_TX"
#define QUAT_MI2S_TX_TEXT "QUAT_MI2S_TX"
#define ADM_LSM_TX_TEXT "ADM_LSM_TX"
+#define INT3_MI2S_TX_TEXT "INT3_MI2S_TX"
+
#define LSM_FUNCTION_TEXT "LSM Function"
static const char * const lsm_port_text[] = {
"None",
SLIMBUS_0_TX_TEXT, SLIMBUS_1_TX_TEXT, SLIMBUS_2_TX_TEXT,
SLIMBUS_3_TX_TEXT, SLIMBUS_4_TX_TEXT, SLIMBUS_5_TX_TEXT,
- TERT_MI2S_TX_TEXT, QUAT_MI2S_TX_TEXT, ADM_LSM_TX_TEXT
+ TERT_MI2S_TX_TEXT, QUAT_MI2S_TX_TEXT, ADM_LSM_TX_TEXT,
+ INT3_MI2S_TX_TEXT
};
struct msm_pcm_route_bdai_pp_params {
@@ -123,6 +124,18 @@ static struct msm_pcm_route_bdai_pp_params
{DISPLAY_PORT_RX, 0, 0, 0},
};
+/*
+ * The be_dai_name_table is passed to the HAL so that it can specify the
+ * BE ID for the BE it wants to enable, based on the name. Thus there
+ * is a matching table and structure in the HAL that need to be updated
+ * if any changes to these are made.
+ */
+struct msm_pcm_route_bdai_name {
+ unsigned int be_id;
+ char be_name[LPASS_BE_NAME_MAX_LENGTH];
+};
+static struct msm_pcm_route_bdai_name be_dai_name_table[MSM_BACKEND_DAI_MAX];
+
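An illustrative aside, not part of this patch: the comment above describes the HAL-side consumer of be_dai_name_table. A minimal sketch of such a name-to-ID lookup, assuming the HAL has already copied the table out of the kernel control; the helper name is hypothetical.

static int hal_be_id_from_name(const struct msm_pcm_route_bdai_name *table,
			       int num_entries, const char *name)
{
	int i;

	/* Walk the (be_id, be_name) pairs copied from the kernel table */
	for (i = 0; i < num_entries; i++) {
		if (!strncmp(table[i].be_name, name,
			     LPASS_BE_NAME_MAX_LENGTH))
			return table[i].be_id;
	}

	return -1; /* no backend with this name */
}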
static int msm_routing_send_device_pp_params(int port_id, int copp_idx);
static int msm_routing_get_bit_width(unsigned int format)
@@ -130,6 +143,9 @@ static int msm_routing_get_bit_width(unsigned int format)
int bit_width;
switch (format) {
+ case SNDRV_PCM_FORMAT_S32_LE:
+ bit_width = 32;
+ break;
case SNDRV_PCM_FORMAT_S24_LE:
case SNDRV_PCM_FORMAT_S24_3LE:
bit_width = 24;
@@ -660,7 +676,7 @@ static unsigned long session_copp_map[MSM_FRONTEND_DAI_MAX][2]
static struct msm_pcm_routing_app_type_data app_type_cfg[MAX_APP_TYPES];
static struct msm_pcm_routing_app_type_data lsm_app_type_cfg[MAX_APP_TYPES];
static struct msm_pcm_stream_app_type_cfg
- fe_dai_app_type_cfg[MSM_FRONTEND_DAI_MAX][2];
+ fe_dai_app_type_cfg[MSM_FRONTEND_DAI_MAX][2][MSM_BACKEND_DAI_MAX];
/* The caller of this should acquire routing lock */
void msm_pcm_routing_get_bedai_info(int be_idx,
@@ -728,45 +744,64 @@ static bool is_mm_lsm_fe_id(int fe_id)
return rc;
}
-
-void msm_pcm_routing_reg_stream_app_type_cfg(int fedai_id, int app_type,
- int acdb_dev_id, int sample_rate, int session_type)
+int msm_pcm_routing_reg_stream_app_type_cfg(int fedai_id, int session_type,
+ int be_id, int app_type,
+ int acdb_dev_id, int sample_rate)
{
- pr_debug("%s: fedai_id %d, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
- __func__, fedai_id, session_type, app_type,
- acdb_dev_id, sample_rate);
+ int ret = 0;
+
+ pr_debug("%s: fedai_id %d, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+ __func__, fedai_id, session_type, be_id,
+ app_type, acdb_dev_id, sample_rate);
+
if (!is_mm_lsm_fe_id(fedai_id)) {
pr_err("%s: Invalid machine driver ID %d\n",
__func__, fedai_id);
- return;
+ ret = -EINVAL;
+ goto done;
}
if (session_type != SESSION_TYPE_RX &&
session_type != SESSION_TYPE_TX) {
pr_err("%s: Invalid session type %d\n",
__func__, session_type);
- return;
+ ret = -EINVAL;
+ goto done;
}
- fe_dai_app_type_cfg[fedai_id][session_type].app_type = app_type;
- fe_dai_app_type_cfg[fedai_id][session_type].acdb_dev_id = acdb_dev_id;
- fe_dai_app_type_cfg[fedai_id][session_type].sample_rate = sample_rate;
+ if (be_id < 0 || be_id >= MSM_BACKEND_DAI_MAX) {
+ pr_err("%s: Received out of bounds be_id %d\n",
+ __func__, be_id);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ fe_dai_app_type_cfg[fedai_id][session_type][be_id].app_type = app_type;
+ fe_dai_app_type_cfg[fedai_id][session_type][be_id].acdb_dev_id =
+ acdb_dev_id;
+ fe_dai_app_type_cfg[fedai_id][session_type][be_id].sample_rate =
+ sample_rate;
+
+done:
+ return ret;
}
/**
* msm_pcm_routing_get_stream_app_type_cfg
*
- * Receives fedai_id, session_type and populates app_type, acdb_dev_id, &
- * sample rate. Returns 0 on success. On failure returns
+ * Receives fedai_id, session_type, be_id, and populates app_type,
+ * acdb_dev_id, & sample rate. Returns 0 on success. On failure returns
* -EINVAL and does not alter passed values.
*
* fedai_id - Passed value, front end ID for which app type config is wanted
* session_type - Passed value, session type for which app type config
* is wanted
+ * be_id - Passed value, back end device id for which app type config is wanted
* app_type - Returned value, app type used by app type config
* acdb_dev_id - Returned value, ACDB device ID used by app type config
* sample_rate - Returned value, sample rate used by app type config
*/
int msm_pcm_routing_get_stream_app_type_cfg(int fedai_id, int session_type,
- int *app_type, int *acdb_dev_id, int *sample_rate)
+ int be_id, int *app_type,
+ int *acdb_dev_id, int *sample_rate)
{
int ret = 0;
@@ -788,18 +823,25 @@ int msm_pcm_routing_get_stream_app_type_cfg(int fedai_id, int session_type,
ret = -EINVAL;
goto done;
} else if (session_type != SESSION_TYPE_RX &&
- session_type != SESSION_TYPE_TX) {
+ session_type != SESSION_TYPE_TX) {
pr_err("%s: Invalid session type %d\n",
__func__, session_type);
ret = -EINVAL;
goto done;
+ } else if (be_id < 0 || be_id >= MSM_BACKEND_DAI_MAX) {
+ pr_err("%s: Received out of bounds be_id %d\n",
+ __func__, be_id);
+ ret = -EINVAL;
+ goto done;
}
- *app_type = fe_dai_app_type_cfg[fedai_id][session_type].app_type;
- *acdb_dev_id = fe_dai_app_type_cfg[fedai_id][session_type].acdb_dev_id;
- *sample_rate = fe_dai_app_type_cfg[fedai_id][session_type].sample_rate;
- pr_debug("%s: fedai_id %d, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
- __func__, fedai_id, session_type,
+ *app_type = fe_dai_app_type_cfg[fedai_id][session_type][be_id].app_type;
+ *acdb_dev_id =
+ fe_dai_app_type_cfg[fedai_id][session_type][be_id].acdb_dev_id;
+ *sample_rate =
+ fe_dai_app_type_cfg[fedai_id][session_type][be_id].sample_rate;
+
+ pr_debug("%s: fedai_id %d, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+ __func__, fedai_id, session_type, be_id,
*app_type, *acdb_dev_id, *sample_rate);
done:
return ret;
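An illustrative aside, not part of this patch: with the new be_id argument, app type configuration is stored and read back per (front end, session type, back end) tuple. A minimal usage sketch of the two routing APIs as changed above; the fedai_id/be_id values and the app type, ACDB ID and rate numbers below are placeholders, not values taken from the patch.

static int example_store_and_read_app_type(int fedai_id, int be_id)
{
	int app_type, acdb_dev_id, sample_rate;
	int ret;

	/* Register placeholder values for the RX session on this FE/BE pair */
	ret = msm_pcm_routing_reg_stream_app_type_cfg(fedai_id,
						      SESSION_TYPE_RX, be_id,
						      69936, 15, 48000);
	if (ret < 0)
		return ret;

	/* Read the same tuple back */
	return msm_pcm_routing_get_stream_app_type_cfg(fedai_id,
						       SESSION_TYPE_RX, be_id,
						       &app_type, &acdb_dev_id,
						       &sample_rate);
}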
@@ -857,25 +899,28 @@ static struct cal_block_data *msm_routing_find_topology(int path,
return msm_routing_find_topology_by_path(path);
}
-static int msm_routing_get_adm_topology(int path, int fedai_id,
- int session_type)
+static int msm_routing_get_adm_topology(int fedai_id, int session_type,
+ int be_id)
{
int topology = NULL_COPP_TOPOLOGY;
struct cal_block_data *cal_block = NULL;
int app_type = 0, acdb_dev_id = 0;
- pr_debug("%s\n", __func__);
- path = get_cal_path(path);
+ pr_debug("%s: fedai_id %d, session_type %d, be_id %d\n",
+ __func__, fedai_id, session_type, be_id);
+
if (cal_data == NULL)
goto done;
mutex_lock(&cal_data->lock);
- app_type = fe_dai_app_type_cfg[fedai_id][session_type].app_type;
- acdb_dev_id = fe_dai_app_type_cfg[fedai_id][session_type].acdb_dev_id;
+ app_type = fe_dai_app_type_cfg[fedai_id][session_type][be_id].app_type;
+ acdb_dev_id =
+ fe_dai_app_type_cfg[fedai_id][session_type][be_id].acdb_dev_id;
- cal_block = msm_routing_find_topology(path, app_type, acdb_dev_id);
+ cal_block = msm_routing_find_topology(session_type, app_type,
+ acdb_dev_id);
if (cal_block == NULL)
goto unlock;
@@ -905,8 +950,8 @@ static void msm_pcm_routing_build_matrix(int fedai_id, int sess_type,
int i, port_type, j, num_copps = 0;
struct route_payload payload;
- port_type = ((path_type == ADM_PATH_PLAYBACK ||
- path_type == ADM_PATH_COMPRESSED_RX) ?
+ port_type = ((path_type == ADM_PATH_PLAYBACK ||
+ path_type == ADM_PATH_COMPRESSED_RX) ?
MSM_AFE_PORT_TYPE_RX : MSM_AFE_PORT_TYPE_TX);
for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
@@ -921,6 +966,18 @@ static void msm_pcm_routing_build_matrix(int fedai_id, int sess_type,
payload.port_id[num_copps] =
msm_bedais[i].port_id;
payload.copp_idx[num_copps] = j;
+ payload.app_type[num_copps] =
+ fe_dai_app_type_cfg
+ [fedai_id][sess_type][i]
+ .app_type;
+ payload.acdb_dev_id[num_copps] =
+ fe_dai_app_type_cfg
+ [fedai_id][sess_type][i]
+ .acdb_dev_id;
+ payload.sample_rate[num_copps] =
+ fe_dai_app_type_cfg
+ [fedai_id][sess_type][i]
+ .sample_rate;
num_copps++;
}
}
@@ -930,12 +987,6 @@ static void msm_pcm_routing_build_matrix(int fedai_id, int sess_type,
if (num_copps) {
payload.num_copps = num_copps;
payload.session_id = fe_dai_map[fedai_id][sess_type].strm_id;
- payload.app_type =
- fe_dai_app_type_cfg[fedai_id][sess_type].app_type;
- payload.acdb_dev_id =
- fe_dai_app_type_cfg[fedai_id][sess_type].acdb_dev_id;
- payload.sample_rate =
- fe_dai_app_type_cfg[fedai_id][sess_type].sample_rate;
adm_matrix_map(path_type, payload, perf_mode, passthr_mode);
msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode);
}
@@ -1001,7 +1052,8 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
int dspst_id, int stream_type,
uint32_t passthr_mode)
{
- int i, j, session_type, path_type, port_type, topology, num_copps = 0;
+ int i, j, session_type, path_type, port_type, topology;
+ int num_copps = 0;
struct route_payload payload;
u32 channels, sample_rate;
u16 bit_width = 16;
@@ -1069,13 +1121,13 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
bit_width = msm_routing_get_bit_width(
msm_bedais[i].format);
app_type =
- fe_dai_app_type_cfg[fe_id][session_type].app_type;
+ fe_dai_app_type_cfg[fe_id][session_type][i].app_type;
if (app_type && is_lsm) {
app_type_idx =
msm_pcm_routing_get_lsm_app_type_idx(app_type);
sample_rate =
- fe_dai_app_type_cfg[fe_id][session_type].
- sample_rate;
+ fe_dai_app_type_cfg[fe_id][session_type][i]
+ .sample_rate;
bit_width =
lsm_app_type_cfg[app_type_idx].bit_width;
} else if (app_type) {
@@ -1083,19 +1135,21 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
msm_pcm_routing_get_app_type_idx(
app_type);
sample_rate =
- fe_dai_app_type_cfg[fe_id][session_type].sample_rate;
+ fe_dai_app_type_cfg[fe_id][session_type][i].sample_rate;
bit_width =
app_type_cfg[app_type_idx].bit_width;
} else {
sample_rate = msm_bedais[i].sample_rate;
}
acdb_dev_id =
- fe_dai_app_type_cfg[fe_id][session_type].acdb_dev_id;
- topology = msm_routing_get_adm_topology(path_type,
- fe_id, session_type);
-
- if (passthr_mode == COMPRESSED_PASSTHROUGH_DSD)
- topology = COMPRESS_PASSTHROUGH_NONE_TOPOLOGY;
+ fe_dai_app_type_cfg[fe_id][session_type][i].acdb_dev_id;
+ topology = msm_routing_get_adm_topology(fe_id,
+ session_type,
+ i);
+ if ((passthr_mode == COMPRESSED_PASSTHROUGH_DSD) ||
+     (passthr_mode == COMPRESSED_PASSTHROUGH_GEN))
+ topology = COMPRESSED_PASSTHROUGH_NONE_TOPOLOGY;
pr_debug("%s: Before adm open topology %d\n", __func__,
topology);
@@ -1130,10 +1184,24 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
payload.port_id[num_copps] =
msm_bedais[i].port_id;
payload.copp_idx[num_copps] = j;
+ payload.app_type[num_copps] =
+ fe_dai_app_type_cfg
+ [fe_id][session_type][i]
+ .app_type;
+ payload.acdb_dev_id[num_copps] =
+ fe_dai_app_type_cfg
+ [fe_id][session_type][i]
+ .acdb_dev_id;
+ payload.sample_rate[num_copps] =
+ fe_dai_app_type_cfg
+ [fe_id][session_type][i]
+ .sample_rate;
num_copps++;
}
}
- if (passthr_mode != COMPRESSED_PASSTHROUGH_DSD) {
+ if (passthr_mode != COMPRESSED_PASSTHROUGH_DSD &&
+     passthr_mode != COMPRESSED_PASSTHROUGH_GEN) {
msm_routing_send_device_pp_params(
msm_bedais[i].port_id,
copp_idx);
@@ -1143,12 +1211,6 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
if (num_copps) {
payload.num_copps = num_copps;
payload.session_id = fe_dai_map[fe_id][session_type].strm_id;
- payload.app_type =
- fe_dai_app_type_cfg[fe_id][session_type].app_type;
- payload.acdb_dev_id =
- fe_dai_app_type_cfg[fe_id][session_type].acdb_dev_id;
- payload.sample_rate =
- fe_dai_app_type_cfg[fe_id][session_type].sample_rate;
adm_matrix_map(path_type, payload, perf_mode, passthr_mode);
msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode);
}
@@ -1247,22 +1309,24 @@ int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode,
msm_bedais[i].format);
app_type =
- fe_dai_app_type_cfg[fedai_id][session_type].app_type;
+ fe_dai_app_type_cfg[fedai_id][session_type][i].app_type;
if (app_type) {
app_type_idx =
msm_pcm_routing_get_app_type_idx(app_type);
sample_rate =
- fe_dai_app_type_cfg[fedai_id][session_type].
- sample_rate;
+ fe_dai_app_type_cfg[fedai_id][session_type][i]
+ .sample_rate;
bits_per_sample =
app_type_cfg[app_type_idx].bit_width;
} else
sample_rate = msm_bedais[i].sample_rate;
acdb_dev_id =
- fe_dai_app_type_cfg[fedai_id][session_type].acdb_dev_id;
- topology = msm_routing_get_adm_topology(path_type,
- fedai_id, session_type);
+ fe_dai_app_type_cfg[fedai_id][session_type][i]
+ .acdb_dev_id;
+ topology = msm_routing_get_adm_topology(fedai_id,
+ session_type,
+ i);
copp_idx = adm_open(msm_bedais[i].port_id, path_type,
sample_rate, channels, topology,
perf_mode, bits_per_sample,
@@ -1293,6 +1357,18 @@ int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode,
payload.port_id[num_copps] =
msm_bedais[i].port_id;
payload.copp_idx[num_copps] = j;
+ payload.app_type[num_copps] =
+ fe_dai_app_type_cfg
+ [fedai_id][session_type]
+ [i].app_type;
+ payload.acdb_dev_id[num_copps] =
+ fe_dai_app_type_cfg
+ [fedai_id][session_type]
+ [i].acdb_dev_id;
+ payload.sample_rate[num_copps] =
+ fe_dai_app_type_cfg
+ [fedai_id][session_type]
+ [i].sample_rate;
num_copps++;
}
}
@@ -1307,12 +1383,6 @@ int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode,
if (num_copps) {
payload.num_copps = num_copps;
payload.session_id = fe_dai_map[fedai_id][session_type].strm_id;
- payload.app_type =
- fe_dai_app_type_cfg[fedai_id][session_type].app_type;
- payload.acdb_dev_id =
- fe_dai_app_type_cfg[fedai_id][session_type].acdb_dev_id;
- payload.sample_rate =
- fe_dai_app_type_cfg[fedai_id][session_type].sample_rate;
adm_matrix_map(path_type, payload, perf_mode, passthr_mode);
msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode);
}
@@ -1491,30 +1561,31 @@ static void msm_pcm_routing_process_audio(u16 reg, u16 val, int set)
msm_bedais[reg].format);
app_type =
- fe_dai_app_type_cfg[val][session_type].app_type;
+ fe_dai_app_type_cfg[val][session_type][reg].app_type;
if (app_type && is_lsm) {
app_type_idx =
msm_pcm_routing_get_lsm_app_type_idx(app_type);
sample_rate =
- fe_dai_app_type_cfg[val][session_type].
- sample_rate;
+ fe_dai_app_type_cfg[val][session_type][reg]
+ .sample_rate;
bits_per_sample =
lsm_app_type_cfg[app_type_idx].bit_width;
} else if (app_type) {
app_type_idx =
msm_pcm_routing_get_app_type_idx(app_type);
sample_rate =
- fe_dai_app_type_cfg[val][session_type].
- sample_rate;
+ fe_dai_app_type_cfg[val][session_type][reg]
+ .sample_rate;
bits_per_sample =
app_type_cfg[app_type_idx].bit_width;
} else
sample_rate = msm_bedais[reg].sample_rate;
- topology = msm_routing_get_adm_topology(path_type, val,
- session_type);
+ topology = msm_routing_get_adm_topology(val,
+ session_type,
+ reg);
acdb_dev_id =
- fe_dai_app_type_cfg[val][session_type].acdb_dev_id;
+ fe_dai_app_type_cfg[val][session_type][reg].acdb_dev_id;
copp_idx = adm_open(msm_bedais[reg].port_id, path_type,
sample_rate, channels, topology,
fdai->perf_mode, bits_per_sample,
@@ -1853,6 +1924,68 @@ static int msm_routing_put_voice_stub_mixer(struct snd_kcontrol *kcontrol,
return 1;
}
+/*
+ * Return the mapping between port ID and backend ID to enable the AFE callback
+ * to determine the acdb_dev_id from the port id
+ */
+int msm_pcm_get_be_id_from_port_id(int port_id)
+{
+ int i;
+ int be_id = -EINVAL;
+
+ for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+ if (msm_bedais[i].port_id == port_id) {
+ be_id = i;
+ break;
+ }
+ }
+
+ return be_id;
+}
+
+/*
+ * Return the registered dev_acdb_id given a port ID to enable identifying the
+ * correct AFE calibration information by comparing the header information.
+ */
+static int msm_pcm_get_dev_acdb_id_by_port_id(int port_id)
+{
+ int acdb_id = -EINVAL;
+ int i = 0;
+ int session;
+ int port_type = afe_get_port_type(port_id);
+ int be_id = msm_pcm_get_be_id_from_port_id(port_id);
+
+ pr_debug("%s:port_id %d be_id %d, port_type 0x%x\n",
+ __func__, port_id, be_id, port_type);
+
+ if (port_type == MSM_AFE_PORT_TYPE_TX) {
+ session = SESSION_TYPE_TX;
+ } else if (port_type == MSM_AFE_PORT_TYPE_RX) {
+ session = SESSION_TYPE_RX;
+ } else {
+ pr_err("%s: Invalid port type %d\n", __func__, port_type);
+ acdb_id = -EINVAL;
+ goto exit;
+ }
+
+ if (be_id < 0) {
+ pr_err("%s: Error getting backend id %d\n", __func__, be_id);
+ goto exit;
+ }
+
+ mutex_lock(&routing_lock);
+ i = find_first_bit(&msm_bedais[be_id].fe_sessions[0],
+ MSM_FRONTEND_DAI_MAX);
+ if (i < MSM_FRONTEND_DAI_MAX)
+ acdb_id = fe_dai_app_type_cfg[i][session][be_id].acdb_dev_id;
+
+ pr_debug("%s: FE[%d] session[%d] BE[%d] acdb_id(%d)\n",
+ __func__, i, session, be_id, acdb_id);
+ mutex_unlock(&routing_lock);
+exit:
+ return acdb_id;
+}
+
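An illustrative aside, not part of this patch: together, the two helpers above let the AFE driver resolve port_id -> be_id -> acdb_dev_id once the routing callback is registered (see the afe_set_routing_callback call added further down). A hypothetical wrapper on the AFE side might cache the result per port before sending calibration; rt_cb and dev_acdb_id are the struct afe_ctl fields added by this patch, while the wrapper itself is only a sketch.

static void afe_cache_dev_acdb_id(int port_id)
{
	int index = q6audio_get_port_index(port_id);

	if (index < 0 || index >= AFE_MAX_PORTS)
		return;

	/* rt_cb is the routing callback registered by msm-pcm-routing-v2 */
	if (this_afe.rt_cb)
		this_afe.dev_acdb_id[index] = this_afe.rt_cb(port_id);
}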
static int msm_routing_get_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -2196,6 +2329,9 @@ static int msm_routing_lsm_port_put(struct snd_kcontrol *kcontrol,
case 9:
lsm_port = ADM_LSM_PORT_ID;
break;
+ case 10:
+ lsm_port = AFE_PORT_ID_INT3_MI2S_TX;
+ break;
default:
pr_err("Default lsm port");
break;
@@ -2224,17 +2360,21 @@ static int msm_routing_lsm_func_get(struct snd_kcontrol *kcontrol,
return -EINVAL;
}
- /*Check for Tertiary TX port*/
- if (!strcmp(kcontrol->id.name, lsm_port_text[7])) {
- ucontrol->value.integer.value[0] = MADSWAUDIO;
- return 0;
- }
-
port_id = i * 2 + 1 + SLIMBUS_0_RX;
- if (!strcmp(kcontrol->id.name, lsm_port_text[8]))
+ /*Check for Tertiary/Quaternary/INT3 TX port*/
+ if (strnstr(kcontrol->id.name, lsm_port_text[7],
+ strlen(lsm_port_text[7])))
+ port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+
+ if (strnstr(kcontrol->id.name, lsm_port_text[8],
+ strlen(lsm_port_text[8])))
port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+ if (strnstr(kcontrol->id.name, lsm_port_text[10],
+ strlen(lsm_port_text[10])))
+ port_id = AFE_PORT_ID_INT3_MI2S_TX;
+
mad_type = afe_port_get_mad_type(port_id);
pr_debug("%s: port_id 0x%x, mad_type %d\n", __func__, port_id,
mad_type);
@@ -2301,17 +2441,19 @@ static int msm_routing_lsm_func_put(struct snd_kcontrol *kcontrol,
return -EINVAL;
}
- /*Check for Tertiary TX port*/
+ /*Check for Tertiary/Quaternary/INT3 TX port*/
if (strnstr(kcontrol->id.name, lsm_port_text[7],
- strlen(lsm_port_text[7]))) {
+ strlen(lsm_port_text[7])))
port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
- mad_type = MAD_SW_AUDIO;
- }
if (strnstr(kcontrol->id.name, lsm_port_text[8],
strlen(lsm_port_text[8])))
port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+ if (strnstr(kcontrol->id.name, lsm_port_text[10],
+ strlen(lsm_port_text[10])))
+ port_id = AFE_PORT_ID_INT3_MI2S_TX;
+
pr_debug("%s: port_id 0x%x, mad_type %d\n", __func__, port_id,
mad_type);
return afe_port_set_mad_type(port_id, mad_type);
@@ -2631,14 +2773,9 @@ static int msm_routing_ec_ref_rx_put(struct snd_kcontrol *kcontrol,
struct snd_soc_dapm_widget_list *wlist =
dapm_kcontrol_get_wlist(kcontrol);
struct snd_soc_dapm_widget *widget = wlist->widgets[0];
- int mux = ucontrol->value.enumerated.item[0];
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
struct snd_soc_dapm_update *update = NULL;
- if (mux >= e->items) {
- pr_err("%s: Invalid mux value %d\n", __func__, mux);
- return -EINVAL;
- }
mutex_lock(&routing_lock);
switch (ucontrol->value.integer.value[0]) {
@@ -2741,7 +2878,8 @@ static int msm_routing_ec_ref_rx_put(struct snd_kcontrol *kcontrol,
pr_debug("%s: msm_route_ec_ref_rx = %d\n",
__func__, msm_route_ec_ref_rx);
mutex_unlock(&routing_lock);
- snd_soc_dapm_mux_update_power(widget->dapm, kcontrol, mux, e, update);
+ snd_soc_dapm_mux_update_power(widget->dapm, kcontrol,
+ msm_route_ec_ref_rx, e, update);
return 0;
}
@@ -9112,6 +9250,9 @@ static const struct snd_kcontrol_new lsm1_mixer_controls[] = {
SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
};
static const struct snd_kcontrol_new lsm2_mixer_controls[] = {
@@ -9136,6 +9277,9 @@ static const struct snd_kcontrol_new lsm2_mixer_controls[] = {
SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
};
static const struct snd_kcontrol_new lsm3_mixer_controls[] = {
@@ -9160,6 +9304,9 @@ static const struct snd_kcontrol_new lsm3_mixer_controls[] = {
SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
};
static const struct snd_kcontrol_new lsm4_mixer_controls[] = {
@@ -9184,6 +9331,9 @@ static const struct snd_kcontrol_new lsm4_mixer_controls[] = {
SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
};
static const struct snd_kcontrol_new lsm5_mixer_controls[] = {
@@ -9208,6 +9358,9 @@ static const struct snd_kcontrol_new lsm5_mixer_controls[] = {
SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
};
static const struct snd_kcontrol_new lsm6_mixer_controls[] = {
@@ -9232,6 +9385,9 @@ static const struct snd_kcontrol_new lsm6_mixer_controls[] = {
SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
};
static const struct snd_kcontrol_new lsm7_mixer_controls[] = {
@@ -9256,6 +9412,9 @@ static const struct snd_kcontrol_new lsm7_mixer_controls[] = {
SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
};
static const struct snd_kcontrol_new lsm8_mixer_controls[] = {
@@ -9280,6 +9439,9 @@ static const struct snd_kcontrol_new lsm8_mixer_controls[] = {
SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
};
static const struct snd_kcontrol_new slim_fm_switch_mixer_controls =
@@ -9394,6 +9556,8 @@ static const struct snd_kcontrol_new lsm_controls[] = {
msm_routing_lsm_func_get, msm_routing_lsm_func_put),
SOC_ENUM_EXT(QUAT_MI2S_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+ SOC_ENUM_EXT(INT3_MI2S_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
+ msm_routing_lsm_func_get, msm_routing_lsm_func_put),
/* kcontrol of lsm_port */
SOC_ENUM_EXT("LSM1 Port", lsm_port_enum,
msm_routing_lsm_port_get,
@@ -9860,6 +10024,9 @@ static int msm_audio_sound_focus_derive_port_id(struct snd_kcontrol *kcontrol,
} else if (!strcmp(kcontrol->id.name + strlen(prefix),
"TERT_MI2S")) {
*port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+ } else if (!strcmp(kcontrol->id.name + strlen(prefix),
+ "INT3_MI2S")) {
+ *port_id = AFE_PORT_ID_INT3_MI2S_TX;
} else {
pr_err("%s: mixer ctl name=%s, could not derive valid port id\n",
__func__, kcontrol->id.name);
@@ -10064,6 +10231,36 @@ static const struct snd_kcontrol_new msm_source_tracking_controls[] = {
.info = msm_source_tracking_info,
.get = msm_audio_source_tracking_get,
},
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Sound Focus Voice Tx INT3_MI2S",
+ .info = msm_sound_focus_info,
+ .get = msm_voice_sound_focus_get,
+ .put = msm_voice_sound_focus_put,
+ },
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Source Tracking Voice Tx INT3_MI2S",
+ .info = msm_source_tracking_info,
+ .get = msm_voice_source_tracking_get,
+ },
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Sound Focus Audio Tx INT3_MI2S",
+ .info = msm_sound_focus_info,
+ .get = msm_audio_sound_focus_get,
+ .put = msm_audio_sound_focus_put,
+ },
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Source Tracking Audio Tx INT3_MI2S",
+ .info = msm_source_tracking_info,
+ .get = msm_audio_source_tracking_get,
+ },
};
static int spkr_prot_put_vi_lch_port(struct snd_kcontrol *kcontrol,
@@ -11527,11 +11724,14 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SLIMBUS_6_RX", NULL, "SLIMBUS_6_RX Audio Mixer"},
{"SLIMBUS_7_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_7_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
{"SLIMBUS_7_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
{"SLIMBUS_7_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
{"SLIMBUS_7_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SLIMBUS_7_RX Audio Mixer", "MultiMedia6", "MM_DL6"},
{"SLIMBUS_7_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
{"SLIMBUS_7_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SLIMBUS_7_RX Audio Mixer", "MultiMedia9", "MM_DL9"},
{"SLIMBUS_7_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
{"SLIMBUS_7_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
{"SLIMBUS_7_RX Audio Mixer", "MultiMedia12", "MM_DL12"},
@@ -12018,6 +12218,42 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"QUAT_TDM_RX_0", NULL, "QUAT_TDM_RX_0 Audio Mixer"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0 Audio Mixer"},
+
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0 Audio Mixer"},
+
{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -12927,7 +13163,6 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SLIM4_UL_HL", NULL, "SLIMBUS_4_TX"},
{"SLIM8_UL_HL", NULL, "SLIMBUS_8_TX"},
-
{"LSM1 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
{"LSM1 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
{"LSM1 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
@@ -12935,6 +13170,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"LSM1 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
{"LSM1 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
{"LSM1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"LSM1 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"LSM1_UL_HL", NULL, "LSM1 Mixer"},
{"LSM2 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
@@ -12944,6 +13180,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"LSM2 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
{"LSM2 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
{"LSM2 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"LSM2 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"LSM2_UL_HL", NULL, "LSM2 Mixer"},
@@ -12954,6 +13191,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"LSM3 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
{"LSM3 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
{"LSM3 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"LSM3 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"LSM3_UL_HL", NULL, "LSM3 Mixer"},
@@ -12964,6 +13202,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"LSM4 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
{"LSM4 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
{"LSM4 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"LSM4 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"LSM4_UL_HL", NULL, "LSM4 Mixer"},
{"LSM5 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
@@ -12973,6 +13212,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"LSM5 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
{"LSM5 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
{"LSM5 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"LSM5 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"LSM5_UL_HL", NULL, "LSM5 Mixer"},
{"LSM6 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
@@ -13891,21 +14131,21 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
bedai->format);
app_type =
- fe_dai_app_type_cfg[i][session_type].app_type;
+ fe_dai_app_type_cfg[i][session_type][be_id].app_type;
if (app_type && is_lsm) {
app_type_idx =
msm_pcm_routing_get_lsm_app_type_idx(app_type);
sample_rate =
- fe_dai_app_type_cfg[i][session_type].
- sample_rate;
+ fe_dai_app_type_cfg[i][session_type][be_id]
+ .sample_rate;
bits_per_sample =
lsm_app_type_cfg[app_type_idx].bit_width;
} else if (app_type) {
app_type_idx =
msm_pcm_routing_get_app_type_idx(app_type);
sample_rate =
- fe_dai_app_type_cfg[i][session_type].
- sample_rate;
+ fe_dai_app_type_cfg[i][session_type]
+ [be_id].sample_rate;
bits_per_sample =
app_type_cfg[app_type_idx].bit_width;
} else
@@ -13919,9 +14159,9 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
else
channels = bedai->adm_override_ch;
acdb_dev_id =
- fe_dai_app_type_cfg[i][session_type].acdb_dev_id;
- topology = msm_routing_get_adm_topology(path_type, i,
- session_type);
+ fe_dai_app_type_cfg[i][session_type][be_id].acdb_dev_id;
+ topology = msm_routing_get_adm_topology(i, session_type,
+ be_id);
copp_idx = adm_open(bedai->port_id, path_type,
sample_rate, channels, topology,
fdai->perf_mode, bits_per_sample,
@@ -14208,6 +14448,65 @@ static const struct snd_kcontrol_new aptx_dec_license_controls[] = {
msm_aptx_dec_license_control_put),
};
+static int msm_routing_be_dai_name_table_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = sizeof(be_dai_name_table);
+ return 0;
+}
+
+static int msm_routing_be_dai_name_table_tlv_get(struct snd_kcontrol *kcontrol,
+ unsigned int __user *bytes,
+ unsigned int size)
+{
+ int i;
+ int ret;
+
+ if (size < sizeof(be_dai_name_table)) {
+ pr_err("%s: invalid size %d requested, returning\n",
+ __func__, size);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * Fill be_dai_name_table from msm_bedais table to reduce code changes
+ * needed when adding new backends
+ */
+ for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+ be_dai_name_table[i].be_id = i;
+ strlcpy(be_dai_name_table[i].be_name,
+ msm_bedais[i].name,
+ LPASS_BE_NAME_MAX_LENGTH);
+ }
+
+ ret = copy_to_user(bytes, &be_dai_name_table,
+ sizeof(be_dai_name_table));
+ if (ret) {
+ pr_err("%s: failed to copy be_dai_name_table\n", __func__);
+ ret = -EFAULT;
+ }
+
+done:
+ return ret;
+}
+
+static const struct snd_kcontrol_new
+ msm_routing_be_dai_name_table_mixer_controls[] = {
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+ SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
+ .info = msm_routing_be_dai_name_table_info,
+ .name = "Backend DAI Name Table",
+ .tlv.c = snd_soc_bytes_tlv_callback,
+ .private_value = (unsigned long) &(struct soc_bytes_ext) {
+ .max = sizeof(be_dai_name_table),
+ .get = msm_routing_be_dai_name_table_tlv_get,
+ }
+ },
+};
+
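An illustrative aside, not part of this patch: one way user space could read the table exposed by the TLV bytes control above, using alsa-lib. The card name, buffer sizing and the exact TLV header layout returned by the read are assumptions here.

#include <alsa/asoundlib.h>

static int read_be_dai_name_table(unsigned int *buf, unsigned int buf_bytes)
{
	snd_ctl_t *ctl;
	snd_ctl_elem_id_t *id;
	int ret;

	ret = snd_ctl_open(&ctl, "hw:0", 0);
	if (ret < 0)
		return ret;

	snd_ctl_elem_id_alloca(&id);
	snd_ctl_elem_id_set_interface(id, SND_CTL_ELEM_IFACE_MIXER);
	snd_ctl_elem_id_set_name(id, "Backend DAI Name Table");

	/* buf receives a TLV header followed by the raw table bytes */
	ret = snd_ctl_elem_tlv_read(ctl, id, buf, buf_bytes);
	snd_ctl_close(ctl);
	return ret;
}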
static const struct snd_pcm_ops msm_routing_pcm_ops = {
.hw_params = msm_pcm_routing_hw_params,
.close = msm_pcm_routing_close,
@@ -14260,6 +14559,10 @@ static int msm_routing_probe(struct snd_soc_platform *platform)
device_pp_params_mixer_controls,
ARRAY_SIZE(device_pp_params_mixer_controls));
+ snd_soc_add_platform_controls(platform,
+ msm_routing_be_dai_name_table_mixer_controls,
+ ARRAY_SIZE(msm_routing_be_dai_name_table_mixer_controls));
+
msm_dts_eagle_add_controls(platform);
snd_soc_add_platform_controls(platform, msm_source_tracking_controls,
@@ -14335,15 +14638,6 @@ int msm_routing_check_backend_enabled(int fedai_id)
return 0;
}
-static int get_cal_path(int path_type)
-{
- if (path_type == ADM_PATH_PLAYBACK ||
- path_type == ADM_PATH_COMPRESSED_RX)
- return RX_DEVICE;
- else
- return TX_DEVICE;
-}
-
static int msm_routing_set_cal(int32_t cal_type,
size_t data_size, void *data)
{
@@ -14401,6 +14695,11 @@ static int __init msm_soc_routing_platform_init(void)
if (msm_routing_init_cal_data())
pr_err("%s: could not init cal data!\n", __func__);
+ afe_set_routing_callback(
+ (routing_cb)msm_pcm_get_dev_acdb_id_by_port_id);
+
+ memset(&be_dai_name_table, 0, sizeof(be_dai_name_table));
+
return platform_driver_register(&msm_routing_pcm_driver);
}
module_init(msm_soc_routing_platform_init);
@@ -14408,6 +14707,8 @@ module_init(msm_soc_routing_platform_init);
static void __exit msm_soc_routing_platform_exit(void)
{
msm_routing_delete_cal_data();
+ memset(&be_dai_name_table, 0, sizeof(be_dai_name_table));
+ mutex_destroy(&routing_lock);
platform_driver_unregister(&msm_routing_pcm_driver);
}
module_exit(msm_soc_routing_platform_exit);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index dc1d0e0..fcd155e 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -13,6 +13,12 @@
#define _MSM_PCM_ROUTING_H
#include <sound/apr_audio-v2.h>
+/*
+ * These names are used by the HAL to specify the BE. If any changes are
+ * made to the string names or the max name length, corresponding
+ * changes need to be made in the HAL to ensure they still match.
+ */
+#define LPASS_BE_NAME_MAX_LENGTH 24
#define LPASS_BE_PRI_I2S_RX "PRIMARY_I2S_RX"
#define LPASS_BE_PRI_I2S_TX "PRIMARY_I2S_TX"
#define LPASS_BE_SLIMBUS_0_RX "SLIMBUS_0_RX"
@@ -64,6 +70,7 @@
#define LPASS_BE_SLIMBUS_3_TX "SLIMBUS_3_TX"
#define LPASS_BE_SLIMBUS_4_RX "SLIMBUS_4_RX"
#define LPASS_BE_SLIMBUS_4_TX "SLIMBUS_4_TX"
+#define LPASS_BE_SLIMBUS_TX_VI "SLIMBUS_TX_VI"
#define LPASS_BE_SLIMBUS_5_RX "SLIMBUS_5_RX"
#define LPASS_BE_SLIMBUS_5_TX "SLIMBUS_5_TX"
#define LPASS_BE_SLIMBUS_6_RX "SLIMBUS_6_RX"
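An illustrative aside, not part of this patch: because the HAL copies these strings into LPASS_BE_NAME_MAX_LENGTH-sized be_name fields, an overlong name could be caught at build time with a check along these lines; the helper below is hypothetical and not present in the patch.

#include <linux/bug.h>

static inline void lpass_be_name_len_check(void)
{
	/* sizeof() on the string literal includes the terminating NUL */
	BUILD_BUG_ON(sizeof(LPASS_BE_SLIMBUS_TX_VI) > LPASS_BE_NAME_MAX_LENGTH);
}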
@@ -468,8 +475,10 @@ void msm_pcm_routing_get_fedai_info(int fe_idx, int sess_type,
void msm_pcm_routing_acquire_lock(void);
void msm_pcm_routing_release_lock(void);
-void msm_pcm_routing_reg_stream_app_type_cfg(int fedai_id, int app_type,
- int acdb_dev_id, int sample_rate, int session_type);
+int msm_pcm_routing_reg_stream_app_type_cfg(int fedai_id, int session_type,
+ int be_id, int app_type,
+ int acdb_dev_id, int sample_rate);
int msm_pcm_routing_get_stream_app_type_cfg(int fedai_id, int session_type,
- int *app_type, int *acdb_dev_id, int *sample_rate);
+ int be_id, int *app_type,
+ int *acdb_dev_id, int *sample_rate);
#endif /*_MSM_PCM_ROUTING_H*/
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
index 1a739a4..02225f0 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
@@ -815,20 +815,25 @@ static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
if (prtd->mode == MODE_PCM) {
ret = copy_from_user(&buf_node->frame.voc_pkt,
buf, count);
+ if (ret) {
+ pr_err("%s: copy from user failed %d\n",
+ __func__, ret);
+ return -EFAULT;
+ }
buf_node->frame.pktlen = count;
} else {
ret = copy_from_user(&buf_node->frame,
buf, count);
+ if (ret) {
+ pr_err("%s: copy from user failed %d\n",
+ __func__, ret);
+ return -EFAULT;
+ }
if (buf_node->frame.pktlen >= count)
buf_node->frame.pktlen = count -
(sizeof(buf_node->frame.frm_hdr) +
sizeof(buf_node->frame.pktlen));
}
- if (ret) {
- pr_err("%s: copy from user failed %d\n",
- __func__, ret);
- return -EFAULT;
- }
spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
list_add_tail(&buf_node->list, &prtd->in_queue);
spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
diff --git a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
index 4815895..c60b27f 100644
--- a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
@@ -405,6 +405,7 @@ static int msm_afe_lb_vol_ctrl;
static int msm_afe_sec_mi2s_lb_vol_ctrl;
static int msm_afe_tert_mi2s_lb_vol_ctrl;
static int msm_afe_quat_mi2s_lb_vol_ctrl;
+static int msm_afe_slimbus_7_lb_vol_ctrl;
static int msm_afe_slimbus_8_lb_vol_ctrl;
static const DECLARE_TLV_DB_LINEAR(fm_rx_vol_gain, 0, INT_RX_VOL_MAX_STEPS);
static const DECLARE_TLV_DB_LINEAR(afe_lb_vol_gain, 0, INT_RX_VOL_MAX_STEPS);
@@ -477,6 +478,29 @@ static int msm_qti_pp_set_tert_mi2s_lb_vol_mixer(struct snd_kcontrol *kcontrol,
return 0;
}
+static int msm_qti_pp_get_slimbus_7_lb_vol_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = msm_afe_slimbus_7_lb_vol_ctrl;
+ return 0;
+}
+
+static int msm_qti_pp_set_slimbus_7_lb_vol_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret = afe_loopback_gain(SLIMBUS_7_TX,
+ ucontrol->value.integer.value[0]);
+
+ if (ret)
+ pr_err("%s: failed to set LB vol for SLIMBUS_7_TX, err %d\n",
+ __func__, ret);
+ else
+ msm_afe_slimbus_7_lb_vol_ctrl =
+ ucontrol->value.integer.value[0];
+
+ return ret;
+}
+
static int msm_qti_pp_get_slimbus_8_lb_vol_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -861,6 +885,14 @@ static const struct snd_kcontrol_new tert_mi2s_lb_vol_mixer_controls[] = {
msm_qti_pp_set_tert_mi2s_lb_vol_mixer, afe_lb_vol_gain),
};
+static const struct snd_kcontrol_new slimbus_7_lb_vol_mixer_controls[] = {
+ SOC_SINGLE_EXT_TLV("SLIMBUS_7 LOOPBACK Volume", SND_SOC_NOPM, 0,
+ INT_RX_VOL_GAIN, 0,
+ msm_qti_pp_get_slimbus_7_lb_vol_mixer,
+ msm_qti_pp_set_slimbus_7_lb_vol_mixer,
+ afe_lb_vol_gain),
+};
+
static const struct snd_kcontrol_new slimbus_8_lb_vol_mixer_controls[] = {
SOC_SINGLE_EXT_TLV("SLIMBUS_8 LOOPBACK Volume", SND_SOC_NOPM, 0,
INT_RX_VOL_GAIN, 0, msm_qti_pp_get_slimbus_8_lb_vol_mixer,
@@ -1065,6 +1097,9 @@ void msm_qti_pp_add_controls(struct snd_soc_platform *platform)
snd_soc_add_platform_controls(platform, tert_mi2s_lb_vol_mixer_controls,
ARRAY_SIZE(tert_mi2s_lb_vol_mixer_controls));
+ snd_soc_add_platform_controls(platform, slimbus_7_lb_vol_mixer_controls,
+ ARRAY_SIZE(slimbus_7_lb_vol_mixer_controls));
+
snd_soc_add_platform_controls(platform, slimbus_8_lb_vol_mixer_controls,
ARRAY_SIZE(slimbus_8_lb_vol_mixer_controls));
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 113fdae..90d640d 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -2833,8 +2833,8 @@ int adm_matrix_map(int path, struct route_payload payload_map, int perf_mode,
[port_idx][copp_idx]),
get_cal_path(path),
payload_map.session_id,
- payload_map.app_type,
- payload_map.acdb_dev_id);
+ payload_map.app_type[i],
+ payload_map.acdb_dev_id[i]);
if (!test_bit(ADM_STATUS_CALIBRATION_REQUIRED,
(void *)&this_adm.copp.adm_status[port_idx]
@@ -2845,9 +2845,9 @@ int adm_matrix_map(int path, struct route_payload payload_map, int perf_mode,
}
send_adm_cal(payload_map.port_id[i], copp_idx,
get_cal_path(path), perf_mode,
- payload_map.app_type,
- payload_map.acdb_dev_id,
- payload_map.sample_rate);
+ payload_map.app_type[i],
+ payload_map.acdb_dev_id[i],
+ payload_map.sample_rate[i]);
/* ADM COPP calibration is already sent */
clear_bit(ADM_STATUS_CALIBRATION_REQUIRED,
(void *)&this_adm.copp.
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index af91731..176b8aa 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -113,12 +113,15 @@ struct afe_ctl {
struct audio_cal_info_sp_ex_vi_ftm_cfg ex_ftm_cfg;
struct afe_sp_th_vi_get_param_resp th_vi_resp;
struct afe_sp_ex_vi_get_param_resp ex_vi_resp;
+ struct afe_av_dev_drift_get_param_resp av_dev_drift_resp;
int vi_tx_port;
int vi_rx_port;
uint32_t afe_sample_rates[AFE_MAX_PORTS];
struct aanc_data aanc_info;
struct mutex afe_cmd_lock;
int set_custom_topology;
+ int dev_acdb_id[AFE_MAX_PORTS];
+ routing_cb rt_cb;
};
static atomic_t afe_ports_mad_type[SLIMBUS_PORT_LAST - SLIMBUS_0_RX];
@@ -188,6 +191,38 @@ static void afe_callback_debug_print(struct apr_client_data *data)
__func__, data->opcode, data->payload_size);
}
+static void av_dev_drift_afe_cb_handler(uint32_t *payload,
+ uint32_t payload_size)
+{
+ u32 param_id;
+ struct afe_av_dev_drift_get_param_resp *resp =
+ (struct afe_av_dev_drift_get_param_resp *) payload;
+
+ if (!(&(resp->pdata))) {
+ pr_err("%s: Error: resp pdata is NULL\n", __func__);
+ return;
+ }
+
+ param_id = resp->pdata.param_id;
+ if (param_id == AFE_PARAM_ID_DEV_TIMING_STATS) {
+ if (payload_size < sizeof(this_afe.av_dev_drift_resp)) {
+ pr_err("%s: Error: received size %d, resp size %zu\n",
+ __func__, payload_size,
+ sizeof(this_afe.av_dev_drift_resp));
+ return;
+ }
+ memcpy(&this_afe.av_dev_drift_resp, payload,
+ sizeof(this_afe.av_dev_drift_resp));
+ if (!this_afe.av_dev_drift_resp.status) {
+ atomic_set(&this_afe.state, 0);
+ } else {
+ pr_debug("%s: av_dev_drift_resp status: %d", __func__,
+ this_afe.av_dev_drift_resp.status);
+ atomic_set(&this_afe.state, -1);
+ }
+ }
+}
+
static int32_t sp_make_afe_callback(uint32_t *payload, uint32_t payload_size)
{
u32 param_id;
@@ -273,6 +308,7 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
mutex_lock(&this_afe.cal_data[AFE_CUST_TOPOLOGY_CAL]->lock);
this_afe.set_custom_topology = 1;
mutex_unlock(&this_afe.cal_data[AFE_CUST_TOPOLOGY_CAL]->lock);
+ rtac_clear_mapping(AFE_RTAC_CAL);
if (this_afe.apr) {
apr_reset(this_afe.apr);
@@ -307,10 +343,7 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
}
afe_callback_debug_print(data);
if (data->opcode == AFE_PORT_CMDRSP_GET_PARAM_V2) {
- u8 *payload = data->payload;
-
- if (rtac_make_afe_callback(data->payload, data->payload_size))
- return 0;
+ uint32_t *payload = data->payload;
if (!payload || (data->token >= AFE_MAX_PORTS)) {
pr_err("%s: Error: size %d payload %pK token %d\n",
@@ -318,9 +351,19 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
payload, data->token);
return -EINVAL;
}
- if (sp_make_afe_callback(data->payload, data->payload_size))
- return -EINVAL;
+ if (payload[2] == AFE_PARAM_ID_DEV_TIMING_STATS) {
+ av_dev_drift_afe_cb_handler(data->payload,
+ data->payload_size);
+ } else {
+ if (rtac_make_afe_callback(data->payload,
+ data->payload_size))
+ return 0;
+
+ if (sp_make_afe_callback(data->payload,
+ data->payload_size))
+ return -EINVAL;
+ }
wake_up(&this_afe.wait[data->token]);
} else if (data->payload_size) {
uint32_t *payload;
@@ -1180,6 +1223,7 @@ static int afe_send_hw_delay(u16 port_id, u32 rate)
pr_debug("%s:\n", __func__);
+ memset(&delay_entry, 0, sizeof(delay_entry));
delay_entry.sample_rate = rate;
if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_TX)
ret = afe_get_cal_hw_delay(TX_DEVICE, &delay_entry);
@@ -1247,6 +1291,10 @@ static struct cal_block_data *afe_find_cal_topo_id_by_port(
struct cal_block_data *cal_block = NULL;
int32_t path;
struct audio_cal_info_afe_top *afe_top;
+ int afe_port_index = q6audio_get_port_index(port_id);
+
+ if (afe_port_index < 0)
+ goto err_exit;
list_for_each_safe(ptr, next,
&cal_type->cal_blocks) {
@@ -1258,12 +1306,25 @@ static struct cal_block_data *afe_find_cal_topo_id_by_port(
afe_top =
(struct audio_cal_info_afe_top *)cal_block->cal_info;
if (afe_top->path == path) {
- pr_debug("%s: top_id:%x acdb_id:%d afe_port:%d\n",
- __func__, afe_top->topology, afe_top->acdb_id,
- q6audio_get_port_id(port_id));
- return cal_block;
+ if (this_afe.dev_acdb_id[afe_port_index] > 0) {
+ if (afe_top->acdb_id ==
+ this_afe.dev_acdb_id[afe_port_index]) {
+ pr_debug("%s: top_id:%x acdb_id:%d afe_port_id:%d\n",
+ __func__, afe_top->topology,
+ afe_top->acdb_id,
+ q6audio_get_port_id(port_id));
+ return cal_block;
+ }
+ } else {
+ pr_debug("%s: top_id:%x acdb_id:%d afe_port:%d\n",
+ __func__, afe_top->topology, afe_top->acdb_id,
+ q6audio_get_port_id(port_id));
+ return cal_block;
+ }
}
}
+
+err_exit:
return NULL;
}
@@ -1366,6 +1427,7 @@ static int afe_send_port_topology_id(u16 port_id)
}
this_afe.topology[index] = topology_id;
+ rtac_update_afe_topology(port_id);
done:
pr_debug("%s: AFE set topology id 0x%x enable for port 0x%x ret %d\n",
__func__, topology_id, port_id, ret);
@@ -1407,10 +1469,46 @@ static int remap_cal_data(struct cal_block_data *cal_block, int cal_index)
return ret;
}
+static struct cal_block_data *afe_find_cal(int cal_index, int port_id)
+{
+ struct list_head *ptr, *next;
+ struct cal_block_data *cal_block = NULL;
+ struct audio_cal_info_afe *afe_cal_info = NULL;
+ int afe_port_index = q6audio_get_port_index(port_id);
+
+ pr_debug("%s: cal_index %d port_id %d port_index %d\n", __func__,
+ cal_index, port_id, afe_port_index);
+ if (afe_port_index < 0) {
+ pr_err("%s: Error getting AFE port index %d\n",
+ __func__, afe_port_index);
+ goto exit;
+ }
+
+ list_for_each_safe(ptr, next,
+ &this_afe.cal_data[cal_index]->cal_blocks) {
+ cal_block = list_entry(ptr, struct cal_block_data, list);
+ afe_cal_info = cal_block->cal_info;
+ if ((afe_cal_info->acdb_id ==
+ this_afe.dev_acdb_id[afe_port_index]) &&
+ (afe_cal_info->sample_rate ==
+ this_afe.afe_sample_rates[afe_port_index])) {
+ pr_debug("%s: cal block is a match, size is %zd\n",
+ __func__, cal_block->cal_data.size);
+ goto exit;
+ }
+ }
+ pr_err("%s: no matching cal_block found\n", __func__);
+ cal_block = NULL;
+
+exit:
+ return cal_block;
+}
+
static void send_afe_cal_type(int cal_index, int port_id)
{
struct cal_block_data *cal_block = NULL;
int ret;
+ int afe_port_index = q6audio_get_port_index(port_id);
pr_debug("%s:\n", __func__);
@@ -1420,8 +1518,22 @@ static void send_afe_cal_type(int cal_index, int port_id)
goto done;
}
+ if (afe_port_index < 0) {
+ pr_err("%s: Error getting AFE port index %d\n",
+ __func__, afe_port_index);
+ goto done;
+ }
+
mutex_lock(&this_afe.cal_data[cal_index]->lock);
- cal_block = cal_utils_get_only_cal_block(this_afe.cal_data[cal_index]);
+
+ if (((cal_index == AFE_COMMON_RX_CAL) ||
+ (cal_index == AFE_COMMON_TX_CAL)) &&
+ (this_afe.dev_acdb_id[afe_port_index] > 0))
+ cal_block = afe_find_cal(cal_index, port_id);
+ else
+ cal_block = cal_utils_get_only_cal_block(
+ this_afe.cal_data[cal_index]);
+
if (cal_block == NULL) {
pr_err("%s cal_block not found!!\n", __func__);
goto unlock;
@@ -1933,7 +2045,8 @@ int afe_port_set_mad_type(u16 port_id, enum afe_mad_type mad_type)
{
int i;
- if (port_id == AFE_PORT_ID_TERTIARY_MI2S_TX) {
+ if (port_id == AFE_PORT_ID_TERTIARY_MI2S_TX ||
+ port_id == AFE_PORT_ID_INT3_MI2S_TX) {
mad_type = MAD_SW_AUDIO;
return 0;
}
@@ -1951,7 +2064,8 @@ enum afe_mad_type afe_port_get_mad_type(u16 port_id)
{
int i;
- if (port_id == AFE_PORT_ID_TERTIARY_MI2S_TX)
+ if (port_id == AFE_PORT_ID_TERTIARY_MI2S_TX ||
+ port_id == AFE_PORT_ID_INT3_MI2S_TX)
return MAD_SW_AUDIO;
i = port_id - SLIMBUS_0_RX;
@@ -2489,7 +2603,7 @@ int afe_send_custom_tdm_header_cfg(
}
int afe_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port,
- u32 rate)
+ u32 rate, u16 num_groups)
{
struct afe_audioif_config_command config;
int ret = 0;
@@ -2522,9 +2636,17 @@ int afe_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port,
return ret;
}
- /* Also send the topology id here: */
+ if ((index >= 0) && (index < AFE_MAX_PORTS)) {
+ this_afe.afe_sample_rates[index] = rate;
+
+ if (this_afe.rt_cb)
+ this_afe.dev_acdb_id[index] = this_afe.rt_cb(port_id);
+ }
+
+	/* Also send the topology id here if there are multiple groups: */
port_index = afe_get_port_index(port_id);
- if (!(this_afe.afe_cal_mode[port_index] == AFE_CAL_MODE_NONE)) {
+ if (!(this_afe.afe_cal_mode[port_index] == AFE_CAL_MODE_NONE) &&
+ num_groups > 1) {
/* One time call: only for first time */
afe_send_custom_topology();
afe_send_port_topology_id(port_id);
@@ -2586,11 +2708,14 @@ int afe_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port,
ret = -EINVAL;
goto fail_cmd;
}
-
- ret = afe_send_slot_mapping_cfg(&tdm_port->slot_mapping, port_id);
- if (ret < 0) {
- pr_err("%s: afe send failed %d\n", __func__, ret);
- goto fail_cmd;
+	/* slot mapping is not needed if there is only one group */
+ if (num_groups > 1) {
+ ret = afe_send_slot_mapping_cfg(&tdm_port->slot_mapping,
+ port_id);
+ if (ret < 0) {
+ pr_err("%s: afe send failed %d\n", __func__, ret);
+ goto fail_cmd;
+ }
}
if (tdm_port->custom_tdm_header.header_type) {
@@ -2616,6 +2741,11 @@ void afe_set_cal_mode(u16 port_id, enum afe_cal_mode afe_cal_mode)
this_afe.afe_cal_mode[port_index] = afe_cal_mode;
}
+void afe_set_routing_callback(routing_cb cb)
+{
+ this_afe.rt_cb = cb;
+}
+
int afe_port_send_usb_dev_param(u16 port_id, union afe_port_config *afe_config)
{
struct afe_usb_audio_dev_param_command config;
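Note on the new routing callback: afe_set_routing_callback() lets the routing layer supply a per-port ACDB device ID resolver; afe_tdm_port_start(), __afe_port_start() and afe_open() cache rt_cb(port_id) in dev_acdb_id[] so AFE_COMMON_RX/TX calibration can be matched per device in afe_find_cal(). A minimal sketch of the registration follows; the exact routing_cb typedef lives in a header outside this diff, and msm_routing_get_dev_acdb_id() is a hypothetical resolver, assumed to take the AFE port ID and return the configured ACDB device ID (0 keeps the old single-cal-block behaviour).

	/* Hedged sketch, not part of the patch: assumes routing_cb is int (*)(int). */
	static int msm_routing_get_dev_acdb_id(int port_id)
	{
		/*
		 * Return the ACDB device id configured for this back end,
		 * or 0 to fall back to cal_utils_get_only_cal_block().
		 */
		return 0;
	}

	static void msm_routing_register_afe_cb(void)
	{
		afe_set_routing_callback(msm_routing_get_dev_acdb_id);
	}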
@@ -2661,6 +2791,21 @@ int afe_port_send_usb_dev_param(u16 port_id, union afe_port_config *afe_config)
ret = -EINVAL;
goto exit;
}
+
+ config.pdata.param_id = AFE_PARAM_ID_USB_AUDIO_DEV_LPCM_FMT;
+ config.pdata.param_size = sizeof(config.lpcm_fmt);
+ config.lpcm_fmt.cfg_minor_version =
+ AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG;
+ config.lpcm_fmt.endian = afe_config->usb_audio.endian;
+
+ ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+ if (ret) {
+ pr_err("%s: AFE device param cmd LPCM_FMT failed %d\n",
+ __func__, ret);
+ ret = -EINVAL;
+ goto exit;
+ }
+
exit:
return ret;
}
@@ -2684,6 +2829,11 @@ static int q6afe_send_enc_config(u16 port_id,
}
memset(&config, 0, sizeof(config));
index = q6audio_get_port_index(port_id);
+ if (index < 0) {
+ pr_err("%s: Invalid index number: %d\n", __func__, index);
+ return -EINVAL;
+ }
+
config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
config.hdr.pkt_size = sizeof(config);
@@ -2838,6 +2988,13 @@ static int __afe_port_start(u16 port_id, union afe_port_config *afe_config,
return ret;
}
+ if ((index >= 0) && (index < AFE_MAX_PORTS)) {
+ this_afe.afe_sample_rates[index] = rate;
+
+ if (this_afe.rt_cb)
+ this_afe.dev_acdb_id[index] = this_afe.rt_cb(port_id);
+ }
+
mutex_lock(&this_afe.afe_cmd_lock);
/* Also send the topology id here: */
port_index = afe_get_port_index(port_id);
@@ -3041,7 +3198,6 @@ static int __afe_port_start(u16 port_id, union afe_port_config *afe_config,
port_index = afe_get_port_index(port_id);
if ((port_index >= 0) && (port_index < AFE_MAX_PORTS)) {
- this_afe.afe_sample_rates[port_index] = rate;
/*
* If afe_port_start() for tx port called before
* rx port, then aanc rx sample rate is zero. So,
@@ -3403,6 +3559,14 @@ int afe_open(u16 port_id,
pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
return -EINVAL;
}
+
+ if ((index >= 0) && (index < AFE_MAX_PORTS)) {
+ this_afe.afe_sample_rates[index] = rate;
+
+ if (this_afe.rt_cb)
+ this_afe.dev_acdb_id[index] = this_afe.rt_cb(port_id);
+ }
+
/* Also send the topology id here: */
afe_send_custom_topology(); /* One time call: only for first time */
afe_send_port_topology_id(port_id);
@@ -5378,6 +5542,7 @@ int afe_close(int port_id)
if ((port_index >= 0) && (port_index < AFE_MAX_PORTS)) {
this_afe.afe_sample_rates[port_index] = 0;
this_afe.topology[port_index] = 0;
+ this_afe.dev_acdb_id[port_index] = 0;
} else {
pr_err("%s: port %d\n", __func__, port_index);
ret = -EINVAL;
@@ -6046,6 +6211,88 @@ int afe_get_sp_ex_vi_ftm_data(struct afe_sp_ex_vi_get_param *ex_vi)
return ret;
}
+int afe_get_av_dev_drift(struct afe_param_id_dev_timing_stats *timing_stats,
+ u16 port)
+{
+ int ret = -EINVAL;
+ int index = 0;
+ struct afe_av_dev_drift_get_param av_dev_drift;
+
+ if (!timing_stats) {
+ pr_err("%s: Invalid params\n", __func__);
+ goto exit;
+ }
+
+ ret = q6audio_validate_port(port);
+ if (ret < 0) {
+ pr_err("%s: invalid port 0x%x ret %d\n", __func__, port, ret);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ index = q6audio_get_port_index(port);
+ if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: Invalid AFE port index[%d]\n",
+ __func__, index);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ memset(&av_dev_drift, 0, sizeof(struct afe_av_dev_drift_get_param));
+
+ av_dev_drift.hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ av_dev_drift.hdr.pkt_size = sizeof(av_dev_drift);
+ av_dev_drift.hdr.src_port = 0;
+ av_dev_drift.hdr.dest_port = 0;
+ av_dev_drift.hdr.token = index;
+ av_dev_drift.hdr.opcode = AFE_PORT_CMD_GET_PARAM_V2;
+ av_dev_drift.get_param.mem_map_handle = 0;
+ av_dev_drift.get_param.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ av_dev_drift.get_param.param_id = AFE_PARAM_ID_DEV_TIMING_STATS;
+ av_dev_drift.get_param.payload_address_lsw = 0;
+ av_dev_drift.get_param.payload_address_msw = 0;
+ av_dev_drift.get_param.payload_size = sizeof(av_dev_drift)
+ - sizeof(av_dev_drift.get_param) - sizeof(av_dev_drift.hdr);
+ av_dev_drift.get_param.port_id = q6audio_get_port_id(port);
+ av_dev_drift.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ av_dev_drift.pdata.param_id = AFE_PARAM_ID_DEV_TIMING_STATS;
+ av_dev_drift.pdata.param_size = sizeof(av_dev_drift.timing_stats);
+ atomic_set(&this_afe.status, 0);
+ atomic_set(&this_afe.state, 1);
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *)&av_dev_drift);
+ if (ret < 0) {
+ pr_err("%s: get param port 0x%x param id[0x%x] failed %d\n",
+ __func__, port, av_dev_drift.get_param.param_id, ret);
+ goto exit;
+ }
+
+ ret = wait_event_timeout(this_afe.wait[index],
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (atomic_read(&this_afe.status) > 0) {
+ pr_err("%s: config cmd failed [%s]\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&this_afe.status)));
+ ret = adsp_err_get_lnx_err_code(
+ atomic_read(&this_afe.status));
+ goto exit;
+ }
+
+ memcpy(timing_stats, &this_afe.av_dev_drift_resp.timing_stats,
+ sizeof(this_afe.av_dev_drift_resp.timing_stats));
+ ret = 0;
+exit:
+ return ret;
+}
+
int afe_spk_prot_get_calib_data(struct afe_spkr_prot_get_vi_calib *calib_resp)
{
int ret = -EINVAL;
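Note: afe_get_av_dev_drift() issues AFE_PORT_CMD_GET_PARAM_V2 for AFE_PARAM_ID_DEV_TIMING_STATS, waits on the per-port wait queue, and copies the response cached by av_dev_drift_afe_cb_handler() into the caller's buffer. A hedged usage sketch is below; the layout of struct afe_param_id_dev_timing_stats is defined in the UAPI header rather than in this diff, so only the call pattern is shown, with SLIMBUS_0_RX as an illustrative port.

	struct afe_param_id_dev_timing_stats stats;
	int rc;

	memset(&stats, 0, sizeof(stats));
	rc = afe_get_av_dev_drift(&stats, SLIMBUS_0_RX); /* illustrative port */
	if (rc)
		pr_err("AV/dev drift query failed: %d\n", rc);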
@@ -6861,6 +7108,8 @@ static int __init afe_init(void)
mutex_init(&this_afe.afe_cmd_lock);
for (i = 0; i < AFE_MAX_PORTS; i++) {
this_afe.afe_cal_mode[i] = AFE_CAL_MODE_DEFAULT;
+ this_afe.afe_sample_rates[i] = 0;
+ this_afe.dev_acdb_id[i] = 0;
init_waitqueue_head(&this_afe.wait[i]);
}
wakeup_source_init(&wl.ws, "spkr-prot");
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index ccb4038..b52c83b 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -2480,6 +2480,10 @@ int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format,
case FORMAT_DSD:
open.fmt_id = ASM_MEDIA_FMT_DSD;
break;
+ case FORMAT_GEN_COMPR:
+ open.fmt_id = ASM_MEDIA_FMT_GENERIC_COMPRESSED;
+ break;
+
default:
pr_err("%s: Invalid format[%d]\n", __func__, format);
rc = -EINVAL;
@@ -2489,7 +2493,8 @@ int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format,
* stream is not IEC 61937 or IEC 60958 packetizied
*/
if (passthrough_flag == COMPRESSED_PASSTHROUGH ||
- passthrough_flag == COMPRESSED_PASSTHROUGH_DSD) {
+ passthrough_flag == COMPRESSED_PASSTHROUGH_DSD ||
+ passthrough_flag == COMPRESSED_PASSTHROUGH_GEN) {
open.flags = 0x0;
pr_debug("%s: Flag 0 COMPRESSED_PASSTHROUGH\n", __func__);
} else if (passthrough_flag == COMPRESSED_PASSTHROUGH_CONVERT) {
@@ -2656,6 +2661,9 @@ static int __q6asm_open_write(struct audio_client *ac, uint32_t format,
case FORMAT_APTX:
open.dec_fmt_id = ASM_MEDIA_FMT_APTX;
break;
+ case FORMAT_GEN_COMPR:
+ open.dec_fmt_id = ASM_MEDIA_FMT_GENERIC_COMPRESSED;
+ break;
default:
pr_err("%s: Invalid format 0x%x\n", __func__, format);
rc = -EINVAL;
@@ -5184,6 +5192,82 @@ int q6asm_media_format_block_multi_ch_pcm_v4(struct audio_client *ac,
}
EXPORT_SYMBOL(q6asm_media_format_block_multi_ch_pcm_v4);
+/*
+ * q6asm_media_format_block_gen_compr - set up generic compress format params
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @use_default_chmap: true if default channel map to be used
+ * @channel_map: input channel map
+ * @bits_per_sample: bit width of gen compress stream
+ */
+int q6asm_media_format_block_gen_compr(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ bool use_default_chmap, char *channel_map,
+ uint16_t bits_per_sample)
+{
+ struct asm_generic_compressed_fmt_blk_t fmt;
+ u8 *channel_mapping;
+ int rc = 0;
+
+ pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]\n",
+ __func__, ac->session, rate,
+ channels, bits_per_sample);
+
+ memset(&fmt, 0, sizeof(fmt));
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+ sizeof(fmt.fmt_blk);
+ fmt.num_channels = channels;
+ fmt.bits_per_sample = bits_per_sample;
+ fmt.sampling_rate = rate;
+
+ channel_mapping = fmt.channel_mapping;
+
+ memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+ if (use_default_chmap) {
+ if (q6asm_map_channels(channel_mapping, channels, false)) {
+ pr_err("%s: map channels failed %d\n",
+ __func__, channels);
+ return -EINVAL;
+ }
+ } else {
+ memcpy(channel_mapping, channel_map,
+ PCM_FORMAT_MAX_NUM_CHANNEL);
+ }
+
+ atomic_set(&ac->cmd_state, -1);
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+ if (rc < 0) {
+		pr_err("%s: Command open failed %d\n", __func__, rc);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout. waited for format update\n", __func__);
+ rc = -ETIMEDOUT;
+ goto fail_cmd;
+ }
+
+ if (atomic_read(&ac->cmd_state) > 0) {
+ pr_err("%s: DSP returned error[%s]\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&ac->cmd_state)));
+ rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+ return rc;
+}
+EXPORT_SYMBOL(q6asm_media_format_block_gen_compr);
+
static int __q6asm_media_format_block_multi_aac(struct audio_client *ac,
struct asm_aac_cfg *cfg, int stream_id)
{
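A hedged example of using the new generic-compressed media format helper on an already opened audio_client (values are illustrative; passing a NULL channel_map is fine when use_default_chmap is true, since the map is only read in the non-default path):

	/* 48 kHz, stereo, 16 bits per sample, default channel mapping */
	rc = q6asm_media_format_block_gen_compr(ac, 48000, 2, true, NULL, 16);
	if (rc < 0)
		pr_err("%s: gen compr format block failed %d\n", __func__, rc);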
@@ -7365,9 +7449,13 @@ int q6asm_async_write(struct audio_client *ac,
else if (ac->io_mode == io_compressed ||
ac->io_mode == io_compressed_stream)
lbuf_phys_addr = (param->paddr - param->metadata_len);
- else
- lbuf_phys_addr = param->paddr;
-
+ else {
+ if (param->flags & SET_TIMESTAMP)
+ lbuf_phys_addr = param->paddr -
+ sizeof(struct snd_codec_metadata);
+ else
+ lbuf_phys_addr = param->paddr;
+ }
dev_vdbg(ac->dev, "%s: token[0x%x], buf_addr[%pK], buf_size[0x%x], ts_msw[0x%x], ts_lsw[0x%x], lbuf_phys_addr: 0x[%pK]\n",
__func__,
write.hdr.token, ¶m->paddr,
@@ -7833,16 +7921,18 @@ int q6asm_send_mtmx_strtr_window(struct audio_client *ac,
matrix.param.data_payload_addr_lsw = 0;
matrix.param.data_payload_addr_msw = 0;
matrix.param.mem_map_handle = 0;
- matrix.param.data_payload_size = sizeof(matrix) -
- sizeof(matrix.hdr) - sizeof(matrix.param);
+ matrix.param.data_payload_size =
+ sizeof(struct asm_stream_param_data_v2) +
+ sizeof(struct asm_session_mtmx_strtr_param_window_v2_t);
matrix.param.direction = 0; /* RX */
matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
matrix.data.param_id = param_id;
- matrix.data.param_size = matrix.param.data_payload_size -
- sizeof(matrix.data);
+ matrix.data.param_size =
+ sizeof(struct asm_session_mtmx_strtr_param_window_v2_t);
matrix.data.reserved = 0;
- matrix.window_lsw = window_param->window_lsw;
- matrix.window_msw = window_param->window_msw;
+ memcpy(&(matrix.config.window_param),
+ window_param,
+ sizeof(struct asm_session_mtmx_strtr_param_window_v2_t));
rc = apr_send_pkt(ac->apr, (uint32_t *) &matrix);
if (rc < 0) {
@@ -7872,7 +7962,177 @@ int q6asm_send_mtmx_strtr_window(struct audio_client *ac,
rc = 0;
fail_cmd:
return rc;
-};
+}
+
+int q6asm_send_mtmx_strtr_render_mode(struct audio_client *ac,
+ uint32_t render_mode)
+{
+ struct asm_mtmx_strtr_params matrix;
+ struct asm_session_mtmx_strtr_param_render_mode_t render_param;
+ int sz = 0;
+ int rc = 0;
+
+ pr_debug("%s: render mode is %d\n", __func__, render_mode);
+
+ if (!ac) {
+ pr_err("%s: audio client handle is NULL\n", __func__);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (ac->apr == NULL) {
+ pr_err("%s: ac->apr is NULL\n", __func__);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if ((render_mode != ASM_SESSION_MTMX_STRTR_PARAM_RENDER_DEFAULT) &&
+ (render_mode != ASM_SESSION_MTMX_STRTR_PARAM_RENDER_LOCAL_STC)) {
+ pr_err("%s: Invalid render mode %d\n", __func__, render_mode);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ memset(&render_param, 0,
+ sizeof(struct asm_session_mtmx_strtr_param_render_mode_t));
+ render_param.flags = render_mode;
+
+ memset(&matrix, 0, sizeof(struct asm_mtmx_strtr_params));
+ sz = sizeof(struct asm_mtmx_strtr_params);
+ q6asm_add_hdr(ac, &matrix.hdr, sz, TRUE);
+ atomic_set(&ac->cmd_state, -1);
+ matrix.hdr.opcode = ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2;
+
+ matrix.param.data_payload_addr_lsw = 0;
+ matrix.param.data_payload_addr_msw = 0;
+ matrix.param.mem_map_handle = 0;
+ matrix.param.data_payload_size =
+ sizeof(struct asm_stream_param_data_v2) +
+ sizeof(struct asm_session_mtmx_strtr_param_render_mode_t);
+ matrix.param.direction = 0; /* RX */
+ matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
+ matrix.data.param_id = ASM_SESSION_MTMX_STRTR_PARAM_RENDER_MODE_CMD;
+ matrix.data.param_size =
+ sizeof(struct asm_session_mtmx_strtr_param_render_mode_t);
+ matrix.data.reserved = 0;
+ memcpy(&(matrix.config.render_param),
+ &render_param,
+ sizeof(struct asm_session_mtmx_strtr_param_render_mode_t));
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &matrix);
+ if (rc < 0) {
+ pr_err("%s: Render mode send failed paramid [0x%x]\n",
+ __func__, matrix.data.param_id);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout, Render mode send paramid [0x%x]\n",
+ __func__, matrix.data.param_id);
+ rc = -ETIMEDOUT;
+ goto exit;
+ }
+
+ if (atomic_read(&ac->cmd_state) > 0) {
+ pr_err("%s: DSP returned error[%s]\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&ac->cmd_state)));
+ rc = adsp_err_get_lnx_err_code(
+ atomic_read(&ac->cmd_state));
+ goto exit;
+ }
+ rc = 0;
+exit:
+ return rc;
+}
+
+int q6asm_send_mtmx_strtr_clk_rec_mode(struct audio_client *ac,
+ uint32_t clk_rec_mode)
+{
+ struct asm_mtmx_strtr_params matrix;
+ struct asm_session_mtmx_strtr_param_clk_rec_t clk_rec_param;
+ int sz = 0;
+ int rc = 0;
+
+ pr_debug("%s: clk rec mode is %d\n", __func__, clk_rec_mode);
+
+ if (!ac) {
+ pr_err("%s: audio client handle is NULL\n", __func__);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (ac->apr == NULL) {
+ pr_err("%s: ac->apr is NULL\n", __func__);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if ((clk_rec_mode != ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_NONE) &&
+ (clk_rec_mode != ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_AUTO)) {
+ pr_err("%s: Invalid clk rec mode %d\n", __func__, clk_rec_mode);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ memset(&clk_rec_param, 0,
+ sizeof(struct asm_session_mtmx_strtr_param_clk_rec_t));
+ clk_rec_param.flags = clk_rec_mode;
+
+ memset(&matrix, 0, sizeof(struct asm_mtmx_strtr_params));
+ sz = sizeof(struct asm_mtmx_strtr_params);
+ q6asm_add_hdr(ac, &matrix.hdr, sz, TRUE);
+ atomic_set(&ac->cmd_state, -1);
+ matrix.hdr.opcode = ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2;
+
+ matrix.param.data_payload_addr_lsw = 0;
+ matrix.param.data_payload_addr_msw = 0;
+ matrix.param.mem_map_handle = 0;
+ matrix.param.data_payload_size =
+ sizeof(struct asm_stream_param_data_v2) +
+ sizeof(struct asm_session_mtmx_strtr_param_clk_rec_t);
+ matrix.param.direction = 0; /* RX */
+ matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
+ matrix.data.param_id = ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_CMD;
+ matrix.data.param_size =
+ sizeof(struct asm_session_mtmx_strtr_param_clk_rec_t);
+ matrix.data.reserved = 0;
+ memcpy(&(matrix.config.clk_rec_param),
+ &clk_rec_param,
+ sizeof(struct asm_session_mtmx_strtr_param_clk_rec_t));
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &matrix);
+ if (rc < 0) {
+ pr_err("%s: clk rec mode send failed paramid [0x%x]\n",
+ __func__, matrix.data.param_id);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout, clk rec mode send paramid [0x%x]\n",
+ __func__, matrix.data.param_id);
+ rc = -ETIMEDOUT;
+ goto exit;
+ }
+
+ if (atomic_read(&ac->cmd_state) > 0) {
+ pr_err("%s: DSP returned error[%s]\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&ac->cmd_state)));
+ rc = adsp_err_get_lnx_err_code(
+ atomic_read(&ac->cmd_state));
+ goto exit;
+ }
+ rc = 0;
+exit:
+ return rc;
+}
static int __q6asm_cmd(struct audio_client *ac, int cmd, uint32_t stream_id)
{
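The two new AV-sync matrix/stream-router setters accept only the enum values validated above. A hedged sketch of a playback session that renders against the local STC and lets the DSP recover the clock automatically (ac is an established audio_client):

	rc = q6asm_send_mtmx_strtr_render_mode(ac,
			ASM_SESSION_MTMX_STRTR_PARAM_RENDER_LOCAL_STC);
	if (!rc)
		rc = q6asm_send_mtmx_strtr_clk_rec_mode(ac,
				ASM_SESSION_MTMX_STRTR_PARAM_CLK_REC_AUTO);
	if (rc)
		pr_err("AV sync matrix/strtr param setup failed: %d\n", rc);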
diff --git a/sound/soc/msm/qdsp6v2/rtac.c b/sound/soc/msm/qdsp6v2/rtac.c
index 7a7db18..cd02501 100644
--- a/sound/soc/msm/qdsp6v2/rtac.c
+++ b/sound/soc/msm/qdsp6v2/rtac.c
@@ -400,6 +400,24 @@ void add_popp(u32 dev_idx, u32 port_id, u32 popp_id)
return;
}
+void rtac_update_afe_topology(u32 port_id)
+{
+ u32 i = 0;
+
+ mutex_lock(&rtac_adm_mutex);
+ for (i = 0; i < rtac_adm_data.num_of_dev; i++) {
+ if (rtac_adm_data.device[i].afe_port == port_id) {
+ rtac_adm_data.device[i].afe_topology =
+ afe_get_topology(port_id);
+ pr_debug("%s: port_id = 0x%x topology_id = 0x%x copp_id = %d\n",
+ __func__, port_id,
+ rtac_adm_data.device[i].afe_topology,
+ rtac_adm_data.device[i].copp);
+ }
+ }
+ mutex_unlock(&rtac_adm_mutex);
+}
+
void rtac_add_adm_device(u32 port_id, u32 copp_id, u32 path_id, u32 popp_id,
u32 app_type, u32 acdb_id)
{
diff --git a/sound/soc/msm/sdm660-common.c b/sound/soc/msm/sdm660-common.c
index e191683..f1fbce3 100644
--- a/sound/soc/msm/sdm660-common.c
+++ b/sound/soc/msm/sdm660-common.c
@@ -36,6 +36,11 @@ struct dev_config {
u32 channels;
};
+enum {
+ DP_RX_IDX,
+ EXT_DISP_RX_IDX_MAX,
+};
+
/* TDM default config */
static struct dev_config tdm_rx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
{ /* PRI TDM */
@@ -124,6 +129,10 @@ static struct dev_config tdm_tx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
}
};
+/* Default configuration of external display BE */
+static struct dev_config ext_disp_rx_cfg[] = {
+ [DP_RX_IDX] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+};
static struct dev_config usb_rx_cfg = {
.sample_rate = SAMPLING_RATE_48KHZ,
.bit_format = SNDRV_PCM_FORMAT_S16_LE,
@@ -251,6 +260,8 @@ static const char *const mi2s_ch_text[] = {"One", "Two", "Three", "Four",
"Eight"};
static char const *bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE",
"S32_LE"};
+static char const *mi2s_format_text[] = {"S16_LE", "S24_LE", "S24_3LE",
+ "S32_LE"};
static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four",
"Five", "Six", "Seven", "Eight"};
static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE", "S32_LE"};
@@ -264,7 +275,11 @@ static char const *usb_sample_rate_text[] = {"KHZ_8", "KHZ_11P025",
"KHZ_16", "KHZ_22P05",
"KHZ_32", "KHZ_44P1", "KHZ_48",
"KHZ_96", "KHZ_192", "KHZ_384"};
+static char const *ext_disp_bit_format_text[] = {"S16_LE", "S24_LE"};
+static char const *ext_disp_sample_rate_text[] = {"KHZ_48", "KHZ_96",
+ "KHZ_192"};
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_chs, ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(proxy_rx_chs, ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(prim_aux_pcm_rx_sample_rate, auxpcm_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(sec_aux_pcm_rx_sample_rate, auxpcm_rate_text);
@@ -282,6 +297,14 @@ static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_sample_rate, mi2s_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_tx_sample_rate, mi2s_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_sample_rate, mi2s_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_sample_rate, mi2s_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_format, mi2s_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_rx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_rx_chs, mi2s_ch_text);
@@ -294,8 +317,11 @@ static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_chs, usb_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_chs, usb_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_format, bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_format, ext_disp_bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_sample_rate, usb_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_sample_rate, usb_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_sample_rate,
+ ext_disp_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_chs, tdm_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_format, tdm_bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_sample_rate, tdm_sample_rate_text);
@@ -667,6 +693,54 @@ static int tdm_get_format_val(int format)
return value;
}
+static int mi2s_get_format(int value)
+{
+ int format = 0;
+
+ switch (value) {
+ case 0:
+ format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ case 1:
+ format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 2:
+ format = SNDRV_PCM_FORMAT_S24_3LE;
+ break;
+ case 3:
+ format = SNDRV_PCM_FORMAT_S32_LE;
+ break;
+ default:
+ format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ return format;
+}
+
+static int mi2s_get_format_value(int format)
+{
+ int value = 0;
+
+ switch (format) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ value = 0;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ value = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S24_3LE:
+ value = 2;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ value = 3;
+ break;
+ default:
+ value = 0;
+ break;
+ }
+ return value;
+}
+
static int tdm_rx_format_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1132,6 +1206,78 @@ static int mi2s_tx_sample_rate_get(struct snd_kcontrol *kcontrol,
return 0;
}
+static int mi2s_tx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ mi2s_tx_cfg[idx].bit_format =
+ mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d] _tx_format = %d, item = %d\n", __func__,
+ idx, mi2s_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int mi2s_tx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_get_format_value(mi2s_tx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+ idx, mi2s_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int mi2s_rx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ mi2s_rx_cfg[idx].bit_format =
+ mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d] _rx_format = %d, item = %d\n", __func__,
+ idx, mi2s_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int mi2s_rx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_get_format_value(mi2s_rx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+ idx, mi2s_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
static int msm_mi2s_rx_ch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1514,6 +1660,162 @@ static int usb_audio_tx_format_put(struct snd_kcontrol *kcontrol,
return rc;
}
+static int ext_disp_get_port_idx(struct snd_kcontrol *kcontrol)
+{
+ int idx;
+
+ if (strnstr(kcontrol->id.name, "Display Port RX",
+ sizeof("Display Port RX")))
+ idx = DP_RX_IDX;
+ else {
+		pr_err("%s: unsupported BE: %s\n",
+ __func__, kcontrol->id.name);
+ idx = -EINVAL;
+ }
+
+ return idx;
+}
+
+static int ext_disp_rx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ switch (ext_disp_rx_cfg[idx].bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+
+ pr_debug("%s: ext_disp_rx[%d].format = %d, ucontrol value = %ld\n",
+ __func__, idx, ext_disp_rx_cfg[idx].bit_format,
+ ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int ext_disp_rx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ ext_disp_rx_cfg[idx].bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ ext_disp_rx_cfg[idx].bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: ext_disp_rx[%d].format = %d, ucontrol value = %ld\n",
+ __func__, idx, ext_disp_rx_cfg[idx].bit_format,
+ ucontrol->value.integer.value[0]);
+
+ return 0;
+}
+
+static int ext_disp_rx_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.integer.value[0] =
+ ext_disp_rx_cfg[idx].channels - 2;
+
+ pr_debug("%s: ext_disp_rx[%d].ch = %d\n", __func__,
+ idx, ext_disp_rx_cfg[idx].channels);
+
+ return 0;
+}
+
+static int ext_disp_rx_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ext_disp_rx_cfg[idx].channels =
+ ucontrol->value.integer.value[0] + 2;
+
+ pr_debug("%s: ext_disp_rx[%d].ch = %d\n", __func__,
+ idx, ext_disp_rx_cfg[idx].channels);
+ return 1;
+}
+
+static int ext_disp_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int sample_rate_val;
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ switch (ext_disp_rx_cfg[idx].sample_rate) {
+ case SAMPLING_RATE_192KHZ:
+ sample_rate_val = 2;
+ break;
+
+ case SAMPLING_RATE_96KHZ:
+ sample_rate_val = 1;
+ break;
+
+ case SAMPLING_RATE_48KHZ:
+ default:
+ sample_rate_val = 0;
+ break;
+ }
+
+ ucontrol->value.integer.value[0] = sample_rate_val;
+ pr_debug("%s: ext_disp_rx[%d].sample_rate = %d\n", __func__,
+ idx, ext_disp_rx_cfg[idx].sample_rate);
+
+ return 0;
+}
+
+static int ext_disp_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ switch (ucontrol->value.integer.value[0]) {
+ case 2:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_192KHZ;
+ break;
+ case 1:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_96KHZ;
+ break;
+ case 0:
+ default:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_48KHZ;
+ break;
+ }
+
+ pr_debug("%s: control value = %ld, ext_disp_rx[%d].sample_rate = %d\n",
+ __func__, ucontrol->value.integer.value[0], idx,
+ ext_disp_rx_cfg[idx].sample_rate);
+ return 0;
+}
+
const struct snd_kcontrol_new msm_common_snd_controls[] = {
SOC_ENUM_EXT("PROXY_RX Channels", proxy_rx_chs,
proxy_rx_ch_get, proxy_rx_ch_put),
@@ -1565,6 +1867,30 @@ const struct snd_kcontrol_new msm_common_snd_controls[] = {
SOC_ENUM_EXT("QUAT_MI2S_TX SampleRate", quat_mi2s_tx_sample_rate,
mi2s_tx_sample_rate_get,
mi2s_tx_sample_rate_put),
+ SOC_ENUM_EXT("PRIM_MI2S_RX Format", prim_mi2s_rx_format,
+ mi2s_rx_format_get,
+ mi2s_rx_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_RX Format", sec_mi2s_rx_format,
+ mi2s_rx_format_get,
+ mi2s_rx_format_put),
+ SOC_ENUM_EXT("TERT_MI2S_RX Format", tert_mi2s_rx_format,
+ mi2s_rx_format_get,
+ mi2s_rx_format_put),
+ SOC_ENUM_EXT("QUAT_MI2S_RX Format", quat_mi2s_rx_format,
+ mi2s_rx_format_get,
+ mi2s_rx_format_put),
+ SOC_ENUM_EXT("PRIM_MI2S_TX Format", prim_mi2s_tx_format,
+ mi2s_tx_format_get,
+ mi2s_tx_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_TX Format", sec_mi2s_tx_format,
+ mi2s_tx_format_get,
+ mi2s_tx_format_put),
+ SOC_ENUM_EXT("TERT_MI2S_TX Format", tert_mi2s_tx_format,
+ mi2s_tx_format_get,
+ mi2s_tx_format_put),
+ SOC_ENUM_EXT("QUAT_MI2S_TX Format", quat_mi2s_tx_format,
+ mi2s_tx_format_get,
+ mi2s_tx_format_put),
SOC_ENUM_EXT("PRIM_MI2S_RX Channels", prim_mi2s_rx_chs,
msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
SOC_ENUM_EXT("PRIM_MI2S_TX Channels", prim_mi2s_tx_chs,
@@ -1585,16 +1911,23 @@ const struct snd_kcontrol_new msm_common_snd_controls[] = {
usb_audio_rx_ch_get, usb_audio_rx_ch_put),
SOC_ENUM_EXT("USB_AUDIO_TX Channels", usb_tx_chs,
usb_audio_tx_ch_get, usb_audio_tx_ch_put),
+ SOC_ENUM_EXT("Display Port RX Channels", ext_disp_rx_chs,
+ ext_disp_rx_ch_get, ext_disp_rx_ch_put),
SOC_ENUM_EXT("USB_AUDIO_RX Format", usb_rx_format,
usb_audio_rx_format_get, usb_audio_rx_format_put),
SOC_ENUM_EXT("USB_AUDIO_TX Format", usb_tx_format,
usb_audio_tx_format_get, usb_audio_tx_format_put),
+ SOC_ENUM_EXT("Display Port RX Bit Format", ext_disp_rx_format,
+ ext_disp_rx_format_get, ext_disp_rx_format_put),
SOC_ENUM_EXT("USB_AUDIO_RX SampleRate", usb_rx_sample_rate,
usb_audio_rx_sample_rate_get,
usb_audio_rx_sample_rate_put),
SOC_ENUM_EXT("USB_AUDIO_TX SampleRate", usb_tx_sample_rate,
usb_audio_tx_sample_rate_get,
usb_audio_tx_sample_rate_put),
+ SOC_ENUM_EXT("Display Port RX SampleRate", ext_disp_rx_sample_rate,
+ ext_disp_rx_sample_rate_get,
+ ext_disp_rx_sample_rate_put),
SOC_ENUM_EXT("PRI_TDM_RX_0 SampleRate", tdm_rx_sample_rate,
tdm_rx_sample_rate_get,
tdm_rx_sample_rate_put),
@@ -1705,6 +2038,23 @@ static void param_set_mask(struct snd_pcm_hw_params *p, int n, unsigned int bit)
}
}
+static int msm_ext_disp_get_idx_from_beid(int32_t be_id)
+{
+ int idx;
+
+ switch (be_id) {
+ case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
+ idx = DP_RX_IDX;
+ break;
+ default:
+ pr_err("%s: Incorrect ext_disp be_id %d\n", __func__, be_id);
+ idx = -EINVAL;
+ break;
+ }
+
+ return idx;
+}
+
/**
* msm_common_be_hw_params_fixup - updates settings of ALSA BE hw params.
*
@@ -1722,6 +2072,7 @@ int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
int rc = 0;
+ int idx;
pr_debug("%s: format = %d, rate = %d\n",
__func__, params_format(params), params_rate(params));
@@ -1741,6 +2092,21 @@ int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
channels->min = channels->max = usb_tx_cfg.channels;
break;
+ case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
+ idx = msm_ext_disp_get_idx_from_beid(dai_link->be_id);
+ if (IS_ERR_VALUE(idx)) {
+ pr_err("%s: Incorrect ext disp idx %d\n",
+ __func__, idx);
+ rc = idx;
+ break;
+ }
+
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ ext_disp_rx_cfg[idx].bit_format);
+ rate->min = rate->max = ext_disp_rx_cfg[idx].sample_rate;
+ channels->min = channels->max = ext_disp_rx_cfg[idx].channels;
+ break;
+
case MSM_BACKEND_DAI_AFE_PCM_RX:
channels->min = channels->max = proxy_rx_cfg.channels;
rate->min = rate->max = SAMPLING_RATE_48KHZ;
@@ -1870,48 +2236,64 @@ int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
rate->min = rate->max = mi2s_rx_cfg[PRIM_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[PRIM_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[PRIM_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_PRI_MI2S_TX:
rate->min = rate->max = mi2s_tx_cfg[PRIM_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[PRIM_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[PRIM_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_SECONDARY_MI2S_RX:
rate->min = rate->max = mi2s_rx_cfg[SEC_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[SEC_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[SEC_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_SECONDARY_MI2S_TX:
rate->min = rate->max = mi2s_tx_cfg[SEC_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[SEC_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[SEC_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_TERTIARY_MI2S_RX:
rate->min = rate->max = mi2s_rx_cfg[TERT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[TERT_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[TERT_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_TERTIARY_MI2S_TX:
rate->min = rate->max = mi2s_tx_cfg[TERT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[TERT_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[TERT_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_QUATERNARY_MI2S_RX:
rate->min = rate->max = mi2s_rx_cfg[QUAT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[QUAT_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[QUAT_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_QUATERNARY_MI2S_TX:
rate->min = rate->max = mi2s_tx_cfg[QUAT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[QUAT_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[QUAT_MI2S].bit_format);
break;
default:
@@ -2001,6 +2383,7 @@ static u32 get_mi2s_bits_per_sample(u32 bit_format)
u32 bit_per_sample;
switch (bit_format) {
+ case SNDRV_PCM_FORMAT_S32_LE:
case SNDRV_PCM_FORMAT_S24_3LE:
case SNDRV_PCM_FORMAT_S24_LE:
bit_per_sample = 32;
diff --git a/sound/soc/msm/sdm660-ext-dai-links.c b/sound/soc/msm/sdm660-ext-dai-links.c
index dbd5ed5..f64074d 100644
--- a/sound/soc/msm/sdm660-ext-dai-links.c
+++ b/sound/soc/msm/sdm660-ext-dai-links.c
@@ -1302,6 +1302,39 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = {
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
},
+ {/* hw:x,35 */
+ .name = "SLIMBUS7 Hostless",
+ .stream_name = "SLIMBUS7 Hostless",
+ .cpu_dai_name = "SLIMBUS7_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {/* hw:x,36 */
+ .name = "SDM660 HFP TX",
+ .stream_name = "MultiMedia6",
+ .cpu_dai_name = "MultiMedia6",
+ .platform_name = "msm-pcm-loopback",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+ },
};
static struct snd_soc_dai_link msm_ext_common_be_dai[] = {
@@ -1828,6 +1861,24 @@ static struct snd_soc_dai_link msm_wcn_be_dai_links[] = {
},
};
+static struct snd_soc_dai_link ext_disp_be_dai_link[] = {
+ /* DISP PORT BACK END DAI Link */
+ {
+ .name = LPASS_BE_DISPLAY_PORT,
+ .stream_name = "Display Port Playback",
+ .cpu_dai_name = "msm-dai-q6-dp.24608",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-ext-disp-audio-codec-rx",
+ .codec_dai_name = "msm_dp_audio_codec_rx_dai",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+ .be_hw_params_fixup = msm_common_be_hw_params_fixup,
+ .ignore_pmdown_time = 1,
+ .ignore_suspend = 1,
+ },
+};
+
static struct snd_soc_dai_link msm_ext_tasha_dai_links[
ARRAY_SIZE(msm_ext_common_fe_dai) +
ARRAY_SIZE(msm_ext_tasha_fe_dai) +
@@ -1835,7 +1886,8 @@ ARRAY_SIZE(msm_ext_common_be_dai) +
ARRAY_SIZE(msm_ext_tasha_be_dai) +
ARRAY_SIZE(msm_mi2s_be_dai_links) +
ARRAY_SIZE(msm_auxpcm_be_dai_links) +
-ARRAY_SIZE(msm_wcn_be_dai_links)];
+ARRAY_SIZE(msm_wcn_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
static struct snd_soc_dai_link msm_ext_tavil_dai_links[
ARRAY_SIZE(msm_ext_common_fe_dai) +
@@ -1844,7 +1896,8 @@ ARRAY_SIZE(msm_ext_common_be_dai) +
ARRAY_SIZE(msm_ext_tavil_be_dai) +
ARRAY_SIZE(msm_mi2s_be_dai_links) +
ARRAY_SIZE(msm_auxpcm_be_dai_links) +
-ARRAY_SIZE(msm_wcn_be_dai_links)];
+ARRAY_SIZE(msm_wcn_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
/**
* populate_snd_card_dailinks - prepares dailink array and initializes card.
@@ -1918,6 +1971,15 @@ struct snd_soc_card *populate_snd_card_dailinks(struct device *dev,
sizeof(msm_wcn_be_dai_links));
len4 += ARRAY_SIZE(msm_wcn_be_dai_links);
}
+ if (of_property_read_bool(dev->of_node,
+ "qcom,ext-disp-audio-rx")) {
+ dev_dbg(dev, "%s(): ext disp audio support present\n",
+ __func__);
+ memcpy(msm_ext_tasha_dai_links + len4,
+ ext_disp_be_dai_link,
+ sizeof(ext_disp_be_dai_link));
+ len4 += ARRAY_SIZE(ext_disp_be_dai_link);
+ }
msm_ext_dai_links = msm_ext_tasha_dai_links;
} else if (strnstr(card->name, "tavil", strlen(card->name))) {
len1 = ARRAY_SIZE(msm_ext_common_fe_dai);
@@ -1954,6 +2016,15 @@ struct snd_soc_card *populate_snd_card_dailinks(struct device *dev,
sizeof(msm_wcn_be_dai_links));
len4 += ARRAY_SIZE(msm_wcn_be_dai_links);
}
+ if (of_property_read_bool(dev->of_node,
+ "qcom,ext-disp-audio-rx")) {
+ dev_dbg(dev, "%s(): ext disp audio support present\n",
+ __func__);
+ memcpy(msm_ext_tavil_dai_links + len4,
+ ext_disp_be_dai_link,
+ sizeof(ext_disp_be_dai_link));
+ len4 += ARRAY_SIZE(ext_disp_be_dai_link);
+ }
msm_ext_dai_links = msm_ext_tavil_dai_links;
} else {
dev_err(dev, "%s: failing as no matching card name\n",
diff --git a/sound/soc/msm/sdm660-external.c b/sound/soc/msm/sdm660-external.c
index 43f5e0c..b603b8a 100644
--- a/sound/soc/msm/sdm660-external.c
+++ b/sound/soc/msm/sdm660-external.c
@@ -676,7 +676,7 @@ static int msm_ext_get_spk(struct snd_kcontrol *kcontrol,
static int msm_ext_set_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
pr_debug("%s()\n", __func__);
if (msm_ext_spk_control == ucontrol->value.integer.value[0])
diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c
index abf4007..b924cad 100644
--- a/sound/soc/msm/sdm660-internal.c
+++ b/sound/soc/msm/sdm660-internal.c
@@ -136,7 +136,7 @@ static struct dev_config int_mi2s_cfg[] = {
[INT2_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
[INT3_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
[INT4_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
- [INT5_MI2S] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+ [INT5_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
[INT6_MI2S] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
};
@@ -910,9 +910,6 @@ static const struct snd_kcontrol_new msm_sdw_controls[] = {
SOC_ENUM_EXT("INT4_MI2S_RX SampleRate", int4_mi2s_rx_sample_rate,
int_mi2s_sample_rate_get,
int_mi2s_sample_rate_put),
- SOC_ENUM_EXT("INT4_MI2S_RX SampleRate", int4_mi2s_rx_sample_rate,
- int_mi2s_sample_rate_get,
- int_mi2s_sample_rate_put),
SOC_ENUM_EXT("INT4_MI2S_RX Channels", int4_mi2s_rx_chs,
int_mi2s_ch_get, int_mi2s_ch_put),
SOC_ENUM_EXT("VI_FEED_TX Channels", int5_mi2s_tx_chs,
@@ -1258,7 +1255,6 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
struct snd_soc_codec *ana_cdc = rtd->codec_dais[ANA_CDC]->codec;
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(ana_cdc);
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_pcm_runtime *rtd_aux = rtd->card->rtd_aux;
struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(rtd->card);
struct snd_card *card;
int ret = -ENOMEM;
@@ -1302,17 +1298,6 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
snd_soc_dapm_sync(dapm);
- /*
- * Send speaker configuration only for WSA8810.
- * Defalut configuration is for WSA8815.
- */
- if (rtd_aux && rtd_aux->component)
- if (!strcmp(rtd_aux->component->name, WSA8810_NAME_1) ||
- !strcmp(rtd_aux->component->name, WSA8810_NAME_2)) {
- msm_sdw_set_spkr_mode(rtd->codec, SPKR_MODE_1);
- msm_sdw_set_spkr_gain_offset(rtd->codec,
- RX_GAIN_OFFSET_M1P5_DB);
- }
msm_anlg_cdc_spk_ext_pa_cb(enable_spk_ext_pa, ana_cdc);
msm_dig_cdc_hph_comp_cb(msm_config_hph_compander_gpio, dig_cdc);
@@ -1347,6 +1332,7 @@ static int msm_sdw_audrx_init(struct snd_soc_pcm_runtime *rtd)
struct snd_soc_dapm_context *dapm =
snd_soc_codec_get_dapm(codec);
struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_pcm_runtime *rtd_aux = rtd->card->rtd_aux;
struct snd_card *card;
snd_soc_add_codec_controls(codec, msm_sdw_controls,
@@ -1360,6 +1346,18 @@ static int msm_sdw_audrx_init(struct snd_soc_pcm_runtime *rtd)
snd_soc_dapm_ignore_suspend(dapm, "VIINPUT_SDW");
snd_soc_dapm_sync(dapm);
+
+ /*
+ * Send speaker configuration only for WSA8810.
+ * Default configuration is for WSA8815.
+ */
+ if (rtd_aux && rtd_aux->component)
+ if (!strcmp(rtd_aux->component->name, WSA8810_NAME_1) ||
+ !strcmp(rtd_aux->component->name, WSA8810_NAME_2)) {
+ msm_sdw_set_spkr_mode(rtd->codec, SPKR_MODE_1);
+ msm_sdw_set_spkr_gain_offset(rtd->codec,
+ RX_GAIN_OFFSET_M1P5_DB);
+ }
card = rtd->card->snd_card;
if (!codec_root)
codec_root = snd_register_module_info(card->module, "codecs",
@@ -2268,11 +2266,28 @@ static struct snd_soc_dai_link msm_int_dai[] = {
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
},
+ {/* hw:x,39 */
+ .name = "SDM660 HFP TX",
+ .stream_name = "MultiMedia6",
+ .cpu_dai_name = "MultiMedia6",
+ .platform_name = "msm-pcm-loopback",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+ },
};
static struct snd_soc_dai_link msm_int_wsa_dai[] = {
- {/* hw:x,39 */
+ {/* hw:x,40 */
.name = LPASS_BE_INT5_MI2S_TX,
.stream_name = "INT5_mi2s Capture",
.cpu_dai_name = "msm-dai-q6-mi2s.12",
@@ -2882,6 +2897,24 @@ static struct snd_soc_dai_link msm_wsa_be_dai_links[] = {
},
};
+static struct snd_soc_dai_link ext_disp_be_dai_link[] = {
+ /* DISP PORT BACK END DAI Link */
+ {
+ .name = LPASS_BE_DISPLAY_PORT,
+ .stream_name = "Display Port Playback",
+ .cpu_dai_name = "msm-dai-q6-dp.24608",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-ext-disp-audio-codec-rx",
+ .codec_dai_name = "msm_dp_audio_codec_rx_dai",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+ .be_hw_params_fixup = msm_common_be_hw_params_fixup,
+ .ignore_pmdown_time = 1,
+ .ignore_suspend = 1,
+ },
+};
+
static struct snd_soc_dai_link msm_int_dai_links[
ARRAY_SIZE(msm_int_dai) +
ARRAY_SIZE(msm_int_wsa_dai) +
@@ -2889,7 +2922,8 @@ ARRAY_SIZE(msm_int_be_dai) +
ARRAY_SIZE(msm_mi2s_be_dai_links) +
ARRAY_SIZE(msm_auxpcm_be_dai_links)+
ARRAY_SIZE(msm_wcn_be_dai_links) +
-ARRAY_SIZE(msm_wsa_be_dai_links)];
+ARRAY_SIZE(msm_wsa_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
static struct snd_soc_card sdm660_card = {
/* snd_soc_card_sdm660 */
@@ -2990,6 +3024,14 @@ static struct snd_soc_card *msm_int_populate_sndcard_dailinks(
sizeof(msm_wsa_be_dai_links));
len1 += ARRAY_SIZE(msm_wsa_be_dai_links);
}
+ if (of_property_read_bool(dev->of_node, "qcom,ext-disp-audio-rx")) {
+ dev_dbg(dev, "%s(): ext disp audio support present\n",
+ __func__);
+ memcpy(dailink + len1,
+ ext_disp_be_dai_link,
+ sizeof(ext_disp_be_dai_link));
+ len1 += ARRAY_SIZE(ext_disp_be_dai_link);
+ }
card->dai_link = dailink;
card->num_links = len1;
return card;
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index e80017f..ad3cc68 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -528,6 +528,11 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
cstream, &async_domain);
} else {
be_list[j++] = be;
+ if (j == DPCM_MAX_BE_USERS) {
+ dev_dbg(fe->dev,
+ "ASoC: MAX backend users!\n");
+ break;
+ }
}
}
for (i = 0; i < j; i++) {
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7d505e2..94ea909 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -78,8 +78,7 @@ static int dapm_up_seq[] = {
[snd_soc_dapm_dai_link] = 2,
[snd_soc_dapm_dai_in] = 4,
[snd_soc_dapm_dai_out] = 4,
- [snd_soc_dapm_aif_in] = 4,
- [snd_soc_dapm_aif_out] = 4,
+ [snd_soc_dapm_adc] = 4,
[snd_soc_dapm_mic] = 5,
[snd_soc_dapm_mux] = 6,
[snd_soc_dapm_demux] = 6,
@@ -88,7 +87,8 @@ static int dapm_up_seq[] = {
[snd_soc_dapm_mixer] = 8,
[snd_soc_dapm_mixer_named_ctl] = 8,
[snd_soc_dapm_pga] = 9,
- [snd_soc_dapm_adc] = 10,
+ [snd_soc_dapm_aif_in] = 9,
+ [snd_soc_dapm_aif_out] = 9,
[snd_soc_dapm_out_drv] = 11,
[snd_soc_dapm_hp] = 11,
[snd_soc_dapm_spk] = 11,
@@ -100,7 +100,9 @@ static int dapm_up_seq[] = {
static int dapm_down_seq[] = {
[snd_soc_dapm_pre] = 0,
[snd_soc_dapm_kcontrol] = 1,
- [snd_soc_dapm_adc] = 2,
+ [snd_soc_dapm_aif_in] = 2,
+ [snd_soc_dapm_aif_out] = 2,
+ [snd_soc_dapm_adc] = 5,
[snd_soc_dapm_hp] = 3,
[snd_soc_dapm_spk] = 3,
[snd_soc_dapm_line] = 3,
@@ -114,8 +116,6 @@ static int dapm_down_seq[] = {
[snd_soc_dapm_micbias] = 8,
[snd_soc_dapm_mux] = 9,
[snd_soc_dapm_demux] = 9,
- [snd_soc_dapm_aif_in] = 10,
- [snd_soc_dapm_aif_out] = 10,
[snd_soc_dapm_dai_in] = 10,
[snd_soc_dapm_dai_out] = 10,
[snd_soc_dapm_dai_link] = 11,
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 4f4d230..be6290d 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1931,14 +1931,14 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
- /* shutdown the BEs */
- dpcm_be_dai_shutdown(fe, substream->stream);
-
dev_dbg(fe->dev, "ASoC: close FE %s\n", fe->dai_link->name);
/* now shutdown the frontend */
soc_pcm_close(substream);
+ /* shutdown the BEs */
+ dpcm_be_dai_shutdown(fe, substream->stream);
+
/* run the stream event for each BE */
dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
@@ -2521,6 +2521,10 @@ void dpcm_be_dai_prepare_async(struct snd_soc_pcm_runtime *fe, int stream,
dpcm, domain);
} else {
dpcm_async[i++] = dpcm;
+ if (i == DPCM_MAX_BE_USERS) {
+ dev_dbg(fe->dev, "ASoC: MAX backend users!\n");
+ break;
+ }
}
}